content
stringlengths
10
4.9M
//
//  AlbumSecondViewController.h
//  DouPaiwo
//
//  Album detail screen content.
//  (Original comment: 专辑详情界面内容 — "album detail interface content".)
//
//  Created by J006 on 15/9/14.
//  Copyright (c) 2015 <EMAIL>. All rights reserved.
//

#import "BaseViewController.h"
#import "AlbumInstance.h"

// View controller that renders the detail view for a single album.
@interface AlbumSecondViewController : BaseViewController

// Configures the controller with the album whose details should be shown.
// NOTE(review): despite the "init" prefix this is a plain configuration
// method (returns void), not an Objective-C initializer — callers must
// create the instance first and then invoke this.
- (void)initAlbumSecondViewControllerWithAlbum :(AlbumInstance*)album;

@end
# Tests for ``pandas.tseries.frequencies.to_offset``: converting frequency
# aliases (strings, (multiple, alias) tuples, DateOffset instances and
# Timedeltas) into concrete ``DateOffset`` objects.
import re

import pytest

from pandas import Timedelta

import pandas.tseries.frequencies as frequencies
import pandas.tseries.offsets as offsets


@pytest.mark.parametrize(
    "freq_input,expected",
    [
        # Passing an existing offset (or the result of a previous to_offset
        # call) is a no-op.
        (frequencies.to_offset("10us"), offsets.Micro(10)),
        (offsets.Hour(), offsets.Hour()),
        # (multiple, alias) tuple form.
        ((5, "T"), offsets.Minute(5)),
        # Compound strings collapse to a single offset in the finest unit.
        ("2h30min", offsets.Minute(150)),
        ("2h 30min", offsets.Minute(150)),
        ("2h30min15s", offsets.Second(150 * 60 + 15)),
        ("2h 60min", offsets.Hour(3)),
        # Fractional components are pushed down to a finer unit.
        ("2h 20.5min", offsets.Second(8430)),
        ("1.5min", offsets.Second(90)),
        ("0.5S", offsets.Milli(500)),
        ("15l500u", offsets.Micro(15500)),
        ("10s75L", offsets.Milli(10075)),
        ("1s0.25ms", offsets.Micro(1000250)),
        ("1s0.25L", offsets.Micro(1000250)),
        ("2800N", offsets.Nano(2800)),
        # Semi-month anchors: SM -> month-end flavor, SMS -> month-start.
        ("2SM", offsets.SemiMonthEnd(2)),
        ("2SM-16", offsets.SemiMonthEnd(2, day_of_month=16)),
        ("2SMS-14", offsets.SemiMonthBegin(2, day_of_month=14)),
        ("2SMS-15", offsets.SemiMonthBegin(2)),
    ],
)
def test_to_offset(freq_input, expected):
    # Valid inputs round-trip to the expected DateOffset.
    result = frequencies.to_offset(freq_input)
    assert result == expected


@pytest.mark.parametrize(
    "freqstr,expected", [("-1S", -1), ("-2SM", -2), ("-1SMS", -1), ("-5min10s", -310)]
)
def test_to_offset_negative(freqstr, expected):
    # A leading "-" negates the whole compound frequency.
    result = frequencies.to_offset(freqstr)
    assert result.n == expected


@pytest.mark.parametrize(
    "freqstr",
    [
        "2h20m",
        "U1",
        "-U",
        "3U1",
        "-2-3U",
        "-2D:3H",
        "1.5.0S",
        "2SMS-15-15",
        "2SMS-15D",
        "100foo",
        # Invalid leading +/- signs.
        "+-1d",
        "-+1h",
        "+1",
        "-7",
        "+d",
        "-m",
        # Invalid shortcut anchors.
        "SM-0",
        "SM-28",
        "SM-29",
        "SM-FOO",
        "BSM",
        "SM--1",
        "SMS-1",
        "SMS-28",
        "SMS-30",
        "SMS-BAR",
        "SMS-BYR",
        "BSMS",
        "SMS--2",
    ],
)
def test_to_offset_invalid(freqstr):
    # see gh-13930

    # We escape string because some of our
    # inputs contain regex special characters.
    msg = re.escape(f"Invalid frequency: {freqstr}")
    with pytest.raises(ValueError, match=msg):
        frequencies.to_offset(freqstr)


def test_to_offset_no_evaluate():
    # Tuples of non-(multiple, alias) shape cannot be evaluated.
    with pytest.raises(ValueError, match="Could not evaluate"):
        frequencies.to_offset(("", ""))


@pytest.mark.parametrize(
    "freqstr,expected",
    [
        # Whitespace between (or inside) components is ignored.
        ("2D 3H", offsets.Hour(51)),
        ("2 D3 H", offsets.Hour(51)),
        ("2 D 3 H", offsets.Hour(51)),
        (" 2 D 3 H ", offsets.Hour(51)),
        (" H ", offsets.Hour()),
        (" 3 H ", offsets.Hour(3)),
    ],
)
def test_to_offset_whitespace(freqstr, expected):
    result = frequencies.to_offset(freqstr)
    assert result == expected


@pytest.mark.parametrize(
    "freqstr,expected", [("00H 00T 01S", 1), ("-00H 03T 14S", -194)]
)
def test_to_offset_leading_zero(freqstr, expected):
    # Leading zeros in component multiples are accepted.
    result = frequencies.to_offset(freqstr)
    assert result.n == expected


@pytest.mark.parametrize("freqstr,expected", [("+1d", 1), ("+2h30min", 150)])
def test_to_offset_leading_plus(freqstr, expected):
    # A single leading "+" is accepted and is a no-op.
    result = frequencies.to_offset(freqstr)
    assert result.n == expected


@pytest.mark.parametrize(
    "kwargs,expected",
    [
        (dict(days=1, seconds=1), offsets.Second(86401)),
        (dict(days=-1, seconds=1), offsets.Second(-86399)),
        (dict(hours=1, minutes=10), offsets.Minute(70)),
        (dict(hours=1, minutes=-10), offsets.Minute(50)),
        (dict(weeks=1), offsets.Day(7)),
        (dict(hours=1), offsets.Hour(1)),
        (dict(hours=1), frequencies.to_offset("60min")),
        (dict(microseconds=1), offsets.Micro(1)),
    ],
)
def test_to_offset_pd_timedelta(kwargs, expected):
    # see gh-9064
    # Timedeltas convert to the offset of their finest non-zero resolution.
    td = Timedelta(**kwargs)
    result = frequencies.to_offset(td)
    assert result == expected


def test_to_offset_pd_timedelta_invalid():
    # see gh-9064
    # A zero-length Timedelta has no meaningful frequency.
    msg = "Invalid frequency: 0 days 00:00:00"
    td = Timedelta(microseconds=0)
    with pytest.raises(ValueError, match=msg):
        frequencies.to_offset(td)


@pytest.mark.parametrize(
    "shortcut,expected",
    [
        # Un-anchored shortcuts fall back to their documented default anchor.
        ("W", offsets.Week(weekday=6)),
        ("W-SUN", offsets.Week(weekday=6)),
        ("Q", offsets.QuarterEnd(startingMonth=12)),
        ("Q-DEC", offsets.QuarterEnd(startingMonth=12)),
        ("Q-MAY", offsets.QuarterEnd(startingMonth=5)),
        ("SM", offsets.SemiMonthEnd(day_of_month=15)),
        ("SM-15", offsets.SemiMonthEnd(day_of_month=15)),
        ("SM-1", offsets.SemiMonthEnd(day_of_month=1)),
        ("SM-27", offsets.SemiMonthEnd(day_of_month=27)),
        ("SMS-2", offsets.SemiMonthBegin(day_of_month=2)),
        ("SMS-27", offsets.SemiMonthBegin(day_of_month=27)),
    ],
)
def test_anchored_shortcuts(shortcut, expected):
    result = frequencies.to_offset(shortcut)
    assert result == expected
// Re-exports the SignalStrength20 icon component from the package root.
// `export =` makes it the module's single CommonJS-style export, so both
// `import ... = require(...)` and (with esModuleInterop) default imports work.
import { SignalStrength20 } from "../../";
export = SignalStrength20;
/**
 * \brief Returns true iff the given timer is active / enqueued
 *
 * A timer counts as running when it is linked into the timer list
 * (non-NULL next/prev pointers) or when it is the timer currently
 * referenced by either of the scheduler's bookkeeping pointers.
 */
bool timer_is_running(struct timer *timer)
{
	if (!timer)
		return false;

	/* Linked into the doubly-linked timer list? */
	if (timer->next || timer->prev)
		return true;

	/* Sole element of the queue, or awaiting insertion. */
	return timer_queue == timer || timers_pending_insertion == timer;
}
Climate denier in White House prompts a 'March for Science' on Earth Day Denis Hayes, coordinator of the first Earth Day, helped organize this year's action, which may prove to be an important moment for those opposed to President Donald Trump. Denis Hayes, coordinator of the first Earth Day, helped organize this year's action, which may prove to be an important moment for those opposed to President Donald Trump. Photo: JORDAN STEAD, SEATTLEPI.COM Photo: JORDAN STEAD, SEATTLEPI.COM Image 1 of / 40 Caption Close Climate denier in White House prompts a 'March for Science' on Earth Day 1 / 40 Back to Gallery The power of climate change deniers in Congress -- plus a president who once called global warming "a hoax" -- has galvanized Earth Day on its 47th anniversary. Its main event this year, on April 22, will be a March for Science in Washington, D.C., and around the world. Its goal being "to save science from this assault," in the words of Denis Hayes, president of the Seattle-based Bullitt Foundation. "The concept of white coats marching is intriguing," said Hayes, who as a Stanford law student helped organize the first Earth Day in 1970. The assault on science is now and real. The Trump administration is talking of a 19-20 percent cut in the National Institutes of Health, the nation's bulwark against disease. The NIH usually approves grants on a multi-year basis. "If the cuts go through, they will be able to make no grants at all in 2018," Hayes said. The federal Sea Grant program supports 3,000 scientists across the country, a fair number of them -- under University of Washington auspices -- working to restore Olympic Peninsula salmon runs and bolster the shellfish industry. The Trump administration wants to totally eliminate Sea Grant. The list goes on, from blocking publications by U.S. Department of Agriculture scientists until review by political appointees, to scrubbing references to climate change on the White House website. 
The Environmental Protection Agency is targeted for a 31 percent budget cut. "Unlike any movement I can think of, environmentalism is a science-based movement," Hayes argues. The first Earth Day in 1970, with its marches and teach-ins, was science-inspired. Rachel Carson, in her bestseller "Silent Spring," had outlined the threat to bird life posed by the pesticide DDT. Scientists had outlined how nuclear testing in the atmosphere put strontium 90 into children's milk. President Richard Nixon was no environmentalist: A White House photo op showed the 37th president walking on the Pacific beach outside his San Clemente, California, home wearing wingtip shoes. Still, Nixon saw a popular cause and signed into law the National Environmental Policy Act, the Clean Air Act, the Endangered Species Act, and legislation creating the EPA. He did veto the Clean Water Act, but was overridden by an overwhelming bipartisan vote in Congress. After promising to be "the environmental president," George H.W. Bush signed amendments strengthening the Clean Air Act. "Science isn't Republican or Democratic; to the extent that we have smart or dumb public policies, science is for smart policies," Hayes said. The scientific community is speaking up, resisting a witch hunt being mounted against climate scientists by the House Science Committee and climate change-denying Rep. Joe Barton, a Texas Republican and ally of Big Oil. More than 14,000 women scientists signed a pledge, and 151 scientific institutions sent President Donald Trump a letter arguing that he should rescind his travel and immigration crackdown. The March for Science, like the Women's March earlier this year, began as a Reddit conversation. Its main event, as in 1970, will be a rally and teach-in on the National Mall in Washington, D.C. Denis and Gail Hayes were on a long-planned New Zealand trip when the desire of a National Mall march caught hold. 
Denis Hayes is now trying to raise the money necessary to pull it off, and to get the needed permits. "This has become a lot tougher since the women's march, wonderful as it was," Hayes joked. "Can we do this fast?" Hayes asked, a month before the march. Nor, as with the first Earth Day, will you need to travel to Washington, D.C., to be in on the action. According to Hayes, 400 groups around the country are planning marches or teach-ins or demonstrations. The first Earth Day, April 22, 1970, engaged 20 million Americans and launched a movement that has made America's air cleaner, cleaned up rivers that were fire hazards, and protected millions of acres of America's wild places and the country's scenic "crown jewels." It has spawned an Earth Day Network that works year-round with partners in 192 countries: A billion people now join Earth Day activities around the world, making it the globe's largest civic observance. The urgency of Earth Day in 2017 is unequaled since 1970. After all, the new EPA director, a former Oklahoma attorney general, is a climate change denier who made his mark suing the agency he now directs.
import sys
import streamlit as st
import textwrap
import networkx as nx
import pandas as pd
import altair as alt
import nx_altair as nxa
import inspect

from autogoal.kb import build_pipeline_graph


@st.cache(allow_output_mutation=True)
def eval_code(code, *variables):
    """Execute `code` and return the values bound to the named `variables`.

    Returns None for no names, a single value for one name, and a list of
    values (in order) for several names.

    NOTE(review): uses `exec` on the snippet — acceptable here because the
    snippets are generated by this demo itself, never taken from user input.
    """
    locals_dict = {}
    exec(code, globals(), locals_dict)

    if len(variables) == 0:
        return None
    if len(variables) == 1:
        return locals_dict[variables[0]]
    return [locals_dict[var] for var in variables]


class Demo:
    """Streamlit demo application showcasing AutoGOAL's main use cases."""

    def __init__(self):
        # Section label -> renderer; `run` dispatches on the sidebar choice.
        self.main_sections = {
            "Intro": self.intro,
            "High-Level API": self.high_level,
            "Pipelines": self.build_pipelines,
        }

    def intro(self):
        """Render the landing page with an overview of the demo."""
        st.write("# AutoGOAL Demos")

        st.write(
            """
            Welcome to the AutoGOAL Demo. In the left sidebar
            you will find all the available demos and additional
            controls specific to each of them.

            AutoGOAL is a framework in Python for automatically finding the best way
            to solve a given task. It has been designed mainly for automatic machine
            learning~(AutoML) but it can be used in any scenario where several possible
            strategies are available to solve a given computational task.
            """
        )

        st.write(
            """
            ## About this demo

            The purpose of this demo application is to showcase the main use cases of AutoGOAL.
            Keep in mind that AutoGOAL is a software library, i.e., meant to be used from source code.
            This demo serves as an interactive and user-friendly introduction to the library, but it
            is in no case a full-featured AutoML application.

            There are two sections to showcase different components of AutoGOAL.
            You can switch sections in the left sidebar.

            * The **High-Level API** section presents the public interface of AutoGOAL
            in several datasets.
            * The **Pipelines** section shows the internal components of AutoGOAL and
            allows to explore the possible pipelines.
            """
        )

        # FIX: "without and without" -> "with and without".
        st.write(
            """
            ## Running the code

            To execute this demo on your own infrastructure, you need AutoGOAL's docker image.
            There are two images available, with and without GPU support.

            Download the corresponding Docker image:

                docker pull autogoal/autogoal
            """
        )

        st.write(
            """
            Launch a Docker container.

                docker run --rm -p 8501:8501 autogoal/autogoal

            Navigate to <http://localhost:8501>.
            """
        )

    def high_level(self):
        """Render the High-Level API section: pick a dataset, configure and run AutoML."""
        st.write("# High-Level API")
        st.write(
            """
            AutoGOAL is first and foremost a framework for Automatic Machine Learning.
            With a few simple lines of code, you can quickly find a close to optimal
            solution for classic machine learning problems.
            """
        )

        from autogoal import datasets

        dataset_descriptions = {
            "cars": """
                [Cars](https://archive.ics.uci.edu/ml/datasets/Car+Evaluation) is a
                low-dimensionality supervised problem with 21 one-hot encoded features.
                """,
            "german_credit": """
                [German Credit](https://archive.ics.uci.edu/ml/datasets/Statlog+%28German+Credit+Data%29)
                is a low-dimensionality supervised problem with 20 categorical or numerical features.
                """,
            "abalone": """
                [Abalone](https://archive.ics.uci.edu/ml/datasets/Abalone) is a
                low-dimensionality supervised problem with 8 categorical or numerical features.
                """,
            "shuttle": """
                [Shuttle](https://archive.ics.uci.edu/ml/datasets/Statlog+(Shuttle)) is a
                low-dimensionality supervised problem with 9 numerical features.
                """,
            "yeast": """
                [Yeast](https://archive.ics.uci.edu/ml/datasets/Yeast) is a
                low-dimensionality supervised problem with 9 numerical features.
                """,
            "dorothea": """
                [Dorothea](https://archive.ics.uci.edu/ml/datasets/dorothea) is a
                high-dimensionality sparse supervised problem with 100,000 numerical features.
                """,
            "gisette": """
                [Gisette](https://archive.ics.uci.edu/ml/datasets/Gisette) is a
                high-dimensionality sparse supervised problem with 5,000 numerical features.
                """,
            "haha": """
                [HAHA 2019](https://www.fing.edu.uy/inco/grupos/pln/haha/index.html#data) is a
                text classification problem with binary classes in Spanish.
                """,
            "meddocan": """
                [MEDDOCAN 2019](https://github.com/PlanTL-SANIDAD/SPACCC_MEDDOCAN) is an
                entity recognition problem in Spanish medical documents.
                """,
        }

        # Datasets whose input/output semantic types AutoGOAL cannot infer
        # automatically and must be given explicitly.
        override_types = {
            'german_credit': ("MatrixContinuousDense()", "CategoricalVector()"),
            'dorothea': ("MatrixContinuousSparse()", "CategoricalVector()"),
            'gisette': ("MatrixContinuousSparse()", "CategoricalVector()"),
            'haha': ("List(Sentence())", "CategoricalVector()"),
            'meddocan': ("List(List(Word()))", "List(List(Postag()))"),
        }

        st.write(
            """Let's start by selecting one of the example datasets.
            These are sample datasets which are automatically downloaded by AutoGOAL,
            and can be used to benchmark new algorithms and showcase AutoML tools.
            """
        )

        dataset = st.selectbox("Select a dataset", list(dataset_descriptions))
        st.write(
            dataset_descriptions[dataset] + "Here is the code to load this dataset."
        )

        code = textwrap.dedent(
            f"""
            from autogoal.datasets import {dataset}

            X, y, *_ = {dataset}.load()
            """
        )
        st.code(code)

        X, y = eval_code(code, "X", "y")

        if st.checkbox("Preview data"):
            # Sparse matrices do not support len(); fall back to the shape.
            try:
                n_items = len(X)
            except Exception:
                n_items = X.shape[0]

            head = st.slider("Preview N first items", 0, n_items, 5)
            if isinstance(X, list):
                st.write(X[:head])
            else:
                st.write(X[:head, :])

            st.write(y[:head])

        st.write(
            """
            The next step is to instantiate an AutoML solver and run it on this problem.
            The `AutoML` class provides a black-box interface to AutoGOAL.
            You can tweak the most important parameters at the left sidebar, even though
            sensible defaults are provided for all the parameters.
            """
        )

        st.sidebar.markdown("### AutoML parameters")
        iterations = st.sidebar.number_input("Number of iterations", 1, 10000, 100)
        global_timeout = st.sidebar.number_input(
            "Global timeout (seconds)", 1, 1000, 60
        )
        pipeline_timeout = st.sidebar.number_input(
            "Timeout per pipeline (seconds)", 1, 1000, 5
        )

        from autogoal.contrib.streamlit import StreamlitLogger

        if dataset in override_types:
            input_type, output_type = override_types[dataset]
            types_code = f"""
                input={input_type},
                output={output_type},
            """
            # FIX: "explicitely" -> "explicitly".
            st.info(
                f"""
                In most cases AutoGOAL can automatically infer the input and
                output type from the dataset. Sometimes, such as with `{dataset}`,
                the user will need to provide them explicitly.
                """
            )
        else:
            types_code = ""

        code = textwrap.dedent(
            f"""
            from autogoal.kb import *
            from autogoal.ml import AutoML

            automl = AutoML(
                errors="ignore",             # ignore exceptions (e.g., timeouts)
                search_iterations={iterations}, # total iterations
                search_kwargs=dict(
                    search_timeout={global_timeout}, # max time in total (approximate)
                    evaluation_timeout={pipeline_timeout},  # max time per pipeline (approximate)
                ),
                {types_code}
            )
            """
        )

        st.code(code)
        automl = eval_code(code, "automl")

        st.write(
            """
            Click run to call the `fit` method. Keep in mind that many of these pipelines
            can be quite computationally heavy and both the hyperparameter configuration
            as well as the infrastructure where this demo is running might not allow
            for the best pipelines to execute.
            """
        )

        st.code("automl.fit(X, y)", language="Python")

        if st.button("Run it!"):
            automl.fit(X, y, logger=StreamlitLogger())

        st.write(
            """
            ## Next steps

            Take a look at the remaining examples in the sidebar.
            """
        )

    def build_pipelines(self):
        """Render the Pipelines section: explore the graph of possible pipelines."""
        st.write("# Pipelines")

        # FIX: "problems settings" -> "problem settings".
        st.write(
            "This example illustrates how AutoGOAL automatically builds "
            "a graph of pipelines for different problem settings."
        )

        from autogoal.kb._data import DATA_TYPES

        types_str = [cls.__name__ for cls in DATA_TYPES]

        # FIX: "entities and and images" -> "entities and images".
        st.write(
            """
            AutoGOAL pipeline discovery is based on a hierarchy of semantic datatypes.
            Each type represents a semantic datum that can be used in a machine learning algorithm,
            from matrices and vectors to sentences, entities and images.

            The following picture shows all available semantic data types.
            You can click the top right corner to enlarge.
            """
        )

        st.image("/code/docs/guide/datatypes.png", use_column_width=True)

        from autogoal.contrib import find_classes

        all_classes = {k.__name__: k for k in find_classes()}

        st.write(
            f"""
            ## Algorithm Library

            AutoGOAL automatically builds pipelines by selecting from a wide range of algorithms
            implemented in `contrib` modules.
            The list of all available algorithms is shown here.

            There are a total of **{len(all_classes)}** algorithms implemented.
            Select one to display some information.
            """
        )

        class_name = st.selectbox("Select an algorithm", list(all_classes))
        class_type = all_classes[class_name]

        st.write(f"### {class_type.__module__}.{class_name}")

        # The `run` annotations define how algorithms can be chained.
        run_signature = inspect.signature(class_type.run)
        st.write(f"**Input type**: {run_signature.parameters['input'].annotation}")
        st.write(f"**Output type**: {run_signature.return_annotation}")

        st.write("#### Parameters")
        params = []
        for name, param in inspect.signature(class_type.__init__).parameters.items():
            if name == 'self':
                continue
            params.append(f"* **{name}**: {param.annotation}")
        st.write("\n".join(params))

        st.write("## Pipeline Builder")
        st.write(
            """
            AutoGOAL can automatically build pipelines given a desired input and output
            value. It uses the annotations of the `run` method of each algorithm to detect
            which algorithms can be connected.

            In the following section, you can select a desired input and output types and
            explore the pipelines that AutoGOAL discovers. In the left sidebar you can
            fine-tune the input value, e.g., make it a list of elements instead of a single
            element.
            """
        )

        st.sidebar.markdown("### Configure input and output types")
        list_input = st.sidebar.number_input("Input list (level)", 0, 3, 1)
        list_output = st.sidebar.number_input("Output list (level)", 0, 3, 0)
        tuples = st.sidebar.checkbox("Is supervised (use Tuple in input)", True)

        input_type = st.selectbox(
            "Select an input type", types_str, types_str.index('Sentence')
        )
        # FIX: label typo "Select and output type" -> "Select an output type".
        output_type = st.selectbox(
            "Select an output type", types_str, types_str.index('CategoricalVector')
        )

        input_type = input_type + "()"
        for _ in range(list_input):
            input_type = f"List({input_type})"

        output_type = output_type + "()"
        for _ in range(list_output):
            # BUG FIX: the original assigned `input_type = f"List({output_type})"`
            # here, clobbering the input type and never list-wrapping the output.
            output_type = f"List({output_type})"

        if tuples:
            input_type = f"Tuple({input_type}, {output_type})"

        st.write(f"#### Defined input type:  `{input_type}`")
        st.write(f"#### Defined output type: `{output_type}`")

        # FIX: "explicitely" -> "explicitly".
        st.write(
            """
            The following code uses explicitly AutoGOAL's pipeline discovery engine
            to find all the pipelines that can be constructed from the desired
            input to the desired output.
            """
        )

        code = textwrap.dedent(
            f"""
            from autogoal.kb import *
            from autogoal.kb import build_pipelines
            from autogoal.contrib import find_classes

            # explicitly build the graph of pipelines
            space = build_pipelines(
                input={input_type},
                output={output_type},
                registry=find_classes(),
            )
            """
        )

        st.code(code)

        try:
            space = eval_code(code, "space")
        except Exception as e:
            if "No pipelines can be constructed" in str(e):
                st.error(str(e))
                st.info(
                    "Try changing the input and output type or select "
                    "**Is supervised** in the left sidebar."
                )
                return

            raise

        # FIX: "posible pipelines find by" -> "possible pipelines found by".
        st.write(
            """
            ### The Pipelines Graph

            This is the graph that represents all the possible pipelines found by AutoGOAL.
            Each node in this graph is an algorithm from the _Algorithm Library_ that is
            compatible with the input and output types of its neighbors. Any path from the
            top to the bottom of the graph represents a valid pipeline.
            """
        )

        graph = nx.DiGraph()

        def get_node_repr(node):
            # Unwrap decorated nodes; leaves expose label + originating module.
            try:
                return get_node_repr(node.inner)
            except Exception:
                return dict(
                    label=str(node).split(".")[-1],
                    module=node.__module__.split("_")[0],
                )

        for node in space.graph.nodes:
            attrs = get_node_repr(node)
            graph.add_node(attrs["label"], **attrs)

        for u, v in space.graph.edges:
            graph.add_edge(get_node_repr(u)["label"], get_node_repr(v)["label"])

        pos = nx.nx_pydot.pydot_layout(graph, prog="dot", root=space.Start)
        chart = (
            nxa.draw_networkx(graph, pos=pos, node_color="module", node_tooltip="label")
            .properties(height=500)
            .interactive()
        )

        st.altair_chart(chart, use_container_width=True)

        st.write(
            """
            ### Example Pipeline

            Here is an example pipeline that has been randomly sampled from the previous
            graph. You can try different samples. Notice how not only the nodes (algorithms)
            that participate in the pipeline are different each time, but also their
            internal hyperparameters change.

            When sampling a pipeline from the graph AutoGOAL samples all the internal
            hyperparameters as defined by the constructor. When these hyperparameters have
            complex values (e.g., an algorithm per-se), AutoGOAL recursively samples
            instances of the internal algorithms, and so on.
            """
        )

        st.code(space.sample())
        st.button("Sample another pipeline")

    def run(self):
        """Dispatch to the section selected in the sidebar."""
        main_section = st.sidebar.selectbox("Section", list(self.main_sections))
        self.main_sections[main_section]()


demo = Demo()
demo.run()
<filename>src/com/jayantkrish/jklol/cli/AbstractCli.java package com.jayantkrish.jklol.cli; import java.io.IOException; import java.util.Arrays; import java.util.Set; import joptsimple.OptionException; import joptsimple.OptionParser; import joptsimple.OptionSet; import joptsimple.OptionSpec; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; import com.google.common.collect.Iterables; import com.google.common.collect.Sets; import com.jayantkrish.jklol.boost.FunctionalGradientAscent; import com.jayantkrish.jklol.dtree.RegressionTreeTrainer; import com.jayantkrish.jklol.parallel.LocalMapReduceExecutor; import com.jayantkrish.jklol.parallel.MapReduceConfiguration; import com.jayantkrish.jklol.sequence.cli.TrainSequenceModel; import com.jayantkrish.jklol.training.DefaultLogFunction; import com.jayantkrish.jklol.training.GradientOptimizer; import com.jayantkrish.jklol.training.Lbfgs; import com.jayantkrish.jklol.training.LogFunction; import com.jayantkrish.jklol.training.LogFunctions; import com.jayantkrish.jklol.training.MinibatchLbfgs; import com.jayantkrish.jklol.training.NullLogFunction; import com.jayantkrish.jklol.training.StochasticGradientTrainer; import com.jayantkrish.jklol.util.Pseudorandom; import com.jayantkrish.jklol.util.TimeUtils; /** * Common framework for command line programs. This class provides * option parsing functionality and implementations of commonly-used * option sets, such as an option to seed the random number generator. * This class also provides basic logging functionality for the * passed-in options, making experiments easier to repeat. * <p> * Command-line programs using this class should implement all * abstract methods, then write a main method which instantiates a * class instance and invokes {@link #run}. See * {@link TrainSequenceModel} for an example. * * @author jayantk */ public abstract class AbstractCli { /** * Common sets of options which subclasses may optionally choose to * accept. 
To accept a given set of options, include the * corresponding value in the constructor for {@code AbstractCli}. */ public static enum CommonOptions { /** * Enables options for constructing a * {@code StochasticGradientTrainer}. For example, these options * include the number of training iterations and regularization * parameters. */ STOCHASTIC_GRADIENT, /** * Enables options for constructing a {@code Lbfgs} trainer. */ LBFGS, /** * Enables parallelization options configuring the execution of * embarassingly parallel tasks. These options set the default * {@code MapReduceExecutor} used by the program. For example, * these options include the maximum number of threads to use. */ MAP_REDUCE, /** * Enables options for performing boosting via functional gradient * ascent. */ FUNCTIONAL_GRADIENT_ASCENT, /** * Enables options for training regression trees. */ REGRESSION_TREE, }; private final Set<CommonOptions> opts; private OptionSet parsedOptions; // Help options. protected OptionSpec<Void> helpOpt; // Always available options. // Seed the random number generator protected OptionSpec<Long> randomSeed; // Prevents the program from printing out the input options protected OptionSpec<Void> noPrintOptions; // Stochastic gradient options. protected OptionSpec<Long> sgdIterations; protected OptionSpec<Integer> sgdBatchSize; protected OptionSpec<Double> sgdInitialStep; protected OptionSpec<Void> sgdNoDecayStepSize; protected OptionSpec<Void> sgdNoReturnAveragedParameters; protected OptionSpec<Double> sgdL2Regularization; protected OptionSpec<Double> sgdRegularizationFrequency; protected OptionSpec<Double> sgdClipGradients; protected OptionSpec<Void> sgdAdagrad; // LBFGS options. 
protected OptionSpec<Void> lbfgs; protected OptionSpec<Integer> lbfgsIterations; protected OptionSpec<Integer> lbfgsHessianRank; protected OptionSpec<Double> lbfgsL2Regularization; protected OptionSpec<Integer> lbfgsMinibatchSize; protected OptionSpec<Integer> lbfgsMinibatchIterations; protected OptionSpec<Void> lbfgsAdaptiveMinibatches; // Logging options for all optimization algorithms. protected OptionSpec<Integer> logInterval; protected OptionSpec<Integer> logParametersInterval; protected OptionSpec<String> logParametersDir; protected OptionSpec<Void> logBrief; // Map reduce options. protected OptionSpec<Integer> mrMaxThreads; protected OptionSpec<Integer> mrMaxBatchesPerThread; // Functional gradient ascent options protected OptionSpec<Integer> fgaIterations; protected OptionSpec<Integer> fgaBatchSize; protected OptionSpec<Double> fgaInitialStep; protected OptionSpec<Void> fgaNoDecayStepSize; // Regression tree options protected OptionSpec<Integer> rtreeMaxDepth; /** * Creates a command line program that accepts the specified set of * options. * * @param opts any optional option sets to accept */ public AbstractCli(CommonOptions... opts) { this.opts = Sets.newHashSet(opts); } /** * Runs the program, parsing any options from {@code args}. * * @param args arguments to the program, in the same format as * provided by {@code main}. */ public void run(String[] args) { // Add and parse options. OptionParser parser = new OptionParser(); initializeCommonOptions(parser); initializeOptions(parser); String errorMessage = null; try { parsedOptions = parser.parse(args); } catch (OptionException e) { errorMessage = e.getMessage(); } boolean printHelp = false; if (errorMessage != null) { // If an error occurs, the options don't parse. // Therefore, we must manually check if the help option was // given. 
for (int i = 0; i < args.length; i++) { if (args[i].equals("--help")) { printHelp = true; } } } if (errorMessage != null && !printHelp) { System.err.println(errorMessage); System.err.println("Try --help for more information about options."); System.exit(1); } if (printHelp || parsedOptions.has(helpOpt)) { // If a help option is given, print help then quit. try { parser.printHelpOn(System.err); } catch (IOException ioException) { throw new RuntimeException(ioException); } System.exit(0); } // Log any passed-in options. if (!parsedOptions.has(noPrintOptions)) { System.out.println("Command-line options:"); for (OptionSpec<?> optionSpec : parsedOptions.specs()) { if (parsedOptions.hasArgument(optionSpec)) { System.out.println("--" + Iterables.getFirst(optionSpec.options(), "") + " " + Joiner.on(" ").join(parsedOptions.valuesOf(optionSpec))); } else { System.out.println("--" + Iterables.getFirst(optionSpec.options(), "")); } } System.out.println(""); } // Run the program. long startTime = System.currentTimeMillis(); processOptions(parsedOptions); run(parsedOptions); long endTime = System.currentTimeMillis(); if (!parsedOptions.has(noPrintOptions)) { System.out.println("Total time elapsed: " + TimeUtils.durationToString(endTime - startTime)); } System.exit(0); } /** * Adds subclass-specific options to {@code parser}. Subclasses must * implement this method in order to accept class-specific options. * * @param parser option parser to which additional command-line * options should be added. */ public abstract void initializeOptions(OptionParser parser); /** * Runs the program using parsed {@code options}. * * @param options option values passed to the program */ public abstract void run(OptionSet options); /** * Adds common options to {@code parser}. 
* * @param parser */ private void initializeCommonOptions(OptionParser parser) { helpOpt = parser.acceptsAll(Arrays.asList("help", "h"), "Print this help message."); randomSeed = parser.accepts("randomSeed", "Seed to use for generating random numbers. " + "Program execution may still be nondeterministic, if multithreading is used."). withRequiredArg().ofType(Long.class).defaultsTo(0L); noPrintOptions = parser.accepts("noPrintOptions", "Don't print out the command-line options " + "passed in to this program or final runtime statistics."); if (opts.contains(CommonOptions.STOCHASTIC_GRADIENT)) { sgdIterations = parser.accepts("iterations", "Number of iterations (passes over the data) for stochastic gradient descent."). withRequiredArg().ofType(Long.class).defaultsTo(10L); sgdBatchSize = parser.accepts("batchSize", "Minibatch size, i.e., the number of examples processed per gradient computation. If unspecified, defaults to using the entire data set (gradient descent).") .withRequiredArg().ofType(Integer.class); sgdInitialStep = parser.accepts("initialStepSize", "Initial step size for stochastic gradient descent.") .withRequiredArg().ofType(Double.class).defaultsTo(1.0); sgdNoDecayStepSize = parser.accepts("noDecayStepSize", "Don't use a 1/sqrt(t) step size decay during stochastic gradient descent."); sgdNoReturnAveragedParameters = parser.accepts("noReturnAveragedParameters", "Get the average of the parameter iterates of stochastic gradient descent."); sgdL2Regularization = parser.accepts("l2Regularization", "Regularization parameter for the L2 norm of the parameter vector.") .withRequiredArg().ofType(Double.class).defaultsTo(0.0); sgdRegularizationFrequency = parser.accepts("regularizationFrequency", "Fraction of iterations on which to apply regularization. 
Must be between 0 and 1") .withRequiredArg().ofType(Double.class).defaultsTo(1.0); sgdClipGradients = parser.accepts("clipGradients", "Clip gradients to a max l2 norm of the given value.") .withRequiredArg().ofType(Double.class).defaultsTo(Double.MAX_VALUE); sgdAdagrad = parser.accepts("adagrad", "Use the adagrad algorithm for stochastic gradient descent."); } if (opts.contains(CommonOptions.LBFGS)) { lbfgs = parser.accepts("lbfgs"); lbfgsIterations = parser.accepts("lbfgsIterations", "Maximum number of iterations (passes over the data) for LBFGS."). withRequiredArg().ofType(Integer.class).defaultsTo(100); lbfgsHessianRank = parser.accepts("lbfgsHessianRank", "Rank (number of vectors) of LBFGS's inverse Hessian approximation.") .withRequiredArg().ofType(Integer.class).defaultsTo(30); lbfgsL2Regularization = parser.accepts("lbfgsL2Regularization", "L2 regularization imposed by LBFGS") .withRequiredArg().ofType(Double.class).defaultsTo(0.0); // Providing either of these options triggers the use of minibatch LBFGS lbfgsMinibatchIterations = parser.accepts("lbfgsMinibatchIterations", "If specified, run LBFGS on minibatches of the data with the specified number of iterations per minibatch.") .withRequiredArg().ofType(Integer.class).defaultsTo(-1); lbfgsMinibatchSize = parser.accepts("lbfgsMinibatchSize", "If specified, run LBFGS on minibatches of the data with the specified number of examples per minibatch.") .withRequiredArg().ofType(Integer.class).defaultsTo(-1); lbfgsAdaptiveMinibatches = parser.accepts("lbfgsAdaptiveMinibatches", "If given, LBFGS is run on minibatches of exponentially increasing size."); } if (opts.contains(CommonOptions.STOCHASTIC_GRADIENT) || opts.contains(CommonOptions.LBFGS)) { logInterval = parser.accepts("logInterval", "Number of training iterations between logging outputs.") .withRequiredArg().ofType(Integer.class).defaultsTo(1); logParametersInterval = parser.accepts("logParametersInterval", "Number of training iterations between serializing 
parameters to disk during training. " + "If unspecified, model parameters are not serialized to disk during training.") .withRequiredArg().ofType(Integer.class).defaultsTo(-1); logParametersDir = parser.accepts("logParametersDir", "Directory where serialized model " + "parameters are stored. Must be specified if logParametersInterval is specified.") .withRequiredArg().ofType(String.class); logBrief = parser.accepts("logBrief", "Hides training output."); } if (opts.contains(CommonOptions.MAP_REDUCE)) { mrMaxThreads = parser.accepts("maxThreads", "Maximum number of threads to use during parallel execution.") .withRequiredArg().ofType(Integer.class).defaultsTo(Runtime.getRuntime().availableProcessors()); mrMaxBatchesPerThread = parser.accepts("maxBatchesPerThread", "Number of batches of items to create per thread.") .withRequiredArg().ofType(Integer.class).defaultsTo(20); } if (opts.contains(CommonOptions.FUNCTIONAL_GRADIENT_ASCENT)) { fgaIterations = parser.accepts("fgaIterations", "Number of iterations of functional gradient ascent to perform.").withRequiredArg() .ofType(Integer.class).defaultsTo(10); fgaBatchSize = parser.accepts("fgaBatchSize", "Number of examples to process before each functional gradient update. If not provided, use the entire data set.") .withRequiredArg().ofType(Integer.class); fgaInitialStep = parser.accepts("fgaInitialStepSize", "Initial step size for functional gradient ascent.") .withRequiredArg().ofType(Double.class).defaultsTo(1.0); fgaNoDecayStepSize = parser.accepts("fgaNoDecayStepSize", "Don't use a 1/sqrt(t) step size decay during functional gradient ascent."); } if (opts.contains(CommonOptions.REGRESSION_TREE)) { rtreeMaxDepth = parser.accepts("rtreeMaxDepth", "Maximum depth of trained regression trees") .withRequiredArg().ofType(Integer.class).required(); } } /** * Initializes program state using any options processable by this * class. 
*
* @param options the parsed command-line option set for this program run
*/
private void processOptions(OptionSet options) {
  // Seed the shared RNG so runs are reproducible for a given --randomSeed.
  Pseudorandom.get().setSeed(options.valueOf(randomSeed));
  if (opts.contains(CommonOptions.MAP_REDUCE)) {
    // Install a local (multithreaded) executor as the global map-reduce backend.
    MapReduceConfiguration.setMapReduceExecutor(new LocalMapReduceExecutor(
        options.valueOf(mrMaxThreads), options.valueOf(mrMaxBatchesPerThread)));
  }
  if (opts.contains(CommonOptions.STOCHASTIC_GRADIENT) || opts.contains(CommonOptions.LBFGS)) {
    // Choose the global training log: silent when --logBrief is given,
    // otherwise the default logger with the requested intervals/directory.
    LogFunction log = null;
    if (parsedOptions.has(logBrief)) {
      log = new NullLogFunction();
    } else {
      log = new DefaultLogFunction(parsedOptions.valueOf(logInterval), false,
          options.valueOf(logParametersInterval), options.valueOf(logParametersDir));
    }
    LogFunctions.setLogFunction(log);
  }
}

/**
 * Creates a {@code StochasticGradientTrainer} configured using the
 * provided options. In order to use this method, pass
 * {@link CommonOptions#STOCHASTIC_GRADIENT} to the constructor.
 *
 * @param numExamples number of training examples; used to size batches and
 *     to convert "passes over the data" into gradient-update iterations
 * @return a stochastic gradient trainer configured using any
 *     command-line options passed to the program
 */
private StochasticGradientTrainer createStochasticGradientTrainer(int numExamples) {
  Preconditions.checkState(opts.contains(CommonOptions.STOCHASTIC_GRADIENT));

  long iterationsOption = parsedOptions.valueOf(sgdIterations);
  // Default batch size is the entire data set.
  int batchSize = numExamples;
  if (parsedOptions.has(sgdBatchSize)) {
    batchSize = parsedOptions.valueOf(sgdBatchSize);
  }
  // Convert passes over the data into the number of minibatch updates.
  // NOTE(review): the (int) cast narrows the result before widening back to
  // long, so very large iteration counts would overflow — confirm intended.
  long numIterations = (int) Math.ceil(iterationsOption * numExamples / ((double) batchSize));
  double initialStepSize = parsedOptions.valueOf(sgdInitialStep);
  double l2Regularization = parsedOptions.valueOf(sgdL2Regularization);

  LogFunction log = LogFunctions.getLogFunction();
  StochasticGradientTrainer trainer = null;
  if (!parsedOptions.has(sgdAdagrad)) {
    trainer = StochasticGradientTrainer.createWithStochasticL2Regularization(
        numIterations, batchSize, initialStepSize, !parsedOptions.has(sgdNoDecayStepSize),
        !parsedOptions.has(sgdNoReturnAveragedParameters),
        parsedOptions.valueOf(sgdClipGradients), l2Regularization,
        parsedOptions.valueOf(sgdRegularizationFrequency), log);
  } else {
    trainer = StochasticGradientTrainer.createAdagrad(
        numIterations, batchSize, initialStepSize, !parsedOptions.has(sgdNoDecayStepSize),
        !parsedOptions.has(sgdNoReturnAveragedParameters),
        parsedOptions.valueOf(sgdClipGradients), l2Regularization,
        parsedOptions.valueOf(sgdRegularizationFrequency), log);
  }
  return trainer;
}

/**
 * Creates an LBFGS optimizer (full-batch, fixed-schedule minibatch, or
 * adaptive-schedule minibatch) from the command-line options. Requires
 * {@link CommonOptions#LBFGS}.
 */
private GradientOptimizer createLbfgs(int numExamples) {
  Preconditions.checkState(opts.contains(CommonOptions.LBFGS));

  if (parsedOptions.has(lbfgsAdaptiveMinibatches)) {
    int lbfgsMinibatchSizeInt = parsedOptions.valueOf(lbfgsMinibatchSize);
    Preconditions.checkState(lbfgsMinibatchSizeInt != -1,
        "Must specify initial adaptive batch size using --lbfgsMinibatchSize");
    return MinibatchLbfgs.createAdaptiveSchedule(parsedOptions.valueOf(lbfgsHessianRank),
        parsedOptions.valueOf(lbfgsL2Regularization), numExamples,
        lbfgsMinibatchSizeInt, -1, LogFunctions.getLogFunction());
  }

  // -1 is the declared option default, meaning "not provided".
  int lbfgsMinibatchSizeInt = parsedOptions.valueOf(lbfgsMinibatchSize);
  int lbfgsMinibatchIterationsInt = parsedOptions.valueOf(lbfgsMinibatchIterations);
  if (lbfgsMinibatchSizeInt != -1 && lbfgsMinibatchIterationsInt != -1) {
    // Using these options triggers minibatch LBFGS with a fixed
    // sized schedule.
    int batchIterations = (int) Math.ceil(((double) parsedOptions.valueOf(lbfgsIterations))
        / lbfgsMinibatchIterationsInt);
    return MinibatchLbfgs.createFixedSchedule(parsedOptions.valueOf(lbfgsHessianRank),
        parsedOptions.valueOf(lbfgsL2Regularization), batchIterations,
        lbfgsMinibatchSizeInt, lbfgsMinibatchIterationsInt, LogFunctions.getLogFunction());
  } else if (lbfgsMinibatchIterationsInt == -1 && lbfgsMinibatchSizeInt == -1) {
    // Neither minibatch option given: run standard full-batch LBFGS.
    return new Lbfgs(parsedOptions.valueOf(lbfgsIterations),
        parsedOptions.valueOf(lbfgsHessianRank),
        parsedOptions.valueOf(lbfgsL2Regularization), LogFunctions.getLogFunction());
  }
  throw new UnsupportedOperationException(
      "Must specify both or neither of --lbfgsMinibatchIterations and --lbfgsMinibatchSize");
}

/**
 * Creates a gradient-based optimization algorithm based on the
 * given command-line parameters. To use this method, pass at least
 * one of {@link CommonOptions#STOCHASTIC_GRADIENT} or
 * {@link CommonOptions#LBFGS} to the constructor. This method
 * allows users to easily select different optimization algorithms
 * and parameterizations.
 *
 * @param numExamples number of training examples the optimizer will see
 * @return a gradient optimizer selected from the command-line options
 */
protected GradientOptimizer createGradientOptimizer(int numExamples) {
  if (opts.contains(CommonOptions.STOCHASTIC_GRADIENT) && opts.contains(CommonOptions.LBFGS)) {
    // Both algorithms are available; --lbfgs selects LBFGS, otherwise SGD.
    if (parsedOptions.has(lbfgs)) {
      return createLbfgs(numExamples);
    } else {
      return createStochasticGradientTrainer(numExamples);
    }
  } else if (opts.contains(CommonOptions.STOCHASTIC_GRADIENT)) {
    return createStochasticGradientTrainer(numExamples);
  } else if (opts.contains(CommonOptions.LBFGS)) {
    return createLbfgs(numExamples);
  }
  throw new UnsupportedOperationException("To use createGradientOptimizer, the CLI constructor must specify STOCHASTIC_GRADIENT and/or LBFGS.");
}

/**
 * Creates a functional gradient ascent trainer from the command-line
 * options. Requires {@link CommonOptions#FUNCTIONAL_GRADIENT_ASCENT}.
 */
protected FunctionalGradientAscent createFunctionalGradientAscent(int numExamples) {
  Preconditions.checkState(opts.contains(CommonOptions.FUNCTIONAL_GRADIENT_ASCENT));
  // Batch size defaults to the entire data set when --fgaBatchSize is absent.
  int batchSize = parsedOptions.has(fgaBatchSize) ?
      parsedOptions.valueOf(fgaBatchSize) : numExamples;
  // Convert passes over the data into per-batch update iterations.
  int iterations = (int) Math.ceil(parsedOptions.valueOf(fgaIterations) * numExamples
      / ((double) batchSize));
  double initialStep = parsedOptions.valueOf(fgaInitialStep);
  boolean noDecay = parsedOptions.has(fgaNoDecayStepSize);
  LogFunction log = new DefaultLogFunction(1, false);
  return new FunctionalGradientAscent(iterations, batchSize, initialStep, !noDecay, log);
}

/**
 * Creates a regression tree trainer with the configured maximum depth.
 * Requires {@link CommonOptions#REGRESSION_TREE}.
 */
protected RegressionTreeTrainer createRegressionTreeTrainer() {
  Preconditions.checkState(opts.contains(CommonOptions.REGRESSION_TREE));
  return new RegressionTreeTrainer(parsedOptions.valueOf(rtreeMaxDepth));
}
}
// Called every time the scheduler runs while the command is scheduled. @Override public void execute() { currentAngle = turret.getPosition(); if(targetAngle > currentAngle){ turret.move(-.6); } if(targetAngle < currentAngle){ turret.move(.6); } }
/**
 * Add hex integer BEFORE current getIndex.
 *
 * The digits are written backwards from getIndex and the get index is moved
 * to the first written byte.
 *
 * @param buffer the buffer to prepend into (must have space before getIndex)
 * @param n the value to format in hexadecimal (any int, including
 *          Integer.MIN_VALUE)
 */
public static void prependHexInt(Buffer buffer, int n)
{
    if (n==0)
    {
        int gi=buffer.getIndex();
        buffer.poke(--gi,(byte)'0');
        buffer.setGetIndex(gi);
    }
    else
    {
        boolean minus=false;
        // Fix: widen to long before negating. The original "n=-n" overflows
        // for Integer.MIN_VALUE (it stays negative), so the digit loop never
        // ran and only '-' was written.
        long v=n;
        if (v<0)
        {
            minus=true;
            v=-v;
        }

        int gi=buffer.getIndex();
        while(v>0)
        {
            // Peel off the low nibble and write it in front of the previous one.
            int d = (int)(0xf&v);
            v=v>>4;
            buffer.poke(--gi,DIGIT[d]);
        }

        if (minus)
            buffer.poke(--gi,(byte)'-');
        buffer.setGetIndex(gi);
    }
}
/*!
 * @file SFGamma.cc
 * @brief Implementation of various forms of the Gamma function.
 * @author Segev BenZvi
 * @date 17 Apr 2012
 * @version $Id: SFGamma.cc 26761 2015-08-25 20:53:58Z tweisgarber $
 *
 * Each function uses the GSL implementation when HAVE_GSL is defined and
 * otherwise falls back to Boost.Math (plus <cmath>).
 */

#include <hawcnest/Logging.h>
#include <data-structures/math/SpecialFunctions.h>

#ifdef HAVE_GSL
// #include <gsl/gsl_sf_erf.h>
#include <gsl/gsl_sf_gamma.h>
#include <gsl/gsl_sf_psi.h>
#include <gsl/gsl_cdf.h>
// #else
//
#include <boost/math/special_functions/gamma.hpp>
#include <boost/math/special_functions/expint.hpp>
#include <boost/math/special_functions/digamma.hpp>
#include <boost/math/special_functions/erf.hpp>
#include <cmath>

using namespace std;

// #endif // HAVE_GSL

namespace SpecialFunctions {

  // The complete gamma function; aborts via log_fatal for a <= 0
  double
  Gamma::G(const double a)
  {
    if (a <= 0.)
      log_fatal("domain error: a = " << a << " <= 0");
#ifdef HAVE_GSL
    return gsl_sf_gamma(a);
#else
    return boost::math::tgamma(a);
#endif
  }

  // The natural logarithm of the gamma function
  double
  Gamma::lnG(const double a)
  {
    if (a <= 0.)
      log_fatal("domain error: a <= 0");
#ifdef HAVE_GSL
    return gsl_sf_lngamma(a);
#else
    return boost::math::lgamma(a);
#endif
  }

  // The lower incomplete gamma function, computed from its regularized
  // form: g(a,x) = Gamma(a) * P(a,x)
  double
  Gamma::g(const double a, const double x)
  {
    if (a <= 0. || x < 0.)
      log_fatal("domain error: a <= 0 || x < 0");
    return Gamma::G(a) * Gamma::P(a, x);
  }

  // The regularized lower incomplete gamma function
  double
  Gamma::P(const double a, const double x)
  {
    if (a <= 0. || x < 0.)
      log_fatal("domain error: a <= 0 || x < 0");
#ifdef HAVE_GSL
    return gsl_sf_gamma_inc_P(a, x);
#else
    return boost::math::gamma_p(a, x);
#endif
  }

  // The upper incomplete gamma function
  double
  Gamma::G(const double a, const double x)
  {
    // Note: a < 0 allowed as long as x >= 0
    if (x < 0.)
      log_fatal("domain error: x < 0");
#ifdef HAVE_GSL
    return gsl_sf_gamma_inc(a, x);
#else
    if (a > 0.) {
      return boost::math::tgamma(a, x);
    }
    else if (a == 0.)
    {
      // G(0,x) equals the exponential integral E1(x)
      return boost::math::expint(1, x);
    }
    else {
      // Use the recurrence relation G(a+1,x) = a*G(a,x) + x^a*exp(-x)
      // to calculate the upper incomplete gamma function for a < 0:
      // start from the fractional part of a and step alpha down by 1
      // until it reaches a.
      // NOTE(review): the log(x) term is singular at x == 0, so this
      // branch appears to assume x > 0 when a < 0 — confirm with callers.
      const double da = a - floor(a);
      double alpha = da;
      double ga = (da > 0. ? boost::math::tgamma(da, x) : boost::math::expint(1, x));
      do {
        const double shift = exp(-x + (alpha - 1.)*log(x));
        ga = (ga - shift) / (alpha - 1.);
        alpha -= 1.;
      } while (alpha > a);
      return ga;
    }
#endif
  }

  // The regularized upper incomplete gamma function
  double
  Gamma::Q(const double a, const double x)
  {
    // Note: a < 0 not allowed here due to pole in the Gamma function
    if (a <= 0. || x < 0.)
      log_fatal("domain error: a <= 0 || x < 0");
#ifdef HAVE_GSL
    return gsl_sf_gamma_inc_Q(a, x);
#else
    return boost::math::gamma_q(a, x);
#endif
  }

  // The error function
  double
  Gamma::Erf(const double x)
  {
#ifdef HAVE_GSL
    return gsl_sf_erf(x);
#else
    return boost::math::erf(x);
#endif
  }

  // The complementary error function
  double
  Gamma::Erfc(const double x)
  {
#ifdef HAVE_GSL
    return gsl_sf_erfc(x);
#else
    return boost::math::erfc(x);
#endif
  }

  // The inverse error function; the GSL branch maps through the inverse
  // standard-normal CDF: erf^-1(x) = Phi^-1((1+x)/2) / sqrt(2) relation as
  // implemented below via gsl_cdf_ugaussian_Pinv
  double
  Gamma::ErfInverse(const double x)
  {
#ifdef HAVE_GSL
    //return gsl_sf_erfc(x);
    return gsl_cdf_ugaussian_Pinv(0.5*(1.0+x));
#else
    return boost::math::erf_inv(x);
#endif
  }

  // The digamma function (psi)
  double
  Gamma::DG(const double x)
  {
#ifdef HAVE_GSL
    return gsl_sf_psi(x);
#else
    return boost::math::digamma(x);
#endif
  }

}
<reponame>ThorbenKuck/Keller package com.github.thorbenkuck.keller.di; import java.util.Map; public interface InstantiateStrategy { <T> T construct(final Class<T> type, final Map<Class<?>, Object> bindings); default <T> T get(final Class<T> type, final Map<Class<?>, Object> bindings) { if(bindings.get(type) != null) { return (T) bindings.get(type); } return construct(type, bindings); } boolean isApplicable(final Class<?> clazz); }
<reponame>max0x4e/bytecode<filename>src/ghc-9.2.1/libraries/directory/tests/FindFile001.hs<gh_stars>0
{-# LANGUAGE CPP #-}
-- Tests for findFile / findFileWith / findFilesWith from System.Directory:
-- laziness of the directory list, relative-path resolution order, predicate
-- filtering, and absolute-path handling. The T(expectEq) macro and the test
-- harness (TestEnv, createDirectory, findFile, ...) come from util.inl.
module FindFile001 where
#include "util.inl"
import qualified Data.List as List
import System.FilePath ((</>))

main :: TestEnv -> IO ()
main _t = do
  -- Fixture: "foo" exists at the root and under bar/ and qux/;
  -- only qux/foo has the content ":3" that predicate f matches below.
  createDirectory "bar"
  createDirectory "qux"
  writeFile "foo" ""
  writeFile ("bar" </> "foo") ""
  writeFile ("qux" </> "foo") ":3"

  -- make sure findFile is lazy enough
  -- (the search must stop at "." without forcing the undefined tail)
  T(expectEq) () (Just ("." </> "foo")) =<< findFile ("." : undefined) "foo"

  -- make sure relative paths work
  -- (the first directory containing the file wins)
  T(expectEq) () (Just ("." </> "bar" </> "foo")) =<< findFile ["."] ("bar" </> "foo")
  T(expectEq) () (Just ("." </> "foo")) =<< findFile [".", "bar"] ("foo")
  T(expectEq) () (Just ("bar" </> "foo")) =<< findFile ["bar", "."] ("foo")

  -- Predicate f accepts only the file whose content is ":3" (i.e. qux/foo).
  -- Check every directory ordering to make sure the predicate, not the
  -- order, decides which matches are returned.
  let f fn = (== ":3") <$> readFile fn
  for_ (List.permutations ["qux", "bar", "."]) $ \ ds -> do
    let (match, noMatch) = List.partition (== "qux") ds
    T(expectEq) ds (Just (List.head match </> "foo")) =<< findFileWith f ds "foo"
    T(expectEq) ds ((</> "foo") <$> match) =<< findFilesWith f ds "foo"
    T(expectEq) ds (Just (List.head noMatch </> "foo")) =<< findFileWith ((not <$>) . f) ds "foo"
    T(expectEq) ds ((</> "foo") <$> noMatch) =<< findFilesWith ((not <$>) . f) ds "foo"
    -- A predicate that rejects everything must yield no results.
    T(expectEq) ds Nothing =<< findFileWith (\ _ -> return False) ds "foo"
    T(expectEq) ds [] =<< findFilesWith (\ _ -> return False) ds "foo"

  -- make sure absolute paths are handled properly irrespective of 'dirs'
  -- https://github.com/haskell/directory/issues/72
  absPath <- makeAbsolute ("bar" </> "foo")
  absPath2 <- makeAbsolute ("bar" </> "nonexistent")
  T(expectEq) () (Just absPath) =<< findFile [] absPath
  T(expectEq) () Nothing =<< findFile [] absPath2
/** * Component registering url policies and document view codecs. * * @author <a href="mailto:[email protected]">Anahide Tchertchian</a> */ public class URLServiceComponent extends DefaultComponent { public static final String NAME = URLServiceComponent.class.getName(); public static final String URL_PATTERNS_EXTENSION_POINT = "urlpatterns"; protected URLPolicyService urlPolicyService; @Override public void activate(ComponentContext context) { urlPolicyService = new URLPolicyServiceImpl(); } @Override public void deactivate(ComponentContext context) { urlPolicyService.clear(); urlPolicyService = null; } @Override @SuppressWarnings("unchecked") public <T> T getAdapter(Class<T> adapter) { if (adapter.isAssignableFrom(URLPolicyService.class)) { return (T) urlPolicyService; } return null; } @Override public void registerContribution(Object contribution, String extensionPoint, ComponentInstance contributor) { if (URL_PATTERNS_EXTENSION_POINT.equals(extensionPoint)) { urlPolicyService.addPatternDescriptor((URLPatternDescriptor) contribution); } } @Override public void unregisterContribution(Object contribution, String extensionPoint, ComponentInstance contributor) { if (URL_PATTERNS_EXTENSION_POINT.equals(extensionPoint)) { urlPolicyService.removePatternDescriptor((URLPatternDescriptor) contribution); } } }
Utility of the HandScan in monitoring disease activity and prediction of clinical response in rheumatoid arthritis patients: an explorative study Abstract Objectives The aims were to determine the ability of the HandScan to measure RA disease activity longitudinally, compared with DAS28, and to determine whether short-term (i.e. 1 month) changes in the OST score can predict treatment response at 3 or 6 months. Methods Participants visited the outpatient clinic before the start of (additional) RA medication and 1, 3 and 6 months thereafter. Disease activity was monitored at each visit with the HandScan and DAS28 in parallel. A mixed effects model with DAS28 as the outcome variable with a random intercept at patient level, visit month and DAS28 one visit earlier was used to evaluate whether changes in the OST score are related to changes in DAS28. Binary logistic regression was used to test the predictive value of short-term changes in the OST score together with the baseline OST score for achievement of treatment response (EULAR or ACR criteria). All models were adjusted for RA stage (early or established). Results In total, 64 RA patients were included. One unit change in OST score was found to be related to an average DAS28 change of 0.03 (95% CI: 0.01, 0.06, P = 0.03). When adding OST score as a variable in the longitudinal model, the ability of the model to estimate DAS28 (i.e. explained variance) increased by 2%, to 59%. Neither baseline OST score nor short-term change in OST score was predictive for treatment response at 3 or 6 months. Conclusion A longitudinal association of OST score with DAS28 exists, although explained variance is low. The predictive ability of short-term changes in HandScan for treatment response is limited. Introduction The treatment of RA has improved significantly over the last decades owing to earlier and more intensive treatment, with swift adjustment of treatment if the target is not achieved . 
To treat RA patients effectively, it is important to focus on achieving and maintaining remission (treat-to-target principle), thereby preventing or restricting joint damage. Therefore, patients visit the outpatient clinic regularly to monitor disease activity (i.e. tight-control principle) . The DAS assessing 28 joints (DAS28) is widely used to evaluate disease activity in individual patients. Joint tenderness and swelling of 28 joints, together with an acute phase reactant (ESR or CRP) and a visual analogue scale for the patient's experience of disease activity, are combined in the composite DAS28 measure. This method of evaluating disease activity has considerable inter-and intra-assessor variability, especially without formal training of assessors, and is time consuming and somewhat subjective . The HandScan, based on the principle of optical spectral transmission (OST), is a new method that has been developed to measure RA inflammation in hand (i.e. MCP1-5, IP1 and PIP2-5) and wrist joints. The RA patient places both hands in the HandScan and, by using red/near-infrared light, the grade of inflammation is assessed per joint (i.e. individual joint score), in addition to providing a total score of all included joints (i.e. total OST score). A HandScan measurement can be performed at any location, if the device is available, within 5 min, without taking much time of a health-care professional . More detailed information is provided in Supplementary Data S1, available at Rheumatology Advances in Practice online. In a cross-sectional study, the OST score as provided by the HandScan (range 0-66 ¼ worst inflammation) was reproducible, and it was correlated (coefficient ¼ 0.54) with the grade of inflammation of hand and wrist joints as assessed by ultrasonography . 
The outcome of the HandScan was more sensitive in detecting subclinical disease activity (as determined by ultrasonography) than physical examination, and its assessment is less time consuming than that of DAS28 . In addition, the HandScan might facilitate early detection of response to treatment, typically assessed at 3-6 months after the start of (added) therapy. This might be particularly relevant in (early) RA patients treated according to the tight-control principle, stepping up treatment to more intensive (biological) treatment modalities, such as TNF inhibitors (TNFi) . All previous research with the HandScan was cross-sectional. However, in light of the treat-to-target principle, it is important to establish specifically whether changes in OST score are associated with changes in DAS28 (as reference standard) in individual RA patients (i.e. whether a longitudinal association of OST score with disease activity exists). Also, for optimal treat-to-target strategies, it would be valuable if the OST score could predict clinical response to treatment early after treatment initiation. Furthermore, during the last decades patient-reported outcomes have become of more interest as a measure for the impact of disease; therefore, the relationship of OST score to individual components of DAS28, functional disability and quality of life of patients is also of interest . The aim of our explorative study was to determine the longitudinal association of the HandScan with DAS28 (i.e. whether changes OST score are related to changes in DAS28) in individual RA patients, which, if present and strong enough, would provide a rationale for its use as a disease activity monitoring instrument like DAS28. In addition, the longitudinal association of OST score with the swollen joint count (SJC), tender joint count (TJC), functional disability and quality of life of patients was determined. Furthermore, the ability of short-term (i.e. 
baseline to 1 month) changes in OST score to predict clinical response to conventional synthetic DMARDs (csDMARDs) or TNFi treatment at 3 or 6 months was studied. We hypothesized that a longitudinal association between OST score and DAS28 exists. Furthermore, we hypothesized that short-term changes in OST score can predict clinical response to treatment. Methods This is an observational cohort study, among RA patients. The institutional review boards of the participating centres confirmed that the Medical Research Involving Human Subjects Act (WMO) was not applicable to this study, and all patients gave written informed consent. Consecutive early and established RA patients visiting the outpatient clinic of participating centres, from 1 April 2017 to 31 May 2019, and satisfying the inclusion criteria were all eligible for inclusion. Inclusion criteria were meeting the 2010 ACR/EULAR criteria and age >18 years. Early RA patients were further required to be DMARD naïve and started DMARD therapy, usually a csDMARD such as MTX, according to the tightcontrolled treat-to-target principle. Established RA patients started with or switched to another TNFi because of active disease, also in a tight-controlled manner, as additional therapy. Exclusion criteria for both cohorts were rheumatic autoimmune disease other than RA or a current inflammatory joint disease other than RA (e.g. gout). Other exclusion criteria were glucocorticoid use <6 weeks before baseline for early RA and previous use of the same TNFi (i.e. restarting treatment) for established RA. All included patients visited the outpatient clinic immediately before starting their (additional) treatment (baseline) and 1, 3 and 6 months thereafter (i.e. tight controlled). In early RA patients, the csDMARD dose (typically MTX starting at 10 mg/week) was increased, if necessary, every month in steps of 5 mg, according to the treat-to-target principle. 
In established RA patients, the dose of the TNFi started was not modified during the study period of 6 months. Disease activity was measured at each visit, first with the HandScan and shortly afterwards with DAS28. The following baseline data were collected: age, gender, BMI, smoking status, alcohol use, RF status and anti-CCP status. DAS28 (and its components) and OST scores were collected at every visit, whereas the functional ability and quality of life were assessed at baseline and every 3 months thereafter, using the HAQ and EuroQol five dimensions questionnaire (EQ5D-5L), respectively. Statistical analysis Baseline characteristics and treatment response were described for all patients and stratified by RA stage (early or established; csDMARD therapy or TNFi therapy). Data of early and established RA were combined to obtain a more adequate sample size. The effect of RA stage was taken into account in all model-based analyses (e.g. see explanation of the mixed effect models and the binary logistic regression models later in this subsection) . Pearson or Spearman correlation coefficients, depending on the distribution of the data, of DAS28, SJC, TJC, HAQ and EQ5D-5L, with OST score were calculated for all patients, both concurrently and with time lags to explore the crude associations of OST score over time with other frequently used outcome measures. To determine whether changes in OST scores are related to changes in DAS28 in individual patients, an autoregressive mixed effects model with a random intercept at patient level was used . The outcome variable was DAS28; independent variables were OST score, visit month, RA stage and DAS28 at previous visit (i.e. autoregressor). The same analyses were performed for SJC (square root transformed), TJC (square root transformed), HAQ and EQ5D-5L as respective outcome variables. 
It was also explored whether RA stage (early vs established; csDMARD vs TNFi) modified the association between OST score and the outcomes by adding the interaction term (e.g. OST score*RA stage). Binary logistic regression was used to test the predictive value of short-term (i.e. 1 month) change in OST score together with baseline OST score for the outcome EULAR good response (yes/no), and ACR50 response (yes/no) at 3 or 6 months. Baseline DAS28 and short-term (i.e. 1 month) change in DAS28 were also evaluated in a similar separate analysis for comparison with the former model. This analysis was also adjusted for RA stage (early vs established) because the initiated therapy differed (csDMARD vs TNFi), and it was tested whether RA stage modified the association between changes in OST score and outcome (i.e. adding the interaction term OST score*RA stage). Owing to the exploratory nature of this study, no power calculation was performed. The statistical analyses were performed in SAS v.9.4 (SAS Institute, Inc; Cary, North Carolina, USA). All tests were two sided, and a P-value of 0.05 was considered statistically significant. Seven of 64 patients had missing information on DAS28 and/or OST score, but only at the 6 month visit. Given that mixed model analysis, using all longitudinally available data of the patients, is robust against sporadically missing data, imputation was deemed to have no additional value in this situation and was not performed . Results In total, 64 RA patients were included: n ¼ 32 with early RA (DMARD naïve, starting MTX and prednisone) and n ¼ 32 with established RA (starting with first or consecutive TNFi as additional therapy). All early RA patients were treated according to EULAR guidelines and remained on MTX treatment during the study. Regarding established RA patients, 26 of 32 were bDMARD naïve and started treatment with a first TNFi, whereas the others started a consecutive TNFi. 
More detailed information about medication use is shown in Supplementary Table S1, available at Rheumatology Advances in Practice online. In early RA patients, no treatment failures during the 6 month follow-up were observed, whereas five patients in the established RA cohort discontinued TNFi therapy owing to insufficient effectiveness. Three of them switched after 3 months to another bDMARD. One of 32 established RA patients experienced an adverse event (not related to TNFi therapy) and stopped therapy. Table 1 provides an overview of the baseline characteristics and treatment response of all patients, and separately, per cohort. Overall, similar outcomes were observed for early and established RA, except for SJC28 and number of alcohol users, both at baseline, and response to treatment during the study period, except for changes in HAQ (see Table 1). The DAS28, OST score, SJC, TJC, HAQ and EQ5D-5L values over time are shown in Fig. 1. The concurrent (i.e. at the same time point) correlations between DAS28 and OST score and between SJC and OST score were moderate (correlation coefficients ranging from 0.18 to 0.39 and from 0.35 to 0.47, respectively) and statistically significant. Lower (often) nonstatistically significant correlations of OST score were found with TJC, HAQ and EQ5D-5L (see Table 2). Nonconcurrent correlations were also generally lower and often not statistically significant. The longitudinal analysis showed that one unit change in OST score was associated with a change in DAS28 of, on average, 0.03 units (95% CI: 0.01, 0.06). Using standardized values, this could be interpreted as a change of one S.D. unit in OST score being related to a change in DAS28 of, on average, 0.13 S.D. unit (95% CI: 0.03, 0.23). Hence, changes in DAS28 value can, to some extent, be estimated from changes in the OST score. This association was not modified by RA stage (P ¼ 0.96 for the interaction term). 
When adding OST score to the model with only the previous DAS28 (autoregressor) and visit, the ability of the model to estimate DAS28 over time (i.e. explained variance) increased, by 2%, to 59% (Fig. 2). Changes in SJC and TJC of one S.D. unit were, on average, related to changes of 0.18 (95% CI: 0.05, 0.31) and 0.16 (95% CI: 0.05, 0.25) S.D. units of OST score, respectively. The explained variance increased by 4 and 3%, respectively (to 32 and 43%, respectively) when adding the OST score to the models. The association with SJC (but not TJC, P = 0.52 for the interaction term) was found to be modified by RA stage (P = 0.03 for the interaction term). Stratified analyses showed that one S.D. unit of OST score change was, on average, related to 0.08 (95% CI: −0.08, 0.14) and 0.37 (95% CI: 0.15, 9.52). Results for EULAR response at 6 months were in line with the above (see Table 3). For ACR50 response at 3 months, none of the variables were significant predictors. Short-term change in DAS28 (OR 3.92, 95% CI: 1.57, 9.28; standardized OR 3.69, 95% CI: 1.65, 9.52) was a significant predictor for ACR50 response at 6 months (see Table 3). In all analyses, the association of the short-term change in OST score with treatment response was not modified by RA stage as tested in the models (P = 0.44/P = 0.22 and P = 0.20/P = 0.30 for ACR50 and EULAR good response at 3/6 months, respectively). Discussion In this first longitudinal study of the HandScan, the concurrent correlations of HandScan (expressed as OST score) and DAS28 were, in general, low to moderate, consistent with data of a previous cross-sectional study.
Although we established a longitudinal association of the HandScan with DAS28, which would be a prerequisite for using such an instrument for monitoring disease activity over time, the added value (explained variance) was limited. A plausible explanation for the low ability to estimate DAS28 with OST scores might be that, in addition to the number of tender and swollen joints (from 28), an acute phase reactant (i.e. ESR) and a visual analogue scale expressing the patients' assessment of disease activity are part of DAS28, whereas the OST score measures only RA inflammation of the hand and wrist joints (with a maximum of 22 joints). Therefore, we also evaluated components of DAS28 separately. The association with TJC and, especially, SJC was (somewhat) stronger than with DAS28, as apparent from the higher standardized regression coefficients and increase in explained variance by adding OST score to the longitudinal model. We could not establish a predictive association of baseline OST score or short-term changes in OST score with later response to treatment. This lack of predictive ability might also be attributable, in part, to the fact that OST scores only reflect joint inflammation in a limited set of joints, and response criteria are based on composite scores. As shown in Supplementary Table S1, available at Rheumatology Advances in Practice online, glucocorticoid therapy was used in early and established RA. Glucocorticoid therapy diminishes disease activity, but this will probably have been the case equally for DAS28 and the OST score. Therefore, we think that this has had no influence or only limited influence on the results of our main analysis (i.e. the longitudinal association between OST score and DAS28). Given that inflammation of OA joints is generally considerably less than in RA joints, and the DIP joints (mostly affected in OA) are not assessed in the HandScan, we expect the influence of concomitant OA on OST score results to have been limited.
A limitation of this study is that the sample size is modest. The intention of the present study was to explore whether a longitudinal association of OST score with DAS28 existed, which is a prerequisite for using OST scores as a disease activity measurement in patients over time. Therefore, we aimed to include !30 early and 30 established RA patients. In the analyses, we combined early (n ¼ 32) and established (n ¼ 32) RA patients, correcting for RA stage. It turned out that RA stage did not influence the longitudinal association between OST score and other outcomes, except for SJC. A possible explanation might be the fact that in early RA patients the SJC was often zero at follow-up owing to the strict treat-to-target treatment approach, possibly obscuring small changes over time, whereas SJC was generally higher in established RA patients . In addition, the predictive association between short-term changes in OST score and longer-term response was also not influenced by RA stage. Furthermore, one would expect that the type of treatment might influence the ability of OST scores to detect changes in disease activity, because bDMARDs are known to suppress tissue vascularity more rapidly . We tested whether FIG. 2 Observed DAS28 vs estimated DAS28 (using full model with optical spectral transmission score) DAS28: DAS assessing 28 joints; predicted DAS28: DAS28 as estimated by the model, with optical spectral transmission score, visit month and DAS28 at the previous visit as variables. the relationship between OST scores and DAS28(-based response) was different between early (i.e. csDMARDtreated) and established (i.e. bDMARD-treated) patients, but could not detect a significant effect. Of course, given that the effect of RA stage and treatment modality are intertwined in our study, this might have muddled this effect. 
In the bDMARD-treated group, patients could have started their next bDMARD, which could have diminished the potential change in joint vascularity (and thus the ability of the HandScan to detect it), because vascularity was already reduced by the previous bDMARD. It is known that even in patients with inadequate response to bDMARDs, progression of joint damage is inhibited , and thus probably also joint vascularity. Lastly, given that the HandScan measures only hand and wrist joints, it might be applicable mainly for the subset of RA patients with involvement of the hand joints. This first study assessing the longitudinal association of the HandScan with disease activity measures relevant in monitoring treatment response warrants future research focusing on the development of a composite measure to assess disease activity where a joint count assessment (i.e. SJC and TJC) is replaced by OST scores. OST scores can be obtained without visiting a physician, because a HandScan measurement can be performed easily by a non-health-care professional, and at any location where the device can be placed; for example, in the outpatient waiting room. By implementing a disease activity index (including only variables that are assessed without visiting a physician, i.a. OST scores) into daily practice, the time of rheumatologists and/or nurse practitioners might be saved in busy outpatient clinics, because only those patients with active disease would require an additional visit to the rheumatologist or health-care professional for a more detailed assessment, including joint counts. Conclusion A longitudinal association of OST score with DAS28 exists, although the relationship is weak. As such, in this setting the OST score as a single measuring instrument is insufficient to assess disease activity comprehensively in RA patients. However, combining the OST score with other (routinely used) disease activity parameters might result in an adequate composite disease activity measure. 
M.M.A.V., J.T. and P.M.J.W. contributed to study design, data cleaning and data analysis. All authors contributed to data interpretation and writing the manuscript. All authors approved the final version and agree to be accountable for all aspects of the work. Funding: The current work was supported by ZonMW 2Treat-MODIRA 436001001, University Medical Center Utrecht. Disclosure statement: J.M.v.L. reports grants from Roche, personal fees from Roche, personal fees from Arthrogen, grants from Thermofisher, personal fees from BMS, grants from MSD, personal fees from Eli Lilly, personal fees from Gesynta, personal fees from Leadiant, personal fees from Arxx Tx, grants from Astra Zeneca, personal fees from Sanofi, outside the submitted work. The other authors have declared no conflicts of interest. Data availability statement The data underlying this article will be shared on reasonable request to the corresponding author. Supplementary data Supplementary data are available at Rheumatology Advances in Practice online.
/** * Normalize point sample. * * @return the point sample */ @Nonnull public PointSample normalize() { if (count == 1) { this.addRef(); return this; } else { @Nonnull DeltaSet<UUID> scale = delta.scale(1.0 / count); @Nonnull PointSample pointSample = new PointSample(scale, weights, sum / count, rate, 1); scale.freeRef(); return pointSample; } }
#include<bits/stdc++.h> using namespace std; #define its_me ios_base::sync_with_stdio(0);cin.tie(0);cout.tie(0); #define rep(i,s,e) for(i=s;i<e;i++) #define mod 1000000007 #define in(a) for(auto &ghe:a) cin>>ghe; #define in2d(a) for(auto &ghe:a) for(auto &he:ghe) cin>>he; #define out(a) for(auto &ghe:a) cout<<ghe<<" ";cout<<endl; #define out2d(a) for(auto &ghe:a) {for(auto &he:ghe) cout<<he<<" ";cout<<endl;} #define loop(i,a) for(auto &i:a) #define chk(i) cout<<#i<<" : "<<i<<endl; #define check(i,j,k) cout<<#i<<":"<<i<<" "<<#j<<":"<<j<<" "<<#k<<":"<<k<<endl; #define show(a) for(auto i:a) cout<<i<<" ";cout<<endl; #define make(a,i) memset(a,i,sizeof(a)) #define inrange(a,b,c) (b>=a && b<c) #define vowel(a) (a=='a'||a=='A'||a=='e'||a=='E'||a=='i'||a=='I'||a=='o'||a=='O'||a=='u'||a=='U') #define fp(i) fixed<<setprecision(i) #define endl '\n' typedef long long ll; typedef unsigned long long ull; typedef long double ld; typedef vector <ll> vll; typedef vector <float> vf; typedef vector <ld> vld; #define pb(i) emplace_back(i) #define pob() pop_back() typedef pair <ll,ll> pll; #define F first #define S second #define mp(a,b) make_pair(a,b) typedef vector <pll> vp; #define all(v) v.begin(),v.end() ll strnum(string num,ll m) { ll res=0; loop(i,num) res=((res*10)%m+i-'0')%mod; return res; } #define modinv(n,m) modex(n,m-2,m) #define moddiv(n,d,m) (n*modinv(d,m))%m #define codiv(n,d,m) (n%(d*m))/d ll modex(ll x,ll p,ll m) { ll ans = 1; x=x%m; while(p>0) { if(p&1) ans=(ans*x)%m; p=p>>1; x=(x*x)%m; } return ans; } ll modex(string a,string b,ll m) { ll x=strnum(a,m),p=strnum(b,m-1); return modex(x,p,m); } vll prime; void primes(ll n) { bool p[n+1]; ll i,j; memset(p,1,sizeof(p)); p[0]=p[1]=0; for(i=2;i<=sqrt(n);i++) if(p[i]) for(j=2;j*i<=n;j++) p[j*i]=0; for(i=2;i<=n;i++) if(p[i]) prime.pb(i); } vll fctrs(ll n) { ll r=sqrt(n); vector <ll> v; for(ll i:prime) { if(i>r) break; if(n%i==0) { v.pb(i); while(n%i==0) n/=i; } } if(n>1) v.pb(n); return v; } bool isprm(ll n) { ll 
i,l=prime.size(),s=sqrt(n); for(i=0;i<l && prime[i]<=s;i++) if(n%prime[i]==0) return 0; return 1; } ll phi(ll n) { ll ans=n; for(ll i=2;i*i<=n;i++) { if(n%i==0) { while(n%i==0) n/=i; ans-=ans/i; } } if(n>1) ans-=ans/n; return ans; } string operator -(string a, string b) { ll la=a.length(),lb=b.length(),sub; bool carry=0; string res=""; while(la>0) { la--,lb--; if(lb>=0) { sub=a[la]-b[lb]-carry; if(sub<0) sub+=10,carry=1; else carry=0; a[la]=sub+'0'; } else { if(a[la]=='0' && carry) { a[la]='9'; continue; } a[la]-=carry; carry=0; } } while(a[la]=='0') la++; while(a[la]) res+=a[la],la++; if(res=="") res="0"; return res; } ll fact[1000001]={1}; ll ncr(ll n,ll r) { if(n<r) return 0; return moddiv(fact[n],(fact[n-r]*fact[r]),mod); } int main() { its_me; ll t=1; // cin>>t; while(t--) { ll n,i,x=0,y=0; cin>>n; ll a[n]; in(a); sort(a,a+n); rep(i,0,n) if(i<(n/2)) x+=a[i]; else y+=a[i]; cout<<x*x+y*y<<endl; } return 0; }
def load_behaviour(config: BehaviourConfigBlock) -> BehaviourInterface:
    """Instantiate a behaviour and attach its triggers, conditions, and actions.

    Each component is created via ``load_component`` from its config block and
    type-checked against the interface its config section implies before being
    added to the behaviour.

    Args:
        config: Behaviour config block with "triggers", "conditions", and
            "actions" lists of component definitions.

    Returns:
        The fully assembled behaviour.

    Raises:
        AssertionError: If any loaded component does not implement the
            interface required by its section.
    """
    behaviour = load_component(config)
    assert isinstance(behaviour, BehaviourInterface), assert_message(
        behaviour, BehaviourInterface
    )

    # Each component family follows the same load/verify/add pattern, so drive
    # all three from a (config key, required interface) table.
    component_specs = (
        ("triggers", TriggerInterface),
        ("conditions", ConditionInterface),
        ("actions", ActionInterface),
    )
    for config_key, interface in component_specs:
        for definition in config[config_key]:
            component = load_component(definition)
            assert isinstance(component, interface), assert_message(component, interface)
            behaviour.add(component)

    return behaviour
#include <cstdio> #include <iostream> #include <string> using namespace std; int n; string back(string str) { int i = str.find_last_of('/'); return str.substr(0,i); } string add(string str,string add_str) { return str+"/"+add_str; } int main() { int ii; char ch; string cmd; string result[1000]; string current = ""; string str,token; int kt,i,j,count = 0; int level = 0; scanf("%d",&n); scanf("%c",&ch); for (ii=1;ii<=n;ii++) { getline(cin,cmd); if (cmd.at(0) == 'c') { i = cmd.find_last_of(' '); if (cmd.at(i+1) == '/') { current = ""; i++; } { str = cmd.substr(i+1,cmd.length()); // cout << str << endl; while ((kt = str.find('/')) != string::npos) { token = str.substr(0,kt); if (token == "..") current = back(current); else (current = add(current,token)); str = str.substr(kt+1,str.length()); } if (str == "..") current = back(current); else (current = add(current,str)); } } else { count ++; if (current == "" || current == "/") result[count] = "/"; else result[count] = current+"/"; } } for (i=1;i<=count;i++) cout << result[i] << endl; return 0; }
import { AppError } from '../../errors/AppError';
import { Product } from '../../modules/products/entities/Product';

interface IStock {
  product_id: number;
  quantity: number;
}

/**
 * Validates that the requested quantity of a product can be fulfilled from
 * stock and returns the matching product with its timestamp fields stripped.
 *
 * Note: the returned product is the same object held in the caller's array,
 * so the deleted timestamp fields are removed there as well.
 *
 * @throws AppError 404 when no product matches the given id,
 *         400 when the stock is empty or insufficient.
 */
export function checkStock(products: Product[], data: IStock) {
  const { product_id, quantity } = data;

  const product = products.find((candidate) => candidate.id === product_id);

  if (!product) {
    throw new AppError('Product does not exists!', 404);
  }

  if (product.stock === 0) {
    throw new AppError('Stock is empty', 400);
  }

  if (product.stock < quantity) {
    throw new AppError('The amount informed is greater than the available', 400);
  }

  delete product.created_at;
  delete product.updated_at;

  return product;
}
/*
 * bigger than 1 means that 'this' is better than 'other'
 *
 * Compares in priority order: max weight, then lst, then average region
 * density; the first differing field decides. Each returned value is
 * 1 / (this_field / other_field), i.e. the reciprocal of this/other, so a
 * result above 1 means the other's field value exceeds this one's --
 * implying that, for every field, LOWER values are considered better.
 *
 * NOTE(review): if these fields are integer-typed, 1 / (a / b) truncates and
 * is NOT equivalent to b / a; and if a field is zero the division produces
 * Infinity/NaN (floating) or throws (integral). Confirm field types before
 * simplifying this expression.
 */
public double compare(Goodness other) {
    if (_maxWeight != other._maxWeight) {
        return 1 / (_maxWeight / other._maxWeight);
    } else if (_lst != other._lst) {
        return 1 / (_lst / other._lst);
    } else {
        return 1 / (_avgRegionDensity / other._avgRegionDensity);
    }
}
/** * An example how to use DownloadButton * * @author Matti Tahvonen */ public class DownloadButtonExample extends AbstractTest { private static final long serialVersionUID = 2638100034569162593L; public DownloadButtonExample() { // Polling or Push needs to be enable to support response written hooks getUI().setPollInterval(1000); } @Override public Component getTestComponent() { final UI ui = getUI(); DownloadButton d = new DownloadButton((OutputStream out) -> { try { // super easy to provide dynamic content out.write(("Hello there " + Instant.now().toString()).getBytes("UTF-8")); } catch (UnsupportedEncodingException ex) { Logger.getLogger(DownloadButtonExample.class.getName()).log(Level.SEVERE, null, ex); } catch (IOException ex) { Logger.getLogger(DownloadButtonExample.class.getName()).log(Level.SEVERE, null, ex); } }).setFileNameProvider(() -> { // File name can be set with setter, but also dynamically // Note, that this is used for URL as well, so it is called // first time already when showing the button. The second // call actually sets the content disposition header that // affects the name browser uses for the downloaded file. return "file" + System.currentTimeMillis() + ".txt"; }).setMimeTypeProvider(() -> { // Mime type can be set with setter, but also dynamically return "text/dynamically-set-odd-file-type"; }).withCaption("Click to download"); d.setDisableOnClick(true); d.addDownloadCompletedListener(() -> { d.setEnabled(true); Notification.show("The response has been written to the client"); }); DownloadButton simple = new DownloadButton(out -> { try { out.write("Foobar".getBytes()); } catch (IOException ex) { Logger.getLogger(DownloadButtonExample.class.getName()).log(Level.SEVERE, null, ex); } }).withCaption("Simple Download"); DownloadButton failing = new DownloadButton(out -> { throw new RuntimeException("Issue generating a file!"); }).withCaption("Failing Download"); return new MVerticalLayout(d, simple, failing); } }
/******************************************************************************* * * Pentaho Data Integration * * Copyright (C) 2002-2012 by Pentaho : http://www.pentaho.com * ******************************************************************************* * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ******************************************************************************/ package org.pentaho.di.trans.steps.getfilesrowscount; import java.io.InputStream; import org.apache.commons.vfs.FileObject; import org.pentaho.di.core.fileinput.FileInputList; import org.pentaho.di.core.row.RowMetaInterface; import org.pentaho.di.trans.step.BaseStepData; import org.pentaho.di.trans.step.StepDataInterface; /** * @author <NAME> * @since 06-Sept-2007 */ public class GetFilesRowsCountData extends BaseStepData implements StepDataInterface { public String thisline; public RowMetaInterface outputRowMeta; public RowMetaInterface convertRowMeta; public Object[] previousRow; public FileInputList files; public boolean last_file; public FileObject file; public long filenr; public InputStream fr; public long rownr; public int fileFormatType; public StringBuffer lineStringBuffer; public int totalpreviousfields; public int indexOfFilenameField; public Object[] readrow; public RowMetaInterface inputRowMeta; public char separator; /** * */ public GetFilesRowsCountData() { super(); previousRow = null; thisline=null; previousRow=null; fr=null; lineStringBuffer = new StringBuffer(256); 
totalpreviousfields=0; indexOfFilenameField=-1; readrow=null; separator='\n'; } }
# -*- coding: utf-8 -*-
# Copyright (C) 2020 - Sean: mailto:<EMAIL>
# GitHub: https://github.com/SeanTolstoyevski
# This project is licensed under the MIT license. You are free to do whatever you want as long as you accept your liability.

# : NVDA's modules
import addonHandler
import config
import globalPluginHandler
import NVDAObjects
import speech
import ui
import gui
import wx

# : 3rd party module
from .camlorn_audio import *
from .setting import UnspokenSettingDialog
from .soundtheme import *

AUDIO_WIDTH = 10.0  # Width of the audio display.
AUDIO_DEPTH = 5.0  # Distance of listener from display.

confspec = {
    "active": "boolean(default=true)",
    "soundtheme": "string(default='default theme')"
}

addonHandler.initTranslation()


class GlobalPlugin(globalPluginHandler.GlobalPlugin):
    """Plays a positional sound for an object's role instead of speaking it."""

    scriptCategory = _("Unspoken")

    def __init__(self, *args, **kwargs):
        super(GlobalPlugin, self).__init__(*args, **kwargs)
        # Settings dialog entry in NVDA's preferences menu.
        self.unspokenSetting = gui.mainFrame.sysTrayIcon.preferencesMenu.Append(
            id=wx.ID_ANY,
            item="unspoken setting")
        gui.mainFrame.sysTrayIcon.Bind(wx.EVT_MENU,
                                       self.showSetting,
                                       self.unspokenSetting)
        config.conf.spec["unspokenpy3"] = confspec
        init_camlorn_audio()
        loadSoundTheme(config.conf["unspokenpy3"]["soundtheme"])
        # Monkey-patch NVDA's speech property formatter so roles that have a
        # sound are suppressed from speech (see _hook_getSpeechTextForProperties).
        self._NVDA_getSpeechTextForProperties = speech.speech.getPropertiesSpeech
        speech.speech.getPropertiesSpeech = self._hook_getSpeechTextForProperties

        self._previous_mouse_object = None

    def _hook_getSpeechTextForProperties(self, reason=NVDAObjects.controlTypes.OutputReason.QUERY, *args, **kwargs):
        """Wrapper around NVDA's getPropertiesSpeech.

        When the add-on is active and the object's role has a sound, the
        'role' kwarg is renamed to '_role' so NVDA does not speak it.
        """
        role = kwargs.get('role', None)
        if role:
            if config.conf["unspokenpy3"]["active"] and \
                    'role' in kwargs and role in sounds:
                kwargs['_role'] = kwargs['role']
                del kwargs['role']
        return self._NVDA_getSpeechTextForProperties(reason, *args, **kwargs)

    def showSetting(self, evt):
        dlg = UnspokenSettingDialog(gui.mainFrame)
        dlg.ShowModal()

    def play_object(self, obj):
        """Play the sound for obj's role, panned to its on-screen position."""
        global AUDIO_WIDTH, AUDIO_DEPTH
        role = obj.role
        if role in sounds:
            desktop = NVDAObjects.api.getDesktopObject()
            desktop_max_x = desktop.location[2]
            desktop_max_y = desktop.location[3]
            desktop_aspect = float(desktop_max_y) / float(desktop_max_x)
            # Use the object's center; fall back to screen center when the
            # object has no location.
            if obj.location != None:
                obj_x = obj.location[0] + (obj.location[2] / 2.0)
                obj_y = obj.location[1] + (obj.location[3] / 2.0)
            else:
                obj_x = desktop_max_x / 2.0
                obj_y = desktop_max_y / 2.0
            # Map screen coordinates onto the audio display, preserving aspect
            # ratio; y is negated because screen y grows downward.
            position_x = (obj_x / desktop_max_x) * \
                (AUDIO_WIDTH * 2) - AUDIO_WIDTH
            position_y = (obj_y / desktop_max_y) * (desktop_aspect *
                                                    AUDIO_WIDTH * 2) - (desktop_aspect * AUDIO_WIDTH)
            position_y *= -1
            sounds[role].set_position(position_x, position_y, AUDIO_DEPTH * -1)
            sounds[role].play()

    def event_becomeNavigatorObject(self, obj, nextHandler, isFocus=False):
        # (A leftover debug print was removed here.)
        if config.conf["unspokenpy3"]["active"]:
            self.play_object(obj)
        nextHandler()

    def event_mouseMove(self, obj, nextHandler, x, y):
        # Only play when the mouse enters a different object.
        if obj != self._previous_mouse_object:
            self._previous_mouse_object = obj
            if config.conf["unspokenpy3"]["active"]:
                self.play_object(obj)
        nextHandler()

    def script_changeActivate(self, gesture):
        # "active" is declared as a boolean in confspec, so a plain toggle
        # replaces the former three-way if/elif/else.
        if config.conf["unspokenpy3"]["active"]:
            speech.cancelSpeech()
            ui.message(_("Disable Unspoken"))
            config.conf["unspokenpy3"]["active"] = False
        else:
            speech.cancelSpeech()
            ui.message(_("Enable Unspoken"))
            config.conf["unspokenpy3"]["active"] = True

    script_changeActivate.__doc__ = _(
        "Changes the active / deactive mode of Unspoken.")

    __gestures = {
        "kb:control+shift+u": "changeActivate",
    }

    def terminate(self, *args, **kwargs):
        super().terminate(*args, **kwargs)
        # NOTE(review): this re-Binds the menu handler on shutdown; it looks
        # like it should Unbind and remove the menu item instead -- confirm
        # against the NVDA add-on lifecycle before changing.
        gui.mainFrame.sysTrayIcon.Bind(wx.EVT_MENU,
                                       self.showSetting,
                                       self.unspokenSetting)
class MP:
    """A member of parliament data structure for saving basic information about an MP.

    Gender is either 'm' for male or 'f' for female. Language refers to mother
    tongue, either Finnish (fi) or Swedish (sv). Party field contains the party
    the MP represents at the time this object is created. Profession field may
    contain multiple different professions and/or the highest educational
    degree the MP holds. City field contains the municipality the MP
    currently resides in. Place of birth (pob) defines the municipality/city
    the MP was born in. Pob-field may contain a foreign city or a (Finnish)
    municipality that has ceased to exist.

    Electoral districts are listed by their name and duration. Duration in
    this case means, how long the MP represented that particular district.
    Duration may also be open-ended in the case of current MPs. Many
    districts have had name changes in the past. An example of the districts
    field content:

    'Electoral District of Helsinki (04/2015-), Helsinki constituency
    (03/1991-03/1999), Electoral District of Helsinki (03/1999-12/2006)'

    Education field can be empty or contain one or more degree/profession
    names. For example: 'matriculated, nurse, Master of Social Sciences'
    """

    # __slots__ fixes the attribute set and avoids a per-instance __dict__.
    __slots__ = [
        "mp_id",
        "firstname",
        "lastname",
        "gender",
        "lang",
        "birthyear",
        "party",
        "profession",
        "city",
        "pob",
        "districts",
        "education",
    ]

    mp_id: int          # unique MP identifier
    firstname: str
    lastname: str
    gender: str         # 'm' or 'f' (see class docstring)
    lang: str           # mother tongue: 'fi' or 'sv'
    birthyear: int
    party: str          # party at object-creation time
    profession: str     # profession(s) and/or highest degree
    city: str           # current municipality of residence
    pob: str            # place of birth (may no longer exist / be foreign)
    districts: str      # 'name (duration), ...' -- see class docstring
    education: str      # empty, or comma-separated degrees/professions
/**
 * Resolves the URLs the application links out to.
 */
export interface UrlManager {
  /** Returns the dashboard URL for the given environment name. */
  getDashboardUrl(environment: string): string;
  /** Returns the link to the documentation. */
  getDocumentationLink(): string;
  /** Returns the base URL of the API. */
  getApiUrl(): string;
}
<gh_stars>0 package it.softphone.rd.gwt.client.widget.base.tag; import it.softphone.rd.gwt.client.CommonWidgetsStyle; import it.softphone.rd.gwt.client.resources.base.TagBoxCss; import it.softphone.rd.gwt.client.widget.base.HintTextBox; import java.util.ArrayList; import java.util.List; import com.google.gwt.event.dom.client.KeyCodes; import com.google.gwt.event.dom.client.KeyDownEvent; import com.google.gwt.event.dom.client.KeyDownHandler; import com.google.gwt.user.client.ui.FlowPanel; import com.google.gwt.user.client.ui.IsWidget; import com.google.gwt.user.client.ui.Label; import com.google.gwt.user.client.ui.ScrollPanel; import com.google.gwt.user.client.ui.Widget; /** * <h1>A box containing {@link TagElement}</h1> * * This class consists in a container holding: * * <ul> * <li>{@link HintTextBox} that can add new TagElement to the box</li> * <li>A list of suggested tags, that on click will be added to the box</li> * </ul> * * @author <NAME> * */ public class TagBox implements IsWidget { private final ScrollPanel scroll; private final FlowPanel externalContainer; private final FlowPanel container; private final FlowPanel suggestContainer; private final TagBoxCss css; private final HintTextBox input; private final Label lTags = new Label("Tags:"); private final Label lSuggestedTags = new Label("Suggested Tags:"); private List<TagElement> elements = new ArrayList<TagElement>(); /** * Constructs an Empty TgBox */ public TagBox(){ this.scroll = new ScrollPanel(); this.externalContainer = new FlowPanel(); this.container = new FlowPanel(); this.suggestContainer = new FlowPanel(); this.input = new HintTextBox(); this.css = CommonWidgetsStyle.getTheme().getCommonsWidgetClientBundle().tagBox(); css.ensureInjected(); init(); } /** * Sets the suggested tags * @param tags the tags to add as suggested */ public void setSuggestedTags( List<String> tags){ suggestContainer.clear(); for ( String s : tags ){ suggestContainer.add(new TagElement(this,s,true)); } } /** * Creates a 
tag from the string, and adds it to the suggested tag container * @param tag the tag to add */ public void addSuggestedTag( String tag ){ suggestContainer.add(new TagElement(this,tag,true)); } private void init(){ externalContainer.add(lTags); scroll.setWidget(container); scroll.setWidth("99%"); // scroll.setAlwaysShowScrollBars(true); externalContainer.add(scroll); externalContainer.add(lSuggestedTags); externalContainer.add(suggestContainer); input.setWidth("100px"); input.setHint("Write a tag"); scroll.addStyleName(css.tagContainer()); externalContainer.addStyleName(css.tagExternalContainer()); container.add(input); input.addKeyDownHandler(new KeyDownHandler() { @Override public void onKeyDown(KeyDownEvent event) { if (event.getNativeKeyCode() == KeyCodes.KEY_ENTER) { String text = input.getValue(); TagElement element = new TagElement(TagBox.this,text); input.setText(""); if( elements.contains(element)) { return; } elements.add(element); container.add(element); } } }); } /** * Removes the given element from the box * @param e the element to remove * @return a boolean */ public boolean removeElement( TagElement e ){ return elements.remove(e); } /** * Adds an element to the box, if not already contained * @param tag the element to add */ public void addElement( TagElement tag ){ if ( elements.contains(tag) ) return; container.add(tag); elements.add(tag); } @Override public Widget asWidget() { return externalContainer; } }
def subtract_arrays(x, y):
    """Return the element-wise difference of two equal-length sequences.

    Args:
        x: First sequence of numbers.
        y: Second sequence of numbers; must have the same length as ``x``.

    Returns:
        A new list where element ``i`` equals ``x[i] - y[i]``.

    Raises:
        ValueError: If the sequences differ in length.
    """
    if len(x) != len(y):
        raise ValueError("Both arrays must have the same length.")
    # zip pairs elements positionally; equal lengths are verified above.
    return [x_ - y_ for x_, y_ in zip(x, y)]
package patch

import (
	"github.com/stretchr/testify/assert"
	"testing"
)

// TestCoverStructsField exercises CoverStructsField:
//  1. a full copy where every source field is set,
//  2. a partial copy where zero-valued source fields must NOT clobber the
//     destination's existing values,
//  3. error cases: a non-pointer destination and mismatched field types.
func TestCoverStructsField(t *testing.T) {
	type A struct {
		Name string
		ID   int
		Desc *string
		Data []byte
	}
	desc := "abc"

	// Case 1: all fields set in src -- dst must end up equal to src.
	src := A{
		Name: "test",
		ID:   0,
		Desc: &desc,
		Data: []byte("data"),
	}
	dst := A{}
	err := CoverStructsField(src, &dst)
	assert.Nil(t, err)
	assert.EqualValues(t, src, dst)

	// Case 2: zero-valued src fields (ID, Desc, Data) leave dst untouched;
	// only the non-zero Name is copied over.
	src = A{
		Name: "test",
		ID:   0,
	}
	dst = A{
		Name: "test2",
		ID:   2,
		Desc: &desc,
	}
	err = CoverStructsField(src, &dst)
	assert.Nil(t, err)
	assert.EqualValues(t, src.Name, dst.Name)
	assert.EqualValues(t, 2, dst.ID)
	assert.EqualValues(t, &desc, dst.Desc)

	// Case 3a: destination passed by value (not a pointer) must error.
	err = CoverStructsField(src, dst)
	assert.NotNil(t, err)

	// Case 3b: same field name with a different type must error.
	err = CoverStructsField(struct {
		ID string
	}{ID: "1"}, &dst)
	assert.NotNil(t, err)
}
/*
 * Write dirty or read not uptodate page lists of a stripe.
 *
 * Takes chunk references across the per-device I/O
 * (stripe_get_references), issues the chunk reads/writes
 * (stripe_chunk_rw), and maintains the high-water mark of
 * concurrently active stripes plus the flush statistics counter.
 *
 * Returns the result of stripe_get_references(); I/O is only issued
 * when that result is nonzero.
 * NOTE(review): the exact meaning of the nonzero return (count vs.
 * flag) is defined by for_each_io_dev/stripe_get_references elsewhere
 * in this file -- confirm there.
 */
static int stripe_chunks_rw(struct stripe *stripe)
{
	int r;
	struct raid_set *rs = RS(stripe->sc);

	/* Grab references on the chunks involved before touching them. */
	r = for_each_io_dev(stripe, stripe_get_references);
	if (r) {
		int max;	/* current number of active stripes */
		struct stripe_cache *sc = &rs->sc;

		/* Kick off the per-device chunk I/O. */
		for_each_io_dev(stripe, stripe_chunk_rw);

		/* Track the high-water mark of active stripes.
		 * NOTE(review): read-compare-set is not atomic as a whole, so
		 * the max may be slightly stale under concurrency -- it is a
		 * statistic only. */
		max = sc_active(sc);
		if (atomic_read(&sc->active_stripes_max) < max)
			atomic_set(&sc->active_stripes_max, max);

		/* Count this flush in the stripe-set statistics. */
		atomic_inc(rs->stats + S_FLUSHS);
	}

	return r;
}
London buses might be fitted with smart sensors following the road safety trials conducted. The intention is to reduce the number of collisions with pedestrians and cyclists. Smart London Buses Fleet TfL is “upgrading” its London buses fleet. The new safety sensors, which could save the lives of dozens of vulnerable road users, might be installed on all London buses by the end of 2016. Transport for London asked leading manufacturers to come forward with designs back in 2014. One of the favourites was CycleEye and its pedestrian-sensing sensors. IoT Sensors For London Buses And Lorries “We’ve made some great strides in improving road safety in recent years, and although things are moving in the right direction there is still much to be done.” he said. CycleEye was rejected because of the “white noise” it picked up: roadside railings, traffic lights, etc. More recent data suggests that the Israeli firm Mobileye could be the maker of the new IoT system. The system does not come cheap, and it will cost TfL about £17 million to have the whole fleet of buses covered and connected if approved. The Mayor of London, Boris Johnson, expressed his interest in the project right from the beginning. The Mayor also revealed that TfL (Transport for London) is considering making the new IoT sensors mandatory for lorries entering the city centre too. Seven of the eight cyclist deaths this year have been as a result of collisions with HGVs. Smart London Buses In A Smart City One of TfL’s top priorities is to reduce the number of people killed or seriously injured on London’s roads by 50 percent by 2020. The new tech will monitor blind spots around the London buses using IoT sensors. Quite a big task to fit all 8,700 buses that are on the road. There is hope that TfL will begin live trials on London buses by mid-2016. An expensive upgrade that will make a significant reduction in casualties.
Thanks to the new tech, day or night, vulnerable road users, pedestrians, and cyclists will be safer in London. It is worth mentioning that the new technology is also part of the new plan that envisions London as a leading “smart city” in the world. Update: We’ve been approached by the press office at TfL with a few corrections. Following TfL’s above-mentioned trials, a new project is being planned to determine the role of this safety technology. However, at this moment there are no plans to fit London’s buses with smart sensors.
package com.semihshn.paymentservice.domain.payment;

import com.semihshn.paymentservice.domain.port.PaymentPort;
import lombok.RequiredArgsConstructor;
import org.springframework.stereotype.Service;

/**
 * Application service delegating payment persistence operations to the
 * {@link PaymentPort} adapter.
 */
@Service
@RequiredArgsConstructor
public class PaymentService {

    private final PaymentPort paymentPort;

    /** Persists the given payment and returns the generated identifier. */
    public Long create(Payment payment) {
        final Payment created = paymentPort.create(payment);
        return created.getId();
    }

    /** Loads the payment with the given identifier. */
    public Payment retrieve(Long id) {
        return paymentPort.retrieve(id);
    }

    /** Deletes the payment with the given identifier. */
    public void delete(Long id) {
        paymentPort.delete(id);
    }
}
package ru.misha.telegram;

import java.util.ArrayList;
import java.util.HashMap;

/**
 * Bot command that runs a multiple-question test for the theme the chat has
 * previously selected (stored in Data.themeMemory). State between messages is
 * kept in the static maps on {@link Data}, keyed by chat id.
 */
public class TestCommand implements Command {

    String chatID;      // telegram chat this test session belongs to
    String messageText; // the user's latest message; empty on the first call

    public TestCommand(String chatID, String messageText) {
        this.chatID = chatID;
        this.messageText = messageText;
    }

    @Override
    public String Run() {
        String botMessage = "";
        String numberString = Data.themeMemory.get(chatID);
        int numberOfTheme = Integer.valueOf(numberString);
        if (messageText.isEmpty()) {
            // First call: pick a random unsolved test and return its first question.
            int testNumber = getNextTestNumber(numberOfTheme);
            if (testNumber == -1) {
                Data.lastCommandMemory.put(chatID, "");
                return "Поздравляем! Вы решили все тесты, ждите обновления.";
            }
            Data.testMemory.put(chatID,testNumber);
            int i = 0;
            Data.testAnswersMemory.put(chatID, new ArrayList<String>());
            Data.questionNumberMemory.put(chatID, i);
            botMessage = botMessage + Data.themes.get(numberOfTheme - 1).themeTests.get(testNumber).questions.get(i).questionText;
        } else {
            // Subsequent calls: record the user's answer and advance to the next question.
            int testNumber = Data.testMemory.get(chatID);
            int questionNumber = Data.questionNumberMemory.get(chatID);
            // TODO: persist the answer somewhere
            questionNumber++;
            ArrayList<String> answers = Data.testAnswersMemory.get(chatID);
            answers.add(messageText);
            ArrayList <Question> questions = Data.themes.get(numberOfTheme - 1).themeTests.get(testNumber).questions;
            if (questionNumber >= questions.size()){
                // Question number exceeds the question list size - the test is over.
                Data.lastCommandMemory.put(chatID, ""); // clear memory so the bot does not treat the next command as part of this test
                String answersForEnd = "Ваши ответы Правильные ответы %0A"; // builds a user-answer vs correct-answer table
                for (int i = 0; i <questions.size(); i++) {
                    String thisQuestionTrueAnswer = questions.get(i).questionAnswer;
                    String thisQuestionUsersAnswer = answers.get(i);
                    answersForEnd +=" " + thisQuestionUsersAnswer +" "+ thisQuestionTrueAnswer + "%0A";
                }
                // Mark this test as solved for the chat.
                ArrayList<Integer> solvedList = Data.solvedTests.get(chatID).get(numberOfTheme - 1);
                solvedList.add(testNumber);
                return answersForEnd;
            }
            Data.questionNumberMemory.put(chatID, questionNumber);
            botMessage = botMessage + Data.themes.get(numberOfTheme - 1).themeTests.get(testNumber).questions.get(questionNumber).questionText;
        }
        return botMessage;
    }

    /**
     * Picks the number of an unsolved test for the given theme.
     * Starts from a random index and probes forward (wrapping around) past
     * already-solved tests. Lazily initializes the solved-test bookkeeping for
     * this chat/theme.
     *
     * @return a test index, or -1 when every test of the theme is solved
     */
    int getNextTestNumber(int numberOfTheme) {
        if (Data.solvedTests.get(chatID) == null) {
            Data.solvedTests.put(chatID, new HashMap<Integer, ArrayList<Integer>>());
        }
        ArrayList <Integer> solvedTestsList = Data.solvedTests.get(chatID).get(numberOfTheme - 1);
        if (solvedTestsList == null) {
            solvedTestsList = new ArrayList<Integer>();
            Data.solvedTests.get(chatID).put(numberOfTheme - 1, solvedTestsList);
        }
        ArrayList <Test> thisThemeTests = Data.themes.get(numberOfTheme - 1).themeTests;
        if (solvedTestsList.size() >= thisThemeTests.size()) {
            return -1;
        }
        int testNumber = Randomizer.getRandomInt(0, Data.themes.get(numberOfTheme - 1).themeTests.size());
        if (solvedTestsList.contains(testNumber)){
            // Linear probing with wrap-around until an unsolved test is found;
            // terminates because not all tests are solved (checked above).
            while (solvedTestsList.contains(testNumber)) {
                testNumber ++;
                if (thisThemeTests.size() == testNumber) {
                    testNumber = 0;
                }
            }
        }
        return testNumber;
    }
}
<reponame>peitaosu/Logger //---------------------------------------------------// // MIT License // // Copyright @ 2018-2020 <NAME> All Rights Reserved // // https://github.com/peitaosu/Logger // //---------------------------------------------------// #include "LogEvent.h" LogEventException::LogEventException() { } LogEventException::LogEventException(std::string summary) { this->summary = summary; } LogEventException::LogEventException(std::string summary, std::string detail) { this->summary = summary; this->detail = detail; } std::string LogEventException::GetSummary() { return this->summary; } std::string LogEventException::GetDetail() { return this->detail; } LogEvent::LogEvent() { } LogEvent::LogEvent(LogEventLevel level, std::string message) { auto time = std::time(nullptr); auto tm = *std::localtime(&time); this->timestamp = tm; this->level = level; this->message = message; } LogEvent::LogEvent(LogEventLevel level, std::string message, LogEventException exception) { auto time = std::time(nullptr); auto tm = *std::localtime(&time); this->timestamp = tm; this->level = level; this->message = message; this->exception = exception; } std::tm LogEvent::GetTimestamp() { return this->timestamp; } LogEventLevel LogEvent::GetLevel() { return this->level; } std::string LogEvent::GetMessage() { return this->message; } LogEventException LogEvent::GetException() { return this->exception; }
def plotmaxima(dim):
    """Plot the trailing 17 local maxima of `dim` against the parameter c.

    Evaluates findmaxima(c, dim) for 41 values of c in [2, 6], keeps the
    last 17 maxima of each run, and draws them all on figure 1.
    """
    cs = np.linspace(2, 6, 41)
    maxima = []
    for c in cs:
        maxima.append(findmaxima(c, dim)[-17:])
    fig = plt.figure(1)
    plt.plot(cs, maxima, 'b-')
    plt.xlabel('c')
    plt.ylabel(dim)
    plt.ylim([3, 12])
    plt.title(dim + ' local maxes vs. c')
    plt.show()
/*
 * Copyright (c) 2019 <NAME>
 * [This program is licensed under the "MIT License"]
 * Please see the file LICENSE in the source
 * distribution of this software for license terms.
 */

/* Terrible PRNG to avoid the rand crate here. */
// Advances the state `r` by one LCG step; wrapping ops make overflow defined.
fn rand(r: &mut usize) {
    // https://en.wikipedia.org/wiki/Linear_congruential_generator#Parameters_in_common_use (Newlib, Musl)
    *r = r.wrapping_add(1).wrapping_mul(6364136223846793005);
}

/* Shuffle an array in-place. */
// Fisher-Yates: each position i is swapped with a pseudo-random
// position j in i..na, with `r` as the PRNG seed/state.
fn shuffle(a: &mut[isize], mut r: usize) {
    let na = a.len();
    for i in 0..na {
        rand(&mut r);
        let j = r % (na - i) + i;
        a.swap(i, j);
    }
}

// Hoare-like partition around the middle element: scan inward from both
// ends, swapping out-of-place pairs, and return the first index of the
// right-hand side. NOTE(review): indices are usize, hence the
// `right_index > 0` guard before decrementing to avoid underflow.
fn partition(
    arr: &mut[isize],
    mut left_index: usize,
    mut right_index: usize
) -> usize {
    let pivot = arr[(left_index + right_index) / 2];
    while left_index <= right_index {
        while arr[left_index] < pivot {
            left_index += 1;
        }
        while arr[right_index] > pivot {
            right_index -= 1;
        }
        if left_index <= right_index {
            let tmp = arr[left_index];
            arr[left_index] = arr[right_index];
            arr[right_index] = tmp;
            left_index += 1;
            if right_index > 0 {
                right_index -= 1;
            }
        }
    }
    left_index
}

// In-place quicksort of arr[left_index..=right_index] (inclusive bounds).
// The `left_index + 1 < index` / `index < right_index` guards skip
// zero- and one-element sub-ranges.
fn quicksort(arr: &mut[isize], left_index: usize, right_index: usize) {
    let index = partition(arr, left_index, right_index);
    if left_index + 1 < index {
        quicksort(arr, left_index, index - 1);
    }
    if index < right_index {
        quicksort(arr, index, right_index);
    }
}

#[test]
fn test_quicksort_sorts() {
    let mut a: Vec<isize> = (0..5000).collect();
    shuffle(&mut a, 17);
    let na = a.len();
    quicksort(&mut a, 0, na - 1);
    for i in 1..na {
        assert!(a[i-1] < a[i]);
    }
}

// Usage: <prog> <n> <seed>. Sorts a shuffled 0..n and prints the smallest
// element (0 for n > 0) so the sort cannot be optimized away.
fn main() {
    let ns: Vec<isize> = std::env::args()
        .skip(1)
        .map(|s| s.parse().unwrap())
        .collect();
    let n = ns[0];
    let r = ns[1] as usize;
    let mut a: Vec<isize> = (0..n).collect();
    shuffle(&mut a, r);
    let na = a.len();
    quicksort(&mut a, 0, na - 1);
    println!("{}", a[0]);
}
package it.polimi.ingsw.client.cli;

import it.polimi.ingsw.client.modelClient.GameClient;
import it.polimi.ingsw.constant.enumeration.ResourceType;
import it.polimi.ingsw.constant.model.Game;
import it.polimi.ingsw.constant.model.NumberOfResources;
import it.polimi.ingsw.constant.move.MoveChoseResources;
import it.polimi.ingsw.constant.move.MoveType;

import java.util.Scanner;

/**
 * CliMoveChoseResources class.
 * Implements CliInterface.
 * Manage the chose resources move on the cli: prompts the player to pick
 * the input resource(s) and the single output resource for the move, then
 * returns the populated MoveChoseResources.
 */
public class CliMoveChoseResources implements CliInterface{

    private final MoveChoseResources move;

    /**
     * Instantiates a new Cli move chose resources.
     *
     * @param myId of type int: the player's id.
     */
    public CliMoveChoseResources(int myId){
        this.move = new MoveChoseResources(myId);
    }

    /**
     * Update cli. Reads the player's resource choices from stdin.
     * @param game of type GameClient: the game.
     * @param stdin of type Scanner: the input scanner.
     * @return of type MoveType: the move, with input and output choices set.
     */
    @Override
    public MoveType updateCLI(GameClient game, Scanner stdin) {
        NumberOfResources numberOfResources = new NumberOfResources();
        System.out.println("Risorse attuali totali: "+ game.getMe().getDepots().getResources());
        if(game.getMe().getToActive().getOfYourChoiceInput()==1){
            // Single input resource: one menu selection.
            // NOTE(review): unlike the multi-choice loop below, this branch
            // does not range-check `res`; an out-of-range entry throws
            // ArrayIndexOutOfBoundsException.
            System.out.println("Scegli la risorsa in input!");
            printRes();
            int res = CLI.ReadFromKeyboard(stdin);
            move.setOfYourChoiceInput(numberOfResources.add(ResourceType.values()[res-1],1));
        }else{
            // Multiple input resources: keep prompting until the required
            // count is reached, rejecting out-of-range menu indices.
            System.out.println("Scegli le risorse in input!");
            printRes();
            while(numberOfResources.size()< game.getMe().getToActive().getOfYourChoiceInput()){
                int res = CLI.ReadFromKeyboard(stdin);
                if(res>0 && res<=ResourceType.values().length){
                    numberOfResources=numberOfResources.add(ResourceType.values()[res-1],1);
                }else{
                    System.out.println("Indice non valido");
                }
            }
            move.setOfYourChoiceInput(numberOfResources);
        }
        // Exactly one output resource is chosen.
        // NOTE(review): `res` is not range-checked here either — TODO confirm
        // whether validation is intended, for parity with the input loop.
        System.out.println("Scegli la risorsa in output!");
        printRes();
        int res = CLI.ReadFromKeyboard(stdin);
        move.setOfYourChoiceOutput(new NumberOfResources().add(ResourceType.values()[res-1],1));
        return move;
    }

    /**
     * @see MoveType#canPerform(Game)
     */
    @Override
    public boolean canPerform(GameClient game) {
        return move.canPerform(game);
    }

    /**
     *
     * @see MoveType#getClassName()
     */
    @Override
    public String getName() {
        return move.getClassName();
    }

    // Prints a 1-based menu of every ResourceType value.
    private void printRes(){
        int i=1;
        for(ResourceType type : ResourceType.values()){
            System.out.println(i+". "+type);
            i++;
        }
    }
}
package com.taobao.csp.ahas.transport.api;

/**
 * Callback contract for servicing transport-layer requests: an
 * implementation turns an incoming {@code Request} into a typed
 * {@code Response}.
 */
public interface RequestHandler {
    /**
     * Handles the given request and produces a typed response.
     *
     * @param var1 the incoming request (parameter name retained from the
     *             original source, which appears to be decompiled)
     * @param <R>  the payload type carried by the response
     * @return the response produced for this request
     * @throws RequestException if the request cannot be handled
     */
    <R> Response<R> handle(Request var1) throws RequestException;
}
def audit(self):
    """Purge invalid credentials from the wallet.

    For each key in the assertions map, keep only the credentials that
    still pass ``isValidCredential``; if none remain for a key, remove
    the key entirely.
    """
    log.debug("SAMLAssertionWallet.audit ...")
    # Iterate over a snapshot of the items: the loop deletes keys from the
    # map, and deleting from a dict while iterating its live .items() view
    # raises RuntimeError ("dictionary changed size during iteration") in
    # Python 3.
    for k, v in list(self.__assertionsMap.items()):
        creds = [credential for credential in v
                 if self.isValidCredential(credential)]
        if creds:
            self.__assertionsMap[k] = creds
        else:
            del self.__assertionsMap[k]
/* This file is part of solidity. solidity is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. solidity is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with solidity. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #include <test/libsolidity/util/SoltestTypes.h> #include <test/libsolidity/util/SoltestErrors.h> #include <libsolutil/CommonData.h> #include <json/json.h> namespace solidity::frontend::test { using ABITypes = std::vector<ABIType>; /** * Utility class that aids conversions from contract ABI types stored in a * Json value to the internal ABIType representation of isoltest. */ class ContractABIUtils { public: /// Parses and translates Solidity's ABI types as Json string into /// a list of internal type representations of isoltest. /// Creates parameters from Contract ABI and is used to generate values for /// auto-correction during interactive update routine. static std::optional<ParameterList> parametersFromJsonOutputs( ErrorReporter& _errorReporter, Json::Value const& _contractABI, std::string const& _functionSignature ); /// Overwrites _targetParameters if ABI types or sizes given /// by _sourceParameters do not match. static void overwriteParameters( ErrorReporter& _errorReporter, ParameterList& _targetParameters, ParameterList const& _sourceParameters ); /// If parameter count does not match, take types defined _sourceParameters /// and create a warning if so. 
static ParameterList preferredParameters( ErrorReporter& _errorReporter, ParameterList const& _targetParameters, ParameterList const& _sourceParameters, bytes const& _bytes ); /// Returns a list of parameters corresponding to the encoding of /// returned values in case of a failure. Creates an additional parameter /// for the error message if _bytes is larger than 68 bytes /// (function_selector + tail_ptr + message_length). static ParameterList failureParameters(bytes const _bytes); /// Returns _count parameters with their type set to ABIType::UnsignedDec /// and their size set to 32 bytes. static ParameterList defaultParameters(size_t count = 0); /// Calculates the encoding size of given _parameters based /// on the size of their types. static size_t encodingSize(ParameterList const& _paremeters); private: /// Parses and translates a single type and returns a list of /// internal type representations of isoltest. /// Types defined by the ABI will translate to ABITypes /// as follows: /// `bool` -> [`Boolean`] /// `uint` -> [`Unsigned`] /// `string` -> [`Unsigned`, `Unsigned`, `String`] /// `bytes` -> [`Unsigned`, `Unsigned`, `HexString`] /// ... static bool appendTypesFromName( Json::Value const& _functionOutput, ABITypes& _inplaceTypes, ABITypes& _dynamicTypes, bool _isCompoundType = false ); }; }
/*
 * To change this license header, choose License Headers in Project Properties.
 * To change this template file, choose Tools | Templates
 * and open the template in the editor.
 */
package shapeCollection;

import collection.TList;
import static org.testng.Assert.*;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import shape.TPoint2i;
import static shape.ShapeUtil.p2i;
import string.Strings;
import static string.Strings.asCharList;

/**
 * Unit tests for {@link Grid}.
 *
 * Most tests exercise a grid spanning corner points (5,2) and (3,5); since
 * the start x (5) is greater than the end x (3), the x axis runs in
 * descending order (5, 4, 3) while the y axis ascends (2..5), as the
 * expected lists below show.
 *
 * @author masao
 */
public class GridNGTest {
    Grid<TPoint2i> tested;

    public GridNGTest() {
    }

    // Fresh grid before every test; each cell holds its own (x, y) point.
    @BeforeMethod
    public void setUpMethod() throws Exception {
        tested = new Grid<>(new TPoint2i(5, 2), new TPoint2i(3, 5), (a, b)->new TPoint2i(a, b));
    }

    // Every in-range (x, y) coordinate of the 3x4 grid.
    @DataProvider(name = "get_int_int")
    public Object[][] get_int_int() {
        return new Object[][]{
            {3, 2}, {4, 2}, {5, 2},
            {3, 3}, {4, 3}, {5, 3},
            {3, 4}, {4, 4}, {5, 4},
            {3, 5}, {4, 5}, {5, 5},};
    }

    @Test(dataProvider = "get_int_int")
    public void testGet_int_int(int x, int y) {
        System.out.println(test.TestUtils.methodName(0));
        TPoint2i result = tested.get(x, y);
        TPoint2i expected = p2i(x, y);
        System.out.println("result : "+result);
        System.out.println("expected: "+expected);
        assertEquals(result, expected);
    }

    // Coordinates outside the grid; get() is expected to assert.
    @DataProvider(name = "get_int_int_fail")
    public Object[][] get_int_int_fail() {
        return new Object[][]{
            {2, 4}, {5, 6}, {4, 1}
        };
    }

    @Test(dataProvider = "get_int_int_fail", expectedExceptions = AssertionError.class)
    public void testGet_int_int_fail(int x, int y) {
        System.out.println(test.TestUtils.methodName(0));
        TPoint2i result = tested.get(x, y);
        TPoint2i expected = p2i(x, y);
        System.out.println("result : "+result);
        System.out.println("expected: "+expected);
    }

    @Test
    public void testGetY_int() {
        System.out.println(test.TestUtils.methodName(0));
        TList<TPoint2i> result = tested.getY(4);
        TList<TPoint2i> expected = TList.of(p2i(5, 4), p2i(4, 4), p2i(3, 4));
        System.out.println("result : "+result);
        System.out.println("expected: "+expected);
        assertEquals(result, expected);
    }

    @Test
    public void testGetX_int() {
        System.out.println(test.TestUtils.methodName(0));
        TList<TPoint2i> result = tested.getX(4);
        TList<TPoint2i> expected = TList.of(p2i(4, 2), p2i(4, 3), p2i(4, 4), p2i(4, 5));
        System.out.println("result : "+result);
        System.out.println("expected: "+expected);
        assertEquals(result, expected);
    }

    @Test
    public void testGetY() {
        System.out.println(test.TestUtils.methodName(0));
        Grid<TPoint2i> tested = new Grid<>(new TPoint2i(5, 2), new TPoint2i(3, 5), (a, b)->new TPoint2i(a, b));
        TList<TList<TPoint2i>> result = tested.getY();
        TList<TList<TPoint2i>> expected = TList.of(
                TList.of(p2i(5, 2), p2i(4, 2), p2i(3, 2)),
                TList.of(p2i(5, 3), p2i(4, 3), p2i(3, 3)),
                TList.of(p2i(5, 4), p2i(4, 4), p2i(3, 4)),
                TList.of(p2i(5, 5), p2i(4, 5), p2i(3, 5))
        );
        System.out.println("result : "+result);
        System.out.println("expected: "+expected);
        assertEquals(result, expected);
    }

    @Test
    public void testGetX() {
        System.out.println(test.TestUtils.methodName(0));
        Grid<TPoint2i> tested = new Grid<>(new TPoint2i(5, 2), new TPoint2i(3, 5), (a, b)->new TPoint2i(a, b));
        TList<TList<TPoint2i>> result = tested.getX();
        TList<TList<TPoint2i>> expected = TList.of(
                TList.of(p2i(5, 2), p2i(5, 3), p2i(5, 4), p2i(5, 5)),
                TList.of(p2i(4, 2), p2i(4, 3), p2i(4, 4), p2i(4, 5)),
                TList.of(p2i(3, 2), p2i(3, 3), p2i(3, 4), p2i(3, 5))
        );
        System.out.println("result : "+result);
        System.out.println("expected: "+expected);
        assertEquals(result, expected);
    }

    // cset replaces a single cell's value in place.
    @Test
    public void testSet() {
        System.out.println(test.TestUtils.methodName(0));
        Grid<TPoint2i> tested = new Grid<>(new TPoint2i(5, 2), new TPoint2i(3, 5), (a, b)->new TPoint2i(a, b));
        tested.cset(p2i(5, 3), TPoint2i.zero);
        TList<TList<TPoint2i>> result = tested.getX();
        TList<TList<TPoint2i>> expected = TList.of(
                TList.of(p2i(5, 2), p2i(0, 0), p2i(5, 4), p2i(5, 5)),
                TList.of(p2i(4, 2), p2i(4, 3), p2i(4, 4), p2i(4, 5)),
                TList.of(p2i(3, 2), p2i(3, 3), p2i(3, 4), p2i(3, 5))
        );
        System.out.println("result : "+result);
        System.out.println("expected: "+expected);
        assertEquals(result, expected);
    }

    // setY overwrites one whole row (constant y) at a time.
    @Test
    public void testSetY() {
        System.out.println(test.TestUtils.methodName(0));
        Grid<TPoint2i> tested = new Grid<>(new TPoint2i(5, 2), new TPoint2i(3, 5), (a, b)->new TPoint2i(a, b));
        tested.setY(3, TList.nCopies(3, TPoint2i.zero));
        TList<TList<TPoint2i>> result = tested.getX();
        TList<TList<TPoint2i>> expected = TList.of(
                TList.of(p2i(5, 2), p2i(0, 0), p2i(5, 4), p2i(5, 5)),
                TList.of(p2i(4, 2), p2i(0, 0), p2i(4, 4), p2i(4, 5)),
                TList.of(p2i(3, 2), p2i(0, 0), p2i(3, 4), p2i(3, 5))
        );
        System.out.println("result : "+result);
        System.out.println("expected: "+expected);
        assertEquals(result, expected);
    }

    // setX overwrites one whole column (constant x) at a time.
    @Test
    public void testSetX() {
        System.out.println(test.TestUtils.methodName(0));
        Grid<TPoint2i> tested = new Grid<>(new TPoint2i(5, 2), new TPoint2i(3, 5), (a, b)->new TPoint2i(a, b));
        tested.setX(3, TList.nCopies(4, TPoint2i.zero));
        TList<TList<TPoint2i>> result = tested.getX();
        TList<TList<TPoint2i>> expected = TList.of(
                TList.of(p2i(5, 2), p2i(5, 3), p2i(5, 4), p2i(5, 5)),
                TList.of(p2i(4, 2), p2i(4, 3), p2i(4, 4), p2i(4, 5)),
                TList.of(p2i(0, 0), p2i(0, 0), p2i(0, 0), p2i(0, 0))
        );
        System.out.println("result : "+result);
        System.out.println("expected: "+expected);
        assertEquals(result, expected);
    }

    @Test
    public void testClone() throws CloneNotSupportedException {
        System.out.println(test.TestUtils.methodName(0));
        Grid<TPoint2i> tested = new Grid<>(new TPoint2i(5, 2), new TPoint2i(3, 5), (a, b)->new TPoint2i(a, b));
        TList<TList<TPoint2i>> result = tested.clone().getX();
        TList<TList<TPoint2i>> expected = TList.of(
                TList.of(p2i(5, 2), p2i(5, 3), p2i(5, 4), p2i(5, 5)),
                TList.of(p2i(4, 2), p2i(4, 3), p2i(4, 4), p2i(4, 5)),
                TList.of(p2i(3, 2), p2i(3, 3), p2i(3, 4), p2i(3, 5))
        );
        System.out.println("result : "+result);
        System.out.println("expected: "+expected);
        assertEquals(result, expected);
    }

    // rotateX(1) cycles the columns left by one; rotateX(-1) cycles right.
    @Test
    public void testRotateX() {
        System.out.println(test.TestUtils.methodName(0));
        Grid<TPoint2i> tested = new Grid<>(new TPoint2i(5, 2), new TPoint2i(3, 5), (a, b)->new TPoint2i(a, b));
        TList<TList<TPoint2i>> result = tested.rotateX(1).getX();
        TList<TList<TPoint2i>> expected = TList.of(
                TList.of(p2i(4, 2), p2i(4, 3), p2i(4, 4), p2i(4, 5)),
                TList.of(p2i(3, 2), p2i(3, 3), p2i(3, 4), p2i(3, 5)),
                TList.of(p2i(5, 2), p2i(5, 3), p2i(5, 4), p2i(5, 5))
        );
        System.out.println("result : "+result);
        System.out.println("expected: "+expected);
        assertEquals(result, expected);
    }

    @Test
    public void testRotateX2() {
        System.out.println(test.TestUtils.methodName(0));
        Grid<TPoint2i> tested = new Grid<>(new TPoint2i(5, 2), new TPoint2i(3, 5), (a, b)->new TPoint2i(a, b));
        TList<TList<TPoint2i>> result = tested.rotateX(-1).getX();
        TList<TList<TPoint2i>> expected = TList.of(
                TList.of(p2i(3, 2), p2i(3, 3), p2i(3, 4), p2i(3, 5)),
                TList.of(p2i(5, 2), p2i(5, 3), p2i(5, 4), p2i(5, 5)),
                TList.of(p2i(4, 2), p2i(4, 3), p2i(4, 4), p2i(4, 5))
        );
        System.out.println("result : "+result);
        System.out.println("expected: "+expected);
        assertEquals(result, expected);
    }

    // rotateY cycles the rows analogously.
    @Test
    public void testRotateY() {
        System.out.println(test.TestUtils.methodName(0));
        Grid<TPoint2i> tested = new Grid<>(new TPoint2i(5, 2), new TPoint2i(3, 5), (a, b)->new TPoint2i(a, b));
        TList<TList<TPoint2i>> result = tested.rotateY(1).getY();
        TList<TList<TPoint2i>> expected = TList.of(
                TList.of(p2i(5, 3), p2i(4, 3), p2i(3, 3)),
                TList.of(p2i(5, 4), p2i(4, 4), p2i(3, 4)),
                TList.of(p2i(5, 5), p2i(4, 5), p2i(3, 5)),
                TList.of(p2i(5, 2), p2i(4, 2), p2i(3, 2))
        );
        System.out.println("result : "+result);
        System.out.println("expected: "+expected);
        assertEquals(result, expected);
    }

    @Test
    public void testRotateY2() {
        System.out.println(test.TestUtils.methodName(0));
        Grid<TPoint2i> tested = new Grid<>(new TPoint2i(5, 2), new TPoint2i(3, 5), (a, b)->new TPoint2i(a, b));
        TList<TList<TPoint2i>> result = tested.rotateY(-1).getY();
        TList<TList<TPoint2i>> expected = TList.of(
                TList.of(p2i(5, 5), p2i(4, 5), p2i(3, 5)),
                TList.of(p2i(5, 2), p2i(4, 2), p2i(3, 2)),
                TList.of(p2i(5, 3), p2i(4, 3), p2i(3, 3)),
                TList.of(p2i(5, 4), p2i(4, 4), p2i(3, 4))
        );
        System.out.println("result : "+result);
        System.out.println("expected: "+expected);
        assertEquals(result, expected);
    }

    // Shifting moves the coordinate window but not the stored values.
    @Test
    public void testShift() {
        System.out.println(test.TestUtils.methodName(0));
        Grid<TPoint2i> tested = new Grid<>(new TPoint2i(5, 2), new TPoint2i(3, 5), (a, b)->new TPoint2i(a, b));
        tested = tested.shift(new TPoint2i(-1, -1));
        System.out.println(tested.get(4, 4));
        TList<TList<TPoint2i>> result = tested.getY();
        TList<TList<TPoint2i>> expected = TList.of(
                TList.of(p2i(5, 2), p2i(4, 2), p2i(3, 2)),
                TList.of(p2i(5, 3), p2i(4, 3), p2i(3, 3)),
                TList.of(p2i(5, 4), p2i(4, 4), p2i(3, 4)),
                TList.of(p2i(5, 5), p2i(4, 5), p2i(3, 5))
        );
        System.out.println("result : "+result);
        System.out.println("expected: "+expected);
        assertEquals(result, expected);
    }

    @Test
    public void testReverseY() {
        System.out.println(test.TestUtils.methodName(0));
        Grid<TPoint2i> tested = new Grid<>(new TPoint2i(5, 2), new TPoint2i(3, 5), (a, b)->new TPoint2i(a, b));
        TList<TList<TPoint2i>> result = tested.reverseY().getY();
        TList<TList<TPoint2i>> expected = TList.of(
                TList.of(p2i(5, 5), p2i(4, 5), p2i(3, 5)),
                TList.of(p2i(5, 4), p2i(4, 4), p2i(3, 4)),
                TList.of(p2i(5, 3), p2i(4, 3), p2i(3, 3)),
                TList.of(p2i(5, 2), p2i(4, 2), p2i(3, 2))
        );
        System.out.println("result : "+result);
        System.out.println("expected: "+expected);
        assertEquals(result, expected);
    }

    @Test
    public void testReverseX() {
        System.out.println(test.TestUtils.methodName(0));
        Grid<TPoint2i> tested = new Grid<>(new TPoint2i(5, 2), new TPoint2i(3, 5), (a, b)->new TPoint2i(a, b));
        TList<TList<TPoint2i>> result = tested.reverseX().getX();
        TList<TList<TPoint2i>> expected = TList.of(
                TList.of(p2i(3, 2), p2i(3, 3), p2i(3, 4), p2i(3, 5)),
                TList.of(p2i(4, 2), p2i(4, 3), p2i(4, 4), p2i(4, 5)),
                TList.of(p2i(5, 2), p2i(5, 3), p2i(5, 4), p2i(5, 5))
        );
        System.out.println("result : "+result);
        System.out.println("expected: "+expected);
        assertEquals(result, expected);
    }

    @Test
    public void testToRightHandSystem() {
        System.out.println(test.TestUtils.methodName(0));
        Grid<TPoint2i> tested = new Grid<>(new TPoint2i(5, 2), new TPoint2i(3, 5), (a, b)->new TPoint2i(a, b));
        TList<TList<TPoint2i>> result = tested.toRightHandedSystem().getY();
        TList<TList<TPoint2i>> expected = TList.of(
                TList.of(p2i(3, 5), p2i(4, 5), p2i(5, 5)),
                TList.of(p2i(3, 4), p2i(4, 4), p2i(5, 4)),
                TList.of(p2i(3, 3), p2i(4, 3), p2i(5, 3)),
                TList.of(p2i(3, 2), p2i(4, 2), p2i(5, 2))
        );
        System.out.println("result : "+result);
        System.out.println("expected: "+expected);
        assertEquals(result, expected);
    }

    // Smoke test only: just exercises toString without asserting output.
    @Test
    public void testToString() {
        System.out.println(test.TestUtils.methodName(0));
        Grid<TPoint2i> tested = new Grid<>(new TPoint2i(5, 2), new TPoint2i(3, 5), (a, b)->new TPoint2i(a, b));
        System.out.println(tested);
    }

    // trim re-windows the grid, filling newly exposed cells via the supplied generator.
    @Test
    public void testTrim() {
        System.out.println(test.TestUtils.methodName(0));
        TList<TList<Integer>> result = new Grid<>(p2i(0, 0), p2i(4, 4), (a, b)->a*4+b).trim(p2i(2, 2), p2i(5, 5), (a, b)->0).getX();
        TList<TList<Integer>> expected = TList.of(
                TList.of(10, 11, 12, 0),
                TList.of(14, 15, 16, 0),
                TList.of(18, 19, 20, 0),
                TList.of(0, 0, 0, 0)
        );
        System.out.println("result : "+result);
        System.out.println("expected: "+expected);
        assertEquals(result, expected);
    }

    @Test
    public void testConstructor() {
        System.out.println(test.TestUtils.methodName(0));
        String result = new Grid<>(p2i(4, 2), p2i(7, 5), asCharList("0123456789ABCDEF")).toFlatTestString();
        String expected = "0123\n4567\n89AB\nCDEF";
        System.out.println("result : "+result);
        System.out.println("expected: "+expected);
        assertEquals(result, expected);
    }

    @Test
    public void testMark() {
        System.out.println(test.TestUtils.methodName(0));
        String result = new Grid<>(p2i(4, 2), p2i(7, 5), (a, b)->" ").mark(TList.of(p2i(5, 3), p2i(5, 4), p2i(6, 3)), p->"X").toFlatTestString();
        String expected = Strings.wrap(
                "    "
                +" XX "
                +" X  "
                +"    ", 4);
        System.out.println("result : "+result);
        System.out.println("expected: "+expected);
        assertEquals(result, expected);
    }

    @Test
    public void testMap() {
        System.out.println(test.TestUtils.methodName(0));
        Grid<Integer> result = new Grid<>(p2i(4, 2), p2i(7, 5), (a, b)->"0").mark(TList.of(p2i(5, 3), p2i(5, 4), p2i(6, 3)), p->"1").map(s->Integer.parseInt(s));
        Grid<Integer> expected = new Grid<>(p2i(4, 2), p2i(7,5), Strings.asCharList(
                "0000"
                +"0110"
                +"0100"
                +"0000"
        ).map(s->s-'0'));
        System.out.println("result : "+result);
        System.out.println("expected: "+expected);
        assertEquals(result, expected);
    }

    @Test
    public void testMapByPosition() {
        System.out.println(test.TestUtils.methodName(0));
        Grid<Integer> result = new Grid<>(p2i(4, 2), p2i(7, 5), (a, b)->"0").mark(TList.of(p2i(5, 3), p2i(5, 4), p2i(6, 3)), p->"1").mapByPosition((p,s)->Integer.parseInt(s)+p.y);
        Grid<Integer> expected = new Grid<>(p2i(4, 2), p2i(7,5), Strings.asCharList(
                "2222"
                +"3443"
                +"4544"
                +"5555"
        ).map(s->s-'0'));
        System.out.println("result : "+result);
        System.out.println("expected: "+expected);
        assertEquals(result, expected);
    }

    @Test
    public void testPair() {
        System.out.println(test.TestUtils.methodName(0));
        Grid<Integer> tested = new Grid<>(p2i(4, 2), p2i(7, 5), (a, b)->"0").mark(TList.of(p2i(5, 3), p2i(5, 4), p2i(6, 3)), p->"1").map(s->Integer.parseInt(s));
        Grid<Integer> result = tested.mapByPosition((p,s)->p.y).pair(tested, (a,b)->a+b);
        Grid<Integer> expected = new Grid<>(p2i(4, 2), p2i(7,5), Strings.asCharList(
                "2222"
                +"3443"
                +"4544"
                +"5555"
        ).map(s->s-'0'));
        System.out.println("result : "+result);
        System.out.println("expected: "+expected);
        assertEquals(result, expected);
    }

    @Test
    public void testToRightHandedSystem() {
        System.out.println(test.TestUtils.methodName(0));
        Grid<Integer> result = new Grid<>(p2i(4, 2), p2i(7, 5), (a, b)->"0").mark(TList.of(p2i(5, 3), p2i(5, 4), p2i(6, 3)), p->"1").mapByPosition((p,s)->Integer.parseInt(s)+p.y).toRightHandedSystem();
        Grid<Integer> expected = new Grid<>(p2i(4, 2), p2i(7,5), Strings.asCharList(""
                +"5555"
                +"4544"
                +"3443"
                +"2222"
        ).map(s->s-'0'));
        System.out.println("result : "+result);
        System.out.println("expected: "+expected);
        assertEquals(result, expected);
    }

    // flip swaps the x and y axes: grid (4,2)-(7,6) becomes (2,4)-(6,7).
    @Test
    public void testFlip() {
        System.out.println(test.TestUtils.methodName(0));
        Grid<Integer> result = new Grid<>(p2i(4, 2), p2i(7, 6), (a, b)->"0").mark(TList.of(p2i(5, 3), p2i(5, 4), p2i(6, 3)), p->"1").mapByPosition((p,s)->Integer.parseInt(s)+p.y).flip();
        Grid<Integer> expected = new Grid<>(p2i(2, 4), p2i(6, 7), Strings.asCharList(
                "23456"
                +"24556"
                +"24456"
                +"23456"
        ).map(s->s-'0'));
        System.out.println("result : "+result);
        System.out.println("expected: "+expected);
        assertEquals(result, expected);
    }
}
// This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files)
// DO NOT EDIT
//
// NOTE(review): generated binding for GdkScreen — fix issues in the Gir
// definitions and regenerate rather than editing this file by hand.

use crate::Display;
use crate::Visual;
use crate::Window;
use glib::object::ObjectType as ObjectType_;
use glib::signal::connect_raw;
use glib::signal::SignalHandlerId;
use glib::translate::*;
use std::boxed::Box as Box_;
use std::fmt;
use std::mem::transmute;

glib::wrapper! {
    #[doc(alias = "GdkScreen")]
    pub struct Screen(Object<ffi::GdkScreen>);

    match fn {
        type_ => || ffi::gdk_screen_get_type(),
    }
}

impl Screen {
    #[doc(alias = "gdk_screen_get_display")]
    #[doc(alias = "get_display")]
    pub fn display(&self) -> Display {
        unsafe { from_glib_none(ffi::gdk_screen_get_display(self.to_glib_none().0)) }
    }

    #[doc(alias = "gdk_screen_get_resolution")]
    #[doc(alias = "get_resolution")]
    pub fn resolution(&self) -> f64 {
        unsafe { ffi::gdk_screen_get_resolution(self.to_glib_none().0) }
    }

    #[doc(alias = "gdk_screen_get_rgba_visual")]
    #[doc(alias = "get_rgba_visual")]
    pub fn rgba_visual(&self) -> Option<Visual> {
        unsafe { from_glib_none(ffi::gdk_screen_get_rgba_visual(self.to_glib_none().0)) }
    }

    #[doc(alias = "gdk_screen_get_root_window")]
    #[doc(alias = "get_root_window")]
    pub fn root_window(&self) -> Option<Window> {
        unsafe { from_glib_none(ffi::gdk_screen_get_root_window(self.to_glib_none().0)) }
    }

    #[doc(alias = "gdk_screen_get_system_visual")]
    #[doc(alias = "get_system_visual")]
    pub fn system_visual(&self) -> Option<Visual> {
        unsafe { from_glib_none(ffi::gdk_screen_get_system_visual(self.to_glib_none().0)) }
    }

    #[doc(alias = "gdk_screen_get_toplevel_windows")]
    #[doc(alias = "get_toplevel_windows")]
    pub fn toplevel_windows(&self) -> Vec<Window> {
        unsafe {
            FromGlibPtrContainer::from_glib_container(ffi::gdk_screen_get_toplevel_windows(
                self.to_glib_none().0,
            ))
        }
    }

    #[doc(alias = "gdk_screen_get_window_stack")]
    #[doc(alias = "get_window_stack")]
    pub fn window_stack(&self) -> Vec<Window> {
        unsafe {
            FromGlibPtrContainer::from_glib_full(ffi::gdk_screen_get_window_stack(
                self.to_glib_none().0,
            ))
        }
    }

    #[doc(alias = "gdk_screen_is_composited")]
    pub fn is_composited(&self) -> bool {
        unsafe { from_glib(ffi::gdk_screen_is_composited(self.to_glib_none().0)) }
    }

    #[doc(alias = "gdk_screen_list_visuals")]
    pub fn list_visuals(&self) -> Vec<Visual> {
        unsafe {
            FromGlibPtrContainer::from_glib_container(ffi::gdk_screen_list_visuals(
                self.to_glib_none().0,
            ))
        }
    }

    #[doc(alias = "gdk_screen_set_font_options")]
    pub fn set_font_options(&self, options: Option<&cairo::FontOptions>) {
        unsafe {
            ffi::gdk_screen_set_font_options(self.to_glib_none().0, options.to_glib_none().0);
        }
    }

    #[doc(alias = "gdk_screen_set_resolution")]
    pub fn set_resolution(&self, dpi: f64) {
        unsafe {
            ffi::gdk_screen_set_resolution(self.to_glib_none().0, dpi);
        }
    }

    #[doc(alias = "gdk_screen_get_default")]
    #[doc(alias = "get_default")]
    pub fn default() -> Option<Screen> {
        assert_initialized_main_thread!();
        unsafe { from_glib_none(ffi::gdk_screen_get_default()) }
    }

    // Signal connectors below all follow the same generated pattern: box the
    // Rust closure, pass it through connect_raw as user data, and invoke it
    // from an extern "C" trampoline.

    #[doc(alias = "composited-changed")]
    pub fn connect_composited_changed<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
        unsafe extern "C" fn composited_changed_trampoline<F: Fn(&Screen) + 'static>(
            this: *mut ffi::GdkScreen,
            f: glib::ffi::gpointer,
        ) {
            let f: &F = &*(f as *const F);
            f(&from_glib_borrow(this))
        }
        unsafe {
            let f: Box_<F> = Box_::new(f);
            connect_raw(
                self.as_ptr() as *mut _,
                b"composited-changed\0".as_ptr() as *const _,
                Some(transmute::<_, unsafe extern "C" fn()>(
                    composited_changed_trampoline::<F> as *const (),
                )),
                Box_::into_raw(f),
            )
        }
    }

    #[doc(alias = "monitors-changed")]
    pub fn connect_monitors_changed<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
        unsafe extern "C" fn monitors_changed_trampoline<F: Fn(&Screen) + 'static>(
            this: *mut ffi::GdkScreen,
            f: glib::ffi::gpointer,
        ) {
            let f: &F = &*(f as *const F);
            f(&from_glib_borrow(this))
        }
        unsafe {
            let f: Box_<F> = Box_::new(f);
            connect_raw(
                self.as_ptr() as *mut _,
                b"monitors-changed\0".as_ptr() as *const _,
                Some(transmute::<_, unsafe extern "C" fn()>(
                    monitors_changed_trampoline::<F> as *const (),
                )),
                Box_::into_raw(f),
            )
        }
    }

    #[doc(alias = "size-changed")]
    pub fn connect_size_changed<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
        unsafe extern "C" fn size_changed_trampoline<F: Fn(&Screen) + 'static>(
            this: *mut ffi::GdkScreen,
            f: glib::ffi::gpointer,
        ) {
            let f: &F = &*(f as *const F);
            f(&from_glib_borrow(this))
        }
        unsafe {
            let f: Box_<F> = Box_::new(f);
            connect_raw(
                self.as_ptr() as *mut _,
                b"size-changed\0".as_ptr() as *const _,
                Some(transmute::<_, unsafe extern "C" fn()>(
                    size_changed_trampoline::<F> as *const (),
                )),
                Box_::into_raw(f),
            )
        }
    }

    #[doc(alias = "font-options")]
    pub fn connect_font_options_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
        unsafe extern "C" fn notify_font_options_trampoline<F: Fn(&Screen) + 'static>(
            this: *mut ffi::GdkScreen,
            _param_spec: glib::ffi::gpointer,
            f: glib::ffi::gpointer,
        ) {
            let f: &F = &*(f as *const F);
            f(&from_glib_borrow(this))
        }
        unsafe {
            let f: Box_<F> = Box_::new(f);
            connect_raw(
                self.as_ptr() as *mut _,
                b"notify::font-options\0".as_ptr() as *const _,
                Some(transmute::<_, unsafe extern "C" fn()>(
                    notify_font_options_trampoline::<F> as *const (),
                )),
                Box_::into_raw(f),
            )
        }
    }

    #[doc(alias = "resolution")]
    pub fn connect_resolution_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
        unsafe extern "C" fn notify_resolution_trampoline<F: Fn(&Screen) + 'static>(
            this: *mut ffi::GdkScreen,
            _param_spec: glib::ffi::gpointer,
            f: glib::ffi::gpointer,
        ) {
            let f: &F = &*(f as *const F);
            f(&from_glib_borrow(this))
        }
        unsafe {
            let f: Box_<F> = Box_::new(f);
            connect_raw(
                self.as_ptr() as *mut _,
                b"notify::resolution\0".as_ptr() as *const _,
                Some(transmute::<_, unsafe extern "C" fn()>(
                    notify_resolution_trampoline::<F> as *const (),
                )),
                Box_::into_raw(f),
            )
        }
    }
}

impl fmt::Display for Screen {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str("Screen")
    }
}
// Copyright 2017 Rough Industries LLC. All rights reserved. //model/image.go:package model package model import () //DATA STRUCTURES //Image data structure type Image struct { Text string Keywords string ImageSource string LabeledImageSource string ImageGen string ImageUpdate string }
Actress Lena Dunham wants images of guns yanked from ads for Matt Damon’s new film Jason Bourne. On Tuesday, the Girls star re-posted a photo of an ad in which the image of Damon’s gun had been ripped out. Producer Tami Sagher had captioned the photo: “Hey New Yorkers, what if we do some peeling & get rid of the guns in the Jason Bourne subway ads. So tired of guns.” Dunham added her own comment: “Good idea @tulipbone! Let’s go!” Ironically, Dunham’s anti-gun zeal comes on the heels of Matt Damon’s own criticism of guns and U.S. laws. On July 5 Breitbart News reported that Damon spoke at the Australian premiere of Jason Bourne and lamented that the U.S. has not passed an Australian-style gun ban. However, he did not publicly call for ripping images of guns out of ads for his movie, nor had he done so for previous shoot ’em ups like The Bourne Identity, The Bourne Supremacy, The Bourne Ultimatum and The Departed. Damon’s approach was more like that of Liam Neeson, another gun-toting action hero who called for increased gun control in the U.S. while releasing new movies in which guns were central to his character’s success. In fact, at one point, Neeson called for more gun control while standing in front of a poster of Taken 3 that showed him aiming a gun at the camera. Maybe Damon and Neeson could both put their money where their mouths are and make ads that show them holding slingshots or sticks rather than firearms. AWR Hawkins is the Second Amendment columnist for Breitbart News and political analyst for Armed American Radio. Follow him on Twitter: @AWRHawkins. Reach him directly at [email protected].
/** A mapping from {@link TypeVariable} to resolved {@link Type}. */
class TypeResolutions {
    /** The type variables declared by the generic type. */
    private final TypeVariable<?>[] typeVariables;

    /** The resolved type arguments, positionally matching {@link #typeVariables}. */
    Type[] resolvedTypeArguments;

    /**
     * Produce a list of type variable resolutions from a resolved type, by comparing its actual type parameters
     * with the generic (declared) parameters of its generic type.
     *
     * @param resolvedType
     *            the resolved type
     */
    TypeResolutions(final ParameterizedType resolvedType) {
        typeVariables = ((Class<?>) resolvedType.getRawType()).getTypeParameters();
        resolvedTypeArguments = resolvedType.getActualTypeArguments();
        if (resolvedTypeArguments.length != typeVariables.length) {
            throw new IllegalArgumentException("Type parameter count mismatch");
        }
    }

    /**
     * Resolve the type variables in a type using a type variable resolution list, producing a resolved type.
     * Returns the input type unchanged when there is nothing to resolve.
     *
     * @param type
     *            the type
     * @return the resolved type
     */
    Type resolveTypeVariables(final Type type) {
        if (type instanceof Class<?>) {
            // Arrays and non-generic classes have no type variables
            return type;
        } else if (type instanceof ParameterizedType) {
            // Recursively resolve parameterized types
            final ParameterizedType parameterizedType = (ParameterizedType) type;
            final Type[] typeArgs = parameterizedType.getActualTypeArguments();
            // typeArgsResolved stays null until a resolved arg differs from the
            // original, so the common no-op case allocates nothing.
            Type[] typeArgsResolved = null;
            for (int i = 0; i < typeArgs.length; i++) {
                // Recursively revolve each parameter of the type
                final Type typeArgResolved = resolveTypeVariables(typeArgs[i]);
                // Only compare typeArgs to typeArgResolved until the first difference is found
                if (typeArgsResolved == null) {
                    if (!typeArgResolved.equals(typeArgs[i])) {
                        // After the first difference is found, lazily allocate typeArgsResolved
                        typeArgsResolved = new Type[typeArgs.length];
                        // Go back and copy all the previous args
                        System.arraycopy(typeArgs, 0, typeArgsResolved, 0, i);
                        // Insert the first different arg
                        typeArgsResolved[i] = typeArgResolved;
                    }
                } else {
                    // After the first difference is found, keep copying the resolved args into the array
                    typeArgsResolved[i] = typeArgResolved;
                }
            }
            if (typeArgsResolved == null) {
                // There were no type parameters to resolve
                return type;
            } else {
                // Return new ParameterizedType that wraps the resolved type args
                return new ParameterizedTypeImpl((Class<?>) parameterizedType.getRawType(), typeArgsResolved,
                        parameterizedType.getOwnerType());
            }
        } else if (type instanceof TypeVariable<?>) {
            // Look up concrete type for type variable (matched by variable name)
            final TypeVariable<?> typeVariable = (TypeVariable<?>) type;
            for (int i = 0; i < typeVariables.length; i++) {
                if (typeVariables[i].getName().equals(typeVariable.getName())) {
                    return resolvedTypeArguments[i];
                }
            }
            // Could not resolve type variable
            return type;
        } else if (type instanceof GenericArrayType) {
            // Count the array dimensions, and resolve the innermost type of the array
            int numArrayDims = 0;
            Type t = type;
            while (t instanceof GenericArrayType) {
                numArrayDims++;
                t = ((GenericArrayType) t).getGenericComponentType();
            }
            final Type innermostType = t;
            final Type innermostTypeResolved = resolveTypeVariables(innermostType);
            if (!(innermostTypeResolved instanceof Class<?>)) {
                throw new IllegalArgumentException("Could not resolve generic array type " + type);
            }
            final Class<?> innermostTypeResolvedClass = (Class<?>) innermostTypeResolved;

            // Build an array to hold the size of each dimension, filled with zeroes
            final int[] dims = (int[]) Array.newInstance(int.class, numArrayDims);

            // Build a zero-sized array of the required number of dimensions, using the resolved innermost class;
            // reflection offers no direct way to obtain a multi-dimensional array class, hence the instance.
            final Object arrayInstance = Array.newInstance(innermostTypeResolvedClass, dims);

            // Get the class of this array instance -- this is the resolved array type
            return arrayInstance.getClass();
        } else if (type instanceof WildcardType) {
            // TODO: Support WildcardType
            throw ClassGraphException.newClassGraphException("WildcardType not yet supported: " + type);
        } else {
            throw ClassGraphException.newClassGraphException("Got unexpected type: " + type);
        }
    }

    /* (non-Javadoc)
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        if (typeVariables.length == 0) {
            return "{ }";
        } else {
            final StringBuilder buf = new StringBuilder();
            buf.append("{ ");
            for (int i = 0; i < typeVariables.length; i++) {
                if (i > 0) {
                    buf.append(", ");
                }
                buf.append(typeVariables[i]).append(" => ").append(resolvedTypeArguments[i]);
            }
            buf.append(" }");
            return buf.toString();
        }
    }
}
use pyo3::prelude::*; pub mod hw { tonic::include_proto!("hw"); // The string specified here must match the proto package name } #[pyclass] #[derive(Debug, Clone)] pub struct HelloRequest { #[pyo3(get)] name: String } #[pymethods] impl HelloRequest { #[new] fn new(name: &str) -> Self { HelloRequest { name: name.into() } } } impl From<hw::HelloRequest> for HelloRequest { fn from(other: hw::HelloRequest) -> Self { HelloRequest { name: other.name } } } #[pyclass] #[derive(Debug, Clone)] pub struct HelloResponse { #[pyo3(get, set)] message: String } #[pymethods] impl HelloResponse { #[new] fn new(message: &str) -> Self { HelloResponse { message: message.into() } } } impl From<HelloResponse> for hw::HelloResponse { fn from(other: HelloResponse) -> Self { hw::HelloResponse { message: other.message } } }
/*
 * Copyright (c) 2017 The Regents of the University of California.
 * All rights reserved.
 *
 * '$Author: crawl $'
 * '$Date: 2017-08-29 15:27:08 -0700 (Tue, 29 Aug 2017) $'
 * '$Revision: 1392 $'
 *
 * Permission is hereby granted, without written agreement and without
 * license or royalty fees, to use, copy, modify, and distribute this
 * software and its documentation for any purpose, provided that the above
 * copyright notice and the following two paragraphs appear in all copies
 * of this software.
 *
 * IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY
 * FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
 * THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE
 * PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF
 * CALIFORNIA HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES,
 * ENHANCEMENTS, OR MODIFICATIONS.
 *
 */
package org.kepler.webview.provenance;

import java.util.Date;

import org.kepler.objectmanager.lsid.KeplerLSID;
import org.kepler.provenance.FireState;
import org.kepler.provenance.ProvenanceRecorder;
import org.kepler.provenance.RecordingException;
import org.kepler.provenance.SimpleFiringRecording;
import org.kepler.webview.server.WebViewableUtilities;

import ptolemy.actor.Actor;
import ptolemy.actor.FiringEvent;
import ptolemy.kernel.util.IllegalActionException;
import ptolemy.kernel.util.Nameable;
import ptolemy.kernel.util.NamedObj;

/** Provenance recording that forwards firing and execution events to the
 *  web view layer via WebViewableUtilities, instead of persisting them.
 */
public class WebViewRecording extends SimpleFiringRecording<Integer> {

    public WebViewRecording() throws RecordingException {
        super();
    }

    /** Record an actor firing at a specific time.
     *  Collapses the prefire/fire/postfire (or iterate) event stream into a
     *  single FireStart/FireEnd pair per iteration, tracked per-actor in
     *  _fireStateTable.
     */
    @Override
    public void actorFire(FiringEvent event, Date timestamp) throws RecordingException {

        Actor actor = event.getActor();
        FiringEvent.FiringEventType curEventType = event.getType();

        FireState<Integer> fireState = _fireStateTable.get(actor);

        // Guard the per-actor fire state against concurrent firing events.
        synchronized (fireState) {
            // get the last type of firing start
            FiringEvent.FiringEventType lastStartType = fireState.getLastStartFireType();

            // An iteration starts on BEFORE_ITERATE, or on BEFORE_PREFIRE when
            // the director does not emit iterate events.
            if (curEventType == FiringEvent.BEFORE_ITERATE
                    || (curEventType == FiringEvent.BEFORE_PREFIRE && lastStartType != FiringEvent.BEFORE_ITERATE)) {
                int firing = fireState.getNumberOfFirings() + 1;
                fireState.fireStart(curEventType, firing);
                try {
                    WebViewableUtilities.sendEvent(WebViewableUtilities.Event.FireStart,
                            (NamedObj)actor, timestamp);
                } catch (IllegalActionException e) {
                    // TODO Auto-generated catch block
                    e.printStackTrace();
                }
            }
            // see if current firing is end of iteration:
            else if (curEventType == FiringEvent.AFTER_ITERATE
                    || (curEventType == FiringEvent.AFTER_POSTFIRE && lastStartType == FiringEvent.BEFORE_PREFIRE)) {
                if (curEventType == FiringEvent.AFTER_POSTFIRE) {
                    // NOTE(review): stops with AFTER_PREFIRE (not AFTER_POSTFIRE),
                    // presumably to pair with the BEFORE_PREFIRE start recorded
                    // above -- confirm against FireState's pairing rules.
                    fireState.fireStop(FiringEvent.AFTER_PREFIRE);
                } else {
                    fireState.fireStop(curEventType);
                }
                try {
                    WebViewableUtilities.sendEvent(WebViewableUtilities.Event.FireEnd,
                            (NamedObj)actor, timestamp);
                } catch (IllegalActionException e) {
                    // TODO Auto-generated catch block
                    e.printStackTrace();
                }
            }
        }
    }

    /** An actor threw an exception.
     *
     * @param source
     * @param throwable
     * @param executionLSID
     * @throws RecordingException
     */
    @Override
    public void executionError(Nameable source, Throwable throwable, KeplerLSID executionLSID)
            throws RecordingException {
        // TODO
    }

    /** Record the starting of workflow execution at a specific time.
     *  Sends a WorkflowExecutionStart event for the top-level model.
     *
     * @param executionLSID
     * @throws RecordingException
     */
    @Override
    public void executionStart(KeplerLSID executionLSID, Date timestamp) throws RecordingException {
        try {
            WebViewableUtilities.sendEvent(
                    WebViewableUtilities.Event.WorkflowExecutionStart, _model, timestamp);
        } catch (IllegalActionException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        }
    }

    /** Record the stopping of workflow execution.
     *  Sends a WorkflowExecutionEnd event for the top-level model.
     *
     * @param executionLSID
     * @throws RecordingException
     */
    @Override
    public void executionStop(KeplerLSID executionLSID, Date timestamp) throws RecordingException {
        try {
            WebViewableUtilities.sendEvent(
                    WebViewableUtilities.Event.WorkflowExecutionEnd, _model, timestamp);
        } catch (IllegalActionException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        }
    }

    /** Cache the top-level model when (un)assigned to a recorder container,
     *  so execution events can be reported against it.
     */
    @Override
    public void setContainer(ProvenanceRecorder container) {
        super.setContainer(container);
        if (_recorderContainer == null) {
            _model = null;
        } else {
            _model = _recorderContainer.toplevel();
        }
    }

    // Top-level model of the workflow; null when detached from a container.
    private NamedObj _model;
}
module Network.HaskellNet.POP3
    ( -- * Establishing Connection
      connectPop3Port
    , connectPop3
    , connectStream
      -- * Send Command
    , sendCommand
      -- * More Specific Operations
    , closePop3
    , user
    , pass
    , userPass
    , apop
    , auth
    , stat
    , dele
    , retr
    , top
    , rset
    , allList
    , list
    , allUIDLs
    , uidl
      -- * Other Useful Operations
    , doPop3Port
    , doPop3
    , doPop3Stream
      -- * Other types
    , A.AuthType(..)
    )
    where

import Network.HaskellNet.BSStream
import Network.Socket
import Network.Compat
import qualified Network.HaskellNet.Auth as A

import Data.ByteString (ByteString)
import qualified Data.ByteString as B
import qualified Data.ByteString.Char8 as BS

import Crypto.Hash.MD5
import Numeric (showHex)

import Control.Applicative ((<$>))
import Control.Exception
import Control.Monad (when, unless)

import Data.List
import Data.Char (isSpace, isControl)

import System.IO

import Prelude hiding (catch)

import Network.HaskellNet.POP3.Types
import Network.HaskellNet.POP3.Connection

-- | Hex-encode the MD5 digest of a string (used for APOP authentication).
hexDigest :: [Char] -> [Char]
hexDigest = concatMap (flip showHex "") . B.unpack . hash . B.pack . map (toEnum.fromEnum)

-- | True for whitespace or control characters.
blank :: Char -> Bool
blank a = isSpace a || isControl a

-- | NOTE: reverses the string and drops leading blanks, i.e. it returns
-- the input REVERSED with trailing blanks removed.  'strip' and 'stripEnd'
-- below rely on this double-reversal trick.
trimR :: ByteString -> ByteString
trimR s = let rs = BS.reverse s in
          BS.dropWhile blank rs

-- | Remove blanks from both ends (two reversals cancel out).
strip :: ByteString -> ByteString
strip = trimR . trimR

-- | Remove blanks from the end only.
stripEnd :: ByteString -> ByteString
stripEnd = BS.reverse . trimR

-- | connecting to the pop3 server specified by the hostname and port
-- number
connectPop3Port :: String -> PortNumber -> IO POP3Connection
connectPop3Port hostname port =
    handleToStream <$> (connectTo hostname port) >>= connectStream

-- | connecting to the pop3 server specified by the hostname. 110 is
-- used for the port number.
connectPop3 :: String -> IO POP3Connection
connectPop3 = flip connectPop3Port 110

-- | connecting to the pop3 server via a stream.  If the greeting contains
-- an RFC 1939 timestamp (@\<...\>@), it is kept as the APOP key.
connectStream :: BSStream -> IO POP3Connection
connectStream st =
    do (resp, msg) <- response st
       when (resp == Err) $ fail "cannot connect"
       let code = last $ BS.words msg
       if BS.head code == '<' && BS.last code == '>'
         then return $ newConnection st (BS.unpack code)
         else return $ newConnection st ""

-- | Read a single-line response; drops the "+OK " / "-ERR " prefix.
response :: BSStream -> IO (Response, ByteString)
response st =
    do reply <- strip <$> bsGetLine st
       if (BS.pack "+OK") `BS.isPrefixOf` reply
         then return (Ok, BS.drop 4 reply)
         else return (Err, BS.drop 5 reply)

-- | parse multiline response (terminated by a line containing only ".")
responseML :: POP3Connection -> IO (Response, ByteString)
responseML conn =
    do reply <- strip <$> bsGetLine st
       if (BS.pack "+OK") `BS.isPrefixOf` reply
         then do rest <- getRest
                 return (Ok, BS.unlines (BS.drop 4 reply : rest))
         else return (Err, BS.drop 5 reply)
    where st = stream conn
          getRest = do l <- stripEnd <$> bsGetLine st
                       if l == BS.singleton '.'
                         then return []
                         else (l:) <$> getRest

-- | sendCommand sends a pop3 command via a pop3 connection.  This
-- action is too generic. Use more specific actions
sendCommand :: POP3Connection -> Command -> IO (Response, ByteString)
-- Multiline responses:
sendCommand conn (LIST Nothing) =
    bsPutCrLf (stream conn) (BS.pack "LIST") >> responseML conn
sendCommand conn (UIDL Nothing) =
    bsPutCrLf (stream conn) (BS.pack "UIDL") >> responseML conn
sendCommand conn (RETR msg) =
    bsPutCrLf (stream conn) (BS.pack $ "RETR " ++ show msg) >> responseML conn
sendCommand conn (TOP msg n) =
    bsPutCrLf (stream conn) (BS.pack $ "TOP " ++ show msg ++ " " ++ show n) >> responseML conn
-- AUTH LOGIN: username and password are sent base64-encoded in turn;
-- the intermediate server prompts are read and discarded.
sendCommand conn (AUTH A.LOGIN username password) =
    do bsPutCrLf (stream conn) $ BS.pack "AUTH LOGIN"
       bsGetLine (stream conn)
       bsPutCrLf (stream conn) $ BS.pack userB64
       bsGetLine (stream conn)
       bsPutCrLf (stream conn) $ BS.pack passB64
       response (stream conn)
    where (userB64, passB64) = A.login username password
-- Other AUTH mechanisms: decode the server challenge (after "+ "),
-- then answer with the mechanism-specific reply.
sendCommand conn (AUTH at username password) =
    do bsPutCrLf (stream conn) $ BS.pack $ unwords ["AUTH", show at]
       c <- bsGetLine (stream conn)
       let challenge =
               if BS.take 2 c == BS.pack "+ "
               then A.b64Decode $ BS.unpack $ head $
                    dropWhile (isSpace . BS.last) $ BS.inits $ BS.drop 2 c
               else ""
       bsPutCrLf (stream conn) $ BS.pack $ A.auth at challenge username password
       response (stream conn)
-- All remaining commands have single-line responses.
sendCommand conn command =
    bsPutCrLf (stream conn) (BS.pack commandStr) >> response (stream conn)
    where commandStr = case command of
                         (USER name) -> "USER " ++ name
                         (PASS passw) -> "PASS " ++ passw
                         NOOP -> "NOOP"
                         QUIT -> "QUIT"
                         STAT -> "STAT"
                         (DELE msg) -> "DELE " ++ show msg
                         RSET -> "RSET"
                         (LIST msg) -> "LIST " ++ maybe "" show msg
                         (UIDL msg) -> "UIDL " ++ maybe "" show msg
                         (APOP usern passw) -> "APOP " ++ usern ++ " " ++
                                               hexDigest (apopKey conn ++ passw)
                         (AUTH _ _ _) -> error "BUG: AUTH should not get matched here"
                         (RETR _) -> error "BUG: RETR should not get matched here"
                         (TOP _ _) -> error "BUG: TOP should not get matched here"

-- | Send the user name.
user :: POP3Connection -> String -> IO ()
user conn name = do (resp, _) <- sendCommand conn (USER name)
                    when (resp == Err) $ fail "cannot send user name"

-- | Send the password.
pass :: POP3Connection -> String -> IO ()
pass conn pwd = do (resp, _) <- sendCommand conn (PASS pwd)
                   when (resp == Err) $ fail "cannot send password"

-- | USER/PASS login in one step.
userPass :: POP3Connection -> A.UserName -> A.Password -> IO ()
userPass conn name pwd = user conn name >> pass conn pwd

-- | Authenticate with the given SASL mechanism.
auth :: POP3Connection -> A.AuthType -> A.UserName -> A.Password
     -> IO ()
auth conn at username password =
    do (resp, msg) <- sendCommand conn (AUTH at username password)
       unless (resp == Ok) $ fail $ "authentication failed: " ++ BS.unpack msg

-- | APOP digest authentication (uses the server timestamp from the greeting).
apop :: POP3Connection -> String -> String -> IO ()
apop conn name pwd =
    do (resp, msg) <- sendCommand conn (APOP name pwd)
       when (resp == Err) $ fail $ "authentication failed: " ++ BS.unpack msg

-- | Mailbox status: (message count, mailbox size in octets).
stat :: POP3Connection -> IO (Int, Int)
stat conn = do (resp, msg) <- sendCommand conn STAT
               when (resp == Err) $ fail "cannot get stat info"
               let (nn, mm) = BS.span (/=' ') msg
               return (read $ BS.unpack nn, read $ BS.unpack $ BS.tail mm)

-- | Mark a message for deletion.
dele :: POP3Connection -> Int -> IO ()
dele conn n = do (resp, _) <- sendCommand conn (DELE n)
                 when (resp == Err) $ fail "cannot delete"

-- | Retrieve a full message (status line stripped).
retr :: POP3Connection -> Int -> IO ByteString
retr conn n = do (resp, msg) <- sendCommand conn (RETR n)
                 when (resp == Err) $ fail "cannot retrieve"
                 return $ BS.tail $ BS.dropWhile (/='\n') msg

-- | Retrieve headers plus the first @m@ body lines of a message.
top :: POP3Connection -> Int -> Int -> IO ByteString
top conn n m = do (resp, msg) <- sendCommand conn (TOP n m)
                  when (resp == Err) $ fail "cannot retrieve"
                  return $ BS.tail $ BS.dropWhile (/='\n') msg

-- | Unmark all messages marked for deletion.
rset :: POP3Connection -> IO ()
rset conn = do (resp, _) <- sendCommand conn RSET
               when (resp == Err) $ fail "cannot reset"

-- | Scan listing for all messages: (message number, size in octets).
allList :: POP3Connection -> IO [(Int, Int)]
allList conn = do (resp, lst) <- sendCommand conn (LIST Nothing)
                  when (resp == Err) $ fail "cannot retrieve the list"
                  return $ map f $ tail $ BS.lines lst
    where f s = let (n1, n2) = BS.span (/=' ') s
                in (read $ BS.unpack n1, read $ BS.unpack $ BS.tail n2)

-- | Size in octets of one message.
list :: POP3Connection -> Int -> IO Int
list conn n = do (resp, lst) <- sendCommand conn (LIST (Just n))
                 when (resp == Err) $ fail "cannot retrieve the list"
                 let (_, n2) = BS.span (/=' ') lst
                 return $ read $ BS.unpack $ BS.tail n2

-- | Unique-id listing for all messages: (message number, unique id).
allUIDLs :: POP3Connection -> IO [(Int, ByteString)]
allUIDLs conn = do (resp, lst) <- sendCommand conn (UIDL Nothing)
                   when (resp == Err) $ fail "cannot retrieve the uidl list"
                   return $ map f $ tail $ BS.lines lst
    where f s = let (n1, n2) = BS.span (/=' ') s in (read $ BS.unpack n1, n2)

-- | Unique id of one message.
uidl :: POP3Connection -> Int -> IO ByteString
uidl conn n = do (resp, msg) <- sendCommand conn (UIDL (Just n))
                 when (resp == Err) $ fail "cannot retrieve the uidl data"
                 return $ BS.tail $ BS.dropWhile (/=' ') msg

-- | Send QUIT and close the underlying stream.
closePop3 :: POP3Connection -> IO ()
closePop3 c = do sendCommand c QUIT
                 bsClose (stream c)

-- | Bracketed session against host:port; the connection is always closed.
doPop3Port :: String -> PortNumber -> (POP3Connection -> IO a) -> IO a
doPop3Port host port execution =
    bracket (connectPop3Port host port) closePop3 execution

-- | Bracketed session against host on port 110.
doPop3 :: String -> (POP3Connection -> IO a) -> IO a
doPop3 host execution = doPop3Port host 110 execution

-- | Bracketed session over an existing stream.
doPop3Stream :: BSStream -> (POP3Connection -> IO b) -> IO b
doPop3Stream conn execution = bracket (connectStream conn) closePop3 execution

crlf :: BS.ByteString
crlf = BS.pack "\r\n"

-- | Write a line terminated by CRLF (as required by the POP3 protocol) and flush.
bsPutCrLf :: BSStream -> ByteString -> IO ()
bsPutCrLf h s = bsPut h s >> bsPut h crlf >> bsFlush h
/* * release_partition * clear information kept within a partition, including * tuplestore and aggregate results. */ static void release_partition(WindowAggState *winstate) { int i; for (i = 0; i < winstate->numfuncs; i++) { WindowStatePerFunc perfuncstate = &(winstate->perfunc[i]); if (perfuncstate->winobj) perfuncstate->winobj->localmem = NULL; } MemoryContextResetAndDeleteChildren(winstate->partcontext); MemoryContextResetAndDeleteChildren(winstate->aggcontext); if (winstate->buffer) tuplestore_end(winstate->buffer); winstate->buffer = NULL; winstate->partition_spooled = false; }
<reponame>spyrosfoniadakis/jalgorithms package misc; import keyedElement.DoubleKeyedElement; import keyedElement.FloatKeyedElement; import keyedElement.IntKeyedElement; import keyedElement.LongKeyedElement; import utils.DateUtils; import java.util.Arrays; import java.util.stream.Collectors; public final class PersonUtils { public static Person[] getPeople() { return new Person[]{ Person.from("John", "Doe", DateUtils.getDateFrom(1980, 7, 19)), Person.from("Jack", "Brown", DateUtils.getDateFrom(1990, 8, 14)), Person.from("Joe", "Black", DateUtils.getDateFrom(1997, 3, 20)), Person.from("Hank", "Smith", DateUtils.getDateFrom(1972, 10, 30)), Person.from("Tim", "Johnson", DateUtils.getDateFrom(1979, 1, 22)), Person.from("George", "Edison", DateUtils.getDateFrom(1992, 12, 7)), Person.from("Alan", "Edison", DateUtils.getDateFrom(1990, 9, 7)) }; } public static IntKeyedElement<Person>[] createIntKeyedElementsArrayFrom(Person[] people) { return Arrays.stream(people) .map(p -> IntKeyedElement.from(p.getAgeInMonths(), p)) .collect(Collectors.toList()) .toArray(new IntKeyedElement[people.length]); } public static LongKeyedElement<Person>[] createLongKeyedElementsArrayFrom(Person[] people) { return Arrays.stream(people) .map(p -> LongKeyedElement.from(((long)p.getAgeInMonths()), p)) .collect(Collectors.toList()) .toArray(new LongKeyedElement[people.length]); } public static FloatKeyedElement<Person>[] createFloatKeyedElementsArrayFrom(Person[] people) { return Arrays.stream(people) .map(p -> FloatKeyedElement.from(((float)p.getAgeInMonths()), p)) .collect(Collectors.toList()) .toArray(new FloatKeyedElement[people.length]); } public static DoubleKeyedElement<Person>[] createDoubleKeyedElementsArrayFrom(Person[] people) { return Arrays.stream(people) .map(p -> DoubleKeyedElement.from((double)p.getAgeInMonths(), p)) .collect(Collectors.toList()) .toArray(new DoubleKeyedElement[people.length]); } }
<filename>generate.go package veldt // GenerateTile generates a tile for the provided pipeline ID and JSON request. func GenerateTile(id string, args map[string]interface{}) error { pipeline, err := GetPipeline(id) if err != nil { return err } req, err := pipeline.NewTileRequest(args) if err != nil { return err } return pipeline.Generate(req) } // GetTile retrieves a tile from the store for the provided pipeline ID // and JSON request. func GetTile(id string, args map[string]interface{}) ([]byte, error) { pipeline, err := GetPipeline(id) if err != nil { return nil, err } req, err := pipeline.NewTileRequest(args) if err != nil { return nil, err } return pipeline.Get(req) } // GenerateAndGetTile generates and retrieves a tile from the store // for the provided pipeline ID and JSON request. func GenerateAndGetTile(id string, args map[string]interface{}) ([]byte, error) { pipeline, err := GetPipeline(id) if err != nil { return nil, err } req, err := pipeline.NewTileRequest(args) if err != nil { return nil, err } return pipeline.GenerateAndGet(req) } // GenerateMeta generates meta data for the provided pipeline ID and JSON // request. func GenerateMeta(id string, args map[string]interface{}) error { pipeline, err := GetPipeline(id) if err != nil { return err } req, err := pipeline.NewMetaRequest(args) if err != nil { return err } return pipeline.Generate(req) } // GetMeta retrieves metadata from the store for the provided pipeline // ID and JSON request. func GetMeta(id string, args map[string]interface{}) ([]byte, error) { pipeline, err := GetPipeline(id) if err != nil { return nil, err } req, err := pipeline.NewMetaRequest(args) if err != nil { return nil, err } return pipeline.Get(req) } // GenerateAndGetMeta generates and retrieves a metadata from the store // for the provided pipeline ID and JSON request. 
func GenerateAndGetMeta(id string, args map[string]interface{}) ([]byte, error) { pipeline, err := GetPipeline(id) if err != nil { return nil, err } req, err := pipeline.NewMetaRequest(args) if err != nil { return nil, err } return pipeline.GenerateAndGet(req) }
A mobile unit for memory retrieval in daily life based on image and sensor processing. We developed a Mobile Unit whose purpose is to support memory retrieval of daily life. In this paper, we describe the two characteristic features of this unit: (1) behavior classification with an acceleration sensor, and (2) extraction of environmental differences with image-processing technology. In (1), by analyzing the power and frequency of an acceleration sensor oriented in the direction of gravity, the user's activities can be classified into categories such as walking and staying. In (2), by extracting the difference between the beginning scene and the ending scene of a stay scene with image processing, the work performed by the user is recognized as a difference in the environment. Using these two techniques, specific scenes of daily life can be extracted, and important information at scene changes can be recorded. In particular, we describe the effectiveness of this approach in supporting the retrieval of important things, such as items left behind and the state of work completed halfway.
//
//  UIView+SDAutoLayout.h
//
//  Created by gsd on 15/10/6.
//  Copyright (c) 2015年 gsd. All rights reserved.
//

/*
 *************************************************************************

 --------- INTRODUCTION ---------

 HOW TO USE ?

 MODE 1. >>>>>>>>>>>>>>> You can use it in this way:
 Demo.sd_layout
 .topSpaceToView(v1, 100)
 .bottomSpaceToView(v3, 100)
 .leftSpaceToView(v0, 150)
 .rightSpaceToView(v2, 150);

 MODE 2. >>>>>>>>>>>>>>> You can also use it in this way that is more brevity:
 Demo.sd_layout.topSpaceToView(v1, 100).bottomSpaceToView(v3, 100).leftSpaceToView(v0, 150).rightSpaceToView(v2, 150);

 *************************************************************************
 */

/*
 *********************************************************************************
 *
 * If you run into a bug while using this auto layout library, please contact us
 * through any of the channels below; we will fix the bug and help you promptly.
 * QQ         : 2689718696(gsdios)
 * Email      : <EMAIL>
 * GitHub     : https://github.com/gsdios
 * Sina Weibo : GSD_iOS
 *
 * Video tutorial: http://www.letv.com/ptv/vplay/24038772.html
 * Usage examples: https://github.com/gsdios/SDAutoLayout/blob/master/README.md
 *
 *********************************************************************************
 */

#import <UIKit/UIKit.h>

@class SDAutoLayoutModel;

typedef SDAutoLayoutModel *(^MarginToView)(UIView *toView, CGFloat value);
typedef SDAutoLayoutModel *(^Margin)(CGFloat value);
typedef SDAutoLayoutModel *(^MarginEqualToView)(UIView *toView);
typedef SDAutoLayoutModel *(^WidthHeight)(CGFloat value);
typedef SDAutoLayoutModel *(^WidthHeightEqualToView)(UIView *toView, CGFloat ratioValue);
typedef SDAutoLayoutModel *(^AutoHeight)(CGFloat ratioValue);
typedef void (^SpaceToSuperView)(UIEdgeInsets insets);

@interface SDAutoLayoutModel : NSObject

/*
 ***************************** NOTES *****************************
 Methods whose names contain "SpaceToView"  take 2 arguments: a (UIView) reference view and a (CGFloat) margin value.
 Methods whose names contain "RatioToView"  take 2 arguments: a (UIView) reference view and a (CGFloat) ratio.
 Methods whose names contain "EqualToView"  take 1 argument:  a (UIView) reference view.
 Methods whose names contain "Is"           take 1 argument:  a (CGFloat) value.
 *****************************************************************
 */

/*
 * Set margins relative to other views.
 */
@property (nonatomic, copy, readonly) MarginToView leftSpaceToView;
@property (nonatomic, copy, readonly) MarginToView rightSpaceToView;
@property (nonatomic, copy, readonly) MarginToView topSpaceToView;
@property (nonatomic, copy, readonly) MarginToView bottomSpaceToView;

/*
 * Set absolute x, y, width, height, centerX and centerY values.
 */
@property (nonatomic, copy, readonly) Margin xIs;
@property (nonatomic, copy, readonly) Margin yIs;
@property (nonatomic, copy, readonly) Margin centerXIs;
@property (nonatomic, copy, readonly) Margin centerYIs;
@property (nonatomic, copy, readonly) WidthHeight widthIs;
@property (nonatomic, copy, readonly) WidthHeight heightIs;

/*
 * Set maximum and minimum width and height.
 */
@property (nonatomic, copy, readonly) WidthHeight maxWidthIs;
@property (nonatomic, copy, readonly) WidthHeight maxHeightIs;
@property (nonatomic, copy, readonly) WidthHeight minWidthIs;
@property (nonatomic, copy, readonly) WidthHeight minHeightIs;

/*
 * Make an edge (or center) equal to the same edge of a reference view.
 */
@property (nonatomic, copy, readonly) MarginEqualToView leftEqualToView;
@property (nonatomic, copy, readonly) MarginEqualToView rightEqualToView;
@property (nonatomic, copy, readonly) MarginEqualToView topEqualToView;
@property (nonatomic, copy, readonly) MarginEqualToView bottomEqualToView;
@property (nonatomic, copy, readonly) MarginEqualToView centerXEqualToView;
@property (nonatomic, copy, readonly) MarginEqualToView centerYEqualToView;

/*
 * Set width or height as a multiple of a reference view's width or height.
 */
@property (nonatomic, copy, readonly) WidthHeightEqualToView widthRatioToView;
@property (nonatomic, copy, readonly) WidthHeightEqualToView heightRatioToView;
@property (nonatomic, copy, readonly) AutoHeight autoHeightRatio;

/*
 * Fill the superview with the given insets (convenience shortcut).
 */
@property (nonatomic, copy, readonly) SpaceToSuperView spaceToSuperView;

@property (nonatomic, weak) UIView *needsAutoResizeView;

@end


@interface UIView (SDAutoHeight)

@property (nonatomic) CGFloat autoHeight;
@property (nonatomic) UIView *sd_bottomView;
@property (nonatomic) CGFloat sd_bottomViewBottomMargin;

// Make an ordinary view size its height to fit its content.
- (void)setupAutoHeightWithBottomView:(UIView *)bottomView bottomMargin:(CGFloat)bottomMargin;

@end


@interface UIView (SDLayoutExtention)

/* Corner radius settings */
@property (nonatomic, strong) NSNumber *sd_cornerRadius;
@property (nonatomic, strong) NSNumber *sd_cornerRadiusFromWidthRatio;
@property (nonatomic, strong) NSNumber *sd_cornerRadiusFromHeightRatio;

// Subviews laid out with equal widths.
@property (nonatomic, strong) NSArray *sd_equalWidthSubviews;

@end


@interface UIScrollView (SDAutoContentSize)

// Make a scroll view's contentSize track its content.
- (void)setupAutoContentSizeWithBottomView:(UIView *)bottomView bottomMargin:(CGFloat)bottomMargin;

@end


@interface UILabel (SDLabelAutoResize)

// Make a single-line label's width self-sizing, capped at maxWidth.
- (void)setSingleLineAutoResizeWithMaxWidth:(CGFloat)maxWidth;

@end


// ----------------- The following classes and methods are internal to the library (no need to read them) ----------------

@interface SDAutoLayoutModelItem : NSObject

@property (nonatomic, strong) NSNumber *value;
@property (nonatomic, weak) UIView *refView;

@end


@interface UIView (SDAutoLayout)

- (NSMutableArray *)autoLayoutModelsArray;

- (SDAutoLayoutModel *)sd_layout;

- (void)addAutoLayoutModel:(SDAutoLayoutModel *)model;

// NOTE(review): 'fixedWith' looks like a typo for 'fixedWidth', but the name
// is part of the public interface -- kept as declared.
@property (nonatomic, strong) NSNumber *fixedWith;
@property (nonatomic, strong) NSNumber *fixedHeight;
@property (nonatomic, strong) NSNumber *sd_maxWidth;
@property (nonatomic, strong) NSNumber *autoHeightRatioValue;

@end


@interface UIView (SDChangeFrame)

@property (nonatomic) CGFloat left;
@property (nonatomic) CGFloat top;
@property (nonatomic) CGFloat right;
@property (nonatomic) CGFloat bottom;
@property (nonatomic) CGFloat centerX;
@property (nonatomic) CGFloat centerY;
@property (nonatomic) CGFloat width;
@property (nonatomic) CGFloat height;
@property (nonatomic) CGPoint origin;
@property (nonatomic) CGSize size;

@end
Welcome to What Do They Own?, a new Curbed series where we take someone making headlines and try to figure out how much of the world they own, and by extension, how far they've gone to insulate themselves from the world. Disgraced financier Jeffrey Epstein became a registered sex offender in 2008, when he was convicted of soliciting an underage girl for prostitution at his Palm Beach Mansion. Often erroneously referred to as a billionaire, Epstein runs a shady money management firm based in the U.S. Virgin Islands, where he owns a 70-acre island, one of the places named in a recent suit alleging that he forced a minor he kept as a "sex slave" to have sex with Harvard Law professor emeritus Alan Dershowitz and Prince Andrew, Duke of York, on multiple occasions. His portfolio also includes a "stone fortress" in New Mexico and the Herbert N. Straus Mansion in Manhattan, both of which are mentioned in the current round of allegations. The Herbert N. Straus Mansion New York, New York Often referred to as one of the largest townhouses in Manhattan—possessing 21,000 square feet and seven stories, 45,000 square feet and eight stories, or 50,000 square feet and nine stories, depending on who's describing it and when—the stone mansion at 9 East 71st Street was built in 1933. It was designed by society architect Horace Trumbauer for Herbert N. Straus, one of the heirs to the Macy's department store fortune, who died before it was completed. It's been said that "entire 18th-century rooms were purchased to be shipped to New York and installed in the new mansion," and the Metropolitan Museum of Art does have an exhibit in its period rooms collection with fixtures from a French hotel acquired by Herbert's wife Therese in a timeline that would fit the mansion's construction. She donated them to the Met in 1943, a year before the mansion was converted into a convalescent home, after the Straus family donated it to the Roman Catholic Archbishopric of New York.
These photos offer an interesting picture of the conversion process, which shows much of the interior fixtures stripped away. In 1961, the mansion became home to the Birch Wathen School, which it remained until Leslie H. Wexner, the founding chairman of the Limited Inc., bought it in 1989 for $13.2M. Wexner hired architect Thierry Despont and interior designer John Stefanidis to help gut-renovate the 40-room home, showing it off in the December 1995 issue of Architectural Digest (sadly, the magazine's online archives don't go back that far). In 1996, the New York Times referred to the sumptuously decorated, expensively renovated pied-à-terre as the latest "puzzling" "status symbol of the ultra rich," when it reported that Wexner never spent more than a few months in the home. This was back when the scarcely used pied-à-terre was a smaller part of the Manhattan real estate makeup. Back then, according to the Times: Visitors described a bathroom reminiscent of James Bond movies: hidden beneath a stairway, lined with lead to provide shelter from attack and supplied with closed-circuit television screens and a telephone, both concealed in a cabinet beneath the sink. The house also has a heated sidewalk, a luxurious provision that explains why, while snow blankets the rest of the Eastern Seaboard, the Wexner house (and Bill Cosby's house across the street) remains opulently snow-free, much to the delight of neighborhood dogs. In 1995, Wexner turned the home over to Epstein, who was his protege and financial advisor (and much more, if you believe Gawker CEO Nick Denton's argument) because, on the face of it, his new wife "expressed greater enthusiasm for bringing up their two young children in Columbus, Ohio." Some say that Epstein paid just a dollar for the mansion, though it would seem to be well within his means at the time to pay full market value. Epstein then undertook his own renovation, not wanting "to live in another person's house."
He is said to have spent $10M redoing the place. In 2007, when model Maximilia Cordero filed suit against Epstein for statutory rape and sexual assault (the suit was later dismissed), her lawyer included a description of what has by now become a legendary piece of puerile decor in chez Epstein: "[The] defendant gave plaintiff a tour of his mansion, showing her a huge crystal staircase with a huge crystal ball by the railing, ceiling chandeliers, a lounge room with red chairs, a statute [sic] of a dog with a statute [sic] of dog feces next to it" (emphasis ours). Vicky Ward, in her recently elaborated upon 2003 profile of Epstein, very memorably captured the experience of touring the residence: The entrance hall is decorated not with paintings but with row upon row of individually framed eyeballs; these, the owner tells people with relish, were imported from England, where they were made for injured soldiers. Next comes a marble foyer, which does have a painting, in the manner of Jean Dubuffet … but the host coyly refuses to tell visitors who painted it. In any case, guests are like pygmies next to the nearby twice-life-size sculpture of a naked African warrior. ...Tea is served in the "leather room," so called because of the cordovan-colored fabric on the walls. The chairs are covered in a leopard print, and on the wall hangs a huge, Oriental fantasy of a woman holding an opium pipe and caressing a snarling lionskin. Under her gaze, plates of finger sandwiches are delivered to Epstein and guests by the menservants in white gloves. Upstairs, to the right of a spiral staircase, is the "office," an enormous gallery spanning the width of the house. Strangely, it holds no computer. Computers belong in the "computer room" (a smaller room at the back of the house), Epstein has been known to say. The office features a gilded desk (which Epstein tells people belonged to banker J. P. 
Morgan), 18th-century black lacquered Portuguese cabinets, and a nine-foot ebony Steinway "D" grand. On the desk, a paperback copy of the Marquis de Sade's The Misfortunes of Virtue was recently spotted. Covering the floor, Epstein has explained, "is the largest Persian rug you'll ever see in a private home—so big, it must have come from a mosque." Amid such splendor, much of which reflects the work of the French decorator Alberto Pinto, who has worked for Jacques Chirac and the royal families of Jordan and Saudi Arabia, there is one particularly startling oddity: a stuffed black poodle, standing atop the grand piano. "No decorator would ever tell you to do that," Epstein brags to visitors. "But I want people to think what it means to stuff a dog." People can't help but feel it's Epstein's way of saying that he always has the last word. *Shudders.* In 2001, the New York Post reported that Epstein and Prince Andrew celebrated the registered sex offender's release from jail with a party at the mansion. Virginia Roberts, one of the litigants in a Florida lawsuit against Epstein's prosecutors, alleges that the second time she was coerced into having sex with prince Andrew was at Epstein's Manhattan mansion in 2001. Little Saint James U.S. Virgin Islands A construction shot from 2005. Photo via Panoramio Epstein owns the entire 70-acre island of Little Saint James, which has its own Wikipedia page. (The U.S. Virgin islands is also where his money management firm is based.) The Daily Mirror recently flew a helicopter over what they dub the "isle of sin," and came back with some pretty good shots of what Epstein has built there: a colonnaded villa-style compound designed by luxury resort and hotel designer Edward Tuttle, with a large library, a cinema, surrounding cabanas, and a detached Japanese bathhouse. 
The island is where Epstein's alleged "sex slave" Virginia Roberts claimed he made her take part in orgies, including one where she was allegedly forced to have sex with Prince Andrew. According to the Mirror, the island could at one time be rented for £4K (~$6K) a night. Over the years, it's hosted the conferences held by the Jeffrey Epstein VI Foundation, which have drawn the likes of Stephen Hawking. Roberts claims she met Bill Clinton once on the island, when he was there to dine with Epstein. Court papers claim that at other times, visitors included "prominent American politicians, powerful business executives, foreign presidents, a well known prime minister and other world leaders." Oddly enough, providing the third instance of a weird non-animate animal on an Epstein property, in 2009, a blogger based on the island of St. John claimed that this image showed a "fake lawn ornament cow" that was seen in different locations on Epstein's island throughout the construction process. Palm Beach Mansion Palm Beach, Florida Epstein's Palm Beach mansion, once valued at $6.8M, was at the center of the undercover investigation that eventually led to Epstein pleading guilty to a single state charge of soliciting prostitution, becoming a registered sex offender, and serving 13 months out of an 18-month sentence. According to a rather lurid Daily Beast article published in 2010, a police search of the property turned up: large, framed photos of nude young girls, and similar images... stashed in an armoire and on the computers seized at the house (although police found only bare cables where other computers had been). Some bathrooms were stocked with soap in the shape of sex organs, and various sex toys, such as a "twin torpedo" vibrator and creams and lubricants available at erotic specialty shops, were stowed near the massage tables set up in several rooms upstairs. 
Zorro Ranch Stanley, New Mexico In 1993, Epstein purchased a 7,500-acre ranch in Stanley, New Mexico, from the late former New Mexico governor Bruce King. He named the ranch "Zorro," and proceeded to build a 26,700-square-foot hilltop mansion that was once said to be the largest home in the state, and has been described as a "stone fortress." A 1995 article in The New Mexican said that Epstein's initial plans for the residence described a main house that "will be similar to a Mexican hacienda, with an open-air entry into a courtyard with high-ceiling hallways, stone columns and a central fountain. The living room will measure about 2,100-square-feet, larger than the average house in Santa Fe County. The home will have an elevator, eight bathrooms, four fireplaces and three bedrooms." According to a more recent report, Epstein received a county permit to build a small airplane hangar and air strip on the ranch. Epstein has been reported as saying his New Mexico home "makes the town house look like a shack." According to records accessed on Property Shark, the structures on the property were last appraised in 2013 at $18,186,406. In the recent court filing in Florida, Roberts names Zorro Ranch as one of the places she was sexually abused by Epstein, as well as forced to have sex with Harvard law professor Alan Dershowitz. The last time Epstein made news in New Mexico, it was when it was revealed that he attempted to contribute to the reelection campaign of former Attorney General and gubernatorial candidate Gary King in 2006. Foch Avenue Apartment Paris, France Avenue Foch. Photo via Wikimedia Commons Epstein owns an apartment on Paris' ritzy Avenue Foch. A Boeing 727 Pictured here at Palm Beach International Airport. Photo via NYC Aviation Not technically a piece of real estate, but also kind of a flying piece of real estate. 
Said to be the only reason Bill Clinton was ever friends with Epstein—that and his campaign donations to democrats—though many in the conservative media are surely speculating otherwise. In September of 2002, Clinton had a weeklong tour of South Africa, Nigeria, Ghana, Rwanda, and Mozambique coming up to promote anti-AIDS efforts, and former Clinton advisor Doug Band encouraged Epstein to come along and provide the ride. Kevin Spacey and Chris Tucker were there, too, which sounds like it must have been kind of weird. If you know of any part of Jeffrey Epstein's property portfolio we missed, please drop us a line.
<reponame>ajblane/iota_fpga /* * (C) Copyright 2001 * <NAME>, DENX Software Engineering, <EMAIL>. * * See file CREDITS for list of people who contributed to this * project. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA */ #include <common.h> #include <mpc824x.h> #include <asm/processor.h> #if defined(CONFIG_ENV_IS_IN_FLASH) # ifndef CONFIG_ENV_ADDR # define CONFIG_ENV_ADDR (CONFIG_SYS_FLASH_BASE + CONFIG_ENV_OFFSET) # endif # ifndef CONFIG_ENV_SIZE # define CONFIG_ENV_SIZE CONFIG_ENV_SECT_SIZE # endif # ifndef CONFIG_ENV_SECT_SIZE # define CONFIG_ENV_SECT_SIZE CONFIG_ENV_SIZE # endif #endif #define FLASH_BANK_SIZE 0x800000 #define MAIN_SECT_SIZE 0x40000 #define PARAM_SECT_SIZE 0x8000 #define BOARD_CTRL_REG 0xFE800013 flash_info_t flash_info[CONFIG_SYS_MAX_FLASH_BANKS]; static int write_data (flash_info_t *info, ulong dest, ulong *data); static void write_via_fpu(vu_long *addr, ulong *data); static __inline__ unsigned long get_msr(void); static __inline__ void set_msr(unsigned long msr); /*---------------------------------------------------------------------*/ #undef DEBUG_FLASH /*---------------------------------------------------------------------*/ #ifdef DEBUG_FLASH #define DEBUGF(fmt,args...) printf(fmt ,##args) #else #define DEBUGF(fmt,args...) 
#endif	/* DEBUG_FLASH */
/*---------------------------------------------------------------------*/

/*-----------------------------------------------------------------------
 * Probe each flash bank, record its sector layout in flash_info[], and
 * write-protect the monitor and (optionally) environment sectors.
 * Returns the total detected flash size in bytes; stops early (partial
 * size) if a bank does not identify as a pair of Intel 28F160F3B parts.
 */
unsigned long flash_init(void)
{
	int i, j;
	ulong size = 0;
	volatile unsigned char *bcr = (volatile unsigned char *)(BOARD_CTRL_REG);

	/* Clear the board-level write-protect so the banks are programmable.
	 * NOTE(review): FWPT/FWP0/FWP1 bit meanings are taken from the inline
	 * comments below, not verified against a board datasheet.
	 */
	DEBUGF("Write protect was: 0x%02X\n", *bcr);
	*bcr &= 0x1; /* FWPT must be 0 */
	*bcr |= 0x6; /* FWP0 = FWP1 = 1 */
	DEBUGF("Write protect is: 0x%02X\n", *bcr);

	for (i = 0; i < CONFIG_SYS_MAX_FLASH_BANKS; i++) {
		vu_long *addr = (vu_long *)(CONFIG_SYS_FLASH_BASE + i * FLASH_BANK_SIZE);

		addr[0] = 0x00900090;	/* Intel "read identifier" command, issued to both bus halves */

		DEBUGF ("Flash bank # %d:\n"
			"\tManuf. ID @ 0x%08lX: 0x%08lX\n"
			"\tDevice ID @ 0x%08lX: 0x%08lX\n",
			i,
			(ulong)(&addr[0]), addr[0],
			(ulong)(&addr[2]), addr[2]);

		/* The paired addr[0]==addr[1] / addr[2]==addr[3] checks require
		 * both 32-bit halves of the bus to report the same IDs --
		 * presumably two identical chips side by side; TODO confirm
		 * against the board schematic.
		 */
		if ((addr[0] == addr[1]) && (addr[0] == INTEL_MANUFACT) &&
		    (addr[2] == addr[3]) && (addr[2] == INTEL_ID_28F160F3B)) {
			flash_info[i].flash_id = (FLASH_MAN_INTEL & FLASH_VENDMASK) |
						 (INTEL_ID_28F160F3B & FLASH_TYPEMASK);
		} else {
			flash_info[i].flash_id = FLASH_UNKNOWN;
			addr[0] = 0xFFFFFFFF;	/* back to read-array mode */
			goto Done;
		}

		DEBUGF ("flash_id = 0x%08lX\n", flash_info[i].flash_id);

		addr[0] = 0xFFFFFFFF;	/* back to read-array mode */

		flash_info[i].size = FLASH_BANK_SIZE;
		flash_info[i].sector_count = CONFIG_SYS_MAX_FLASH_SECT;
		memset(flash_info[i].protect, 0, CONFIG_SYS_MAX_FLASH_SECT);
		/* Sector map: the first 8 sectors are PARAM_SECT_SIZE (0x8000)
		 * parameter sectors, which together span exactly one
		 * MAIN_SECT_SIZE (0x40000); every later sector j starts at
		 * (j - 7) * MAIN_SECT_SIZE, so the offsets are contiguous.
		 */
		for (j = 0; j < flash_info[i].sector_count; j++) {
			if (j <= 7) {
				flash_info[i].start[j] = CONFIG_SYS_FLASH_BASE +
							 i * FLASH_BANK_SIZE +
							 j * PARAM_SECT_SIZE;
			} else {
				flash_info[i].start[j] = CONFIG_SYS_FLASH_BASE +
							 i * FLASH_BANK_SIZE +
							 (j - 7)*MAIN_SECT_SIZE;
			}
		}
		size += flash_info[i].size;
	}

	/* Protect monitor and environment sectors */
#if CONFIG_SYS_MONITOR_BASE >= CONFIG_SYS_FLASH_BASE
#if CONFIG_SYS_MONITOR_BASE >= CONFIG_SYS_FLASH_BASE + FLASH_BANK_SIZE
	/* Monitor lives in the second bank. */
	flash_protect(FLAG_PROTECT_SET,
		      CONFIG_SYS_MONITOR_BASE,
		      CONFIG_SYS_MONITOR_BASE + monitor_flash_len - 1,
		      &flash_info[1]);
#else
	flash_protect(FLAG_PROTECT_SET,
		      CONFIG_SYS_MONITOR_BASE,
		      CONFIG_SYS_MONITOR_BASE + monitor_flash_len - 1,
		      &flash_info[0]);
#endif
#endif

#if defined(CONFIG_ENV_IS_IN_FLASH) && defined(CONFIG_ENV_ADDR)
#if CONFIG_ENV_ADDR >= CONFIG_SYS_FLASH_BASE + FLASH_BANK_SIZE
	flash_protect(FLAG_PROTECT_SET,
		      CONFIG_ENV_ADDR,
		      CONFIG_ENV_ADDR + CONFIG_ENV_SIZE - 1,
		      &flash_info[1]);
#else
	flash_protect(FLAG_PROTECT_SET,
		      CONFIG_ENV_ADDR,
		      CONFIG_ENV_ADDR + CONFIG_ENV_SIZE - 1,
		      &flash_info[0]);
#endif
#endif

Done:
	return size;
}

/*-----------------------------------------------------------------------
 * Pretty-print vendor, chip type, size, and per-sector start addresses
 * (with "(RO)" markers for protected sectors) for one flash bank.
 */
void flash_print_info (flash_info_t * info)
{
	int i;

	switch ((i = info->flash_id & FLASH_VENDMASK)) {
	case (FLASH_MAN_INTEL & FLASH_VENDMASK):
		printf ("Intel: ");
		break;
	default:
		printf ("Unknown Vendor 0x%04x ", i);
		break;
	}

	switch ((i = info->flash_id & FLASH_TYPEMASK)) {
	case (INTEL_ID_28F160F3B & FLASH_TYPEMASK):
		printf ("28F160F3B (16Mbit)\n");
		break;
	default:
		/* Unknown chip: sector data would be meaningless, skip it. */
		printf ("Unknown Chip Type 0x%04x\n", i);
		goto Done;
		break;
	}

	printf (" Size: %ld MB in %d Sectors\n",
		info->size >> 20, info->sector_count);

	printf (" Sector Start Addresses:");
	for (i = 0; i < info->sector_count; i++) {
		/* Five addresses per output line. */
		if ((i % 5) == 0) {
			printf ("\n ");
		}
		printf (" %08lX%s", info->start[i],
			info->protect[i] ? " (RO)" : " ");
	}
	printf ("\n");

Done:
	return;
}

/*-----------------------------------------------------------------------
 * Erase sectors s_first..s_last of one bank.  Protected sectors are
 * skipped with a warning.  Returns 0 on success, 1 on bad arguments,
 * non-Intel flash, or erase timeout.  The erase command sequence is
 * written to both 32-bit halves of the 64-bit bus (addr[0] and addr[1]),
 * and completion is polled via status bit 0x80 in each half.
 */
int flash_erase (flash_info_t *info, int s_first, int s_last)
{
	int flag, prot, sect;
	ulong start, now, last;

	DEBUGF ("Erase flash bank %d sect %d ... %d\n",
		info - &flash_info[0], s_first, s_last);

	if ((s_first < 0) || (s_first > s_last)) {
		if (info->flash_id == FLASH_UNKNOWN) {
			printf ("- missing\n");
		} else {
			printf ("- no sectors to erase\n");
		}
		return 1;
	}

	if ((info->flash_id & FLASH_VENDMASK) !=
	    (FLASH_MAN_INTEL & FLASH_VENDMASK)) {
		printf ("Can erase only Intel flash types - aborted\n");
		return 1;
	}

	/* Count protected sectors so we can warn before skipping them. */
	prot = 0;
	for (sect=s_first; sect<=s_last; ++sect) {
		if (info->protect[sect]) {
			prot++;
		}
	}

	if (prot) {
		printf ("- Warning: %d protected sectors will not be erased!\n",
			prot);
	} else {
		printf ("\n");
	}

	start = get_timer (0);
	last = start;
	/* Start erase on unprotected sectors */
	for (sect = s_first; sect<=s_last; sect++) {
		if (info->protect[sect] == 0) {	/* not protected */
			vu_long *addr = (vu_long *)(info->start[sect]);

			DEBUGF ("Erase sect %d @ 0x%08lX\n", sect, (ulong)addr);

			/* Disable interrupts which might cause a timeout
			 * here.
			 */
			flag = disable_interrupts();

			addr[0] = 0x00500050;	/* clear status register */
			addr[0] = 0x00200020;	/* erase setup */
			addr[0] = 0x00D000D0;	/* erase confirm */

			addr[1] = 0x00500050;	/* clear status register */
			addr[1] = 0x00200020;	/* erase setup */
			addr[1] = 0x00D000D0;	/* erase confirm */

			/* re-enable interrupts if necessary */
			if (flag)
				enable_interrupts();

			/* wait at least 80us - let's wait 1 ms */
			udelay (1000);

			/* Poll the ready bit in both bus halves. */
			while (((addr[0] & 0x00800080) != 0x00800080) ||
			       ((addr[1] & 0x00800080) != 0x00800080) ) {
				if ((now=get_timer(start)) >
				    CONFIG_SYS_FLASH_ERASE_TOUT) {
					printf ("Timeout\n");
					addr[0] = 0x00B000B0; /* suspend erase */
					addr[0] = 0x00FF00FF; /* to read mode */
					return 1;
				}

				/* show that we're waiting */
				if ((now - last) > 1000) {	/* every second */
					putc ('.');
					last = now;
				}
			}

			addr[0] = 0x00FF00FF;	/* back to read-array mode */
		}
	}
	printf (" done\n");
	return 0;
}

/*-----------------------------------------------------------------------
 * Copy memory to flash, returns:
 * 0 - OK
 * 1 - write timeout
 * 2 - Flash not erased
 * 4 - Flash not identified
 */

#define FLASH_WIDTH	8	/* flash bus width in bytes */
int write_buff (flash_info_t *info, uchar *src, ulong addr, ulong cnt) { ulong wp, cp, msr; int l, rc, i; ulong data[2]; ulong *datah = &data[0]; ulong *datal = &data[1]; DEBUGF ("Flash write_buff: @ 0x%08lx, src 0x%08lx len %ld\n", addr, (ulong)src, cnt); if (info->flash_id == FLASH_UNKNOWN) { return 4; } msr = get_msr(); set_msr(msr | MSR_FP); wp = (addr & ~(FLASH_WIDTH-1)); /* get lower aligned address */ /* * handle unaligned start bytes */ if ((l = addr - wp) != 0) { *datah = *datal = 0; for (i = 0, cp = wp; i < l; i++, cp++) { if (i >= 4) { *datah = (*datah << 8) | ((*datal & 0xFF000000) >> 24); } *datal = (*datal << 8) | (*(uchar *)cp); } for (; i < FLASH_WIDTH && cnt > 0; ++i) { char tmp; tmp = *src; src++; if (i >= 4) { *datah = (*datah << 8) | ((*datal & 0xFF000000) >> 24); } *datal = (*datal << 8) | tmp; --cnt; ++cp; } for (; cnt == 0 && i < FLASH_WIDTH; ++i, ++cp) { if (i >= 4) { *datah = (*datah << 8) | ((*datal & 0xFF000000) >> 24); } *datal = (*datah << 8) | (*(uchar *)cp); } if ((rc = write_data(info, wp, data)) != 0) { set_msr(msr); return (rc); } wp += FLASH_WIDTH; } /* * handle FLASH_WIDTH aligned part */ while (cnt >= FLASH_WIDTH) { *datah = *(ulong *)src; *datal = *(ulong *)(src + 4); if ((rc = write_data(info, wp, data)) != 0) { set_msr(msr); return (rc); } wp += FLASH_WIDTH; cnt -= FLASH_WIDTH; src += FLASH_WIDTH; } if (cnt == 0) { set_msr(msr); return (0); } /* * handle unaligned tail bytes */ *datah = *datal = 0; for (i = 0, cp = wp; i < FLASH_WIDTH && cnt > 0; ++i, ++cp) { char tmp; tmp = *src; src++; if (i >= 4) { *datah = (*datah << 8) | ((*datal & 0xFF000000) >> 24); } *datal = (*datal << 8) | tmp; --cnt; } for (; i < FLASH_WIDTH; ++i, ++cp) { if (i >= 4) { *datah = (*datah << 8) | ((*datal & 0xFF000000) >> 24); } *datal = (*datal << 8) | (*(uchar *)cp); } rc = write_data(info, wp, data); set_msr(msr); return (rc); } /*----------------------------------------------------------------------- * Write a word to Flash, returns: * 0 - OK * 1 
- write timeout
 * 2 - Flash not erased
 */
static int write_data (flash_info_t *info, ulong dest, ulong *data)
{
	vu_long *addr = (vu_long *)dest;
	ulong start;
	int flag;

	/* Check if Flash is (sufficiently) erased: flash bits can only be
	 * programmed 1 -> 0, so every bit we want set must already be 1.
	 */
	if (((addr[0] & data[0]) != data[0]) ||
	    ((addr[1] & data[1]) != data[1]) ) {
		return (2);
	}

	/* Disable interrupts which might cause a timeout here */
	flag = disable_interrupts();

	addr[0] = 0x00400040;	/* write setup */
	write_via_fpu(addr, data);

	/* re-enable interrupts if necessary */
	if (flag)
		enable_interrupts();

	start = get_timer (0);

	/* Poll the ready bit (0x80) in both 32-bit bus halves. */
	while (((addr[0] & 0x00800080) != 0x00800080) ||
	       ((addr[1] & 0x00800080) != 0x00800080) ) {
		if (get_timer(start) > CONFIG_SYS_FLASH_WRITE_TOUT) {
			addr[0] = 0x00FF00FF;	/* restore read mode */
			return (1);
		}
	}

	addr[0] = 0x00FF00FF;	/* restore read mode */

	return (0);
}

/*-----------------------------------------------------------------------
 * Copy 8 bytes through FP register 1 so the bus sees one 64-bit store
 * (lfd/stfd are the PowerPC doubleword FP load/store).  Caller must have
 * MSR_FP enabled (see write_buff).
 */
static void write_via_fpu(vu_long *addr, ulong *data)
{
	__asm__ __volatile__ ("lfd  1, 0(%0)" : : "r" (data));
	__asm__ __volatile__ ("stfd 1, 0(%0)" : : "r" (addr));
}

/*-----------------------------------------------------------------------
 * Read / write the Machine State Register, used to toggle MSR_FP around
 * the FPU-assisted flash writes.
 */
static __inline__ unsigned long get_msr(void)
{
	unsigned long msr;

	__asm__ __volatile__ ("mfmsr %0" : "=r" (msr) :);
	return msr;
}

static __inline__ void set_msr(unsigned long msr)
{
	__asm__ __volatile__ ("mtmsr %0" : : "r" (msr));
}
import { createTreeWithEmptyWorkspace } from '@nrwl/devkit/testing';
import { readProjectConfiguration, readJson, logger } from '@nrwl/devkit';
import type { Tree } from '@nrwl/devkit';
import { libraryGenerator } from '@nrwl/node';
import { configurationGenerator } from '../configuration/generator';
import { scssGenerator } from './generator';
import type { ScssGeneratorSchema } from './schema';
import { Config, ConfigOverride } from 'stylelint';

const defaultOptions: ScssGeneratorSchema = {
  project: 'test',
  skipFormat: false,
};

describe('scss generator', () => {
  let tree: Tree;

  beforeAll(async () => {
    // Silence generator logging during the tests.
    logger.info = jest.fn();
  });

  beforeEach(() => {
    tree = createTreeWithEmptyWorkspace();
  });

  // Typo fixed: "add a a glob pattern" -> "add a glob pattern".
  it('should add a glob pattern for scss files to the target configuration', async () => {
    await libraryGenerator(tree, { name: 'test' });
    await configurationGenerator(tree, { project: 'test', skipFormat: true });
    await scssGenerator(tree, defaultOptions);

    const config = readProjectConfiguration(tree, 'test');
    expect(config).toBeDefined();
    expect(config.targets?.stylelint).toBeDefined();
    expect(config.targets?.stylelint.executor).toBe('nx-stylelint:lint');
    expect(config.targets?.stylelint.options.lintFilePatterns).toContain('libs/test/**/*.css');
    expect(config.targets?.stylelint.options.lintFilePatterns).toContain('libs/test/**/*.scss');
  });

  it('should add required dependencies to package.json', async () => {
    await libraryGenerator(tree, { name: 'test' });
    await configurationGenerator(tree, { project: 'test', skipFormat: true });
    await scssGenerator(tree, defaultOptions);

    const packageJson = readJson(tree, 'package.json');
    expect(packageJson.devDependencies['stylelint-config-standard-scss']).toBe('^2.0.0');
  });

  it('should update root and project stylelint configurations', async () => {
    const projectStylelint = `libs/test/.stylelintrc.json`;

    await libraryGenerator(tree, { name: 'test' });
    await configurationGenerator(tree, { project: 'test', skipFormat: true });

    expect(tree.exists('.stylelintrc.json')).toBeTruthy();
    expect(tree.exists(projectStylelint)).toBeTruthy();

    // Before running the scss generator only the css override exists.
    let rootConfig = readJson<Config>(tree, '.stylelintrc.json');
    expect(rootConfig.overrides).toStrictEqual<ConfigOverride[]>([
      {
        files: ['**/*.css'],
        extends: ['stylelint-config-standard', 'stylelint-config-prettier'],
        rules: {},
      },
    ]);

    let projectConfig = readJson<Config>(tree, projectStylelint);
    expect(projectConfig.overrides).toStrictEqual<ConfigOverride[]>([
      {
        files: ['**/*.css'],
        rules: {},
      },
    ]);

    await scssGenerator(tree, defaultOptions);

    // The generator appends an scss override to both configurations.
    rootConfig = readJson<Config>(tree, '.stylelintrc.json');
    expect(rootConfig.overrides).toStrictEqual<ConfigOverride[]>([
      {
        files: ['**/*.css'],
        extends: ['stylelint-config-standard', 'stylelint-config-prettier'],
        rules: {},
      },
      {
        files: ['**/*.scss'],
        extends: ['stylelint-config-standard-scss', 'stylelint-config-prettier'],
        rules: {},
      },
    ]);

    projectConfig = readJson<Config>(tree, projectStylelint);
    expect(projectConfig.overrides).toStrictEqual<ConfigOverride[]>([
      {
        files: ['**/*.css'],
        rules: {},
      },
      {
        files: ['**/*.scss'],
        rules: {},
      },
    ]);
  });
});
WASHINGTON—Thousands thronged the docks of the capital seaport last week to watch as Congressmen boarded galleys and set sail in search of the Lost Sword of Bipartisanship, a holy relic that according to legend has the power to restore collegial relations and procedural harmony to the legislative branch. Initial reports from sea confirm the expedition has already faced dire peril in its quest, which was reportedly inspired by a "radiant vision" of a sword that appeared before stunned senators and representatives in the Capitol rotunda a fortnight ago during negotiations over a minor wetlands preservation bill. Advertisement "I know in my bones this is a sign," Senate Majority Leader Harry Reid (D-NV) was overheard saying as he fell to his knees, humbled by the hovering Sword's fabled embodiment of civil discourse, mutual respect, and practical, common sense. "To see an apparition of this sacred talisman, which cuts through bickering and self-interest, is to see that we have strayed in our ways, and that only the Sword itself can point us toward the true path." "We must seek the sword at once," said House Minority Leader John Boehner (R-OH), who, in a rare moment of alliance, knelt to join Reid in meek supplication. "Though we do not know where it is, it may be the only thing that can save us." Following several days of stalemates and inaction, Congress appropriated funds for a mighty fleet of ships and approved a motion to set sail, reportedly demurring at an offer of assistance from the U.S. Navy because the task of finding the Sword of Bipartisanship was "theirs, and theirs alone" to bear. Advertisement On Friday, messenger birds began to arrive with tidings of great adventure, but grim outcomes. One scroll recounts the demise of Sen. 
Daniel Inouye (D-HI), who died of exposure and malnutrition within sight of the shores of his beloved island home, while another tells tale of a giant ice floe that carried the entire House Ways and Means Committee into the Arctic night, never to be seen again. Dangling desperately by a lashing line, Sen. Tom Coburn (R-OK) is said to have fallen into a churning maelstrom off the Horn of Africa while raging at the heavens and shouting, "Fools! Fools! There is no Sword! There is no Bipartisanship! It's all a lie, and we all bound for Death!" Advertisement "One of the messages, burnt on the edges and smelling of brimstone, tells how they stopped on a remote island for provisions and were imprisoned by a mighty one-eyed monster who bellowed, 'But there's no way to pay for all this!'" Rep. Nancy Pelosi (D-CA) told reporters from atop the Washington Monument, where she awaits the Congressmen's return, clad in widow's black burlap and gazing out to sea. "They seem to have defeated this Cyclops, but of the coveted saber, there is no word." "And now I've received news that fishermen in Monterey Bay have found in their nets the bloated remains of Barney Frank," Pelosi added. Tales sung by bards since time immemorial describe the Sword as a master blade forged at Lexington and Concord, broken during the Civil War, reforged by Abraham Lincoln, wielded by the imp Joe McCarthy until he was driven mad, used briefly at a Cleveland City Council meeting during a unanimous vote on a zoning variance, and then lost somewhere in the misty murk of Indochina. Advertisement According to a raving, half-starved Senate page found Monday clinging to an oar in the Potomac River, a pack of seductive lobbyists attempted to lure Congress to their doom upon an island's rocky shoals, a disaster averted only when Sen. Russ Feingold (D-WI) locked his colleagues belowdecks and navigated the fleet to safety himself. 
"I warned them of the dangers they would face," a mysterious and becloaked oracle known only as the Librarian of Congress told reporters. "Those who seek the Sword of Bipartisanship must confront not only the terrors that dwell in the vasty deep, but those within their very hearts." "If that fails, the least they could do is sit down like grown-ups, have an open exchange of ideas, identify shared values, hold good-faith negotiations in which both sides make concessions, reach an agreement that a majority of them believes will advance the common good, and then vote on a goddamn bill," he added.
import os
from typing import List, Union

import torch
import torch.nn as nn
from allennlp.modules import FeedForward
from allennlp.nn.activations import Activation
from torch.nn.utils.rnn import pack_padded_sequence, pad_sequence
from transformers import AutoConfig, AutoModel, AutoTokenizer, BertModel

# Module-level device used by the predict / feature-extraction helpers below.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')


class LSTM(nn.Module):
    """Bidirectional LSTM text classifier: embedding -> LSTM -> linear head."""

    def __init__(self, vocab_size, embed_dim=300, hidden_size=1024, layers=2, bidirectional=True, dropout=0, ag=False):
        super(LSTM, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        self.lstm = nn.LSTM(input_size=embed_dim,
                            hidden_size=hidden_size,
                            num_layers=layers,
                            batch_first=True,
                            bidirectional=bidirectional,
                            dropout=dropout,)
        # `ag` selects a 4-class head (AG News-style) instead of binary.
        # NOTE(review): hidden_size*2 assumes bidirectional=True; a
        # unidirectional configuration would break forward() -- TODO confirm.
        self.linear = nn.Linear(hidden_size*2, 4 if ag else 2)

    def forward(self, padded_texts, lengths):
        """Classify padded token-id sequences; `lengths` gives true lengths."""
        texts_embedding = self.embedding(padded_texts)
        packed_inputs = pack_padded_sequence(texts_embedding, lengths, batch_first=True, enforce_sorted=False)
        _, (hn, _) = self.lstm(packed_inputs)
        # NOTE(review): for a bidirectional nn.LSTM, hn[-2] is the last
        # layer's forward direction and hn[-1] the backward one, so these
        # two names appear swapped.  Harmless for a trained head (the
        # linear layer absorbs the permutation), but the labels mislead.
        forward_hidden = hn[-1, :, :]
        backward_hidden = hn[-2, :, :]
        concat_hidden = torch.cat((forward_hidden, backward_hidden), dim=1)
        output = self.linear(concat_hidden)
        return output

    def predict(self,):
        # Unimplemented stub (see BERT.predict for the intended contract).
        pass


class BERT(nn.Module):
    """HuggingFace encoder with an optional FeedForward MLP + linear head."""

    def __init__(self, model_path: str, mlp_layer_num: int, class_num:int=2, hidden_dim:float=1024):
        super(BERT, self).__init__()
        self.mlp_layer_num = mlp_layer_num
        self.config = AutoConfig.from_pretrained(model_path)
        self.hidden_size = self.config.hidden_size
        self.tokenizer = AutoTokenizer.from_pretrained(model_path)
        self.bert = AutoModel.from_pretrained(model_path)
        if self.mlp_layer_num > 0:
            # Optional MLP between the [CLS] vector and the classifier.
            self.ffn = FeedForward(input_dim=self.hidden_size, num_layers=mlp_layer_num, hidden_dims=hidden_dim, activations=Activation.by_name('elu')())
            self.linear = nn.Linear(hidden_dim, class_num)
        else:
            self.linear = nn.Linear(self.hidden_size, class_num)

    def forward(self, inputs, attention_masks=None):
        """Return (logits, cls_embedding) for a batch of token-id tensors."""
        bert_output = self.bert(inputs,
                                attention_mask=attention_masks)
        # First token of the last hidden state, i.e. the [CLS] embedding.
        cls_tokens = bert_output[0][:, 0, :] # batch_size, 768
        #cls_tokens = bert_output.pooler_output
        if self.mlp_layer_num > 0:
            ffn_output = self.ffn(cls_tokens)
            output = self.linear(ffn_output) # batch_size, 1(4)
        else:
            output = self.linear(cls_tokens)
        return output, cls_tokens

    def predict(self, input):
        """Class-probability distribution (softmax) for one raw text string."""
        with torch.no_grad():
            encode_output = self.tokenizer.encode_plus(input)
            input_ids, input_mask = torch.tensor([encode_output['input_ids']]).to(device), torch.tensor([encode_output['attention_mask']]).to(device)
            output, _ = self.forward(input_ids, input_mask)
            return torch.softmax(output, dim=-1)

    def get_semantic_feature(self, input_text: List[str]):
        """[CLS] embeddings for a batch of raw texts (padded with id 0).

        NOTE(review): the mask treats token id 0 as padding; assumes the
        tokenizer's pad id is 0 (true for BERT vocab) -- TODO confirm for
        other checkpoints loaded via model_path.
        """
        with torch.no_grad():
            text_ids = []
            for text in input_text:
                text_ids.append(torch.tensor(self.tokenizer.encode(text)))
            input_ids = pad_sequence(text_ids, batch_first=True, padding_value=0)
            attention_mask = torch.zeros_like(input_ids).masked_fill(input_ids != 0, 1)
            bert_output = self.bert(input_ids.to(device), attention_mask.to(device))
            cls_output = bert_output[0][:,0,:]
            return cls_output


if __name__ == '__main__':
    bert = BertModel.from_pretrained('bert-base-uncased', )
#include <stdio.h>

/* Reads N, a string S, and a 1-based index K, then echoes S with every
 * character that differs from S[K-1] replaced by '*'.
 * Assumes N == strlen(S) <= 10 and 1 <= K <= N (contest constraints --
 * TODO confirm against the problem statement).
 */
int main()
{
    int N, K;
    char S[11];

    /* %10s bounds the read to S's capacity (10 chars + NUL); the original
     * unbounded %s could overflow the buffer on malformed input.  Bail out
     * instead of using indeterminate values when parsing fails.
     */
    if (scanf("%d %10s %d", &N, S, &K) != 3) {
        return 1;
    }

    for (int i = 0; i < N; i++) {
        if (S[i] == S[K - 1]) {
            putchar(S[K - 1]);
        } else {
            putchar('*');
        }
    }

    return 0;
}
def match(self, target):
    """Truthy only when the compiled pattern consumes *all* of ``target``.

    Preserves the original return contract: ``None`` when the pattern does
    not match at position 0 at all, otherwise a bool telling whether the
    match extends to the end of the string (``re.fullmatch`` semantics).
    """
    result = self.re.match(target)
    if not result:
        # No match at the start: propagate the falsy result unchanged.
        return result
    return result.end() == len(target)
def lang_file_ext(self) -> "str":
    """File extension for the notebook's language, defaulting to ``.py``.

    Walks ``self.json['metadata']['language_info']['file_extension']``,
    tolerating any missing level of the mapping.
    """
    metadata = self.json.get("metadata", {})
    language_info = metadata.get("language_info", {})
    return language_info.get("file_extension", ".py")
def lru_diskcache(maxsize=16):
    """Decorator factory: persist a function's results on disk as gzipped pickles.

    Keeps at most ``maxsize`` entries per decorated function.
    NOTE(review): despite the name, eviction is FIFO, not LRU -- cache hits
    never reorder ``key_dequeue``, and ``popitem(last=False)`` removes the
    oldest *inserted* entry.
    """
    def lru_diskcache_inner(func):
        # One cache directory per wrapped function.  NOTE(review): the
        # "cashe_of_" prefix is a typo, but it is a runtime path -- renaming
        # it would orphan caches already on disk, so it is left unchanged.
        directory = Path("cashe_of_" + func.__name__)
        if not directory.is_dir():
            directory.mkdir()
        # Persistent key -> filename table.  OrderedDictStorage is a
        # project-local ordered mapping with a .save() method (defined
        # elsewhere in this project).
        key_dequeue_file = directory.joinpath("cache_table.json")
        key_dequeue = OrderedDictStorage(file=key_dequeue_file)

        @functools.wraps(func)
        def cache_result(*args, **kwargs):
            # Cache key is the repr of the full call signature; assumes all
            # arguments have stable, unambiguous reprs -- TODO confirm.
            key = repr(args) + repr(kwargs)
            if key in key_dequeue:
                filename = key_dequeue[key]
                cache_file = Path(filename)
            else:
                # hash() is salted per interpreter run (PYTHONHASHSEED), so
                # new entries get different filenames across runs; keys seen
                # before stay reachable through the persisted table.
                filename = str(abs(hash(key))) + ".pickle.gz"
                cache_file = directory.joinpath(filename)
            if cache_file.exists():
                # Cache hit.  NOTE(review): unpickling is only safe because
                # these files are written by this same decorator.
                with gzip.open(cache_file) as f:
                    return pickle.load(f)
            result = func(*args, **kwargs)
            # Evict the oldest entry (FIFO) before inserting a new one.
            if len(key_dequeue) >= maxsize:
                key_to_remove, file_to_remove = key_dequeue.popitem(last=False)
                Path(file_to_remove).unlink()
            key_dequeue[key] = str(cache_file)
            with gzip.open(cache_file, 'w') as f:
                pickle.dump(result, f)
            # Flush the key table so the mapping survives process restarts.
            key_dequeue.save()
            return result
        return cache_result
    return lru_diskcache_inner
#include<bits/stdc++.h>
using namespace std;
// Contest shorthand: every `int` below is 64-bit.
#define int long long
int mod=1e9+7;

// a^b mod 1e9+7 by binary exponentiation, O(log b).
int modexp(int a,int b){
    int ans=1;
    while(b){
        if(b&1){
            ans=(ans*a)%mod;
        }
        a=(a*a)%mod;
        b=b/2;
    }
    return ans;
}

// Counts binary h x w grids consistent with prefix constraints:
// row i starts with exactly r[i] ones (followed by a forced 0 if the run
// ends inside the grid), and column j starts with exactly c[j] ones
// likewise.  Prints 2^(number of unconstrained cells) mod 1e9+7, or 0 as
// soon as the row- and column-derived values contradict each other.
int32_t main(){
    ios::sync_with_stdio(0);cin.tie(0);
    int h,w;
    cin>>h>>w;
    vector<int> r(h),c(w);
    for(int i=0;i<h;i++){
        cin>>r[i];
    }
    for(int i=0;i<w;i++){
        cin>>c[i];
    }
    // arr[i][j]: 1 = forced one, 0 = forced zero, -1 = unconstrained.
    // (Variable-length array: a GCC extension, fine for contest judges.)
    int arr[h][w];
    for(int i=0;i<h;i++){
        for(int j=0;j<w;j++){
            arr[i][j]=-1;
        }
    }
    // Row constraints: first r[i] cells are 1, the next (if any) is 0.
    for(int i=0;i<h;i++){
        for(int j=0;j<r[i];j++){
            arr[i][j]=1;
        }
        if(r[i]<w){
            arr[i][r[i]]=0;
        }
    }
    // Column constraints, bailing out on any conflict with row values.
    for(int i=0;i<w;i++){
        for(int j=0;j<c[i];j++){
            if(arr[j][i]==0){
                cout<<0;
                return 0;
            }
            arr[j][i]=1;
        }
        if(c[i]<h){
            if(arr[c[i]][i]==1){
                cout<<0;
                return 0;
            }
            arr[c[i]][i]=0;
        }
    }
    // Every cell still marked -1 may independently be 0 or 1.
    int f=0;
    for(int i=0;i<h;i++){
        for(int j=0;j<w;j++){
            if(arr[i][j]==-1){
                f++;
            }
        }
    }
    cout<<modexp(2,f);
    return 0;
}
def start(self):
    """Run the Jupyter launcher, reporting failures and always announcing teardown.

    Any exception raised by ``_launch_jupyter`` is printed and triggers
    ``close``; the closing rule is emitted unconditionally by the
    ``finally`` clause, on success and failure alike.
    """
    try:
        self._launch_jupyter()
    except Exception as exc:
        # Surface the failure to the user, then tear this session down.
        console.print(f'[bold red]:x: {exc}')
        self.close()
    finally:
        # Always announce that the remote connection is gone.
        banner = f'[bold red]:x: Terminated the network 📡 connection to {self.session.host}'
        console.rule(banner, characters='*')
ST. PETERSBURG -- To hear him tell it, Ruslan Starodubov can do almost anything: lay tile, carry heavy equipment, even bake bread. "My hands are the hands of a normal Russian man," he says. "They know what they're doing most of the time." But what Starodubov and his hands like to do most is soldier. Originally from the central Kostroma region, Starodubov, a sturdy-looking man with a red Cossack-style mustache, fought in both Chechen wars and served with the OMON special forces. Even his intermittent civilian jobs tended toward the physical -- porter, security guard, phys-ed teacher. Last spring, when hostilities broke out in eastern Ukraine, Starodubov felt an itch to go fight. The United States was backing a war against his ethnic Russian kin -- "You don't need a fortune-teller to know that," he asserts, offering no details -- and he was desperate to put his military skills to use. "Better I die -- me, with four kids -- than some 18-year-old who doesn't know which end of a gun the bullets come out of," says Starodubov, who recently divorced from the mother of his children. Starodubov is not alone. Hundreds of so-called Russian “volunteers” have decamped for eastern Ukraine since the start of armed hostilities between pro-Russian separatist forces and pro-Kyiv troops in the resource-rich Donbas region. Some are believed to be Russian Army conscripts sent to Ukraine under the guise of volunteering during vacations or breaks. Others, like Starodubov, are genuine volunteers -- ordinary citizens, with varying degrees of military training, who are eager to contribute to what they see as a just cause. While the Kremlin has vigorously denied any official ties to the war, it has openly encouraged the Soviet-era tradition of volunteerism, running numerous television programs praising the accomplishments of the fighters, and allowing volunteer organizations to flourish online. Aleksandr Kobrin, a lawyer and deputy with St. 
Petersburg’s Legislative Assembly, says it’s impossible to determine how many Russians are traveling to the Donbas to fight. “Russia isn’t doing anything to stop the flow of volunteers,” he says. “To the contrary, they’re openly promoting the idea of sending them there.” 'Normal Nationalists' In St. Petersburg -- where Starodubov is currently recovering from a frontline injury, under the watchful eye of his new wife, Larisa, a Donbas native -- recruitment and training organizations have mushroomed since the start of the war. They include Imperial Legion, the Russian nationalist organization that counts among its members Starodubov -- whose field name, “Pastor,” harkens back to a past life as a military priest. Starodubov says the Orthodox faith is a constant in the Legion ranks, but defends the group -- which he says includes Germans, Tatars, and Karelians -- as ethnically and philosophically diverse. “Our ideology isn’t that Slavs are a supernation and everyone else is scum to be destroyed,” he says. “We aren’t Nazis. We’re normal nationalists. We love our people, and we love our homeland. ... The main thing [for membership] is that you’re Orthodox and that you speak and think in Russian.” 'Never A Question' The voluntaristic zeal extends to political groups like Other Russia, an unregistered political party. In St. Petersburg, a portrait of American "gonzo" journalist Hunter S. Thompson presides above bags and boxes carrying food, medicine, and humanitarian aid at the group’s city headquarters. ​The group claims to have recruited dozens of doctors, organizers, and fighters for service in eastern Ukraine, and has collected more than 2 million rubles ($30,000) in local donations. Andrei Dmitriev, the head of Other Russia’s St. Petersburg branch, says he’s rarely seen such an outpouring of public support. “Old women are bringing us their last kopecks,” he says. 
“It’s a pity that such unity comes at the price of war and bloodshed, but it’s happening, and we’re happy that our pulse is beating in union with the pulse of our fellow citizens.” Noncitizens are a factor, as well. Other Russia says it regularly receives applications from Central Asians, most recently an ethnic Uzbek nightclub bouncer, volunteering to fight in Donbas. “These people who used to be on the periphery, who hadn’t achieved much success, are starting to wake up,” says activist Andrei Pesotsky. “They’re getting a second wind.” ​New and old recruits have made the office a meeting point. Two baby-faced teens, Artyom and Masha, say they hope to leave soon for eastern Ukraine. “I’m embarrassed to be living comfortably when people are dying, suffering, starving,” says Artyom. “My predecessors went to war when they were 12, 14 ... Am I somehow worse than them?” Masha, who plans to leave for Donbas without telling her parents, says she’s traveling on her own free will. “When these events began, there was never a question whose side to be on,” she says. ​The enthusiasm of Artyom and Masha is barely dimmed by the presence of a wounded volunteer -- Sergei Maksimov, who returned from Donbas after sustaining serious battlefield injuries. Maksimov, a pale, quiet man who goes by the field name “Silver,” says he’ll return to the war as soon as he’s able. “It’s a military brotherhood,” he says of the soldiers, whom he claims comprise more Ukrainians than Russians. “A thief, a police officer, and a politician can all serve together there, and everything is normal.” School Of Life Groups like Imperial Legion say they insist their volunteers have military experience. “We only ship people who are prepared -- not guys with sparkling eyes and smoke coming out of their ears,” says Starodubov. Other recruiters, however, admit they can take anyone they can get. 
“There’s no better teacher than war,” says historian Igor Ivanov, who heads the Russian Military Union, an organization that worked closely with former military commander of the separatist Donetsk People’s Republic, Igor Strelkov. “Our boys and the boys from Donbas go without any training, but they learn through experience,” says Ivanov, who spent much of 2014 in eastern Ukraine. “A young fighter starting from scratch falls into a unit, and after a month of life in the trenches, he’s a professional fighter.” Ivanov goes on to defend the integrity of boot camp under Strelkov’s regime. The so-called "Novorossia" unit, he says, was strictly alcohol-free and run according to international military principles that prohibit looting, torture, and other forms of abuse. Strelkov’s troops, Ivanov says, often blew themselves up with a grenade rather than surrender to Ukrainian troops. Ukrainian troops, by contrast, readily surrendered, he claims. “They know that we don’t torture,” he says, contradicting evidence from Amnesty International that torture has been used against numerous prisoners held by pro-Russian militias. “When we gave back Ukrainian guys [in prisoner exchanges], they could stand on their own legs,” he says. “But when they gave our guys back, they would just toss us what was essentially a bag of bones, a person with everything in him broken. Officially alive, but not going to survive.”
def build_model(self, train_last_layer_only=False):
  """Builds the BERT token-classification model for this task.

  The encoder is taken either from a TF-Hub module or built from the
  task's encoder config -- supplying both sources is an error.

  Args:
    train_last_layer_only: If True, the encoder is frozen
      (`trainable = False`) so only the classification head trains.

  Returns:
    A `models.BertTokenClassifier` emitting raw logits, with one class
    per entry in `task_config.class_names`.

  Raises:
    ValueError: If both `hub_module_url` and `init_checkpoint` are set.
  """
  cfg = self.task_config
  if cfg.hub_module_url and cfg.init_checkpoint:
    raise ValueError("At most one of `hub_module_url` and "
                     "`init_checkpoint` can be specified.")
  # Prefer the hub module when given; otherwise build from config.
  if cfg.hub_module_url:
    encoder_network = utils.get_encoder_from_hub(cfg.hub_module_url)
  else:
    encoder_network = encoders.build_encoder(cfg.model.encoder)
  encoder_network.trainable = not train_last_layer_only
  return models.BertTokenClassifier(
      network=encoder_network,
      num_classes=len(cfg.class_names),
      initializer=tf.keras.initializers.TruncatedNormal(
          stddev=cfg.model.head_initializer_range),
      dropout_rate=cfg.model.head_dropout,
      output="logits")
package mandrill

import (
	"errors"
	"reflect"
	"sync"
)

// PID is the public handle to a spawned mandrill process. It offers
// message passing (Send*/Read*), lifecycle control (Kill/ExitChan),
// a statistics channel, and a mutex-guarded key/value dictionary.
type PID interface {
	Kill()
	Send(message []interface{}) error
	Send1(m interface{}) error
	Send2(m1, m2 interface{}) error
	Send3(m1, m2, m3 interface{}) error
	Read() []interface{}
	Read1(v interface{}) bool
	Read2(v1, v2 interface{}) bool
	Read3(v1, v2, v3 interface{}) bool
	Stats() chan Stat
	ExitChan() chan bool
	PutValue(name string, item interface{})
	GetValue(name string, v interface{}) bool
}

// PIDList is a convenience alias for a slice of PIDs.
type PIDList []PID

// MandrillPID is the concrete PID implementation.
// NOTE(review): the `exit` flag is read and written from multiple
// goroutines (Send/process/Kill) without synchronization, and Kill
// closes `mailbox` while a concurrent Send may still write to it --
// confirm the intended single-killer lifecycle with the callers.
type MandrillPID struct {
	sys             PidSystem              // owning system, passed to boundFunc
	descriptor      string                 // human-readable identifier, used in stats
	concurrency     int                    // max concurrent boundFunc invocations (procStart capacity)
	mailbox         chan []interface{}     // buffered queue of incoming messages
	procStart       chan bool              // semaphore limiting concurrent process() runs
	boundFunc       func(PID, PidSystem) bool // user handler; returning true kills the PID
	exit            bool                   // set once Kill() has run
	exitChan        chan bool              // receives true exactly once on Kill()
	stats           chan Stat              // buffered statistics stream
	dictionary      map[string]interface{} // per-PID key/value store
	dictionaryMutex sync.Mutex             // guards dictionary
}

// Spawn creates a PID with explicit mailbox size, concurrency and dictionary.
func Spawn(sys PidSystem, descriptor string, mailboxSize int, concurrency int, dictionary map[string]interface{}, boundFunc func(PID, PidSystem) bool) PID {
	return &MandrillPID{sys, descriptor, concurrency, make(chan []interface{}, mailboxSize), make(chan bool, concurrency), boundFunc, false, make(chan bool), make(chan Stat, 100), dictionary, sync.Mutex{}}
}

// SpawnDefault creates a PID with concurrency 1, a very large mailbox
// (10,000,000 entries) and an empty dictionary.
func SpawnDefault(sys PidSystem, descriptor string, boundFunc func(PID, PidSystem) bool) PID {
	return &MandrillPID{sys, descriptor, 1, make(chan []interface{}, 10000000), make(chan bool, 1), boundFunc, false, make(chan bool), make(chan Stat, 100), map[string]interface{}{}, sync.Mutex{}}
}

// GetValue copies the dictionary entry `name` into the pointer `v` via
// reflection. Returns false when the key is absent.
// NOTE(review): panics if v is not a pointer to a type assignable from
// the stored value -- confirm callers always pass matching pointers.
func (pid *MandrillPID) GetValue(name string, v interface{}) bool {
	pid.dictionaryMutex.Lock()
	i, ok := pid.dictionary[name]
	pid.dictionaryMutex.Unlock()
	if !ok {
		return false
	}
	val := reflect.ValueOf(v)
	val.Elem().Set(reflect.ValueOf(i))
	return true
}

// PutValue stores v under name in the dictionary (mutex-guarded).
func (pid *MandrillPID) PutValue(name string, v interface{}) {
	pid.dictionaryMutex.Lock()
	pid.dictionary[name] = v
	pid.dictionaryMutex.Unlock()
}

// Send enqueues a message and spawns one process() goroutine to serve it.
// Returns an error after Kill() has run.
// NOTE(review): if Kill() runs between the exit check and the channel
// send, this writes to a closed channel and panics -- verify lifecycle.
func (pid *MandrillPID) Send(message []interface{}) error {
	if pid.exit {
		return errors.New("PID has exited")
	}
	pid.mailbox <- message
	go pid.process()
	return nil
}

// Send1 wraps a single value into a message slice.
func (pid *MandrillPID) Send1(m interface{}) error {
	return pid.Send([]interface{}{m})
}

// Send2 wraps two values into a message slice.
func (pid *MandrillPID) Send2(m1 interface{}, m2 interface{}) error {
	return pid.Send([]interface{}{m1, m2})
}

// Send3 wraps three values into a message slice.
func (pid *MandrillPID) Send3(m1 interface{}, m2 interface{}, m3 interface{}) error {
	return pid.Send([]interface{}{m1, m2, m3})
}

// process acquires a concurrency slot, records a stats increment, runs
// the bound handler once, releases the slot and kills the PID if the
// handler returned true.
func (pid *MandrillPID) process() {
	if pid.exit {
		return
	}
	pid.procStart <- true // acquire one of `concurrency` slots
	if pid.exit {
		return
	}
	pid.stats <- &Statistic{"increment", []string{"processInvoked"}, &pid.descriptor}
	exit := pid.boundFunc(pid, pid.sys)
	<-pid.procStart // release slot
	if exit {
		pid.Kill()
	}
}

// Read blocks until a message is available and returns it.
func (pid *MandrillPID) Read() []interface{} {
	return <-pid.mailbox
}

// Read1 reads one message and unpacks its first element into v.
// A nil element zeroes *v. Returns false if the message is too short.
func (pid *MandrillPID) Read1(v interface{}) bool {
	a := pid.Read()
	if len(a) < 1 {
		return false
	}
	i := a[0]
	if i == nil {
		val := reflect.ValueOf(v)
		val.Elem().Set(reflect.Zero(val.Type()))
	} else {
		val := reflect.ValueOf(v)
		val.Elem().Set(reflect.ValueOf(i))
	}
	return true
}

// Read2 reads one message and unpacks its first two elements.
func (pid *MandrillPID) Read2(v1, v2 interface{}) bool {
	a := pid.Read()
	if len(a) < 2 {
		return false
	}
	i := a[0]
	if i == nil {
		val := reflect.ValueOf(v1)
		val.Elem().Set(reflect.Zero(val.Type()))
	} else {
		val := reflect.ValueOf(v1)
		val.Elem().Set(reflect.ValueOf(i))
	}
	i = a[1]
	if i == nil {
		val := reflect.ValueOf(v2)
		val.Elem().Set(reflect.Zero(val.Type()))
	} else {
		val := reflect.ValueOf(v2)
		val.Elem().Set(reflect.ValueOf(i))
	}
	return true
}

// Read3 reads one message and unpacks its first three elements.
func (pid *MandrillPID) Read3(v1, v2, v3 interface{}) bool {
	a := pid.Read()
	if len(a) < 3 {
		return false
	}
	i := a[0]
	if i == nil {
		val := reflect.ValueOf(v1)
		val.Elem().Set(reflect.Zero(val.Type()))
	} else {
		val := reflect.ValueOf(v1)
		val.Elem().Set(reflect.ValueOf(i))
	}
	i = a[1]
	if i == nil {
		val := reflect.ValueOf(v2)
		val.Elem().Set(reflect.Zero(val.Type()))
	} else {
		val := reflect.ValueOf(v2)
		val.Elem().Set(reflect.ValueOf(i))
	}
	i = a[2]
	if i == nil {
		val := reflect.ValueOf(v3)
		val.Elem().Set(reflect.Zero(val.Type()))
	} else {
		val := reflect.ValueOf(v3)
		val.Elem().Set(reflect.ValueOf(i))
	}
	return true
}

// Stats exposes the statistics channel.
func (pid *MandrillPID) Stats() chan Stat {
	return pid.stats
}

// Kill marks the PID exited, emits a final "killed" stat, closes all
// channels and signals exitChan once.
// NOTE(review): the exitChan send is unbuffered and blocks until
// someone receives from ExitChan() -- confirm a reader always exists.
func (pid *MandrillPID) Kill() {
	if pid.exit {
		return
	}
	pid.exit = true
	pid.stats <- &Statistic{"event", []string{"killed"}, &pid.descriptor}
	close(pid.mailbox)
	close(pid.procStart)
	close(pid.stats)
	pid.exitChan <- true
	close(pid.exitChan)
}

// ExitChan exposes the exit-notification channel.
func (pid *MandrillPID) ExitChan() chan bool {
	return pid.exitChan
}
package entities;

import javax.persistence.*;
import java.math.BigDecimal;
import java.util.Set;

/**
 * JPA entity mapped to the {@code rooms} table.
 * <p>
 * The room number itself is the primary key. Room type, bed type and
 * status are lookup entities referenced via many-to-one foreign keys;
 * occupancies refer back to this room via the {@code room} field on
 * {@link Occupancy} (one-to-many, mapped by "room").
 */
@Entity
@Table(name = "rooms")
public class Room {

    private Long roomNumber;            // natural primary key (column "room_number")
    private RoomType roomType;          // FK to room type lookup table
    private BedType bedType;            // FK to bed type lookup table
    private BigDecimal rate;            // nightly rate
    private RoomStatus roomStatus;      // FK to room status lookup table
    private String notes;               // free-form notes
    private Set<Occupancy> roomOccupancies; // inverse side of Occupancy.room

    /** No-args constructor required by JPA. */
    public Room() {
    }

    /** @return the room number, which serves as the primary key. */
    @Id
    @Column(name = "room_number")
    public Long getRoomNumber() {
        return roomNumber;
    }

    public void setRoomNumber(Long roomNumber) {
        this.roomNumber = roomNumber;
    }

    @ManyToOne
    @JoinColumn(name = "room_type_id", referencedColumnName = "id")
    public RoomType getRoomType() {
        return roomType;
    }

    public void setRoomType(RoomType roomType) {
        this.roomType = roomType;
    }

    @ManyToOne
    @JoinColumn(name = "bed_type_id", referencedColumnName = "id")
    public BedType getBedType() {
        return bedType;
    }

    public void setBedType(BedType bedType) {
        this.bedType = bedType;
    }

    @Basic
    public BigDecimal getRate() {
        return rate;
    }

    public void setRate(BigDecimal rate) {
        this.rate = rate;
    }

    @ManyToOne
    @JoinColumn(name = "room_status_id", referencedColumnName = "id")
    public RoomStatus getRoomStatus() {
        return roomStatus;
    }

    public void setRoomStatus(RoomStatus roomStatus) {
        this.roomStatus = roomStatus;
    }

    @Basic
    public String getNotes() {
        return notes;
    }

    public void setNotes(String notes) {
        this.notes = notes;
    }

    /** @return occupancies whose {@code room} field points at this room. */
    @OneToMany(mappedBy = "room")
    public Set<Occupancy> getRoomOccupancies() {
        return roomOccupancies;
    }

    public void setRoomOccupancies(Set<Occupancy> roomOccupancies) {
        this.roomOccupancies = roomOccupancies;
    }
}
#include <bits/stdc++.h>
using namespace std;
typedef long long ll;
typedef long double ld;
typedef pair<int,int> P;
typedef pair<ll,ll> l_l;
const int INF=1001001000;
const int mINF=-1001001000;
const ll LINF=10100100100100100;

/*
 * Reads n and k and prints the number of base-k digits of n, i.e. the
 * smallest d with k^d > n (equivalently floor(log_k n) + 1).
 *
 * Fix: the original computed k^i with the floating-point pow(), which
 * can round down for large values (e.g. pow(10, 15) returning
 * 999999999999999.99...) and mis-answer near power-of-k boundaries.
 * Repeated integer division is exact and needs no power table.
 */
int main(){
    ll n, k;
    cin >> n >> k;
    int digits = 0;
    while (n > 0) {   // each division strips one base-k digit
        n /= k;
        ++digits;
    }
    cout << digits << endl;
    return 0;
}
def handling_cost(self) -> float:
    """Total handling cost over all customers, computed lazily.

    On first call, sums ``Stacks.cost(customer, before, after)`` for each
    customer, where ``before``/``after`` are the plan entries surrounding
    that customer's stop, and caches the result in ``self._handling_cost``.
    Subsequent calls return the cached value.
    """
    if self._handling_cost is None:
        # The plan has one more entry than there are customers.
        assert len(self.customers) + 1 == len(self.plan)

        self._handling_cost = sum(
            (Stacks.cost(customer, self.plan[idx], self.plan[idx + 1])
             for idx, customer in enumerate(self.customers)),
            0.)

    return self._handling_cost
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ExampleParserConfiguration."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from google.protobuf import text_format

from tensorflow.core.example import example_parser_configuration_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
from tensorflow.python.util.example_parser_configuration import extract_example_parser_configuration

# Expected configuration for a parse_example op with one fixed-length
# float feature "x" (default 33.0) and one variable-length string
# feature "y". Tensor names below match the op built in testBasic.
BASIC_PROTO = """
feature_map {
  key: "x"
  value {
    fixed_len_feature {
      dtype: DT_FLOAT
      shape {
        dim {
          size: 1
        }
      }
      default_value {
        dtype: DT_FLOAT
        tensor_shape {
          dim {
            size: 1
          }
        }
        float_val: 33.0
      }
      values_output_tensor_name: "ParseExample/ParseExample:3"
    }
  }
}
feature_map {
  key: "y"
  value {
    var_len_feature {
      dtype: DT_STRING
      values_output_tensor_name: "ParseExample/ParseExample:1"
      indices_output_tensor_name: "ParseExample/ParseExample:0"
      shapes_output_tensor_name: "ParseExample/ParseExample:2"
    }
  }
}
"""


class ExampleParserConfigurationTest(test.TestCase):
  """Checks extract_example_parser_configuration against a golden proto."""

  def testBasic(self):
    # Parse the expected configuration from its text form.
    golden_config = example_parser_configuration_pb2.ExampleParserConfiguration(
    )
    text_format.Parse(BASIC_PROTO, golden_config)
    with session.Session() as sess:
      # Build a parse_example op matching the golden proto's features.
      examples = array_ops.placeholder(dtypes.string, shape=[1])
      feature_to_type = {
          'x': parsing_ops.FixedLenFeature([1], dtypes.float32, 33.0),
          'y': parsing_ops.VarLenFeature(dtypes.string)
      }
      _ = parsing_ops.parse_example(examples, feature_to_type)
      parse_example_op = sess.graph.get_operation_by_name(
          'ParseExample/ParseExample')
      # Extract the configuration back from the graph and compare.
      config = extract_example_parser_configuration(parse_example_op, sess)
      self.assertProtoEquals(golden_config, config)


if __name__ == '__main__':
  test.main()
// Core identity fields for an authenticated user.
export interface User {
  uid: string;          // unique user id
  photoURL: string;     // avatar image URL
  displayName: string;
  email: string;
  phoneNumber: string;
  providerId: string;   // auth provider identifier
}

// User profile extended with application-specific settings.
export interface UserAppSetting extends User {
  AppKey: string;
  TeamId: string;
  AboutMe: string;
  AppTheme: string;
  GithubProfile: string;
  LinkedInProfile: string;
  DateOfJoining: string;
}

// Placeholder user used when no authenticated user is available.
// NOTE(review): phoneNumber is null here although User declares it as
// string, and this constant carries UserAppSetting fields without being
// typed as UserAppSetting — confirm whether the interface should allow
// `string | null` and whether an explicit type annotation is intended.
export const defaultUser = {
  uid: "defaultUser",
  photoURL: "../../../assets/defaultavatar.jpg",
  displayName: "Default User",
  email: "<EMAIL>",
  phoneNumber: null,
  providerId: "worktez",
  AppKey: "",
  TeamId: "",
  AboutMe: "",
  AppTheme: "",
  GithubProfile: "",
  LinkedInProfile: "",
  DateOfJoining: "",
};
/* * ATtinySerialOut.cpp * * For transmitting debug data over bit bang serial with 115200 baud for 1/8/16 MHz ATtiny clock. * For 1 MHz you can choose also 38400 baud (120 bytes smaller code size). * For 8/16 MHz you can choose also 230400 baud (just faster). * 1 Start, 8 Data, 1 Stop, No Parity * * Using PB2 // (Pin7 on Tiny85) as default TX pin to be compatible with digispark board * To change the output pin, modify the line "#define TX_PIN ..." in TinySerialOut.h or or set it as compiler symbol like "-DTX_PIN PB1". * * Using the Serial.print commands needs 4 bytes extra for each call. * * * Copyright (C) 2015-2020 <NAME> * Email: <EMAIL> * * This file is part of TinySerialOut https://github.com/ArminJo/ATtinySerialOut. * * TinySerialOut is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/gpl.html>. 
 *
 */

#if defined(__AVR_ATtiny25__) || defined(__AVR_ATtiny45__) || defined(__AVR_ATtiny85__) \
    || defined(__AVR_ATtiny24__) || defined(__AVR_ATtiny44__) || defined(__AVR_ATtiny84__) \
    || defined(__AVR_ATtiny87__) || defined(__AVR_ATtiny167__)

#include "ATtinySerialOut.h"
#include <avr/eeprom.h>     // for eeprom_read_byte() in writeString_E()

#ifndef _NOP
#define _NOP()  __asm__ volatile ("nop")
#endif

#ifndef PORTB
#define PORTB (*(volatile uint8_t *)((0x18) + 0x20))
#endif

#if defined(__AVR_ATtiny87__) || defined(__AVR_ATtiny167__)
# ifndef TX_PORT
#define TX_PORT PORTA
#define TX_PORT_ADDR 0x02 // PORTA
#define TX_DDR DDRA
//#define TX_PORT PORTB
//#define TX_PORT_ADDR 0x05
//#define TX_DDR DDRB
# endif
#else
// ATtinyX5 here
#define TX_PORT PORTB
#define TX_PORT_ADDR 0x18 // PORTB
#define TX_DDR DDRB
#endif // defined(__AVR_ATtiny87__) || defined(__AVR_ATtiny167__)

void write1Start8Data1StopNoParity(uint8_t aValue);

// When true, string/byte writers wrap each byte in cli()/sei() so an
// interrupt cannot distort the software-serial bit timing.
bool sUseCliSeiForWrite = true;

// Configure the TX pin as output, idle HIGH (serial line idle state).
void initTXPin() {
    // TX_PIN is active LOW, so set it to HIGH initially
    TX_PORT |= (1 << TX_PIN);
    // set pin direction to output
    TX_DDR |= (1 << TX_PIN);
}

// Send one byte with interrupts disabled for the duration of the frame;
// the previous interrupt-enable state (SREG) is restored afterwards.
void write1Start8Data1StopNoParityWithCliSei(uint8_t aValue) {
    uint8_t oldSREG = SREG;
    cli();
    write1Start8Data1StopNoParity(aValue);
    SREG = oldSREG;
}

// Send one raw byte without any interrupt guard.
void writeValue(uint8_t aValue) {
    write1Start8Data1StopNoParity(aValue);
}

/*
 * Used for writeString() and therefore all write<type>() and print<type>
 */
void useCliSeiForStrings(bool aUseCliSeiForWrite) {
    sUseCliSeiForWrite = aUseCliSeiForWrite;
}

/*
 * Write String residing in RAM
 */
void writeString(const char *aStringPtr) {
#ifndef USE_ALWAYS_CLI_SEI_GUARD_FOR_OUTPUT
    if (sUseCliSeiForWrite) {
#endif
        while (*aStringPtr != 0) {
            write1Start8Data1StopNoParityWithCliSei(*aStringPtr++);
        }
#ifndef USE_ALWAYS_CLI_SEI_GUARD_FOR_OUTPUT
    } else {
        while (*aStringPtr != 0) {
            write1Start8Data1StopNoParity(*aStringPtr++);
        }
    }
#endif
}

/*
 * Write string residing in program space (FLASH)
 */
void writeString_P(const char *aStringPtr) {
    uint8_t tChar = pgm_read_byte((const uint8_t *) aStringPtr);
    // Comparing with 0xFF is safety net for wrong string pointer
    while (tChar != 0 && tChar != 0xFF) {
#ifdef USE_ALWAYS_CLI_SEI_GUARD_FOR_OUTPUT
        write1Start8Data1StopNoParityWithCliSei(tChar);
#else
        if (sUseCliSeiForWrite) {
            write1Start8Data1StopNoParityWithCliSei(tChar);
        } else {
            write1Start8Data1StopNoParity(tChar);
        }
#endif
        tChar = pgm_read_byte((const uint8_t *) ++aStringPtr);
    }
}

/*
 * Write string residing in program space (FLASH)
 */
void writeString(const __FlashStringHelper *aStringPtr) {
    PGM_P tPGMStringPtr = reinterpret_cast<PGM_P>(aStringPtr);
    uint8_t tChar = pgm_read_byte((const uint8_t *) aStringPtr);
    // Comparing with 0xFF is safety net for wrong string pointer
    while (tChar != 0 && tChar != 0xFF) {
#ifdef USE_ALWAYS_CLI_SEI_GUARD_FOR_OUTPUT
        write1Start8Data1StopNoParityWithCliSei(tChar);
#else
        if (sUseCliSeiForWrite) {
            write1Start8Data1StopNoParityWithCliSei(tChar);
        } else {
            write1Start8Data1StopNoParity(tChar);
        }
#endif
        tChar = pgm_read_byte((const uint8_t *) ++tPGMStringPtr);
    }
}

/*
 * Write string residing in EEPROM space
 */
void writeString_E(const char *aStringPtr) {
    uint8_t tChar = eeprom_read_byte((const uint8_t *) aStringPtr);
    // Comparing with 0xFF is safety net for wrong string pointer
    while (tChar != 0 && tChar != 0xFF) {
#ifdef USE_ALWAYS_CLI_SEI_GUARD_FOR_OUTPUT
        write1Start8Data1StopNoParityWithCliSei(tChar);
#else
        if (sUseCliSeiForWrite) {
            write1Start8Data1StopNoParityWithCliSei(tChar);
        } else {
            write1Start8Data1StopNoParity(tChar);
        }
#endif
        tChar = eeprom_read_byte((const uint8_t *) ++aStringPtr);
    }
}

// RAM string output, never guarded by cli()/sei().
void writeStringWithoutCliSei(const char *aStringPtr) {
    while (*aStringPtr != 0) {
        write1Start8Data1StopNoParity(*aStringPtr++);
    }
}

// RAM string output, always guarded by cli()/sei() per byte.
void writeStringWithCliSei(const char *aStringPtr) {
    while (*aStringPtr != 0) {
        write1Start8Data1StopNoParityWithCliSei(*aStringPtr++);
    }
}

// Output a RAM string, dropping leading space characters first.
// Used by the number writers below to strip itoa()/dtostrf() padding.
void writeStringSkipLeadingSpaces(const char *aStringPtr) {
    // skip leading spaces
    while (*aStringPtr == ' ' && *aStringPtr != 0) {
        aStringPtr++;
    }
#ifndef USE_ALWAYS_CLI_SEI_GUARD_FOR_OUTPUT
    if (sUseCliSeiForWrite) {
#endif
        while (*aStringPtr != 0) {
            write1Start8Data1StopNoParityWithCliSei(*aStringPtr++);
        }
#ifndef USE_ALWAYS_CLI_SEI_GUARD_FOR_OUTPUT
    } else {
        while (*aStringPtr != 0) {
            write1Start8Data1StopNoParity(*aStringPtr++);
        }
    }
#endif
}

// Output one raw byte, honoring the global cli/sei setting.
void writeBinary(uint8_t aByte) {
#ifdef USE_ALWAYS_CLI_SEI_GUARD_FOR_OUTPUT
    write1Start8Data1StopNoParityWithCliSei(aByte);
#else
    if (sUseCliSeiForWrite) {
        write1Start8Data1StopNoParityWithCliSei(aByte);
    } else {
        write1Start8Data1StopNoParity(aByte);
    }
#endif
}

void writeChar(uint8_t aChar) {
    writeBinary(aChar);
}

void writeCRLF() {
    writeBinary('\r');
    writeBinary('\n');
}

void writeUnsignedByte(uint8_t aByte) {
    char tStringBuffer[4];
    utoa(aByte, tStringBuffer, 10);
    writeStringSkipLeadingSpaces(tStringBuffer);
}

/*
 * 2 Byte Hex output
 */
void writeUnsignedByteHex(uint8_t aByte) {
    char tStringBuffer[3];
//    tStringBuffer[0] = nibbleToHex(aByte >> 4);
//    tStringBuffer[1] = nibbleToHex(aByte);
//    tStringBuffer[2] = '\0';
    // the utoa() version is 8 bytes smaller than the version with nibbleToHex(),
    // if utoa() is allocated by another function.
    utoa(aByte, &tStringBuffer[0], 16);
    // Pad a single hex digit with a leading '0' so output is always 2 chars.
    if (tStringBuffer[1] == '\0') {
        tStringBuffer[2] = '\0';
        tStringBuffer[1] = tStringBuffer[0];
        tStringBuffer[0] = '0';
    }
    writeString(tStringBuffer);
}

/*
 * 2 Byte Hex output with 2 Byte prefix "0x"
 */
void writeUnsignedByteHexWithPrefix(uint8_t aByte) {
    writeBinary('0');
    writeBinary('x');
    writeUnsignedByteHex(aByte);
}

// Convert the low nibble of aByte to its ASCII hex digit ('0'-'9','A'-'F').
char nibbleToHex(uint8_t aByte) {
    aByte = aByte & 0x0F;
    if (aByte < 10) {
        return aByte + '0';
    }
    return aByte + 'A' - 10;
}

void writeByte(int8_t aByte) {
    char tStringBuffer[5];
    itoa(aByte, tStringBuffer, 10);
    writeStringSkipLeadingSpaces(tStringBuffer);
}

void writeInt(int16_t aInteger) {
    char tStringBuffer[7];
    itoa(aInteger, tStringBuffer, 10);
    writeStringSkipLeadingSpaces(tStringBuffer);
}

void writeUnsignedInt(uint16_t aInteger) {
    char tStringBuffer[6];
    utoa(aInteger, tStringBuffer, 10);
    writeStringSkipLeadingSpaces(tStringBuffer);
}

void writeLong(int32_t aLong) {
    char tStringBuffer[12];
    ltoa(aLong, tStringBuffer, 10);
    writeStringSkipLeadingSpaces(tStringBuffer);
}

void writeUnsignedLong(uint32_t aLong) {
    char tStringBuffer[11];
    ultoa(aLong, tStringBuffer, 10);
    writeStringSkipLeadingSpaces(tStringBuffer);
}

// Fixed 3-decimal-digit float output.
void writeFloat(double aFloat) {
    char tStringBuffer[11];
    dtostrf(aFloat, 10, 3, tStringBuffer);
    writeStringSkipLeadingSpaces(tStringBuffer);
}

// Float output with caller-selected number of decimal digits.
void writeFloat(double aFloat, uint8_t aDigits) {
    char tStringBuffer[11];
    dtostrf(aFloat, 10, aDigits, tStringBuffer);
    writeStringSkipLeadingSpaces(tStringBuffer);
}

/******************************************************
 * The TinySerialOut class functions which implements
 * the Serial + printHex() and printlnHex() functions
 ******************************************************/

/*
 * An alternative way to call the init function :-)
 * Only the single compiled-in baud rate is supported; any other request
 * prints a warning over the serial line itself.
 */
void TinySerialOut::begin(long aBaudrate) {
    initTXPin();
#if defined(USE_115200BAUD) // else smaller code, but only 38400 baud at 1 MHz
    if (aBaudrate != 115200) {
        println(F("Only 115200 supported!"));
    }
#else
#if (F_CPU == 1000000)
    if (aBaudrate != 38400) {
        println(F("Only 38400 supported!"));
    }
#else
    if (aBaudrate != 230400) {
        println(F("Only 230400 supported!"));
    }
#endif
#endif
}

void TinySerialOut::end() {
    // no action needed
}

void TinySerialOut::flush() {
    // no action needed, since we do not use a buffer
}

/*
 * 2 Byte Hex output with 2 Byte prefix "0x"
 */
void TinySerialOut::printHex(uint8_t aByte) {
    writeUnsignedByteHexWithPrefix(aByte);
}

void TinySerialOut::printHex(uint16_t aWord) {
    writeUnsignedByteHexWithPrefix(aWord >> 8);
    writeUnsignedByteHex(aWord);
}

void TinySerialOut::printlnHex(uint8_t aByte) {
    printHex(aByte);
    println();
}

void TinySerialOut::printlnHex(uint16_t aWord) {
    printHex(aWord);
    println();
}

// virtual functions of Print class
size_t TinySerialOut::write(uint8_t aByte) {
    writeBinary(aByte);
    return 1;
}

void TinySerialOut::print(const char *aStringPtr) {
    writeString(aStringPtr);
}

void TinySerialOut::print(const __FlashStringHelper *aStringPtr) {
    writeString(aStringPtr);
}

void TinySerialOut::print(char aChar) {
    writeBinary(aChar);
}

void TinySerialOut::print(uint8_t aByte, uint8_t aBase) {
    if (aBase == 16) {
        /*
         * Print Hex always with two characters
         */
        writeUnsignedByteHex(aByte);
    } else {
        char tStringBuffer[4];
        utoa(aByte, tStringBuffer, aBase);
        writeStringSkipLeadingSpaces(tStringBuffer);
    }
}

void TinySerialOut::print(int16_t aInteger, uint8_t aBase) {
    char tStringBuffer[7];
    itoa(aInteger, tStringBuffer, aBase);
    writeStringSkipLeadingSpaces(tStringBuffer);
}

void TinySerialOut::print(uint16_t aInteger, uint8_t aBase) {
    char tStringBuffer[6];
    utoa(aInteger, tStringBuffer, aBase);
    writeStringSkipLeadingSpaces(tStringBuffer);
}

void TinySerialOut::print(int32_t aLong, uint8_t aBase) {
    char tStringBuffer[12];
    ltoa(aLong, tStringBuffer, aBase);
    writeStringSkipLeadingSpaces(tStringBuffer);
}

void TinySerialOut::print(uint32_t aLong, uint8_t aBase) {
    char tStringBuffer[11];
    ultoa(aLong, tStringBuffer, aBase);
    writeStringSkipLeadingSpaces(tStringBuffer);
}

void TinySerialOut::print(double aFloat, uint8_t aDigits) {
    char tStringBuffer[11];
    dtostrf(aFloat, 10, aDigits, tStringBuffer);
    writeStringSkipLeadingSpaces(tStringBuffer);
}

void TinySerialOut::println(char aChar) {
    print(aChar);
    println();
}

void TinySerialOut::println(const char *aStringPtr) {
    print(aStringPtr);
    println();
}

void TinySerialOut::println(const __FlashStringHelper *aStringPtr) {
    print(aStringPtr);
    println();
}

void TinySerialOut::println(uint8_t aByte, uint8_t aBase) {
    print(aByte, aBase);
    println();
}

void TinySerialOut::println(int16_t aInteger, uint8_t aBase) {
    print(aInteger, aBase);
    println();
}

void TinySerialOut::println(uint16_t aInteger, uint8_t aBase) {
    print(aInteger, aBase);
    println();
}

void TinySerialOut::println(int32_t aLong, uint8_t aBase) {
    print(aLong, aBase);
    println();
}

void TinySerialOut::println(uint32_t aLong, uint8_t aBase) {
    print(aLong, aBase);
    println();
}

void TinySerialOut::println(double aFloat, uint8_t aDigits) {
    print(aFloat, aDigits);
    println();
}

void TinySerialOut::println() {
    print('\r');
    print('\n');
}

/*
 * The Serial Instance!!!
 */
// #if ... to be compatible with ATTinyCores and AttinyDigisparkCores
#if (!defined(UBRRH) && !defined(UBRR0H)) /*AttinyDigisparkCore and AttinyDigisparkCore condition*/ \
    || USE_SOFTWARE_SERIAL /*AttinyDigisparkCore condition*/ \
    || ((defined(UBRRH) || defined(UBRR0H) || defined(UBRR1H) || defined(LINBRRH)) && !USE_SOFTWARE_SERIAL) /*AttinyDigisparkCore condition for HardwareSerial*/
// Switch to SerialOut since Serial is already defined
// or comment out line 745 in TinyDebugSerial.h included in AttinyDigisparkCores/src/tiny/WProgram.h at line 24 for AttinyDigisparkCores
TinySerialOut SerialOut;
#else
TinySerialOut Serial;
#endif

/********************************
 * Basic serial output function
 *******************************/

inline void delay4CyclesInlineExact(uint16_t a4Microseconds) {
    /*
     * The loop takes 4 cycles (4 microseconds at 1 MHz). Last loop is only 3 cycles.
     * Setting of loop counter a4Microseconds needs 2 cycles
     * 3 -> 13 cycles (3*4 -1 + 2) = 3*4 + 1
     * 4 -> 17 cycles
     * 5 -> 21 cycles
     */
    asm volatile (
            "1: sbiw %0,1" "\n\t"    // 2 cycles
            "brne .-4" : "=w" (a4Microseconds) : "0" (a4Microseconds)    // 2 cycles
    );
}

#if (F_CPU == 1000000) && defined(USE_115200BAUD) //else smaller code, but only 38400 baud at 1 MHz
/*
 * 115200 baud - 8,680 cycles per bit, 86,8 per byte at 1 MHz
 *
 * Assembler code for 115200 baud extracted from Digispark core files:
 * Code size is 196 Byte (including first call)
 *
 * TinySerialOut.h - Tiny write-only software serial.
 * Copyright 2010 <NAME>. This code is part of Arduino-Tiny.
 *
 * Arduino-Tiny is free software: you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or (at your
 * option) any later version.
 */
void write1Start8Data1StopNoParity(uint8_t aValue) {
    asm volatile
    (
            /* Start bit, then bits b0..b7 LSB first via ror through carry,
             * then stop bit. Each bit cell is padded to ~8.68 cycles. */
            "cbi %[txport], %[txpin]" "\n\t" /* 2  <--- 0 */
            "ror %[value]" "\n\t" /* 1 */
            "nop" "\n\t" /* 1 */
            "nop" "\n\t" /* 1 */
            "nop" "\n\t" /* 1 */
            "nop" "\n\t" /* 1 */

            "brcs L%=b0h" "\n\t" /* 1  (not taken) */
            "nop" "\n\t" /* 1 */
            "cbi %[txport], %[txpin]" "\n\t" /* 2  <--- st is 9 cycles */
            "rjmp L%=b0z" "\n\t" /* 2 */
            "L%=b0h: " /* 2  (taken) */
            "sbi %[txport], %[txpin]" "\n\t" /* 2  <--- st is 9 cycles */
            "nop" "\n\t" /* 1 */
            "nop" "\n\t" /* 1 */
            "L%=b0z: "
            "ror %[value]" "\n\t" /* 1 */

            "nop" "\n\t" /* 1 */

            "brcs L%=b1h" "\n\t" /* 1  (not taken) */
            "nop" "\n\t" /* 1 */
            "cbi %[txport], %[txpin]" "\n\t" /* 2  <--- b0 is 8 cycles */
            "rjmp L%=b1z" "\n\t" /* 2 */
            "L%=b1h: " /* 2  (taken) */
            "sbi %[txport], %[txpin]" "\n\t" /* 2  <--- b0 is 8 cycles */
            "nop" "\n\t" /* 1 */
            "nop" "\n\t" /* 1 */
            "L%=b1z: "
            "ror %[value]" "\n\t" /* 1 */

            "nop" "\n\t" /* 1 */
            "nop" "\n\t" /* 1 */

            "brcs L%=b2h" "\n\t" /* 1  (not taken) */
            "nop" "\n\t" /* 1 */
            "cbi %[txport], %[txpin]" "\n\t" /* 2  <--- b1 is 9 cycles */
            "rjmp L%=b2z" "\n\t" /* 2 */
            "L%=b2h: " /* 2  (taken) */
            "sbi %[txport], %[txpin]" "\n\t" /* 2  <--- b1 is 9 cycles */
            "nop" "\n\t" /* 1 */
            "nop" "\n\t" /* 1 */
            "L%=b2z: "
            "ror %[value]" "\n\t" /* 1 */

            "nop" "\n\t" /* 1 */
            "nop" "\n\t" /* 1 */

            "brcs L%=b3h" "\n\t" /* 1  (not taken) */
            "nop" "\n\t" /* 1 */
            "cbi %[txport], %[txpin]" "\n\t" /* 2  <--- b2 is 9 cycles */
            "rjmp L%=b3z" "\n\t" /* 2 */
            "L%=b3h: " /* 2  (taken) */
            "sbi %[txport], %[txpin]" "\n\t" /* 2  <--- b2 is 9 cycles */
            "nop" "\n\t" /* 1 */
            "nop" "\n\t" /* 1 */
            "L%=b3z: "
            "ror %[value]" "\n\t" /* 1 */

            "nop" "\n\t" /* 1 */

            "brcs L%=b4h" "\n\t" /* 1  (not taken) */
            "nop" "\n\t" /* 1 */
            "cbi %[txport], %[txpin]" "\n\t" /* 2  <--- b3 is 8 cycles */
            "rjmp L%=b4z" "\n\t" /* 2 */
            "L%=b4h: " /* 2  (taken) */
            "sbi %[txport], %[txpin]" "\n\t" /* 2  <--- b3 is 8 cycles */
            "nop" "\n\t" /* 1 */
            "nop" "\n\t" /* 1 */
            "L%=b4z: "
            "ror %[value]" "\n\t" /* 1 */

            "nop" "\n\t" /* 1 */
            "nop" "\n\t" /* 1 */

            "brcs L%=b5h" "\n\t" /* 1  (not taken) */
            "nop" "\n\t" /* 1 */
            "cbi %[txport], %[txpin]" "\n\t" /* 2  <--- b4 is 9 cycles */
            "rjmp L%=b5z" "\n\t" /* 2 */
            "L%=b5h: " /* 2  (taken) */
            "sbi %[txport], %[txpin]" "\n\t" /* 2  <--- b4 is 9 cycles */
            "nop" "\n\t" /* 1 */
            "nop" "\n\t" /* 1 */
            "L%=b5z: "
            "ror %[value]" "\n\t" /* 1 */

            "nop" "\n\t" /* 1 */
            "nop" "\n\t" /* 1 */

            "brcs L%=b6h" "\n\t" /* 1  (not taken) */
            "nop" "\n\t" /* 1 */
            "cbi %[txport], %[txpin]" "\n\t" /* 2  <--- b5 is 9 cycles */
            "rjmp L%=b6z" "\n\t" /* 2 */
            "L%=b6h: " /* 2  (taken) */
            "sbi %[txport], %[txpin]" "\n\t" /* 2  <--- b5 is 9 cycles */
            "nop" "\n\t" /* 1 */
            "nop" "\n\t" /* 1 */
            "L%=b6z: "
            "ror %[value]" "\n\t" /* 1 */

            "nop" "\n\t" /* 1 */

            "brcs L%=b7h" "\n\t" /* 1  (not taken) */
            "nop" "\n\t" /* 1 */
            "cbi %[txport], %[txpin]" "\n\t" /* 2  <--- b6 is 8 cycles */
            "rjmp L%=b7z" "\n\t" /* 2 */
            "L%=b7h: " /* 2  (taken) */
            "sbi %[txport], %[txpin]" "\n\t" /* 2  <--- b6 is 8 cycles */
            "nop" "\n\t" /* 1 */
            "nop" "\n\t" /* 1 */
            "L%=b7z: "
            "nop" "\n\t" /* 1 */
            "nop" "\n\t" /* 1 */
            "nop" "\n\t" /* 1 */
            "nop" "\n\t" /* 1 */
            "nop" "\n\t" /* 1 */

            "sbi %[txport], %[txpin]" "\n\t" /* 2  <--- b7 is 9 cycles */
            "nop" "\n\t" /* 1 */
            "nop" "\n\t" /* 1 */
            "nop" "\n\t" /* 1 */
            "nop" "\n\t" /* 1 */
            "nop" "\n\t" /* 1 */
            "nop" "\n\t" /* 1 */
            "nop" "\n\t" /* 1 */
            /* <---sp is 9 cycles */

            :
            : [value] "r" ( aValue ), [txport] "I" ( TX_PORT_ADDR ), [txpin] "I" ( TX_PIN )
    );
}
#else
/*
 * Small code using loop. Code size is 76 Byte (including first call)
 *
 * 1 MHz CPU Clock
 * 26,04 cycles per bit, 260,4 per byte for 38400 baud at 1 MHz Clock
 * 17,36 cycles per bit, 173,6 per byte for 57600 baud at 1 MHz Clock -> therefore use 38400 baud
 * 24 cycles between each cbi/sbi (Clear/Set Bit in IO-register) command.
 *
 * 8 MHz CPU Clock
 * 69,44 cycles per bit, 694,4 per byte for 115200 baud at 8 MHz Clock
 * 34,72 cycles per bit, 347,2 per byte for 230400 baud at 8 MHz Clock.
 * 68 / 33 cycles between each cbi (Clear Bit in IO-register) or sbi command.
 *
 * 16 MHz CPU Clock
 * 138,88 cycles per bit, 1388,8 per byte for 115200 baud at 16 MHz Clock
 * 69,44 cycles per bit, 694,4 per byte for 230400 baud at 16 MHz Clock
 * 137 / 68 cycles between each cbi (Clear Bit in IO-register) or sbi command.
 *
 * 2 cycles for each cbi/sbi instruction.
 */
void write1Start8Data1StopNoParity(uint8_t aValue) {
    asm volatile
    (
            "cbi %[txport] , %[txpin]" "\n\t" // 2    PORTB &= ~(1 << TX_PIN);
#if (F_CPU == 1000000) && !defined(USE_115200BAUD) // 1 MHz 38400 baud
            // 0 cycles padding to get additional 4 cycles
            //delay4CyclesInlineExact(5); -> 20 cycles
            "ldi r30 , 0x05" "\n\t"// 1
#elif ((F_CPU == 8000000) && defined(USE_115200BAUD)) || ((F_CPU == 16000000) && !defined(USE_115200BAUD)) // 8 MHz 115200 baud OR 16 MHz 230400 baud
            // 3 cycles padding to get additional 7 cycles
            "nop" "\n\t"// 1    _NOP();
            "nop" "\n\t"// 1    _NOP();
            "nop" "\n\t"// 1    _NOP();
            //delay4CyclesInlineExact(15); -> 61 cycles
            "ldi r30 , 0x0F" "\n\t"// 1
#elif (F_CPU == 8000000) && !defined(USE_115200BAUD) // 8 MHz 230400 baud
            // 0 cycles padding to get additional 4 cycles
            //delay4CyclesInlineExact(7); -> 29 cycles
            "ldi r30 , 0x07" "\n\t"// 1
#elif (F_CPU == 16000000) && defined(USE_115200BAUD) // 16 MHz 115200 baud
            // 0 cycles padding to get additional 4 cycles
            //delay4CyclesInlineExact(33); -> 133 cycles
            "ldi r30 , 0x21" "\n\t"// 1
#endif
            "ldi r31 , 0x00" "\n\t" // 1
            "delay1:"
            "sbiw r30 , 0x01" "\n\t" // 2
            "brne delay1" "\n\t" // 1-2
            "ldi r25 , 0x08" "\n\t" // 1

            // Start of loop
            // if (aValue & 0x01) {
            "txloop:"
            "sbrs %[value] , 0" "\n\t" // 1
            "rjmp .+6" "\n\t" // 2
            "nop" "\n\t" // 1
            "sbi %[txport] , %[txpin]" "\n\t" // 2    PORTB |= 1 << TX_PIN;
            "rjmp .+6" "\n\t" // 2
            "cbi %[txport] , %[txpin]" "\n\t" // 2    PORTB &= ~(1 << TX_PIN);
            "nop" "\n\t" // 1
            "nop" "\n\t" // 1
            "lsr %[value]" "\n\t" // 1    aValue = aValue >> 1;
#if (F_CPU == 1000000) && !defined(USE_115200BAUD) // 1 MHz 38400 baud
            // 3 cycles padding to get additional 11 cycles
            "nop" "\n\t"// 1
            "nop" "\n\t"// 1
            "nop" "\n\t"// 1
            // delay4CyclesInlineExact(3); -> 13 cycles
            "ldi r30 , 0x03" "\n\t"// 1
#elif ((F_CPU == 8000000) && defined(USE_115200BAUD)) || ((F_CPU == 16000000) && !defined(USE_115200BAUD)) // 8 MHz 115200 baud OR 16 MHz 230400 baud
            // 3 cycles padding to get additional 11 cycles
            "nop" "\n\t"// 1
            "nop" "\n\t"// 1
            "nop" "\n\t"// 1
            // delay4CyclesInlineExact(14); -> 57 cycles
            "ldi r30 , 0x0E" "\n\t"// 1
#elif (F_CPU == 8000000) && !defined(USE_115200BAUD) // 8 MHz 230400 baud
            // 0 cycles padding to get additional 8 cycles
            // delay4CyclesInlineExact(6); -> 25 cycles
            "ldi r30 , 0x05" "\n\t"// 1
#elif (F_CPU == 16000000) && defined(USE_115200BAUD) // 16 MHz 115200 baud
            // 0 cycles padding to get additional 8 cycles
            //delay4CyclesInlineExact(32); -> 129 cycles
            "ldi r30 , 0x20" "\n\t"// 1
#endif
            "ldi r31 , 0x00" "\n\t" // 1
            "delay2:"
            "sbiw r30 , 0x01" "\n\t" // 2
            "brne delay2" "\n\t" // 1-2
            // }while (i > 0);
            "subi r25 , 0x01" "\n\t" // 1
            "brne txloop" "\n\t" // 1-2
            // To compensate for missing loop cycles at last bit
            "nop" "\n\t" // 1
            "nop" "\n\t" // 1
            "nop" "\n\t" // 1
            "nop" "\n\t" // 1

            // Stop bit
            "sbi %[txport] , %[txpin]" "\n\t" // 2    PORTB |= 1 << TX_PIN;
#if (F_CPU == 1000000) && !defined(USE_115200BAUD) // 1 MHz 38400 baud
            // delay4CyclesInlineExact(4); -> 17 cycles - gives minimum 25 cycles for stop bit
            "ldi r30 , 0x04" "\n\t"// 1
#elif ((F_CPU == 8000000) && defined(USE_115200BAUD)) || ((F_CPU == 16000000) && !defined(USE_115200BAUD)) // 8 MHz 115200 baud OR 16 MHz 230400 baud
            // delay4CyclesInlineExact(15) -> 61 cycles - gives minimum 69 cycles for stop bit
            "ldi r30 , 0x0F" "\n\t"// 1
#elif (F_CPU == 8000000) && !defined(USE_115200BAUD) // 8 MHz 230400 baud
            // delay4CyclesInlineExact(5) -> 27 cycles - gives minimum 35 cycles for stop bit
            "ldi r30 , 0x05" "\n\t"// 1
#elif (F_CPU == 16000000) && defined(USE_115200BAUD) // 16 MHz 115200 baud
            // delay4CyclesInlineExact(32) -> 129 cycles - gives minimum 137 cycles for stop bit
            "ldi r30 , 0x20" "\n\t"// 1
#endif
            "ldi r31 , 0x00" "\n\t" // 1
            "delay3:"
            "sbiw r30 , 0x01" "\n\t" //
            "brne delay3" "\n\t" // 1-2
            // return needs 4 cycles, load of next value needs 1 cycle, next rcall needs 3 cycles -> gives additional 8 cycles minimum for stop bit

            :
            : [value] "r" ( aValue ), [txport] "I" ( TX_PORT_ADDR ), /* 0x18 is PORTB on Attiny 85 */ [txpin] "I" ( TX_PIN )
            : "r25", "r30", "r31"
    );
}
#endif

/*
 * C Version which generates the assembler code above.
 * In order to guarantee the correct timing, compile with Arduino standard settings or:
 * avr-g++ -I"C:\arduino\hardware\arduino\avr\cores\arduino" -I"C:\arduino\hardware\arduino\avr\variants\standard" -c -g -w -Os -ffunction-sections -fdata-sections -mmcu=attiny85 -DF_CPU=1000000UL -MMD -o "TinySerialOut.o" "TinySerialOut.cpp"
 * Tested with Arduino 1.6.8 and 1.8.5/gcc4.9.2
 * C Version does not work with AVR gcc7.3.0, since optimization is too bad
 */
void write1Start8Data1StopNoParity_C_Version(uint8_t aValue) {
    /*
     * C Version here for 38400 baud at 1 MHz Clock. You see, it is simple :-)
     */
    // start bit
    TX_PORT &= ~(1 << TX_PIN);
    _NOP();
    delay4CyclesInlineExact(4);

    // 8 data bits
    uint8_t i = 8;
    do {
        if (aValue & 0x01) {
            // bit=1
            // to compensate for jump at data=0
            _NOP();
            TX_PORT |= 1 << TX_PIN;
        } else {
            // bit=0
            TX_PORT &= ~(1 << TX_PIN);
            // compensate for different cycles of sbrs
            _NOP();
            _NOP();
        }
        aValue = aValue >> 1;
        // 3 cycles padding
        _NOP();
        _NOP();
        _NOP();
        delay4CyclesInlineExact(3);
        --i;
    } while (i > 0);

    // to compensate for missing loop cycles at last bit
    _NOP();
    _NOP();
    _NOP();
    _NOP();

    // Stop bit
    TX_PORT |= 1 << TX_PIN;
    // -8 cycles to compensate for fastest repeated call (1 ret + 1 load + 1 call)
    delay4CyclesInlineExact(4); // gives minimum 25 cycles for stop bit :-)
}
#endif // defined(__AVR_ATtiny25__) || defined(__AVR_ATtiny45__) || defined(__AVR_ATtiny85__) || defined(__AVR_ATtiny87__) || defined(__AVR_ATtiny167__)
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

/*
 * Reads three integers and prints the number of balancing steps required by
 * the puzzle's rules (contest-style solution). The original branch/loop
 * logic is preserved exactly; fixes here are limited to:
 *  - adding <stdlib.h> so abs() is properly declared (it was implicitly
 *    declared before, which is invalid in C99+),
 *  - replacing the int->double->int fmax() round-trip with a plain ternary
 *    (identical result for int inputs),
 *  - removing the unused local iMin.
 */
int main(void) {
    int input[3], num[2];
    int i, j, Max, iMax, cnt = 0;

    scanf("%d", &input[0]);
    scanf("%d", &input[1]);
    scanf("%d", &input[2]);

    /* Find the largest input and remember its index. */
    if (input[0] > input[1]) {
        Max = input[0];
        iMax = 0;
    } else {
        Max = input[1];
        iMax = 1;
    }
    if (Max < input[2]) {
        Max = input[2];
        iMax = 2;
    }

    /* Collect the two non-maximum values into num[0..1]. */
    j = 0;
    for (i = 0; i <= 2; i++) {
        if (i != iMax) {
            num[j] = input[i];
            j++;
        }
    }

    if (num[0] % 2 == num[1] % 2) {
        /* The two smaller values share parity with each other. */
        int larger = (num[0] > num[1]) ? num[0] : num[1];
        printf("%d", abs(num[0] - num[1]) / 2 + Max - larger);
    } else {
        /* Parities differ; step the values up in 2s toward Max and count.
           NOTE(review): each loop condition uses the loop counter (i/j)
           while the body also mutates num[k], so num[k] overshoots Max;
           the final printf compensates with (num[k] - Max). Preserved
           as-is from the original. */
        if (Max % 2 == num[0] % 2) {
            for (i = num[1]; i <= Max; i = i + 2) {
                num[1] = num[1] + 2;
                cnt += 1;
            }
            for (j = num[0]; j < Max; j = j + 2) {
                num[0] = num[0] + 2;
                cnt += 1;
            }
            printf("%d", cnt + num[1] - Max);
        } else {
            for (i = num[0]; i <= Max; i = i + 2) {
                num[0] = num[0] + 2;
                cnt += 1;
            }
            for (j = num[1]; j < Max; j = j + 2) {
                num[1] = num[1] + 2;
                cnt += 1;
            }
            printf("%d", cnt + num[0] - Max);
        }
    }
    return 0;
}
import {Router, ActivatedRoute} from '@angular/router';
import {from, Observable, noop, forkJoin, of, concat, combineLatest, iif, Subject, throwError} from 'rxjs';
import {ExtendedOrgUser} from 'src/app/core/models/extended-org-user.model';
import {AuthService} from 'src/app/core/services/auth.service';
import {DateService} from 'src/app/core/services/date.service';
import {FormGroup, FormControl, FormArray, FormBuilder, Validators} from '@angular/forms';
import {map, tap, mergeMap, startWith, concatMap, finalize, shareReplay, switchMap, take, concatMapTo, catchError} from 'rxjs/operators';
import { Component, OnInit, ViewChild, ElementRef } from '@angular/core';
import * as moment from 'moment';
import {OrgUserService} from 'src/app/core/services/org-user.service';
import {ModalController, PopoverController} from '@ionic/angular';
import {OtherRequestsComponent} from './other-requests/other-requests.component';
import {CustomInputsService} from 'src/app/core/services/custom-inputs.service';
import {CustomFieldsService} from 'src/app/core/services/custom-fields.service';
import {TripRequestCustomFieldsService} from 'src/app/core/services/trip-request-custom-fields.service';
import {OfflineService} from 'src/app/core/services/offline.service';
import {TripRequestsService} from 'src/app/core/services/trip-requests.service';
import {LoaderService} from 'src/app/core/services/loader.service';
import {SavePopoverComponent} from './save-popover/save-popover.component';
import {CustomField} from 'src/app/core/models/custom_field.model';
import {ProjectsService} from 'src/app/core/services/projects.service';
import {PolicyViolationComponent} from './policy-violation/policy-violation.component';
import {TripRequestPolicyService} from 'src/app/core/services/trip-request-policy.service';
import {StatusService} from '../../core/services/status.service';
import { Employee } from 'src/app/core/models/employee.model';

/**
 * Add/edit page for trip requests.
 *
 * Operates in two modes, decided in ngOnInit from the presence of a route
 * `id` param: 'edit' (loads an existing trip request and pre-fills the form)
 * or 'add' (fresh form, logged-in user added as the first traveller).
 * Saving/submitting runs the trip through a policy check and may show a
 * policy-violation modal before persisting.
 *
 * NOTE(review): several identifiers carry typos from the original code
 * (setDefaultStarrtDate, intializeDefaults, intialCity, intialTraveler,
 * renderedCustomFeild). They are kept as-is here because renaming is a
 * behavior-neutral but file-wide change; fix in a dedicated rename commit.
 */
@Component({
  selector: 'app-my-add-edit-trip',
  templateUrl: './my-add-edit-trip.page.html',
  styleUrls: ['./my-add-edit-trip.page.scss'],
})
export class MyAddEditTripPage implements OnInit {
  // allowedProjectIds$: Observable<any>;
  eou$: Observable<ExtendedOrgUser>;           // logged-in extended org user
  tripTypes = [];                              // options for the trip-type select
  tripDate;                                    // min/max bounds for trip date pickers
  hotelDate;                                   // min/max bounds for hotel date pickers
  tripActions;                                 // permissions (can_save / can_submit)
  mode;                                        // 'add' | 'edit'
  minDate;
  maxDate;
  today;
  isTripTypeMultiCity$: Observable<boolean>;
  isTripTypeOneWay$: Observable<boolean>;
  isTransportationRequested$: Observable<boolean>;
  isHotelRequested$: Observable<boolean>;
  isAdvanceRequested$: Observable<boolean>;
  isTransportationEnabled$: Observable<boolean>;
  isHotelEnabled$: Observable<boolean>;
  isAdvanceEnabled$: Observable<boolean>;
  travelAgents$: Observable<Employee[]>;
  customFields$: Observable<any>;
  isProjectsEnabled$: Observable<boolean>;
  projects$: Observable<[]>;
  tripRequest$: Observable<any>;
  customFieldValues;                           // last-known custom field values (id/name/value)
  refreshTrips$ = new Subject();               // pinged to re-fetch custom fields
  isTransportationRequestAlreadyAdded: boolean;
  isHotelRequestAlreadyAdded: boolean;
  isAdvanceRequestAlreadyAdded: boolean;
  saveTripAsDraftLoading = false;              // spinner flag for "save draft"
  submitTripLoading = false;                   // spinner flag for "submit"
  @ViewChild('formContainer') formContainer: ElementRef;

  constructor(
    private router: Router,
    private authService: AuthService,
    private dateService: DateService,
    private activatedRoute: ActivatedRoute,
    private formBuilder: FormBuilder,
    private orgUserService: OrgUserService,
    private modalController: ModalController,
    private tripRequestCustomFieldsService: TripRequestCustomFieldsService,
    private offlineService: OfflineService,
    private tripRequestsService: TripRequestsService,
    private loaderService: LoaderService,
    private popoverController: PopoverController,
    private projectsService: ProjectsService,
    private tripRequestPolicyService: TripRequestPolicyService,
    private statusService: StatusService
  ) { }

  fg: FormGroup;

  /** Shows a "discard changes?" popover; on confirm, resets the form and navigates back to the trip list. */
  async goBack() {
    const addExpensePopover = await this.popoverController.create({
      component: SavePopoverComponent,
      componentProps: {
        saveMode: 'CLOSE',
      },
      cssClass: 'dialog-popover'
    });
    await addExpensePopover.present();
    const {data} = await addExpensePopover.onDidDismiss();
    if (data && data.continue) {
      this.fg.reset();
      this.router.navigate(['/', 'enterprise', 'my_trips']);
    }
  }

  /** Appends one traveller row (name + phone) to the travellerDetails FormArray. */
  setTripRequestObject(name, mobile) {
    const intialTraveler = this.formBuilder.group({
      name: [name],
      phone_number: [mobile]
    });
    this.travellerDetails.push(intialTraveler);
  }

  /** Removes the traveller row at index i. */
  removeTraveller(i) {
    this.travellerDetails.removeAt(i);
  }

  /** Removes the city row at index i. */
  removeCity(i) {
    this.cities.removeAt(i);
  }

  /** Appends an empty traveller row. */
  addNewTraveller() {
    const intialTraveler = this.formBuilder.group({
      name: [null],
      phone_number: [null]
    });
    this.travellerDetails.push(intialTraveler);
  }

  get travellerDetails() {
    return this.fg.get('travellerDetails') as FormArray;
  }

  /**
   * Validates the form and, after a confirmation popover, submits the trip
   * request (custom field values are snapshotted from customFields$ first).
   */
  async onSubmit() {
    const addExpensePopover = await this.popoverController.create({
      component: SavePopoverComponent,
      componentProps: {
        saveMode: 'SUBMIT',
        otherRequests: [
          {hotel: this.fg.get('hotelRequest').value || false},
          {transportation: this.fg.get('transportationRequest').value || false}
        ]
      },
      cssClass: 'dialog-popover'
    });
    if (this.fg.valid) {
      if (this.validateDates()) {
        this.scrollToError();
        return false;
      }
      // NOTE(review): these are 'y-MM-DD' strings, so >= here is a
      // lexicographic compare — it works only because the format is
      // zero-padded year-first.
      if (!(this.fg.controls.endDate.value >= this.fg.controls.startDate.value)) {
        this.fg.markAllAsTouched();
        const formContainer = this.formContainer.nativeElement as HTMLElement;
        if (formContainer) {
          const invalidElement = formContainer.querySelector('.ng-invalid');
          if (invalidElement) {
            invalidElement.scrollIntoView({
              behavior: 'smooth'
            });
          }
        }
        return;
      } else {
        await addExpensePopover.present();
        const {data} = await addExpensePopover.onDidDismiss();
        if (data && data.continue) {
          this.customFields$.pipe(
            take(1)
          ).subscribe(customFields => {
            this.fg.value.custom_field_values = customFields.map(field => {
              return field.control.value;
            });
            this.submitTripRequest(this.fg.value);
          });
        }
      }
    } else {
      this.fg.markAllAsTouched();
      const formContainer = this.formContainer.nativeElement as HTMLElement;
      if (formContainer) {
        const invalidElement = formContainer.querySelector('.ng-invalid');
        if (invalidElement) {
          invalidElement.scrollIntoView({
            behavior: 'smooth'
          });
        }
      }
    }
  }

  /**
   * Cross-field date validation per trip type. Returns true (and marks the
   * offending control with an 'incorrect' error) when any ordering rule is
   * violated; returns undefined/false otherwise.
   */
  validateDates() {
    if (this.tripType === 'MULTI_CITY') {
      // Each leg must depart on/after the trip start and before the next leg.
      return this.cities.value.some((city, index) => {
        if (index === 0) {
          if (!(city.onward_dt >= this.startDate.value)) {
            this.cities.controls[0]['controls'].onward_dt.setErrors({'incorrect': true});
            return true;
          }
        } else if ((index + 1) < this.cities.value.length) {
          if (!(city.onward_dt <= this.cities.value[index + 1].onward_dt)) {
            this.cities.controls[index + 1]['controls'].onward_dt.setErrors({'incorrect': true});
            return true;
          }
        }
      });
    }
    if (this.tripType === 'ROUND') {
      if (!(this.cities.controls[0].value.onward_dt < this.cities.controls[0].value.return_date)) {
        this.cities.controls[0]['controls'].return_date.setErrors({'incorrect': true});
        return true;
      }
    }
    if (this.tripType === 'ONE_WAY') {
      if (!(this.fg.controls.endDate.value >= this.fg.controls.startDate.value)) {
        return true;
      }
    }
  }

  /** Scrolls the first invalid control inside the form container into view. */
  scrollToError() {
    const formContainer = this.formContainer.nativeElement as HTMLElement;
    if (formContainer) {
      const invalidElement = formContainer.querySelector('.ng-invalid');
      if (invalidElement) {
        invalidElement.scrollIntoView({
          behavior: 'smooth'
        });
      }
    }
  }

  /** Validates the form and, after a confirmation popover, saves the trip as a draft. */
  async saveDraftModal() {
    const savePopover = await this.popoverController.create({
      component: SavePopoverComponent,
      componentProps: {
        saveMode: 'DRAFT'
      },
      cssClass: 'dialog-popover'
    });
    if (this.fg.valid) {
      if (this.validateDates()) {
        this.scrollToError();
        return false;
      }
      if (!(this.fg.controls.endDate.value >= this.fg.controls.startDate.value)) {
        this.fg.markAllAsTouched();
        this.scrollToError();
        return;
      } else {
        await savePopover.present();
        const {data} = await savePopover.onDidDismiss();
        if (data && data.continue) {
          this.saveAsDraft(this.fg.value);
        }
      }
    } else {
      this.fg.markAllAsTouched();
      this.scrollToError();
    }
  }

  /**
   * Shows the policy-violation modal with the latest reviewer comment.
   * Resolves to {status: 'proceed', comment} if the user chooses to continue,
   * or {status: 'stop'} if the modal is dismissed without data.
   */
  async showPolicyViolationPopup(policyPopupRules: any [], policyActionDescription: string, tripReq) {
    const latestComment = await this.statusService.findLatestComment(tripReq.trp.id, 'trip_requests', tripReq.trp.org_user_id).toPromise();
    const policyViolationsModal = await this.modalController.create({
      component: PolicyViolationComponent,
      componentProps: {
        policyViolationMessages: policyPopupRules,
        policyActionDescription,
        comment: latestComment
      }
    });
    await policyViolationsModal.present();
    const { data } = await policyViolationsModal.onWillDismiss();
    if (data) {
      return {
        status: 'proceed',
        comment: data.comment
      };
    } else {
      return {
        status: 'stop'
      };
    }
  }

  /**
   * Saves the form as a draft trip request: builds the payload, runs the
   * policy test (violations may abort via the popup), persists the draft,
   * posts the user's comment if it changed, triggers a policy check, then
   * resets the form and navigates back to the trip list.
   */
  saveAsDraft(formValue) {
    this.saveTripAsDraftLoading = true;
    this.makeTrpFormFromFg(formValue).pipe(
      switchMap((tripReq) => {
        const tripRequestObject = {
          trip_request: tripReq,
          advance_requests: [],
          transportation_requests: [],
          hotel_requests: []
        };
        // Policy check failures (network etc.) are swallowed and treated as
        // "no violations" via catchError(_ => of(null)).
        return this.tripRequestPolicyService.testTripRequest(tripRequestObject).pipe(
          catchError(_ => of(null)),
          switchMap((res: any) => {
            const policyPopupRules = this.tripRequestPolicyService.getPolicyPopupRules(res);
            if (policyPopupRules.length > 0) {
              const policyActionDescription = res.trip_request_desired_state.action_description;
              return from(this.showPolicyViolationPopup(
                policyPopupRules,
                policyActionDescription,
                tripReq
              )).pipe(
                switchMap(policyModalRes => {
                  if (policyModalRes.status === 'proceed') {
                    return of({
                      tripReq,
                      comment: policyModalRes.comment
                    });
                  } else {
                    return throwError({
                      status: 'Policy Violated'
                    });
                  }
                })
              );
            } else {
              return of({tripReq});
            }
          }),
          catchError((err) => {
            if (err.status === 'Policy Violated') {
              return throwError({
                status: 'Policy Violated'
              });
            } else {
              return of({tripReq});
            }
          })
        );
      }),
      switchMap(({ tripReq, comment }: any) => {
        if (comment && tripReq.id) {
          return this.tripRequestsService.saveDraft(tripReq).pipe(
            switchMap((res) => {
              // Only post the comment if it differs from the latest one.
              return this.statusService.findLatestComment(tripReq.trp.id, 'trip_requests', tripReq.trp.org_user_id).pipe(
                switchMap(result => {
                  if (result !== comment) {
                    return this.statusService.post('trip_requests', tripReq.trp.id, {comment}, true).pipe(
                      map(() => res)
                    );
                  } else {
                    return of(res);
                  }
                })
              );
            })
          );
        } else {
          return this.tripRequestsService.saveDraft(tripReq);
        }
      }),
      switchMap(res => {
        return this.tripRequestsService.triggerPolicyCheck(res.id);
      }),
      finalize(() => {
        this.saveTripAsDraftLoading = false;
        this.fg.reset();
        this.router.navigate(['/', 'enterprise', 'my_trips']);
      })
    ).subscribe(() => {
      this.fg.reset();
      this.router.navigate(['/', 'enterprise', 'my_trips']);
    });
  }

  /**
   * Builds the trip-request payload from the form value. In edit mode the
   * payload is the existing trip request overlaid with the form fields;
   * in add mode it is built from the form alone. Always returns an Observable.
   */
  makeTrpFormFromFg(formValue) {
    if (this.mode === 'edit') {
      return forkJoin({
        tripRequest: this.tripRequest$
      }).pipe(
        map(res => {
          const tripRequest: any = res.tripRequest;
          const trp = {
            ...tripRequest,
            custom_field_values: formValue.custom_field_values,
            end_dt: formValue.endDate,
            notes: formValue.notes,
            project_id: formValue.project && formValue.project.project_id || null,
            purpose: formValue.purpose,
            source: formValue.source,
            start_dt: formValue.startDate,
            traveller_details: formValue.travellerDetails,
            trip_cities: formValue.cities,
            trip_type: formValue.tripType
          };
          return trp;
        })
      );
    } else {
      const trp = {
        custom_field_values: formValue.custom_field_values,
        end_dt: formValue.endDate,
        notes: formValue.notes,
        project_id: formValue.project && formValue.project.project_id || null,
        purpose: formValue.purpose,
        source: formValue.source,
        start_dt: formValue.startDate,
        traveller_details: formValue.travellerDetails,
        trip_cities: formValue.cities,
        trip_type: formValue.tripType
      };
      return of(trp);
    }
  }

  /**
   * Submits the trip request. Same pipeline shape as saveAsDraft(), but
   * persists via submit() and toggles submitTripLoading instead.
   */
  submitTripRequest(formValue) {
    this.submitTripLoading = true;
    this.makeTrpFormFromFg(formValue).pipe(
      switchMap((tripReq) => {
        const tripRequestObject = {
          trip_request: tripReq,
          advance_requests: [],
          transportation_requests: [],
          hotel_requests: []
        };
        return this.tripRequestPolicyService.testTripRequest(tripRequestObject).pipe(
          catchError(_ => of(null)),
          switchMap((res: any) => {
            const policyPopupRules = this.tripRequestPolicyService.getPolicyPopupRules(res);
            if (policyPopupRules.length > 0) {
              const policyActionDescription = res.trip_request_desired_state.action_description;
              return from(this.showPolicyViolationPopup(
                policyPopupRules,
                policyActionDescription,
                tripReq
              )).pipe(
                switchMap(policyModalRes => {
                  if (policyModalRes.status === 'proceed') {
                    return of({
                      tripReq,
                      comment: policyModalRes.comment
                    });
                  } else {
                    return throwError({
                      status: 'Policy Violated'
                    });
                  }
                })
              );
            } else {
              return of({tripReq});
            }
          }),
          catchError((err) => {
            if (err.status === 'Policy Violated') {
              return throwError({
                status: 'Policy Violated'
              });
            } else {
              return of({tripReq});
            }
          })
        );
      }),
      switchMap(({ tripReq, comment }: any) => {
        if (comment && tripReq.id) {
          return this.tripRequestsService.submit(tripReq).pipe(
            switchMap((res) => {
              return this.statusService.findLatestComment(tripReq.trp.id, 'trip_requests', tripReq.trp.org_user_id).pipe(
                switchMap(result => {
                  if (result !== comment) {
                    return this.statusService.post('trip_requests', tripReq.trp.id, {comment}, true).pipe(
                      map(() => res)
                    );
                  } else {
                    return of(res);
                  }
                })
              );
            })
          );
        } else {
          return this.tripRequestsService.submit(tripReq);
        }
      }),
      switchMap(res => {
        return this.tripRequestsService.triggerPolicyCheck(res.id);
      }),
      finalize(() => {
        this.submitTripLoading = false;
        this.fg.reset();
        this.router.navigate(['/', 'enterprise', 'my_trips']);
      })
    ).subscribe(() => {
      this.fg.reset();
      this.router.navigate(['/', 'enterprise', 'my_trips']);
    });
  }

  get startDate() {
    return this.fg.get('startDate') as FormControl;
  }

  get endDate() {
    return this.fg.get('endDate') as FormControl;
  }

  /** Sets startDate to today (formatted 'y-MM-DD'). */
  setDefaultStarrtDate() {
    this.today = new Date();
    this.startDate.setValue(moment(this.today).format('y-MM-DD'));
  }

  get cities() {
    return this.fg.get('cities') as FormArray;
  }

  get tripType() {
    return this.fg.get('tripType').value;
  }

  /**
   * Appends a city leg; from_city is pre-filled with the previous leg's
   * destination, and minDate tracks the previous leg's departure.
   */
  addDefaultCity() {
    let toCity;
    if (this.cities.value.length >= 1) {
      toCity = this.cities.controls[this.cities.value.length - 1].value.to_city;
      this.minDate = this.cities.controls[this.cities.value.length - 1].value.onward_dt;
    }
    const intialCity = this.formBuilder.group({
      from_city: [toCity, Validators.required],
      to_city: [null, Validators.required],
      onward_dt: [, Validators.required]
    });
    if (this.fg.controls.tripType.value === 'ROUND') {
      intialCity.addControl('return_date', new FormControl('', Validators.required));
    }
    this.cities.push(intialCity);
  }

  addNewCity() {
    this.addDefaultCity();
  }

  /** Seeds the form: today's start date plus one empty city leg. */
  intializeDefaults() {
    this.setDefaultStarrtDate();
    this.addDefaultCity();
  }

  /** Validates the form, then opens the "other requests" (hotel/advance/transport) modal. */
  async openModal() {
    const modal = await this.modalController.create({
      component: OtherRequestsComponent,
      componentProps: {
        otherRequests: [
          {
            hotel: this.fg.get('hotelRequest').value || false
          },
          {
            advance: this.fg.get('advanceRequest').value || false
          },
          {
            transportation: this.fg.get('transportationRequest').value || false
          }
        ],
        fgValues: this.fg.value,
        id: this.activatedRoute.snapshot.params.id || null
      }
    });
    if (this.fg.valid) {
      if (this.validateDates()) {
        this.scrollToError();
        return false;
      }
      if (!(this.fg.controls.endDate.value >= this.fg.controls.startDate.value)) {
        this.fg.markAllAsTouched();
        const formContainer = this.formContainer.nativeElement as HTMLElement;
        if (formContainer) {
          const invalidElement = formContainer.querySelector('.ng-invalid');
          if (invalidElement) {
            invalidElement.scrollIntoView({
              behavior: 'smooth'
            });
          }
        }
        return;
      } else {
        return await modal.present();
      }
    } else {
      this.fg.markAllAsTouched();
      const formContainer = this.formContainer.nativeElement as HTMLElement;
      if (formContainer) {
        const invalidElement = formContainer.querySelector('.ng-invalid');
        if (invalidElement) {
          invalidElement.scrollIntoView({
            behavior: 'smooth'
          });
        }
      }
    }
  }

  /**
   * Normalizes custom field values for display: sorts by id, reformats DATE
   * values to 'YYYY-M-D', and keeps only {id, name, value}. Also caches the
   * result on this.customFieldValues.
   */
  modifyTripRequestCustomFields(customFields): CustomField[] {
    if (customFields.length === 0) {
      return [];
    }
    customFields = customFields.sort((a, b) => (a.id > b.id) ? 1 : -1);
    customFields = customFields.map(customField => {
      if (customField.type === 'DATE' && customField.value) {
        const updatedDate = new Date(customField.value);
        customField.value = updatedDate.getFullYear() + '-' + (updatedDate.getMonth() + 1) + '-' + updatedDate.getDate();
      }
      return {id: customField.id, name: customField.name, value: customField.value};
    });
    this.customFieldValues = customFields;
    return this.customFieldValues;
  }

  /**
   * Builds the form, wires all derived observables, and loads an existing
   * trip request when a route `id` param is present (edit mode).
   */
  ngOnInit() {
    const id = this.activatedRoute.snapshot.params.id;
    const orgSettings$ = this.offlineService.getOrgSettings();
    this.customFieldValues = [];
    this.tripTypes = [
      {
        value: 'ONE_WAY',
        label: 'One Way'
      }, {
        value: 'ROUND',
        label: 'Round Trip'
      }, {
        value: 'MULTI_CITY',
        label: 'Multi City'
      }
    ];
    // Date-picker bounds default to yesterday so "today" is selectable.
    this.tripDate = {
      startMin: moment(this.dateService.addDaysToDate(new Date(), -1)).format('y-MM-DD'),
      endMin: moment(this.dateService.addDaysToDate(new Date(), -1)).format('y-MM-DD'),
      departMin: moment(this.dateService.addDaysToDate(new Date(), -1)).format('y-MM-DD'),
      departMax: moment(this.dateService.addDaysToDate(new Date(), -1)).format('y-MM-DD')
    };
    this.hotelDate = {
      checkInMin: moment(this.dateService.addDaysToDate(new Date(), -1)).format('y-MM-DD'),
      checkInMax: moment(this.dateService.addDaysToDate(new Date(), -1)).format('y-MM-DD'),
      checkOutMin: moment(this.dateService.addDaysToDate(new Date(), -1)).format('y-MM-DD'),
    };
    this.minDate = moment(new Date()).format('y-MM-DD');
    this.fg = new FormGroup({
      travellerDetails: new FormArray([]),
      tripType: new FormControl('ONE_WAY', [Validators.required]),
      startDate: new FormControl('', [Validators.required]),
      endDate: new FormControl('', [Validators.required]),
      purpose: new FormControl('', [Validators.required]),
      cities: new FormArray([]),
      project: new FormControl('', []),
      travelAgent: new FormControl('', []),
      notes: new FormControl('', []),
      transportationRequest: new FormControl('', []),
      hotelRequest: new FormControl('', []),
      advanceRequest: new FormControl('', []),
      source: new FormControl('MOBILE', []),
      custom_field_values: new FormArray([])
    });
    this.maxDate = this.fg.controls.endDate.value;
    // Re-built whenever refreshTrips$ fires; filters fields to the current
    // trip type and mirrors them into the custom_field_values FormArray.
    this.customFields$ = this.refreshTrips$.pipe(
      startWith(0),
      concatMap(() => {
        return this.tripRequestCustomFieldsService.getAll();
      }),
      map((customFields: any[]) => {
        const customFieldsFormArray = this.fg.controls.custom_field_values as FormArray;
        customFieldsFormArray.clear();
        customFields = customFields.sort((a, b) => (a.id > b.id) ? 1 : -1);
        customFields = customFields.filter(field => {
          return field.request_type === 'TRIP_REQUEST' && field.trip_type.indexOf(this.fg.get('tripType').value) > -1;
        });
        for (const customField of customFields) {
          let value;
          // Carry over a previously captured value for this field, if any.
          this.customFieldValues.filter(customFieldValue => {
            if (customFieldValue.id === customField.id) {
              value = customFieldValue.value;
            }
          });
          customFieldsFormArray.push(
            this.formBuilder.group({
              id: customField.id,
              name: customField.input_name,
              value: [value, customField.mandatory && Validators.required]
            })
          );
        }
        return customFields.map((customField, i) => {
          customField.control = customFieldsFormArray.at(i);
          if (customField.input_options) {
            customField.options = customField.input_options.map(option => {
              return {label: option, value: option};
            });
          }
          return customField;
        });
      }),
      shareReplay(1)
    );
    this.intializeDefaults();
    if (id) {
      // Edit mode: load the trip request and pre-fill every control.
      this.mode = 'edit';
      this.tripRequest$ = this.tripRequestsService.get(id);
      const selectedProject$ = this.tripRequest$.pipe(
        switchMap(trip => {
          return iif(() => trip.project_id, this.projectsService.getbyId(trip.project_id), of(null));
        })
      );
      from(this.loaderService.showLoader('Getting trip details')).pipe(
        switchMap(() => {
          return combineLatest([
            this.tripRequest$,
            selectedProject$,
            this.tripRequestsService.getHotelRequests(id),
            this.tripRequestsService.getTransportationRequests(id),
            this.tripRequestsService.getAdvanceRequests(id),
            this.tripRequestsService.getActions(id)
          ]);
        }),
        take(1),
        map(([tripRequest, selectedProject, hotelRequest, transportRequest, advanceRequest, actions]) => {
          this.tripActions = actions;
          tripRequest.traveller_details.forEach(traveller => {
            this.setTripRequestObject(traveller.name, traveller.phone_number);
          });
          this.tripDate.startMin = moment(this.dateService.addDaysToDate(new Date(tripRequest.start_date), -1)).format('y-MM-DD');
          this.tripDate.endMin = this.tripDate.startMin;
          this.tripDate.departMin = moment(this.dateService.addDaysToDate(new Date(tripRequest.start_date), -1)).format('y-MM-DD');
          this.tripDate.departMax = moment(tripRequest.end_date).format('y-MM-DD');
          this.hotelDate.checkInMin = moment(this.dateService.addDaysToDate(new Date(tripRequest.start_date), -1)).format('y-MM-DD');
          this.hotelDate.checkInMax = moment(tripRequest.end_date).format('y-MM-DD');
          this.hotelDate.checkOutMin = moment(tripRequest.end_date).format('y-MM-DD');
          this.fg.get('tripType').setValue(tripRequest.trip_type);
          this.fg.get('startDate').setValue(moment(tripRequest.start_date).format('y-MM-DD'));
          this.fg.get('endDate').setValue(moment(tripRequest.end_date).format('y-MM-DD'));
          this.fg.get('purpose').setValue(tripRequest.purpose);
          this.fg.get('project').setValue(selectedProject);
          this.fg.get('travelAgent').setValue((transportRequest[0] && transportRequest[0].tr.assigned_to) || null);
          this.fg.get('notes').setValue(tripRequest.notes);
          this.fg.get('source').setValue(tripRequest.source);
          const custom = this.fg.get('custom_field_values') as FormArray;
          custom.clear();
          const renderedCustomFeild = this.modifyTripRequestCustomFields(tripRequest.custom_field_values);
          renderedCustomFeild.forEach(field => {
            const customFields = this.formBuilder.group({
              id: [field.id],
              name: [field.name],
              value: [field.value]
            });
            custom.push(customFields);
          });
          this.cities.clear();
          tripRequest.trip_cities.forEach(tripCity => {
            const intialCity = this.formBuilder.group({
              from_city: [tripCity.from_city, Validators.required],
              to_city: [tripCity.to_city, Validators.required],
              onward_dt: [tripCity.onward_dt ? moment(tripCity.onward_dt).format('y-MM-DD') : null, Validators.required]
            });
            if (this.fg.controls.tripType.value === 'ROUND') {
              intialCity.addControl('return_date', new FormControl(moment(tripCity.return_dt).format('y-MM-DD'), Validators.required));
            }
            this.cities.push(intialCity);
          });
          this.fg.get('transportationRequest').setValue(transportRequest.length > 0 ? true : false);
          this.fg.get('hotelRequest').setValue(hotelRequest.length > 0 ? true : false);
          this.fg.get('advanceRequest').setValue(advanceRequest.length > 0 ? true : false);
          this.isTransportationRequestAlreadyAdded = transportRequest.length > 0;
          this.isHotelRequestAlreadyAdded = hotelRequest.length > 0;
          this.isAdvanceRequestAlreadyAdded = advanceRequest.length > 0;
        }),
        finalize(() => this.loaderService.hideLoader())
      ).subscribe(noop);
    } else {
      this.mode = 'add';
      this.tripActions = {
        can_save: true,
        can_submit: true
      };
      this.refreshTrips$.next();
    }
    this.eou$ = from(this.authService.getEou());
    this.travelAgents$ = this.orgUserService.getEmployees({
      ou_roles: 'like.%TRAVEL_AGENT%'
    }).pipe(
      map(employees => {
        const travelAgents = [];
        employees.forEach(employee => {
          travelAgents.push({
            label: employee.us_full_name + '(' + employee.us_email + ')',
            value: employee.ou_id
          });
        });
        return travelAgents;
      })
    );
    if (this.mode === 'add') {
      // New trips default to the logged-in user as the first traveller.
      this.eou$.subscribe(res => {
        this.setTripRequestObject(res.us.full_name, res.ou.mobile);
      });
    }
    this.isTripTypeMultiCity$ = this.fg.controls.tripType.valueChanges.pipe(
      map(res => res === 'MULTI_CITY')
    );
    this.isTripTypeOneWay$ = this.fg.controls.tripType.valueChanges.pipe(
      map(res => res === 'ONE_WAY')
    );
    // Switching to multi-city keeps leg 1 and adds an empty second leg;
    // switching away collapses back down to a single leg.
    this.isTripTypeMultiCity$.subscribe(isMulticity => {
      if (isMulticity) {
        const firstCity = this.cities.value[0];
        this.cities.clear();
        const intialCity = this.formBuilder.group({
          from_city: [firstCity.from_city, Validators.required],
          to_city: [firstCity.to_city, Validators.required],
          onward_dt: [firstCity.onward_dt, Validators.required]
        });
        this.cities.push(intialCity);
        this.addDefaultCity();
      } else {
        const firstCity = this.cities.at(0);
        this.cities.clear();
        this.cities.push(firstCity);
      }
    });
    this.isTransportationRequested$ = this.fg.controls.transportationRequest.valueChanges.pipe(
      map(res => {
        return res;
      })
    );
    this.isHotelRequested$ = this.fg.controls.hotelRequest.valueChanges.pipe(
      map(res => {
        return res;
      })
    );
    this.isAdvanceRequested$ = this.fg.controls.advanceRequest.valueChanges.pipe(
      map(res => {
        return res;
      })
    );
    this.isProjectsEnabled$ = orgSettings$.pipe(
      map(orgSettings => {
        return orgSettings.projects && orgSettings.projects.enabled;
      })
    );
    this.projects$ = this.offlineService.getProjects();
    this.isAdvanceEnabled$ = orgSettings$.pipe(
      map(orgSettings => {
        return orgSettings.advance_requests.enabled;
      })
    );
    this.isTransportationEnabled$ = orgSettings$.pipe(
      map(orgSettings => {
        return orgSettings.trip_requests.enabled_transportation_requests;
      })
    );
    this.isHotelEnabled$ = orgSettings$.pipe(
      map(orgSettings => {
        return orgSettings.trip_requests.enabled_hotel_requests;
      })
    );
    // Trip-type changes refresh the custom fields and rebuild leg 1 with or
    // without the return_date control.
    this.fg.controls.tripType.valueChanges.subscribe(res => {
      this.refreshTrips$.next();
      if (res === 'ROUND') {
        const firstCity = this.cities.value[0];
        this.cities.clear();
        const intialCity = this.formBuilder.group({
          from_city: [firstCity.from_city, Validators.required],
          to_city: [firstCity.to_city, Validators.required],
          onward_dt: [firstCity.onward_dt, Validators.required],
          return_date: [null, Validators.required]
        });
        this.cities.push(intialCity);
      }
      if (res === 'ONE_WAY') {
        const firstCity = this.cities.value[0];
        this.cities.clear();
        const intialCity = this.formBuilder.group({
          from_city: [firstCity.from_city, Validators.required],
          to_city: [firstCity.to_city, Validators.required],
          onward_dt: [firstCity.onward_dt, Validators.required]
        });
        this.cities.push(intialCity);
      }
    });
    this.fg.valueChanges.subscribe(formValue => {
      // removing errors after fields value are touched
      this.cities.value.forEach((city, index) => {
        let errors = this.cities.controls[index]['controls'].onward_dt.errors;
        if (errors) {
          delete errors.incorrect;
          this.cities.controls[index]['controls'].onward_dt.setErrors(errors);
        }
      });
      if (this.tripType === 'ROUND' && this.cities.controls.length && this.cities.controls[0]['controls'].return_date) {
        let errors = this.cities.controls[0]['controls'].return_date.errors;
        if (errors) {
          delete errors.incorrect;
          this.cities.controls[0]['controls'].return_date.setErrors(errors);
        }
      }
      if (formValue.tripType === 'MULTI_CITY') {
        if (formValue.cities.length > 1) {
          this.minDate = formValue.cities[formValue.cities.length - 2].onward_dt;
        }
      }
    });
    this.refreshTrips$.next();
  }
}
from rest_framework import serializers

from .models import confrence


class confrenceserializer(serializers.ModelSerializer):
    """Serializes ``confrence`` model instances for the REST API.

    NOTE(review): "confrence"/"venu" are misspellings carried over from the
    model definition in .models; the names must stay in sync with the model's
    fields, so they are intentionally not corrected here.
    """

    class Meta:
        # Field names must match the model's attribute names exactly.
        model = confrence
        fields = (
            'confrence_ID',
            'date',
            'venu',
            'image',
            'confrence_Overview',
            'register',
            'travel_information',
        )
// GetSession gets a goboots session.
// It loads the session row for sid and, as a side effect, escalates the
// session's short-lived expiry one step per fetch:
// 30 minutes -> 5 hours -> 5 days -> 5 months.
func (m *MysqlDBSession) GetSession(sid string) (*goboots.Session, error) {
	db, err := m.w.db()
	if err != nil {
		return nil, err
	}
	var stime time.Time
	var updated time.Time
	data := make([]byte, 0)
	var shortexpires time.Time
	var shortcount uint8
	err = db.QueryRowx("SELECT time, updated, data, shortexpires, shortcount FROM goboots_sessid WHERE sid=?", sid).Scan(&stime, &updated, &data, &shortexpires, &shortcount)
	if err != nil {
		// No row (or scan failure) means no session.
		return nil, err
	}
	// Bump the expiry ladder based on how many times this session was fetched.
	// NOTE(review): Exec errors are silently ignored (best-effort update), and
	// shortexpires is scanned but never checked against NOW() — confirm both
	// are intentional.
	switch shortcount {
	case 0:
		db.Exec("UPDATE goboots_sessid SET shortcount=1, shortexpires = DATE_ADD(NOW(), INTERVAL 30 MINUTE) WHERE sid=?", sid)
	case 1:
		db.Exec("UPDATE goboots_sessid SET shortcount=2, shortexpires = DATE_ADD(NOW(), INTERVAL 5 HOUR) WHERE sid=?", sid)
	case 2:
		db.Exec("UPDATE goboots_sessid SET shortcount=3, shortexpires = DATE_ADD(NOW(), INTERVAL 5 DAY) WHERE sid=?", sid)
	case 3:
		db.Exec("UPDATE goboots_sessid SET shortcount=4, shortexpires = DATE_ADD(NOW(), INTERVAL 5 MONTH) WHERE sid=?", sid)
	}
	ses := &goboots.Session{}
	ses.SID = sid
	ses.Time = stime
	ses.Updated = updated
	// umshl presumably decodes the stored blob into the session data map —
	// TODO confirm against its definition.
	ses.Data = umshl(data)
	return ses, nil
}
UPDATE : In an interview with Business Times dated 28 June, Ramsay's business partner Stuart Gillies disclosed that they will be opening their first restaurant in Singapore early next year at The Shoppes at Marina Bay Sands. Our sources further reveal the location is likely to be at the soon-to-close Moluccas Room. Celebrity chef Gordon Ramsay has unveiled he is making major moves to expand his restaurant empire into Asia, with the opening of his first restaurant in Hong Kong by this fall. The Gordon Ramsay Group already has 24 establishments across Europe, the US and the Middle East, boasting 7 Michelin stars amongst its accolades.The British European-themed eatery Bread Street Kitchen from London will open in Hong Kong in September, and will feature a warehouse-style design that mixes vintage and contemporary furnishings.
Festivals exist for music-lovers to let their hair down, ignore their work inbox for a weekend and just cut loose while shaking to some class acts. It’s easy to overlook just how much of an environmental impact large festivals have on their surrounding area, and this is one of the many reasons Liftshare exists. Not only does sharing lifts together cut down on all the emissions used to get to a particular place – such as festivals, it’s also saving people a great deal of cash. We’re always on the look out for new festivals to work with, such as our friends at Willowman Festival, and it’s clear that there are many event organisers out there looking for new ways to cut down their festival’s footprint. Following on from his superb 2015 Festival Preview, we reached out once more to Kes, editor at Festival Mag, to discuss the true environmental impact of music festivals, how many organisers are working to solve the green problem, and to namedrop some of the impressive eco-friendly events out there today. Liftshare: In your experience just how much of an impact do festivals have on the environment? It’s clear that they do but we think – understandably, that sometimes people overlook it completely. Kes: According to research carried out by the Environmental Change Institute at Oxford University, which was commissioned by Julie’s Bicycle, the UK music market is responsible for some 540,000 tonnes CO2e per annum, with around 5 per cent coming from festivals. However, this research was carried out back in 2006 when the UK festival industry entered the current boom period, so in my opinion this number could be even higher today. Why do some individuals overlook this? Some people view festivals as singular events therefore do not realise the impact they create. What they often fail to realise is that there are actually over 900 festivals annually in the UK alone, and collectively the impact from all the waste, power usage and traffic emissions is clearly significant. 
Thankfully many in the festival industry put the care of our planet at the heart of everything they do, and are working hard to dramatically reduce their CO2e emissions. This isn’t surprising when you consider that much of the festival scene we see today was born out of the peace movements of the 60s, 70s and 80s, which was instrumental in raising green issues at the time and is responsible for much of the environmental awareness around today. Some people from this era are still involved in the industry today and many still share this ecological ethos. When it comes to sustainability the festival industry is probably one of the most proactive and vocal industries going and one could argue that as a society, we’re better off for festivals. Liftshare: We’ve spoken to a few people who have been clean up stewards at festivals in the past, and from the sound of it this is a really telling experience about just how much waste is left behind after an event. Have you had any insight or experience into how this clean-up process takes place, and to what extent recycling comes into it? Kes: Most festivals provide recycling bins and bags, but unfortunately when people are in high spirits not everything always makes to these depositories and is left for the clean-up stewards to clear away after the event. This process can take anywhere between a few weeks to a couple of months for the bigger festivals. Unfortunately recycling comes with a cost, which means a reduced festival budget for artists and production. This is especially a problem for the smaller festivals where the margins are tighter. If fans want to keep seeing the biggest bands together with lower ticket prices then they should do their bit, and make more use of the on-site recycling facilities. Liftshare: How far does landscaping come into the recovery process? You have to imagine that during a rainy event the fields get utterly battered by all the footfall. How conscious are festivals when it comes to preservation? 
Kes: When you’ve got 50,000 pairs of feet on the ground it only takes a couple of good showers to turn it into a mud bath, and in the UK it is inevitable that such a situation will happen at a good percentage of festivals. You’re seeing a lot more metal track-ways and wood chips being placed down in high traffic area at events over the last few years. This is great for protecting the ground alongside your legs from wading through mud for sixteen hours a day. I’m not sure what happens with green field sites, although I know some of the bigger events have fallow years to give the ground a rest. The licenses of events in public spaces and parks won’t be renewed if they don’t make the site good after an event. This means that they have to perform certain conversational work no matter whether or not this is driven by conscience or coercion. For instance, the Shakedown festival in Brighton takes place in a public park and organisers have paid the council around £45k in previous years for the ground to be restored after the festival. It is good that the local authorities force the festival organisers to take responsibility for the impact of their event. As long as festivals work with local authorities to set an acceptable level of conservation, and factor the costs to support this in to their budgets then relatively speaking I don’t see it being a huge problem. Liftshare: We touched on your green festival champions in our previous interview, but do you have any examples of when events have gone above and beyond to hit sustainability aims? Kes: There are three that immediately spring to mind: Shambala just won the Green Festival Award at the UK Festival Awards for the second year running. This is testament to the work they’ve put in to bringing their carbon footprint down by around 80 per cent over the last five years. Imagine what the world would be like if other industries could achieve the same reductions. 
Always a leading force in the festival industry, Glastonbury has made some major inroads into becoming greener in recent times. Last year they invested around £600k installing 5,000 long drop toilets. These store the human waste generated by the 200k+ party-goers and staff in underground tanks, which can then be turned into manure to fertilise local fields. However, my personal favourite has to be the Wood Festival in Oxfordshire where the stage lighting is powered by push bikes. Energetic attendees can jump on a push bike and pump out a few k’s to help provide electric power. Unfortunately kinetic, solar and wind power technologies are not quite there yet to power the bigger stages, but hopefully it won’t be long before cost effectiveness solutions will be readily available. Liftshare: Lastly, what do you feel is the biggest sustainability challenge facing most fest today and how could they go about solving it? Kes: Traffic emissions seem to be the main concern at the moment. Waste can be recycled and alternative power technologies are advancing rapidly, but how do you limit the number of vehicles heading to an event? This issue has seen a rise in the incentives for people to use public transport or car share. Festival-goers are generally a fairly green bunch and thus pretty receptive to this green issue and as long as the festivals continue raising awareness and offering incentives then hopefully in the future we’ll see more take up of the ecologically friendly travel options. Let’s not forget that if people stay at home instead of going to a festival they still use electricity, fuel and generate waste. It would be interesting to compare the footprint of a festival-goer living in a tent for five days, cooking on a camping stove and having all their waste recycled compared to that of someone who stayed at home on a bank holiday weekend in August, firing up the BBQ, driving to the coast and possibly not doing that much recycling. 
Wouldn’t it be nice if one day it was actually greener to go to a festival than to stay at home? Maybe it already is! Thank you once again to Kes for his superb insight into the festival scene, and be sure to check out Festival Mag for all of the hot festival news, rumours and information as it happens.
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include "exti/exti.h"
#include "gpio/gpio.h"
#include "uart/uart.h"
#include "uart/uart_interrupt.h"

#include "FreeRTOS.h"
#include "queue.h"
#include "task.h"

/**
 * STATIC FUNCTION DECLARATIONS
 */
void main__gpio_output(void);
void main__gpio_input(void);
void main__gpio_input_external_interrupt(void);

/**
 * STATE VARIABLES
 */
// GPIO OUTPUT
static GPIO_s output_config;

// GPIO INPUT
static GPIO_s input_config;
static EXTI_s gpioC13_interrupt_config;
// Written from the EXTI ISR, hence volatile.
// NOTE(review): `bool` presumably comes from a project header — confirm
// stdbool.h (or equivalent) is pulled in transitively.
volatile bool is_button_pressed = false;

// MISC

// INTERRUPTS
// EXTI lines 10..15 share this vector; we only service the PC13 line.
void EXTI15_10_Handler(void) {
  if (exti__gpio_is_pending_interrupt(&gpioC13_interrupt_config)) {
    exti__gpio_clear_pending_interrupt(&gpioC13_interrupt_config);

    // Your logic here
    is_button_pressed = true;
  }
}

// TASKS
// Toggles the LED on PA5 roughly every second (vTaskDelay is in ticks).
void blink_task(void *arg) {
  while (1) {
    printf("Hello %s\r\n", __FUNCTION__);
    gpio__set(&output_config);
    vTaskDelay(1000);
    gpio__reset(&output_config);
    vTaskDelay(1000);
  }
}

// Echoes whitespace-delimited tokens read from the UART-backed stdin.
void uart_read(void *arg) {
  char buf[20] = {0};
  while (1) {
    printf("Waiting for data>\r\n");
    scanf("%s", buf);
    printf("Data: %s\r\n", buf);
  }
}

// NOTE(review): main__gpio_input and main__gpio_input_external_interrupt are
// declared above but never called here — confirm whether the button/EXTI path
// is intentionally disabled.
int main(void) {
  printf("Starting main\r\n");
  main__gpio_output();
  gpio__reset(&output_config);

  xTaskCreate(blink_task, "printf test", 2000, NULL, 1, NULL);
  xTaskCreate(uart_read, "scanf test", 2000, NULL, 2, NULL);
  vTaskStartScheduler();

  // vTaskStartSchedular should never exit
  while (1) {
  }
  return 0;
}

// Configures PA5 as a push-pull output and drives it high.
void main__gpio_output(void) {
  // Activate GPIOA
  RCC->AHB2ENR |= (1 << 0);

  output_config.mode = GPIO_mode_OUTPUT;
  output_config.type = GPIO_type_PUSH_PULL;
  output_config.speed = GPIO_speed_LOW_SPEED;
  output_config.pull = GPIO_pull_NO_PULLUP_OR_PULLDOWN;
  gpio__init(&output_config, GPIOA, 5);

  gpio__set(&output_config);
}

// Configures PC13 (user button) as a floating input.
void main__gpio_input(void) {
  // Activate GPIOC
  RCC->AHB2ENR |= (1 << 2);

  input_config.mode = GPIO_mode_INPUT;
  input_config.type = GPIO_type_PUSH_PULL;
  input_config.speed = GPIO_speed_LOW_SPEED;
  input_config.pull = GPIO_pull_NO_PULLUP_OR_PULLDOWN;
  gpio__init(&input_config, GPIOC, 13);
}

// Routes PC13 to the EXTI13 line and enables its NVIC interrupt.
void main__gpio_input_external_interrupt(void) {
  // NOTE(review): presumably enables the SYSCFG clock on APB2 — confirm bit 0
  // is SYSCFGEN for this part.
  RCC->APB2ENR |= (1 << 0);

  // 0-3
  // 4-7
  // 8-11
  // 12-15
  // EXTICR[3] holds lines 12..15; bits [7:4] select the port for line 13.
  SYSCFG->EXTICR[3] |= ((1 << 1) << 4);

  // Set the 13th bit
  // Connected to External Pullup resistor
  gpioC13_interrupt_config.type = EXTI_type_FALLING;
  gpioC13_interrupt_config.pin = input_config.pin;
  exti__gpio_register_interrupt(&gpioC13_interrupt_config);

  NVIC_EnableIRQ(EXTI15_10_IRQn);
}
/// Update a existing key value pair to etcd pub async fn update_existing_kv< T: DeserializeOwned + Serialize + Clone + Debug + Send + Sync, >( &self, key: &str, value: &T, ) -> DatenLordResult<T> { let write_res = self.write_to_etcd(key, value).await?; if let Some(pre_value) = write_res { Ok(pre_value) } else { panic!("failed to replace previous value, return nothing"); } }
<filename>domain/user.go package domain import ( "fmt" "time" "golang.org/x/crypto/bcrypt" ) // User contains user data. type User struct { ID string Email string Password []byte ActivationToken string RecoveryToken string Activated *time.Time Created *time.Time Updated *time.Time Active *bool } // IsActive checks if user account is activated. func (u *User) IsActive() bool { return *u.Active } // IsValidPassword checks if provided plain password matched hashed password. func (u *User) IsValidPassword(plainPassword string) bool { if plainPassword == "" { return false } return bcrypt.CompareHashAndPassword(u.Password, []byte(plainPassword)) == nil } // HashPassword hashes provided plain password using bcrypt hasher. func HashPassword(plainPassword string) ([]byte, error) { hash, err := bcrypt.GenerateFromPassword([]byte(plainPassword), bcrypt.DefaultCost) if err != nil { return nil, fmt.Errorf("bcrypt: %w", err) } return hash, nil }
// CLI for protocol switch related RAIL events. void emberAfPluginDmpTuningGetRailScheduledEventCounters(void) { emberAfCorePrintln("Scheduled event counter:%d Unscheduled event counter:%d", railScheduledEventCntr, railUnscheduledEventCntr); }
<filename>examples/68702115/index.tsx import React from 'react'; import { useRef, useState, useEffect, MouseEvent } from 'react'; const useAudio = (url: string) => { const audio = useRef<HTMLAudioElement | undefined>(typeof Audio !== 'undefined' ? new Audio(url) : undefined); const [playing, setPlaying] = useState(false); const toggle = () => setPlaying(!playing); useEffect(() => { playing ? audio.current?.play() : audio.current?.pause(); }, [playing]); useEffect(() => { audio.current?.addEventListener('ended', () => setPlaying(false)); return () => { audio.current?.removeEventListener('ended', () => setPlaying(false)); }; }, []); return [playing, toggle] as const; }; const NFT = ({ baseUri, metaId, url }: { baseUri: string; metaId: string; url: string }) => { const [metadata, setMetadata] = useState<{ [key: string]: string } | null>(null); const fetchMetadata = async (url: string) => { const response = await fetch(url); const result = await response.json(); if (!result) return; setMetadata(result); }; var [playing, toggle] = useAudio(url); useEffect(() => { fetchMetadata(`${baseUri}/${metaId}`); }, []); if (!metadata) return null; return ( <div className="w-full md:w-1/2 lg:w-1/3 p-3 mb-4"> <div className="h-96"> <div className="relative items-center min-h-full"> <a href="#"></a> </div> {url && <button onClick={toggle}> {playing ? 'Pause' : 'Play'} </button>} </div> </div> ); }; function test() { return [true, () => {}]; } const [isOk, func] = test(); func();
/**
 * Shenzhen Financial Electronic Settlement Center
 * Copyright (c) 1995-2017 All Rights Reserved.
 */
package com.murong.prepayment.cache;

import com.murong.prepayment.cache.config.CacheConfig;
import com.murong.prepayment.cache.to.CacheKey;
import com.murong.prepayment.cache.to.CacheWrapper;

/**
 * Cache abstraction implemented by concrete backends (e.g. in-memory Map,
 * Redis).
 *
 * @author lw.xu
 * @version $Id: Cache.java, v 0.1 2017-10-18 20:20:12 lw.xu Exp $
 */
public interface Cache {

    /**
     * Store an entry in the cache.
     *
     * @param cacheKey the cache key, must not be null
     * @param wrapper  the wrapped value to cache, must not be null
     */
    void set(CacheKey cacheKey, CacheWrapper wrapper);

    /**
     * Look up an entry in the cache.
     *
     * @param cacheKey the cache key, must not be null
     * @return the cached wrapper
     */
    CacheWrapper get(CacheKey cacheKey);

    /**
     * Remove an entry from the cache.
     *
     * @param cacheKey the cache key, must not be null
     * @return the number of entries removed
     */
    Long delete(CacheKey cacheKey);

    /**
     * Remove every entry from the cache.
     */
    void clear();

    /**
     * Shut down the cache and release its resources.
     */
    void shutdown();

    /**
     * @return the configuration this cache was created with
     */
    CacheConfig getConfig();

    /**
     * Acquire a mutex for the given key. Not supported by Map-based caches;
     * supported by Redis.
     */
    Long setMutex(CacheKey cacheKey);
}
def _split_coefficients(self, w: NDArray[Float64]) -> Tuple[float, NDArray[Float64]]: if self.fit_intercept: bias = w[0] wf = w[1:] else: bias = 0.0 wf = w return bias, wf
package ru.job4j.threads.sinhronizy;

import org.junit.Before;
import org.junit.Test;

import java.util.List;

import static org.hamcrest.core.Is.is;
import static org.junit.Assert.*;

/**
 * Tests for {@link UserStorage}: add, transfer, update and delete
 * operations on user accounts.
 */
public class UserStorageTest {
    private UserStorage storage;
    private UserStorage.User a;
    private UserStorage.User b;
    // Live view of the storage's backing list, used for assertions.
    private List<UserStorage.User> st;

    @Before
    public void setUp() throws Exception {
        storage = new UserStorage();
        a = new UserStorage.User(1, 200);
        b = new UserStorage.User(2, 200);
        st = storage.getStorage();
    }

    @Test
    public void whenDoStorageOperation() throws InterruptedException {
        UserStorage.User c = new UserStorage.User(2, 100);
        // NOTE(review): each thread is joined before the next starts, so the
        // adds run strictly sequentially — this exercises the API from other
        // threads but not concurrent interleaving.
        Thread threadA = new Thread(() -> storage.add(this.a));
        Thread threadB = new Thread(() -> storage.add(this.b));
        threadA.start();
        threadA.join();
        threadB.start();
        threadB.join();
        // Move 100 from user 1 to user 2: 200-100=100 and 200+100=300.
        assertTrue(storage.transfer(1, 2, 100));
        assertThat(st.get(0).getAmount(), is(100));
        assertThat(st.get(1).getAmount(), is(300));
        // update replaces user 2's amount with c's (100).
        assertTrue(storage.update(c));
        assertThat(st.get(1).getAmount(), is(100));
        assertTrue(storage.update(b));
        // Deleting an already-deleted or unknown user must return false.
        assertTrue(storage.delete(this.a));
        assertTrue(storage.delete(this.b));
        assertFalse(storage.delete(this.b));
        assertFalse(storage.delete(c));
    }
}
// Copyright 2018-2019 The Loopix-Messaging Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*
	Package client implements the class of a network client which can interact with a mix network.
*/

package client

import (
	"encoding/base64"
	"errors"
	"fmt"
	"io/ioutil"
	"math"
	"net"
	"os"
	"sync"
	"time"

	"github.com/golang/protobuf/proto"
	"github.com/nymtech/nym-directory/models"
	clientConfig "github.com/nymtech/nym-mixnet/client/config"
	"github.com/nymtech/nym-mixnet/clientcore"
	"github.com/nymtech/nym-mixnet/config"
	"github.com/nymtech/nym-mixnet/constants"
	"github.com/nymtech/nym-mixnet/flags"
	"github.com/nymtech/nym-mixnet/helpers"
	"github.com/nymtech/nym-mixnet/helpers/topology"
	"github.com/nymtech/nym-mixnet/logger"
	"github.com/nymtech/nym-mixnet/networker"
	"github.com/nymtech/nym-mixnet/sphinx"
	"github.com/sirupsen/logrus"
)

const (
	// loopLoad is the payload of loop cover messages; receiving it back
	// identifies a packet as our own cover traffic rather than a real message.
	loopLoad = "LoopCoverMessage"
)

// TODO: what is the point of this interface currently?

// Client is the client networking interface
type Client interface {
	networker.NetworkClient
	networker.NetworkServer

	Start() error
	SendMessage(message []byte, recipient config.ClientConfig) error
	ReadInNetworkFromTopology(pkiName string) error
}

// ReceivedMessages is a mutex-guarded accumulator of inbound message payloads.
type ReceivedMessages struct {
	sync.Mutex
	messages [][]byte
}

// NetClient is a queuing TCP network client for the mixnet.
type NetClient struct {
	*clientcore.CryptoClient
	// TODO: somehow rename or completely remove config.ClientConfig because it's waaaay too confusing right now
	cfg      *clientConfig.Config
	config   config.ClientConfig
	token    []byte // TODO: combine with the 'Provider' field considering it's provider specific
	outQueue chan []byte
	haltedCh chan struct{}
	haltOnce sync.Once
	log      *logrus.Logger

	receivedMessages ReceivedMessages
}

// GetReceivedMessages drains and returns all messages accumulated so far.
func (c *NetClient) GetReceivedMessages() [][]byte {
	c.receivedMessages.Lock()
	defer c.receivedMessages.Unlock()
	msgsPtr := c.receivedMessages.messages
	// Swap in a fresh slice so subsequent receives start from empty.
	c.receivedMessages.messages = make([][]byte, 0, 20)
	return msgsPtr
}

// addNewMessage appends an inbound payload under the mutex.
func (c *NetClient) addNewMessage(msg []byte) {
	c.receivedMessages.Lock()
	defer c.receivedMessages.Unlock()
	c.receivedMessages.messages = append(c.receivedMessages.messages, msg)
}

// OutQueue returns a reference to the client's outQueue. It's a queue
// which holds outgoing packets while their order is randomised.
func (c *NetClient) OutQueue() chan<- []byte {
	return c.outQueue
}

// getProvider finds the presence entry matching pubKey by linear scan.
func getProvider(presences []models.MixProviderPresence, pubKey string) (models.MixProviderPresence, error) {
	var pres models.MixProviderPresence
	for _, presence := range presences {
		if presence.PubKey == pubKey {
			return presence, nil
		}
	}
	return pres, fmt.Errorf("no provider with the given public key exists in the current network")
}

// Start reads the network and users information from the topology
// and starts the listening server. Returns an error
// signalling whenever any operation was unsuccessful.
func (c *NetClient) Start() error {
	c.outQueue = make(chan []byte)

	initialTopology, err := topology.GetNetworkTopology(c.cfg.Client.DirectoryServerTopologyEndpoint)
	if err != nil {
		return err
	}
	if err := c.ReadInNetworkFromTopology(initialTopology); err != nil {
		return err
	}
	var providerPresence models.MixProviderPresence
	if providerPresence, err = getProvider(initialTopology.MixProviderNodes, c.cfg.Client.ProviderID); err != nil {
		return fmt.Errorf("specified provider does not seem to be online: %v", c.cfg.Client.ProviderID)
	}
	provider, err := topology.ProviderPresenceToConfig(providerPresence)
	// provider, err := providerFromTopology(initialTopology)
	if err != nil {
		return err
	}
	c.Provider = provider

	// Retry registration every 5s until the provider accepts us.
	for {
		if err := c.sendRegisterMessageToProvider(); err != nil {
			c.log.Errorf("Error during registration to provider: %v", err)
			time.Sleep(5 * time.Second)
		} else {
			c.log.Debug("Registration done!")
			break
		}
	}

	// before we start traffic, we must wait until registration of some client reaches directory server
	for {
		initialTopology, err := topology.GetNetworkTopology(c.cfg.Client.DirectoryServerTopologyEndpoint)
		if err != nil {
			return err
		}
		if err := c.ReadInNetworkFromTopology(initialTopology); err != nil {
			return err
		}
		if len(c.Network.Clients) > 0 {
			break
		}
		c.log.Debug("No registered clients available. Waiting for a second before retrying.")
		time.Sleep(time.Second)
	}

	c.log.Info("Obtained valid network topology")
	c.startTraffic()
	return nil
}

// Wait waits till the client is terminated for any reason.
func (c *NetClient) Wait() {
	<-c.haltedCh
}

// Shutdown cleanly shuts down a given client instance.
// TODO: create daemon to call this upon sigterm or something
func (c *NetClient) Shutdown() {
	// haltOnce makes repeated Shutdown calls safe.
	c.haltOnce.Do(func() { c.halt() })
}

// calls any required cleanup code
func (c *NetClient) halt() {
	c.log.Infof("Starting graceful shutdown")
	// close any listeners, free resources, etc
	// Closing haltedCh releases Wait() and stops the traffic goroutines.
	close(c.haltedCh)
}

// UpdateNetworkView re-fetches the topology from the directory server and
// replaces the locally cached network information.
func (c *NetClient) UpdateNetworkView() error {
	newTopology, err := topology.GetNetworkTopology(c.cfg.Client.DirectoryServerTopologyEndpoint)
	if err != nil {
		c.log.Errorf("error while reading network topology: %v", err)
		return err
	}
	if err := c.ReadInNetworkFromTopology(newTopology); err != nil {
		c.log.Errorf("error while trying to update topology: %v", err)
		return err
	}
	return nil
}

// checkTopology refreshes the network view only when the cached one is stale.
func (c *NetClient) checkTopology() error {
	if c.Network.ShouldUpdate() {
		return c.UpdateNetworkView()
	}
	return nil
}

// GetOwnDetails returns this client's public configuration.
func (c *NetClient) GetOwnDetails() *config.ClientConfig {
	return &c.config
}

// GetAllPossibleRecipients returns slice containing all recipients at all available providers
func (c *NetClient) GetAllPossibleRecipients() []*config.ClientConfig {
	// explicitly update network
	if c.UpdateNetworkView() != nil {
		return nil
	}
	// because of how protobuf works, we need to convert the slice of configs to slice of pointer to configs
	clients := make([]*config.ClientConfig, len(c.Network.Clients))
	for i := range c.Network.Clients {
		clients[i] = &c.Network.Clients[i]
	}
	return clients
}

// SendMessage responsible for sending a real message. Takes as input the message bytes
// and the public information about the destination.
func (c *NetClient) SendMessage(message []byte, recipient config.ClientConfig) error {
	// before we send a message, ensure our topology is up to date
	if err := c.checkTopology(); err != nil {
		c.log.Errorf("error in updating topology: %v", err)
		return err
	}

	packet, err := c.encodeMessage(message, recipient)
	if err != nil {
		c.log.Errorf("Error in sending message - encode message returned error: %v", err)
		return err
	}
	// Hand the packet to the queue controller, which forwards it to the provider.
	c.outQueue <- packet
	return nil
}

// encodeMessage encapsulates the given message into a sphinx packet destinated for recipient
// and wraps with the flag pointing that it is the communication packet
func (c *NetClient) encodeMessage(message []byte, recipient config.ClientConfig) ([]byte, error) {
	sphinxPacket, err := c.EncodeMessage(message, recipient)
	if err != nil {
		c.log.Errorf("Error in sending message - create sphinx packet returned an error: %v", err)
		return nil, err
	}

	packetBytes, err := config.WrapWithFlag(flags.CommFlag, sphinxPacket)
	if err != nil {
		c.log.Errorf("Error in sending message - wrap with flag returned an error: %v", err)
		return nil, err
	}
	return packetBytes, nil
}

// send opens a connection with selected network address
// and sends the passed packet. If connection failed or
// the packet could not be sent, an error is returned.
// Otherwise it returns the response sent by server.
func (c *NetClient) send(packet []byte, host string, port string) (config.ProviderResponse, error) {

	conn, err := net.Dial("tcp", net.JoinHostPort(host, port))
	if err != nil {
		c.log.Errorf("Error in send - dial returned an error: %v", err)
		return config.ProviderResponse{}, err
	}
	defer conn.Close()

	if _, err := conn.Write(packet); err != nil {
		c.log.Errorf("Failed to write to connection: %v", err)
		return config.ProviderResponse{}, err
	}

	// Read until the server closes its side of the connection.
	buff, err := ioutil.ReadAll(conn)
	if err != nil {
		c.log.Errorf("Failed to read response: %v", err)
		return config.ProviderResponse{}, err
	}

	var resPacket config.ProviderResponse
	if err = proto.Unmarshal(buff, &resPacket); err != nil {
		c.log.Errorf("Error while unmarshalling received packet: %v", err)
		return config.ProviderResponse{}, err
	}

	return resPacket, nil
}

// RegisterToken stores the authentication token received from the provider
func (c *NetClient) registerToken(token []byte) {
	c.token = token
	c.log.Debugf("Registered token %s", c.token)
}

// ProcessPacket processes the received sphinx packet and returns the
// encapsulated message or error in case the processing
// was unsuccessful.
func (c *NetClient) processPacket(packet []byte) ([]byte, error) {
	// c.log.Debugf(" Processing packet")
	// c.log.Tracef("Removing first 37 bytes of the message")
	// NOTE(review): the trace comment above says 37 bytes but the code strips
	// 38 — confirm which header length is correct.
	if len(packet) > 38 {
		return packet[38:], nil
	}
	return packet, nil
}

// startTraffic launches the background goroutines: the outgoing-queue
// controller, plus (when enabled by config) the loop-cover-traffic stream
// and the periodic message-fetch loop.
func (c *NetClient) startTraffic() {
	go func() {
		err := c.controlOutQueue()
		if err != nil {
			c.log.Fatalf("Error in the controller of the outgoing packets queue. Possible security threat.: %v", err)
		}
	}()

	if c.cfg.Debug.LoopCoverTrafficRate > 0.0 {
		c.turnOnLoopCoverTraffic()
	}

	if c.cfg.Debug.FetchMessageRate > 0.0 {
		go func() {
			c.controlMessagingFetching()
		}()
	}
}

// SendRegisterMessageToProvider allows the client to register with the selected provider.
// The client sends a special assignment packet, with its public information, to the provider // or returns an error. func (c *NetClient) sendRegisterMessageToProvider() error { c.log.Debugf("Sending request to provider to register") confBytes, err := proto.Marshal(&c.config) if err != nil { c.log.Errorf("Error in register provider - marshal of provider config returned an error: %v", err) return err } pktBytes, err := config.WrapWithFlag(flags.AssignFlag, confBytes) if err != nil { c.log.Errorf("Error in register provider - wrap with flag returned an error: %v", err) return err } response, err := c.send(pktBytes, c.Provider.Host, c.Provider.Port) if err != nil { c.log.Errorf("Error in register provider - send registration packet returned an error: %v", err) return err } packets, err := config.UnmarshalProviderResponse(response) if err != nil || len(packets) != 1 { c.log.Errorf("error in register provider - failed to unmarshal response: %v", err) } c.registerToken(packets[0].Data) return nil } // GetMessagesFromProvider allows to fetch messages from the inbox stored by the // provider. The client sends a pull packet to the provider, along with // the authentication token. An error is returned if occurred. 
func (c *NetClient) getMessagesFromProvider() error { pullRqs := config.PullRequest{ClientPublicKey: c.GetPublicKey().Bytes(), Token: c.token} pullRqsBytes, err := proto.Marshal(&pullRqs) if err != nil { c.log.Errorf("Error in register provider - marshal of pull request returned an error: %v", err) return err } pktBytes, err := config.WrapWithFlag(flags.PullFlag, pullRqsBytes) if err != nil { c.log.Errorf("Error in register provider - marshal of provider config returned an error: %v", err) return err } response, err := c.send(pktBytes, c.Provider.Host, c.Provider.Port) if err != nil { return err } packets, err := config.UnmarshalProviderResponse(response) if err != nil { c.log.Errorf("error in register provider - failed to unmarshal response: %v", err) } for _, packet := range packets { packetData, err := c.processPacket(packet.Data) if err != nil { c.log.Errorf("Error in processing received packet: %v", err) } packetDataStr := string(packetData) switch packetDataStr { case loopLoad: c.log.Debugf("Received loop cover message %v", packetDataStr) default: c.log.Infof("Received new message: %v", packetDataStr) c.addNewMessage(packetData) } } return nil } // controlOutQueue controls the outgoing queue of the client. // If a message awaits in the queue, it is sent. Otherwise a // drop cover message is sent instead. 
func (c *NetClient) controlOutQueue() error {
	c.log.Debugf("Queue controller started")
	for {
		select {
		case <-c.haltedCh:
			c.log.Infof("Halting controlOutQueue")
			return nil
		case realPacket := <-c.outQueue:
			response, err := c.send(realPacket, c.Provider.Host, c.Provider.Port)
			if err != nil {
				c.log.Errorf("Could not send real packet: %v", err)
			}
			c.log.Debugf("Real packet was sent")
			c.log.Debugf("Received response: %v", response)
		default:
			// No real traffic pending: send rate-compliant cover traffic
			// unless it has been disabled in the debug config.
			if !c.cfg.Debug.RateCompliantCoverMessagesDisabled {
				dummyPacket, err := c.createLoopCoverMessage()
				if err != nil {
					return err
				}
				response, err := c.send(dummyPacket, c.Provider.Host, c.Provider.Port)
				if err != nil {
					c.log.Errorf("Could not send dummy packet: %v", err)
				}
				c.log.Debugf("Dummy packet was sent")
				c.log.Debugf("Received response: %v", response)
			}
		}
		// Pace the stream with a randomised (exponential) delay.
		err := delayBeforeContinue(c.cfg.Debug.MessageSendingRate)
		if err != nil {
			return err
		}
	}
}

// controlMessagingFetching periodically at random sends a query to the provider
// to fetch received messages
func (c *NetClient) controlMessagingFetching() {
	for {
		select {
		case <-c.haltedCh:
			c.log.Infof("Stopping controlMessagingFetching")
			return
		default:
			if err := c.getMessagesFromProvider(); err != nil {
				c.log.Errorf("Could not get message from provider: %v", err)
				continue
			}
			// c.log.Infof("Sent request to provider to fetch messages")
			err := delayBeforeContinue(c.cfg.Debug.FetchMessageRate)
			if err != nil {
				c.log.Errorf("Error in ControlMessagingFetching - generating random exp. value failed: %v", err)
			}
		}
	}
}

// createLoopCoverMessage packs a dummy loop message into
// a sphinx packet. The loop message is destinated back to the sender.
// createLoopCoverMessage returns a byte representation of the encapsulated packet and an error
func (c *NetClient) createLoopCoverMessage() ([]byte, error) {
	sphinxPacket, err := c.EncodeMessage([]byte(loopLoad), c.config)
	if err != nil {
		return nil, err
	}
	packetBytes, err := config.WrapWithFlag(flags.CommFlag, sphinxPacket)
	if err != nil {
		return nil, err
	}
	return packetBytes, nil
}

// runLoopCoverTrafficStream manages the stream of loop cover traffic.
// In each stream iteration it sends a freshly created loop packet and
// waits a random time before scheduling the next loop packet.
func (c *NetClient) runLoopCoverTrafficStream() error {
	c.log.Debugf("Stream of loop cover traffic started")
	for {
		select {
		case <-c.haltedCh:
			c.log.Infof("Halting loopCoverTrafficStream")
			return nil
		default:
			loopPacket, err := c.createLoopCoverMessage()
			if err != nil {
				return err
			}
			response, err := c.send(loopPacket, c.Provider.Host, c.Provider.Port)
			if err != nil {
				c.log.Errorf("Could not send loop cover traffic message: %v", err)
				return err
			}
			c.log.Debugf("Loop message sent")
			c.log.Debugf("Received response: %v", response)

			if err := delayBeforeContinue(c.cfg.Debug.LoopCoverTrafficRate); err != nil {
				return err
			}
		}
	}
}

// delayBeforeContinue sleeps for an exponentially distributed random
// duration parameterised by rateParam (converted from seconds to nanoseconds).
func delayBeforeContinue(rateParam float64) error {
	delaySec, err := helpers.RandomExponential(rateParam)
	if err != nil {
		return err
	}
	time.Sleep(time.Duration(int64(delaySec*math.Pow10(9))) * time.Nanosecond)
	return nil
}

// turnOnLoopCoverTraffic starts the stream of loop cover traffic
func (c *NetClient) turnOnLoopCoverTraffic() {
	go func() {
		err := c.runLoopCoverTrafficStream()
		if err != nil {
			c.log.Errorf("Error in the controller of the loop cover traffic. Possible security threat.: %v", err)
		}
	}()
}

// ReadInNetworkFromTopology reads in the public information about active mixes
// from the topology and stores them locally.
// In case
// the connection or fetching data from the PKI went wrong,
// an error is returned.
func (c *NetClient) ReadInNetworkFromTopology(topologyData *models.Topology) error {
	c.log.Debugf("Reading network information from the PKI")

	mixes, err := topology.GetMixesPKI(topologyData.MixNodes)
	if err != nil {
		c.log.Errorf("error while reading mixes from PKI: %v", err)
		return err
	}
	clients, err := topology.GetClientPKI(topologyData.MixProviderNodes)
	if err != nil {
		c.log.Errorf("error while reading clients from PKI: %v", err)
		return err
	}

	c.Network.UpdateNetwork(mixes, clients)

	return nil
}

// TODO: make it variable, perhaps choose provider with least number of clients? or by preference?
// But for now just get the first provider on the list
func providerFromTopology(initialTopology *models.Topology) (config.MixConfig, error) {
	if initialTopology == nil || initialTopology.MixProviderNodes == nil || len(initialTopology.MixProviderNodes) == 0 {
		return config.MixConfig{}, errors.New("invalid topology")
	}

	for _, v := range initialTopology.MixProviderNodes {
		// get the first entry
		// NOTE(review): map iteration order is unspecified, so "first" is an
		// arbitrary provider — confirm that is acceptable here.
		return topology.ProviderPresenceToConfig(v)
	}
	return config.MixConfig{}, errors.New("unknown state")
}

// NewClient constructor function to create an new client object.
// Returns a new client object or an error, if occurred.
func NewClient(cfg *clientConfig.Config) (*NetClient, error) {

	baseLogger, err := logger.New(cfg.Logging.File, cfg.Logging.Level, cfg.Logging.Disable)
	if err != nil {
		return nil, err
	}

	// Load the long-term sphinx keypair from the PEM files named in the config.
	prvKey := new(sphinx.PrivateKey)
	pubKey := new(sphinx.PublicKey)
	if err := helpers.FromPEMFile(prvKey, cfg.Client.PrivateKeyFile(), constants.PrivateKeyPEMType); err != nil {
		return nil, fmt.Errorf("Failed to load the private key: %v", err)
	}

	if err := helpers.FromPEMFile(pubKey, cfg.Client.PublicKeyFile(), constants.PublicKeyPEMType); err != nil {
		return nil, fmt.Errorf("Failed to load the public key: %v", err)
	}

	core := clientcore.NewCryptoClient(prvKey,
		pubKey,
		config.MixConfig{},
		clientcore.NetworkPKI{},
		baseLogger.GetLogger("cryptoClient "+cfg.Client.ID),
	)
	log := baseLogger.GetLogger(cfg.Client.ID)

	c := NetClient{CryptoClient: core,
		cfg:      cfg,
		haltedCh: make(chan struct{}),
		log:      log,
		receivedMessages: ReceivedMessages{
			messages: make([][]byte, 0, 20),
		},
	}
	c.log.Infof("Logging level set to %v", c.cfg.Logging.Level)

	// Print our public key (base64) to stdout in yellow so users can share it.
	b64Key := base64.URLEncoding.EncodeToString(c.GetPublicKey().Bytes())
	keyInfoStr := fmt.Sprintf("\x1b[%dmOur Public Key is: %s\x1b[0m",
		logger.ColorYellow,
		b64Key,
	)
	fmt.Fprint(os.Stdout, keyInfoStr+"\n\n")

	c.config = config.ClientConfig{Id: b64Key,
		Host:     "", // TODO: remove
		Port:     "", // TODO: remove
		PubKey:   c.GetPublicKey().Bytes(),
		Provider: &c.Provider,
	}

	return &c, nil
}

// NewTestClient constructs a client object, which can be used for testing. The object contains the crypto core
// and the top-level of client, but does not involve networking and starting a listener.
// TODO: similar issue as with 'NewClient' - need to create some config struct with the parameters
func NewTestClient(cfg *clientConfig.Config, prvKey *sphinx.PrivateKey, pubKey *sphinx.PublicKey) (*NetClient, error) {
	baseDisabledLogger, err := logger.New(cfg.Logging.File, cfg.Logging.Level, cfg.Logging.Disable)
	if err != nil {
		return nil, err
	}

	// this logger can be shared as it will be disabled anyway
	disabledLog := baseDisabledLogger.GetLogger("test")

	core := clientcore.NewCryptoClient(prvKey,
		pubKey,
		config.MixConfig{},
		clientcore.NetworkPKI{},
		disabledLog,
	)

	c := NetClient{CryptoClient: core,
		cfg:      cfg,
		haltedCh: make(chan struct{}),
		log:      disabledLog,
	}

	b64Key := base64.URLEncoding.EncodeToString(c.GetPublicKey().Bytes())
	c.config = config.ClientConfig{Id: b64Key,
		Host:     "", // TODO: remove
		Port:     "", // TODO: remove
		PubKey:   c.GetPublicKey().Bytes(),
		Provider: &c.Provider,
	}

	return &c, nil
}
import {ModuleGenerator} from "./ModuleGenerator";

/**
 * Drain the generator and return the contents of the last emitted file whose
 * name matches `target`, or undefined when no such file was generated.
 * Extracted because every test previously duplicated this scan loop.
 */
function findGeneratedFile(
    generator: ModuleGenerator,
    target: string,
): string | undefined {
    let result: string | undefined;
    for (const [filename, contents] of generator) {
        if (filename === target) {
            result = contents;
        }
    }
    return result;
}

/**
 * Locate and parse the generated package.json, asserting along the way that
 * the file exists and parses to an object.
 */
function findPackageJson(generator: ModuleGenerator): any {
    const contents = findGeneratedFile(generator, 'package.json');
    expect(contents).toBeDefined();
    const packageJson = JSON.parse(contents as string);
    expect(typeof packageJson).toBe('object');
    return packageJson;
}

describe('ModuleGenerator', () => {
    describe('expected files', () => {
        const generator = new ModuleGenerator({
            name: 'name',
            description: 'description'
        });
        const expectedFiles = [
            '.gitignore',
            '.npmignore',
            'LICENSE',
            'package.json',
            'README.md',
            'tsconfig.json',
            'tsconfig.test.json',
        ];
        for (const expectedFile of expectedFiles) {
            it(`should include a ${expectedFile} file`, () => {
                expect(findGeneratedFile(generator, expectedFile)).toBeDefined();
            });
        }
    });

    describe('package.json', () => {
        it('should use the provided package name', () => {
            const name = 'name';
            const generator = new ModuleGenerator({
                name,
                description: 'description'
            });
            expect(findPackageJson(generator).name).toBe(name);
        });

        it('should use the provided description', () => {
            const description = 'description';
            const generator = new ModuleGenerator({
                name: 'name',
                description
            });
            expect(findPackageJson(generator).description).toBe(description);
        });

        it('should use the provided version', () => {
            const version = '1.1.1-alpha';
            const generator = new ModuleGenerator({
                name: 'name',
                description: 'description',
                version,
            });
            expect(findPackageJson(generator).version).toBe(version);
        });

        it('should use a default version of 0.1.0', () => {
            const generator = new ModuleGenerator({
                name: 'name',
                description: 'description'
            });
            expect(findPackageJson(generator).version).toBe('0.1.0');
        });
    });

    describe('README.md', () => {
        it(
            'should use the provided name and description as a default README',
            () => {
                const name = 'name';
                const description = 'description';
                const generator = new ModuleGenerator({name, description});
                const readme = findGeneratedFile(generator, 'README.md');
                expect(readme).toBe(
                    `# ${name}

${description}`
                );
            }
        );

        it(
            'should just use the name as a title if no description is provided',
            () => {
                const name = 'name';
                const generator = new ModuleGenerator({name});
                expect(findGeneratedFile(generator, 'README.md')).toBe(`# ${name}`);
            }
        );
    });
});
A report on Donald Trump‘s campaign suggests that his advisors are now trying to limit his TV appearances in order to reduce the amount of controversy he generates with his inflammatory political ideas. Howard Kurtz of Fox’s Mediabuzz reported today that some of the mogul’s top people are trying to cut down on the media blitz tactics that some say were instrumental in his political rise. The report noted that the great majority of Trump’s recent interviews have gone to Fox, and also that there are new efforts in place to keep Trump on “friendlier terrain,” controlling his interviews, and restricting who gets to talk with him. Trump, who has resumed his full-throated denunciations of the media—such as calling CNN the Clinton News Network—personally vetted every TV invitation for most of the campaign. Now the staff is weeding out many requests without consulting him, the sources say, which could either be viewed as a mark of professionalization or an attempt to restrain Trump from being Trump. Watch the latest video at video.foxnews.com Sources indicate that the campaign’s new direction comes from the guidance of Paul Manafort and Jared Kushner. Their goal appears to be keeping Trump attached to hospitable shows like Fox & Friends, Hannity, and The O’Reilly Factor. Kurtz’s report went on to note the current friction that exists between Trump and CNN and MSNBC, as well as the presumptive nominee’s proclivity for giving his interviews by phone instead of in person. Some advisors behind the move reportedly say that it is up to Trump to expand his base, and that he should allow his spokespeople to spar with the media on his behalf when it comes to controversial matters like invoking the Star of David in an attack on Hillary Clinton. Watch above, via Fox. [Image via screengrab] — — >> Follow Ken Meyer (@KenMeyer91) on Twitter Have a tip we should know? [email protected]
/** * Copyright (c) 2020 The UsaCon Authors * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as * published by the Free Software Foundation, either version 3 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ // Class for interfacing with IO Devices on the main thread (window access). import { IoDevices } from "@wasmer/io-devices"; export default class IoDeviceWindow { ioDevices: IoDevices | undefined; popupWindow: Window | undefined | null; popupCanvas: HTMLCanvasElement | undefined | null; popupCanvasContext: CanvasRenderingContext2D | undefined | null; popupImageData: any; // Handle Key Press / Release oldPopupKeyCodes: Array<number> = []; popupKeyCodes: Array<number> = []; // Handle Mouse Move oldMouseMovePosition: { x: number; y: number } = { x: 0, y: 0 }; mouseMovePosition: { x: number; y: number } = { x: 0, y: 0 }; // Handle Mouse Clicks mouseLeftClickPosition: { x: number; y: number } | undefined = undefined; mouseRightClickPosition: { x: number; y: number } | undefined = undefined; mouseMiddleClickPosition: { x: number; y: number } | undefined = undefined; sharedIoDeviceInput: Int32Array | undefined; constructor(sharedIoDeviceInputBuffer?: SharedArrayBuffer) { if (sharedIoDeviceInputBuffer) { this.sharedIoDeviceInput = new Int32Array(sharedIoDeviceInputBuffer); } } resize(width: number, height: number): void { // Close the window if (width === 0 && height === 0) { this.close(); return; } if (this.popupWindow && this.popupCanvas && this.popupCanvasContext) { // Resize the 
canvas this.popupCanvas.width = width; this.popupCanvas.height = height; this.popupImageData = this.popupCanvasContext.getImageData( 0, 0, width, height ); } else { // Open a new window this._open(width, height); } } close(): void { if (this.popupWindow) { this.popupWindow.close(); this.popupWindow = undefined; } } drawRgbaArrayToFrameBuffer(rgbaArray: Uint8Array): void { if (this.popupCanvas && this.popupCanvasContext && this.popupImageData) { this.popupImageData.data.set(rgbaArray); this.popupCanvasContext.putImageData(this.popupImageData, 0, 0); } } getInputBuffer(): Uint8Array { // Handle keyCodes const inputArray: number[] = []; // Key Presses this.popupKeyCodes.forEach((keyCode) => { if (!this.oldPopupKeyCodes.includes(keyCode)) { inputArray.push(1); inputArray.push(keyCode); } }); // Mouse movement if ( this.oldMouseMovePosition.x !== this.mouseMovePosition.x || this.oldMouseMovePosition.y !== this.mouseMovePosition.y ) { inputArray.push(2); this._append32BitIntToByteArray(this.mouseMovePosition.x, inputArray); this._append32BitIntToByteArray(this.mouseMovePosition.y, inputArray); } this.oldMouseMovePosition = this.mouseMovePosition; // Key Releases this.oldPopupKeyCodes.forEach((keyCode) => { if (!this.popupKeyCodes.includes(keyCode)) { inputArray.push(3); inputArray.push(keyCode); } }); this.oldPopupKeyCodes = this.popupKeyCodes.slice(0); // Left Mouse Click if (this.mouseLeftClickPosition) { inputArray.push(4); this._append32BitIntToByteArray( this.mouseLeftClickPosition.x, inputArray ); this._append32BitIntToByteArray( this.mouseLeftClickPosition.y, inputArray ); this.mouseLeftClickPosition = undefined; } // Right Mouse Click if (this.mouseRightClickPosition) { inputArray.push(5); this._append32BitIntToByteArray( this.mouseRightClickPosition.x, inputArray ); this._append32BitIntToByteArray( this.mouseRightClickPosition.y, inputArray ); this.mouseRightClickPosition = undefined; } // Middle Mouse Click if (this.mouseMiddleClickPosition) { 
inputArray.push(4); this._append32BitIntToByteArray( this.mouseMiddleClickPosition.x, inputArray ); this._append32BitIntToByteArray( this.mouseMiddleClickPosition.y, inputArray ); this.mouseMiddleClickPosition = undefined; } const inputBytes = new Uint8Array(inputArray); if (this.sharedIoDeviceInput) { // Write the buffer to the memory for (let i = 0; i < inputBytes.length; i++) { this.sharedIoDeviceInput[i + 1] = inputBytes[i]; } // Write our number of elements this.sharedIoDeviceInput[0] = inputBytes.length; Atomics.notify(this.sharedIoDeviceInput, 0, 1); } return inputBytes; } _open(width: number, height: number): void { // Let's assume landscape for now: const widthScreenRatio = Math.floor(screen.width / width); const heightScreenRatio = Math.floor(screen.height / height); const scale = Math.min(widthScreenRatio, heightScreenRatio); let windowWidth = width * scale; let windowHeight = height * scale; // Open the window this.popupWindow = window.open( "about:blank", "WasmerExperimentalFramebuffer", `width=${windowWidth},height=${windowHeight}` ) as Window; // Add our html and canvas and stuff this.popupWindow.document.body.innerHTML = ` <style> html, body { width: 100%; height: 100%; } body { display: flex; flex-direction: column; justify-content: center; align-items: center; margin: 0px; margin-left: auto; margin-right: auto; } #io-device-framebuffer { width: 100%; height: auto; /* Will Keep pixel art looking good */ image-rendering: pixelated; image-rendering: -moz-crisp-edges; image-rendering: crisp-edges; } </style> <canvas id="io-device-framebuffer" width="${width}" height="${height}"></canvas> `; this.popupWindow.document.head.innerHTML = ` <title>Wasmer Experimental Framebuffer</title> `; // Get our elements stuff this.popupCanvas = this.popupWindow.document.querySelector( "#io-device-framebuffer" ) as HTMLCanvasElement; this.popupCanvasContext = this.popupCanvas.getContext( "2d" ) as CanvasRenderingContext2D; this.popupImageData = 
this.popupCanvasContext.getImageData( 0, 0, width, height ); // Add the neccessary events this.popupWindow.document.addEventListener( "keydown", this._eventListenerKeydown.bind(this) ); this.popupWindow.document.addEventListener( "keyup", this._eventListenerKeyup.bind(this) ); this.popupWindow.document.addEventListener( "mousemove", this._eventListenerMousemove.bind(this) ); this.popupWindow.document.addEventListener( "click", this._eventListenerClick.bind(this) ); } _append32BitIntToByteArray(value: number, numberArray: number[]) { for (let i = 0; i < 4; i++) { // Goes smallest to largest (little endian) let currentByte = value; currentByte = currentByte & (0xff << (i * 8)); currentByte = currentByte >> (i * 8); numberArray.push(currentByte); } } _eventListenerKeydown(event: KeyboardEvent): void { event.preventDefault(); const keyCode = event.keyCode; if (!this.popupKeyCodes.includes(event.keyCode)) { this.popupKeyCodes.push(event.keyCode); } } _eventListenerKeyup(event: KeyboardEvent): void { event.preventDefault(); const keyCode = event.keyCode; const keyCodeIndex = this.popupKeyCodes.indexOf(event.keyCode); if (keyCodeIndex > -1) { this.popupKeyCodes.splice(keyCodeIndex, 1); } } _eventListenerMousemove(event: MouseEvent): void { const position = this._getPositionFromMouseEvent(event); if (position === undefined) { return; } this.mouseMovePosition = position; } _eventListenerClick(event: MouseEvent): void { const position = this._getPositionFromMouseEvent(event); if (position === undefined) { return; } if (event.button === 0) { // Left click this.mouseLeftClickPosition = position; } else if (event.button === 1) { // Middle click this.mouseMiddleClickPosition = position; } else if (event.button === 2) { // Right click this.mouseRightClickPosition = position; } } _getPositionFromMouseEvent( event: MouseEvent ): { x: number; y: number } | undefined { if (!this.popupCanvas) { return undefined; } const popupCanvasBoundingClientRect = 
this.popupCanvas.getBoundingClientRect(); const minX = popupCanvasBoundingClientRect.x; const maxX = popupCanvasBoundingClientRect.x + popupCanvasBoundingClientRect.width; const minY = popupCanvasBoundingClientRect.y; const maxY = popupCanvasBoundingClientRect.y + popupCanvasBoundingClientRect.height; let x = undefined; let y = undefined; if (event.x >= minX && event.x <= maxX) { x = event.x - minX; } if (event.y >= minY && event.y <= maxY) { y = event.y - minY; } if (x === undefined || y === undefined) { return undefined; } // Find where X and Y would be accoring to the scale const xScale = this.popupCanvas.width / popupCanvasBoundingClientRect.width; const yScale = this.popupCanvas.height / popupCanvasBoundingClientRect.height; x = x * xScale; y = y * yScale; return { x, y, }; } }
/** * Create a class element for the given simple type. * @param type The type * @param annotationMetadata The annotation metadata * @param typeArguments The type arguments * @return The class element * @since 2.4.0 */ static @NonNull ClassElement of( @NonNull Class<?> type, @NonNull AnnotationMetadata annotationMetadata, @NonNull Map<String, ClassElement> typeArguments) { Objects.requireNonNull(annotationMetadata, "Annotation metadata cannot be null"); Objects.requireNonNull(typeArguments, "Type arguments cannot be null"); return new ReflectClassElement( Objects.requireNonNull(type, "Type cannot be null") ) { @Override public AnnotationMetadata getAnnotationMetadata() { return annotationMetadata; } @Override public Map<String, ClassElement> getTypeArguments() { return Collections.unmodifiableMap(typeArguments); } }; }
/*
 * Copyright 2020 Red Hat, Inc. and/or its affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.optaplanner.benchmark.impl;

import java.io.File;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import org.apache.commons.lang3.BooleanUtils;
import org.optaplanner.benchmark.api.PlannerBenchmarkFactory;
import org.optaplanner.benchmark.config.ProblemBenchmarksConfig;
import org.optaplanner.benchmark.config.statistic.ProblemStatisticType;
import org.optaplanner.benchmark.config.statistic.SingleStatisticType;
import org.optaplanner.benchmark.impl.loader.FileProblemProvider;
import org.optaplanner.benchmark.impl.loader.InstanceProblemProvider;
import org.optaplanner.benchmark.impl.loader.ProblemProvider;
import org.optaplanner.benchmark.impl.result.PlannerBenchmarkResult;
import org.optaplanner.benchmark.impl.result.ProblemBenchmarkResult;
import org.optaplanner.benchmark.impl.result.SingleBenchmarkResult;
import org.optaplanner.benchmark.impl.result.SolverBenchmarkResult;
import org.optaplanner.benchmark.impl.result.SubSingleBenchmarkResult;
import org.optaplanner.benchmark.impl.statistic.ProblemStatistic;
import org.optaplanner.core.config.solver.EnvironmentMode;
import org.optaplanner.core.config.util.ConfigUtils;
import org.optaplanner.core.impl.domain.solution.descriptor.SolutionDescriptor;
import org.optaplanner.core.impl.solver.DefaultSolverFactory;
import org.optaplanner.persistence.common.api.domain.solution.SolutionFileIO;

/**
 * Turns a {@link ProblemBenchmarksConfig} into the {@link ProblemBenchmarkResult} /
 * {@link SingleBenchmarkResult} tree for one {@link SolverBenchmarkResult}.
 */
public class ProblemBenchmarksFactory {
    private final ProblemBenchmarksConfig config;

    public ProblemBenchmarksFactory(ProblemBenchmarksConfig config) {
        this.config = config;
    }

    /**
     * Builds (or reuses) a {@link ProblemBenchmarkResult} per problem and attaches a
     * {@link SingleBenchmarkResult} for each to the given solver benchmark.
     *
     * @param solverBenchmarkResult never null
     * @param extraProblems never null, problems passed in programmatically (may be empty)
     */
    public <Solution_> void buildProblemBenchmarkList(SolverBenchmarkResult solverBenchmarkResult,
            Solution_[] extraProblems) {
        PlannerBenchmarkResult plannerBenchmarkResult = solverBenchmarkResult.getPlannerBenchmarkResult();
        List<ProblemBenchmarkResult> unifiedProblemBenchmarkResultList = plannerBenchmarkResult
                .getUnifiedProblemBenchmarkResultList();
        for (ProblemProvider<Solution_> problemProvider : buildProblemProviderList(
                solverBenchmarkResult, extraProblems)) {
            // 2 SolverBenchmarks containing equal ProblemBenchmarks should contain the same instance
            ProblemBenchmarkResult<Solution_> newProblemBenchmarkResult = buildProblemBenchmark(
                    plannerBenchmarkResult, problemProvider);
            ProblemBenchmarkResult<Solution_> problemBenchmarkResult;
            int index = unifiedProblemBenchmarkResultList.indexOf(newProblemBenchmarkResult);
            if (index < 0) {
                problemBenchmarkResult = newProblemBenchmarkResult;
                unifiedProblemBenchmarkResultList.add(problemBenchmarkResult);
            } else {
                problemBenchmarkResult = unifiedProblemBenchmarkResultList.get(index);
            }
            buildSingleBenchmark(solverBenchmarkResult, problemBenchmarkResult);
        }
    }

    /**
     * Collects one {@link ProblemProvider} per extra (in-memory) problem and one per
     * configured input solution file. Fails fast when neither source yields a problem.
     */
    private <Solution_> List<ProblemProvider<Solution_>> buildProblemProviderList(
            SolverBenchmarkResult solverBenchmarkResult, Solution_[] extraProblems) {
        if (ConfigUtils.isEmptyCollection(config.getInputSolutionFileList())
                && extraProblems.length == 0) {
            throw new IllegalArgumentException(
                    "The solverBenchmarkResult (" + solverBenchmarkResult.getName() + ") has no problems.\n"
                            + "Maybe configure at least 1 <inputSolutionFile> directly or indirectly by inheriting it.\n"
                            + "Or maybe pass at least one problem to " + PlannerBenchmarkFactory.class.getSimpleName()
                            + ".buildPlannerBenchmark().");
        }
        List<ProblemProvider<Solution_>> problemProviderList = new ArrayList<>(
                extraProblems.length
                        + (config.getInputSolutionFileList() == null ? 0 : config.getInputSolutionFileList().size()));
        DefaultSolverFactory<Solution_> defaultSolverFactory =
                new DefaultSolverFactory<>(solverBenchmarkResult.getSolverConfig());
        SolutionDescriptor<Solution_> solutionDescriptor =
                defaultSolverFactory.buildSolutionDescriptor(EnvironmentMode.REPRODUCIBLE);
        int extraProblemIndex = 0;
        for (Solution_ extraProblem : extraProblems) {
            if (extraProblem == null) {
                throw new IllegalStateException("The benchmark problem (" + extraProblem + ") is null.");
            }
            String problemName = "Problem_" + extraProblemIndex;
            problemProviderList.add(new InstanceProblemProvider<>(problemName, solutionDescriptor, extraProblem));
            extraProblemIndex++;
        }
        if (ConfigUtils.isEmptyCollection(config.getInputSolutionFileList())) {
            // A solutionFileIOClass is pointless without files to read.
            if (config.getSolutionFileIOClass() != null) {
                throw new IllegalArgumentException("Cannot use solutionFileIOClass (" + config.getSolutionFileIOClass()
                        + ") with an empty inputSolutionFileList (" + config.getInputSolutionFileList() + ").");
            }
        } else {
            SolutionFileIO<Solution_> solutionFileIO = buildSolutionFileIO();
            for (File inputSolutionFile : config.getInputSolutionFileList()) {
                if (!inputSolutionFile.exists()) {
                    throw new IllegalArgumentException("The inputSolutionFile (" + inputSolutionFile
                            + ") does not exist.");
                }
                problemProviderList.add(new FileProblemProvider<>(solutionFileIO, inputSolutionFile));
            }
        }
        return problemProviderList;
    }

    /** Instantiates the configured {@link SolutionFileIO}; requires solutionFileIOClass to be set. */
    @SuppressWarnings("unchecked")
    private <Solution_> SolutionFileIO<Solution_> buildSolutionFileIO() {
        if (config.getSolutionFileIOClass() == null) {
            throw new IllegalArgumentException(
                    "The solutionFileIOClass (" + config.getSolutionFileIOClass() + ") cannot be null.");
        }
        return (SolutionFileIO<Solution_>) ConfigUtils.newInstance(config,
                "solutionFileIOClass", config.getSolutionFileIOClass());
    }

    /**
     * Builds a {@link ProblemBenchmarkResult} for one problem, including its
     * problem statistics (defaulting to BEST_SCORE when statistics are enabled
     * but no types are configured).
     */
    private <Solution_> ProblemBenchmarkResult<Solution_> buildProblemBenchmark(
            PlannerBenchmarkResult plannerBenchmarkResult, ProblemProvider<Solution_> problemProvider) {
        ProblemBenchmarkResult<Solution_> problemBenchmarkResult = new ProblemBenchmarkResult<>(plannerBenchmarkResult);
        problemBenchmarkResult.setName(problemProvider.getProblemName());
        problemBenchmarkResult.setProblemProvider(problemProvider);
        problemBenchmarkResult.setWriteOutputSolutionEnabled(
                config.getWriteOutputSolutionEnabled() == null ? false : config.getWriteOutputSolutionEnabled());
        List<ProblemStatistic> problemStatisticList;
        if (BooleanUtils.isFalse(config.getProblemStatisticEnabled())) {
            // Fixed error message: configuring statistic types while statistics are
            // disabled is a contradiction, so they canNOT be used together.
            if (!ConfigUtils.isEmptyCollection(config.getProblemStatisticTypeList())) {
                throw new IllegalArgumentException("The problemStatisticEnabled ("
                        + config.getProblemStatisticEnabled() + ") and problemStatisticTypeList ("
                        + config.getProblemStatisticTypeList() + ") cannot be used together.");
            }
            problemStatisticList = Collections.emptyList();
        } else {
            List<ProblemStatisticType> problemStatisticTypeList_ = (config.getProblemStatisticTypeList() == null)
                    ? Collections.singletonList(ProblemStatisticType.BEST_SCORE)
                    : config.getProblemStatisticTypeList();
            problemStatisticList = new ArrayList<>(problemStatisticTypeList_.size());
            for (ProblemStatisticType problemStatisticType : problemStatisticTypeList_) {
                problemStatisticList.add(problemStatisticType.buildProblemStatistic(problemBenchmarkResult));
            }
        }
        problemBenchmarkResult.setProblemStatisticList(problemStatisticList);
        problemBenchmarkResult.setSingleBenchmarkResultList(new ArrayList<>());
        return problemBenchmarkResult;
    }

    /**
     * Creates the {@link SingleBenchmarkResult} (and its sub-singles with their pure
     * single statistics) linking the given solver and problem benchmarks.
     */
    private void buildSingleBenchmark(SolverBenchmarkResult solverBenchmarkResult,
            ProblemBenchmarkResult problemBenchmarkResult) {
        SingleBenchmarkResult singleBenchmarkResult =
                new SingleBenchmarkResult(solverBenchmarkResult, problemBenchmarkResult);
        buildSubSingleBenchmarks(singleBenchmarkResult, solverBenchmarkResult.getSubSingleCount());
        for (SubSingleBenchmarkResult subSingleBenchmarkResult : singleBenchmarkResult.getSubSingleBenchmarkResultList()) {
            subSingleBenchmarkResult.setPureSubSingleStatisticList(new ArrayList<>(
                    config.getSingleStatisticTypeList() == null ? 0 : config.getSingleStatisticTypeList().size()));
        }
        if (config.getSingleStatisticTypeList() != null) {
            for (SingleStatisticType singleStatisticType : config.getSingleStatisticTypeList()) {
                for (SubSingleBenchmarkResult subSingleBenchmarkResult : singleBenchmarkResult
                        .getSubSingleBenchmarkResultList()) {
                    subSingleBenchmarkResult.getPureSubSingleStatisticList().add(
                            singleStatisticType.buildPureSubSingleStatistic(subSingleBenchmarkResult));
                }
            }
        }
        singleBenchmarkResult.initSubSingleStatisticMaps();
        solverBenchmarkResult.getSingleBenchmarkResultList().add(singleBenchmarkResult);
        problemBenchmarkResult.getSingleBenchmarkResultList().add(singleBenchmarkResult);
    }

    /** Populates the parent with {@code subSingleCount} indexed sub-single results. */
    private void buildSubSingleBenchmarks(SingleBenchmarkResult parent, int subSingleCount) {
        List<SubSingleBenchmarkResult> subSingleBenchmarkResultList = new ArrayList<>(subSingleCount);
        for (int i = 0; i < subSingleCount; i++) {
            SubSingleBenchmarkResult subSingleBenchmarkResult = new SubSingleBenchmarkResult(parent, i);
            subSingleBenchmarkResultList.add(subSingleBenchmarkResult);
        }
        parent.setSubSingleBenchmarkResultList(subSingleBenchmarkResultList);
    }
}
# NOTE: removed a stray "<gh_stars>10-100" residue line that made the file a syntax error.
import os.path
import os.path as osp
import sys
sys.path.append(osp.dirname(osp.dirname(osp.abspath(__file__))))
from collections import deque
from tqdm import *

import click
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from PIL import Image

from options.fcn_options import BaseOptions
from models.fcn8 import VGG16_FCN8s
import data
import pdb


def to_tensor_raw(im):
    """Convert a PIL image (or array-like) of integer labels to a LongTensor."""
    return torch.from_numpy(np.array(im, np.int64, copy=False))


def fmt_array(arr, fmt=','):
    """Join a 1-D array into a string of 3-decimal-place values separated by `fmt`."""
    strs = ['{:.3f}'.format(x) for x in arr]
    return fmt.join(strs)


def fast_hist(a, b, n):
    """Accumulate an n x n confusion matrix between label arrays `a` (ground
    truth) and `b` (prediction), ignoring entries of `a` outside [0, n)."""
    k = (a >= 0) & (a < n)
    return np.bincount(n * a[k].astype(int) + b[k], minlength=n**2).reshape(n, n)


def result_stats(hist):
    """Derive overall pixel accuracy, per-class accuracy, per-class IoU and
    frequency-weighted IoU (all in percent) from a confusion matrix."""
    acc_overall = np.diag(hist).sum() / hist.sum() * 100
    acc_percls = np.diag(hist) / (hist.sum(1) + 1e-8) * 100
    iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist) + 1e-8) * 100
    freq = hist.sum(1) / hist.sum()
    fwIU = (freq[freq > 0] * iu[freq > 0]).sum()
    return acc_overall, acc_percls, iu, fwIU


ignore_label = 255
# Raw label id -> train id mapping (19 evaluation classes; everything else ignored).
# NOTE(review): looks like the Cityscapes id->trainId convention — confirm against the dataset loader.
id2label = {-1: ignore_label, 0: ignore_label, 1: ignore_label, 2: ignore_label,
            3: ignore_label, 4: ignore_label, 5: ignore_label, 6: ignore_label,
            7: 0, 8: 1, 9: ignore_label, 10: ignore_label, 11: 2, 12: 3, 13: 4,
            14: ignore_label, 15: ignore_label, 16: ignore_label, 17: 5,
            18: ignore_label, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12,
            26: 13, 27: 14, 28: 15, 29: ignore_label, 30: ignore_label,
            31: 16, 32: 17, 33: 18}

# parse options
opt = BaseOptions().parse()

# print options to help debugging
print(' '.join(sys.argv))

# load the dataset
dataloader = data.create_dataloader(opt)

net = VGG16_FCN8s(num_cls=opt.label_nc, pretrained=False)
net.load_state_dict(torch.load(opt.model_path,
                               map_location='cuda:{}'.format(opt.gpu_ids[0])))
net.cuda()
net.eval()

# 19 evaluation classes; keep in sync with id2label above.
hist = np.zeros((19, 19))
iterations = tqdm(enumerate(dataloader))
for i, data_i in iterations:
    # forward pass and accumulate the confusion matrix
    im = data_i['image_seg'].cuda()
    label = data_i['label'].squeeze(1)
    # BUG FIX: the network was previously run forward twice per batch
    # (`preds = net(im)` whose result was immediately discarded, then
    # `score = net(im).data`). One forward pass under no_grad suffices and
    # produces identical results.
    with torch.no_grad():
        score = net(im).data
    _, preds = torch.max(score, 1)

    hist += fast_hist(label.numpy().flatten(),
                      preds.cpu().numpy().flatten(),
                      19)
    acc_overall, acc_percls, iu, fwIU = result_stats(hist)
    iterations.set_postfix({'mIoU': ' {:0.2f} fwIoU: {:0.2f} pixel acc: {:0.2f} per cls acc: {:0.2f}'.format(
        np.nanmean(iu), fwIU, acc_overall, np.nanmean(acc_percls))})
print()
#print(','.join(classes))
print(fmt_array(iu))
print(np.nanmean(iu), fwIU, acc_overall, np.nanmean(acc_percls))
print(np.mean(iu))
#print('Errors:', errs)
Can an experiment be replicated in a mechanical fashion without considering the processes underlying the initial results? Here I will consider a non-replication of Saccade Induced Retrieval Enhancement (SIRE) and argue that it results from focusing on statistical instead of on substantive process hypotheses. Particularly the theoretical integration of SIRE with Eye-Movement Desensitization and Reprocessing (EMDR) therapy, provides clues about when the memory enhancement should occur. A relatively large memory enhancement effect in participants with a consistent (i.e., extreme right or left) handedness should be observed, (a) when explicitly instructed to retrieve and imagine the memories during the eye manipulation, and (b) for emotionally negative material. A finer theoretical analysis may thus well explain the contrast between the original SIRE studies and the non-replication. Also the findings from preregistered confirmatory research (i.e., focusing solely on statistical hypotheses) should be considered preliminary, representing shifts on a gradual scale of evidence, and awaiting interpretation in terms of theoretical hypotheses. Stronger, but still not definitive, conclusions can better be postponed until after multi-study meta-analyses with theoretically motivated moderator variables have been performed. “There are many hypotheses in science which are wrong. That’s perfectly all right; they’re the aperture to finding out what’s right. Science is a self-correcting process. To be accepted, new ideas must survive the most rigorous standards of evidence and scrutiny.” Carl Sagan (1990, Cosmos: A Personal Voyage, Heaven, and Hell [Episode 4] 33 min 20 s) Many published research findings are undoubtedly ‘false’ (i.e., incorrect), also when bias or questionable research practices (QRPs) are completely absent (Ioannidis, 2005). An even larger number cannot be reproduced in replication research (Open Science Collaboration, 2015). 
To remedy this undesirable situation, Wagenmakers et al. (2012) advocated that researchers should preregister their studies and specify their statistical tests in advance (i.e., "confirmatory" research). Only for this type of purely confirmatory research would the common statistical tests be valid. Although these authors note that there is nothing wrong with their other type of research (i.e., "exploratory" research), as long as this is explicitly acknowledged, they do associate exploratory research with "wonky" statistics. Confirmatory research procedures may to some extent be able to reduce bias and QRPs, but may still not be capable of turning a majority of false published findings into a minority ("…the pure gold standard is unattainable." p. 700, Ioannidis, 2005). Tests serve as a means for making short-term decisions about statistical hypotheses, which in turn can each correspond to an infinitely large number of different substantive process hypotheses. Such decisions may be highly fallible due to the occurrence not only of bias and QRPs but also of inadvertent technical and data processing errors. Even if the effect exists, moreover, it is expected to not become significant a number of times (i.e., the power never equals one), and the false negative rate may even be higher than the false positive rate (cf, Fiedler et al., 2012). No conclusion about non-significant effects can be drawn either way, and probably a preference for the initial hypothesis should be kept (cf, Dienes, 2014). Even if it becomes significant moreover, the finding may frequently be false (i.e., without any bias or QRPs; Ioannidis, 2005). Stroebe et al. (2012) were equally pessimistic about the self-corrective power in psychology and other fields of science (e.g., even physics), but limited their analysis to scientific fraud cases for which they noted that short-term self-correction by replication and peer review did not seem to work.
Confirmatory research practices would thus only filter out a percentage of false findings (and miss many “true” findings), but the remainder probably requires a longer and more gradual process of theoretical considerations and comparisons to other findings. I would argue that self-correction in science, also of bona-fide but false claims, mostly does not result from active discussions or confirmatory research, but from a more passive quasi-Darwinian selection of ideas and hypotheses working on longer time scales (e.g., generations of scientists). Evolutionary development probably represents the most powerful optimization process available, and may well also apply to science (cf, Holland, 1975; Dawkins, 1986). The above fallibility of statistical tests severely limits their contribution to the quasi-evolutionary selection process. More often, false hypotheses are ignored in the long run (i.e., become “extinct”), whereas hypotheses that are more consistently supported by the evidence and fit in ongoing discussions have higher chances of survival and reproduction. The single-experiment support or rejection of a statistical hypothesis in the purely confirmatory view can better be replaced by a multi-experiment weighing of psychological hypotheses, which can be represented as different levels of a theoretically motivated moderator variable in a meta-analysis (cf, Ioannidis, 2005). Even after a meta-analysis, one cannot be completely sure that one’s decisions about particular hypotheses are “true.” This is also not too dissimilar from the optimization process performed by evolution. Scientific development can also get stuck in local optima, not being able to reach even fitter solutions. Replication studies conforming to confirmatory standards may still be useful instruments, as long as they are sufficiently theoretically informed. 
In a purely mechanical view on replication, however, researchers try to reproduce statistical outcomes of tasks rather than predictions of well-specified theories (cf, Klein, 2014). These replicators run the risk of neglecting important moderators that may also not have been recognized explicitly in the initial, to-be-replicated study. Such a hidden variable may inadvertently have been set to different values in the original and the replication studies, which can even lead to opposing outcomes. If in this case falsification is erroneously concluded from non-replication, this may hamper the development of science rather than fostering it. It is certainly true that the hidden moderators invoked by non-replicated researchers may sometimes appear trivial (e.g., testing in cubicles), and unrelated to theory (for this critique, see Yong, 2012; Klein, 2014), but I will discuss an example of a non-replication where these variables could have been derived a priori from prominent theories in the field. The non-replication by Matzke et al. (2015) of Saccade-Induced Retrieval Enhancement (SIRE; e.g., Lyle et al., 2008) seems to suffer from such a theoretical neglect. Determining whether the initial result or the non-replication is “false” does not seem possible by statistical tests alone, but also requires consideration of the underlying process hypotheses and their associated hidden variables. Eye Movements, Memory, and Emotion Two main fields of eye movement (EM) research, sharing an interest on memory processing after short periods of EMs, were linked by Matzke et al. (2015). SIRE investigates the enhancement of predominantly emotionally neutral memories after executing EMs (e.g., Lyle et al., 2008). Eye Movement Desensitization and Reprocessing therapy (EMDR; Shapiro, 1989) deals with the emotional processing of traumatic and anxious memories due to EMs (e.g., Armstrong and Vaughan, 1996; Lee and Cuijpers, 2013). 
The original SIRE studies did not refer to EMDR, although the eye manipulation, involving a 30 s period of EMs at a 1 s pace, was very similar to the therapeutic procedure. The to-be-retrieved material, moreover, did not consist of traumatic memories, as in EMDR therapy, but of low-to-medium frequency, largely affectively neutral, words. In contrast to the growing confidence in the effectiveness of EMDR (e.g., van den Hout and Engelhard, 2012; Lee and Cuijpers, 2013), the evidence for SIRE has suffered from the non-replication by Matzke et al. (2015). These authors joined in an adversarial-collaboration replication study as proponents or skeptics of SIRE and could not reproduce the memory enhancement obtained by Lyle et al. (2008). Bayesian statistics revealed that the observed data were 15 times more likely under H 0 (i.e., no difference) than under H 1 (i.e., a difference in memory performance between eye manipulation conditions). The proponents in this study were not convinced by this single failure to replicate, but the skeptics even raised the possibility of bias and QRPs on the side of the SIRE research community to explain the initial finding. This conclusion does not seem warranted due to the high rate of false positives (cf, Ioannidis, 2005) and false negatives (cf, Fiedler et al., 2012) even in the absence of bias and QRPs. In addition, it disregards theoretical reasons for the discrepancy. Only one account for SIRE was considered in the non-replication (i.e., the hemispheric interaction hypothesis; Lyle et al., 2008, 2012), which had previously been dismissed by the proponents in the adversarial collaboration (Samara et al., 2011). 
Other influential accounts, primarily for EMDR, such as the working memory account (Andrade et al., 1997) or the orienting response account (Armstrong and Vaughan, 1996; Stickgold, 2002), as well as the newer top-down attentional control account from the SIRE domain (Edlin and Lyle, 2013; Lyle and Edlin, 2015) were completely ignored. Elsewhere (Phaf, submitted), I have identified crucial hidden variables based on the linking of theoretical accounts for SIRE and EMDR that may well explain the contrast between the original SIRE findings and the non-replication. Two variables suggest themselves from the application of EMDR accounts to SIRE. Neither the retrieval, and re-imagining, during EMs, nor the emotionality of the memories were deemed important in SIRE research. Matzke et al. (2015) even explicitly suppressed the former influences by including a recency buffer at the end of the study list, and moreover strictly selected for affectively neutral words. Because Lyle et al. (2008, 2012) had the EMs performed immediately after study, some recently presented words may still have been active during the EMs. Also the absence of selection for neutrality here meant that there could have been an unknown proportion of negative words in the list. In a non-preregistered (i.e., exploratory, in the statistical classification of Wagenmakers et al., 2012) study an explicit retrieval instruction during the eye manipulation, and the strict selection of negative material, have strongly amplified memory enhancement, far exceeding the effect sizes commonly reported for SIRE (Phaf, submitted). However, also this experiment cannot yield conclusive evidence concerning the crucial hidden factors, but should be followed up by further research that explicitly compares instructions to re-imagine with attempts to suppress such retrieval.
The control over, or lack of, or attempts to actively suppress, memory re-activation during EMs could then serve as a moderator variable in meta-analyses of SIRE. The valence of the studied material, as well as the absence of control over valence, could be another moderator variable. To corroborate the present hypotheses, the largest effect sizes should be obtained with memory (re-activation) during EMs and for negative material. Instead of getting bogged down in a statistical impasse, such, probably exploratory, research would eventually advance our understanding of SIRE and may even help to improve EMDR. Replication Requires Theory Statistical testing is not a goal in itself in Psychology, but the development of theory is. The confirmatory type of research proposed by Wagenmakers et al. (2012) tries to validate the statistical tests, but does not necessarily provide meaning to the results. Without a theoretical specification of the hypotheses even significant findings can mean anything, and their application (e.g., in case of practical interventions) may remain “magical” (as has been argued for EMDR, McNally, 1999). The non-replication of Matzke et al. (2015) provides an example of not sufficiently addressing theory. Process hypotheses could have been derived here not only from EMDR but also from other potential sources (e.g., visual attention, working memory). This a-theoretical stance is fostered by an over-reliance on statistical tests. The practice of only describing test statistics but not actual results (e.g., means and measures of variance) in results sections of research papers (e.g., many studies had to be excluded for this reason from the meta-analysis of Phaf et al., 2014) further illustrates the frequent prioritization of mechanical statistical testing over theoretical analysis. The emphasis in these papers should shift from establishing that “something is there” to estimating and explaining what exactly is happening in the results. 
Too often statistical testing acts as a stop criterion, which consists of the simple decision rule that an effect is there if it is significant and not there if it is non-significant, taken to indicate that no further theoretical analysis is needed. The non-replication of Matzke et al. (2015) may have reached this stop criterion even earlier, not after the tests were performed but in the initial stage when the tests were planned and preregistered. Even more theoretical work is required, however, after non-significance than after significance. If one considers a theoretical hypothesis to be refuted by non-significance, a superior alternative should always be formulated according to modern philosophy of science (e.g., Lakatos, 1970). There can be no hypothesis abandonment without hypothesis replacement. The utilization of the stop criterion distinguishes mechanical replication attempts of statistical hypotheses from theoretically informed replication attempts of substantive hypotheses. Some researchers even use it as a tool for relieving them from the burden of having to delve into a largely confusing abundance of prior findings and hypotheses. The stop criterion frequently results in what Ioannidis (2005) calls the Proteus phenomenon that squarely contradicting, but both significant, sets of results are published shortly after another, sometimes even in the same journal, without referring to the other. The opposing findings are not necessarily caused by bias or QRPs in one of the studies, but may simply reflect the majority of bona-fide significant findings being false, as Ioannidis argues. Another unfortunate consequence of this criterion is that the same research is often repeated over and over again (i.e., “the wheel is reinvented”), sometimes with slight modifications or (e.g., neuro-imaging) additions, while the researchers remain unaware of previous work. 
Due to the frequent application of the statistical stop criterion for theoretical analysis, psychology often does not seem to learn from its own research. In terms of substantive hypotheses, classical null hypothesis statistical testing performs a kind of inverse, rather than direct, falsification. Instead of trying to falsify a concrete hypothesis, one tries to establish evidence against there being nothing there. After rejecting the null hypothesis, one claims that this rejection supports one’s proposed hypothesis, which may take any form other than the null. The H 1 thus extends to an infinite range of theories, and could better be renamed H ∞ to recognize the theoretical indifference of this hypothesis. This contrasts sharply with the dominant falsification practice in for instance physics. Here a non-trivial hypothesis is disconfirmed when the values predicted by theory fall outside the uncertainty interval around the observed results (e.g., Taylor, 1982). This approach compares predicted and actual results and concludes to non-falsification in the absence rather than presence of a difference. The physics approach to data analysis also entails more attention for measurement accuracy than in psychology. Non-significance in classical null hypothesis statistical testing more often indicates a lack of measurement accuracy than an absence of difference, however small it may be (cf, Cohen, 1990). Physical theories are undoubtedly among the most numerical and highly developed in the whole of science, and therefore probably better suited to this approach than psychological theories. The rigid application of statistical hypothesis-testing, however, seems to have aggravated the neglect of theory in psychology. A single experiment just cannot serve to decide conclusively whether a claim is false or not (cf, Hauer, 2004; Ioannidis, 2005). It merely adds weight, proportional to the accuracy of its measurements, to one or the other position.
A publication of a new effect should be considered suggestive, but certainly not definitive “proof” (cf, Phaf et al., 2014). In the words of Medawar (1991): “In the outcome science is not a collection of facts or of unquestionable generalizations, but a logically connected network of hypotheses which represent our current opinion about what the real world is like.” (p. 98) Scientific exaggeration is often required by funding agencies for research marketing purposes (also called “valorization” at Dutch universities), but may induce QRPs and even fraud. Scientific prudence and modesty seem better ways to reach a durable development of science. Confirmatory researchers may inadvertently add to this exaggeration, because they are inclined to think of science as collecting conclusive, sometimes even “proven,” facts, whereas history has shown it to consist of ongoing discussions with continuous weight shifts between alternative hypotheses (cf, Lakatos, 1970). If null hypothesis statistical testing detracts from psychological hypotheses and even induces a false sense of certainty, why not abandon null hypothesis statistical testing altogether (cf, Cumming, 2014)? The reporting of only effect sizes and confidence intervals (CIs) may actually reduce publication bias, because the latter is based more often on significance levels than on effect sizes (cf, Simonsohn et al., 2014). In addition, these estimation statistics are more informative, because they, similar to physics, focus on what the effect is rather than on what it is not. CIs should be used as an indication of measurement accuracy rather than for making decisions on whether some unspecified “effect” is there or not (e.g., contains zero; see Gardner and Altman, 1986). The latter decisions are highly fallible (a majority is probably “false,” see Ioannidis, 2005; Fiedler et al., 2012), and we need other, more theoretical, arguments to determine the level of support for a hypothesis provided by a set of results. 
Stronger, but still not infallible, conclusions can better be postponed until after meta-analyses on the proposed hypotheses have been performed (Schmidt, 1996). These meta-analyses have the additional advantage of identifying publication bias and being able to correct for it with the Trim-and-Fill method (Duval and Tweedie, 2000), or possibly with the p-curve method (Simonsohn et al., 2014). Only when effect size and the extent of publication bias can be judged in a meta-analysis, one can have more confidence in a finding. The primary aim of this comment is to juxtapose the statistically oriented approach and a more theoretically oriented approach. The statistical approach of Wagenmakers et al. (2012) entails a two-way classification in either exploratory or hypothesis-confirmatory research. The latter type can only have a binary outcome with respect to the decision being made, the hypothesis is either confirmed or not. To arrive at such an outcome, a replication attempt must rely on the original research having uncovered and made explicit all relevant processes (i.e., an exhaustive theoretical analysis). All other types of research fall in the exploratory category, even when they further develop the theory starting from quite specific hypotheses. Merely confirming preregistered hypotheses has, however, never yielded new hypotheses, whereas unexpected findings stimulating further investigations do have that capacity and may even be the royal road to scientific innovation (e.g., Lehrer, 2009). Calling it undirected exploratory research, moreover, also does not do justice to the gradual progress-by-adjustment type of research (cf, Lakatos, 1970). The latter type of research is often guided by well-specified and concrete process hypotheses, which may be far superior to merely expecting a difference.
Although I think we should try to move away from null hypothesis statistical testing (cf, Cumming, 2014), in the meantime a statistical approach to experimental psychology should become more theoretically oriented and include a third category: hypothesis-guided research. Author Contributions The author confirms being the sole contributor of this work and approved it for publication. Conflict of Interest Statement The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest. Acknowledgment I gratefully acknowledge Alexander Krepel and Lotte Mulder for their help, and Dora Matzke for comments on a previous version of the manuscript. References Armstrong, M. S., and Vaughan, K. (1996). An orienting response model of eye movement desensitization. J. Behav. Ther. Exp. Psychiatry 27, 21–32. doi: 10.1016/0005-7916(95)00056-9 CrossRef Full Text | Google Scholar Dawkins, R. (1986). The Blind Watchmaker. New York, NY: Norton. Google Scholar Duval, S., and Tweedie, R. (2000). Trim and fill: a simple funnel-plot–based method of testing and adjusting for publication bias in meta-analysis. Biometrics 56, 455–463. doi: 10.1111/j.0006-341X.2000.00455.x CrossRef Full Text | Google Scholar Edlin, J. M., and Lyle, K. B. (2013). The effect of repetitive saccade execution on the attention network test: enhancing executive function with a flick of the eyes. Brain Cogn. 81, 345–351. doi: 10.1016/j.bandc.2012.12.006 PubMed Abstract | CrossRef Full Text | Google Scholar Holland, J. H. (1975). Adaptation in Natural and Artificial Systems. Ann Arbor, MI: The University of Michigan Press. Google Scholar Klein, S. B. (2014). What can recent replication failures tell us about the theoretical commitments of psychology? Theory Psychol. 24, 326–338. doi: 10.1177/0959354314529616 CrossRef Full Text | Google Scholar Lakatos, I. (1970). 
“Falsification and the methodology of scientific research programmes,” in Criticism and the Growth of Knowledge, eds I. Lakatos and A. Musgrave (Cambridge: Cambridge University Press). Google Scholar Lehrer, J. (2009). Accept Defeat: The Neuroscience of Screwing Up. Available at: http://www.wired.com/2009/12/fail_accept_defeat/2/ [accessed April 14, 2015] Google Scholar Lyle, K. B., and Edlin, J. M. (2015). Why does saccade execution increase episodic memory retrieval? A test of the top-down attentional control hypothesis. Memory 23, 187–202. doi: 10.1080/09658211.2013.877487 PubMed Abstract | CrossRef Full Text | Google Scholar Lyle, K. B., Logan, J. M., and Roediger, H. L. (2008). Eye movements enhance memory for individuals who are strongly right-handed and harm it for individuals who are not. Psychonomic Bull. Rev. 15, 515–520. doi: 10.3758/PBR.15.3.515 PubMed Abstract | CrossRef Full Text | Google Scholar Matzke, D., Nieuwenhuis, S., van Rijn, H., Slagter, H. A., van der Molen, M. W., and Wagenmakers, E. J. (2015). The effect of horizontal eye movements on free recall: a preregistered adversarial collaboration. J. Exp. Psychol. Gen. 144, e1–e15. doi: 10.1037/xge0000038 PubMed Abstract | CrossRef Full Text | Google Scholar Medawar, P. (1991). The Threat and the Glory: Reflections on Science and Scientists. Oxford: Oxford University Press. Google Scholar Samara, Z., Elzinga, B. M., Slagter, H. A., and Nieuwenhuis, S. (2011). Do horizontal saccadic eye movements increase interhemispheric coherence? Investigation of a hypothesized neural mechanism underlying EMDR. Front. Psychiatry 2:4. doi: 10.3389/fpsyt.2011.00004 PubMed Abstract | CrossRef Full Text | Google Scholar Schmidt, F. L. (1996). Statistical significance testing and cumulative knowledge in psychology: implications for training of researchers. Psychol. Methods 1, 115–129. doi: 10.1037/1082-989X.1.2.115 CrossRef Full Text | Google Scholar Shapiro, F. (1989). 
Eye movement desensitization: a new treatment for post-traumatic stress disorder. J. Behav. Ther. Exp. Psychiatry 20, 211–217. doi: 10.1016/0005-7916(89)90025-6 CrossRef Full Text | Google Scholar Taylor, J. R. (1982). An Introduction to Error Analysis: The Study of Uncertainties in Physical Measurements. Sausalito, CA: University Science Books. Google Scholar
def inellipse(pos, center, theta, a, b):
    """Test whether a point lies strictly inside a rotated ellipse.

    Parameters
    ----------
    pos : (x, y) pair
        Coordinates of the point to test.
    center : (x, y) pair
        Coordinates of the ellipse center, in the same units as ``pos``.
    theta : float
        Orientation angle of the ellipse in degrees.
        NOTE(review): the sign of the cross term fixes a specific rotation
        convention (equivalent to rotating by -theta in the usual
        counter-clockwise convention) — confirm against callers.
    a, b : float
        Semi-major and semi-minor axis lengths.  The threshold uses
        ``(a/3600)**2``, so ``a`` and ``b`` are presumably in arcseconds
        while ``pos``/``center`` are in degrees — TODO confirm.

    Returns
    -------
    bool
        True when the point is strictly inside the ellipse boundary.
    """
    cos_t = np.cos(np.radians(theta))
    sin_t = np.sin(np.radians(theta))
    # Axis ratio b/a; squares of the rotation terms are divided by its
    # square to stretch the minor-axis direction.
    axis_ratio = b/a
    px, py = pos
    cx, cy = center
    dx = px - cx
    dy = py - cy
    # Quadratic form of the rotated ellipse, expanded in the unrotated frame.
    quad_form = (
        dx**2*(cos_t**2 + sin_t**2/axis_ratio**2)
        + dy**2*(cos_t**2/axis_ratio**2 + sin_t**2)
        + 2*dx*dy*cos_t*sin_t*(1./axis_ratio**2 - 1.)
    )
    # Compare against the squared semi-major axis converted via /3600
    # (arcsec -> degrees, presumably).
    return quad_form < (a/3600.)**2
import java.util.ArrayList;
import java.util.Collections;
import java.util.Scanner;

public class Main {
    /**
     * Reads from stdin: the number of donut types, the total amount of
     * material available, then the material cost of one donut of each type.
     * Prints the maximum number of donuts that can be made when at least one
     * donut of every type must be made: one of each type first, then the
     * leftover material is spent entirely on the cheapest type.
     */
    public static void main(String[] args) {
        Scanner scanner = new Scanner(System.in);
        // Use primitives and nextInt() instead of the deprecated
        // new Integer(String) boxing constructors with next().
        int numTypeDonuts = scanner.nextInt();
        int donutsMaterials = scanner.nextInt();

        ArrayList<Integer> materialAmount = new ArrayList<>();
        int sum = 0;
        for (int i = 0; i < numTypeDonuts; i++) {
            int amount = scanner.nextInt();
            materialAmount.add(amount);
            sum += amount;
        }

        // Mandatory one-of-each costs `sum`; the remainder buys
        // (leftover / cheapest) extra donuts of the cheapest type.
        int output = (donutsMaterials - sum) / Collections.min(materialAmount) + numTypeDonuts;
        System.out.println(output);
        scanner.close();
    }
}
def _make_time_pass(self, seconds, timeout, time_mock):
    # Simulate the passage of `seconds` on `timeout` by driving a mocked
    # clock instead of sleeping.  The statement order is the protocol:
    #   1. pin the mocked clock to the fixed TIMEOUT_EPOCH reference instant,
    #   2. start the connect timer, which samples the (mocked) current time,
    #   3. jump the clock forward by `seconds`.
    # Returns the same `timeout` object so callers can assert on it directly.
    # NOTE(review): assumes `time_mock` patches the time source that
    # `timeout.start_connect()` reads, and that TIMEOUT_EPOCH is a
    # module-level constant defined elsewhere in this file — confirm.
    time_mock.return_value = TIMEOUT_EPOCH
    timeout.start_connect()
    time_mock.return_value = TIMEOUT_EPOCH + seconds
    return timeout
As more information comes to light and public outcry steadily grows against FirstEnergy, Duke and American Electric Power's respective bailout riders, 12 companies with a large presence in Ohio now also have come out against the utilities. Costco, Lowe's, Staples and nine other companies have submitted a public comment to the Public Utilities Commission of Ohio condemning AEP's rider to bail out its inefficient and underperforming coal plants. These companies, which represent more than 79,000 Ohio employees, support the advancement of a free market for electricity in the state because "well-designed competitive markets produce substantial savings." If the big utilities' riders force us to help pay for their generation costs, it would reduce competition as a whole. If AEP can't survive on its own in Ohio's electrical free market, the problems — and more importantly, the financial losses — belong to them, not the consumers. ARYEH ALEX Columbus
//************************************************************************************************* /*!\brief Constructor for the GeneralTest class test. // // \exception std::runtime_error Operation error detected. */ GeneralTest::GeneralTest() { testIsNan(); testIsUniform(); testIsZero(); testNormalize(); testMinimum(); testMaximum(); testArgmin(); testArgmax(); testL1Norm(); testL2Norm(); testL3Norm(); testL4Norm(); testLpNorm(); testLinfNorm(); testLength(); testMean(); testVar(); testStdDev(); testSoftmax(); testLeftShift(); testRightShift(); testBitand(); testBitor(); testBitxor(); testNot(); testAnd(); testOr(); testGenerate(); testLinspace(); testLogspace(); testUniform(); testZero(); }
. The diagnosis of an immunodeficiency is made after detailed clinical and laboratory investigations. To guide these investigations the Dutch Working Group on Immunodeficiencies had made a protocol for evaluating the functioning of the immune system of patients. The protocol is based on the report 'Immunodeficiency' from a WHO scientific group. This paper discusses the value of laboratory investigations on the following aspects of the immune system: immunoglobulins and their subclasses, primary and secondary antibody responses in vivo, and the complement system. The evaluation of these components of the defense system can firmly establish possibly occurring severe immunodeficiency states. Some cases of chronic and/or recurrent infections, however, remain enigmas that have to be resolved by future investigations on the interactions of infectious agents with the immune system.