content
stringlengths 10
4.9M
|
---|
/**
* Helper method to set the items for this subsection.
* @param subsectionItems The items associated with this subsection.
*/
public void update(List<DownloadHistoryItemWrapper> subsectionItems) {
mSubsectionItems = subsectionItems;
mTotalFileSize = 0;
for (DownloadHistoryItemWrapper item : subsectionItems) {
mTotalFileSize += item.getFileSize();
mLatestUpdateTime = Math.max(mLatestUpdateTime, item.getTimestamp());
}
} |
// Copyright (c) 2017-2018 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package utils
import (
"compress/gzip"
"encoding/json"
"fmt"
"github.com/uber-go/tally"
"github.com/uber/aresdb/common"
"golang.org/x/net/http2"
"golang.org/x/net/http2/h2c"
"golang.org/x/net/netutil"
"net"
"net/http"
"strconv"
"time"
)
const (
	// HTTPContentTypeHeaderKey is the standard Content-Type header name.
	HTTPContentTypeHeaderKey = "Content-Type"
	// HTTPAcceptTypeHeaderKey is the standard Accept header name.
	HTTPAcceptTypeHeaderKey = "Accept"
	// HTTPAcceptEncodingHeaderKey is the standard Accept-Encoding header name.
	HTTPAcceptEncodingHeaderKey = "Accept-Encoding"
	// HTTPContentEncodingHeaderKey is the standard Content-Encoding header name.
	HTTPContentEncodingHeaderKey = "Content-Encoding"
	// HTTPContentTypeApplicationJson is the JSON payload content type value.
	HTTPContentTypeApplicationJson = "application/json"
	// HTTPContentTypeApplicationGRPC is the gRPC payload content type value.
	HTTPContentTypeApplicationGRPC = "application/grpc"
	// HTTPContentTypeUpsertBatch defines the upsert data content type.
	HTTPContentTypeUpsertBatch = "application/upsert-data"
	// HTTPContentTypeHyperLogLog defines the hyperloglog query result content type.
	HTTPContentTypeHyperLogLog = "application/hll"
	// HTTPContentEncodingGzip is the gzip Content-Encoding value.
	HTTPContentEncodingGzip = "gzip"
	// CompressionThreshold is the min number of bytes beyond which we will compress json payload
	CompressionThreshold = 1 << 10
)
// epoch is the Unix epoch in RFC1123 form, used as an already-expired
// Expires value.
var epoch = time.Unix(0, 0).Format(time.RFC1123)

// noCacheHeaders are the response headers that tell clients and proxies not
// to cache the response.
var noCacheHeaders = map[string]string{
	"Expires":         epoch,
	"Cache-Control":   "no-cache, private, max-age=0",
	"Pragma":          "no-cache",
	"X-Accel-Expires": "0",
}

// etagHeaders are the conditional request headers removed by NoCache so the
// wrapped handler never serves a conditional (304) response.
var etagHeaders = []string{
	"ETag",
	"If-Modified-Since",
	"If-Match",
	"If-None-Match",
	"If-Range",
	"If-Unmodified-Since",
}

// NoCache wraps h so that every response carries no-cache headers and any
// conditional/ETag request headers are stripped before h runs.
func NoCache(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Header.Del is a no-op for absent keys, so no presence check needed.
		for _, name := range etagHeaders {
			r.Header.Del(name)
		}
		for name, value := range noCacheHeaders {
			w.Header().Set(name, value)
		}
		h.ServeHTTP(w, r)
	})
}
// GetOrigin reports the calling service of the request, preferring the
// RPC-Caller header, then X-Uber-Origin, and finally "UNKNOWN" when neither
// is present.
func GetOrigin(r *http.Request) string {
	for _, header := range []string{"RPC-Caller", "X-Uber-Origin"} {
		if origin := r.Header.Get(header); origin != "" {
			return origin
		}
	}
	return "UNKNOWN"
}
// LimitServe starts an h2c-enabled HTTP server on the given port, accepting
// at most httpCfg.MaxConnections concurrent connections. It blocks forever
// and exits the process via Fatal on listen or serve errors.
func LimitServe(port int, handler http.Handler, httpCfg common.HTTPConfig) {
	raw, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
	if err != nil {
		GetLogger().Fatal(err)
	}
	defer raw.Close()
	limited := netutil.LimitListener(raw, httpCfg.MaxConnections)
	srv := &http.Server{
		ReadTimeout:  time.Duration(httpCfg.ReadTimeOutInSeconds) * time.Second,
		WriteTimeout: time.Duration(httpCfg.WriteTimeOutInSeconds) * time.Second,
		// h2c serves HTTP/2 over cleartext TCP alongside HTTP/1.x.
		Handler: h2c.NewHandler(handler, &http2.Server{}),
	}
	GetLogger().Fatal(srv.Serve(limited))
}
// LimitServeAsync starts an h2c-enabled HTTP server on the given port in a
// background goroutine, accepting at most httpCfg.MaxConnections concurrent
// connections. The returned channel receives the terminal error from Serve,
// and the returned *http.Server can be used to shut the server down.
func LimitServeAsync(port int, handler http.Handler, httpCfg common.HTTPConfig) (chan error, *http.Server) {
	raw, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
	if err != nil {
		GetLogger().Fatal(err)
	}
	limited := netutil.LimitListener(raw, httpCfg.MaxConnections)
	srv := &http.Server{
		ReadTimeout:  time.Duration(httpCfg.ReadTimeOutInSeconds) * time.Second,
		WriteTimeout: time.Duration(httpCfg.WriteTimeOutInSeconds) * time.Second,
		Handler:      h2c.NewHandler(handler, &http2.Server{}),
	}
	errChan := make(chan error)
	go func() {
		defer limited.Close()
		errChan <- srv.Serve(limited)
	}()
	return errChan, srv
}
// HandlerFunc defines an http handler operating on the decorated
// *ResponseWriter.
type HandlerFunc func(rw *ResponseWriter, r *http.Request)

// HTTPHandlerWrapper decorates a HandlerFunc with additional behavior.
type HTTPHandlerWrapper func(handler HandlerFunc) HandlerFunc

// ApplyHTTPWrappers composes handler with the given wrappers in order (the
// last wrapper becomes the outermost) and adapts the result to a standard
// http.HandlerFunc backed by a fresh *ResponseWriter per request.
func ApplyHTTPWrappers(handler HandlerFunc, wrappers ...HTTPHandlerWrapper) http.HandlerFunc {
	wrapped := handler
	for _, wrap := range wrappers {
		wrapped = wrap(wrapped)
	}
	return func(w http.ResponseWriter, r *http.Request) {
		wrapped(NewResponseWriter(w), r)
	}
}
// MetricsLoggingMiddleWareProvider supplies metrics and logging middleware
// for http requests.
type MetricsLoggingMiddleWareProvider struct {
	scope  tally.Scope
	logger common.Logger
}

// NewMetricsLoggingMiddleWareProvider builds a provider that emits metrics to
// scope and log lines to logger.
func NewMetricsLoggingMiddleWareProvider(scope tally.Scope, logger common.Logger) MetricsLoggingMiddleWareProvider {
	return MetricsLoggingMiddleWareProvider{scope: scope, logger: logger}
}
// WithMetrics wraps next so each invocation records a latency timer and a
// per-status-code call counter, both tagged with the handler name and the
// request origin.
func (p *MetricsLoggingMiddleWareProvider) WithMetrics(next HandlerFunc) HandlerFunc {
	handlerName := GetFuncName(next)
	return func(rw *ResponseWriter, r *http.Request) {
		caller := GetOrigin(r)
		timer := p.scope.Tagged(map[string]string{
			metricsTagHandler: handlerName,
			metricsTagOrigin:  caller,
		}).Timer(scopeNameHTTPHandlerLatency).Start()
		next(rw, r)
		timer.Stop()
		// The counter is tagged with the final status code, so it can only
		// be emitted after the handler has run.
		p.scope.Tagged(map[string]string{
			metricsTagHandler:    handlerName,
			metricsTagOrigin:     caller,
			metricsTagStatusCode: strconv.Itoa(rw.statusCode),
		}).Counter(scopeNameHTTPHandlerCall).Inc(1)
	}
}
// WithLogging wraps next so the outcome of every request is logged: failures
// (rw.err set by the handler) at Error level with full context, successes at
// Debug level.
func (p *MetricsLoggingMiddleWareProvider) WithLogging(next HandlerFunc) HandlerFunc {
	return func(rw *ResponseWriter, r *http.Request) {
		next(rw, r)
		if rw.err == nil {
			p.logger.With(
				"request", rw.req,
				"name", r.URL.Path,
			).Debug("request succeeded")
			return
		}
		p.logger.With(
			"request", rw.req,
			"status", rw.statusCode,
			"error", rw.err,
			"method", r.Method,
			"name", r.URL.Path,
		).Errorf("request failed")
	}
}
// setCommonHeaders stamps the response with headers that disable caching for
// both HTTP/1.1 (Cache-Control) and HTTP/1.0 (Pragma, Expires) clients.
func setCommonHeaders(w http.ResponseWriter) {
	for name, value := range map[string]string{
		"Cache-Control": "no-cache, no-store, must-revalidate",
		"Pragma":        "no-cache",
		"Expires":       "0",
	} {
		w.Header().Set(name, value)
	}
}
// ErrorResponse represents error response.
// swagger:response errorResponse
type ErrorResponse struct {
	// Body is the error payload returned to the client.
	//in: body
	Body APIError
}
// ResponseWriter decorates http.ResponseWriter
type ResponseWriter struct {
	http.ResponseWriter
	// statusCode is the status sent to the client (defaults to 200).
	statusCode int
	// req holds the unmarshalled request body, recorded for logging only.
	req interface{}
	// err is the handler error, if any, recorded for logging.
	err error
}
// NewResponseWriter wraps rw in a *ResponseWriter whose status code starts
// out as 200 OK.
func NewResponseWriter(rw http.ResponseWriter) *ResponseWriter {
	w := &ResponseWriter{ResponseWriter: rw}
	w.statusCode = http.StatusOK
	return w
}
// SetRequest stores the unmarshalled request body on the response writer so
// the logging middleware can include it in log lines.
func (s *ResponseWriter) SetRequest(req interface{}) {
	s.req = req
}
// WriteHeader records code and forwards it to the underlying writer.
// Non-positive codes are ignored, keeping the default 200.
func (s *ResponseWriter) WriteHeader(code int) {
	if code <= 0 {
		return
	}
	s.statusCode = code
	s.ResponseWriter.WriteHeader(code)
}
// WriteBytes implements http.ResponseWriter Write for write bytes
// (no-cache headers are applied first; the status stays at its default).
// NOTE(review): the error returned by Write is ignored — presumably
// intentional best-effort semantics; confirm.
func (s *ResponseWriter) WriteBytes(bts []byte) {
	setCommonHeaders(s)
	s.Write(bts)
}
// WriteBytesWithCode writes the given status code and, when non-nil, the
// payload bytes, after applying the common no-cache headers.
func (s *ResponseWriter) WriteBytesWithCode(code int, bts []byte) {
	setCommonHeaders(s)
	s.WriteHeader(code)
	if bts == nil {
		return
	}
	s.Write(bts)
}
// WriteJSONBytes write json bytes with default status ok
// (delegates to WriteJSONBytesWithCode; a non-nil marshalErr turns the
// response into a 500 error body).
func (s *ResponseWriter) WriteJSONBytes(jsonBytes []byte, marshalErr error) {
	s.WriteJSONBytesWithCode(http.StatusOK, jsonBytes, marshalErr)
}
// WriteJSONBytesWithCode write json bytes and marshal error to response
func (s *ResponseWriter) WriteJSONBytesWithCode(code int, jsonBytes []byte, marshalErr error) {
	s.Header().Set(HTTPContentTypeHeaderKey, HTTPContentTypeApplicationJson)
	// A marshal failure overrides the caller's payload and code with a 500
	// error body describing the failure.
	if marshalErr != nil {
		jsonMarshalErrorResponse := ErrorResponse{}
		code = http.StatusInternalServerError
		jsonMarshalErrorResponse.Body.Code = code
		jsonMarshalErrorResponse.Body.Message = "failed to marshal object"
		jsonMarshalErrorResponse.Body.Cause = marshalErr
		// ignore this error since this should not happen
		jsonBytes, _ = json.Marshal(jsonMarshalErrorResponse.Body)
	}
	// Nothing to write: no body and no status line are emitted at all.
	// NOTE(review): this also skips WriteHeader(code) — confirm callers do
	// not rely on the status being sent for empty payloads.
	if jsonBytes == nil {
		return
	}
	// try best effort write with gzip compression
	willCompress := len(jsonBytes) > CompressionThreshold
	if willCompress {
		gw, err := gzip.NewWriterLevel(s, gzip.BestSpeed)
		if err == nil {
			// All headers (including Content-Encoding) must be set before
			// WriteHeader; the deferred Close flushes the gzip stream.
			defer gw.Close()
			s.Header().Set(HTTPContentEncodingHeaderKey, HTTPContentEncodingGzip)
			setCommonHeaders(s)
			s.WriteHeader(code)
			_, _ = gw.Write(jsonBytes)
			return
		}
	}
	// default to normal json response
	s.WriteBytesWithCode(code, jsonBytes)
}
// WriteObject write json object to response
// (serialized as JSON with status 200 OK; a nil obj yields an empty body).
func (s *ResponseWriter) WriteObject(obj interface{}) {
	s.WriteObjectWithCode(http.StatusOK, obj)
}
// WriteObjectWithCode serializes obj as JSON and writes it with the given
// status code; a nil obj produces an empty body with that code.
func (s *ResponseWriter) WriteObjectWithCode(code int, obj interface{}) {
	if obj == nil {
		s.WriteBytesWithCode(code, nil)
		return
	}
	jsonBytes, err := json.Marshal(obj)
	s.WriteJSONBytesWithCode(code, jsonBytes, err)
}
// WriteErrorWithCode records err on the writer and emits an error body with
// the given status code. APIError values are used verbatim as the body;
// other errors contribute only their message. The Code field is always
// overwritten with code.
func (s *ResponseWriter) WriteErrorWithCode(code int, err error) {
	s.err = err
	var resp ErrorResponse
	if apiErr, ok := err.(APIError); ok {
		resp.Body = apiErr
	} else {
		resp.Body.Message = err.Error()
	}
	resp.Body.Code = code
	s.WriteObjectWithCode(code, resp.Body)
}
// WriteError writes err to the response, using the APIError's own status
// code when available and 500 Internal Server Error otherwise.
func (s *ResponseWriter) WriteError(err error) {
	code := http.StatusInternalServerError
	if apiErr, ok := err.(APIError); ok {
		code = apiErr.Code
	}
	s.WriteErrorWithCode(code, err)
}
|
#include "test.h"
#include "testtools.h"
#include "../Codecs/PPM/ppmdecoder.h"
#include "../Core/bitmap.h"
#include <cstring>
BEGIN_SUITE( PpmDecoder )
// Header-only check: an 8-bit grayscale PGM reports Gray8 and 500x500.
BEGIN_TEST(PpmDecoder, TestGray8)
auto pDecoder = std::make_unique<PpmDecoder>();
pDecoder->Attach(GetPathToTestFile("PPM/gray8.pgm"));
EXPECT_EQ(PixelFormat::Gray8, pDecoder->GetPixelFormat());
EXPECT_EQ(500, pDecoder->GetWidth());
EXPECT_EQ(500, pDecoder->GetHeight());
END_TEST
// Header-only check: a 16-bit grayscale PGM reports Gray16 and 500x500.
BEGIN_TEST(PpmDecoder, TestGray16)
auto pDecoder = std::make_unique<PpmDecoder>();
pDecoder->Attach(GetPathToTestFile("PPM/gray16.pgm"));
EXPECT_EQ(PixelFormat::Gray16, pDecoder->GetPixelFormat());
EXPECT_EQ(500, pDecoder->GetWidth());
EXPECT_EQ(500, pDecoder->GetHeight());
END_TEST
// Header-only check: an 8-bit color PPM reports RGB24 and 640x426.
BEGIN_TEST(PpmDecoder, TestRgb24)
auto pDecoder = std::make_unique<PpmDecoder>();
pDecoder->Attach(GetPathToTestFile("PPM/rgb24.ppm"));
EXPECT_EQ(PixelFormat::RGB24, pDecoder->GetPixelFormat());
EXPECT_EQ(640, pDecoder->GetWidth());
EXPECT_EQ(426, pDecoder->GetHeight());
END_TEST
// Header-only check: a 16-bit color PPM reports RGB48 and 500x500.
BEGIN_TEST(PpmDecoder, TestRgb48)
auto pDecoder = std::make_unique<PpmDecoder>();
pDecoder->Attach(GetPathToTestFile("PPM/rgb48.ppm"));
EXPECT_EQ(PixelFormat::RGB48, pDecoder->GetPixelFormat());
EXPECT_EQ(500, pDecoder->GetWidth());
EXPECT_EQ(500, pDecoder->GetHeight());
END_TEST
// Plain (ASCII) and binary encodings of the same 4x4 image must decode to
// identical pixel data.
BEGIN_TEST(PpmDecoder, TestPlain)
auto pDecoder = std::make_unique<PpmDecoder>();
std::string fileNames[2] = { GetPathToTestFile("PPM/plain.ppm"), GetPathToTestFile("PPM/binary.ppm") };
for (auto& fileName : fileNames)
{
    pDecoder->Attach(fileName);
    EXPECT_EQ(PixelFormat::RGB24, pDecoder->GetPixelFormat());
    EXPECT_EQ(4, pDecoder->GetWidth());
    EXPECT_EQ(4, pDecoder->GetHeight());
    auto pBitmap = pDecoder->ReadBitmap();
    EXPECT_EQ(PixelFormat::RGB24, pBitmap->GetPixelFormat());
    EXPECT_EQ(4, pBitmap->GetWidth());
    EXPECT_EQ(4, pBitmap->GetHeight());
    // 4x4 pixels x 3 channels = 48 bytes of expected interleaved RGB data.
    const std::vector<uint8_t> expectedData
    {
        0, 0, 0, 100, 0, 0, 0, 0, 0, 255, 0, 255,
        0, 0, 0, 0, 255, 175, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 15, 175, 0, 0, 0,
        255, 0, 255, 0, 0, 0, 0, 0, 0, 255, 255, 255
    };
    // Compares all 48 bytes starting at scanline 0 — assumes scanlines are
    // stored contiguously with no row padding (TODO confirm).
    EXPECT_EQ(0, memcmp(std::static_pointer_cast<Bitmap<PixelFormat::RGB24>>(pBitmap)->GetPlanarScanline(0), expectedData.data(), expectedData.size()));
}
END_TEST
// The first 16-bit sample of this file is expected to arrive in memory as
// bytes 0x7b, 0x63 and read back as the value 0x637b — presumably the file
// stores big-endian samples that are converted to native little-endian
// (TODO confirm against the decoder's byte-swap logic).
BEGIN_TEST(PpmDecoder, TestByteOrdering)
auto pDecoder = std::make_unique<PpmDecoder>();
pDecoder->Attach(GetPathToTestFile("PPM/IMG_4030.ppm"));
auto pBitmap = pDecoder->ReadBitmap();
auto pScanLine = pBitmap->GetPlanarScanline(0);
char bytes[2] = {pScanLine[0], pScanLine[1]};
EXPECT_EQ(0x7b, bytes[0]);
EXPECT_EQ(0x63, bytes[1]);
auto pGray16Bitmap = std::static_pointer_cast<Bitmap<PixelFormat::Gray16>>(pBitmap);
auto pix = pGray16Bitmap->GetChannel(0,0,0);
EXPECT_EQ(0x637b, pix);
END_TEST
// Reading the same attached file twice must yield identical bitmaps.
BEGIN_TEST (PpmDecoder, ReadTwice)
auto pDecoder = std::make_unique<PpmDecoder>();
pDecoder->Attach(GetPathToTestFile("PPM/rgb24.ppm"));
auto pFirstBitmap = pDecoder->ReadBitmap();
auto pSecondBitmap = pDecoder->ReadBitmap();
EXPECT_TRUE(BitmapsAreEqual(pFirstBitmap, pSecondBitmap));
END_TEST
// Stripe-by-stripe reading must match the stored pattern images.
// NOTE(review): ReadStripe(0) appears to mean "read the remaining rows" —
// confirm against the decoder's ReadStripe contract.
BEGIN_TEST (PpmDecoder, ReadStripes)
auto pDecoder = std::make_unique<PpmDecoder>();
pDecoder->Attach(GetPathToTestFile("PPM/rgb24.ppm"));
EXPECT_TRUE(BitmapsAreEqual(GetPathToPattern("PpmDecoder/ReadStripes/stripe0.ppm"), pDecoder->ReadStripe(200)));
EXPECT_TRUE(BitmapsAreEqual(GetPathToPattern("PpmDecoder/ReadStripes/stripe1.ppm"), pDecoder->ReadStripe(200)));
EXPECT_TRUE(BitmapsAreEqual(GetPathToPattern("PpmDecoder/ReadStripes/stripe2.ppm"), pDecoder->ReadStripe(0)));
END_TEST
// Attaching an empty file must throw std::runtime_error.
BEGIN_TEST(PpmDecoder, TestEmptyFile)
auto f = []()
{
    auto pDecoder = std::make_unique<PpmDecoder>();
    pDecoder->Attach(GetPathToTestFile("PPM/empty.ppm"));
};
ASSERT_THROWS(f, std::runtime_error);
END_TEST
// Attaching a file with a corrupted header must throw std::runtime_error.
BEGIN_TEST(PpmDecoder, TestCorrupted)
auto f = []()
{
    auto pDecoder = std::make_unique<PpmDecoder>();
    pDecoder->Attach(GetPathToTestFile("PPM/corrupted.ppm"));
};
ASSERT_THROWS(f, std::runtime_error);
END_TEST
END_SUITE
|
Russia – if you’re listening – I hope you’re able to find the hacked DCCC emails…
The FBI is investigating a cyber attack on the Democratic Congressional Campaign Committee.
This comes after the hack attack on the DNC earlier this year.
Politico reported:
Hackers apparently affiliated with Russian intelligence have launched a cyberattack targeting donors to the Democratic Party’s House campaign arm, sources and news reports said Thursday night, adding to the troubles unleashed by last week’s disclosure of embarrassing internal emails from the Democratic National Committee.
The latest hack, aimed at the Democratic Congressional Campaign Committee, was perpetrated by one of the two Russian-based groups previously blamed for rifling through the DNC’s computer networks and making off with emails and other documents, a security expert familiar with the latest breach told POLITICO. But this new attack “appears to be a bit different,” the source said.
“It’s part of a broader intelligence collection effort,” the source said, adding: “It’s maybe an attempt to harvest credentials. … It’s not an email grab like the DNC.”
Even so, it ought to stoke anxiety for people throughout the political process, said Jim Manley, a former top spokesman for Senate Democratic leader Harry Reid. |
/**
* Write the scene to a file.
*
* @param location The location of the file.
* @param rig the rig to write to file.
*/
public static void writeRig(File location, Rig rig) {
FileWriter fw;
BufferedWriter bw = null;
try {
if (!location.getParentFile().exists()) {
location.getParentFile().mkdirs();
}
fw = new FileWriter(location);
bw = new BufferedWriter(fw);
writeRig(bw, rig);
} catch (IOException ex) {
Logger.getLogger("DArtE").log(Level.SEVERE, null, ex);
} finally {
try {
bw.close();
} catch (IOException ex) {
Logger.getLogger("DArtE").log(Level.SEVERE, null, ex);
}
}
} |
from pyspark.ml import Estimator
import re
class CustomMeanImputer(Estimator):
    """Replace missing values of non-categorical columns with their mean.

    The per-column means are computed in a single ``agg`` pass over the
    dataset. This class was created because Pyspark 2.1.0 had no built-in
    imputer, and was kept after the upgrade to Pyspark 2.2.0 because the new
    ``Imputer`` class operates column by column, which is slower.
    """

    def __init__(self, cols_to_impute, value=None):
        """
        :param cols_to_impute: list of numeric column names to impute.
        :param value: optional mapping ``{column: fill value}``; normally
            populated by :meth:`fit`.
        """
        super(CustomMeanImputer, self).__init__()
        self.value = value
        self.cols_to_impute = cols_to_impute

    def setParams(self, inputCol=None, cols_to_impute=None, value=None):
        """Update the imputer's parameters in place and return ``self``.

        The previous implementation read ``self.setParams._input_kwargs``,
        an attribute that only exists when the method is decorated with
        ``pyspark.keyword_only``; calling it raised ``AttributeError``.
        ``inputCol`` is kept in the signature for backward compatibility but
        is unused.
        """
        if cols_to_impute is not None:
            self.cols_to_impute = cols_to_impute
        if value is not None:
            self.value = value
        return self

    def getImputeValue(self):
        """Print the imputation configuration (columns and fill values)."""
        # The previous version printed self.inputCol, an attribute that is
        # never set anywhere in this class, raising AttributeError.
        print(self.cols_to_impute, self.value)

    def fit(self, dataset):
        """Compute per-column means and return a fitted imputer instance."""
        cols = self.cols_to_impute
        # Single aggregate pass: {"col": "avg"} for every column to impute.
        mean_dict = dataset.agg({col: "avg" for col in cols}).first().asDict()
        # agg() names its results "avg(col)"; strip the wrapper back to "col".
        self.value = {
            re.findall(r"avg\((.*)\)", key)[0]: round(float(val), 4)
            for key, val in mean_dict.items()
        }
        return CustomMeanImputer(value=self.value, cols_to_impute=self.cols_to_impute)

    def transform(self, dataset):
        """Fill missing values using the means computed by :meth:`fit`."""
        return dataset.na.fill(self.value)
# Read a graph with n vertices and m edges (1-based endpoints on stdin) and,
# for each vertex, print the number of incident edges followed by
# "<vertex> <edge-id>" lines. A vertex with no incident edges gets a single
# synthetic edge with a fresh id (m+1, m+2, ...).
n, m = map(int, input().split())
incident = [[] for _ in range(n)]
for edge_id in range(1, m + 1):
    a, b = map(int, input().split())
    incident[a - 1].append('%i %i' % (a, edge_id))
    incident[b - 1].append('%i %i' % (b, edge_id))
next_edge = m + 1
for v in range(n):
    if incident[v]:
        print(len(incident[v]))
        print('\n'.join(incident[v]))
    else:
        # Isolated vertex: emit one synthetic edge and advance the id counter.
        print(1)
        print('%i %i' % (v + 1, next_edge))
        next_edge += 1
/*
* Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package lib.sun.misc;
import jdk.internal.vm.annotation.ForceInline;
import jdk.internal.misc.VM;
import jdk.internal.ref.Cleaner;
import jdk.internal.reflect.CallerSensitive;
import jdk.internal.reflect.Reflection;
import sun.nio.ch.DirectBuffer;
import java.lang.reflect.Field;
/**
* A collection of methods for performing low-level, unsafe operations.
* Although the class and all methods are public, use of this class is
* limited because only trusted code can obtain instances of it.
*
* <em>Note:</em> It is the resposibility of the caller to make sure
* arguments are checked before methods of this class are
* called. While some rudimentary checks are performed on the input,
* the checks are best effort and when performance is an overriding
* priority, as when methods of this class are optimized by the
* runtime compiler, some or all checks (if any) may be elided. Hence,
* the caller must not rely on the checks and corresponding
* exceptions!
*
* @author <NAME>
* @see #getUnsafe
*/
public final class Unsafe {
static {
Reflection.registerMethodsToFilter(Unsafe.class, "getUnsafe");
}
private Unsafe() {}
private static final Unsafe theUnsafe = new Unsafe();
private static final jdk.internal.misc.Unsafe theInternalUnsafe = jdk.internal.misc.Unsafe.getUnsafe();
/**
* Provides the caller with the capability of performing unsafe
* operations.
*
* <p>The returned {@code Unsafe} object should be carefully guarded
* by the caller, since it can be used to read and write data at arbitrary
* memory addresses. It must never be passed to untrusted code.
*
* <p>Most methods in this class are very low-level, and correspond to a
* small number of hardware instructions (on typical machines). Compilers
* are encouraged to optimize these methods accordingly.
*
* <p>Here is a suggested idiom for using unsafe operations:
*
* <pre> {@code
* class MyTrustedClass {
* private static final Unsafe unsafe = Unsafe.getUnsafe();
* ...
* private long myCountAddress = ...;
* public int getCount() { return unsafe.getByte(myCountAddress); }
* }}</pre>
*
* (It may assist compilers to make the local variable {@code final}.)
*
* @throws SecurityException if the class loader of the caller
* class is not in the system domain in which all permissions
* are granted.
*/
@CallerSensitive
public static Unsafe getUnsafe() {
    Class<?> caller = Reflection.getCallerClass();
    // Only callers loaded by the system-domain (boot) class loader may
    // obtain the singleton; everyone else gets a SecurityException.
    if (!VM.isSystemDomainLoader(caller.getClassLoader()))
        throw new SecurityException("Unsafe");
    return theUnsafe;
}
/// peek and poke operations
/// (compilers should optimize these to memory ops)
// These work on object fields in the Java heap.
// They will not work on elements of packed arrays.
/**
* Fetches a value from a given Java variable.
* More specifically, fetches a field or array element within the given
* object {@code o} at the given offset, or (if {@code o} is null)
* from the memory address whose numerical value is the given offset.
* <p>
* The results are undefined unless one of the following cases is true:
* <ul>
* <li>The offset was obtained from {@link #objectFieldOffset} on
* the {@link java.lang.reflect.Field} of some Java field and the object
* referred to by {@code o} is of a class compatible with that
* field's class.
*
* <li>The offset and object reference {@code o} (either null or
* non-null) were both obtained via {@link #staticFieldOffset}
* and {@link #staticFieldBase} (respectively) from the
* reflective {@link Field} representation of some Java field.
*
* <li>The object referred to by {@code o} is an array, and the offset
* is an integer of the form {@code B+N*S}, where {@code N} is
* a valid index into the array, and {@code B} and {@code S} are
* the values obtained by {@link #arrayBaseOffset} and {@link
* #arrayIndexScale} (respectively) from the array's class. The value
* referred to is the {@code N}<em>th</em> element of the array.
*
* </ul>
* <p>
* If one of the above cases is true, the call references a specific Java
* variable (field or array element). However, the results are undefined
* if that variable is not in fact of the type returned by this method.
* <p>
* This method refers to a variable by means of two parameters, and so
* it provides (in effect) a <em>double-register</em> addressing mode
* for Java variables. When the object reference is null, this method
* uses its offset as an absolute address. This is similar in operation
* to methods such as {@link #getInt(long)}, which provide (in effect) a
* <em>single-register</em> addressing mode for non-Java variables.
* However, because Java variables may have a different layout in memory
* from non-Java variables, programmers should not assume that these
* two addressing modes are ever equivalent. Also, programmers should
* remember that offsets from the double-register addressing mode cannot
* be portably confused with longs used in the single-register addressing
* mode.
*
* @param o Java heap object in which the variable resides, if any, else
* null
* @param offset indication of where the variable resides in a Java heap
* object, if any, else a memory address locating the variable
* statically
* @return the value fetched from the indicated Java variable
* @throws RuntimeException No defined exceptions are thrown, not even
* {@link NullPointerException}
*/
@ForceInline
public int getInt(Object o, long offset) {
return theInternalUnsafe.getInt(o, offset);
}
/**
* Stores a value into a given Java variable.
* <p>
* The first two parameters are interpreted exactly as with
* {@link #getInt(Object, long)} to refer to a specific
* Java variable (field or array element). The given value
* is stored into that variable.
* <p>
* The variable must be of the same type as the method
* parameter {@code x}.
*
* @param o Java heap object in which the variable resides, if any, else
* null
* @param offset indication of where the variable resides in a Java heap
* object, if any, else a memory address locating the variable
* statically
* @param x the value to store into the indicated Java variable
* @throws RuntimeException No defined exceptions are thrown, not even
* {@link NullPointerException}
*/
@ForceInline
public void putInt(Object o, long offset, int x) {
theInternalUnsafe.putInt(o, offset, x);
}
/**
* Fetches a reference value from a given Java variable.
* @see #getInt(Object, long)
*/
@ForceInline
public Object getObject(Object o, long offset) {
return theInternalUnsafe.getObject(o, offset);
}
/**
* Stores a reference value into a given Java variable.
* <p>
* Unless the reference {@code x} being stored is either null
* or matches the field type, the results are undefined.
* If the reference {@code o} is non-null, card marks or
* other store barriers for that object (if the VM requires them)
* are updated.
* @see #putInt(Object, long, int)
*/
@ForceInline
public void putObject(Object o, long offset, Object x) {
theInternalUnsafe.putObject(o, offset, x);
}
/** @see #getInt(Object, long) */
@ForceInline
public boolean getBoolean(Object o, long offset) {
return theInternalUnsafe.getBoolean(o, offset);
}
/** @see #putInt(Object, long, int) */
@ForceInline
public void putBoolean(Object o, long offset, boolean x) {
theInternalUnsafe.putBoolean(o, offset, x);
}
/** @see #getInt(Object, long) */
@ForceInline
public byte getByte(Object o, long offset) {
return theInternalUnsafe.getByte(o, offset);
}
/** @see #putInt(Object, long, int) */
@ForceInline
public void putByte(Object o, long offset, byte x) {
theInternalUnsafe.putByte(o, offset, x);
}
/** @see #getInt(Object, long) */
@ForceInline
public short getShort(Object o, long offset) {
return theInternalUnsafe.getShort(o, offset);
}
/** @see #putInt(Object, long, int) */
@ForceInline
public void putShort(Object o, long offset, short x) {
theInternalUnsafe.putShort(o, offset, x);
}
/** @see #getInt(Object, long) */
@ForceInline
public char getChar(Object o, long offset) {
return theInternalUnsafe.getChar(o, offset);
}
/** @see #putInt(Object, long, int) */
@ForceInline
public void putChar(Object o, long offset, char x) {
theInternalUnsafe.putChar(o, offset, x);
}
/** @see #getInt(Object, long) */
@ForceInline
public long getLong(Object o, long offset) {
return theInternalUnsafe.getLong(o, offset);
}
/** @see #putInt(Object, long, int) */
@ForceInline
public void putLong(Object o, long offset, long x) {
theInternalUnsafe.putLong(o, offset, x);
}
/** @see #getInt(Object, long) */
@ForceInline
public float getFloat(Object o, long offset) {
return theInternalUnsafe.getFloat(o, offset);
}
/** @see #putInt(Object, long, int) */
@ForceInline
public void putFloat(Object o, long offset, float x) {
theInternalUnsafe.putFloat(o, offset, x);
}
/** @see #getInt(Object, long) */
@ForceInline
public double getDouble(Object o, long offset) {
return theInternalUnsafe.getDouble(o, offset);
}
/** @see #putInt(Object, long, int) */
@ForceInline
public void putDouble(Object o, long offset, double x) {
theInternalUnsafe.putDouble(o, offset, x);
}
// These work on values in the C heap.
/**
* Fetches a value from a given memory address. If the address is zero, or
* does not point into a block obtained from {@link #allocateMemory}, the
* results are undefined.
*
* @see #allocateMemory
*/
@ForceInline
public byte getByte(long address) {
return theInternalUnsafe.getByte(address);
}
/**
* Stores a value into a given memory address. If the address is zero, or
* does not point into a block obtained from {@link #allocateMemory}, the
* results are undefined.
*
* @see #getByte(long)
*/
@ForceInline
public void putByte(long address, byte x) {
theInternalUnsafe.putByte(address, x);
}
/** @see #getByte(long) */
@ForceInline
public short getShort(long address) {
return theInternalUnsafe.getShort(address);
}
/** @see #putByte(long, byte) */
@ForceInline
public void putShort(long address, short x) {
theInternalUnsafe.putShort(address, x);
}
/** @see #getByte(long) */
@ForceInline
public char getChar(long address) {
return theInternalUnsafe.getChar(address);
}
/** @see #putByte(long, byte) */
@ForceInline
public void putChar(long address, char x) {
theInternalUnsafe.putChar(address, x);
}
/** @see #getByte(long) */
@ForceInline
public int getInt(long address) {
return theInternalUnsafe.getInt(address);
}
/** @see #putByte(long, byte) */
@ForceInline
public void putInt(long address, int x) {
theInternalUnsafe.putInt(address, x);
}
/** @see #getByte(long) */
@ForceInline
public long getLong(long address) {
return theInternalUnsafe.getLong(address);
}
/** @see #putByte(long, byte) */
@ForceInline
public void putLong(long address, long x) {
theInternalUnsafe.putLong(address, x);
}
/** @see #getByte(long) */
@ForceInline
public float getFloat(long address) {
return theInternalUnsafe.getFloat(address);
}
/** @see #putByte(long, byte) */
@ForceInline
public void putFloat(long address, float x) {
theInternalUnsafe.putFloat(address, x);
}
/** @see #getByte(long) */
@ForceInline
public double getDouble(long address) {
return theInternalUnsafe.getDouble(address);
}
/** @see #putByte(long, byte) */
@ForceInline
public void putDouble(long address, double x) {
theInternalUnsafe.putDouble(address, x);
}
/**
* Fetches a native pointer from a given memory address. If the address is
* zero, or does not point into a block obtained from {@link
* #allocateMemory}, the results are undefined.
*
* <p>If the native pointer is less than 64 bits wide, it is extended as
* an unsigned number to a Java long. The pointer may be indexed by any
* given byte offset, simply by adding that offset (as a simple integer) to
* the long representing the pointer. The number of bytes actually read
* from the target address may be determined by consulting {@link
* #addressSize}.
*
* @see #allocateMemory
*/
@ForceInline
public long getAddress(long address) {
return theInternalUnsafe.getAddress(address);
}
/**
* Stores a native pointer into a given memory address. If the address is
* zero, or does not point into a block obtained from {@link
* #allocateMemory}, the results are undefined.
*
* <p>The number of bytes actually written at the target address may be
* determined by consulting {@link #addressSize}.
*
* @see #getAddress(long)
*/
@ForceInline
public void putAddress(long address, long x) {
theInternalUnsafe.putAddress(address, x);
}
/// wrappers for malloc, realloc, free:
/**
* Allocates a new block of native memory, of the given size in bytes. The
* contents of the memory are uninitialized; they will generally be
* garbage. The resulting native pointer will never be zero, and will be
* aligned for all value types. Dispose of this memory by calling {@link
* #freeMemory}, or resize it with {@link #reallocateMemory}.
*
* <em>Note:</em> It is the responsibility of the caller to make
* sure arguments are checked before the methods are called. While
* some rudimentary checks are performed on the input, the checks
* are best effort and when performance is an overriding priority,
* as when methods of this class are optimized by the runtime
* compiler, some or all checks (if any) may be elided. Hence, the
* caller must not rely on the checks and corresponding
* exceptions!
*
* @throws RuntimeException if the size is negative or too large
* for the native size_t type
*
* @throws OutOfMemoryError if the allocation is refused by the system
*
* @see #getByte(long)
* @see #putByte(long, byte)
*/
@ForceInline
public long allocateMemory(long bytes) {
return theInternalUnsafe.allocateMemory(bytes);
}
/**
* Resizes a new block of native memory, to the given size in bytes. The
* contents of the new block past the size of the old block are
* uninitialized; they will generally be garbage. The resulting native
* pointer will be zero if and only if the requested size is zero. The
* resulting native pointer will be aligned for all value types. Dispose
* of this memory by calling {@link #freeMemory}, or resize it with {@link
* #reallocateMemory}. The address passed to this method may be null, in
* which case an allocation will be performed.
*
* <em>Note:</em> It is the responsibility of the caller to make
* sure arguments are checked before the methods are called. While
* some rudimentary checks are performed on the input, the checks
* are best effort and when performance is an overriding priority,
* as when methods of this class are optimized by the runtime
* compiler, some or all checks (if any) may be elided. Hence, the
* caller must not rely on the checks and corresponding
* exceptions!
*
* @throws RuntimeException if the size is negative or too large
* for the native size_t type
*
* @throws OutOfMemoryError if the allocation is refused by the system
*
* @see #allocateMemory
*/
@ForceInline
public long reallocateMemory(long address, long bytes) {
return theInternalUnsafe.reallocateMemory(address, bytes);
}
/**
* Sets all bytes in a given block of memory to a fixed value
* (usually zero).
*
* <p>This method determines a block's base address by means of two parameters,
* and so it provides (in effect) a <em>double-register</em> addressing mode,
* as discussed in {@link #getInt(Object,long)}. When the object reference is null,
* the offset supplies an absolute base address.
*
* <p>The stores are in coherent (atomic) units of a size determined
* by the address and length parameters. If the effective address and
* length are all even modulo 8, the stores take place in 'long' units.
* If the effective address and length are (resp.) even modulo 4 or 2,
* the stores take place in units of 'int' or 'short'.
*
* <em>Note:</em> It is the responsibility of the caller to make
* sure arguments are checked before the methods are called. While
* some rudimentary checks are performed on the input, the checks
* are best effort and when performance is an overriding priority,
* as when methods of this class are optimized by the runtime
* compiler, some or all checks (if any) may be elided. Hence, the
* caller must not rely on the checks and corresponding
* exceptions!
*
* @throws RuntimeException if any of the arguments is invalid
*
* @since 1.7
*/
@ForceInline
public void setMemory(Object o, long offset, long bytes, byte value) {
theInternalUnsafe.setMemory(o, offset, bytes, value);
}
/**
* Sets all bytes in a given block of memory to a fixed value
* (usually zero). This provides a <em>single-register</em> addressing mode,
* as discussed in {@link #getInt(Object,long)}.
*
* <p>Equivalent to {@code setMemory(null, address, bytes, value)}.
*/
@ForceInline
public void setMemory(long address, long bytes, byte value) {
theInternalUnsafe.setMemory(address, bytes, value);
}
/**
* Sets all bytes in a given block of memory to a copy of another
* block.
*
* <p>This method determines each block's base address by means of two parameters,
* and so it provides (in effect) a <em>double-register</em> addressing mode,
* as discussed in {@link #getInt(Object,long)}. When the object reference is null,
* the offset supplies an absolute base address.
*
* <p>The transfers are in coherent (atomic) units of a size determined
* by the address and length parameters. If the effective addresses and
* length are all even modulo 8, the transfer takes place in 'long' units.
* If the effective addresses and length are (resp.) even modulo 4 or 2,
* the transfer takes place in units of 'int' or 'short'.
*
* <em>Note:</em> It is the responsibility of the caller to make
* sure arguments are checked before the methods are called. While
* some rudimentary checks are performed on the input, the checks
* are best effort and when performance is an overriding priority,
* as when methods of this class are optimized by the runtime
* compiler, some or all checks (if any) may be elided. Hence, the
* caller must not rely on the checks and corresponding
* exceptions!
*
* @throws RuntimeException if any of the arguments is invalid
*
* @since 1.7
*/
@ForceInline
public void copyMemory(Object srcBase, long srcOffset,
Object destBase, long destOffset,
long bytes) {
theInternalUnsafe.copyMemory(srcBase, srcOffset, destBase, destOffset, bytes);
}
/**
* Sets all bytes in a given block of memory to a copy of another
* block. This provides a <em>single-register</em> addressing mode,
* as discussed in {@link #getInt(Object,long)}.
*
* Equivalent to {@code copyMemory(null, srcAddress, null, destAddress, bytes)}.
*/
@ForceInline
public void copyMemory(long srcAddress, long destAddress, long bytes) {
theInternalUnsafe.copyMemory(srcAddress, destAddress, bytes);
}
/**
* Disposes of a block of native memory, as obtained from {@link
* #allocateMemory} or {@link #reallocateMemory}. The address passed to
* this method may be null, in which case no action is taken.
*
* <em>Note:</em> It is the responsibility of the caller to make
* sure arguments are checked before the methods are called. While
* some rudimentary checks are performed on the input, the checks
* are best effort and when performance is an overriding priority,
* as when methods of this class are optimized by the runtime
* compiler, some or all checks (if any) may be elided. Hence, the
* caller must not rely on the checks and corresponding
* exceptions!
*
* @throws RuntimeException if any of the arguments is invalid
*
* @see #allocateMemory
*/
@ForceInline
public void freeMemory(long address) {
theInternalUnsafe.freeMemory(address);
}
/// random queries
/**
* This constant differs from all results that will ever be returned from
* {@link #staticFieldOffset}, {@link #objectFieldOffset},
* or {@link #arrayBaseOffset}.
*/
public static final int INVALID_FIELD_OFFSET = jdk.internal.misc.Unsafe.INVALID_FIELD_OFFSET;
/**
* Reports the location of a given field in the storage allocation of its
* class. Do not expect to perform any sort of arithmetic on this offset;
* it is just a cookie which is passed to the unsafe heap memory accessors.
*
* <p>Any given field will always have the same offset and base, and no
* two distinct fields of the same class will ever have the same offset
* and base.
*
* <p>As of 1.4.1, offsets for fields are represented as long values,
* although the Sun JVM does not use the most significant 32 bits.
* However, JVM implementations which store static fields at absolute
* addresses can use long offsets and null base pointers to express
* the field locations in a form usable by {@link #getInt(Object,long)}.
* Therefore, code which will be ported to such JVMs on 64-bit platforms
* must preserve all bits of static field offsets.
* @see #getInt(Object, long)
*/
@ForceInline
public long objectFieldOffset(Field f) {
return theInternalUnsafe.objectFieldOffset(f);
}
/**
* Reports the location of a given static field, in conjunction with {@link
* #staticFieldBase}.
* <p>Do not expect to perform any sort of arithmetic on this offset;
* it is just a cookie which is passed to the unsafe heap memory accessors.
*
* <p>Any given field will always have the same offset, and no two distinct
* fields of the same class will ever have the same offset.
*
* <p>As of 1.4.1, offsets for fields are represented as long values,
* although the Sun JVM does not use the most significant 32 bits.
* It is hard to imagine a JVM technology which needs more than
* a few bits to encode an offset within a non-array object,
* However, for consistency with other methods in this class,
* this method reports its result as a long value.
* @see #getInt(Object, long)
*/
@ForceInline
public long staticFieldOffset(Field f) {
return theInternalUnsafe.staticFieldOffset(f);
}
/**
* Reports the location of a given static field, in conjunction with {@link
* #staticFieldOffset}.
* <p>Fetch the base "Object", if any, with which static fields of the
* given class can be accessed via methods like {@link #getInt(Object,
* long)}. This value may be null. This value may refer to an object
* which is a "cookie", not guaranteed to be a real Object, and it should
* not be used in any way except as argument to the get and put routines in
* this class.
*/
@ForceInline
public Object staticFieldBase(Field f) {
return theInternalUnsafe.staticFieldBase(f);
}
/**
* Detects if the given class may need to be initialized. This is often
* needed in conjunction with obtaining the static field base of a
* class.
* @return false only if a call to {@code ensureClassInitialized} would have no effect
*/
@ForceInline
public boolean shouldBeInitialized(Class<?> c) {
return theInternalUnsafe.shouldBeInitialized(c);
}
/**
* Ensures the given class has been initialized. This is often
* needed in conjunction with obtaining the static field base of a
* class.
*/
@ForceInline
public void ensureClassInitialized(Class<?> c) {
theInternalUnsafe.ensureClassInitialized(c);
}
/**
* Reports the offset of the first element in the storage allocation of a
* given array class. If {@link #arrayIndexScale} returns a non-zero value
* for the same class, you may use that scale factor, together with this
* base offset, to form new offsets to access elements of arrays of the
* given class.
*
* @see #getInt(Object, long)
* @see #putInt(Object, long, int)
*/
@ForceInline
public int arrayBaseOffset(Class<?> arrayClass) {
return theInternalUnsafe.arrayBaseOffset(arrayClass);
}
/** The value of {@code arrayBaseOffset(boolean[].class)} */
public static final int ARRAY_BOOLEAN_BASE_OFFSET = jdk.internal.misc.Unsafe.ARRAY_BOOLEAN_BASE_OFFSET;
/** The value of {@code arrayBaseOffset(byte[].class)} */
public static final int ARRAY_BYTE_BASE_OFFSET = jdk.internal.misc.Unsafe.ARRAY_BYTE_BASE_OFFSET;
/** The value of {@code arrayBaseOffset(short[].class)} */
public static final int ARRAY_SHORT_BASE_OFFSET = jdk.internal.misc.Unsafe.ARRAY_SHORT_BASE_OFFSET;
/** The value of {@code arrayBaseOffset(char[].class)} */
public static final int ARRAY_CHAR_BASE_OFFSET = jdk.internal.misc.Unsafe.ARRAY_CHAR_BASE_OFFSET;
/** The value of {@code arrayBaseOffset(int[].class)} */
public static final int ARRAY_INT_BASE_OFFSET = jdk.internal.misc.Unsafe.ARRAY_INT_BASE_OFFSET;
/** The value of {@code arrayBaseOffset(long[].class)} */
public static final int ARRAY_LONG_BASE_OFFSET = jdk.internal.misc.Unsafe.ARRAY_LONG_BASE_OFFSET;
/** The value of {@code arrayBaseOffset(float[].class)} */
public static final int ARRAY_FLOAT_BASE_OFFSET = jdk.internal.misc.Unsafe.ARRAY_FLOAT_BASE_OFFSET;
/** The value of {@code arrayBaseOffset(double[].class)} */
public static final int ARRAY_DOUBLE_BASE_OFFSET = jdk.internal.misc.Unsafe.ARRAY_DOUBLE_BASE_OFFSET;
/** The value of {@code arrayBaseOffset(Object[].class)} */
public static final int ARRAY_OBJECT_BASE_OFFSET = jdk.internal.misc.Unsafe.ARRAY_OBJECT_BASE_OFFSET;
/**
* Reports the scale factor for addressing elements in the storage
* allocation of a given array class. However, arrays of "narrow" types
* will generally not work properly with accessors like {@link
* #getByte(Object, long)}, so the scale factor for such classes is reported
* as zero.
*
* @see #arrayBaseOffset
* @see #getInt(Object, long)
* @see #putInt(Object, long, int)
*/
@ForceInline
public int arrayIndexScale(Class<?> arrayClass) {
return theInternalUnsafe.arrayIndexScale(arrayClass);
}
/** The value of {@code arrayIndexScale(boolean[].class)} */
public static final int ARRAY_BOOLEAN_INDEX_SCALE = jdk.internal.misc.Unsafe.ARRAY_BOOLEAN_INDEX_SCALE;
/** The value of {@code arrayIndexScale(byte[].class)} */
public static final int ARRAY_BYTE_INDEX_SCALE = jdk.internal.misc.Unsafe.ARRAY_BYTE_INDEX_SCALE;
/** The value of {@code arrayIndexScale(short[].class)} */
public static final int ARRAY_SHORT_INDEX_SCALE = jdk.internal.misc.Unsafe.ARRAY_SHORT_INDEX_SCALE;
/** The value of {@code arrayIndexScale(char[].class)} */
public static final int ARRAY_CHAR_INDEX_SCALE = jdk.internal.misc.Unsafe.ARRAY_CHAR_INDEX_SCALE;
/** The value of {@code arrayIndexScale(int[].class)} */
public static final int ARRAY_INT_INDEX_SCALE = jdk.internal.misc.Unsafe.ARRAY_INT_INDEX_SCALE;
/** The value of {@code arrayIndexScale(long[].class)} */
public static final int ARRAY_LONG_INDEX_SCALE = jdk.internal.misc.Unsafe.ARRAY_LONG_INDEX_SCALE;
/** The value of {@code arrayIndexScale(float[].class)} */
public static final int ARRAY_FLOAT_INDEX_SCALE = jdk.internal.misc.Unsafe.ARRAY_FLOAT_INDEX_SCALE;
/** The value of {@code arrayIndexScale(double[].class)} */
public static final int ARRAY_DOUBLE_INDEX_SCALE = jdk.internal.misc.Unsafe.ARRAY_DOUBLE_INDEX_SCALE;
/** The value of {@code arrayIndexScale(Object[].class)} */
public static final int ARRAY_OBJECT_INDEX_SCALE = jdk.internal.misc.Unsafe.ARRAY_OBJECT_INDEX_SCALE;
/**
* Reports the size in bytes of a native pointer, as stored via {@link
* #putAddress}. This value will be either 4 or 8. Note that the sizes of
* other primitive types (as stored in native memory blocks) is determined
* fully by their information content.
*/
@ForceInline
public int addressSize() {
return theInternalUnsafe.addressSize();
}
/** The value of {@code addressSize()} */
public static final int ADDRESS_SIZE = theInternalUnsafe.addressSize();
/**
* Reports the size in bytes of a native memory page (whatever that is).
* This value will always be a power of two.
*/
@ForceInline
public int pageSize() {
return theInternalUnsafe.pageSize();
}
/// random trusted operations from JNI:
/**
* Defines a class but does not make it known to the class loader or system dictionary.
* <p>
* For each CP entry, the corresponding CP patch must either be null or have
* a format that matches its tag:
* <ul>
* <li>Integer, Long, Float, Double: the corresponding wrapper object type from java.lang
* <li>Utf8: a string (must have suitable syntax if used as signature or name)
* <li>Class: any java.lang.Class object
* <li>String: any object (not just a java.lang.String)
* <li>InterfaceMethodRef: (NYI) a method handle to invoke on that call site's arguments
* </ul>
* @param hostClass context for linkage, access control, protection domain, and class loader
* @param data bytes of a class file
* @param cpPatches where non-null entries exist, they replace corresponding CP entries in data
*/
@ForceInline
public Class<?> defineAnonymousClass(Class<?> hostClass, byte[] data, Object[] cpPatches) {
return theInternalUnsafe.defineAnonymousClass(hostClass, data, cpPatches);
}
/**
* Allocates an instance but does not run any constructor.
* Initializes the class if it has not yet been.
*/
@ForceInline
public Object allocateInstance(Class<?> cls)
throws InstantiationException {
return theInternalUnsafe.allocateInstance(cls);
}
/** Throws the exception without telling the verifier. */
@ForceInline
public void throwException(Throwable ee) {
theInternalUnsafe.throwException(ee);
}
/**
* Atomically updates Java variable to {@code x} if it is currently
* holding {@code expected}.
*
* <p>This operation has memory semantics of a {@code volatile} read
* and write. Corresponds to C11 atomic_compare_exchange_strong.
*
* @return {@code true} if successful
*/
@ForceInline
public final boolean compareAndSwapObject(Object o, long offset,
Object expected,
Object x) {
return theInternalUnsafe.compareAndSetObject(o, offset, expected, x);
}
/**
* Atomically updates Java variable to {@code x} if it is currently
* holding {@code expected}.
*
* <p>This operation has memory semantics of a {@code volatile} read
* and write. Corresponds to C11 atomic_compare_exchange_strong.
*
* @return {@code true} if successful
*/
@ForceInline
public final boolean compareAndSwapInt(Object o, long offset,
int expected,
int x) {
return theInternalUnsafe.compareAndSetInt(o, offset, expected, x);
}
/**
* Atomically updates Java variable to {@code x} if it is currently
* holding {@code expected}.
*
* <p>This operation has memory semantics of a {@code volatile} read
* and write. Corresponds to C11 atomic_compare_exchange_strong.
*
* @return {@code true} if successful
*/
@ForceInline
public final boolean compareAndSwapLong(Object o, long offset,
long expected,
long x) {
return theInternalUnsafe.compareAndSetLong(o, offset, expected, x);
}
/**
* Fetches a reference value from a given Java variable, with volatile
* load semantics. Otherwise identical to {@link #getObject(Object, long)}
*/
@ForceInline
public Object getObjectVolatile(Object o, long offset) {
return theInternalUnsafe.getObjectVolatile(o, offset);
}
/**
* Stores a reference value into a given Java variable, with
* volatile store semantics. Otherwise identical to {@link #putObject(Object, long, Object)}
*/
@ForceInline
public void putObjectVolatile(Object o, long offset, Object x) {
theInternalUnsafe.putObjectVolatile(o, offset, x);
}
/** Volatile version of {@link #getInt(Object, long)} */
@ForceInline
public int getIntVolatile(Object o, long offset) {
return theInternalUnsafe.getIntVolatile(o, offset);
}
/** Volatile version of {@link #putInt(Object, long, int)} */
@ForceInline
public void putIntVolatile(Object o, long offset, int x) {
theInternalUnsafe.putIntVolatile(o, offset, x);
}
/** Volatile version of {@link #getBoolean(Object, long)} */
@ForceInline
public boolean getBooleanVolatile(Object o, long offset) {
return theInternalUnsafe.getBooleanVolatile(o, offset);
}
/** Volatile version of {@link #putBoolean(Object, long, boolean)} */
@ForceInline
public void putBooleanVolatile(Object o, long offset, boolean x) {
theInternalUnsafe.putBooleanVolatile(o, offset, x);
}
/** Volatile version of {@link #getByte(Object, long)} */
@ForceInline
public byte getByteVolatile(Object o, long offset) {
return theInternalUnsafe.getByteVolatile(o, offset);
}
/** Volatile version of {@link #putByte(Object, long, byte)} */
@ForceInline
public void putByteVolatile(Object o, long offset, byte x) {
theInternalUnsafe.putByteVolatile(o, offset, x);
}
/** Volatile version of {@link #getShort(Object, long)} */
@ForceInline
public short getShortVolatile(Object o, long offset) {
return theInternalUnsafe.getShortVolatile(o, offset);
}
/** Volatile version of {@link #putShort(Object, long, short)} */
@ForceInline
public void putShortVolatile(Object o, long offset, short x) {
theInternalUnsafe.putShortVolatile(o, offset, x);
}
/** Volatile version of {@link #getChar(Object, long)} */
@ForceInline
public char getCharVolatile(Object o, long offset) {
return theInternalUnsafe.getCharVolatile(o, offset);
}
/** Volatile version of {@link #putChar(Object, long, char)} */
@ForceInline
public void putCharVolatile(Object o, long offset, char x) {
theInternalUnsafe.putCharVolatile(o, offset, x);
}
/** Volatile version of {@link #getLong(Object, long)} */
@ForceInline
public long getLongVolatile(Object o, long offset) {
return theInternalUnsafe.getLongVolatile(o, offset);
}
/** Volatile version of {@link #putLong(Object, long, long)} */
@ForceInline
public void putLongVolatile(Object o, long offset, long x) {
theInternalUnsafe.putLongVolatile(o, offset, x);
}
/** Volatile version of {@link #getFloat(Object, long)} */
@ForceInline
public float getFloatVolatile(Object o, long offset) {
return theInternalUnsafe.getFloatVolatile(o, offset);
}
/** Volatile version of {@link #putFloat(Object, long, float)} */
@ForceInline
public void putFloatVolatile(Object o, long offset, float x) {
theInternalUnsafe.putFloatVolatile(o, offset, x);
}
/** Volatile version of {@link #getDouble(Object, long)} */
@ForceInline
public double getDoubleVolatile(Object o, long offset) {
return theInternalUnsafe.getDoubleVolatile(o, offset);
}
/** Volatile version of {@link #putDouble(Object, long, double)} */
@ForceInline
public void putDoubleVolatile(Object o, long offset, double x) {
theInternalUnsafe.putDoubleVolatile(o, offset, x);
}
/**
* Version of {@link #putObjectVolatile(Object, long, Object)}
* that does not guarantee immediate visibility of the store to
* other threads. This method is generally only useful if the
* underlying field is a Java volatile (or if an array cell, one
* that is otherwise only accessed using volatile accesses).
*
* Corresponds to C11 atomic_store_explicit(..., memory_order_release).
*/
@ForceInline
public void putOrderedObject(Object o, long offset, Object x) {
theInternalUnsafe.putObjectRelease(o, offset, x);
}
/** Ordered/Lazy version of {@link #putIntVolatile(Object, long, int)} */
@ForceInline
public void putOrderedInt(Object o, long offset, int x) {
theInternalUnsafe.putIntRelease(o, offset, x);
}
/** Ordered/Lazy version of {@link #putLongVolatile(Object, long, long)} */
@ForceInline
public void putOrderedLong(Object o, long offset, long x) {
theInternalUnsafe.putLongRelease(o, offset, x);
}
/**
* Unblocks the given thread blocked on {@code park}, or, if it is
* not blocked, causes the subsequent call to {@code park} not to
* block. Note: this operation is "unsafe" solely because the
* caller must somehow ensure that the thread has not been
* destroyed. Nothing special is usually required to ensure this
* when called from Java (in which there will ordinarily be a live
* reference to the thread) but this is not nearly-automatically
* so when calling from native code.
*
* @param thread the thread to unpark.
*/
@ForceInline
public void unpark(Object thread) {
theInternalUnsafe.unpark(thread);
}
/**
* Blocks current thread, returning when a balancing
* {@code unpark} occurs, or a balancing {@code unpark} has
* already occurred, or the thread is interrupted, or, if not
* absolute and time is not zero, the given time nanoseconds have
* elapsed, or if absolute, the given deadline in milliseconds
* since Epoch has passed, or spuriously (i.e., returning for no
* "reason"). Note: This operation is in the Unsafe class only
* because {@code unpark} is, so it would be strange to place it
* elsewhere.
*/
@ForceInline
public void park(boolean isAbsolute, long time) {
theInternalUnsafe.park(isAbsolute, time);
}
/**
* Gets the load average in the system run queue assigned
* to the available processors averaged over various periods of time.
* This method retrieves the given {@code nelem} samples and
* assigns to the elements of the given {@code loadavg} array.
* The system imposes a maximum of 3 samples, representing
* averages over the last 1, 5, and 15 minutes, respectively.
*
* @param loadavg an array of double of size nelems
* @param nelems the number of samples to be retrieved and
* must be 1 to 3.
*
* @return the number of samples actually retrieved; or -1
* if the load average is unobtainable.
*/
@ForceInline
public int getLoadAverage(double[] loadavg, int nelems) {
return theInternalUnsafe.getLoadAverage(loadavg, nelems);
}
// The following contain CAS-based Java implementations used on
// platforms not supporting native instructions
/**
* Atomically adds the given value to the current value of a field
* or array element within the given object {@code o}
* at the given {@code offset}.
*
* @param o object/array to update the field/element in
* @param offset field/element offset
* @param delta the value to add
* @return the previous value
* @since 1.8
*/
@ForceInline
public final int getAndAddInt(Object o, long offset, int delta) {
return theInternalUnsafe.getAndAddInt(o, offset, delta);
}
/**
* Atomically adds the given value to the current value of a field
* or array element within the given object {@code o}
* at the given {@code offset}.
*
* @param o object/array to update the field/element in
* @param offset field/element offset
* @param delta the value to add
* @return the previous value
* @since 1.8
*/
@ForceInline
public final long getAndAddLong(Object o, long offset, long delta) {
return theInternalUnsafe.getAndAddLong(o, offset, delta);
}
/**
* Atomically exchanges the given value with the current value of
* a field or array element within the given object {@code o}
* at the given {@code offset}.
*
* @param o object/array to update the field/element in
* @param offset field/element offset
* @param newValue new value
* @return the previous value
* @since 1.8
*/
@ForceInline
public final int getAndSetInt(Object o, long offset, int newValue) {
return theInternalUnsafe.getAndSetInt(o, offset, newValue);
}
/**
* Atomically exchanges the given value with the current value of
* a field or array element within the given object {@code o}
* at the given {@code offset}.
*
* @param o object/array to update the field/element in
* @param offset field/element offset
* @param newValue new value
* @return the previous value
* @since 1.8
*/
@ForceInline
public final long getAndSetLong(Object o, long offset, long newValue) {
return theInternalUnsafe.getAndSetLong(o, offset, newValue);
}
/**
* Atomically exchanges the given reference value with the current
* reference value of a field or array element within the given
* object {@code o} at the given {@code offset}.
*
* @param o object/array to update the field/element in
* @param offset field/element offset
* @param newValue new value
* @return the previous value
* @since 1.8
*/
@ForceInline
public final Object getAndSetObject(Object o, long offset, Object newValue) {
return theInternalUnsafe.getAndSetObject(o, offset, newValue);
}
/**
* Ensures that loads before the fence will not be reordered with loads and
* stores after the fence; a "LoadLoad plus LoadStore barrier".
*
* Corresponds to C11 atomic_thread_fence(memory_order_acquire)
* (an "acquire fence").
*
* A pure LoadLoad fence is not provided, since the addition of LoadStore
* is almost always desired, and most current hardware instructions that
* provide a LoadLoad barrier also provide a LoadStore barrier for free.
* @since 1.8
*/
@ForceInline
public void loadFence() {
theInternalUnsafe.loadFence();
}
/**
* Ensures that loads and stores before the fence will not be reordered with
* stores after the fence; a "StoreStore plus LoadStore barrier".
*
* Corresponds to C11 atomic_thread_fence(memory_order_release)
* (a "release fence").
*
* A pure StoreStore fence is not provided, since the addition of LoadStore
* is almost always desired, and most current hardware instructions that
* provide a StoreStore barrier also provide a LoadStore barrier for free.
* @since 1.8
*/
@ForceInline
public void storeFence() {
theInternalUnsafe.storeFence();
}
/**
* Ensures that loads and stores before the fence will not be reordered
* with loads and stores after the fence. Implies the effects of both
* loadFence() and storeFence(), and in addition, the effect of a StoreLoad
* barrier.
*
* Corresponds to C11 atomic_thread_fence(memory_order_seq_cst).
* @since 1.8
*/
@ForceInline
public void fullFence() {
theInternalUnsafe.fullFence();
}
/**
* Invokes the given direct byte buffer's cleaner, if any.
*
* @param directBuffer a direct byte buffer
* @throws NullPointerException if {@code directBuffer} is null
* @throws IllegalArgumentException if {@code directBuffer} is non-direct,
* or is a {@link java.nio.Buffer#slice slice}, or is a
* {@link java.nio.Buffer#duplicate duplicate}
* @since 9
*/
public void invokeCleaner(java.nio.ByteBuffer directBuffer) {
if (!directBuffer.isDirect())
throw new IllegalArgumentException("buffer is non-direct");
DirectBuffer db = (DirectBuffer)directBuffer;
// slices and duplicates carry an attachment referencing the originating
// buffer; only the original owner may be cleaned
if (db.attachment() != null)
throw new IllegalArgumentException("duplicate or slice");
Cleaner cleaner = db.cleaner();
if (cleaner != null) {
cleaner.clean();
}
}
} |
/**
* Tests the vibCount method from the VibrationMethods class.
*/
public class vibCountTest {
int[] expected;
int[] result;
@Before
public void setUp() {
expected = new int[3];
}
@After
public void tearDown() {
expected = null;
result = null;
}
@Test
/**
* Tests for the minimum possible time vibration-wise: 1:00
*/
public void minTime() {
expected[0] = 1; expected[1] = 0; expected[2] = 0;
result = VibrationMethods.vibCalc(1,00);
for (int i = 0; i < 2; i++){
assertEquals(expected[i], result[i]);
}
}
@Test
/**
* Tests for the maximum possible time vibration-wise: 12:59
*/
public void maxTime() {
expected[0] = 12; expected[1] = 5; expected[2] = 9;
result = VibrationMethods.vibCalc(12,59);
for (int i = 0; i < 2; i++){
assertEquals(expected[i], result[i]);
}
}
@Test
/**
* Tests for the vibrations at 12:00 - a time that has different conditions than the others.
*/
public void hour12Test() {
expected[0] = 12; expected[1] = 0; expected[2] = 0;
result = VibrationMethods.vibCalc(12,00);
for (int i = 0; i < 2; i++){
assertEquals(expected[i], result[i]);
}
}
} |
def fix_depth_size(self):
    """Resize the depth map so its spatial dims match (self.height, self.width).

    No-op unless both an RGB image and a depth map are present, or when the
    depth map already has the expected shape.
    """
    if not (self.hasRgb and self.hasDepth):
        return
    target = (self.height, self.width)
    if self.depth.shape[:2] != target:
        # NOTE: cv2.resize takes its size argument as (width, height).
        self.depth = cv2.resize(
            self.depth,
            (self.width, self.height),
            interpolation=cv2.INTER_CUBIC,
        )
/**
* take a StringBuilder and replace a marker inside a file by the content of that StringBuilder.
*
* @param filename path of the file to change
* @param marker marker String in file to be replace
* @param sb StringBuilder that has the content to put instead of the marker
* @throws IOException
*/
private void substituteInFile(final String filename, final String marker, final StringBuilder sb) throws IOException {
final InputStream in = new FileInputStream(filename);
Scanner scanner = new Scanner(in);
StringBuilder fileContents = new StringBuilder();
try {
while (scanner.hasNextLine()) {
fileContents.append(scanner.nextLine()).append(CRLF);
}
} finally {
scanner.close();
}
in.close();
byte[] replace = fileContents.toString().replace(marker, sb.toString()).getBytes();
OutputStream out = new FileOutputStream(new File(filename));
out.write(replace, 0, replace.length);
out.close();
} |
#include <stdio.h>
#include <stdlib.h>
#include "../lib/gt.h"
/*
 * Worker run by each green thread: prints "<id> <i>" ten times, yielding
 * to the scheduler after every line so output from the threads interleaves.
 *
 * NOTE(review): `x` is shared mutable state incremented without
 * synchronization; presumably safe because the gt threads are cooperative
 * (only switch at gt_yield) -- confirm against gt.h's threading model.
 */
void f(void* unused) {
static int x = 0;   /* shared counter used to hand out thread ids */
int i, id;
id = ++x;           /* this thread's 1-based id */
for (i = 0; i < 10; i++) {
printf("%d %d\n", id, i);
gt_yield();         /* hand control back to the scheduler */
}
}
/*
 * Demo driver: initializes the green-thread runtime, spawns 270 threads
 * all running f, then calls gt_ret. NOTE(review): gt_ret presumably runs
 * the scheduler to completion and/or terminates the initial thread with
 * status 1 -- confirm against gt.h.
 */
int main() {
int i;
gt_init();
for (i = 0; i < 270; i++) gt_go(f, NULL);
gt_ret(1, NULL);
}
|
// <gh_stars>1-10
/**
 * A thin list type over Array exposing `head` and `tail` accessors.
 *
 * The original constructor tried to dodge the `new Array(1)` pitfall
 * (a single numeric argument creates a sparse length-1 array) by
 * duplicating a lone argument, but `super(x, x)` then produced a
 * TWO-element list. Calling `super()` and pushing the arguments gives
 * the intended `new LinkedList(1) -> [1]` for every arity and element
 * type.
 *
 * NOTE(review): Array methods that build new instances via the species
 * constructor (e.g. `slice`, `map`) invoke `new LinkedList(length)`;
 * this constructor treats that number as an element, as the original
 * also effectively did — confirm those methods are not relied upon.
 */
class LinkedList<T> extends Array<T> {
  constructor(...args: Array<T>) {
    super()
    this.push(...args)
  }

  /** First element, or null when the list is empty. */
  get head(): T | null {
    // length check (rather than `|| null`) so falsy elements like 0 or '' are returned
    return this.length > 0 ? this[0] : null
  }

  /** Last element, or null when the list is empty. */
  get tail(): T | null {
    return this.length > 0 ? this[this.length - 1] : null
  }
}
export default LinkedList |
/**
 * Background IntentService that refreshes cached server data for the
 * current user by submitting one DataUpdaterRunnable per data key.
 * Created by richard on 13-9-26.
 */
public class UpdateAllService extends IntentService {
Context context = null;
// Receives each updater's raw response. message.obj is the response body,
// message.getData() carries the cache key the runnable was started with.
final Handler handler = new Handler(new Handler.Callback() {
@Override
public boolean handleMessage(Message message) {
if (message.obj != null) {
String key = message.getData().getString("key");
String data = (String) message.obj;
LogHelper.d(key + " Service update complete");
if (data.length() < 5) {
// this request failed (response too short to be valid); drop it
return true;
}
try {
JSONObject jsonObject = new JSONObject(data);
String msg = jsonObject.getString("msg");
// a "msg" field means the server reported an error; surface it
if (context != null)
Toast.makeText(context, msg, Toast.LENGTH_SHORT).show();
} catch (JSONException e) {
// no "msg" field: treat the payload as valid data and cache it
if (context != null)
context.getSharedPreferences(Utility.PREFERENCE, 0)
.edit()
.putString(key, data)
.commit();
} catch (Exception e) {
e.printStackTrace();
}
return true;
}
return false;
}
});
public UpdateAllService() {
super("UpdateAllService");
}
/**
 * Dispatches update tasks for either all known data keys or only those
 * named in the intent's "update" string-array extra. Does nothing when
 * no user is logged in.
 * NOTE(review): the ExecutorService is never shut down and onHandleIntent
 * returns before tasks finish -- confirm the pool threads are expected to
 * outlive this call.
 */
@Override
protected void onHandleIntent(Intent intent) {
LogHelper.d("service handle intent");
context = this;
ExecutorService service = Executors.newFixedThreadPool(2);
final PersonalDataHelper helper = new PersonalDataHelper(context);
final UserIDStructure structure = helper.getCurrentUser();
if (structure == null)
return;
List<String> subs = new ArrayList<String>();
if (intent.getStringArrayExtra("update") == null) {
// update everything
for (String name : DataUpdater.name.keySet()) {
subs.add(name);
}
} else {
// partial update: only the keys listed in the intent
String[] names = intent.getStringArrayExtra("update");
for (String name : names) {
subs.add(name);
}
}
for (String name : subs) {
service.submit(new DataUpdaterRunnable(name, handler, this));
}
}
}
/**
* This migration sets the enum value testCaseType for every existing test case depending on its name and exercise's programming language.
*/
@Component
public class MigrationEntry20220210_160300 extends MigrationEntry {
private final ProgrammingExerciseRepository programmingExerciseRepository;
private final ProgrammingExerciseTestCaseRepository programmingExerciseTestCaseRepository;
private final ProgrammingExerciseTestCaseService programmingExerciseTestCaseService;
public MigrationEntry20220210_160300(ProgrammingExerciseRepository programmingExerciseRepository, ProgrammingExerciseTestCaseRepository programmingExerciseTestCaseRepository,
ProgrammingExerciseTestCaseService programmingExerciseTestCaseService) {
this.programmingExerciseRepository = programmingExerciseRepository;
this.programmingExerciseTestCaseRepository = programmingExerciseTestCaseRepository;
this.programmingExerciseTestCaseService = programmingExerciseTestCaseService;
}
/**
* Invokes saving the type for all test cases for a list of programming exercises to the database.
* No batch processing is required as all database calls are made sequentially per exercise.
*/
@Override
public void execute() {
programmingExerciseRepository.findAllWithEagerTestCases()
.forEach(programmingExercise -> processTestCases(programmingExercise.getTestCases(), programmingExercise.getProgrammingLanguage()));
}
/**
* Sets the enum value test case type for every test case and saves to the database. Implicitly, all tests are of the same programming language
* If the test case belongs to a non-JAVA programming exercise, the type is set to DEFAULT.
* If the test case belongs to a JAVA programming exercise, the type is set to:
* STRUCTURAL: test case has been generated by the structure oracle, and it's name therefore follow a certain pattern
* BEHAVIORAL: all other test cases (which have been created by the instructor)
*
* @param testCases the test cases of a programming exercises
* @param language the programming language the test cases are written in
*/
private void processTestCases(Set<ProgrammingExerciseTestCase> testCases, ProgrammingLanguage language) {
programmingExerciseTestCaseService.setTestCaseType(testCases, language);
programmingExerciseTestCaseRepository.saveAll(testCases);
}
@Override
public String author() {
return "ole-ve";
}
@Override
public String date() {
return "20220210_160300";
}
} |
/** Test that do not send cache page request after limit exceeded. */
@Test
public void testTextQueryLimitedMultiplePages() {
    // Broad wildcard query ("1*") with a result limit; 30 matching rows exist.
    // NOTE(review): assumes checkTextQuery(query, limit, rows) — confirm signature.
    checkTextQuery("1*", QUERY_LIMIT, 30);
    // Verify the page-request counters stay bounded once the limit is hit
    // (no further cache page requests are issued).
    checkPages(NODES, NODES + 2, NODES);
}
<filename>14/17/book.h
#ifndef BOOK_H_
#define BOOK_H_
#include <string>
#include <iostream>
/**
 * A simple book record (title, author, publication year) with stream I/O
 * and value equality.
 */
class Book {
    friend std::ostream &operator<<(std::ostream &, const Book &);
    friend std::istream &operator>>(std::istream &, Book &);
    friend bool operator==(const Book &, const Book &);
    friend bool operator!=(const Book &, const Book &);
public:
    // BUG FIX: the original two-argument constructor ignored `t`, so an
    // explicitly supplied author was silently dropped.
    Book(std::string s = "Untitled", std::string t = "Anonymous"): title(s), author(t) { }
    // Reads "author title year" from the stream (same format as operator>>,
    // which is found via the friend declaration / ADL). The original called
    // a broken `read` helper that did not compile (no return type, pass by
    // value, reference to a nonexistent member `name`); it has been removed.
    Book(std::istream &is) { is >> *this; }
    Book(std::string s, std::string t, int y): title(s), author(t), year(y) { }
private:
    std::string title = "Untitled";
    std::string author = "Anonymous";
    int year = 0;
};
// The operator definitions live in the header, so they must be `inline` to
// avoid multiple-definition (ODR) errors when the header is included in
// several translation units.
inline bool operator==(const Book &lhs, const Book &rhs)
{
    return lhs.title == rhs.title &&
           lhs.author == rhs.author &&
           lhs.year == rhs.year;
}
inline bool operator!=(const Book &lhs, const Book &rhs)
{
    return !(lhs == rhs);
}
inline std::ostream &operator<<(std::ostream &os, const Book &b)
{
    os << b.author << " " << b.title << " " << b.year;
    return os;
}
inline std::istream &operator>>(std::istream &is, Book &b)
{
    is >> b.author >> b.title >> b.year;
    // A failed read leaves the object in a valid default state.
    if (!is)
        b = Book();
    return is;
}
#endif
|
/**
* Returns a human-readable representation of bytes similar to how "ls -h" works in Unix systems. The resulting is
* guaranteed to be 4 characters or less unless the given value is greater than 999TB. The last character in the
* returned string is one of 'B', 'K', 'M', 'G', or 'T' representing bytes, kilobytes, megabytes, gigabytes or
* terabytes. This function uses the binary unit system where 1K = 1024B.
*
* <p>
* Examples:
* </p>
*
* <pre>
* 482 = 482B
* 1245 = 1.2K
* 126976 = 124K
* 4089471 = 3.9M
* 43316209 = 41M
* 1987357695 = 1.9G
* </pre>
*/
public static String readable(long bytes) {
double result = bytes;
int i = 0;
while (result >= 1000 && i < BYTE_COUNT_SUFFIX.length() - 1) {
result /= 1024.0;
i++;
}
DecimalFormat formatter = (result < 10 && i > 0) ? ONE_FRACTION_DIGIT : NO_FRACTION_DIGIT;
return formatter.format(result) + BYTE_COUNT_SUFFIX.charAt(i);
} |
<gh_stars>10-100
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import datetime
import json
import gevent
from datamanage import exceptions as dm_errors
from datamanage.lite.dmonitor.constants import (
ALERT_ENV_MAPPINGS,
ALERT_TITLE_MAPPSINGS,
ALERT_VERSION_MAPPINGS,
GRAFANA_ALERT_STATUS,
MSG_TYPE_MAPPSINGS,
)
from datamanage.lite.dmonitor.filters import AlertsFilter
from datamanage.lite.dmonitor.mixins.base_mixins import BaseMixin
from datamanage.lite.dmonitor.models import (
AlertDetail,
AlertLog,
AlertShield,
DatamonitorAlertConfigRelation,
)
from datamanage.lite.dmonitor.serializers import (
AlertDetailListSerializer,
AlertDetailMineSerializer,
AlertDetailSerializer,
AlertLogSerializer,
AlertReportSerializer,
AlertSendSerializer,
AlertShieldSerializer,
AlertTargetMineSerializer,
GrafanaAlertSerilizer,
)
from datamanage.pizza_settings import NOTIFY_WITH_ENV, RUN_MODE, RUN_VERSION
from datamanage.utils.api import DataflowApi, MetaApi
from datamanage.utils.drf import DataPageNumberPagination
from datamanage.utils.time_tools import timetostr, tznow
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.response import Response
from common.api import CmsiApi
from common.bklanguage import BkLanguage
from common.decorators import list_route, params_valid
from common.local import get_request_username
from common.log import logger
from common.views import APIModelViewSet, APIViewSet
class AlertViewSet(APIModelViewSet):
"""
@api {get} /datamanage/dmonitor/alerts/:id/ 查询汇总告警详情
@apiName dmonitor_retrivev_alert
@apiGroup DmonitorAlert
@apiVersion 1.0.0
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"result": true,
"data": [
{
"message": "xxx",
"message_en": "xxx",
"alert_time": "2019-03-01 00:00:00",
"alert_level": "warning",
"receiver": "user",
"notify_way": "wechat",
"description": "xxxx",
"alert_details": [
{
"message": "yyyy",
"message_en": "yyyyy",
"full_message": "zzzzz",
"full_message_en": "zzzzz",
"alert_level": "warning",
"alert_code": "data_delay",
"bk_biz_id": 1, // 可选维度
"project_id": 4, // 可选维度
"flow_id": 1, // 可选维度
"raw_data_id": 1, // 可选维度
"result_table_id": "1_example", // 可选维度
"bk_app_code": "dataweb" // 可选维度
}
]
}
],
"message": "",
"code": "00",
}
"""
"""
@api {get} /datamanage/dmonitor/alerts/ 查询汇总告警列表
@apiName dmonitor_list_alerts
@apiGroup DmonitorAlert
@apiVersion 1.0.0
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"result": true,
"data": {
"message": "xxx",
"message_en": "xxx",
"alert_time": "2019-03-01 00:00:00",
"alert_level": "warning",
"receiver": "user",
"notify_way": "wechat",
"description": "xxxx",
"alert_details": [
{
"message": "yyyy",
"message_en": "yyyyy",
"full_message": "zzzzz",
"full_message_en": "zzzzz",
"alert_level": "warning",
"alert_code": "data_delay",
"bk_biz_id": 1, // 可选维度
"project_id": 4, // 可选维度
"flow_id": 1, // 可选维度
"raw_data_id": 1, // 可选维度
"result_table_id": "1_example", // 可选维度
"bk_app_code": "dataweb" // 可选维度
}
]
},
"message": "",
"code": "00",
}
"""
model = AlertLog
lookup_field = model._meta.pk.name
filter_backends = (DjangoFilterBackend,)
filter_class = AlertsFilter
search_fields = ("receiver",)
pagination_class = DataPageNumberPagination
serializer_class = AlertLogSerializer
ordering_fields = ("id", "created_at")
ordering = ("-id",)
    def get_queryset(self):
        # Expose the full alert-log table; filtering is delegated to the
        # configured filter backends and pagination.
        return self.model.objects.all()
    @list_route(methods=["post"], url_path="send")
    @params_valid(serializer=AlertSendSerializer)
    def do_alert_send(self, request, params):
        """
        @api {post} /datamanage/dmonitor/alerts/send/ 发送告警
        @apiName dmonitor_send_alerts
        @apiGroup DmonitorAlert
        @apiVersion 1.0.0
        @apiParam {String} receiver 告警接收人
        @apiParam {String="weixin","work-weixin","mail","sms"} notify_way 告警方式
        @apiParam {String} [title] 告警标题
        @apiParam {String} message 告警信息
        @apiParam {String} [message_en] 英文告警信息
        @apiSuccessExample {json} Success-Response:
            HTTP/1.1 200 OK
            {
                "result": true,
                "data": "ok",
                "message": "",
                "code": "00",
            }
        """
        receiver = params["receiver"]
        title = params.get("title")
        title_en = params.get("title_en")
        message = params["message"]
        notify_way = params["notify_way"]
        # Map the public notify_way name to the CMSI message type.
        notify_way = MSG_TYPE_MAPPSINGS.get(notify_way, notify_way)
        if RUN_VERSION == "tencent":
            # Internal edition: one bulk send, always localized to Chinese.
            title = self.get_alert_title(title, title_en, BkLanguage.CN)
            if notify_way == "weixin":
                # Prepend a newline so the body is not glued to the title.
                message = "\n{}".format(message)
            return Response(self.send_alert_message(receiver, notify_way, title, message))
        else:
            # Other editions: deliver per receiver so each user gets the
            # message in their own language; voice calls are batched into a
            # single call instead.
            receivers = receiver.split(",")
            if notify_way == "voice":
                result = self.send_alert_message_with_voice(receivers, message, params)
            else:
                result = self.send_alert_message_one_by_one(receivers, notify_way, message, params)
            return Response(result)
    def get_alert_title(self, title=None, title_en=None, language=None):
        """Build the alert title, optionally tagged with version/environment markers.

        With NOTIFY_WITH_ENV enabled, the deployed version and (for
        non-production) the environment are appended so receivers can tell
        which deployment fired the alert. ``title_en`` replaces ``title``
        for English receivers.
        """
        if NOTIFY_WITH_ENV:
            version = RUN_VERSION.upper()
            env = RUN_MODE.upper()
            if language == BkLanguage.EN:
                title = title_en
            return "{title}{version}{env}".format(
                # Fall back to the per-language default title when none given.
                title=title if (title is not None) else ALERT_TITLE_MAPPSINGS.get(language, title),
                version="[{}]".format(ALERT_VERSION_MAPPINGS.get(version, {}).get(language, version)),
                # Production deployments omit the environment marker.
                env="[{}]".format(ALERT_ENV_MAPPINGS.get(env, {}).get(language, env)) if env != "PRODUCT" else "",
            )
        else:
            # NOTE(review): an explicit title is only honored when no language is
            # given; once a language is set, the per-language default wins even
            # over a caller-supplied title — confirm this is intentional.
            return title if (title is not None and language is None) else ALERT_TITLE_MAPPSINGS.get(language, title)
    def send_alert_message(self, receivers, msg_type, title, content):
        """Deliver one alert through CMSI; raises SendAlertError on failure."""
        # The internal (tencent) edition does not support voice through the
        # generic send_msg API; it must go through the UWORK voice interface.
        if RUN_VERSION == "tencent" and msg_type == "voice":
            # Voice has no separate title field, so fold it into the body.
            message = "{}\n{}".format(title, content)
            result = CmsiApi.send_voice_msg({"receivers": receivers, "message": message})
        else:
            result = CmsiApi.send_msg(
                {
                    "msg_type": msg_type,
                    "receivers": receivers,
                    "content": content,
                    "title": title,
                }
            )
        if not result.is_success():
            raise dm_errors.SendAlertError(result.message)
        return result.data
def send_alert_message_with_voice(self, receivers, default_message, params):
if len(receivers) > 0:
first_user = receivers[0]
language = BkLanguage.get_user_language(first_user)
message = default_message
if language != BkLanguage.CN:
message = params.get("message_{}".format(language), "") or message
title = self.get_alert_title(language=language)
result = self.send_alert_message(",".join(receivers), "voice", title, message)
return result
return {}
def send_alert_message_one_by_one(self, receivers, notify_way, default_message, params):
result, errors = {}, {}
for user in receivers:
language = BkLanguage.get_user_language(user)
message = default_message
if language != BkLanguage.CN:
message = params.get("message_{}".format(language), "") or message
title = self.get_alert_title(language=language)
try:
result[user] = self.send_alert_message(user, notify_way, title, message)
except Exception as e:
errors[user] = str(e)
if len(errors) > 0:
errors.update(result)
raise dm_errors.SendAlertError(errors=errors)
return result
@list_route(methods=["post"], url_path="report")
@params_valid(serializer=AlertReportSerializer)
def do_alert_report(self, request, params):
"""
@api {post} /datamanage/dmonitor/alerts/report/ 上报告警
@apiVersion 1.0.0
@apiGroup DmonitorAlert
@apiName dmonitor_report_alert
@apiParam {object} flow_info 数据流信息
@apiParam {string} alert_code 告警码, 任务监控目前亩田为task
@apiParam {string} level 告警级别, 可选值有info/warning/danger
@apiParam {string} message 告警信息
@apiParam {string} message_en 告警英文信息
@apiParam {string} [full_message] 告警详情
@apiParam {string} [full_message_en] 告警英文详情
@apiParam {object} custom_tags 自定义告警维度
@apiParamExample {json} 参数样例:
{
// flow_info支持多种结构,目前总线的任务告警需要提供flow_id(实际为raw_data_id)
"flow_info": {
"flow_id": 100,
"data_set_id": "1_clean_task"
},
"alert_code": "task",
"level": "warning",
"message": "清洗任务异常",
"message_en": "There are some errors about cleaning task",
"full_message": "清洗任务(xxx)异常",
"full_message_en": "There are some errors about the xxx cleaning task",
"custom_tags": {
"project_id": 1,
"bk_biz_id": 1,
"raw_data_id": 100,
"detail_code": "databus_custom_alert_code",
}
}
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": {},
"message": "ok",
"code": "1500200",
"result": true,
"data": {}
}
"""
return Response({})
@list_route(methods=["post"], url_path="grafana")
@params_valid(serializer=GrafanaAlertSerilizer)
def do_grafana_alert(self, request, params):
"""
@api {post} /datamanage/dmonitor/alerts/grafana/ 处理Grafana告警
@apiName dmonitor_send_grafana_alerts
@apiGroup DmonitorAlert
@apiVersion 1.0.0
@apiParam {String} title 告警标题
@apiParam {String} ruleName 告警规则名称
@apiParam {String} ruleUrl 告警规则路径
@apiParam {String} state 告警状态
@apiParam {String} [message] 告警信息
@apiParam {Dict} [evalMatches] 告警参数
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"result": true,
"data": "ok",
"message": "",
"code": "00",
}
"""
receivers = request.GET.getlist("receivers", [])
notify_ways = request.GET.getlist("notify_ways", [])
message = params.get("message", "")
rule_name = params.get("ruleName", "")
title = params.get("title", "")
title = "{title}(Grafana)".format(title=rule_name or title)
if len(receivers) > 0:
first_user = receivers[0]
language = BkLanguage.get_user_language(first_user)
title = self.get_alert_title(title, language=language)
for notify_way in notify_ways:
title = "{title}(Grafana)".format(title=rule_name or title)
message = ("[{status}]{message}\n" "各指标当前值为:\n" "\t{metric_value}\n" "详情请查看仪表板: {grafana_url}").format(
status=GRAFANA_ALERT_STATUS.get(params.get("state"), "未知状态"),
message=message,
metric_value="\t\n".join(self.get_grafana_metric_values(params.get("evalMatches", []))),
grafana_url=params.get("ruleUrl"),
)
msg_type = MSG_TYPE_MAPPSINGS.get(notify_way, notify_way)
self.send_alert_message(receivers=receivers, msg_type=msg_type, title=title, content=message)
return Response("ok")
def get_grafana_metric_values(self, evalMatches):
for item in evalMatches:
yield "{metric_key}{tags_str}: {metric_value}".format(
metric_key=item["metric"],
metric_value=item["value"],
tags_str="({})".format(", ".join("{}: {}".format(k, v) for k, v in list(item.get("tags", {}).items())))
if item.get("tags")
else "",
)
@list_route(methods=["post"], url_path="grafana/shield")
@params_valid(serializer=GrafanaAlertSerilizer)
def do_grafana_shield(self, request, params):
"""
@api {post} /datamanage/dmonitor/alerts/grafana/shield/ 处理Grafana告警
@apiName dmonitor_do_grafana_shield
@apiGroup DmonitorAlert
@apiVersion 1.0.0
@apiParam {String} title 告警标题
@apiParam {String} ruleName 告警规则名称
@apiParam {String} ruleUrl 告警规则路径
@apiParam {String} state 告警状态
@apiParam {String} [message] 告警信息
@apiParam {Dict} [evalMatches] 告警参数
@apiParam {Int} shield_time 屏蔽时间
@apiParam {String} shield_params 屏蔽参数
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"result": true,
"data": "ok",
"message": "",
"code": "00",
}
"""
shield_time = int(request.GET.get("shield_time", 60))
shield_params = json.loads(request.GET.get("shield_params") or "{}")
title = params.get("title", "")
start_time = tznow()
end_time = start_time + datetime.timedelta(seconds=shield_time)
AlertShield.objects.create(
start_time=start_time,
end_time=end_time,
reason=title,
alert_code=shield_params.get("alert_code"),
alert_level=shield_params.get("alert_level"),
alert_config_id=shield_params.get("alert_config_id"),
receivers=shield_params.get("receivers"),
notify_ways=shield_params.get("notify_ways"),
dimensions=shield_params.get("dimensions"),
description=json.dumps(params),
)
return Response("ok")
class AlertShieldViewSet(APIModelViewSet):
"""
@api {post} /datamanage/dmonitor/alert_shields/ 创建告警屏蔽规则
@apiVersion 1.0.0
@apiGroup DmonitorShield
@apiName dmonitor_shield_alert
@apiParam {datetime} [start_time=now] 屏蔽开始时间
@apiParam {datetime} end_time 屏蔽结束时间
@apiParam {String} reason 屏蔽原因
@apiParam {String} [alert_code] 屏蔽策略
@apiParam {String} [alert_level] 屏蔽级别
@apiParam {Int} [alert_config_id] 屏蔽告警配置ID
@apiParam {List} [receivers] 屏蔽接收人
@apiParam {List} [notify_ways] 屏蔽通知方式
@apiParam {Dict} [dimensions] 屏蔽维度
@apiParam {String} [description] 描述
@apiParamExample {json} 参数样例:
{
"start_time": "2019-07-18 12:00:00",
"end_time": "2019-07-18 18:00:00",
"reason": "数据平台变更"
}
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": {},
"message": "ok",
"code": "1500200",
"result": true,
"data": {}
}
"""
model = AlertShield
lookup_field = model._meta.pk.name
filter_backends = (DjangoFilterBackend,)
pagination_class = DataPageNumberPagination
serializer_class = AlertShieldSerializer
ordering_fields = ("id", "created_at")
ordering = ("-id",)
    def get_queryset(self):
        # All shield rules; filtering/pagination is handled by the backends.
        return self.model.objects.all()
@list_route(methods=["get"], url_path="in_effect")
def in_effect(self, request):
queryset = self.filter_queryset(self.get_queryset())
nowtime = tznow()
queryset = queryset.filter(active=True, start_time__lte=nowtime, end_time__gte=nowtime)
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
    def perform_create(self, serializer):
        # Stamp the requesting user as the creator of the shield rule.
        serializer.save(created_by=get_request_username())
    def perform_update(self, serializer):
        # NOTE(review): updates overwrite created_by with the editing user —
        # presumably because no updated_by field exists; confirm this is intended.
        serializer.save(created_by=get_request_username())
class AlertDetailViewSet(BaseMixin, APIModelViewSet):
"""
@api {get} /datamanage/dmonitor/alert_details/:id/ 查询告警详情
@apiName dmonitor_retrivev_alert_detail
@apiGroup DmonitorAlert
@apiVersion 1.0.0
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"result": true,
"data": "ok",
"message": "",
"code": "00",
}
"""
model = AlertDetail
lookup_field = model._meta.pk.name
filter_backends = (DjangoFilterBackend,)
filter_fields = ("alert_code", "alert_status", "alert_send_status")
pagination_class = DataPageNumberPagination
serializer_class = AlertDetailSerializer
ordering_fields = ("id", "alert_time")
ordering = ("-alert_time",)
    def get_queryset(self):
        # All alert details; filtering/pagination is handled by the backends.
        return self.model.objects.all()
def retrieve(self, request, *args, **kwargs):
"""
@api {get} /datamanage/dmonitor/alert_details/:id/ 查询我的告警列表
@apiName dmonitor_retrieve_alert_detail
@apiGroup DmonitorAlert
@apiVersion 1.0.0
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"result": true,
"data": {
"alert_target_type": "rawdata",
"alert_target_id": "391",
"alert_target_alias": "体验登录日志",
"alert_type": "task_monitor",
"alert_code": "no_data",
"alert_level": "warning",
"project_id": 400,
"project_alias: "TEST_PROJECT",
"bk_biz_id": 591,
"bk_biz_name": "测试业务",
"message": "xxxxx",
"message_en": "xxxsxxx",
"full_message": "xxxxxx",
"full_message_en": "xxxxxxx",
"alert_status": "alerting", // "alerting", "converged", "recovered"
"alert_send_status": "success", // "init", "success", "error"
"alert_send_error": null,
"alert_time": "2019-02-25 00:00:00",
"alert_send_time": "2019-02-25 00:01:00",
"alert_recover_time": null,
"description": "xxxx"
},
"message": "",
"code": "00",
}
"""
instance = self.get_object()
serializer = self.get_serializer(instance)
alert_detail = self.attach_dimension_information([serializer.data])[0]
if alert_detail.get("alert_target_type") == "dataflow":
self.check_permission(
"flow.query_metrics",
alert_detail.get("alert_target_id"),
get_request_username(),
)
elif alert_detail.get("alert_target_type") == "rawdata":
self.check_permission(
"raw_data.query_data",
alert_detail.get("alert_target_id"),
get_request_username(),
)
return Response(alert_detail)
@list_route(methods=["GET"], url_path="mine")
@params_valid(serializer=AlertDetailMineSerializer)
def mine(self, request, params, *args, **kwargs):
"""
@api {get} /datamanage/dmonitor/alert_details/mine/ 查询我的告警列表
@apiName dmonitor_mine_alert_details
@apiGroup DmonitorAlert
@apiVersion 1.0.0
@apiParam {Int} [start_time] 开始时间(默认为当天0点)
@apiParam {Int} [end_time] 结束时间(默认为当前时间)
@apiParam {String} [alert_type] 告警类型, data_monitor, task_monitor
@apiParam {String} [alert_target] 告警对象
@apiParam {String} [alert_level] 告警级别, warning, danger
@apiParam {List} [alert_status] 告警策略ID列表
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"result": true,
"data": [
{
"alert_target_type": "rawdata",
"alert_target_id": "391",
"alert_target_alias": "体验登录日志",
"alert_type": "task_monitor",
"alert_code": "no_data",
"alert_level": "warning",
"project_id": 400,
"project_alias: "TEST_PROJECT",
"bk_biz_id": 591,
"bk_biz_name": "测试业务",
"message": "xxxxx",
"message_en": "xxxsxxx",
"full_message": "xxxxxx",
"full_message_en": "xxxxxxx",
"alert_status": "alerting", // "alerting", "converged", "recovered"
"alert_send_status": "success", // "init", "success", "error"
"alert_send_error": null,
"alert_time": "2019-02-25 00:00:00",
"alert_send_time": "2019-02-25 00:01:00",
"alert_recover_time": null,
"description": "xxxx"
}
],
"message": "",
"code": "00",
}
"""
now = tznow()
default_start_time = datetime.datetime(year=now.year, month=now.month, day=now.day)
default_end_time = now
start_time = timetostr(params.get("start_time", default_start_time))
end_time = timetostr(params.get("end_time", default_end_time))
bk_username = get_request_username()
queryset = AlertDetail.objects.all()
queryset = queryset.filter(
alert_time__gte=start_time,
alert_time__lt=end_time,
receivers__contains=bk_username,
).order_by("-alert_time")
if params.get("alert_type"):
queryset = queryset.filter(alert_type=params.get("alert_type"))
if params.get("alert_level"):
queryset = queryset.filter(alert_level=params.get("alert_level"))
if params.get("alert_status"):
queryset = queryset.filter(alert_status__in=params.get("alert_status", []))
if params.get("alert_target"):
queryset = queryset.filter(flow_id=params.get("alert_target"))
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(self.attach_dimension_information(serializer.data))
serializer = self.get_serializer(queryset, many=True)
return Response(self.attach_dimension_information(serializer.data))
def attach_dimension_information(self, alert_data):
from gevent import monkey
monkey.patch_all()
# 获取dataflow和rawdata的信息
rawdata_targets, dataflow_targets = set(), set()
for alert in alert_data:
flow_id = alert.get("flow_id")
if alert.get("flow_id").startswith("rawdata"):
rawdata_targets.add(flow_id.strip("rawdata"))
alert["alert_target_id"] = flow_id.strip("rawdata")
alert["alert_target_type"] = "rawdata"
else:
dataflow_targets.add(flow_id)
alert["alert_target_id"] = flow_id
alert["alert_target_type"] = "dataflow"
raw_data_infos, dataflow_infos, biz_infos, project_infos = {}, {}, {}, {}
bk_username = get_request_username()
task_list = [gevent.spawn(self.fetch_biz_infos, biz_infos)]
if len(rawdata_targets) > 0:
task_list.append(gevent.spawn(self.fetch_rawdata_infos, raw_data_infos, rawdata_targets))
if len(dataflow_targets) > 0:
task_list.append(
gevent.spawn(
self.fetch_dataflow_infos,
dataflow_infos,
dataflow_targets,
bk_username,
)
)
gevent.joinall(task_list)
# 获取项目信息
project_ids = set()
for dataflow in list(dataflow_infos.values()):
project_ids.add(dataflow.get("project_id"))
self.fetch_project_infos(project_infos, project_infos)
# 补全项目,业务,dataflow和rawdata信息到告警中
for alert in alert_data:
if alert["alert_target_type"] == "rawdata":
raw_data_info = raw_data_infos.get(alert.get("alert_target_id"), {})
bk_biz_id = raw_data_info.get("bk_biz_id")
alert["alert_target_alias"] = raw_data_info.get("raw_data_alias")
alert["bk_biz_id"] = bk_biz_id
alert["bk_biz_name"] = biz_infos.get(bk_biz_id, {}).get("bk_biz_name")
elif alert["alert_target_type"] == "dataflow":
dataflow_info = dataflow_infos.get(alert.get("alert_target_id"), {})
project_id = dataflow_info.get("project_id")
alert["alert_target_alias"] = dataflow_info.get("flow_name")
alert["project_id"] = project_id
alert["project_alias"] = project_infos.get(project_id, {}).get("project_name")
return alert_data
def prepare_alert_list_filter(self, params):
# 准备过滤维度
flow_id = params.get("flow_id")
node_id = params.get("node_id")
bk_biz_id = params.get("bk_biz_id")
project_id = params.get("project_id")
generate_type = params.get("generate_type")
alert_config_ids = params.get("alert_config_ids", [])
dimensions = params.get("dimensions") or {}
if dimensions and not isinstance(dimensions, dict):
dimensions = json.loads(dimensions)
if "bk_biz_id" in dimensions:
bk_biz_id = dimensions.get("bk_biz_id")
del dimensions["bk_biz_id"]
if "project_id" in dimensions:
project_id = dimensions.get("project_id")
del dimensions["project_id"]
if "generate_type" in dimensions:
generate_type = dimensions.get("generate_type")
del dimensions["generate_type"]
return (
flow_id,
node_id,
bk_biz_id,
project_id,
generate_type,
alert_config_ids,
dimensions,
)
def prepare_alert_list_time(self, params):
# 准备过滤时间
now = tznow()
default_start_time = datetime.datetime(year=now.year, month=now.month, day=now.day)
default_end_time = now
start_time = timetostr(params.get("start_time", default_start_time))
end_time = timetostr(params.get("end_time", default_end_time))
return start_time, end_time
def prepare_alert_list_queryset(
self,
start_time,
end_time,
flow_id,
node_id,
bk_biz_id,
project_id,
generate_type,
alert_config_ids,
):
queryset = self.filter_queryset(self.get_queryset())
queryset = (
queryset.filter(alert_time__gte=start_time, alert_time__lt=end_time)
.exclude(alert_status="converged")
.exclude(alert_status="shielded")
.order_by("-alert_time")
)
if flow_id:
queryset = queryset.filter(flow_id=flow_id)
if node_id:
queryset = queryset.filter(node_id=node_id)
if bk_biz_id:
queryset = queryset.filter(bk_biz_id=bk_biz_id)
if project_id:
queryset = queryset.filter(project_id=project_id)
if generate_type:
queryset = queryset.filter(generate_type=generate_type)
if alert_config_ids:
queryset = queryset.filter(alert_config_id__in=alert_config_ids)
return queryset
@params_valid(serializer=AlertDetailListSerializer)
def list(self, request, params, *args, **kwargs):
"""
@api {get} /datamanage/dmonitor/alert_details/ 查询告警列表
@apiName dmonitor_list_alert_details
@apiGroup DmonitorAlert
@apiVersion 1.0.0
@apiParam {Int} [start_time] 开始时间(默认为当天0点)
@apiParam {Int} [end_time] 结束时间(默认为当前时间)
@apiParam {String} [flow_id] FlowID(数据源的flow用rawdata{}的格式作为flow_id)
@apiParam {String} [node_id] Flow节点的ID
@apiParam {List} [alert_config_ids] 告警策略ID列表
@apiParam {Json} [dimensions] 需要过滤的维度条件
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"result": true,
"data": [
{
"message": "xxxxx",
"message_en": "xxxsxxx",
"full_message": "xxxxxx",
"full_message_en": "xxxxxxx",
"receivers": ["zhangshan", "lisi"],
"notify_ways": ["wechat", "phone"],
"alert_code": "no_data",
"monitor_config": {
"no_data_interval": 600,
"monitor_status": "on",
},
"dimensions": {
"bk_biz_id": 591,
},
"alert_status": "alerting", // "alerting", "converged", "recovered"
"alert_send_status": "success", // "init", "success", "error"
"alert_send_error": null,
"alert_time": "2019-02-25 00:00:00",
"alert_send_time": "2019-02-25 00:01:00",
"alert_recover_time": null,
"description": "xxxx"
}
],
"message": "",
"code": "00",
}
"""
(
flow_id,
node_id,
bk_biz_id,
project_id,
generate_type,
alert_config_ids,
dimensions,
) = self.prepare_alert_list_filter(
params
) # noqa
start_time, end_time = self.prepare_alert_list_time(params)
queryset = self.prepare_alert_list_queryset(
start_time,
end_time,
flow_id,
node_id,
bk_biz_id,
project_id,
generate_type,
alert_config_ids,
)
page = self.paginate_queryset(queryset)
# 带分页的情况下不允许按维度过滤
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer_data = self.get_serializer(queryset, many=True).data
if dimensions:
response_data = []
for item in serializer_data:
if self.check_dimension_match(dimensions, item["dimensions"]):
response_data.append(item)
return Response(response_data)
return Response(serializer_data)
@list_route(methods=["GET"], url_path="summary")
@params_valid(serializer=AlertDetailListSerializer)
def summary(self, request, params):
"""
@api {get} /datamanage/dmonitor/alert_details/summary/ 查询告警汇总信息
@apiName dmonitor_list_alert_detail_summary
@apiGroup DmonitorAlert
@apiVersion 1.0.0
@apiParam {Int} [start_time] 开始时间(默认为24小时前)
@apiParam {Int} [end_time] 结束时间(默认为当前时间)
@apiParam {String} [flow_id] FlowID(数据源的flow用raw_data_id作为flow_id)
@apiParam {List} [alert_config_ids] 告警策略ID列表
@apiParam {Json} [dimensions] 需要过滤的维度条件
@apiParam {String} [group] 分组维度
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"result": true,
"data": {
"alert_count": 100,
"alert_levels": {
"warning": 60,
"danger": 40
},
"alert_codes": {
"task": 40,
"no_data": 10,
"data_trend": 10,
"data_time_delay": 10,
"process_time_delay": 10,
"data_drop": 10,
"data_interrupt": 10
},
"alert_types": {
"task_monitor": 40,
"data_monitor": 60
},
"groups": {
"591_rt1": {
"alert_count": 100,
"alert_levels": {
"warning": 60,
"danger": 40
},
"alert_codes": {
"task": 40,
"no_data": 10,
"data_trend": 10,
"data_time_delay": 10,
"process_time_delay": 10,
"data_drop": 10,
"data_interrupt": 10
},
"alert_types": {
"task_monitor": 40,
"data_monitor": 60
}
}
}
},
"message": "",
"code": "00",
}
"""
(
flow_id,
node_id,
bk_biz_id,
project_id,
generate_type,
alert_config_ids,
dimensions,
) = self.prepare_alert_list_filter(
params
) # noqa
start_time, end_time = self.prepare_alert_list_time(params)
queryset = self.prepare_alert_list_queryset(
start_time,
end_time,
flow_id,
node_id,
bk_biz_id,
project_id,
generate_type,
alert_config_ids,
)
serializer_data = self.get_serializer(queryset, many=True).data
if dimensions:
response_data = []
for item in serializer_data:
if self.check_dimension_match(dimensions, item["dimensions"]):
response_data.append(item)
serializer_data = response_data
response = self.summary_alerts(serializer_data, params.get("group"))
return Response(response)
class AlertNotifyWayViewSet(APIViewSet):
    """
    @api {get} /datamanage/dmonitor/notify_ways/ List alert notification ways
    @apiName dmonitor_list_notify_ways
    @apiGroup DmonitorAlert
    @apiVersion 1.0.0
    @apiSuccessExample {json} Success-Response:
        HTTP/1.1 200 OK
        {
            "result": true,
            "data": [
                {
                    "notify_way": "wechat",
                    "notify_way_name": "wechat",
                    "notify_way_alias": "微信",
                    "description": "",
                    "icon": "(base64 image)"
                },
                {
                    "notify_way": "sms",
                    "notify_way_name": "sms",
                    "notify_way_alias": "短信",
                    "description": "",
                    "icon": "(base64 image)"
                }
            ],
            "message": "",
            "code": "00",
        }
    """

    def list(self, request):
        """List the notification channels (message types) that the blueking
        CMSI service reports as active.

        On any CMSI failure an empty list is returned (not an error response),
        so callers degrade gracefully.
        """
        # Short-term compatibility via this mapping; deployment scripts will be
        # upgraded uniformly later.
        notify_ways = []
        try:
            res = CmsiApi.get_msg_type({"bk_username": "admin"}, raise_exception=True)
            for msg_type_info in res.data:
                # Only expose channels that are enabled on the CMSI side.
                if msg_type_info.get("is_active"):
                    msg_type = msg_type_info.get("type")
                    notify_ways.append(
                        {
                            "notify_way": msg_type,
                            "notify_way_name": msg_type,
                            "notify_way_alias": msg_type_info.get("label"),
                            "description": msg_type_info.get("description", ""),
                            "icon": msg_type_info.get("icon"),
                            "active": msg_type_info.get("is_active"),
                        }
                    )
        except Exception as e:
            # Typo fix: "supportted" -> "supported".
            logger.error("Can not get message type supported by blueking, error: {error}".format(error=e))
        return Response(notify_ways)
class AlertTargetViewSet(BaseMixin, APIViewSet):
    @list_route(methods=["GET"], url_path="mine")
    @params_valid(serializer=AlertTargetMineSerializer)
    def mine(self, request, params, *args, **kwargs):
        """
        @api {get} /datamanage/dmonitor/alert_targets/mine/ List my alert targets
        @apiName dmonitor_mine_alert_targets
        @apiGroup DmonitorAlert
        @apiVersion 1.0.0
        @apiParam {Int} [start_time] Start time (defaults to 00:00 of today)
        @apiParam {Int} [end_time] End time (defaults to now)
        @apiParam {String} [alert_type] Alert type: data_monitor, task_monitor
        @apiParam {String} [alert_level] Alert level: warning, danger
        @apiParam {List} [alert_status] Alert status list
        @apiSuccessExample {json} Success-Response:
            HTTP/1.1 200 OK
            {
                "result": true,
                "data": [
                    {
                        "alert_target_type": "rawdata",
                        "alert_target_id": "391",
                        "alert_target_alias": "体验登录日志"
                    }
                ],
                "message": "",
                "code": "00",
            }
        """
        # "base" selects the collection strategy: alerts actually received by
        # the user ("alert") vs. alert configs visible to the user ("alert_config").
        base = params.get("base")
        flow_ids = []
        if base == "alert":
            flow_ids = self.get_flow_ids_by_alert(params)
        elif base == "alert_config":
            flow_ids = self.get_flow_ids_by_alert_config(params)
        return Response(self.get_alert_targets(flow_ids, params))

    def get_flow_ids_by_alert(self, params):
        """Return distinct flow ids from AlertDetail rows addressed to the
        current user inside [start_time, end_time) (default: today so far)."""
        now = tznow()
        default_start_time = datetime.datetime(year=now.year, month=now.month, day=now.day)
        default_end_time = now
        start_time = timetostr(params.get("start_time", default_start_time))
        end_time = timetostr(params.get("end_time", default_end_time))
        bk_username = get_request_username()
        queryset = AlertDetail.objects.all()
        # receivers appears to be stored as text, hence the substring match on
        # the username -- TODO confirm against the model definition.
        queryset = queryset.filter(
            alert_time__gte=start_time,
            alert_time__lt=end_time,
            receivers__contains=bk_username,
        ).order_by("-alert_time")
        if params.get("alert_type"):
            queryset = queryset.filter(alert_type=params.get("alert_type"))
        if params.get("alert_level"):
            queryset = queryset.filter(alert_level=params.get("alert_level"))
        if params.get("alert_status"):
            queryset = queryset.filter(alert_status__in=params.get("alert_status", []))
        return [x.get("flow_id") for x in queryset.values("flow_id").distinct()]

    def get_flow_ids_by_alert_config(self, params):
        """Return flow ids visible to the user via business and/or project,
        fetching both dimensions concurrently with gevent greenlets."""
        bk_username = get_request_username()
        from gevent import monkey
        # NOTE(review): monkey.patch_all() patches process-wide state on every
        # call at request time -- confirm this is intentional.
        monkey.patch_all()
        flow_ids = set()
        flows_by_biz, flows_by_project = set(), set()
        # Each greenlet mutates the result set passed to it.
        tasks = [
            gevent.spawn(
                self.get_flow_ids_by_biz,
                params.get("bk_biz_id"),
                bk_username,
                flows_by_biz,
            ),
            gevent.spawn(
                self.get_flow_ids_by_project,
                params.get("project_id"),
                bk_username,
                flows_by_project,
            ),
        ]
        gevent.joinall(tasks)
        # No filter -> union of both sources; otherwise only the filtered
        # dimension is returned (project_id takes precedence over bk_biz_id).
        if not params.get("project_id") and not params.get("bk_biz_id"):
            flow_ids = flows_by_biz | flows_by_project
        elif params.get("project_id"):
            flow_ids = flows_by_project
        elif params.get("bk_biz_id"):
            flow_ids = flows_by_biz
        return flow_ids

    def combine_flow_ids(self, flow_ids, temp_ids, filtered=False):
        # Union when unfiltered, intersection when a filter applies.
        if not filtered:
            return flow_ids | temp_ids
        else:
            return flow_ids & temp_ids

    def get_flow_ids_by_biz(self, bk_biz_id, bk_username, flow_ids):
        """Add "rawdata<id>" entries for every raw data source belonging to
        ``bk_biz_id`` (or to all of the user's businesses when not given).

        Mutates and returns ``flow_ids``; failures are logged and leave the
        set unchanged.
        """
        if bk_biz_id:
            bk_biz_ids = [bk_biz_id]
        else:
            bizs = self.get_bizs_by_username(bk_username)
            bk_biz_ids = [x.get("bk_biz_id") for x in bizs]
        try:
            # ids are interpolated as quoted literals; they come from the
            # authenticated user's own business list.
            sql = """
                SELECT id FROM access_raw_data WHERE bk_biz_id in ({bk_biz_ids})
            """.format(
                bk_biz_ids=",".join(["'{}'".format(x) for x in bk_biz_ids])
            )
            res = MetaApi.complex_search(
                {
                    "statement": sql,
                    "backend": "mysql",
                },
                raise_exception=True,
            )
            for item in res.data:
                flow_ids.add("rawdata%s" % str(item.get("id")))
        except Exception as e:
            logger.error("根据业务ID获取数据源ID列表失败, ERROR: %s" % e)
        return flow_ids

    def get_flow_ids_by_project(self, project_id, bk_username, flow_ids):
        """Add dataflow ids for ``project_id`` (or all of the user's projects
        when not given). Mutates and returns ``flow_ids``; failures are logged."""
        if project_id:
            project_ids = [project_id]
        else:
            projects = self.get_projects_by_username(bk_username)
            project_ids = [x.get("project_id") for x in projects]
        try:
            res = DataflowApi.flows.list({"bk_username": bk_username, "project_id": project_ids})
            for item in res.data:
                flow_ids.add(str(item.get("flow_id")))
        except Exception as e:
            logger.error("根据项目ID获取dataflow列表失败, ERROR: %s" % e)
        return flow_ids

    def get_flow_ids_by_target_type(self, alert_target_type):
        """Return the set of flow ids whose alert configs target the given
        type; empty set when the query fails."""
        try:
            flow_ids = DatamonitorAlertConfigRelation.objects.filter(alert_target_type=alert_target_type).values_list(
                "flow_id", flat=True
            )
        except Exception as e:
            flow_ids = []
            logger.error("根据告警对象类型获取flow列表, ERROR: %s" % e)
        return set(flow_ids)

    def get_flow_ids_by_received(self, bk_username):
        """Return the set of flow ids whose alert configs list ``bk_username``
        among the receivers; empty set when the query fails."""
        try:
            flow_ids = DatamonitorAlertConfigRelation.objects.filter(
                alert_config__receivers__icontains=bk_username
            ).values_list("flow_id", flat=True)
        except Exception as e:
            flow_ids = []
            logger.error("根据我接收的告警获取flow列表, ERROR: %s" % e)
        return set(flow_ids)

    def get_alert_targets(self, flow_ids, params=None):
        """Resolve flow ids into display targets ({id, type, alias}), applying
        the alert_target_type / bk_biz_id / project_id filters from params."""
        params = params or {}
        from gevent import monkey
        monkey.patch_all()
        targets = {}
        alert_target_type = params.get("alert_target_type")
        # Split ids into rawdata ("rawdata<id>") and dataflow buckets.
        rawdata_targets, dataflow_targets = set(), set()
        for flow_id in flow_ids:
            if flow_id.startswith("rawdata") and alert_target_type != "dataflow":
                # NOTE(review): str.strip("rawdata") strips *characters* from
                # both ends, not the "rawdata" prefix; it only works here
                # because the remaining id is numeric. flow_id[len("rawdata"):]
                # would be the robust spelling.
                rawdata_targets.add(flow_id.strip("rawdata"))
                targets[flow_id] = {
                    "alert_target_id": flow_id.strip("rawdata"),
                    "alert_target_type": "rawdata",
                }
            elif (not flow_id.startswith("rawdata")) and alert_target_type != "rawdata":
                dataflow_targets.add(flow_id)
                targets[flow_id] = {
                    "alert_target_id": flow_id,
                    "alert_target_type": "dataflow",
                }
        raw_data_infos, dataflow_infos = {}, {}
        bk_username = get_request_username()
        # Fetch detail info for both buckets concurrently; the helpers fill
        # the dicts passed to them.
        gevent.joinall(
            [
                gevent.spawn(
                    self.fetch_dataflow_multiprocess,
                    dataflow_infos,
                    dataflow_targets,
                    bk_username,
                ),
                gevent.spawn(self.fetch_rawdata_infos, raw_data_infos, rawdata_targets),
            ]
        )
        # Attach aliases; drop targets that fail the biz/project filter.
        # Iterate over a snapshot of the keys because entries may be deleted.
        target_ids = list(targets.keys())
        bk_biz_id = params.get("bk_biz_id")
        project_id = params.get("project_id")
        for target_id in target_ids:
            alert_target = targets[target_id]
            if alert_target["alert_target_type"] == "rawdata":
                raw_data_info = raw_data_infos.get(alert_target.get("alert_target_id"), {})
                if bk_biz_id and raw_data_info.get("bk_biz_id") != bk_biz_id:
                    del targets[target_id]
                    continue
                alert_target["alert_target_alias"] = raw_data_info.get("raw_data_alias")
            elif alert_target["alert_target_type"] == "dataflow":
                dataflow_info = dataflow_infos.get(alert_target.get("alert_target_id"), {})
                if project_id and dataflow_info.get("project_id") != project_id:
                    del targets[target_id]
                    continue
                alert_target["alert_target_alias"] = dataflow_info.get("flow_name")
        return list(targets.values())
|
def cumprod(series):
    """Return the cumulative product of ``series``: element ``i`` of the
    result is the product of all elements up to and including ``i``."""
    return series.cumprod()
/**
 * Generally, can be used to filter on FrameBuffer and render to another, such as post-processing an image.
 */
public class TextureFilter implements Disposable, DrawToable {

    /**
     * The frame buffer that will store this filter's output.
     */
    public final FrameBuffer target;

    /**
     * The program that will be applied to the input texture.
     */
    public final Program program;

    /**
     * The mesh that will be used to draw the texture.
     */
    public final Mesh mesh;

    /**
     * Create a new filter that can apply a program to an input texture or frame.
     *
     * @param width     The width of the output texture.
     * @param height    The height of the output texture.
     * @param resources A reference to this application's resources.
     * @param shaderIDs The resource IDs of shader source code to load into this filter's program.
     */
    public TextureFilter(final int width, final int height, final Resources resources, final int... shaderIDs) {
        target = new FrameBuffer(width, height);
        program = new Program(resources, shaderIDs);
        mesh = new Mesh();
    }

    /**
     * Apply this filter's shaders to a texture.
     * The result is rendered into {@link #target}; read it back via {@link #getTexture()}.
     *
     * @param inTexture The input texture to process.
     */
    public void processTexture(final Texture inTexture) {
        program.bind();
        // Pass the output resolution to the program before rendering.
        program.setTexels(target.width, target.height);
        target.bind();
        target.clear();
        inTexture.draw(program);
        mesh.draw(program);
        // Unbind both the program and the render target when done.
        program.unbind();
        target.unbind();
    }

    /**
     * Draw this texture filter's effect to a drawable object.
     *
     * @param drawable The object to draw to.
     * @param unbind Whether or not to unbind this object's program when done.
     */
    @Override
    public void drawTo(final Drawable drawable, final boolean unbind) {
        program.bind();
        drawable.draw(program);
        drawable.cleanup(program);
        if (unbind) {
            program.unbind();
        }
    }

    /**
     * Returns the internal target texture, which receives the filtered output.
     */
    public Texture getTexture() {
        return target.texture;
    }

    /** Releases the frame buffer, program and mesh owned by this filter. */
    @Override
    public void dispose() {
        target.dispose();
        program.dispose();
        mesh.dispose();
    }
}
package service
import (
"net/http"
"github.com/cplusgo/go-gateway/proxy"
)
/**
 * A business cluster. Cluster names must be globally unique so clusters are
 * easy to tell apart, e.g. account service "account", order service "order",
 * goods service "goods".
 */
type Cluster struct {
	// NOTE(review): this struct tag has no effect -- encoding/json ignores
	// unexported fields. Export the field or drop the tag if serialization
	// is intended.
	name string `json:"name"`
	services []*Service
}
// addService registers a backend instance under this cluster and creates a
// reverse proxy for its URL. The zero connection count makes a freshly added
// instance immediately preferred by the least-connections balancer.
func (c *Cluster) addService(name, url string) {
	service := &Service{
		name:          name,
		url:           url,
		connectionNum: 0,
		httpProxy:     proxy.NewHttpProxy(url),
	}
	c.services = append(c.services, service)
}
/**
*根据连接数去负载均衡
*/
func (this *Cluster) serveRequest(w http.ResponseWriter, req *http.Request) {
var serviceImpl *Service = this.services[0]
for _, v := range this.services {
if serviceImpl.connectionNum > v.connectionNum {
serviceImpl = v
}
}
serviceImpl.serveRequest(w, req)
} |
// Copyright 2018-2019 <NAME>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tuna
import (
"bytes"
"fmt"
"html/template"
"io"
"net/http"
"os"
"path/filepath"
"github.com/redmaner/MaguroHTTP/router"
)
// handleDownload handles HTTP requests to the MaguroHTTP download server.
// This can be further configured in the configuration file.
// The download server generates a table of downloadable files based on the
// configured extensions; any other existing path under ServeDir is served as
// a forced download.
func (s *Server) handleDownload() http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		host := router.StripHostPort(r.Host)

		var dlurls []fileInfo

		cfg := s.Cfg

		// If virtual hosting is enabled, switch to the vhost's configuration.
		if cfg.Core.VirtualHosting {
			if _, ok := cfg.Core.VirtualHosts[host]; ok {
				cfg = s.Vhosts[host]
			}
		}

		// Collect downloadable files: one walk per configured extension.
		if cfg.Serve.Download.Enabled {
			for _, v := range cfg.Serve.Download.Exts {
				filepath.Walk(cfg.Serve.ServeDir, func(path string, f os.FileInfo, err error) error {
					// Skip entries the walk could not stat; f is nil when
					// err is non-nil, and dereferencing it would panic.
					if err != nil || f == nil {
						return nil
					}
					if !f.IsDir() && filepath.Ext(f.Name()) == v {
						dlurls = append(dlurls, fileInfo{
							Name:    f.Name(),
							Size:    f.Size(),
							ModTime: f.ModTime(),
						})
					}
					return nil
				})
			}
		}

		path := r.URL.Path

		// Correct path to ServeIndex when path is root.
		if path == "/" {
			path = cfg.Serve.ServeIndex
		}

		buf := bytes.NewBufferString("")

		// If the request path is ServeIndex, generate the index page with
		// the table of downloadable files.
		if path == cfg.Serve.ServeIndex {
			w.Header().Set("Content-Type", "text/html")
			s.setHeaders(w, cfg.Serve.Headers)
			io.WriteString(buf, "<h1>Downloads</h1>")
			io.WriteString(buf, fmt.Sprintln(`<table border="0" cellpadding="0" cellspacing="0">`))
			io.WriteString(buf, fmt.Sprintln(`<tr><td height="auto" width="200px"><span><b>Name</b></span><td height="auto" width="120px"><span><b>Size</b></span></td><td height="auto" width="auto"><span><b>Modification date</b></span></td></tr>`))
			for _, v := range dlurls {
				io.WriteString(buf, fmt.Sprint(`<tr><td height="auto" width="200px"><span><a href="/`, v.Name, `">`, v.Name, `</a><br></span><td height="auto" width="120px"><span >`, v.Size, `</b></span></td><td height="auto" width="auto"><span>`, v.ModTime, `</b></span></td></tr>`))
			}
			io.WriteString(buf, fmt.Sprintln("</table><br>"))

			data := struct {
				DownloadTable template.HTML
			}{
				DownloadTable: template.HTML(buf.String()),
			}
			s.templates.download.Execute(w, data)
			s.LogNetwork(200, r)

			// If the request path is not the index, and the file does exist in
			// ServeDir, the file is served and forced to be downloaded by the
			// recipient. If the file doesn't exist, a 404 error is returned.
		} else if _, err := os.Stat(cfg.Serve.ServeDir + path); err == nil {
			w.Header().Set("Content-Type", getMIMEType(path, cfg.Serve.MIMETypes))
			// Fix: the header value is "attachment"; the previous misspelled
			// "attachement" is not a valid disposition type and user agents
			// may ignore it.
			w.Header().Set("Content-Disposition", "attachment")
			s.setHeaders(w, cfg.Serve.Headers)
			http.ServeFile(w, r, cfg.Serve.ServeDir+path)
			s.LogNetwork(200, r)
		} else {
			// Path wasn't found, so we return a 404 not found error.
			s.handleError(w, r, 404)
			return
		}
	}
}
|
<gh_stars>0
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*
 * This file contains unit tests for the RtpUtility.
 */
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format_vp8.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
#include "webrtc/typedefs.h"
namespace webrtc {
using RtpUtility::RTPPayloadParser;
using RtpUtility::RTPPayload;
using RtpUtility::RTPPayloadVP8;
// Payload descriptor
// 0 1 2 3 4 5 6 7
// +-+-+-+-+-+-+-+-+
// |X|R|N|S|PartID | (REQUIRED)
// +-+-+-+-+-+-+-+-+
// X: |I|L|T|K| RSV | (OPTIONAL)
// +-+-+-+-+-+-+-+-+
// I: | PictureID | (OPTIONAL)
// +-+-+-+-+-+-+-+-+
// L: | TL0PICIDX | (OPTIONAL)
// +-+-+-+-+-+-+-+-+
// T/K: |TID:Y| KEYIDX | (OPTIONAL)
// +-+-+-+-+-+-+-+-+
//
// Payload header
// 0 1 2 3 4 5 6 7
// +-+-+-+-+-+-+-+-+
// |Size0|H| VER |P|
// +-+-+-+-+-+-+-+-+
// | Size1 |
// +-+-+-+-+-+-+-+-+
// | Size2 |
// +-+-+-+-+-+-+-+-+
// | Bytes 4..N of |
// | VP8 payload |
// : :
// +-+-+-+-+-+-+-+-+
// | OPTIONAL RTP |
// | padding |
// : :
// +-+-+-+-+-+-+-+-+
// Checks the required one-byte VP8 payload descriptor fields parsed into
// |header|: N (non-reference frame), S (beginning of partition) and PartID.
void VerifyBasicHeader(const RTPPayloadVP8 &header,
                       bool N, bool S, int PartID) {
  EXPECT_EQ(N, header.nonReferenceFrame);
  EXPECT_EQ(S, header.beginningOfPartition);
  EXPECT_EQ(PartID, header.partitionID);
}
// Checks which optional extension fields (X byte: I, L, T, K) the parser
// reported as present in |header|.
void VerifyExtensions(const RTPPayloadVP8 &header,
                      bool I, bool L, bool T, bool K) {
  EXPECT_EQ(I, header.hasPictureID);
  EXPECT_EQ(L, header.hasTl0PicIdx);
  EXPECT_EQ(T, header.hasTID);
  EXPECT_EQ(K, header.hasKeyIdx);
}
// Minimal payload: only the required descriptor byte (X bit clear), so the
// VP8 payload data starts right after it at offset 1.
TEST(ParseVP8Test, BasicHeader) {
  uint8_t payload[4] = {0};
  payload[0] = 0x14;  // Binary 0001 0100; S = 1, PartID = 4.
  payload[1] = 0x01;  // P frame.

  RTPPayloadParser rtpPayloadParser(kRtpVideoVp8, payload, 4);

  RTPPayload parsedPacket;
  ASSERT_TRUE(rtpPayloadParser.Parse(parsedPacket));

  EXPECT_EQ(RtpUtility::kPFrame, parsedPacket.frameType);
  EXPECT_EQ(kRtpVideoVp8, parsedPacket.type);

  VerifyBasicHeader(parsedPacket.info.VP8, 0 /*N*/, 1 /*S*/, 4 /*PartID*/);
  VerifyExtensions(parsedPacket.info.VP8, 0 /*I*/, 0 /*L*/, 0 /*T*/, 0 /*K*/);

  EXPECT_EQ(payload + 1, parsedPacket.info.VP8.data);
  EXPECT_EQ(4 - 1, parsedPacket.info.VP8.dataLength);
}
// Parses the optional PictureID extension, first as a 7-bit id and then
// (re-using the buffer) as a 15-bit id with the extension flag bit set.
TEST(ParseVP8Test, PictureID) {
  uint8_t payload[10] = {0};
  payload[0] = 0xA0;  // X = 1, N = 1.
  payload[1] = 0x80;  // I = 1: PictureID present.
  payload[2] = 17;    // 7-bit PictureID (MSB clear).

  RTPPayloadParser rtpPayloadParser(kRtpVideoVp8, payload, 10);

  RTPPayload parsedPacket;
  ASSERT_TRUE(rtpPayloadParser.Parse(parsedPacket));

  EXPECT_EQ(RtpUtility::kPFrame, parsedPacket.frameType);
  EXPECT_EQ(kRtpVideoVp8, parsedPacket.type);

  VerifyBasicHeader(parsedPacket.info.VP8, 1 /*N*/, 0 /*S*/, 0 /*PartID*/);
  VerifyExtensions(parsedPacket.info.VP8, 1 /*I*/, 0 /*L*/, 0 /*T*/, 0 /*K*/);

  EXPECT_EQ(17, parsedPacket.info.VP8.pictureID);
  EXPECT_EQ(payload + 3, parsedPacket.info.VP8.data);
  EXPECT_EQ(10 - 3, parsedPacket.info.VP8.dataLength);

  // Re-use payload, but change to long PictureID.
  payload[2] = 0x80 | 17;  // MSB set: 15-bit PictureID, high 7 bits.
  payload[3] = 17;         // PictureID, low 8 bits.

  RTPPayloadParser rtpPayloadParser2(kRtpVideoVp8, payload, 10);
  ASSERT_TRUE(rtpPayloadParser2.Parse(parsedPacket));

  VerifyBasicHeader(parsedPacket.info.VP8, 1 /*N*/, 0 /*S*/, 0 /*PartID*/);
  VerifyExtensions(parsedPacket.info.VP8, 1 /*I*/, 0 /*L*/, 0 /*T*/, 0 /*K*/);

  EXPECT_EQ((17<<8) + 17, parsedPacket.info.VP8.pictureID);
  EXPECT_EQ(payload + 4, parsedPacket.info.VP8.data);
  EXPECT_EQ(10 - 4, parsedPacket.info.VP8.dataLength);
}
// Parses the optional TL0PICIDX extension byte.
TEST(ParseVP8Test, Tl0PicIdx) {
  uint8_t payload[13] = {0};
  payload[0] = 0x90;  // X = 1, S = 1.
  payload[1] = 0x40;  // L = 1: TL0PICIDX present.
  payload[2] = 17;    // TL0PICIDX value.

  RTPPayloadParser rtpPayloadParser(kRtpVideoVp8, payload, 13);

  RTPPayload parsedPacket;
  ASSERT_TRUE(rtpPayloadParser.Parse(parsedPacket));

  EXPECT_EQ(RtpUtility::kIFrame, parsedPacket.frameType);
  EXPECT_EQ(kRtpVideoVp8, parsedPacket.type);

  VerifyBasicHeader(parsedPacket.info.VP8, 0 /*N*/, 1 /*S*/, 0 /*PartID*/);
  VerifyExtensions(parsedPacket.info.VP8, 0 /*I*/, 1 /*L*/, 0 /*T*/, 0 /*K*/);

  EXPECT_EQ(17, parsedPacket.info.VP8.tl0PicIdx);

  EXPECT_EQ(payload + 3, parsedPacket.info.VP8.data);
  EXPECT_EQ(13 - 3, parsedPacket.info.VP8.dataLength);
}
// Parses the optional T/K byte carrying the temporal layer index (TID) and
// the layer-sync (Y) bit.
TEST(ParseVP8Test, TIDAndLayerSync) {
  uint8_t payload[10] = {0};
  payload[0] = 0x88;  // X = 1, PartID = 8.
  payload[1] = 0x20;  // T = 1: TID/Y present.
  payload[2] = 0x80; // TID(2) + LayerSync(false)

  RTPPayloadParser rtpPayloadParser(kRtpVideoVp8, payload, 10);

  RTPPayload parsedPacket;
  ASSERT_TRUE(rtpPayloadParser.Parse(parsedPacket));

  EXPECT_EQ(RtpUtility::kPFrame, parsedPacket.frameType);
  EXPECT_EQ(kRtpVideoVp8, parsedPacket.type);

  VerifyBasicHeader(parsedPacket.info.VP8, 0 /*N*/, 0 /*S*/, 8 /*PartID*/);
  VerifyExtensions(parsedPacket.info.VP8, 0 /*I*/, 0 /*L*/, 1 /*T*/, 0 /*K*/);

  EXPECT_EQ(2, parsedPacket.info.VP8.tID);
  EXPECT_FALSE(parsedPacket.info.VP8.layerSync);

  EXPECT_EQ(payload + 3, parsedPacket.info.VP8.data);
  EXPECT_EQ(10 - 3, parsedPacket.info.VP8.dataLength);
}
// Parses the optional KEYIDX field of the T/K extension byte.
TEST(ParseVP8Test, KeyIdx) {
  uint8_t payload[10] = {0};
  payload[0] = 0x88;  // X = 1, PartID = 8.
  payload[1] = 0x10; // K = 1.
  payload[2] = 0x11; // KEYIDX = 17 decimal.

  RTPPayloadParser rtpPayloadParser(kRtpVideoVp8, payload, 10);

  RTPPayload parsedPacket;
  ASSERT_TRUE(rtpPayloadParser.Parse(parsedPacket));

  EXPECT_EQ(RtpUtility::kPFrame, parsedPacket.frameType);
  EXPECT_EQ(kRtpVideoVp8, parsedPacket.type);

  VerifyBasicHeader(parsedPacket.info.VP8, 0 /*N*/, 0 /*S*/, 8 /*PartID*/);
  VerifyExtensions(parsedPacket.info.VP8, 0 /*I*/, 0 /*L*/, 0 /*T*/, 1 /*K*/);

  EXPECT_EQ(17, parsedPacket.info.VP8.keyIdx);

  EXPECT_EQ(payload + 3, parsedPacket.info.VP8.data);
  EXPECT_EQ(10 - 3, parsedPacket.info.VP8.dataLength);
}
// All four extensions (I, L, T, K) enabled at once; the payload data follows
// the full 6-byte descriptor.
TEST(ParseVP8Test, MultipleExtensions) {
  uint8_t payload[10] = {0};
  payload[0] = 0x88;  // X = 1, PartID = 8.
  payload[1] = 0x80 | 0x40 | 0x20 | 0x10;  // I, L, T and K all set.
  payload[2] = 0x80 | 17; // PictureID, high 7 bits.
  payload[3] = 17; // PictureID, low 8 bits.
  payload[4] = 42; // Tl0PicIdx.
  payload[5] = 0x40 | 0x20 | 0x11; // TID(1) + LayerSync(true) + KEYIDX(17).

  RTPPayloadParser rtpPayloadParser(kRtpVideoVp8, payload, 10);

  RTPPayload parsedPacket;
  ASSERT_TRUE(rtpPayloadParser.Parse(parsedPacket));

  EXPECT_EQ(RtpUtility::kPFrame, parsedPacket.frameType);
  EXPECT_EQ(kRtpVideoVp8, parsedPacket.type);

  VerifyBasicHeader(parsedPacket.info.VP8, 0 /*N*/, 0 /*S*/, 8 /*PartID*/);
  VerifyExtensions(parsedPacket.info.VP8, 1 /*I*/, 1 /*L*/, 1 /*T*/, 1 /*K*/);

  EXPECT_EQ((17<<8) + 17, parsedPacket.info.VP8.pictureID);
  EXPECT_EQ(42, parsedPacket.info.VP8.tl0PicIdx);
  EXPECT_EQ(1, parsedPacket.info.VP8.tID);
  EXPECT_EQ(17, parsedPacket.info.VP8.keyIdx);

  EXPECT_EQ(payload + 6, parsedPacket.info.VP8.data);
  EXPECT_EQ(10 - 6, parsedPacket.info.VP8.dataLength);
}
// A descriptor that promises more extension bytes than the buffer holds must
// make Parse() fail instead of reading out of bounds.
TEST(ParseVP8Test, TooShortHeader) {
  uint8_t payload[4] = {0};
  payload[0] = 0x88;
  payload[1] = 0x80 | 0x40 | 0x20 | 0x10; // All extensions are enabled...
  payload[2] = 0x80 | 17; // ... but only 2 bytes PictureID is provided.
  payload[3] = 17; // PictureID, low 8 bits.

  RTPPayloadParser rtpPayloadParser(kRtpVideoVp8, payload, 4);

  RTPPayload parsedPacket;
  EXPECT_FALSE(rtpPayloadParser.Parse(parsedPacket));
}
// Round-trip: packetize a frame with RtpPacketizerVp8 and verify that the
// parser recovers the same header fields from the produced packet.
TEST(ParseVP8Test, TestWithPacketizer) {
  uint8_t payload[10] = {0};
  uint8_t packet[20] = {0};
  RTPVideoHeaderVP8 inputHeader;
  inputHeader.nonReference = true;
  inputHeader.pictureId = 300;
  inputHeader.temporalIdx = 1;
  inputHeader.layerSync = false;
  inputHeader.tl0PicIdx = kNoTl0PicIdx; // Disable.
  inputHeader.keyIdx = 31;
  RtpPacketizerVp8 packetizer(inputHeader, 20);
  packetizer.SetPayloadData(payload, 10, NULL);
  bool last;
  size_t send_bytes;
  // The whole frame must fit into a single (last) packet.
  ASSERT_TRUE(packetizer.NextPacket(packet, &send_bytes, &last));
  ASSERT_TRUE(last);

  RTPPayloadParser rtpPayloadParser(kRtpVideoVp8, packet, send_bytes);

  RTPPayload parsedPacket;
  ASSERT_TRUE(rtpPayloadParser.Parse(parsedPacket));

  EXPECT_EQ(RtpUtility::kIFrame, parsedPacket.frameType);
  EXPECT_EQ(kRtpVideoVp8, parsedPacket.type);

  VerifyBasicHeader(parsedPacket.info.VP8,
                    inputHeader.nonReference /*N*/,
                    1 /*S*/,
                    0 /*PartID*/);
  // tl0PicIdx was disabled above, so the L extension must be absent.
  VerifyExtensions(parsedPacket.info.VP8,
                   1 /*I*/,
                   0 /*L*/,
                   1 /*T*/,
                   1 /*K*/);

  EXPECT_EQ(inputHeader.pictureId, parsedPacket.info.VP8.pictureID);
  EXPECT_EQ(inputHeader.temporalIdx, parsedPacket.info.VP8.tID);
  EXPECT_EQ(inputHeader.layerSync, parsedPacket.info.VP8.layerSync);
  EXPECT_EQ(inputHeader.keyIdx, parsedPacket.info.VP8.keyIdx);

  EXPECT_EQ(packet + 5, parsedPacket.info.VP8.data);
  EXPECT_EQ(send_bytes - 5, parsedPacket.info.VP8.dataLength);
}
} // namespace
|
/*
* Javolution - Java(TM) Solution for Real-Time and Embedded Systems
* Copyright (C) 2012 - Javolution (http://javolution.org/)
* All rights reserved.
*
* Permission to use, copy, modify, and distribute this software is
* freely granted, provided that this notice is preserved.
*/
package org.javolution.util.internal.collection;
import org.javolution.util.AbstractCollection;
import org.javolution.util.FastIterator;
import org.javolution.util.function.Equality;
import org.javolution.util.function.Predicate;
/**
 * A view resulting of the concatenation of two collections.
 * Iteration yields the elements of the first collection followed by those of
 * the second; mutating operations are forwarded to the backing collections.
 */
public final class ConcatCollectionImpl<E> extends AbstractCollection<E> {

    private static final long serialVersionUID = 0x700L; // Version.
    /** Head of the concatenation; also the target of {@link #add}. */
    private final AbstractCollection<E> first;
    /** Tail of the concatenation; element type is read-only here. */
    private final AbstractCollection<? extends E> second;

    public ConcatCollectionImpl(AbstractCollection<E> first, AbstractCollection<? extends E> second) {
        this.first = first;
        this.second = second;
    }

    /**
     * Removes matching elements from BOTH backing collections. The
     * non-short-circuiting {@code |} is deliberate: the second removeIf must
     * run even when the first already removed something.
     */
    @Override
    public boolean removeIf(Predicate<? super E> filter) {
        return first.removeIf(filter) | second.removeIf(filter);
    }

    /** Iterates over the first collection, then the second; removal is unsupported. */
    @Override
    public FastIterator<E> iterator() {
        return new FastIterator<E>() {
            FastIterator<E> firstItr = first.iterator();
            FastIterator<? extends E> secondItr = second.iterator();

            @Override
            public boolean hasNext() {
                return firstItr.hasNext() || secondItr.hasNext();
            }

            @Override
            public boolean hasNext(Predicate<? super E> matching) {
                return firstItr.hasNext(matching) || secondItr.hasNext(matching);
            }

            @Override
            public E next() {
                // Drain the first iterator before starting on the second.
                return firstItr.hasNext() ? firstItr.next() : secondItr.next();
            }

            @Override
            public void remove() {
                throw new UnsupportedOperationException();
            }};
    }

    /** Iterates in reverse order: second collection backwards, then the first. */
    @Override
    public FastIterator<E> descendingIterator() {
        return new FastIterator<E>() {
            FastIterator<E> firstItr = first.descendingIterator();
            FastIterator<? extends E> secondItr = second.descendingIterator();

            @Override
            public boolean hasNext() {
                return secondItr.hasNext() || firstItr.hasNext();
            }

            @Override
            public boolean hasNext(Predicate<? super E> matching) {
                return secondItr.hasNext(matching) || firstItr.hasNext(matching);
            }

            @Override
            public E next() {
                return secondItr.hasNext() ? secondItr.next() : firstItr.next();
            }

            @Override
            public void remove() {
                throw new UnsupportedOperationException();
            }};
    }

    /** Adds to the first collection only. */
    @Override
    public boolean add(E element) {
        return first.add(element);
    }

    @Override
    public boolean isEmpty() {
        return first.isEmpty() && second.isEmpty();
    }

    @Override
    public int size() {
        return first.size() + second.size();
    }

    /** Clears both backing collections. */
    @Override
    public void clear() {
        first.clear();
        second.clear();
    }

    /** Uses the first collection's equality for element comparisons. */
    @Override
    public Equality<? super E> equality() {
        return first.equality();
    }

    /** Splits along the natural seam, ignoring the requested count {@code n}. */
    @SuppressWarnings("unchecked")
    @Override
    public AbstractCollection<E>[] trySplit(int n) {
        return new AbstractCollection[] { first, second };
    }
}
|
/**
 * Capture the content around the capsule ItemEntity, update capsule state.
 *
 * @param capsule      the capsule item whose state is updated
 * @param anchor       the block position the capture is anchored on; null when no capture base was found
 * @param thrower      UUID of the player who threw the capsule
 * @param size         size of the captured region, forwarded to captureAtPosition
 * @param extendLength horizontal extension around the anchor (presumably (size - 1) / 2 -- confirm at call sites)
 * @param playerWorld  the server world the capture happens in
 * @return true when the capture succeeded, false otherwise
 */
public static boolean captureContentIntoCapsule(ItemStack capsule, BlockPos anchor, UUID thrower, int size, int extendLength, ServerWorld playerWorld) {
    if (anchor != null) {
        // Capture volume starts one block above the anchor, shifted back by
        // extendLength on both horizontal axes so the anchor is centered.
        BlockPos source = anchor.offset(-extendLength, 1, -extendLength);
        return captureAtPosition(capsule, thrower, size, playerWorld, source);
    } else {
        // No capture base found: revert the capsule's activated state and
        // tell the thrower why nothing happened.
        CapsuleItem.revertStateFromActivated(capsule);
        PlayerEntity player = playerWorld.getPlayerByUUID(thrower);
        if (player != null) {
            player.sendMessage(new TranslationTextComponent("capsule.error.noCaptureBase"), Util.NIL_UUID);
        }
    }
    return false;
}
# Reads n (vertex count) and m pairs of vertices, then finds a vertex that
# appears in none of the pairs and prints the n-1 edges of a star centered on
# it. A set gives O(1) membership tests instead of O(m) scans over a list.
n, m = map(int, input().split())
used = set()
for _ in range(m):
    x, y = map(int, input().split())
    used.add(x)
    used.add(y)
print(n - 1)
for center in range(1, n + 1):
    if center not in used:
        # Connect the untouched vertex to every other vertex.
        for j in range(1, n + 1):
            if j != center:
                print(center, j)
        break
Rat poison might be killing more than rats in Clintonville.
Rat poison might be killing more than rats in Clintonville.
Several dogs have been sickened from ingesting the poison and at least one has died, according to residents.
Boo, a 10-year-old collie mix, was treated in June for rat-poison ingestion after his owner, Alicia Spagnol Heringhaus, noticed that his eyes were bloodshot, he was coughing and he had trouble standing up.
“They ruled out everything else under the sun,” said Heringhaus, who lives in Beechwold in northern Clintonville. “It’s just unbelievable. He was my constant companion for 10 years.”
Veterinarians concluded that Boo was bleeding internally. They treated him with vitamin K, but his condition worsened, and Heringhaus had him euthanized about 10 days after the diagnosis.
Columbus police Officer Eric Richards, the community liaison for the precinct that includes Clintonville, said he has heard from several residents who suspect that their dogs ate rat poison. All were in the Beechwold area where Heringhaus lives.
A police report filed on Aug. 13 says two dogs at the same address on E. Jeffrey Place got sick after eating rat-poisoned dog treats tossed into their yard. The dogs were treated for several days and survived, the report said.
Richards said he has tried unsuccessfully to contact that resident to find out more details. He hadn�t heard of any other reports of poisoned dog treats.
Clintonville residents have reported seeing rats in their yards in recent months and have contacted Columbus Public Health. One Clintonville Area Commission member distributed fliers about the problem. An online map has been set up for residents to post rat sightings.
Dr. Edward Cooper, head of small-animal emergency and critical care at the Ohio State University Veterinary Medical Center, said an owner brings in a dog that he or she has seen eating rat poison about once a week, but the numbers have not increased.
About once a month, an owner comes in with a dog that has rat-poisoning symptoms, but the owner hasn’t seen the dog consuming any, Cooper said. Owners usually don’t know where the poison came from, and Cooper sends them home to look in remote corners of the house and yard.
“It’s not uncommon for people to say that there’s no way their dog got into it,” he said. “But knowing what we’re dealing with is helpful.”
Each of the three kinds of rat poison affects dogs in a different way, Cooper said. The most popular type stops blood from clotting, causing internal bleeding, lethargy, pale gums, weakness, and sometimes bloody diarrhea or coughed-up blood. The two other types either damage the kidneys, causing lethargy, lack of appetite and diarrhea; or damage the brain, causing seizures, stumbling and a lack of awareness.
Owners should take their dogs to a veterinarian immediately if they see symptoms, he said.
Cats don’t seem to be attracted to rat poison, he said. “Cats show a lot more discretion as to what they eat, but dogs put pretty much anything into their mouths.”
Anyone putting out rat poison should make sure it’s in a place completely inaccessible to pets, Cooper said. He cautioned anyone moving into a new apartment or house to make sure that past owners didn’t tuck poison away in a remote area of the home or yard.
A dog that eats a poisoned rat probably wouldn’t ingest enough poison to get sick, Cooper said. And he dismissed the idea that a rat could carry enough poison into a yard to affect a dog.
Heringhaus said she kept Boo in her fenced yard or in the house, and she has no idea where he would have encountered rat poison.
“I’ll never know,” she said.
[email protected]
@reporterkathy |
def _is_token_expired(self):
    """Return True when the cached access token is expired or undecodable.

    The token is decoded without verifying signature or claims -- only the
    ``exp`` claim is needed here; full validation happens server-side.
    """
    try:
        jwt_claims = jwt.decode(self._access_token.encode(),
                                algorithms=self.ALGORITHM,
                                options={
                                    'verify_signature': False,
                                    'verify_exp': False,
                                    'verify_nbf': False,
                                    'verify_iat': False,
                                    'verify_aud': False
                                })
    except Exception:
        # A token we cannot decode is treated as expired so callers refresh
        # it. (The previous bare ``except:`` would also have swallowed
        # SystemExit and KeyboardInterrupt.)
        return True
    return jwt_claims['exp'] <= int(time.time())
# Area of a trapezoid with parallel sides a and b and height h.
a = int(input())
b = int(input())
h = int(input())
# Floor division keeps the computation in exact integer arithmetic instead of
# going through a float and truncating with int(); the result is identical for
# non-negative inputs (and exact when (a + b) * h is even, as the task
# presumably guarantees).
print((a + b) * h // 2)
<gh_stars>0
// Copyright IBM Corp. 2017, 2021 All Rights Reserved.
// Licensed under the Mozilla Public License v2.0
package vpc
import (
"fmt"
"github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex"
"github.com/IBM/vpc-go-sdk/vpcv1"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
// DataSourceIBMISSecurityGroupTarget returns the Terraform schema for the
// security group target data source: it looks up a single target of a
// security group by its name.
func DataSourceIBMISSecurityGroupTarget() *schema.Resource {
	return &schema.Resource{
		Read: dataSourceIBMISSecurityGroupTargetRead,

		Schema: map[string]*schema.Schema{
			// Required inputs: the owning security group and the target name.
			"security_group": {
				Type:        schema.TypeString,
				Required:    true,
				Description: "Security group id",
			},
			"target": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "security group target identifier",
			},
			"name": {
				Type:        schema.TypeString,
				Required:    true,
				Description: "Security group target name",
			},
			"crn": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "The CRN for this security group target",
			},
			"resource_type": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "Resource Type",
			},
			"more_info": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "Link to documentation about deleted resources",
			},
		},
	}
}
// dataSourceIBMISSecurityGroupTargetRead pages through the security group's
// targets, finds the one with the requested name and populates the data
// source fields. It returns an error when no target matches.
func dataSourceIBMISSecurityGroupTargetRead(d *schema.ResourceData, meta interface{}) error {
	sess, err := vpcClient(meta)
	if err != nil {
		return err
	}

	securityGroupID := d.Get("security_group").(string)
	name := d.Get("name").(string)

	// Support for pagination: collect every page of targets.
	start := ""
	allrecs := []vpcv1.SecurityGroupTargetReferenceIntf{}
	for {
		listSecurityGroupTargetsOptions := sess.NewListSecurityGroupTargetsOptions(securityGroupID)
		if start != "" {
			listSecurityGroupTargetsOptions.Start = &start
		}
		groups, response, err := sess.ListSecurityGroupTargets(listSecurityGroupTargetsOptions)
		if err != nil {
			// Fix: the previous message said "InstanceGroup Managers", a
			// copy/paste leftover from another data source.
			return fmt.Errorf("[ERROR] Error getting security group targets %s\n%s", err, response)
		}
		if *groups.TotalCount == int64(0) {
			break
		}
		start = flex.GetNext(groups.Next)
		allrecs = append(allrecs, groups.Targets...)
		if start == "" {
			break
		}
	}

	for _, securityGroupTargetReferenceIntf := range allrecs {
		// Use a checked type assertion: the list can carry other concrete
		// target types, and an unchecked assertion would panic on them.
		securityGroupTargetReference, ok := securityGroupTargetReferenceIntf.(*vpcv1.SecurityGroupTargetReference)
		if !ok || securityGroupTargetReference.Name == nil {
			continue
		}
		if *securityGroupTargetReference.Name == name {
			d.Set("target", *securityGroupTargetReference.ID)
			d.Set("crn", securityGroupTargetReference.CRN)
			if securityGroupTargetReference.Deleted != nil {
				d.Set("more_info", *securityGroupTargetReference.Deleted.MoreInfo)
			}
			if securityGroupTargetReference.ResourceType != nil {
				d.Set("resource_type", *securityGroupTargetReference.ResourceType)
			}
			d.SetId(fmt.Sprintf("%s/%s", securityGroupID, *securityGroupTargetReference.ID))
			return nil
		}
	}
	return fmt.Errorf("Security Group Target %s not found", name)
}
|
<filename>src/main/java/com/ycs/community/cmmbo/service/CommentService.java
package com.ycs.community.cmmbo.service;
import com.ycs.community.cmmbo.domain.dto.CommentRequestDto;
import com.ycs.community.cmmbo.domain.dto.CommentResponseDto;
/**
 * Service contract for reading and posting comments on questions and answers.
 */
public interface CommentService {
    /**
     * Retrieves the comments attached to the given question.
     *
     * @param questionId id of the question whose comments are requested
     * @return a response DTO carrying the question's comments
     */
    CommentResponseDto qryCommentsByQuestionId(Long questionId);
    /**
     * Posts a comment on a question or an answer, as described by the request.
     *
     * @param request DTO describing the comment to create
     * @return presumably whether the comment was stored successfully — confirm
     *         against the implementing class
     */
    boolean commentQuestionOrAnswer(CommentRequestDto request);
}
|
Book reviews : Parsons, A.J. and Abrahams, A.D., editors, 1992: Overland flow hydraulics and erosion mechanics. London: UCL Press, xvi + 464 pp. £60.00 cloth. ISBN: 85728 006 7
The hydraulics and erosion mechanics of overland flow is a subjectmatter of interest to a number of professions and academic disciplines. Nevertheless, it is ironic that separate strands and approaches of research have been ongoing for decades, without many formal means for these distinct groups to pool information and findings. The potential importance of this collection of edited papers (presented to a workshop held in July 1991 ) is that it brings together this otherwise disparate research. Contributions to this volume were invited from participants and others unable to attend. The authors of the 17 articles reproduced in the text include 14 from the USA, six from the UK, three from Canada, two from Belgium, two from Australia, and one from each of Israel, Portugal and Germany. The contribu- |
<filename>internal/httpclient/client/public/complete_self_service_settings_flow_with_profile_method_responses.go
// Code generated by go-swagger; DO NOT EDIT.
package public
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/ory/kratos-client-go/models"
)
// CompleteSelfServiceSettingsFlowWithProfileMethodReader is a Reader for the CompleteSelfServiceSettingsFlowWithProfileMethod structure.
type CompleteSelfServiceSettingsFlowWithProfileMethodReader struct {
	formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
// Only a 200 response is returned as a success value; every other recognized
// status code is decoded and returned as an error value.
func (o *CompleteSelfServiceSettingsFlowWithProfileMethodReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		result := NewCompleteSelfServiceSettingsFlowWithProfileMethodOK()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return result, nil
	case 302:
		result := NewCompleteSelfServiceSettingsFlowWithProfileMethodFound()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 400:
		result := NewCompleteSelfServiceSettingsFlowWithProfileMethodBadRequest()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 401:
		result := NewCompleteSelfServiceSettingsFlowWithProfileMethodUnauthorized()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 403:
		result := NewCompleteSelfServiceSettingsFlowWithProfileMethodForbidden()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	case 500:
		result := NewCompleteSelfServiceSettingsFlowWithProfileMethodInternalServerError()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return nil, result
	default:
		return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
	}
}
// NewCompleteSelfServiceSettingsFlowWithProfileMethodOK creates a CompleteSelfServiceSettingsFlowWithProfileMethodOK with default headers values
func NewCompleteSelfServiceSettingsFlowWithProfileMethodOK() *CompleteSelfServiceSettingsFlowWithProfileMethodOK {
	return &CompleteSelfServiceSettingsFlowWithProfileMethodOK{}
}
/*CompleteSelfServiceSettingsFlowWithProfileMethodOK handles this case with default header values.
settingsFlow
*/
type CompleteSelfServiceSettingsFlowWithProfileMethodOK struct {
	Payload *models.SettingsFlow
}
// Error returns a textual description of the response including its status code and payload.
func (o *CompleteSelfServiceSettingsFlowWithProfileMethodOK) Error() string {
	return fmt.Sprintf("[POST /self-service/settings/methods/profile][%d] completeSelfServiceSettingsFlowWithProfileMethodOK  %+v", 200, o.Payload)
}
// GetPayload returns the decoded response payload.
func (o *CompleteSelfServiceSettingsFlowWithProfileMethodOK) GetPayload() *models.SettingsFlow {
	return o.Payload
}
// readResponse decodes the response body into the payload.
func (o *CompleteSelfServiceSettingsFlowWithProfileMethodOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	o.Payload = new(models.SettingsFlow)
	// response payload
	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewCompleteSelfServiceSettingsFlowWithProfileMethodFound creates a CompleteSelfServiceSettingsFlowWithProfileMethodFound with default headers values
func NewCompleteSelfServiceSettingsFlowWithProfileMethodFound() *CompleteSelfServiceSettingsFlowWithProfileMethodFound {
	return &CompleteSelfServiceSettingsFlowWithProfileMethodFound{}
}
/*CompleteSelfServiceSettingsFlowWithProfileMethodFound handles this case with default header values.
Empty responses are sent when, for example, resources are deleted. The HTTP status code for empty responses is
typically 201.
*/
type CompleteSelfServiceSettingsFlowWithProfileMethodFound struct {
}
// Error returns a textual description of the response including its status code.
func (o *CompleteSelfServiceSettingsFlowWithProfileMethodFound) Error() string {
	return fmt.Sprintf("[POST /self-service/settings/methods/profile][%d] completeSelfServiceSettingsFlowWithProfileMethodFound ", 302)
}
// readResponse is a no-op: a 302 response carries no body to decode.
func (o *CompleteSelfServiceSettingsFlowWithProfileMethodFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	return nil
}
// NewCompleteSelfServiceSettingsFlowWithProfileMethodBadRequest creates a CompleteSelfServiceSettingsFlowWithProfileMethodBadRequest with default headers values
func NewCompleteSelfServiceSettingsFlowWithProfileMethodBadRequest() *CompleteSelfServiceSettingsFlowWithProfileMethodBadRequest {
	return &CompleteSelfServiceSettingsFlowWithProfileMethodBadRequest{}
}
/*CompleteSelfServiceSettingsFlowWithProfileMethodBadRequest handles this case with default header values.
settingsFlow
*/
type CompleteSelfServiceSettingsFlowWithProfileMethodBadRequest struct {
	Payload *models.SettingsFlow
}
// Error returns a textual description of the response including its status code and payload.
func (o *CompleteSelfServiceSettingsFlowWithProfileMethodBadRequest) Error() string {
	return fmt.Sprintf("[POST /self-service/settings/methods/profile][%d] completeSelfServiceSettingsFlowWithProfileMethodBadRequest  %+v", 400, o.Payload)
}
// GetPayload returns the decoded response payload.
func (o *CompleteSelfServiceSettingsFlowWithProfileMethodBadRequest) GetPayload() *models.SettingsFlow {
	return o.Payload
}
// readResponse decodes the response body into the payload.
func (o *CompleteSelfServiceSettingsFlowWithProfileMethodBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	o.Payload = new(models.SettingsFlow)
	// response payload
	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewCompleteSelfServiceSettingsFlowWithProfileMethodUnauthorized creates a CompleteSelfServiceSettingsFlowWithProfileMethodUnauthorized with default headers values
func NewCompleteSelfServiceSettingsFlowWithProfileMethodUnauthorized() *CompleteSelfServiceSettingsFlowWithProfileMethodUnauthorized {
	return &CompleteSelfServiceSettingsFlowWithProfileMethodUnauthorized{}
}
/*CompleteSelfServiceSettingsFlowWithProfileMethodUnauthorized handles this case with default header values.
genericError
*/
type CompleteSelfServiceSettingsFlowWithProfileMethodUnauthorized struct {
	Payload *models.GenericError
}
// Error returns a textual description of the response including its status code and payload.
func (o *CompleteSelfServiceSettingsFlowWithProfileMethodUnauthorized) Error() string {
	return fmt.Sprintf("[POST /self-service/settings/methods/profile][%d] completeSelfServiceSettingsFlowWithProfileMethodUnauthorized  %+v", 401, o.Payload)
}
// GetPayload returns the decoded response payload.
func (o *CompleteSelfServiceSettingsFlowWithProfileMethodUnauthorized) GetPayload() *models.GenericError {
	return o.Payload
}
// readResponse decodes the response body into the payload.
func (o *CompleteSelfServiceSettingsFlowWithProfileMethodUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	o.Payload = new(models.GenericError)
	// response payload
	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewCompleteSelfServiceSettingsFlowWithProfileMethodForbidden creates a CompleteSelfServiceSettingsFlowWithProfileMethodForbidden with default headers values
func NewCompleteSelfServiceSettingsFlowWithProfileMethodForbidden() *CompleteSelfServiceSettingsFlowWithProfileMethodForbidden {
	return &CompleteSelfServiceSettingsFlowWithProfileMethodForbidden{}
}
/*CompleteSelfServiceSettingsFlowWithProfileMethodForbidden handles this case with default header values.
genericError
*/
type CompleteSelfServiceSettingsFlowWithProfileMethodForbidden struct {
	Payload *models.GenericError
}
// Error returns a textual description of the response including its status code and payload.
func (o *CompleteSelfServiceSettingsFlowWithProfileMethodForbidden) Error() string {
	return fmt.Sprintf("[POST /self-service/settings/methods/profile][%d] completeSelfServiceSettingsFlowWithProfileMethodForbidden  %+v", 403, o.Payload)
}
// GetPayload returns the decoded response payload.
func (o *CompleteSelfServiceSettingsFlowWithProfileMethodForbidden) GetPayload() *models.GenericError {
	return o.Payload
}
// readResponse decodes the response body into the payload.
func (o *CompleteSelfServiceSettingsFlowWithProfileMethodForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	o.Payload = new(models.GenericError)
	// response payload
	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
// NewCompleteSelfServiceSettingsFlowWithProfileMethodInternalServerError creates a CompleteSelfServiceSettingsFlowWithProfileMethodInternalServerError with default headers values
func NewCompleteSelfServiceSettingsFlowWithProfileMethodInternalServerError() *CompleteSelfServiceSettingsFlowWithProfileMethodInternalServerError {
	return &CompleteSelfServiceSettingsFlowWithProfileMethodInternalServerError{}
}
/*CompleteSelfServiceSettingsFlowWithProfileMethodInternalServerError handles this case with default header values.
genericError
*/
type CompleteSelfServiceSettingsFlowWithProfileMethodInternalServerError struct {
	Payload *models.GenericError
}
// Error returns a textual description of the response including its status code and payload.
func (o *CompleteSelfServiceSettingsFlowWithProfileMethodInternalServerError) Error() string {
	return fmt.Sprintf("[POST /self-service/settings/methods/profile][%d] completeSelfServiceSettingsFlowWithProfileMethodInternalServerError  %+v", 500, o.Payload)
}
// GetPayload returns the decoded response payload.
func (o *CompleteSelfServiceSettingsFlowWithProfileMethodInternalServerError) GetPayload() *models.GenericError {
	return o.Payload
}
// readResponse decodes the response body into the payload.
func (o *CompleteSelfServiceSettingsFlowWithProfileMethodInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	o.Payload = new(models.GenericError)
	// response payload
	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
		return err
	}
	return nil
}
|
I present side-by-side photo comparisons featuring images from the "Saving Mr. Banks" trailer(right) and images of people and places that inspired scenes from the movie (left).
I found myself growing more and more anxious for the December release of "Saving Mr. Banks" starring Tom Hanks as Walt Disney. The film takes place during the years leading up to and including the 1964 release of "Mary Poppins". How well did movie-makers recreate the period?
In the trailer we see a persistent Walt Disney invite a reluctant P.L. Travers (author of the Mary Poppins children novels) to come with him to Disneyland. During filming last November, reproductions of various now-extinct props were temporarily added to parts of Disneyland. The classic oval "D-I-S-N-E-Y-L-A-N-D" letters were added to the turnstyle rooftops. Attraction posters were added to the wall in front of the Floral Mickey and Railroad Station. And on the Railroad Station, a Santa Fe sign covering up the current sign.
Extras wore clothing from the era. And look... No new-school snow on Sleeping Beauty Castle (like many fans have been fearing).
Here we see a very close replica of Walt's office.
Walt's office (above left) as displayed at the Ronald Reagan Presidential Library. Photo by Michael Kane.
Back when the first images of production appeared online, I created and tweeted this photo comparison. It was quickly used by hundreds of movie and news sites. But the joke's on you, Hollywood! I mistakenly used the wrong Walt portrait. I later found the correct one (below). It appears they added a photo of Tom Hanks' head onto the photo of Walt Disney, keeping the same suit and background.
Emma Thompson plays P.L. Travers and pulls off the look rather well.
Robert and Richard Sherman are played by B.J. Novak and Jason Schwartzman.
This black-and-white television footage of Walt Disney interacting with an animated Tinkerbell is nicely recreated with Tom Hanks.
In the original footage, Walt is elevated towards the ceiling of his studio office with Pixie Dust but then is lowered to the ground once he brushes the dust off his suit jacket. Tom Hanks is also elevated until he brushes off the Pixie Dust in the same manner.
The Hollywood premiere of "Mary Poppins" is beautifully recreated. The exterior of Grauman's Chinese Theatre has changed a bit since 1964 but the feeling of the premiere is captured beautifully.
How fun is it to see Walt's wife, Lillian, in the film as well (played by Dendrie Taylor).
From what little we've seen, how well do the actors capture the personalities of the people the depict? I say quite well. How well does the film capture the period? Very well. Are there post-1964 architecture and props visible in the Disneyland shots? Yes. Pinocchio's Daring Journey (opened in 1983) can be seen behind Tom Hanks while he's on the Carousel. But really, everything in the trailer feels right, in my opinion. No views of Tomorrowland '98! Overall, was 1960s Disneyland presented well? Absolutely. Will overly-picky fans find endless fault with the film? Most-likely. Will the film present a completely accurate version of the Walt–P.L story? Not likely. Will there be some creative and/or historical license taken? Duh. It's not a documentary. Will the film offer a charming, delightful, and nostalgic "based on" account of this true story? Let's hope so.
Related posts:
THEN AND NOW: Walt at Disneyland
That's What Walt Said
Walt's Wife Talks About EPCOT Center
THEN AND NOW Photo Collection |
/**
* Insert a new document into the database.
*
* @param document the new document to insert
* @return the assigned database id of the newly inserted document
*/
public static int insertDocument(Document document) {
DSLContext dslContext = ResourceManager.getDslContext();
DocumentRecord documentRecord = dslContext.newRecord(Tables.DOCUMENT, document);
documentRecord.store();
documentRecord.refresh();
return documentRecord.getId();
} |
/**
 * Represents an extension of the CachingConnectionFactory that adds username and password support,
 * in case the JMS broker is secured.
 */
public class MotechCachingConnectionFactory extends CachingConnectionFactory {
    private String username;
    private String password;
    /**
     * Sets the username.
     *
     * @param username the name of an user
     */
    public void setUsername(String username) {
        this.username = username;
    }
    /**
     * Sets the password.
     *
     * @param password the password used to authenticate with the JMS broker
     */
    public void setPassword(String password) {
        this.password = password;
    }
    /**
     * Sets the brokerURL of the ActiveMQ only when the <code>TargetConnectionFactory</code>
     * is instanceof ActiveMQConnectionFactory.
     *
     * @param brokerURL the brokerURL of the ActiveMQ
     */
    public void setBrokerUrl(String brokerURL) {
        if (getTargetConnectionFactory() instanceof ActiveMQConnectionFactory) {
            ((ActiveMQConnectionFactory) getTargetConnectionFactory()).setBrokerURL(brokerURL);
        }
    }
    /**
     * Creates a connection. When both username and password are blank, an
     * anonymous connection is created; otherwise the stored credentials are
     * passed to the target factory. Note that credentials are used whenever
     * at least one of the two values is non-blank.
     */
    @Override
    protected Connection doCreateConnection() throws JMSException {
        if (StringUtils.isBlank(username) && StringUtils.isBlank(password)) {
            return getTargetConnectionFactory().createConnection();
        } else {
            return getTargetConnectionFactory().createConnection(username, password);
        }
    }
}
/**
 * Ensure that graphs generated by gen_random_chordal_graph() respect the
 * maximum number of edges specified as second argument.
 */
BOOST_AUTO_TEST_CASE(random_chordal_graph_respects_max_edges) {
    // 100 * 99 / 4 is half the maximum edge count of a 100-vertex simple
    // graph (n*(n-1)/2), so the cap is non-trivial for the generator.
    const unsigned max_e = 100 * 99 / 4;
    // Generation is randomized, so repeat to gain confidence in the bound.
    REPEAT(100) {
        Graph g = gen_random_chordal_graph<Graph>(100, max_e);
        BOOST_CHECK_LE(boost::num_edges(g), max_e);
    }
}
/**
 *******************************************************************************
 ** \brief Int No.000 IRQ handler
 **
 ******************************************************************************/
void IRQ000_Handler(void)
{
    /* Dispatch to the callback registered for this interrupt slot, if any;
       an unregistered interrupt is silently ignored. */
    if (NULL != IrqHandler[Int000_IRQn])
    {
        IrqHandler[Int000_IRQn]();
    }
}
Sam Sheppard, 34, claimed she did not even realise her lover was popping the question and the experience was more like “being asked out for pizza” - so she set up a company to train other men in the art of proposing.
She has started her own company, The Proposal Expert, and has already helped couples all over Britain to take the first step toward tying the knot. Although most of her customers have kept secret the fact that they paid her for help.
"The idea came from my own disappointing experience - my boyfriend proposed to me during an argument in the same way you might ask someone if they'd like to go for a pizza,” Miss Sheppard said.
"I want to help other people not to feel how I felt. Initially, I felt like he couldn't really care about me to have asked that way, though of course I know that's not true at all.
"I ask key questions to put together the pieces of the puzzle - what makes their partner laugh, how they spend their Saturday mornings - those little things build up a picture.
"Once we've got the idea, the proposer can then have as much or as little input as they like. So far I've got a 100 per cent success rate."
Despite the fact that her boyfriend Ryan Galeozzi got the proposal so wrong the couple are still together. Mr Galeozzi, her boyfriend of four years, is planning a second attempt.
It can take up to three weeks for Miss Sheppard, who has been compared to matchmaker Hitch from the 2005 Will Smith film of the same name, to plan the perfect proposal.
The groom – or bride – to be can choose from a number of packages, ranging from a bespoke ideas consultation costing under £100, to a full proposal planning service where every aspect is organised. Miss Sheppard claims she can cater for any budget, from £10 to £10,000.
"A lot of men use Google for ideas and end up with something generic. They can become overwhelmed and a little bit paralysed with it,” Miss Shepherd said.
"A good proposal can be lasting talking point, but a bad one can end up on YouTube for the whole world to see.
"I haven't heard any really wacky ideas yet - unless your partner is really wacky too those are best avoided.
"The treasure hunt proposal is a fail-safe option, as long as it has been personalised to the couple and their relationship, and surveys have shown most women prefer to be asked while their partner is on bended knee."
Miss Sheppard, from Monmouthshire, Wales, has in the past worked as an English teacher and lecturer, as well as in marketing.
"A lot of my friends are male which has helped me to know how they think and what makes them tick,” she said.
"I know that they need help with this sort of thing and I'm here to give it to them. I can give them a female's perspective and they don't have to worry about being judged by their friends or their girlfriend finding out. I offer absolute discretion.
"Most of the people I've planned for so far have chosen not to tell the bride-to-be that they've used my services, but I think as time goes on that will change.
"This is a huge industry in America and the guys there are proud to have used a proposal planner. Their fiancés usually love that they've used one too.
"Everything the man says is what helps me to put the idea together - so whatever package they choose they still play a big part." |
// NewMockStorages creates storage mock objects
func NewMockStorages(ctrl *gomock.Controller) (storage *mock.MockStorage, pubsub *mock.MockPubSubStorage, jwts *mock.MockJwtStorage) {
storage = mock.NewMockStorage(ctrl)
pubsub = mock.NewMockPubSubStorage(ctrl)
jwts = mock.NewMockJwtStorage(ctrl)
storage.EXPECT().AsPubSubStorage().Return(pubsub).AnyTimes()
storage.EXPECT().AsJwtStorage().Return(jwts).AnyTimes()
return
} |
<filename>src/cleaners/clean_ec2_instances.py
# clean_ec2_instances.py
# Package Imports
import boto3
# Module Imports
import helpers
from botocore.exceptions import ClientError
# Cleaner Settings
RESOURCE_NAME = "EC2 Instance"
WHITELIST_NAME = "ec2_instances"
BOTO3_NAME = "ec2"
BOTO3_LIST_FUNCTION = "describe_instances"
def clean_ec2_instances() -> list:
    """Top-level cleaner: discover and terminate EC2 instances.

    Returns:
        A list describing all terminated instances (ids, or error strings
        for instances that failed to terminate).
    """
    helpers.starting_clean_print(RESOURCE_NAME)
    client = boto3.client(BOTO3_NAME)
    found = get_instances(client)
    removed = delete_instances(found)
    helpers.finished_clean_print(RESOURCE_NAME, removed)
    return removed
def get_instances(ec2_client) -> list:
    """Gets all the instances in an account.

    Args:
        ec2_client: A EC2 boto3 client.

    Returns:
        A list of instance description dicts (one per instance), gathered
        across all reservations and result pages.
    """
    instance_list = []
    paginator = ec2_client.get_paginator(BOTO3_LIST_FUNCTION)
    for page in paginator.paginate():
        for reservation in page["Reservations"]:
            # extend() appends in place; the previous `list + list`
            # concatenation rebuilt the list each time (accidentally
            # quadratic over many pages).
            instance_list.extend(reservation["Instances"])
    return instance_list
def delete_instances(instances) -> list:
    """Terminates all instances in the instances parameter.

    Args:
        instances: A list of instance description dicts to terminate.

    Returns:
        One entry per processed instance: the instance id on success, or an
        error string when termination failed. Whitelisted instances are
        skipped entirely.
    """
    terminated_instances = []
    # Create the EC2 resource once, instead of once per instance inside the
    # loop as before.
    ec2 = boto3.resource(BOTO3_NAME)
    for instance in instances:
        instance_id = instance["InstanceId"]
        if helpers.check_in_whitelist(instance_id, WHITELIST_NAME):
            continue
        try:
            ec2.Instance(instance_id).terminate()  # Terminate the instance
        except ClientError as error:
            error_string = "{0} on {1} - {2}".format(error, RESOURCE_NAME, instance_id)
            print(error_string)
            terminated_instances.append(error_string)
            continue
        terminated_instances.append(instance_id)
    return terminated_instances
|
<gh_stars>0
export type DeepPartial<T> = { [P in keyof T]?: DeepPartial<T[P]> }; // todo: replace with the latest syntax
// Known log level names. The original union listed 'debug' twice; the
// duplicate has been replaced with 'error' to match the conventional npm
// level set (error/warn/info/verbose/debug). The trailing `string` still
// permits custom level names.
export type Levels = 'error' | 'warn' | 'info' | 'verbose' | 'debug' | string;
// export type Logger = 'stdout' | 'human' | 'json' | 'stackdriver';
/**
 * The actual output channels
 * std = the standard output including stdout and stderr
 * human = a human friendly log file
 * json = a machine friendly log file (including stackdriver)
 */
export type OutputChannel = 'stdout' | 'human' | 'json';
/**
 * A functional channel is a shortcut option for output in different scenarios
 * console = output in a non-container environment
 * container = output in a container environment
 * log = output all logging options available (human/json/stackdriver)
 */
export type FunctionalChannel = 'console' | 'container' | 'log';
export type Channel = OutputChannel | FunctionalChannel;
export interface FormatterOptions {
  level: string;
  message?: string;
  meta?: object;
  timestamp(): string;
}
export interface MessageOptions {
  loggingMessage?: string;
  data?: object;
  meta?: object;
  channels?: Channel[];
}
export interface SherOptions {
  captureUnhandledException: boolean;
  exitOnError: boolean;
  emitErrors: boolean;
  level: Levels;
  // customLevels: string[]; // disabled due to the structure of DeepPartial
  stdout: {
    level?: Levels;
    output?: boolean;
  };
  humanLog: {
    level?: Levels;
    path: string;
  };
  jsonLog: {
    level?: Levels;
    path: string;
  };
  stackdriver: {
    level?: Levels;
    keyFile: string;
    projectID: string;
  };
  monitor: {
    display: boolean;
    logging: {
      maxInterval: number;
      maxChange: number;
    };
  };
}
|
// Build builds the addon manifests and writes them to disk. It returns the
// list of written files.
func (a *Addon) Build(config BuildOptions) ([]string, error) {
	// Dispatch on the addon's declared kind; each builder returns the paths
	// of the files it wrote.
	switch a.Kind {
	case addonKindJsonnet:
		return a.buildJsonnet(config)
	case addonKindYAML:
		return a.buildYAML(config)
	default:
		return nil, fmt.Errorf("unknown addon kind '%s'", a.Kind)
	}
}
/// Translate a source coordinate represented as a SMLoc (a pointer) into
/// line and column.
///
/// Returns `None` when the parser cannot resolve the location.
pub fn find_coord(&self, loc: SMLoc) -> Option<Coord> {
    let mut res = MaybeUninit::<Coord>::uninit();
    // SAFETY: assumes the C API fully initializes `res` whenever it returns
    // true — per the hermes parser FFI contract (confirm against the C
    // header). `assume_init` is only reached on the true branch.
    if unsafe { hermes_parser_find_location(self.parser_ctx, loc, res.as_mut_ptr()) } {
        Some(unsafe { res.assume_init() })
    } else {
        None
    }
}
Material/Blackness: Race and Its Material Reconstructions on the Seventeenth-Century English Stage
Abstract:Examining William Shakespeare's Titus Andronicus, William Heminge's The Fatal Contract, and Elkanah Settle's Love and Revenge, this article argues that the seventeenth-century English stage imagines blackness as fluid and transferable because of the materials used in its production. These cosmetics are imagined as being potentially moveable from one surface to another. The article considers the intersection between the materials used to recreate blackness and its semiotic values, focusing on the relationship between black bodies and female bodies. It argues that the materials used in the recreation of these bodies inform and are informed by the panoply of discourses surrounding them. |
/**
 * Concrete implementation of an atomic version of the in memory context provider.
 */
public class AtomicInMemoryContextProvider extends InMemoryContextProvider<
		AtomicRateLimiterTypeStrategy,
		AtomicRateLimitContext,
		AtomicContextConfiguration> {
	/**
	 * Constructor that takes a {@link com.calebjonasson.ratelimiter.core.context.configuration.AbstractContextConfiguration}
	 *
	 * @param contextConfiguration the configuration whose limit and interval
	 *        are used when creating new rate limit contexts
	 */
	public AtomicInMemoryContextProvider(AtomicContextConfiguration contextConfiguration) {
		super(contextConfiguration);
	}
	/**
	 * Builds a new context from the provider-wide configuration. Note that
	 * {@code contextKey} is not used here: every context receives the same
	 * configured limit and interval.
	 */
	@Override
	protected AtomicRateLimitContext createContext(String contextKey) {
		return AtomicRateLimitContext.builder()
				.limit(this.contextConfiguration.getLimit())
				.interval(this.contextConfiguration.getInterval())
				.build();
	}
}
GazeChat: Enhancing Virtual Conferences with Gaze-aware 3D Photos
Communication software such as Clubhouse and Zoom has evolved to be an integral part of many people’s daily lives. However, due to network bandwidth constraints and concerns about privacy, cameras in video conferencing are often turned off by participants. This leads to a situation in which people can only see each others’ profile images, which is essentially an audio-only experience. Even when switched on, video feeds do not provide accurate cues as to who is talking to whom. This paper introduces GazeChat, a remote communication system that visually represents users as gaze-aware 3D profile photos. This satisfies users’ privacy needs while keeping online conversations engaging and efficient. GazeChat uses a single webcam to track whom any participant is looking at, then uses neural rendering to animate all participants’ profile images so that participants appear to be looking at each other. We have conducted a remote user study (N=16) to evaluate GazeChat in three conditions: audio conferencing with profile photos, GazeChat, and video conferencing. Based on the results of our user study, we conclude that GazeChat maintains the feeling of presence while preserving more privacy and requiring lower bandwidth than video conferencing, provides a greater level of engagement than to audio conferencing, and helps people to better understand the structure of their conversation. |
/**
* Loads a timefield (3 TextFields + labels) into a HBox
* @param msg Message in front of the TextFields
* @param hbox HBox to store the elements in
*/
private void loadTimeField(String msg, HBox hbox){
int size = 50;
hbox.setAlignment(Pos.CENTER);
hbox.setSpacing(5);
Label label = new Label(msg);
label.setPadding(new Insets(0, 20, 0, 0));
TextField field = new TextField("00");
field.setPrefWidth(size);
TextField finalField = field;
field.focusedProperty().addListener((obs, oldVal, newVal) ->{
if(!newVal && finalField.getText().length() == 1){
finalField.setText( "0" + finalField.getText());
}
});
hbox.getChildren().addAll(label, field);
for(int i = 1; i < 3; i++){
label = new Label(":");
field = new TextField("00");
field.setPrefWidth(size);
TextField finalField1 = field;
field.focusedProperty().addListener((obs, oldVal, newVal) ->{
if(!newVal && finalField1.getText().length() == 1){
finalField1.setText( "0" + finalField1.getText());
}
});
hbox.getChildren().addAll(label, field);
}
label = new Label("hh:mm:ss");
label.setPadding(new Insets(0, 0, 0, 20));
hbox.getChildren().add(label);
} |
def as_dict(self, key_pair: KeyPair) -> dict:
    """Convert the given key pair into its dictionary representation."""
    return {
        KeyPair.DICT_PUBLIC_KEY: key_pair.public,
        KeyPair.DICT_SECRET_KEY: key_pair.secret,
    }
def add_so_item_status_history(self, id_sales_order_item_status_history,
                               fk_sales_order_item,
                               fk_sales_order_item_status, created_at):
    """Insert one sales-order-item status history row and commit it.

    Returns the newly created ItemStatusHistory instance.
    """
    history = ItemStatusHistory(
        id_sales_order_item_status_history=id_sales_order_item_status_history,
        fk_sales_order_item=fk_sales_order_item,
        fk_sales_order_item_status=fk_sales_order_item_status,
        created_at=created_at,
    )
    self.session.add(history)
    self.session.commit()
    return history
<reponame>wstnturner/mrpt
/* +------------------------------------------------------------------------+
| Mobile Robot Programming Toolkit (MRPT) |
| https://www.mrpt.org/ |
| |
| Copyright (c) 2005-2022, Individual contributors, see AUTHORS file |
| See: https://www.mrpt.org/Authors - All rights reserved. |
| Released under BSD License. See: https://www.mrpt.org/License |
+------------------------------------------------------------------------+ */
#pragma once
#include <mrpt/nav/tpspace/CPTG_DiffDrive_CollisionGridBased.h>
namespace mrpt::nav
{
/** A PTG for circular paths ("C" type PTG in papers).
* - **Compatible kinematics**: differential-driven / Ackermann steering
* - **Compatible robot shape**: Arbitrary 2D polygon
* - **PTG parameters**: Use the app `ptg-configurator`
*
* This PT generator functions are:
*
* \f[ v(\alpha) = V_{MAX} sign(K) \f]
* \f[ \omega(\alpha) = \dfrac{\alpha}{\pi} W_{MAX} sign(K) \f]
*
* So, the radius of curvature of each trajectory is constant for each "alpha"
* value (the trajectory parameter):
*
* \f[ R(\alpha) = \dfrac{v}{\omega} = \dfrac{V_{MAX}}{W_{MAX}}
* \dfrac{\pi}{\alpha} \f]
*
* from which a minimum radius of curvature can be set by selecting the
* appropriate values of V_MAX and W_MAX,
* knowning that \f$ \alpha \in (-\pi,\pi) \f$.
*
* 
*
* \note [Before MRPT 1.5.0 this was named CPTG1]
* \ingroup nav_tpspace
*/
class CPTG_DiffDrive_C : public CPTG_DiffDrive_CollisionGridBased
{
	DEFINE_SERIALIZABLE(CPTG_DiffDrive_C, mrpt::nav)
   public:
	CPTG_DiffDrive_C() = default;
	/** Constructor that immediately loads the PTG parameters from the given
	 * config file section. */
	CPTG_DiffDrive_C(
		const mrpt::config::CConfigFileBase& cfg, const std::string& sSection)
	{
		loadFromConfigFile(cfg, sSection);
	}
	void loadFromConfigFile(
		const mrpt::config::CConfigFileBase& cfg,
		const std::string& sSection) override;
	void saveToConfigFile(
		mrpt::config::CConfigFileBase& cfg,
		const std::string& sSection) const override;
	std::string getDescription() const override;
	bool inverseMap_WS2TP(
		double x, double y, int& out_k, double& out_d,
		double tolerance_dist = 0.10) const override;
	bool PTG_IsIntoDomain(double x, double y) const override;
	void ptgDiffDriveSteeringFunction(
		float alpha, float t, float x, float y, float phi, float& v,
		float& w) const override;
	void loadDefaultParams() override;
   protected:
	/** A generation parameter */
	double K{0};
};
} // namespace mrpt::nav
|
    def save_metrics(self):
        """Persist this round's metrics through the registry client.

        Skips the registry call when no metrics have been collected,
        but always logs start and completion for traceability.
        """
        logger.debug(f"saving metrics: {self.metrics}")
        if self.metrics:
            self.registry_client.save_metrics(self._round, self.metrics)
        logger.debug("saving metrics done")
import type { TaskFunction } from 'gulp';
import type { Module } from './src';
import path from 'path';
import { copyFile, mkdir } from 'fs/promises';
import { copy } from 'fs-extra';
import { buildOutput, projRoot, vinOutput, vinPackage } from '@vinicunca/build-utils';
import { parallel, series } from 'gulp';
import { buildConfig, run, runTask, withTaskName } from './src';
/**
 * Copies distribution metadata into the output directory:
 * package.json (from the package source), README.md and global.d.ts
 * (from the project root). Resolves when every copy has finished.
 */
export function copyFiles() {
  const rootFiles = ['README.md', 'global.d.ts'];
  const tasks = rootFiles.map((file) =>
    copyFile(path.resolve(projRoot, file), path.resolve(vinOutput, file)),
  );
  tasks.push(copyFile(vinPackage, path.join(vinOutput, 'package.json')));
  return Promise.all(tasks);
}
/**
 * Gulp task: copies the generated .d.ts tree into each module flavor's
 * (esm/cjs) output directory, running both copies in parallel.
 */
export const copyTypesDefinitions: TaskFunction = (done) => {
  const typesDir = path.resolve(buildOutput, 'types');
  const makeCopyTask = (module: Module) =>
    withTaskName(`copyTypes:${module}`, () =>
      copy(typesDir, buildConfig[module].output.path, { recursive: true }),
    );
  return parallel(makeCopyTask('esm'), makeCopyTask('cjs'))(done);
};
// Full build pipeline: clean the workspace, create the output directory,
// run the three build flavors in parallel, then copy type definitions and
// metadata files (also in parallel) into the distribution output.
export default series(
  withTaskName('clean', () => run('pnpm run clean')),
  withTaskName('createOutput', () => mkdir(vinOutput, { recursive: true })),
  parallel(
    runTask('buildModules'),
    runTask('buildFullBundle'),
    runTask('generateTypesDefinitions'),
  ),
  parallel(copyTypesDefinitions, copyFiles),
);
export * from './src';
|
// dimension builder to keep this up to date
    /**
     * Value object recording a reserved position: the block position, the
     * dimension it lives in, and when the reservation was taken.
     */
    private static class ReservedName {
        // Timestamp of the reservation; intentionally non-final, so the
        // enclosing class can refresh it without re-creating the object.
        private long reservationTime;
        // Block position being reserved.
        private final BlockPos pos;
        // Dimension key, derived from the World passed at construction.
        private final RegistryKey<World> world;

        public ReservedName(World world, BlockPos pos, long reservationTime) {
            this.pos = pos;
            this.world = world.dimension();
            this.reservationTime = reservationTime;
        }
    }
<gh_stars>0
/*
* #%L
* GwtMaterial
* %%
* Copyright (C) 2015 - 2017 GwtMaterialDesign
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
package gwt.material.design.addins.client.overlay;
import com.google.gwt.core.client.GWT;
import com.google.gwt.dom.client.Document;
import com.google.gwt.dom.client.Element;
import com.google.gwt.dom.client.Style;
import com.google.gwt.event.logical.shared.*;
import com.google.gwt.event.shared.HandlerRegistration;
import gwt.material.design.addins.client.MaterialAddins;
import gwt.material.design.addins.client.base.constants.AddinsCssName;
import gwt.material.design.addins.client.pathanimator.MaterialPathAnimator;
import gwt.material.design.client.MaterialDesignBase;
import gwt.material.design.client.base.HasDurationTransition;
import gwt.material.design.client.base.MaterialWidget;
import gwt.material.design.client.constants.Color;
import gwt.material.design.client.constants.IconType;
import gwt.material.design.client.ui.MaterialIcon;
import static gwt.material.design.jquery.client.api.JQuery.$;
//@formatter:off
/**
* It's an overlay panel layout wherein you can put as many widgets as you want and design it.
* You can do advance stuff by implementing Path Animator into the overlay panel container.
* <p>
* <h3>XML Namespace Declaration</h3>
* <pre>
* {@code
* xmlns:ma='urn:import:gwt.material.design.addins.client'
* }
* </pre>
* <p>
* <h3>UiBinder Usage:</h3>
* <pre>
* {@code
* <ma:overlay.MaterialOverlay background="blue">
* <-- Some content here -->
* </ma:overlay.MaterialOverlay>
* }
* </pre>
*
* @author kevzlou7979
*/
//@formatter:on
public class MaterialOverlay extends MaterialWidget implements HasOpenHandlers<MaterialOverlay>,
        HasCloseHandlers<MaterialOverlay>, HasDurationTransition {

    // Inject the overlay stylesheet once per application; the debug bundle
    // is used when the addins debug flag is set.
    static {
        if (MaterialAddins.isDebug()) {
            MaterialDesignBase.injectCss(MaterialOverlayDebugClientBundle.INSTANCE.overlayCssDebug());
        } else {
            MaterialDesignBase.injectCss(MaterialOverlayClientBundle.INSTANCE.overlayCss());
        }
    }

    // Element the path animation originates from; null until open(Element)
    // or setSourceElement() is called. A null value makes close() skip the
    // reverse animation.
    private Element sourceElement;
    // Drives the open/close path animation between sourceElement and this widget.
    private MaterialPathAnimator animator = new MaterialPathAnimator();
    // Optional container that hosts this overlay when minimized; null unless set.
    private MaterialOverlayTab overlayTab;
    // Icon added by setOverlayTab(); a mouse-down on it minimizes the overlay.
    private MaterialIcon minimizeIcon = new MaterialIcon(IconType.KEYBOARD_ARROW_DOWN);

    /** Creates the overlay as a div with the overlay-panel style name. */
    public MaterialOverlay() {
        super(Document.get().createDivElement(), AddinsCssName.OVERLAY_PANEL);
    }

    /** Creates the overlay with the given background color. */
    public MaterialOverlay(Color backgroundColor) {
        this();
        setBackgroundColor(backgroundColor);
    }

    /** Creates the overlay with background color, visibility and opacity preset. */
    public MaterialOverlay(Color backgroundColor, Style.Visibility visibility, Double opacity) {
        this(backgroundColor);
        setVisibility(visibility);
        setOpacity(opacity);
    }

    /** Opens the overlay, animating from the given widget's element. */
    public void open(MaterialWidget source) {
        open(source.getElement());
    }

    /**
     * Open the Overlay Panel with Path Animator applied.
     * Body scrolling is locked while the overlay is shown; the OpenEvent
     * fires only after the animation completes.
     */
    public void open(Element sourceElement) {
        this.sourceElement = sourceElement;
        $("body").attr("style", "overflow: hidden !important");
        animator.setSourceElement(sourceElement);
        animator.setTargetElement(getElement());
        animator.setCompletedCallback(() -> OpenEvent.fire(MaterialOverlay.this, MaterialOverlay.this));
        animator.animate();
    }

    /**
     * Open the Overlay Panel without Path Animator; the OpenEvent fires
     * immediately.
     */
    public void open() {
        setVisibility(Style.Visibility.VISIBLE);
        setOpacity(1);
        OpenEvent.fire(this, this);
    }

    /**
     * Close the Overlay Panel with Path Animator applied.
     */
    public void close() {
        close(true);
    }

    /**
     * Close the Overlay Panel with Path Animator applied.
     *
     * @param fireEventImmediately should we fire the close event immediately or wait for the animation.
     */
    public void close(boolean fireEventImmediately) {
        // Keep body scrolling locked when this overlay is nested inside
        // another overlay panel; otherwise restore normal scrolling.
        // NOTE(review): the parents() selector uses the bare style-name
        // constant (no leading '.') — confirm this matches the intended
        // ancestor lookup.
        if ($(getElement()).parents(AddinsCssName.OVERLAY_PANEL).length() == 1) {
            body().attr("style", "overflow: hidden !important");
        } else {
            body().attr("style", "overflow: auto !important");
        }
        if (sourceElement != null) {
            if(!fireEventImmediately) {
                // Defer the CloseEvent until the reverse animation finishes.
                animator.setCompletedCallback(() -> CloseEvent.fire(MaterialOverlay.this, MaterialOverlay.this));
            } else {
                CloseEvent.fire(MaterialOverlay.this, MaterialOverlay.this);
            }
            animator.reverseAnimate();
        } else {
            // No source element recorded: hide without the path animation.
            setOpacity(0);
            setVisibility(Style.Visibility.HIDDEN);
            CloseEvent.fire(MaterialOverlay.this, MaterialOverlay.this);
        }
    }

    /**
     * Returns true unless visibility is explicitly HIDDEN; a null
     * visibility is treated as open.
     */
    public boolean isOpen() {
        Style.Visibility visibility = getVisibility();
        return visibility == null || !visibility.equals(Style.Visibility.HIDDEN);
    }

    /**
     * Get source element for path animator
     */
    public Element getSourceElement() {
        return sourceElement;
    }

    /**
     * Set source element for path animator
     */
    public void setSourceElement(Element sourceElement) {
        this.sourceElement = sourceElement;
    }

    @Override
    public int getDuration() {
        return animator.getDuration();
    }

    @Override
    public void setDuration(int duration) {
        animator.setDuration(duration);
    }

    public int getTargetShowDuration() {
        return animator.getTargetShowDuration();
    }

    /**
     * Duration (in milliseconds) of targetElement to become visible, if hidden initially. The library will automatically try to figure this out from the element's computed styles. Default is 0 seconds.
     */
    public void setTargetShowDuration(int targetShowDuration) {
        animator.setTargetShowDuration(targetShowDuration);
    }

    public int getExtraTransitionDuration() {
        return animator.getExtraTransitionDuration();
    }

    /**
     * Extra duration (in milliseconds) of targetElement to provide visual continuity between the animation and the rendering of the targetElement. Default is 1 second
     */
    public void setExtraTransitionDuration(int extraTransitionDuration) {
        animator.setExtraTransitionDuration(extraTransitionDuration);
    }

    /** Delegates to the animator: animate relative to the window viewport. */
    public void setRelativeToWindow(boolean relativeToWindow) {
        animator.setRelativeToWindow(relativeToWindow);
    }

    public boolean isRelativeToWindow() {
        return animator.isRelativeToWindow();
    }

    /** Registers a close handler; it is only invoked while the widget is enabled. */
    @Override
    public HandlerRegistration addCloseHandler(CloseHandler<MaterialOverlay> closeHandler) {
        return addHandler(new CloseHandler<MaterialOverlay>() {
            @Override
            public void onClose(CloseEvent<MaterialOverlay> closeEvent) {
                if (isEnabled()) {
                    closeHandler.onClose(closeEvent);
                }
            }
        }, CloseEvent.getType());
    }

    /** Registers an open handler; it is only invoked while the widget is enabled. */
    @Override
    public HandlerRegistration addOpenHandler(OpenHandler<MaterialOverlay> openHandler) {
        return addHandler(new OpenHandler<MaterialOverlay>() {
            @Override
            public void onOpen(OpenEvent<MaterialOverlay> openEvent) {
                if (isEnabled()) {
                    openHandler.onOpen(openEvent);
                }
            }
        }, OpenEvent.getType());
    }

    public MaterialOverlayTab getOverlayTab() {
        return overlayTab;
    }

    /**
     * Attaches the tab container used for minimizing and adds the
     * minimize icon to this overlay.
     */
    public void setOverlayTab(MaterialOverlayTab overlayTab) {
        this.overlayTab = overlayTab;
        minimizeIcon.addStyleName(AddinsCssName.MINIMIZE_ICON);
        minimizeIcon.addMouseDownHandler(e -> minimize());
        add(minimizeIcon);
    }

    /** Minimizes into the overlay tab; logs an error if none was set. */
    protected void minimize() {
        if (overlayTab != null) {
            overlayTab.minimize(this);
        } else {
            GWT.log("You must set the overlay container before minimizing the overlay.", new IllegalStateException());
        }
    }

    public MaterialIcon getMinimizeIcon() {
        return minimizeIcon;
    }
}
/* Rebuilds all derived bookkeeping of the list from its chain of items.
 * Useful when the struct members have gone out of sync. The first_item is
 * rewound to the true head of the chain, then a forward pass recounts the
 * items, rebuilds str_code and locates the last item. The current
 * first_item must belong to the chain.
 */
void
nalu_list_refresh(nalu_list_t *list)
{
  if (!list) return;

  /* Reset the derived state before recomputing it. */
  list->num_items = 0;
  memset(list->str_code, 0, sizeof(list->str_code));

  /* Rewind first_item to the head of the chain. */
  while (list->first_item && list->first_item->prev) {
    list->first_item = list->first_item->prev;
  }

  /* Forward pass: recount items, rebuild str_code, track the tail. */
  nalu_list_item_t *last = NULL;
  for (nalu_list_item_t *item = list->first_item; item; item = item->next) {
    memcpy(&list->str_code[list->num_items], item->str_code, sizeof(char));
    list->num_items++;
    last = item;
  }
  list->last_item = last;
}
# coding: UTF-8
# Read n and a list of integers, then print the alternating sum taken in
# descending order: largest - 2nd largest + 3rd largest - ...
n = int(input())
values = sorted(map(int, input().split()), reverse=True)[:n]
result = sum(v if i % 2 == 0 else -v for i, v in enumerate(values))
print(result)
/**
* Handles incoming slack messages.
* @param slackMessage message to be processed
* @return true if forwarded to the game, false otherwise
*/
private static Boolean onMessage(SlackMessage slackMessage) {
Context context = slackToContext.get(slackMessage.getChannelName());
if (context == null) {
return false;
} else {
if (slackMessage.getUserId() == null) {
LOGGER.warn("Command {} without a user id. Ignoring.", slackMessage.getCommand());
return false;
}
Configuration.Administrator administrator = context.getAdministrator(slackMessage.getUserId());
if (administrator == null) {
LOGGER.warn("Command {} from an unregistered user id {}. Ignoring.", slackMessage.getCommand(), slackMessage.getUserId());
return false;
}
if (!administrator.getGrants().contains("*") && !administrator.getGrants().contains("/chat")) {
LOGGER.warn("No grants for chat for the user id {}. Ignoring.", slackMessage.getUserId());
slack.sendMessage(slackMessage.getChannelName(), ":exclamation: No permission for chat.");
return false;
}
try {
context.notifyAll("[" + administrator.getNickName() + "] " + slackMessage.getText());
return true;
} catch (Exception ex) {
LOGGER.error("Failed to forward a chat {}.", slackMessage, ex);
return false;
}
}
} |
Shell-shocked Democrats looking to recover from 2016 see the large slate of upcoming governors races as their most likely path out of the political wilderness — starting in the Midwest.
Twenty-seven of the 38 governorships up in 2017 and 2018 are Republican-held, including many seats that will be open after eight years of GOP control. That means widespread opportunities for Democratic gains, as well as a critical chance for new ideas and new blood to emerge as the party seeks to identify its next generation of leaders and dig out from a low point after President-elect Donald Trump’s shocking White House win.
Story Continued Below
The sweeping gubernatorial map takes in virtually every national battleground state and segment of the electorate. Diversifying states where Democrats have struggled in recent midterms, like Florida, Nevada and New Mexico, will have open races in 2018. Unorthodox Republicans who carried blue states like Illinois, Maryland and Massachusetts in 2014 will face reelection challenges, too, while Democrats will also look to replicate recent successes in a handful of red states.
And, critically, Democrats itching to prove they can still win in the Midwest will get the opportunity to test themselves by chasing GOP-held governorships in Iowa, Michigan, Ohio and Wisconsin.
“I don’t think there’s any question that 2018 will be a watershed year for the Democratic Party in the Midwest,” said Joe Rugola, an Ohio labor leader with the American Federation of State, County and Municipal Employees and an at-large member of the Democratic National Committee. “We have to reverse some of the voter patterns we saw in 2016 or we’re going to end up with two Americas.”
Democrats will, of course, also spend significant energy on the next round of Senate and House races, as well as rebuilding the Democratic National Committee under a new chairman. But the conservative-leaning 2018 Senate map means Democrats will be almost entirely focused on keeping key red- and swing-state incumbents in office for the next two years, not electing new Democratic voices.
And while Democrats are out of power in Washington, they are reeling in the states, where there are 33 Republican governors. The GOP holds unified control of government — both state legislative chambers and the governorship — in 24 states, where a raft of conservative policies have been enacted in the latter years of President Barack Obama’s administration.
But Democrats also have a chance to claw back power in the states more quickly than in Washington.
“Just on the number of people who will be term-limited out, and the number of open seats — any time that’s available it gives us a very big opportunity to take back a state,” said Connecticut Gov. Dannel Malloy, chairman of the Democratic Governors Association.
DGA executive director Elisabeth Pearson expects a number of good candidates to come forward in the next year. “I think the future of our party is in Democratic governors’ hands, in a lot of ways,” she said.
While some Democratic Party groups have discouraged primaries in recent years, Pearson said interest in the 2018 races may spark a few spirited intraparty contests — and that it wouldn’t necessarily be the worst thing for a party trying to find its way, provided the candidates stay civil.
“It’s not a bad thing to have a lot of people who are really qualified,” said Pearson. “Obviously, you don’t want to have primaries that tear down the party, but there could be some areas where it’s not a bad thing.”
Labor unions and other progressive groups, looking to turn the page from 2016, are already laying groundwork for the gubernatorial races.
“We see every race as make or break for working people, and the governors races will create a focal point for our members to understand how we can elect champions up and down the ballot,” said Mary Kay Henry, president of the Service Employees International Union.
SEIU is keeping organizers in place after 2016 in Hampton Roads, Richmond and Northern Virginia to prepare for the 2017 gubernatorial race there. Democratic Lt. Gov. Ralph Northam, a self-described fiscal conservative, is seeking to keep the governorship in his party’s hands for a second consecutive term. Democrats are also pushing to take back the New Jersey governor’s mansion in 2017, with Gov. Chris Christie retiring.
Democrats’ gubernatorial push echoes the Republican Party’s rebuild after Obama’s election in 2008, after which the GOP won smashing victories at the state level and elected a slew of governors who went on to become major players in the party, like Wisconsin’s Scott Walker and Ohio’s John Kasich.
Republicans will not sacrifice those gains quietly. First of all, not every open race in 2018 will be competitive; a number are in safely Republican states like Alabama and Wyoming. And the Republican Governors Association is already gearing up to protect incumbents and open competitive seats alike in 2018, while also challenging for open Democratic-held states like Colorado. Republicans also feel confident about preventing Democrats from taking full advantage of all of their opportunities in the next election. The GOP gained governorships in the 2016 elections, flipping Missouri, New Hampshire and Vermont (but trailing in North Carolina, where a recount is underway).
“The DGA talked a big game for the 2016 gubernatorial races but severely underperformed and lost even more states,” RGA spokesman Jon Thompson said. “Republicans may face a larger amount of open seats in 2018, but Democrats have routinely recruited gubernatorial candidates who are out of touch, struggle with Main Street issues and are unable to win.”
Republicans were particularly successful in the Midwest after Obama’s election, something Democrats hope to replicate after Trump’s win. Malloy said he believes Democratic gubernatorial candidates will be able to dispel renewed fears that the region is drifting away from Democrats because they have not been able to sell white working-class voters on their economic agenda.
“Gubernatorial campaigns, by their very nature, take that one on,” said Malloy. “There is no room to have that disconnect. If you’re running for governor, you are going to get known and visit every community. It’s very personal ownership and people want to know you. That doesn’t mean there won’t be close elections, but it’s different.”
SEIU built infrastructure in key 2016 battleground states during the presidential election with an eye toward helping the Democratic Party grow its state-based footprint in 2018.
“Our ’16 investment was about a longer-term cycle,” Henry said. “The reason we were in Colorado, Illinois, Ohio, Pennsylvania, Virginia, Florida — all of those investments and our talents were placed in parts of the states where we knew there needed to be a through-line for 2018.”
Some Democrats even harbor optimism about electing red-state governors in 2018, noting that Democratic governors were elected in two Trump states in 2016 (Montana and West Virginia), while North Carolina’s Roy Cooper leads the uncalled race in another state Trump carried.
“I think Democrats can compete and succeed in any state in the country, but we really need to focus on bread-and-butter issues,” said Montana Gov. Steve Bullock, a former DGA chairman who won reelection this month, outrunning Hillary Clinton by double digits. “Politics becomes very local” in gubernatorial races, Bullock added.
Democrats are also hoping the next slate of governors races can catalyze party rebuilding further down-ballot, as it recently did for the GOP.
Numerous Democrats noted that electing governors in 2018 is the first concrete step the party can take toward having a bigger say in the next decennial redistricting process, after the 2020 census.
Republican governors and legislators fully controlled the process of drawing new congressional lines in 18 states (compared to just six for Democrats) after the 2010 Republican wave election. The GOP was able to draw state legislative boundaries in even more states, which helped incubate a new generation of Republican officeholders.
“There are 35 states where governors play a role in redistricting,” Malloy said. “Some we can win, and some we can’t. But a state like Michigan, which was unfairly drawn, or other Midwestern states — I think that gives us a real opportunity to look forward to a Congress that fairly represents people.”
EMILY’s List, the Democratic women’s group, is already talking to potential candidates in the 2018 states “with an eye on redistricting in 2020,” said Lucinda Guinn, the group’s vice president of campaigns.
Partly because of redistricting looming on the horizon, “There will be a lot of money, there will be a lot of focus, there will be a lot of energy around those races,” said Nick Rathod, executive director of the State Innovation Exchange, a group that promotes progressive policies at the state level.
“Everything will come down to those governors races,” Rathod continued. “The stakes are extremely high.” |
def check_for_steady_cluster_status(config, max_sec_to_wait=1200):
    """Poll the cluster every 10 s until it reaches the "steady" state.

    Raises an Exception if the cluster reports creation errors, or if the
    deadline (``max_sec_to_wait`` seconds) passes without reaching steady.
    """
    client = get_client(config)
    deadline = time.time() + max_sec_to_wait
    while time.time() < deadline:
        cluster = client.clusters.get(config.bait_resource_group_name,
                                      config.bait_cluster_name)
        if cluster.allocation_state == tm.AllocationState.steady:
            print('Cluster has reached "steady" allocation state. Ready for '
                  'job submission.')
            # Steady but with errors still means the creation failed.
            if cluster.errors is not None:
                raise Exception('Errors were thrown during cluster creation:'
                                '\n{}'.format('\n'.join(cluster.errors)))
            return
        time.sleep(10)
    raise Exception('Max wait time exceeded for cluster to reach "steady" '
                    'state ({} seconds).'.format(max_sec_to_wait))
def _resolve_dataset_paths(dataset):
    """Resolve (image_paths, label_paths) for this car's shard of `dataset`.

    For the cross-validation datasets ('MNIST_noniid_cvN' / 'MNIST_iid_cvN')
    fold N is held out for validation and the remaining four folds are used
    for training. Exits the process on an unknown dataset name (fail-fast,
    matching the original behavior).
    """
    if dataset == 'MNIST':
        base = 'data/mnist'
    elif dataset == 'MNIST-non-iid':
        base = 'data/mnist-non-iid'
    elif dataset.startswith('MNIST_noniid_cv') or dataset.startswith('MNIST_iid_cv'):
        # The digits after '_cv' name the held-out validation fold.
        val_fold = int(re.search(r'(?<=_cv)\d+', dataset).group(0))
        folds = [fold for fold in range(1, 6) if fold != val_fold]
        cv_base = ('data/mnist_noniid_cv' if dataset.startswith('MNIST_noniid_cv')
                   else 'data/mnist_iid_cv')
        image_paths = ['{}/fold{}/car{}-images.byte'.format(cv_base, fold, CAR_NUMBER)
                       for fold in folds]
        label_paths = ['{}/fold{}/car{}-labels.byte'.format(cv_base, fold, CAR_NUMBER)
                       for fold in folds]
        return image_paths, label_paths
    else:
        sys.exit('Missing dataset')
    return (['{}/car{}-images.byte'.format(base, CAR_NUMBER)],
            ['{}/car{}-labels.byte'.format(base, CAR_NUMBER)])


def perform_assignment(json_dict, assignment_states, id_number):
    """Dispatch one assignment request described by `json_dict`.

    Supported request types:
      - 'PingPong':      liveness check; returns {'result': pong-response}.
      - 'init_ann':      build a Mnist_Model from the request's
                         hyper-parameters and store it; returns None.
      - 'init_svrg_ann': build a Fsvrg_Mnist_Model and store it; returns None.
      - 'destroy_ann':   tear down and forget the stored model; returns None.
      - 'FedAvg'/'COOP'/'FSVRG': run one local training round on the stored
                         model and return {'result': updated parameters}.

    `assignment_states` maps `id_number` to the model owned by that assignment.
    """
    mode = json_dict['type']
    assert mode in ['PingPong', 'FedAvg', 'COOP', 'FSVRG', 'init_ann',
                    'destroy_ann', 'init_svrg_ann']
    if mode == 'PingPong':
        return {'result': ass.pong()}
    elif mode == 'init_ann':
        image_paths, label_paths = _resolve_dataset_paths(json_dict['dataset'])
        model = ass.Mnist_Model(json_dict['lr'], json_dict['decay'],
                                json_dict['E'], json_dict['B'],
                                image_paths, label_paths)
        assignment_states[id_number] = model
        return None
    elif mode == 'init_svrg_ann':
        image_paths, label_paths = _resolve_dataset_paths(json_dict['dataset'])
        model = ass.Fsvrg_Mnist_Model(json_dict['step_size'],
                                      image_paths, label_paths)
        assignment_states[id_number] = model
        # Explicit return for consistency with the other init/destroy branches
        # (the original fell through and returned None implicitly).
        return None
    elif mode == 'destroy_ann':
        model = assignment_states[id_number]
        model.on_destroy()
        assignment_states[id_number] = None
        del assignment_states[id_number]
        return None
    else:
        # Training round: FedAvg, COOP or FSVRG.
        model = assignment_states[id_number]
        model_params = json_dict['model']
        w_list = model_params['w']
        b_list = model_params['b']
        if mode == 'FSVRG':
            # FSVRG additionally needs the global gradients from the server.
            ws, bs = model.train(w_list, b_list,
                                 json_dict['gradients_w'],
                                 json_dict['gradients_b'])
        else:
            ws, bs = model.train(w_list, b_list)
        # n_k: number of local samples, used for weighted aggregation.
        params = {'w': ws, 'b': bs, 'n_k': len(model.data[1])}
        return {'result': params}
<reponame>KirillTK/SpotifySync
import { all, fork } from 'redux-saga/effects';
import { AuthSaga } from './auth-saga';
import { ProfileSaga } from './profile-saga';
import { SpotifySaga } from './spotify-saga';
/**
 * Root saga: forks every feature saga (auth, profile, spotify) so they run
 * concurrently; any error escaping the combined effect is logged.
 */
export function* rootSaga() {
  const sagas = [AuthSaga, ProfileSaga, SpotifySaga];
  try {
    yield all(sagas.map((saga) => fork(saga)));
  } catch (error) {
    console.error(error);
  }
}
|
Amelioration of experimental parkinsonism by intrahypothalamic administration of haloperidol.
Accumulation of amines in the degenerating axons of ascending catecholamine-containing neurons in the hypothalamus has been proposed as a site of functional neurotransmitter release and may thereby participate in the development of motor impairment seen after central dopamine-depleting lesions. To test this hypothesis further the dopamine receptor antagonist haloperidol (1 microL of a 14 nmol solution) was injected directly into the lateral hypothalamus (LH) in 6 different injection regimes to determine whether amphetamine-induced turning could be attenuated with this treatment. The injection of haloperidol at 1 and 24 h (group 1), 24 h (group 2) or 6+ 7 d (group 3) after 6-hydroxydopamine (6-OHDA) did not modify amphetamine-induced turning. However, the injection of haloperidol at 1 h, 24 h, 7 d, and 8 d (group 4), days 1-7 (group 5), or gradual infusion (14 nmol/microliters/h) for 7 days (group 6) all reduced the 6-OHDA-induced turning to a level similar to that of controls. These results add further support to the contention that amines are released from the axons of degenerating neurones in the hypothalamus and that this phenomenon participates in the elicitation of behavioral impairment attributed solely to the loss of functional neurotransmitters from terminal fields. Furthermore, the data emphasize the importance of hypothalamic pathology in the development of Parkinsonism and suggest that intrahypothalamic administration of dopamine blocking agents might be useful in the treatment of Parkinsonism.
Brennan named as being “behind the witch hunts of investigative journalists”
Paul Joseph Watson
Prison Planet.com
August 13, 2013
Journalist Michael Hastings was investigating CIA director John Brennan before his untimely death in a suspicious car accident it has been revealed, with the report set to be published posthumously by Rolling Stone Magazine within the next two weeks.
According to San Diego 6 News reporter Kimberly Dvorak, “John Brennan was Hastings next exposé project.” Dvorak says she received an email from the CIA, “acknowledging Hastings was working on a CIA story,” although the text of that email was not displayed.
Dvorak also cites a Stratfor email hacked by Wikileaks and first published last year which names Brennan as being, “behind the witch hunts of investigative journalists learning information from inside the beltway sources.”
“There is specific tasker from the WH to go after anyone printing materials negative to the Obama agenda (oh my.) Even the FBI is shocked,” states the email.
Before being sworn in as CIA director in March this year, Brennan was a counterterrorism advisor for the Obama administration and helped compile the “kill lists” for the White House’s drone assassination program.
A separate email Dvorak received from CIA media spokesman Todd Ebitz states, “Any suggestion that Director Brennan has ever attempted to infringe on constitutionally-protected press freedoms is offensive and baseless.”
Was Hastings about to reveal that Brennan had been tasked with targeting independent reporters who were in the business of releasing sensitive information?
Dvorak claims that Hastings’ posthumous exposé of Brennan will be published by Rolling Stone Magazine within the next two weeks, although this has yet to be confirmed by the publication itself.
Infowars first reported that Hastings was likely investigating the CIA prior to his death following the comments of Hastings’ close friend Sgt. Joe Biggs, who revealed that the journalist was working on “the biggest story yet” about the CIA.
Hastings sent an email hours before his death stating he was “onto a big story” and needed “to go off the rada[r] for a bit.” A separate close friend of Hastings also told Infowars that the journalist’s home was visited by agents from an unnamed federal agency the day before his death.
Analysis of the recently released surveillance footage of Hastings’ vehicle in the moments before the crash, which was carried out by SDSU professor Morteza M. Mehrabadi, Professor and Interim Chair Areas of Specialization: Mechanics of Materials, also suggests that Hastings was not speeding before his Mercedes hit a tree.
The footage appears to confirm eyewitness reports that the car suffered an “explosion” before it hit the tree, suggesting some kind of incendiary device was planted on the vehicle.
By measuring the distance traveled by the car on the surveillance clip and the time that elapsed before the explosion, Professor Mehrabadi was able to calculate that the car was only traveling at a speed of 35 MPH, and not speeding as some reports claimed.
“The pre-explosion and slower speed could also explain the minimal damage to the palm tree and the facts the rear tires rested against the curb. It also provides an explanation for the location of the engine and drive train at more than 100 feet from the tree impact area,” writes Dvorak.
Facebook @ https://www.facebook.com/paul.j.watson.71
FOLLOW Paul Joseph Watson @ https://twitter.com/PrisonPlanet
*********************
Paul Joseph Watson is the editor and writer for Infowars.com and Prison Planet.com. He is the author of Order Out Of Chaos. Watson is also a host for Infowars Nightly News.
This article was posted: Tuesday, August 13, 2013 at 6:18 am
Print this page.
Infowars.com Videos:
Comment on this article |
/** Used to store information during model construction.
    Once the model has been finalized, most of this info
    goes away.
*/
class evm_event : public model_event {
  /** Build-time scratch data; exists only while the model is constructed. */
  struct extra_info {
    // Assignments registered so far, keyed for fast lookup.
    SplayOfPointers <assign_entry> *modlist;
    // Guard expressions collected via addEnabling().
    List <expr>* guards;
  public:
    extra_info();
    ~extra_info();
  };
  // Non-null only during construction; presumably released by Finalize()
  // -- TODO confirm.
  extra_info* build_data;
public:
  evm_event(const symbol* wrapper, const model_instance* p);
  /** Registers a guard expression enabling this event. */
  void addEnabling(expr* guard);
  /** Associates an assignment right-hand side with entry `tmp`;
      the bool result presumably signals acceptance -- confirm at call sites. */
  bool setAssignment(assign_entry* &tmp, expr* rhs);
  /** Completes construction, writing output to `ds`. */
  void Finalize(OutputStream &ds);
};  // fixed: the class definition was missing its terminating semicolon
def plot_world(self):
    """Draw the world map (top panel) and the infection history (bottom panel)."""
    # Outer scatter color keyed by person status.
    status_colors = {PersonStatus.HEALTHY: 'g',
                     PersonStatus.INFECTED: 'r',
                     PersonStatus.RECOVERED: 'b',
                     PersonStatus.DEAD: 'k'}
    # Inner scatter color keyed by wearable risk level (0 == black).
    wearable_colors = {InfectionSeverity.GREEN: 'g',
                       InfectionSeverity.ORANGE: 'y',
                       InfectionSeverity.RED: 'r',
                       0: 'k'}
    plt.clf()
    # Top panel: world layout with nine outlined 100x100 zones.
    plt.subplot2grid((3, 1), (0, 0), rowspan=2)
    zones = [((0, 0), 'r'), ((200, 0), 'g'), ((400, 0), 'y'),
             ((0, 200), 'y'), ((200, 200), 'r'), ((400, 200), 'g'),
             ((0, 400), 'r'), ((200, 400), 'y'), ((400, 400), 'g')]
    for corner, edge_color in zones:
        plt.gca().add_patch(Rectangle(corner, 100, 100, linewidth=1,
                                      edgecolor=edge_color, facecolor='none'))
    persons = self.world.persons
    xs = [p.x for p in persons]
    ys = [p.y for p in persons]
    plt.scatter(xs, ys, 30, [status_colors[p.status] for p in persons])
    plt.scatter(xs, ys, 10,
                [wearable_colors[p.wearable.user_risk_level] for p in persons])
    plt.xlim([0, sc.WORLD_SIZE])
    plt.ylim([0, sc.WORLD_SIZE])
    steps_per_day = 24 // sc.TIME_STEP
    day = self.simulation_iteration // steps_per_day
    hour = self.simulation_iteration % steps_per_day * sc.TIME_STEP
    plt.title('Day: ' + str(day) + ' - Hour: ' + str(hour))
    # Bottom panel: one curve per status, in the same color scheme.
    plt.subplot2grid((3, 1), (2, 0), rowspan=1)
    for series_index, line_color in zip(range(4), 'grbk'):
        plt.plot([entry[series_index] for entry in self.history], line_color)
    tick_positions = [i for i in range(self.simulation_iteration + 1)]
    tick_labels = ['Day: ' + str(i // steps_per_day)
                   if (i % steps_per_day) * sc.TIME_STEP == 0 else ''
                   for i in tick_positions]
    plt.xticks(tick_positions, tick_labels, rotation='vertical')
    plt.pause(0.0001)
package p2p
import (
"github.com/kaspanet/kaspad/peer"
"github.com/kaspanet/kaspad/wire"
)
// OnBlockLocator is invoked when a peer receives a block locator kaspa
// message. It tries to locate the highest block shared with the peer and
// request everything above it; if no shared block is identified yet, it
// narrows the search with another locator round-trip.
func (sp *Peer) OnBlockLocator(_ *peer.Peer, msg *wire.MsgBlockLocator) {
	sp.SetWasBlockLocatorRequested(false)
	// Find the highest known shared block between the peers, and ask for
	// the block and its future from the peer. If the block is not
	// found, create a lower resolution block locator and send it to
	// the peer in order to find it in the next iteration.
	dag := sp.server.DAG
	if len(msg.BlockLocatorHashes) == 0 {
		// An empty locator is a protocol violation; log and bail out.
		peerLog.Warnf("Got empty block locator from peer %s",
			sp)
		return
	}
	// If the first hash of the block locator is known, it means we found
	// the highest shared block.
	highHash := msg.BlockLocatorHashes[0]
	if dag.IsInDAG(highHash) {
		if dag.IsKnownFinalizedBlock(highHash) {
			// Syncing from below the finality point is not allowed, so
			// this peer can no longer serve as a sync candidate.
			peerLog.Debugf("Cannot sync with peer %s because the highest"+
				" shared chain block (%s) is below the finality point", sp, highHash)
			sp.server.SyncManager.RemoveFromSyncCandidates(sp.Peer)
			return
		}
		// We send the highHash as the GetBlockInvsMsg's lowHash here.
		// This is not a mistake. The invs we desire start from the highest
		// hash that we know of and end at the highest hash that the peer
		// knows of.
		err := sp.Peer.PushGetBlockInvsMsg(highHash, sp.Peer.SelectedTipHash())
		if err != nil {
			peerLog.Errorf("Failed pushing get blocks message for peer %s: %s",
				sp, err)
			return
		}
		return
	}
	// No shared block found yet: shrink the search window and ask again.
	highHash, lowHash := dag.FindNextLocatorBoundaries(msg.BlockLocatorHashes)
	if highHash == nil {
		panic("Couldn't find any unknown hashes in the block locator.")
	}
	sp.PushGetBlockLocatorMsg(highHash, lowHash)
}
|
/**
* See equation 25. Fast unsafe version
*/
protected static float A( int x, int y, GrayF32 flow ) {
int index = flow.getIndex(x, y);
float u0 = flow.data[index - 1];
float u1 = flow.data[index + 1];
float u2 = flow.data[index - flow.stride];
float u3 = flow.data[index + flow.stride];
float u4 = flow.data[index - 1 - flow.stride];
float u5 = flow.data[index + 1 - flow.stride];
float u6 = flow.data[index - 1 + flow.stride];
float u7 = flow.data[index + 1 + flow.stride];
return (1.0f/6.0f)*(u0 + u1 + u2 + u3) + (1.0f/12.0f)*(u4 + u5 + u6 + u7);
} |
<reponame>blutorange/selenese-runner-java<gh_stars>100-1000
package jp.vmi.selenium.selenese.parser;
/**
 * Test suite entry in side file.
 */
public class TestSuiteEntry extends TestElementEntry {
    /**
     * Constructor.
     *
     * @param id test suite id.
     * @param name test suite name.
     */
    public TestSuiteEntry(String id, String name) {
        super(id, name);
    }
}
|
/**
* Add an Error-result to the totals, if the sentence length
* is <= cut-off length, or if the length does not matter
* @param sentenceLength
* @param message reason for the error
*/
public void addError(int sentenceLength, String message) {
if ((cutOffLength == -1) ||
(sentenceLength <= cutOffLength)) {
this.sentCount++;
this.errorCount++;
}
} |
<reponame>vigsterkr/FlowKet<filename>src/flowket/observables/monte_carlo/sigma_z.py
import functools
import numpy
from .observable import LambdaObservable
def abs_sigma_z(wave_function, configurations):
    """Per-sample |magnetization|: absolute value of the spin sum over all
    non-batch axes, normalized by the number of spins per sample.

    ``wave_function`` is accepted for the observable interface but unused.
    """
    spins_per_sample = numpy.prod(configurations.shape[1:])
    non_batch_axes = tuple(range(1, configurations.ndim))
    magnetization = configurations.sum(axis=non_batch_axes)
    return numpy.absolute(magnetization) / spins_per_sample
def sigma_z(wave_function, configurations):
    """Per-sample magnetization: spin sum over all non-batch axes,
    normalized by the number of spins per sample.

    ``wave_function`` is accepted for the observable interface but unused.
    """
    spins_per_sample = numpy.prod(configurations.shape[1:])
    non_batch_axes = tuple(range(1, configurations.ndim))
    magnetization = configurations.sum(axis=non_batch_axes)
    return magnetization / spins_per_sample
# Observable factories: each binds its reducer function into a
# LambdaObservable so callers only supply the remaining arguments.
AbsSigmaZ = functools.partial(LambdaObservable, observable_function=abs_sigma_z)
SigmaZ = functools.partial(LambdaObservable, observable_function=sigma_z)
|
Aldon Smith is “mad and frustrated” that his efforts to get reinstated to the NFL seem to have hit a standstill. The suspended Raiders linebacker met with Commissioner Roger Goodell last Friday and expected to hear something by now, but he hasn’t.
Smith and his personal trainer, Steven Fotion, posted a picture of themselves on Twitter on Thursday afternoon, saying “how unprofessional NFL, disappointed fans want to know yes or no.” Later, Fotion said on the telephone that Smith was “too angry to talk and was afraid he would start yelling.”
And so Smith spoke through Fotion, who owns Fotion’s Clubhouse Gym in Carbondale, Colo., and has been working with Smith since June.
“He is losing hope,” Fotion said. “They are jerking him around, telling him they’ll have an answer for him on Monday. And then nothing. It seems unprofessional.”
Back to Gallery Raiders’ Aldon Smith wants answer from NFL on suspension 16 1 of 16 Photo: Thearon W. Henderson, Getty Images 2 of 16 Photo: Thearon W. Henderson, Getty Images 3 of 16 Photo: Tony Avelar, Associated Press 4 of 16 Photo: Tony Avelar, AP 5 of 16 Photo: The Chronicle 6 of 16 Photo: The Chronicle 7 of 16 Photo: The Chronicle 8 of 16 Photo: Special to the Chronicle 9 of 16 Photo: The Chronicle 10 of 16 Photo: The Chronicle 11 of 16 Photo: The Chronicle 12 of 16 Photo: The Chronicle 13 of 16 Photo: Chronicle 14 of 16 Photo: The Chronicle 15 of 16 Photo: SFC 16 of 16 Photo: Special to The Chronicle
Smith’s yearlong Stage Three suspension for violating the league’s rules on substance abuse ended Nov. 17. He applied for reinstatement Oct. 3.
As for the meeting with Goodell, Smith told Fotion that “it went well. They poked the bear, they poked the bear … but they didn’t rattle me.”
Goodell said Wednesday at the NFL owners’ meetings in Irving, Texas, that he didn’t have a timetable for announcing a decision on Smith’s reinstatement.
“We’re going through all of the information to make sure we have it all absolutely accurate,” Goodell told reporters, “that we all understand exactly where he is in the process of trying to get himself in a position where he’s got his life in order enough to resume an NFL career. It was good for me to hear from him personally. But when we get to that decision, we’ll certainly announce it.”
The players’ union also was represented at last week’s meeting.
Smith, 27, entered a treatment center in Carbondale in July after a video shared on social media raised questions about whether he had violated the NFL’s drug and conduct policies again. Smith denied on Twitter that he was the man in the video who appeared to be smoking marijuana, and the league has investigated the matter.
Smith also had been to rehab in 2013, following two DUI arrests when he played for the 49ers, and served a nine-game suspension in 2014.
The Raiders (10-3) re-signed Smith to a two-year contract in April even though he was under suspension and not allowed to have contact with the team.
“He just wants to play,” Fotion said. “Actually, he just wants to know. You tell him he can’t come back this season, fine, he’ll rent a house here and we’ll train for next season. He’s been randomly drug-tested and they have all come back clean.”
Raiders general manager Reggie McKenzie said, “The most important thing is that (Smith) is healthy and happy,” but the team definitely could find use for a pass rusher — even if it’s limited use — down the stretch or in the playoffs. Smith has 47.5 career sacks.
Fotion started working five days a week with Smith in June and has been posting pictures and videos of Smith training. Smith (6-foot-4, 258 pounds) is obviously in great shape, as evidenced by a 4-foot box jump — after he had just finished squats and dead lifts.
“Aldon’s in better shape than he was last season, and he is stronger mentally as well,” said Fotion, who has been training athletes for 25 years. “He’s done explosive work, agility drills, weights, cutting drills. He’s ready. ... He’s so ready, he’s bored.”
While the Raiders will be in San Diego on Sunday trying to clinch their first playoff spot in 14 years, Smith and Fotion will attend the Patriots-Broncos game in Denver. Fotion has been a Broncos fan since 1988, but he still hopes that Smith is back with the Raiders playing against his favorite team in the regular-season finale Jan. 1.
“The league let Ray Lewis play, they let Mike Vick play,” Fotion said. “All Aldon and I are asking is what does he have to do? Let us know.
“If they’re trying to break him, they’re not going to break him.”
Vic Tafur is a San Francisco Chronicle staff writer. Email: [email protected] Twitter: @VicTafur |
<reponame>Pythonian/ecomstore
from . import settings
def ecomstore(request):
    """ context processor for the site templates """
    # Every template render receives these keys: static site metadata,
    # the per-deployment analytics id from settings, and the request
    # object itself so templates can inspect it directly.
    return {
        'site_name': 'Modern Musician',
        'meta_keywords': 'Music, instruments, sheet music, musician',
        'meta_description': 'Modern Musician is an online supplier of \
            instruments, sheet music, and other accessories for musicians',
        'analytics_tracking_id': settings.ANALYTICS_TRACKING_ID,
        'request': request
    }
|
<reponame>cyrus-and/httpfs
#include <stdlib.h>
#include "generators.h"
#include "httpfs.h"
#include "fuse_api/fuse_api.h"
#include "version.h"
/* Print command-line usage for every sub-command to stderr. */
static void usage()
{
    fprintf( stderr ,
             "Usage:\n\n"
             "  httpfs --help\n"
             "  httpfs --version\n"
             "  httpfs generators\n"
             "  httpfs generate <generator>\n"
             "  httpfs mount <url> <mount_point> [<remote_chroot>]\n" );
}
/* Print the program name and version to stderr. */
static void info()
{
    fprintf( stderr , "httpfs " HTTPFS_VERSION "\n" );
}
/* Enable verbose logging when the HTTPFS_VERBOSE environment variable
 * is set to exactly "1". */
static void set_verbose_mode()
{
    const char *env = getenv( "HTTPFS_VERBOSE" );

    if ( env != NULL && strcmp( env , "1" ) == 0 )
    {
        HTTPFS_VERBOSE = 1;
    }
}
/* Entry point: dispatch on the sub-command named on the command line. */
int main( int argc , char *argv[] )
{
    set_verbose_mode();
    /* httpfs --version */
    if ( argc == 2 && strcmp( argv[ 1 ] , "--version" ) == 0 )
    {
        info();
    }
    /* httpfs --help */
    else if ( argc == 2 && strcmp( argv[ 1 ] , "--help" ) == 0 )
    {
        usage();
    }
    /* httpfs generators: list the available generator names */
    else if ( argc == 2 && strcmp( argv[ 1 ] , "generators" ) == 0 )
    {
        const struct httpfs_generator *generator;
        for ( generator = HTTPFS_GENERATORS ; generator->name ; generator++ )
        {
            printf( "%s\n" , generator->name );
        }
    }
    /* httpfs generate <generator> */
    else if ( argc == 3 && strcmp( argv[ 1 ] , "generate" ) == 0 )
    {
        if ( !httpfs_generate( argv[ 2 ] ) )
        {
            usage();
            return EXIT_FAILURE;
        }
    }
    /* httpfs mount <url> <mount_point> [<remote_chroot>] */
    else if ( ( argc == 4 || argc == 5 ) &&
              strcmp( argv[ 1 ] , "mount" ) == 0 )
    {
        struct httpfs httpfs;
        const char *url;
        const char *remote_chroot;
        char *mount_point;
        int rv;
        url = argv[ 2 ];
        remote_chroot = ( argc == 5 ? argv[ 4 ] : NULL );
        mount_point = argv[ 3 ];
        /* Blocks until the filesystem is unmounted or setup fails. */
        rv = httpfs_fuse_start( &httpfs , url , remote_chroot , mount_point );
        if ( rv )
        {
            /* Map each failure code to a human-readable reason. */
            fprintf( stderr , "Unable to mount: " );
            switch ( rv )
            {
            case HTTPFS_FUSE_ERROR:
                fprintf( stderr , "cannot initialize FUSE\n" );
                break;
            case HTTPFS_CURL_ERROR:
                fprintf( stderr , "cannot initialize cURL\n" );
                break;
            case HTTPFS_UNREACHABLE_SERVER_ERROR:
                fprintf( stderr , "cannot reach the remote server\n" );
                break;
            case HTTPFS_WRONG_CHROOT_ERROR:
                fprintf( stderr , "cannot find the remote path\n" );
                break;
            case HTTPFS_ERRNO_ERROR:
                fprintf( stderr , "errno (%i) %s\n" , errno , strerror( errno ) );
                break;
            }
        }
    }
    else
    {
        usage();
        return EXIT_FAILURE;
    }
    return EXIT_SUCCESS;
}
|
<gh_stars>0
/*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef NOS_DEVICE_H
#define NOS_DEVICE_H
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Max data size for read/write.
* TODO: Yes, it's a magic number. */
#define MAX_DEVICE_TRANSFER 2044
/* Transport operations for talking to a Nugget device. Every callback
 * receives the opaque ctx pointer stored next to the ops table in
 * struct nos_device. */
struct nos_device_ops {
  /**
   * Read a datagram from the device.
   *
   * Return 0 on success and a negative value on failure.
   */
  int (*read)(void* ctx, uint32_t command, uint8_t *buf, uint32_t len);
  /**
   * Write a datagram to the device.
   *
   * Return 0 on success and a negative value on failure.
   */
  int (*write)(void *ctx, uint32_t command, const uint8_t *buf, uint32_t len);
  /**
   * Block until an event has happened on the device, or until timed out.
   *
   * Values for msecs
   *  <0 wait forever
   *   0 return immediately (why?)
   *  >0 timeout after this many milliseconds
   *
   * Returns:
   *  <0 on error
   *   0 timed out
   *  >0 interrupt occurred
   */
  int (*wait_for_interrupt)(void *ctx, int msecs);
  /**
   * Reset the device.
   *
   * Return 0 on success and a negative value on failure.
   */
  int (*reset)(void *ctx);
  /**
   * Close the connection to the device.
   *
   * The device must not be used after closing.
   */
  void (*close)(void *ctx);
};

/* An open connection to a Nugget device: the transport ops table plus
 * the opaque context those ops operate on. */
struct nos_device {
  void *ctx;
  struct nos_device_ops ops;
};
/*
* Open a connection to a Nugget device.
*
* The name parameter identifies which Nugget device to connect to. Passing
* NULL connects to the default device.
*
* This function is implemented by the host specific variants of this library.
*
* Returns 0 on success or negative on failure.
*/
int nos_device_open(const char *name, struct nos_device *device);
#ifdef __cplusplus
}
#endif
#endif /* NOS_DEVICE_H */
|
/*
* Constants in this file should be updated as our situation changes,
* as they are highly data and usage dependent
*/
use dacite::core::{PhysicalDeviceFeatures, Format, FormatProperties};
// This is used to determine the size of the staging buffer
pub const MAX_GPU_UPLOAD: u64 = crate::renderer::memory::CHUNK_SIZE;

// This is the most common depth format supported on graphics hardware.
// (see http://vulkan.gpuinfo.org) and it is a good resolution, and it is
// floating-point (so reverse z-buffering works).
pub const DEPTH_FORMAT: Format = Format::D32_SFloat;

// Attachment formats for the diffuse/normals/material render targets
// (10-bit color with 2-bit alpha, and plain 8-bit RGBA for material).
pub const DIFFUSE_FORMAT: Format = Format::A2B10G10R10_UNorm_Pack32;
pub const NORMALS_FORMAT: Format = Format::A2B10G10R10_UNorm_Pack32;
pub const MATERIAL_FORMAT: Format = Format::R8G8B8A8_UNorm;

// Half-float targets for the shading and blur intermediates, so values
// above 1.0 survive until tone mapping — TODO confirm intent.
pub const SHADING_FORMAT: Format = Format::R16G16B16A16_SFloat;
pub const BLUR_FORMAT: Format = Format::R16G16B16A16_SFloat;
// Physical-device features the renderer requests. Only the four at the
// top are enabled; every other feature is explicitly false.
pub const FEATURES_NEEDED: PhysicalDeviceFeatures = PhysicalDeviceFeatures {
    large_points: true,
    sampler_anisotropy: true, // FIXME - we want this, we dont need it.
    texture_compression_bc: true,
    robust_buffer_access: cfg!(debug_assertions), // finds bugs; too expensive for live.
    //
    // the rest are false
    //
    full_draw_index_uint32: false,
    image_cube_array: false,
    independent_blend: false,
    geometry_shader: false,
    tessellation_shader: false,
    sample_rate_shading: false,
    dual_src_blend: false,
    logic_op: false,
    multi_draw_indirect: false,
    draw_indirect_first_instance: false,
    depth_clamp: false,
    depth_bias_clamp: false,
    fill_mode_non_solid: false,
    depth_bounds: false,
    wide_lines: false,
    alpha_to_one: false,
    multi_viewport: false,
    texture_compression_etc2: false,
    texture_compression_astc_ldr: false,
    occlusion_query_precise: false,
    pipeline_statistics_query: false,
    vertex_pipeline_stores_and_atomics: false,
    fragment_stores_and_atomics: false,
    shader_tessellation_and_geometry_point_size: false,
    shader_image_gather_extended: false,
    shader_storage_image_extended_formats: false,
    shader_storage_image_multisample: false,
    shader_storage_image_read_without_format: false,
    shader_storage_image_write_without_format: false,
    shader_uniform_buffer_array_dynamic_indexing: false,
    shader_sampled_image_array_dynamic_indexing: false,
    shader_storage_buffer_array_dynamic_indexing: false,
    shader_storage_image_array_dynamic_indexing: false,
    shader_clip_distance: false,
    shader_cull_distance: false,
    shader_float64: false,
    shader_int64: false,
    shader_int16: false,
    shader_resource_residency: false,
    shader_resource_min_lod: false,
    sparse_binding: false,
    sparse_residency_buffer: false,
    sparse_residency_image_2d: false,
    sparse_residency_image_3d: false,
    sparse_residency_2_samples: false,
    sparse_residency_4_samples: false,
    sparse_residency_8_samples: false,
    sparse_residency_16_samples: false,
    sparse_residency_aliased: false,
    variable_multisample_rate: false,
    inherited_queries: false,
};
// FIXME: make a const fn once that is stable
//
// Formats the device must support, paired with the feature flags we
// need on each. All entries except the vertex format only require a
// single optimal-tiling feature, so a small helper builds them.
pub fn get_formats_needed() -> [(Format, FormatProperties); 12] {
    use dacite::core::FormatFeatureFlags;

    // Build an entry that only requires the given optimal-tiling
    // features (linear-tiling and buffer features empty).
    let optimal = |format: Format, features: FormatFeatureFlags| -> (Format, FormatProperties) {
        (format, FormatProperties {
            linear_tiling_features: FormatFeatureFlags::empty(),
            optimal_tiling_features: features,
            buffer_features: FormatFeatureFlags::empty(),
        })
    };

    [
        // Most drawables use this format in the vertex buffer
        (Format::R32G32B32_SFloat, FormatProperties {
            linear_tiling_features: FormatFeatureFlags::empty(),
            optimal_tiling_features: FormatFeatureFlags::empty(),
            buffer_features: FormatFeatureFlags::VERTEX_BUFFER,
        }),
        // Depth buffer uses this
        optimal(DEPTH_FORMAT, FormatFeatureFlags::DEPTH_STENCIL_ATTACHMENT),
        // G-buffer attachments: diffuse, normals, material
        optimal(DIFFUSE_FORMAT, FormatFeatureFlags::COLOR_ATTACHMENT),
        optimal(NORMALS_FORMAT, FormatFeatureFlags::COLOR_ATTACHMENT),
        optimal(MATERIAL_FORMAT, FormatFeatureFlags::COLOR_ATTACHMENT),
        // Shading attachment uses this
        optimal(SHADING_FORMAT, FormatFeatureFlags::COLOR_ATTACHMENT),
        // We will use these compressed formats for assets
        optimal(Format::BC1_RGB_UNorm_Block, FormatFeatureFlags::SAMPLED_IMAGE),
        optimal(Format::BC3_UNorm_Block, FormatFeatureFlags::SAMPLED_IMAGE),
        optimal(Format::BC4_UNorm_Block, FormatFeatureFlags::SAMPLED_IMAGE),
        optimal(Format::BC5_UNorm_Block, FormatFeatureFlags::SAMPLED_IMAGE),
        optimal(Format::BC6H_SFloat_Block, FormatFeatureFlags::SAMPLED_IMAGE),
        optimal(Format::BC7_UNorm_Block, FormatFeatureFlags::SAMPLED_IMAGE),
    ]
}
// Pipeline/framebuffer requirements, presumably checked against the
// device limits at startup — confirm at the call site. A required push
// constant size of 0 indicates push constants are not used.
pub const PUSH_CONSTANTS_SIZE_REQUIRED: u32 = 0;
pub const COLOR_ATTACHMENT_COUNT_REQUIRED: u32 = 1;
pub const FRAMEBUFFER_LAYERS_REQUIRED: u32 = 1;
|
THE WORRIES OF A GRIEVOUSLY ILL ENGLISH FATHER AS WRITTEN TO HIS CHILDREN IN 1794
The quotation below is an excellent example of the anxieties once faced by seriously ill parents in the days long before the existence of child welfare programs.
The thoughts of a hard usage which some poor, friendless children meet with from unthinking and unfeeling people, cannot but be matter of concern to every parent, under the prospect of leaving a family of dear infants in such a melancholy situation. They cannot but be under some concern for them, respecting things pertaining to their comfort in this life, in things that are only of a temporal nature; but still greater must be the concern of every pious and religious parent, respecting things of a spiritual nature! Where shall the dear children now find a friend who will watch over them in things that have reference to futurity? Since real religion, and the fear of God, is so very unfashionable, that people who are in real concern about it, are, with St. Paul, thought to be beside themselves. Who will be so singular as to take the pains to shew (sic) them their danger from sin, and their remedy a Saviour? Who will shew them the necessity of holiness, and carefully instruct them in all the branches of it? Who will direct them in the choice of their company with a parental tenderness and anxiety; kindly reminding them, as occasion requires, that "he that walketh with wise men shall be wise; but a companion of fools shall be destroyed." Who will diligently observe, and tenderly check in them the beginnings of bad habits, and considering that "the beginning of sin is as the letting out of water," which, if not early stopped, will soon prove unconquerable? |
<reponame>MJDSys/gobble<filename>timeformatter.go
package main
import (
"log"
"time"
)
func timeToFilename(t time.Time) string {
const layout = "2006-01-02_15-04-05.md"
return t.Format(layout)
}
func timeToString(t time.Time) string {
const layout = "2006-01-02 15:04:05"
return t.Format(layout)
}
func stringToTime(s string) time.Time {
const layout = "2006-01-02 15:04:05"
t, err := time.Parse(layout, s)
if err != nil {
log.Println(err)
}
return t
}
|
// ReadTitle returns a string containing the first line of the fp file
func (s *searchCommand) readTitle(fp string) string {
	f, err := os.Open(fp)
	if err != nil {
		// NOTE(review): this exits the whole process when the file is
		// missing or unreadable — confirm that is intended here.
		log.Fatal(err)
	}
	defer f.Close()
	scanner := bufio.NewScanner(f)
	scanner.Scan()
	// scanner.Err() is not checked: a read error yields "" exactly like
	// an empty file does.
	return scanner.Text()
}
import React, {useEffect} from 'react';
import {DazzlerProps} from '../../../commons/js/types';
import {loadScript} from 'commons';
type ScriptProps = {
    /**
     * The script to load.
     */
    uri: string;
    /** Set to true once the script has loaded; skips reloading. */
    loaded?: boolean;
    /** Timeout (ms) forwarded to loadScript. */
    timeout?: number;
    /** Populated with the failure reason if loading fails. */
    error?: string;
} & DazzlerProps;
/**
 * Load a script when mounted, with loaded status update and error handling.
 */
const Script = (props: ScriptProps) => {
    const {uri, loaded, timeout, updateAspects} = props;
    useEffect(() => {
        // Only fetch while not yet loaded; reports success via the
        // `loaded` aspect and failure via the `error` aspect.
        if (!loaded) {
            loadScript(uri, timeout)
                .then(() => updateAspects({loaded: true}))
                .catch((error) => updateAspects({error}));
        }
    }, [uri, loaded, timeout]);
    // Renders nothing; this component exists only for its side effect.
    return <></>;
};
Script.defaultProps = {
    timeout: 30000,
};
|
// Override this if your state changes are not done synchronously
//
// Reports the filter state; while cueing, waits up to dwMSecs for the
// cueing event before reading the state.
STDMETHODIMP CAVIDraw::GetState(DWORD dwMSecs, FILTER_STATE *State)
{
    DbgLog((LOG_TRACE,5,TEXT("::GetState wait for %ldms"), dwMSecs));
    CheckPointer( State, E_POINTER );
    if (m_fCueing && dwMSecs) {
        m_EventCueing.Wait(dwMSecs);
    }
    DbgLog((LOG_TRACE,5,TEXT("::GetState done waiting")));
    *State = m_State;
    // Still cueing after the wait: report an intermediate state so the
    // caller knows the transition has not completed yet.
    if (m_fCueing)
        return VFW_S_STATE_INTERMEDIATE;
    else
        return S_OK;
}
/**
@Author: wei-g
@Date: 2020/11/3 3:52 下午
@Description:
*/
package sparrow
const (
	// Version is the current release tag of the sparrow package.
	Version = "v0.0.15"
)
|
<reponame>ngonhi/mmpose
# Copyright (c) OpenMMLab. All rights reserved.
import tempfile
import warnings
from mmcv.runner import HOOKS, Hook
@HOOKS.register_module()
class ComputeTrainMetricsHook(Hook):
    """Hook that evaluates training outputs each iteration and logs the
    running, sample-weighted average of every metric at epoch end.

    Args:
        dataloader: Dataloader whose dataset exposes an ``evaluate()``
            method compatible with the mmpose dataset API.
        **eval_kwargs: Extra keyword arguments forwarded to ``evaluate()``.
    """

    def __init__(self,
                 dataloader,
                 **eval_kwargs):
        self.dataloader = dataloader
        self.eval_kwargs = eval_kwargs
        # Running accumulators: per-metric weighted sums, the number of
        # samples seen, and the derived running averages.
        self.sum = {}
        self.count = 0
        self.avg = {}

    def reset(self):
        """Reset the internal evaluation results."""
        self.sum = {}
        self.count = 0
        self.avg = {}

    def before_train_epoch(self, runner):
        """Start each epoch with fresh accumulators."""
        self.reset()

    def after_train_iter(self, runner):
        """Called after every training iteration to aggregate the results."""
        results = runner.outputs['results']
        # Copy each result (and its preds) so rescaling does not mutate
        # the originals, then map predictions back to image coordinates.
        rescaled = [None] * len(results)
        for i, result in enumerate(results):
            rescaled[i] = result.copy()
            rescaled[i]['preds'] = result['preds'].copy()
            w_scale, h_scale = result['rescale']
            for j, item in enumerate(result['preds']):
                rescaled[i]['preds'][j] = item.copy()
                rescaled[i]['preds'][j][:, 0] = item[:, 0] * w_scale
                rescaled[i]['preds'][j][:, 1] = item[:, 1] * h_scale
        eval_res = self.evaluate(runner, rescaled)
        self.update(eval_res, len(results))

    def after_train_epoch(self, runner):
        """Called after every training epoch to log the averaged metrics."""
        runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
        for name, val in self.avg.items():
            runner.log_buffer.output[name + '_train'] = val
        runner.log_buffer.ready = True
        self.reset()

    def evaluate(self, runner, results):
        """Evaluate the results.

        Args:
            runner (:obj:`mmcv.Runner`): The underlined training runner.
            results (list): Output results.
        """
        with tempfile.TemporaryDirectory() as tmp_dir:
            eval_res = self.dataloader.dataset.evaluate(
                results,
                res_folder=tmp_dir,
                logger=runner.logger,
                **self.eval_kwargs)
        return eval_res

    def update(self, eval_res, n):
        """Fold a batch of ``n`` samples' metrics into the running averages."""
        self.count += n
        for name, val in eval_res.items():
            self.sum[name] = self.sum.get(name, 0) + val * n
            self.avg[name] = self.sum[name] / self.count
|
/*
* An example on how a change in a lib ABI can screw up programs that use the
* library.
*
* The program adds up two 32-bit integers. When the lib's ABI changes, the
* program has no way to learn about the change and prints an incorrect value.
* Run like this:
*
* $ gcc -m32 -shared -o libabi.so libabi-32.c
* $ gcc -m32 -L. -Xlinker -R . -labi abi-main.c
* $ ./a.out 7 4
* 10002000
* result: 11
* $ gcc -m32 -shared -o libabi.so libabi-64.c
* $ ./a.out 7 4
* 10002000
* result: 2007
*
* For Sun Studio (cc), use "-G" instead of "-shared". Also note that newer
* GCC does not support "-R" so we have to use "-Xlinker -R ." instead
* (note the ``.'' as part of the option).
*
* (c) <EMAIL>
*/
#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
#include <err.h>
extern int32_t abiadd(int32_t a, int32_t b);
/*
 * Adds the two command-line integers via the shared library's abiadd();
 * an ABI mismatch in the library shows up as a wrong sum.
 */
int
main(int argc, char **argv)
{
	if (argc != 3)
		errx(1, "usage: a.out <number> <number>");

	/* Put some data on the stack first by calling printf(). */
	printf("%d%d\n", 1000, 2000);
	/* See the result. */
	printf("result: %d\n", abiadd(atoi(argv[1]), atoi(argv[2])));
	return (0);
}
|
<filename>fs/operations/multithread_test.go<gh_stars>1-10
package operations
import (
"context"
"fmt"
"testing"
"github.com/clive2000/rclone/fs/accounting"
"github.com/clive2000/rclone/fstest/mockfs"
"github.com/clive2000/rclone/fstest/mockobject"
"github.com/clive2000/rclone/lib/random"
"github.com/clive2000/rclone/fs"
"github.com/clive2000/rclone/fstest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestDoMultiThreadCopy exercises the predicate deciding whether a copy
// is eligible for multi-threaded transfer, varying stream count, size
// cutoff, OpenWriterAt availability and local/remote placement.
func TestDoMultiThreadCopy(t *testing.T) {
	f := mockfs.NewFs("potato", "")
	src := mockobject.New("file.txt").WithContent([]byte(random.String(100)), mockobject.SeekModeNone)
	srcFs := mockfs.NewFs("sausage", "")
	src.SetFs(srcFs)
	// Save the global config mutated below and restore it afterwards.
	oldStreams := fs.Config.MultiThreadStreams
	oldCutoff := fs.Config.MultiThreadCutoff
	oldIsSet := fs.Config.MultiThreadSet
	defer func() {
		fs.Config.MultiThreadStreams = oldStreams
		fs.Config.MultiThreadCutoff = oldCutoff
		fs.Config.MultiThreadSet = oldIsSet
	}()
	fs.Config.MultiThreadStreams, fs.Config.MultiThreadCutoff = 4, 50
	fs.Config.MultiThreadSet = false
	// The predicate must never actually open the writer.
	nullWriterAt := func(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
		panic("don't call me")
	}
	f.Features().OpenWriterAt = nullWriterAt
	assert.True(t, doMultiThreadCopy(f, src))
	// Fewer than 2 streams disables multi-threaded copies.
	fs.Config.MultiThreadStreams = 0
	assert.False(t, doMultiThreadCopy(f, src))
	fs.Config.MultiThreadStreams = 1
	assert.False(t, doMultiThreadCopy(f, src))
	fs.Config.MultiThreadStreams = 2
	assert.True(t, doMultiThreadCopy(f, src))
	// Files below the cutoff (src is 100 bytes) stay single-threaded.
	fs.Config.MultiThreadCutoff = 200
	assert.False(t, doMultiThreadCopy(f, src))
	fs.Config.MultiThreadCutoff = 101
	assert.False(t, doMultiThreadCopy(f, src))
	fs.Config.MultiThreadCutoff = 100
	assert.True(t, doMultiThreadCopy(f, src))
	// The destination must support OpenWriterAt.
	f.Features().OpenWriterAt = nil
	assert.False(t, doMultiThreadCopy(f, src))
	f.Features().OpenWriterAt = nullWriterAt
	assert.True(t, doMultiThreadCopy(f, src))
	// Local/local copies are excluded unless MultiThreadSet forces them.
	f.Features().IsLocal = true
	srcFs.Features().IsLocal = true
	assert.False(t, doMultiThreadCopy(f, src))
	fs.Config.MultiThreadSet = true
	assert.True(t, doMultiThreadCopy(f, src))
	fs.Config.MultiThreadSet = false
	assert.False(t, doMultiThreadCopy(f, src))
	srcFs.Features().IsLocal = false
	assert.True(t, doMultiThreadCopy(f, src))
	srcFs.Features().IsLocal = true
	assert.False(t, doMultiThreadCopy(f, src))
	f.Features().IsLocal = false
	assert.True(t, doMultiThreadCopy(f, src))
	srcFs.Features().IsLocal = false
	assert.True(t, doMultiThreadCopy(f, src))
}
// TestMultithreadCalculateChunks checks how a transfer of a given size
// is divided into per-stream parts, including rounding at the
// multithreadChunkSize boundary.
func TestMultithreadCalculateChunks(t *testing.T) {
	for _, test := range []struct {
		size         int64
		streams      int
		wantPartSize int64
		wantStreams  int
	}{
		{size: 1, streams: 10, wantPartSize: multithreadChunkSize, wantStreams: 1},
		{size: 1 << 20, streams: 1, wantPartSize: 1 << 20, wantStreams: 1},
		{size: 1 << 20, streams: 2, wantPartSize: 1 << 19, wantStreams: 2},
		{size: (1 << 20) + 1, streams: 2, wantPartSize: (1 << 19) + multithreadChunkSize, wantStreams: 2},
		{size: (1 << 20) - 1, streams: 2, wantPartSize: (1 << 19), wantStreams: 2},
	} {
		t.Run(fmt.Sprintf("%+v", test), func(t *testing.T) {
			mc := &multiThreadCopyState{
				size:    test.size,
				streams: test.streams,
			}
			mc.calculateChunks()
			assert.Equal(t, test.wantPartSize, mc.partSize)
			assert.Equal(t, test.wantStreams, mc.streams)
		})
	}
}
// TestMultithreadCopy performs real two-stream copies from the remote
// to the local filesystem at sizes straddling the chunk boundary, and
// verifies size, name and listing afterwards.
func TestMultithreadCopy(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	for _, test := range []struct {
		size    int
		streams int
	}{
		{size: multithreadChunkSize*2 - 1, streams: 2},
		{size: multithreadChunkSize * 2, streams: 2},
		{size: multithreadChunkSize*2 + 1, streams: 2},
	} {
		t.Run(fmt.Sprintf("%+v", test), func(t *testing.T) {
			if *fstest.SizeLimit > 0 && int64(test.size) > *fstest.SizeLimit {
				t.Skipf("exceeded file size limit %d > %d", test.size, *fstest.SizeLimit)
			}
			var err error
			contents := random.String(test.size)
			t1 := fstest.Time("2001-02-03T04:05:06.499999999Z")
			file1 := r.WriteObject(context.Background(), "file1", contents, t1)
			fstest.CheckItems(t, r.Fremote, file1)
			fstest.CheckItems(t, r.Flocal)
			src, err := r.Fremote.NewObject(context.Background(), "file1")
			require.NoError(t, err)
			// Account the transfer so stats do not leak between subtests.
			accounting.GlobalStats().ResetCounters()
			tr := accounting.GlobalStats().NewTransfer(src)
			defer func() {
				tr.Done(err)
			}()
			dst, err := multiThreadCopy(context.Background(), r.Flocal, "file1", src, 2, tr)
			require.NoError(t, err)
			assert.Equal(t, src.Size(), dst.Size())
			assert.Equal(t, "file1", dst.Remote())
			fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1}, nil, fs.GetModifyWindow(r.Flocal, r.Fremote))
			require.NoError(t, dst.Remove(context.Background()))
		})
	}
}
|
Clinical investigation of the efficacy of a commercial mouthrinse containing 0.05% cetylpyridinium chloride in reducing dental plaque.
OBJECTIVE
The objective of this independent, double-blind, seven-day clinical study was to assess the efficacy of a commercially available mouthrinse containing 0.05% CPC for controlling dental plaque relative to that of a control mouthrinse without 0.05% CPC.
METHODS
Adult male and female subjects from the San Juan, Puerto Rico area reported to the clinical facility, having refrained from any oral hygiene procedures for 12 hours, and from eating, drinking, and smoking for four hours, for an assessment of the oral soft and hard tissues and a baseline dental plaque evaluation. Qualifying subjects were randomly assigned into one of the two treatment groups, and were provided with their assigned mouthrinse, an adult soft-bristled toothbrush, and a commercially available fluoride toothpaste for home use. Over the seven-day period of home use, during which there were no restrictions regarding diet or smoking habits, subjects were instructed to brush their teeth for one minute twice daily (morning and evening) with the toothbrush and toothpaste supplied, to rinse their mouths with water after brushing, and then to rinse with 15 ml of their assigned mouthrinse for one minute before expectorating. The use of other oral hygiene products or procedures, such as floss or interdental stimulators, was not permitted during the study. After seven days of product use, subjects returned to the clinical facility having followed the same restrictions with respect to oral hygiene procedures, eating, and drinking as prior to the baseline examination, and the oral soft and hard tissue assessments and dental plaque evaluations were repeated. Comparisons between treatment groups with respect to baseline-adjusted Plaque Index scores at the seven-day examination were performed using Analyses of Covariance, p < or = 0.05.
RESULTS
Forty-eight subjects complied with the protocol and completed the study. Results demonstrated that, after seven days of product use and 12 hours after rinsing, both the CPC mouthrinse group and the control mouthrinse group exhibited statistically significant reductions in whole-mouth Plaque Index scores (25.3% and 6.6%, respectively), in Plaque Index scores measured at interproximal sites (51.3% and 32.9%, respectively), and in Plaque Severity Index scores (43.5% and 25.4%, respectively). Relative to the control mouthrinse, the 0.05% CPC mouthrinse group exhibited statistically significant greater reductions in whole-mouth plaque scores (15.9%), in Plaque Index scores measured at interproximal sites (23%), and in Plaque Severity Index scores (17%). As the measurements were made 12 hours after final product use, the results also demonstrate that the CPC mouthrinse provides 12-hour protection against plaque accumulation in individuals with existing plaque.
CONCLUSION
The overall results of this double-blind clinical study support the conclusion that after seven days of product use, a mouthrinse containing 0.05% CPC provides significantly greater efficacy for reducing dental plaque 12 hours after use, than does a control mouthrinse without 0.05% CPC. |
/// Parse a list of dnas.
/// If paths are directories then each directory
/// will be searched for the first file that matches
/// `*.dna`.
pub fn parse_dnas(mut dnas: Vec<PathBuf>) -> anyhow::Result<Vec<PathBuf>> {
if dnas.is_empty() {
dnas.push(std::env::current_dir()?);
}
for dna in dnas.iter_mut() {
if dna.is_dir() {
let file_path = search_for_dna(dna)?;
*dna = file_path;
}
ensure!(
dna.file_name()
.map(|f| f.to_string_lossy().ends_with(".dna"))
.unwrap_or(false),
"File {} is not a valid dna file name: (e.g. my-dna.dna)",
dna.display()
);
}
Ok(dnas)
} |
  /**
   * @param serviceCategory - A broad categorization of the service that is to be performed during
   * this appointment.
   * @return this builder, to allow fluent chaining.
   */
  public AppointmentBuilder.Impl withServiceCategory(
      @NonNull CodeableConcept... serviceCategory) {
    // NOTE(review): Arrays.asList returns a fixed-size list backed by the
    // varargs array — fine if the builder never adds/removes elements, but
    // confirm no later mutation is expected.
    this.serviceCategory = Arrays.asList(serviceCategory);
    return this;
  }
Viewpoint in Translation of Academic Writing: An Illustrative Case Study
This article employs the concept of viewpoint, also referred to as point of view or stance, to offer a short case study of semantic shifts in the translation of academic writing. Drawing on a model of the concept developed specifically for research into the subjective aspects of academic prose, the study seeks to show what viewpoint shifts can occur in translation, based on an analysis of a Cognitive Linguistics monograph translated from English into Polish. The examples, supplemented with English back-translation glosses, illustrate several types of viewpoint shifts taking place in translation, such as increasing or decreasing the author’s commitment to a claim, the removal of author emphasis from the text, and shifts from implicit to explicit author mention. Given that academic discourse has ceased to be regarded as objective description of facts, and based on the assumption that the linguistic resources connected with hedging, evaluation and (avoidance of) self-mention are consciously deployed by authors of academic texts, it is suggested that viewpoint phenomena may represent a valuable research area for the strand of translation studies concerned with academic writing. |
// GetNoParamsError is a helper that expects a connection error on a Get with no params but the stmt
func (mr *MockQueryableMockRecorder) GetNoParamsError(typ interface{}, err error) *gomock.Call {
call := mr.Get(matcher.Interface(typ), StringType)
call.Return(err)
return call.Times(1)
} |
// MinPadding returns the underlying encryption algorithm's minimum padding.
// Used to calculate the maximum plaintext blocksize that can be fed into
// the encryption algorithm.
func (e *EncryptionAlgorithm) MinPadding() int {
	// Lazily fall back to the default implementation when no
	// algorithm-specific padding function has been configured.
	// NOTE(review): this lazy write is unsynchronized — a data race if
	// MinPadding can be called from multiple goroutines; confirm callers.
	if e.minPadding == nil {
		e.minPadding = minPaddingNone
	}
	return e.minPadding()
}
Sound Transit recently announced that they will extend Link light rail service on the 4th of July until 2am, an hour later than normal. This marks the first time the transit agency has extended service on the holiday since light rail opened in 2009. The last train departures from Angle Lake Station and University of Washington Station will be at 1am and 2am, respectively. From about 11.30pm and midnight onward, trains will be on half-hourly frequencies and the Downtown Seattle Transit Tunnel will close at 2.20am.
Bus service will also be extended with 40 additional buses serving special routes between Seattle Center, Gas Works Park, Downtown Seattle, and light rail stations. King County Metro will add 250 service hours across 20 routes, including the 5, 8, 26, 28, 31, 32, 40, 44, 49, 62, 70, 120, RapidRide A, B, C, D, and E Lines, and Sound Transit Routes 545, 550, and 554.
The service extension will help holiday revelers get home safely and keep more cars off the road. Riders, however, should be aware that other service on the holiday may be partially or fully curtailed, including Sunday-style bus service for ST Express and Metro and no Sounder commuter rail service.
Sound Transit received pushback for their decision not to extend service for New Years. They defended the decision, citing concerns about necessary maintenance work that could only be completed during the short window each night when service stops. The maintenance record casts doubt on the urgency of the work completed that night, and it appears they were able to adjust their maintenance schedule this time around.
We remain hopeful that service extensions become standard moving forward.
Image courtesy of Andi Szilagyi via Wikimedia Commons.
We hope you loved this article. If so, please consider supporting our work. The Urbanist is a non-profit that depends on donations from readers like you. |
from tests.controllers.csv import TestCsvServiceApi
from tests.controllers.excel import TestExcelServiceApi
|
Dilated cardiomyopathy: A preventable presentation of DiGeorge Syndrome
Patients with cardiac failure require careful evaluation to determine the precise nature of the cause of their illness. Genetic causes of dilated cardiomyopathy are well known but inherited conditions may lead to unexpected consequences through intermediate mechanisms not readily recognised as a feature of the inherited disorder. We describe a case of dilated cardiomyopathy resulting from prolonged hypocalcaemia due to previously undiagnosed hypoparathyroidism resulting from DiGeorge Syndrome and describe the features of this case and the treatment of hypoparathyroidism. |
/*
* @Author: <EMAIL>
* @Date: 2021-10-14 10:50:41
* @LastEditors: <EMAIL>
* @LastEditTime: 2021-10-18 16:19:28
* @FilePath: \borderlands3-ui\src\Dialog\Dialog.stories.tsx
*/
import React, { useState } from 'react';
import { Dialog } from '.';
import { Button } from '../Button';
// Storybook metadata: register the Dialog component under the "Dialog" title.
export default {
  component: Dialog,
  title: 'Dialog',
};
export const Default = () => {
const [visible, setVisible] = useState(false);
const openDialog = () => setVisible(true);
const closeDialog = () => setVisible(false);
// Posterama Text Regular
return (
<>
<Button onClick={openDialog}>Quit Game</Button>
<Dialog visible={visible} onCancel={closeDialog} title="Quit Game?">
<p style={{ fontFamily: 'Posterama Text Regular', marginTop: '40px' }}>Aue you sure you want to quit the game?</p>
</Dialog>
</>
);
};
|
<filename>whatwg-dom/Web/DOM/Document.hs<gh_stars>0
module Web.DOM.Document where
import Prelude hiding (undefined, null)
import Web.DOM.Types
import Control.Monad.IO.Class
import Data.Coerce (coerce)
import qualified Language.JavaScript.Reflect as Reflect
import Language.JavaScript.Prim
-- | Convert a raw JavaScript value to 'Maybe', mapping both @null@ and
-- @undefined@ to 'Nothing' and wrapping anything else in 'Just'.
toMaybe :: JSVal -> Maybe JSVal
toMaybe value =
  if value == null || value == undefined
    then Nothing
    else Just value
-- | Safe property access: read property @k@ from object @a@ via 'Reflect.get',
-- yielding 'Nothing' when the result is @null@ or @undefined@.
(?.) :: IsJSObject a => a -> String -> Maybe JSVal
(?.) a k = toMaybe $ Reflect.get (toJSObject a) (toJSString k)
-- | The document's @body@ element, or 'Nothing' if it is @null@\/@undefined@.
body :: Document -> Maybe HTMLElement
body document = (HTMLElement) <$> document ?. "body"
|
def find_character_device(port_name):
    """Find the character device belonging to a named virtio serial port.

    Scans ``/sys/class/virtio-ports`` for an entry whose ``name`` file
    matches `port_name` and maps it to the corresponding ``/dev`` node.

    :param port_name: The virtio port name to look for (a string).
    :returns: The absolute pathname of the matching character device.
    :raises: :py:exc:`Exception` when no port with the given name exists.
    """
    root = '/sys/class/virtio-ports'
    logger.debug("Automatically selecting appropriate character device based on %s ..", root)
    for entry in os.listdir(root):
        name_file = os.path.join(root, entry, 'name')
        if not os.path.isfile(name_file):
            continue
        with open(name_file) as handle:
            advertised_name = handle.read().strip()
        if advertised_name == port_name:
            character_device = '/dev/%s' % entry
            logger.debug("Selected character device: %s", character_device)
            return character_device
    raise Exception(compact("""
        Failed to select the appropriate character device for the port name
        {name}! This is probably caused by a configuration issue on either the
        QEMU host or inside the QEMU guest. Please refer to the following web
        page for help: http://negotiator.readthedocs.org/en/latest/#character-device-detection-fails
    """, name=repr(port_name)))
import java.util.ArrayList;
import java.util.List;
import java.util.Scanner;
/**
 * Reads a chess start and end square (e.g. "a8" "h1") from stdin and prints
 * a shortest sequence of king moves between them: first the move count, then
 * one move per line using the letters R/L (file) and U/D (rank).
 */
public class CF {
    public static void main(String[] args) {
        Scanner in = new Scanner(System.in);
        String from = in.next();
        String to = in.next();

        // Signed remaining displacement along files (dx) and ranks (dy).
        int dx = to.charAt(0) - from.charAt(0);
        int dy = to.charAt(1) - from.charAt(1);

        List<String> path = new ArrayList<String>();
        while (dx != 0 || dy != 0) {
            // Each king move reduces both offsets by at most one,
            // moving diagonally whenever both are nonzero.
            StringBuilder step = new StringBuilder();
            if (dx > 0) {
                step.append('R');
                dx--;
            } else if (dx < 0) {
                step.append('L');
                dx++;
            }
            if (dy > 0) {
                step.append('U');
                dy--;
            } else if (dy < 0) {
                step.append('D');
                dy++;
            }
            path.add(step.toString());
        }

        System.out.println(path.size());
        for (String step : path)
            System.out.println(step);
        in.close();
    }
}
	/**
	 * Checks that the parsed string contains each of the given words at the
	 * current position, consuming them one by one; throws an exception on the
	 * first word that does not match.
	 *
	 * @param words list of words to check
	 */
	public void expect(final String... words) {
		// Delegates to the single-word overload; the 'false' flag presumably
		// marks the word as non-optional — TODO confirm against
		// expect(String, boolean).
		for (final String word : words) {
			expect(word, false);
		}
	}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.