from typing import Tuple

import numpy as np

def rotate(x: float, y: float, angle: float) -> Tuple[float, float]:
    angle_rad = angle / 360 * 2 * np.pi  # degrees -> radians
    sin, cos = np.sin(angle_rad), np.cos(angle_rad)
    return x * cos - y * sin, x * sin + y * cos |
package v2
import (
"context"
"fmt"
"github.com/ctreminiom/go-atlassian/pkg/infra/models"
"net/http"
)
type IssueTypeService struct {
client *Client
Scheme *IssueTypeSchemeService
ScreenScheme *IssueTypeScreenSchemeService
}
// Gets returns all issue types.
// Docs: https://docs.go-atlassian.io/jira-software-cloud/issues/type#get-all-issue-types-for-user
// Atlassian Docs: https://developer.atlassian.com/cloud/jira/platform/rest/v2/api-group-issue-types/#api-rest-api-2-issuetype-get
func (i *IssueTypeService) Gets(ctx context.Context) (result []*models.IssueTypeScheme, response *ResponseScheme, err error) {
var endpoint = "rest/api/2/issuetype"
request, err := i.client.newRequest(ctx, http.MethodGet, endpoint, nil)
if err != nil {
return
}
request.Header.Set("Accept", "application/json")
response, err = i.client.call(request, &result)
if err != nil {
return
}
return
}
// Create creates an issue type and adds it to the default issue type scheme.
// Docs: https://docs.go-atlassian.io/jira-software-cloud/issues/type#create-issue-type
// Atlassian Docs: https://developer.atlassian.com/cloud/jira/platform/rest/v2/api-group-issue-types/#api-rest-api-2-issuetype-post
func (i *IssueTypeService) Create(ctx context.Context, payload *models.IssueTypePayloadScheme) (result *models.IssueTypeScheme,
response *ResponseScheme, err error) {
payloadAsReader, err := transformStructToReader(payload)
if err != nil {
return nil, nil, err
}
var endpoint = "rest/api/2/issuetype"
request, err := i.client.newRequest(ctx, http.MethodPost, endpoint, payloadAsReader)
if err != nil {
return
}
request.Header.Set("Accept", "application/json")
request.Header.Set("Content-Type", "application/json")
response, err = i.client.call(request, &result)
if err != nil {
return
}
return
}
// Get returns an issue type.
// Docs: https://docs.go-atlassian.io/jira-software-cloud/issues/type#get-issue-type
// Atlassian Docs: https://developer.atlassian.com/cloud/jira/platform/rest/v2/api-group-issue-types/#api-rest-api-2-issuetype-id-get
func (i *IssueTypeService) Get(ctx context.Context, issueTypeID string) (result *models.IssueTypeScheme, response *ResponseScheme,
err error) {
if len(issueTypeID) == 0 {
return nil, nil, models.ErrNoIssueTypeIDError
}
var endpoint = fmt.Sprintf("rest/api/2/issuetype/%v", issueTypeID)
request, err := i.client.newRequest(ctx, http.MethodGet, endpoint, nil)
if err != nil {
return
}
request.Header.Set("Accept", "application/json")
response, err = i.client.call(request, &result)
if err != nil {
return
}
return
}
// Update updates the issue type.
// Docs: https://docs.go-atlassian.io/jira-software-cloud/issues/type#update-issue-type
// Atlassian Docs: https://developer.atlassian.com/cloud/jira/platform/rest/v2/api-group-issue-types/#api-rest-api-2-issuetype-id-put
func (i *IssueTypeService) Update(ctx context.Context, issueTypeID string, payload *models.IssueTypePayloadScheme) (
result *models.IssueTypeScheme, response *ResponseScheme, err error) {
if len(issueTypeID) == 0 {
return nil, nil, models.ErrNoIssueTypeIDError
}
var endpoint = fmt.Sprintf("rest/api/2/issuetype/%v", issueTypeID)
payloadAsReader, err := transformStructToReader(payload)
if err != nil {
return nil, nil, err
}
request, err := i.client.newRequest(ctx, http.MethodPut, endpoint, payloadAsReader)
if err != nil {
return
}
request.Header.Set("Accept", "application/json")
request.Header.Set("Content-Type", "application/json")
response, err = i.client.call(request, &result)
if err != nil {
return
}
return
}
// Delete deletes the issue type.
// If the issue type is in use, all uses are updated with the alternative issue type (alternativeIssueTypeId).
// A list of alternative issue types is obtained from the Get alternative issue types resource.
// Docs: https://docs.go-atlassian.io/jira-software-cloud/issues/type#delete-issue-type
// Atlassian Docs: https://developer.atlassian.com/cloud/jira/platform/rest/v2/api-group-issue-types/#api-rest-api-2-issuetype-id-delete
func (i *IssueTypeService) Delete(ctx context.Context, issueTypeID string) (response *ResponseScheme, err error) {
if len(issueTypeID) == 0 {
return nil, models.ErrNoIssueTypeIDError
}
var endpoint = fmt.Sprintf("rest/api/2/issuetype/%v", issueTypeID)
request, err := i.client.newRequest(ctx, http.MethodDelete, endpoint, nil)
if err != nil {
return
}
response, err = i.client.call(request, nil)
if err != nil {
return
}
return
}
// Alternatives returns a list of issue types that can be used to replace the issue type.
// The alternative issue types are those assigned to the same workflow scheme, field configuration scheme, and screen scheme.
// Docs: https://docs.go-atlassian.io/jira-software-cloud/issues/type#get-alternative-issue-types
// Atlassian Docs: https://developer.atlassian.com/cloud/jira/platform/rest/v2/api-group-issue-types/#api-rest-api-2-issuetype-id-alternatives-get
func (i *IssueTypeService) Alternatives(ctx context.Context, issueTypeID string) (result []*models.IssueTypeScheme,
response *ResponseScheme, err error) {
if len(issueTypeID) == 0 {
return nil, nil, models.ErrNoIssueTypeIDError
}
var endpoint = fmt.Sprintf("rest/api/2/issuetype/%v/alternatives", issueTypeID)
request, err := i.client.newRequest(ctx, http.MethodGet, endpoint, nil)
if err != nil {
return
}
request.Header.Set("Accept", "application/json")
response, err = i.client.call(request, &result)
if err != nil {
return
}
return
}
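// A minimal usage sketch for this service (hypothetical setup: assumes this
// package's Client constructor and authentication helpers; the names below
// are illustrative, not confirmed API):
//
//	client, err := New(nil, "https://your-domain.atlassian.net")
//	if err != nil {
//		log.Fatal(err)
//	}
//	client.Auth.SetBasicAuth("user@example.com", "api-token")
//
//	issueTypes, response, err := client.IssueType.Gets(context.Background())
//	if err != nil {
//		log.Fatal(err, response)
//	}
//	for _, issueType := range issueTypes {
//		log.Println(issueType.ID, issueType.Name)
//	}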
|
def is_dimension(value: object) -> "TypeGuard[AnyDimension]":
    """Test whether the given value could be a valid dimension."""
    if value is None:
        return True  # None means the dimension is unspecified.
    if callable(value):
        return True  # Deferred dimensions are callables evaluated on demand.
    if isinstance(value, (int, Dimension)):
        return True
    return False |
class Algorithm:
""" Provides all services necessary to load an algorithm, run it
and provide facilities for visualization """
def __init__(self):
""" TODO: Add docstring """
self.DB = AlgorithmDebugger(self)
self.source = "" # Source as a big string
self.interactive = []
        self.breakpoints = []  # Doesn't the debugger take care of this?
self.algoFileName = ""
self.prologFileName = ""
self.graphFileName = ""
self.mode = 0
# mode = 0 Stop
# mode = 1 Running
# mode = 2 Stepping
self.graph = None # graph for the algorithm
self.cleanGraphCopy = None # this is the backup of the graph
self.graphIsDirty = 0 # If graph was changed by running
self.algoGlobals = {} # Sandbox for Algorithm
self.logAnimator = 1
self.about = None
self.commentPattern = re.compile('[ \t]*#')
self.blankLinePattern = re.compile('[ \t]*\n')
def SetGUI(self, itsGUI):
""" Set the connection to its GUI """
self.GUI = itsGUI
def Open(self, filename):
""" Read in an algorithm from file. """
self.ClearBreakpoints()
self.algoFileName = filename
input_file = open(filename, 'r')
self.source = input_file.read()
input_file.close()
# Create prolog file name by removing .alg.py and adding .pro.py to file name
self.prologFileName = self.algoFileName.strip()[0:-len(".alg.py")] + ".pro.py"
# Now read in the prolog as a module to get access to the following data
# Maybe should obfuscate the names ala xxx_<bla>, have one dict ?
try:
input_file = open(self.prologFileName, 'r')
options = self.ReadPrologOptions(input_file)
input_file.close()
except (EOFError, IOError),(errno, strerror):
self.GUI.HandleFileIOError("prolog", self.prologFileName, errno, strerror)
return
        self.breakpoints = options.get('breakpoints', [])
        self.interactive = options.get('interactive', [])
        self.graphDisplays = options.get('graphDisplays', None)
        self.about = options.get('about', None)
        if self.graphDisplays is not None:
            if self.graphDisplays == 1 and hasattr(self, "GUI"):
                self.GUI.WithdrawSecondaryGraphDisplay()
def ReadPrologOptions(self, file_handle):
""" Prolog files should contain the following variables:
- breakpoints = [] a list of line numbers which are choosen as default
breakpoints
- interactive = [] a list of line numbers which contain interactive commands
(e.g., PickVertex)
- graphDisplays = 1 | 2 the number of graphDisplays needed by the algorithm
- about = \"\"\"<HTML-code>\"\"\" information about the algorithm
Parameter: filelike object
"""
text = file_handle.read()
options = {}
optionPattern = {'breakpoints':'breakpoints[ \t]*=[ \t]*(\[[^\]]+\])',
'interactive':'interactive[ \t]*=[ \t]*(\[[^\]]+\])',
'graphDisplays':'graphDisplays[ \t]*=[ \t]*([1-2])'}
# about is more complicated
for patternName in optionPattern.keys():
compPattern = re.compile(optionPattern[patternName])
match = compPattern.search(text)
if match != None:
options[patternName] = eval(match.group(1))
# Special case with about (XXX: assuming about = """ ... """)
try:
aboutStartPat = re.compile('about[ \t]*=[ \t]*"""')
aboutEndPat = re.compile('"""')
left = aboutStartPat.search(text).end()
right = aboutEndPat.search(text, left).start()
options['about'] = text[left:right]
except:
pass
return options
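    # A prolog options block as ReadPrologOptions() would parse it
    # (hypothetical values):
    #
    #   breakpoints = [12, 27]
    #   interactive = [15]
    #   graphDisplays = 1
    #   about = """<HTML><BODY>Describes the algorithm.</BODY></HTML>"""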
def About(self):
""" Return a HTML-page giving information about the algorithm """
if self.about != None:
return self.about
else:
return "<HTML><BODY> <H3>No information available</H3></BODY></HTML>"
def OpenGraph(self, file_obj, fileName=None):
""" Read in a graph from file and open the display """
if type(file_obj) in types.StringTypes:
self.graphFileName = file_obj
elif type(file_obj)==types.FileType or issubclass(file_obj.__class__,StringIO.StringIO):
self.graphFileName = fileName
else:
raise Exception("wrong types in argument list: expected string or file like object")
self.cleanGraphCopy = OpenCATBoxGraph(file_obj)
self.restoreGraph()
self.GUI.graphDisplay.Show() # In case we are hidden
self.GUI.graphDisplay.ShowGraph(self.graph, stripPath(self.graphFileName))
self.GUI.graphDisplay.RegisterGraphInformer(WeightedGraphInformer(self.graph))
self.GUI.graphDisplay.UpdateScrollRegion(auto=1)
def restoreGraph(self):
""" TODO: Add docstring """
self.graph = copy.deepcopy(self.cleanGraphCopy)
self.graphIsDirty = 0
def OpenSecondaryGraph(self, G, title, informer=None):
""" Read in graph from file and open the the second display """
self.GUI.OpenSecondaryGraphDisplay()
self.GUI.secondaryGraphDisplay.ShowGraph(G, title)
self.GUI.secondaryGraphDisplay.UpdateScrollRegion(auto=1)
if informer is not None:
self.GUI.secondaryGraphDisplay.RegisterGraphInformer(informer)
def ReadyToStart(self):
""" Return 1 if we are ready to run. That is when we user
has opened both an algorithm and a graph. """
if self.graphFileName != "" and self.algoFileName != "":
return 1
else:
return 0
    def Start(self):
        """ Start a loaded algorithm. It first execs the prolog and
            then starts the algorithm in the debugger. The algorithm's
            globals (i.e., the top-level locals) are in a dict we supply
            and for which we preload the packages we want to make available. """
if self.graphIsDirty == 1:
self.restoreGraph()
# Does show
self.GUI.graphDisplay.Show() # In case we are hidden
self.GUI.graphDisplay.ShowGraph(self.graph, stripPath(self.graphFileName))
self.GUI.graphDisplay.RegisterGraphInformer(WeightedGraphInformer(self.graph))
else:
self.GUI.graphDisplay.Show() # In case we are hidden
self.graphIsDirty = 1
self.mode = 1
# Set global vars ...
self.algoGlobals = {}
self.algoGlobals['algo'] = self
self.algoGlobals['graph'] = self.graph
self.animation_history = None
if self.logAnimator == 1:
self.animation_history = AnimationHistory(self.GUI.graphDisplay)
self.algoGlobals['disp'] = self.animation_history
elif self.logAnimator == 2:
self.algoGlobals['disp'] = MethodLogger(self.GUI.graphDisplay)
else:
self.algoGlobals['disp'] = self.GUI.graphDisplay
        ## explicitly loading packages we want to make available to the algorithm
#modules = ['data_structs',
#'gato.data_structs',
#'gato.anim.algorithms',
#'gato.graphs.util',
#'gato.util']
#for m in modules:
#self.algoGlobals[m] = __import__(m)
# transfer required globals
self.algoGlobals['gInteractive'] = gInteractive
# Read in prolog and execute it
try:
execfile(self.prologFileName, self.algoGlobals, self.algoGlobals)
except AbortProlog:
# Only get here because NeededProperties was canceled by user
self.GUI.CmdStop()
except (EOFError, IOError), (errno, strerror):
self.GUI.HandleFileIOError("prolog",
self.prologFileName,
errno,strerror)
except:
log.exception("Bug in %s" % self.prologFileName)
#traceback.print_exc()
# Filename must be handed over in a very safe way
# because of \ and ~1 under windows
self.algoGlobals['_tmp_file'] = self.algoFileName
# Switch on all shown breakpoints
for line in self.breakpoints:
self.DB.set_break(self.algoFileName, line)
try:
command = "execfile(_tmp_file)"
self.DB.run(command, self.algoGlobals, self.algoGlobals)
except:
log.exception("Bug in %s" % self.algoFileName)
#traceback.print_exc()
self.GUI.CommitStop()
def Stop(self):
""" TODO: Add docstring """
self.mode = 0
def Step(self):
""" TODO: Add docstring """
if self.animation_history is not None:
self.animation_history.DoAll()
self.DB.doTrace = 0
self.mode = 2
def Continue(self):
""" TODO: Add docstring """
if self.animation_history is not None:
self.animation_history.DoAll()
self.DB.doTrace = 0
self.mode = 1
def Trace(self):
""" TODO: Add docstring """
if self.animation_history is not None:
self.animation_history.DoAll()
self.mode = 2
self.DB.doTrace = 1
def Replay(self):
""" TODO: Add docstring """
#self.GUI.CmdStep()
if self.animation_history is not None:
self.animation_history.DoAll()
self.animation_history.Replay()
def Undo(self):
""" TODO: Add docstring """
#self.GUI.CmdStep()
if self.animation_history is not None:
self.animation_history.Undo()
def Do(self):
""" TODO: Add docstring """
#self.GUI.CmdStep()
if self.animation_history is not None:
self.animation_history.Do()
def ClearBreakpoints(self):
""" Clear all breakpoints """
for line in self.breakpoints:
self.GUI.HideBreakpoint(line)
self.DB.clear_break(self.algoFileName, line)
self.breakpoints = []
def SetBreakpoints(self, bp_list):
""" SetBreakpoints is depreciated
NOTE: Use 'breakpoint' var in prolog instead.
Set all breakpoints in list: So an algorithm prolog
can set a bunch of pre-assigned breakpoints at once """
log.info("SetBreakpoints() is depreciated. Use 'breakpoint' var in prolog instead. ")
for line in bp_list:
self.GUI.ShowBreakpoint(line)
self.breakpoints.append(line)
self.DB.set_break(self.algoFileName, line)
def ToggleBreakpoint(self, line=None):
""" If we have a breakpoint on line, delete it, else add it.
If no line is passed we ask the DB for it"""
if line == None:
line = self.DB.lastLine
if line in self.breakpoints:
self.GUI.HideBreakpoint(line)
self.breakpoints.remove(line)
self.DB.clear_break(self.algoFileName, line)
else: # New Breakpoint
            # Don't allow breakpoints on comment or blank lines.
import linecache
codeline = linecache.getline(self.algoFileName, line)
if codeline != '' and self.commentPattern.match(codeline) == None and \
self.blankLinePattern.match(codeline) == None:
self.GUI.ShowBreakpoint(line)
self.breakpoints.append(line)
self.DB.set_break(self.algoFileName, line)
def GetInteractiveLines(self):
""" Return lines on which user interaction (e.g., choosing a
vertex occurrs. """
return self.interactive
def GetBreakpointLines(self):
""" Return lines on which user interaction (e.g., choosing a
vertex occurrs. """
return self.breakpoints
def GetSource(self):
""" Return the algorithms source """
return self.source
def NeededProperties(self, propertyValueDict):
""" Check that graph has that value for each property
specified in the dictionary 'propertyValueDict'
If check fails algorithm is stopped
Proper names for properties are defined in gProperty
"""
for prop, requiredValue in propertyValueDict.iteritems():
failed = 0
value = self.graph.get_property(prop)
if value != 'Unknown':
try:
c = cmp(value, requiredValue)
if gProperty[prop][0] < 0 and c > 0:
failed = 1
elif gProperty[prop][0] == 0 and c != 0:
failed = 1
if gProperty[prop][0] > 0 and c < 0:
failed = 1
except ValueError:
failed = 1
if failed or value == 'Unknown':
errMsg = "The algorithm %s requires that the graph %s has %s" % \
(stripPath(self.algoFileName),
stripPath(self.graphFileName),
gProperty[prop][1])
if gProperty[prop][0] < 0:
errMsg += " of %s or less" % str(requiredValue)
elif gProperty[prop][0] > 0:
errMsg += " of %s or more" % str(requiredValue)
errMsg += ". This is not known"
errMsg += ".\nDo you still want to proceed ?"
r = tkMessageBox.askokcancel("Gato - Error", errMsg)
if not r:
raise AbortProlog, "User aborted at check for property %s" % prop
def PickVertex(self, default=None, filter_fun=None, visual=None):
""" Pick a vertex interactively.
- default: specifies the vertex returned when user does not
want to select one. If default==None, a random
vertex not subject to filter will be returned.
            - filter_fun: a function which should return a non-None value
              if the passed vertex is acceptable
            - visual: a function which takes the vertex as its
              only argument and causes e.g. some visual feedback """
v = None
#log.debug("pickVertex %s" %s globals()['gInteractive'])
if gInteractive == 1:
v = self.GUI.PickInteractive('vertex', filter_fun, default)
if v == None:
if default == None:
v = random.choice(self.graph.vertices)
else:
v = default
if visual is not None:
visual(v)
return v
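    # Example call from an algorithm file (hypothetical filter that accepts
    # a vertex only if some condition holds; the filter returns the vertex
    # itself as its non-None "acceptable" value):
    #
    #   v = algo.PickVertex(filter_fun=lambda v: v if condition(v) else None)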
def PickEdge(self, default=None, filter_fun=None, visual=None):
""" Pick an edge interactively
- default: specifies the edge returned when user does not
want to select one. If default==None, a random
edge not subject to filter will be returned
            - filter_fun: a function which should return a non-None value
              if the passed edge is acceptable
            - visual: a function which takes the edge as its
              only argument and causes e.g. some visual feedback """
e = None
if gInteractive == 1:
e = self.GUI.PickInteractive('edge', filter_fun, default)
if e == None:
if default == None:
e = random.choice(self.graph.get_all_edges())
else:
e = default
if visual is not None:
visual(e)
return e |
// renewToken accepts a client ID, client secret and refresh token and requests
// a new access token and refresh token from the Yandex OAuth service.
func renewToken(clientID, clientSecret, refreshToken string) (accesstoken, refreshtoken string, expires int, err error) {
if refreshToken == "" || clientID == "" || clientSecret == "" {
err = fmt.Errorf("credentials missing or no refresh token present")
return
}
token, err := requestRefreshToken(clientID, clientSecret, refreshToken)
if err == nil {
accesstoken = token.AccessToken
refreshtoken = token.RefreshToken
expires = token.ExpiresIn
}
return
} |
/**
* @brief Add sampled stats to the aggregator
*
* Called to add system level sampled data from a flow
* within the current observation window
*
* @param aggr the aggregator
* @param key the lookup flow key
* @param counters the stat counters to aggregate
* @return true if successful, false otherwise
*/
bool net_md_add_sample(struct net_md_aggregator *aggr,
struct net_md_flow_key *key,
struct flow_counters *counters)
{
struct net_md_stats_accumulator *acc;
if (aggr == NULL) return false;
if (key == NULL) return false;
if (counters == NULL) return false;
acc = net_md_lookup_acc(aggr, key);
if (acc == NULL) return false;
if (key->fstart)
acc->fkey->state.fstart = key->fstart;
if (key->fend)
acc->fkey->state.fend = key->fend;
net_md_set_counters(aggr, acc, counters);
return true;
} |
/**
* Partition supply message.
*/
public class GridDhtPartitionSupplyMessage<K, V> extends GridCacheMessage<K, V> implements GridCacheDeployable {
/** */
private static final long serialVersionUID = 0L;
/** Worker ID. */
private int workerId = -1;
/** Update sequence. */
private long updateSeq;
/** Acknowledgement flag. */
private boolean ack;
/** Partitions that have been fully sent. */
@GridDirectCollection(int.class)
private Set<Integer> last;
/** Partitions which were not found. */
@GridToStringInclude
@GridDirectCollection(int.class)
private Set<Integer> missed;
/** Entries. */
@GridDirectTransient
private Map<Integer, Collection<GridCacheEntryInfo<K, V>>> infos =
new HashMap<>();
/** Cache entries in serialized form. */
@GridToStringExclude
@GridDirectTransient
private Map<Integer, Collection<byte[]>> infoBytesMap = new HashMap<>();
/** */
private byte[] infoBytes;
/** Message size. */
@GridDirectTransient
private int msgSize;
/**
* @param workerId Worker ID.
* @param updateSeq Update sequence for this node.
*/
GridDhtPartitionSupplyMessage(int workerId, long updateSeq) {
assert workerId >= 0;
assert updateSeq > 0;
this.updateSeq = updateSeq;
this.workerId = workerId;
}
/**
* Empty constructor required for {@link Externalizable}.
*/
public GridDhtPartitionSupplyMessage() {
// No-op.
}
/** {@inheritDoc} */
@Override public boolean allowForStartup() {
return true;
}
/** {@inheritDoc} */
@Override public boolean ignoreClassErrors() {
return true;
}
/**
* @return Worker ID.
*/
int workerId() {
return workerId;
}
/**
* @return Update sequence.
*/
long updateSequence() {
return updateSeq;
}
/**
* Marks this message for acknowledgment.
*/
void markAck() {
ack = true;
}
/**
* @return Acknowledgement flag.
*/
boolean ack() {
return ack;
}
/**
     * @return Partitions that have been fully sent (last message for each).
*/
Set<Integer> last() {
return last == null ? Collections.<Integer>emptySet() : last;
}
/**
* @param p Partition which was fully sent.
*/
void last(int p) {
if (last == null)
last = new HashSet<>();
if (last.add(p)) {
msgSize += 4;
// If partition is empty, we need to add it.
Collection<byte[]> serInfo = infoBytesMap.get(p);
if (serInfo == null)
infoBytesMap.put(p, new LinkedList<byte[]>());
}
}
/**
* @param p Missed partition.
*/
void missed(int p) {
if (missed == null)
missed = new HashSet<>();
if (missed.add(p))
msgSize += 4;
}
/**
* @return Missed partitions.
*/
Set<Integer> missed() {
return missed == null ? Collections.<Integer>emptySet() : missed;
}
/**
* @return Entries.
*/
Map<Integer, Collection<GridCacheEntryInfo<K, V>>> infos() {
return infos;
}
/**
* @return Message size.
*/
int messageSize() {
return msgSize;
}
/**
* @param p Partition.
* @param info Entry to add.
* @param ctx Cache context.
* @throws GridException If failed.
*/
void addEntry(int p, GridCacheEntryInfo<K, V> info, GridCacheContext<K, V> ctx) throws GridException {
assert info != null;
marshalInfo(info, ctx);
byte[] bytes = CU.marshal(ctx, info);
msgSize += bytes.length;
Collection<byte[]> serInfo = infoBytesMap.get(p);
if (serInfo == null) {
msgSize += 4;
infoBytesMap.put(p, serInfo = new LinkedList<>());
}
serInfo.add(bytes);
}
/**
* @param p Partition.
* @param info Entry to add.
* @param ctx Cache context.
* @throws GridException If failed.
*/
void addEntry0(int p, GridCacheEntryInfo<K, V> info, GridCacheContext<K, V> ctx) throws GridException {
assert info != null;
assert info.keyBytes() != null;
assert info.valueBytes() != null;
// Need to call this method to initialize info properly.
marshalInfo(info, ctx);
byte[] bytes = CU.marshal(ctx, info);
msgSize += bytes.length;
Collection<byte[]> serInfo = infoBytesMap.get(p);
if (serInfo == null) {
msgSize += 4;
infoBytesMap.put(p, serInfo = new LinkedList<>());
}
serInfo.add(bytes);
}
/** {@inheritDoc} */
@Override public void prepareMarshal(GridCacheContext<K, V> ctx) throws GridException {
super.prepareMarshal(ctx);
infoBytes = ctx.marshaller().marshal(infoBytesMap);
}
/** {@inheritDoc} */
@Override public void finishUnmarshal(GridCacheContext<K, V> ctx, ClassLoader ldr) throws GridException {
super.finishUnmarshal(ctx, ldr);
infoBytesMap = ctx.marshaller().unmarshal(infoBytes, ldr);
for (Map.Entry<Integer, Collection<byte[]>> e : infoBytesMap.entrySet()) {
Collection<GridCacheEntryInfo<K, V>> entries = unmarshalCollection(e.getValue(), ctx, ldr);
unmarshalInfos(entries, ctx, ldr);
infos.put(e.getKey(), entries);
}
}
/**
* @return Number of entries in message.
*/
public int size() {
return infos.isEmpty() ? infoBytesMap.size() : infos.size();
}
/** {@inheritDoc} */
@SuppressWarnings({"CloneDoesntCallSuperClone", "CloneCallsConstructors"})
@Override public GridTcpCommunicationMessageAdapter clone() {
GridDhtPartitionSupplyMessage _clone = new GridDhtPartitionSupplyMessage();
clone0(_clone);
return _clone;
}
/** {@inheritDoc} */
@Override protected void clone0(GridTcpCommunicationMessageAdapter _msg) {
super.clone0(_msg);
GridDhtPartitionSupplyMessage _clone = (GridDhtPartitionSupplyMessage)_msg;
_clone.workerId = workerId;
_clone.updateSeq = updateSeq;
_clone.ack = ack;
_clone.last = last;
_clone.missed = missed;
_clone.infos = infos;
_clone.infoBytesMap = infoBytesMap;
_clone.infoBytes = infoBytes;
_clone.msgSize = msgSize;
}
/** {@inheritDoc} */
@SuppressWarnings("all")
@Override public boolean writeTo(ByteBuffer buf) {
commState.setBuffer(buf);
if (!super.writeTo(buf))
return false;
if (!commState.typeWritten) {
if (!commState.putByte(directType()))
return false;
commState.typeWritten = true;
}
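        // NOTE: the cases below intentionally fall through (no break): commState.idx
        // records how far a previous partial write got, so writing resumes at that
        // field and continues through all remaining fields.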
switch (commState.idx) {
case 2:
if (!commState.putBoolean(ack))
return false;
commState.idx++;
case 3:
if (!commState.putByteArray(infoBytes))
return false;
commState.idx++;
case 4:
if (last != null) {
if (commState.it == null) {
if (!commState.putInt(last.size()))
return false;
commState.it = last.iterator();
}
while (commState.it.hasNext() || commState.cur != NULL) {
if (commState.cur == NULL)
commState.cur = commState.it.next();
if (!commState.putInt((int)commState.cur))
return false;
commState.cur = NULL;
}
commState.it = null;
} else {
if (!commState.putInt(-1))
return false;
}
commState.idx++;
case 5:
if (missed != null) {
if (commState.it == null) {
if (!commState.putInt(missed.size()))
return false;
commState.it = missed.iterator();
}
while (commState.it.hasNext() || commState.cur != NULL) {
if (commState.cur == NULL)
commState.cur = commState.it.next();
if (!commState.putInt((int)commState.cur))
return false;
commState.cur = NULL;
}
commState.it = null;
} else {
if (!commState.putInt(-1))
return false;
}
commState.idx++;
case 6:
if (!commState.putLong(updateSeq))
return false;
commState.idx++;
case 7:
if (!commState.putInt(workerId))
return false;
commState.idx++;
}
return true;
}
/** {@inheritDoc} */
@SuppressWarnings("all")
@Override public boolean readFrom(ByteBuffer buf) {
commState.setBuffer(buf);
if (!super.readFrom(buf))
return false;
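        // As in writeTo(), cases fall through deliberately so a partially read
        // message resumes at the field index recorded in commState.idx.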
switch (commState.idx) {
case 2:
if (buf.remaining() < 1)
return false;
ack = commState.getBoolean();
commState.idx++;
case 3:
byte[] infoBytes0 = commState.getByteArray();
if (infoBytes0 == BYTE_ARR_NOT_READ)
return false;
infoBytes = infoBytes0;
commState.idx++;
case 4:
if (commState.readSize == -1) {
if (buf.remaining() < 4)
return false;
commState.readSize = commState.getInt();
}
if (commState.readSize >= 0) {
if (last == null)
last = new HashSet<>(commState.readSize);
for (int i = commState.readItems; i < commState.readSize; i++) {
if (buf.remaining() < 4)
return false;
int _val = commState.getInt();
last.add((Integer)_val);
commState.readItems++;
}
}
commState.readSize = -1;
commState.readItems = 0;
commState.idx++;
case 5:
if (commState.readSize == -1) {
if (buf.remaining() < 4)
return false;
commState.readSize = commState.getInt();
}
if (commState.readSize >= 0) {
if (missed == null)
missed = new HashSet<>(commState.readSize);
for (int i = commState.readItems; i < commState.readSize; i++) {
if (buf.remaining() < 4)
return false;
int _val = commState.getInt();
missed.add((Integer)_val);
commState.readItems++;
}
}
commState.readSize = -1;
commState.readItems = 0;
commState.idx++;
case 6:
if (buf.remaining() < 8)
return false;
updateSeq = commState.getLong();
commState.idx++;
case 7:
if (buf.remaining() < 4)
return false;
workerId = commState.getInt();
commState.idx++;
}
return true;
}
/** {@inheritDoc} */
@Override public byte directType() {
return 44;
}
/** {@inheritDoc} */
@Override public String toString() {
return S.toString(GridDhtPartitionSupplyMessage.class, this,
"size", size(),
"parts", infos.keySet(),
"super", super.toString());
}
} |
import * as md5 from 'apache-md5';
import { Utils } from '../utils';
export interface IElasticScript {
lang: string;
source: string;
}
export interface INodeCreateOpts {
interval?: number;
kibana_network_tag?: string;
kibana_users?: { [username: string]: string };
kso?: any[];
scripts?: { [name: string]: IElasticScript };
sm?: object;
verbose?: boolean;
}
export class NodeCreateOpts implements INodeCreateOpts {
interval: number;
kibana_network_tag?: string;
kibana_users: { [username: string]: string };
kso: any[];
scripts: { [name: string]: IElasticScript };
sm: object;
verbose: boolean;
constructor(v: INodeCreateOpts) {
const o = v || {};
this._set_interval(o);
this._set_kibana_network_tag(o);
this._set_kibana_users(o);
this._set_kso(o);
this._set_scripts(o);
this._set_sm(o);
this._set_verbose(o);
}
get_kibana_users_env_value() {
const usernames = Object.keys(this.kibana_users);
if (usernames.length) {
const tokens = usernames.map(username => {
const pass = this.kibana_users[username];
const hash = md5(pass);
return `${username}:${hash}`;
});
return Buffer.from(tokens.join(' ')).toString('base64');
}
}
private _set_interval(v: INodeCreateOpts) {
if (Utils.is_defined(v.interval) &&
(!Utils.is_number(v.interval) || (<number> v.interval < 1000))) {
throw Error('interval must be >= 1000');
}
this.interval = v.interval ? v.interval : 2000;
}
private _set_kibana_network_tag(v: INodeCreateOpts) {
if (v.kibana_network_tag && (!Utils.is_string(v.kibana_network_tag) ||
!v.kibana_network_tag || / /.test(v.kibana_network_tag))) {
throw Error(`${v.kibana_network_tag} is not a valid name for a gce network tag.`);
} else if (v.kibana_network_tag) {
this.kibana_network_tag = v.kibana_network_tag;
}
}
private _set_kibana_users(v: INodeCreateOpts) {
this.kibana_users = {};
if (v.kibana_users) {
for (const username in v.kibana_users) {
const pass = v.kibana_users[username];
if (!username || (username.length > 255)) {
throw Error('username must be a string <= 255 characters. see apache htpasswd.');
} else if (!Utils.is_string(pass) || !pass || (pass.length > 255)) {
throw Error('password must be a string <= 255 characters. see apache htpasswd.');
}
this.kibana_users[username] = pass;
}
}
}
private _set_kso(v: INodeCreateOpts) {
if (Utils.is_array(v.kso)) {
this.kso = <[]> v.kso;
} else if (Utils.is_defined(v.kso)) {
throw Error('kibana saved objects must be an array.');
} else {
this.kso = [];
}
}
private _set_scripts(v: INodeCreateOpts) {
if (Utils.is_object(v.scripts)) {
this.scripts = <{}> v.scripts;
} else if (Utils.is_defined(v.scripts)) {
throw Error('scripts must be an object.');
} else {
this.scripts = {};
}
}
private _set_sm(v: INodeCreateOpts) {
if (Utils.is_object(v.sm)) {
this.sm = <object> v.sm;
} else if (Utils.is_defined(v.sm)) {
throw Error('settings and mappings must be an object.');
} else {
this.sm = {};
}
}
private _set_verbose(v: INodeCreateOpts) {
this.verbose = !!v.verbose;
}
}
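// A minimal construction sketch (hypothetical values; passwords in
// kibana_users are hashed with apache-md5 when exported):
//
//   const opts = new NodeCreateOpts({
//     interval: 5000,
//     kibana_users: { admin: 'changeme' },
//   });
//   const env = opts.get_kibana_users_env_value(); // base64 "user:hash" pairs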
|
/**
 * Authorization exception with HTTP response code 401
*
* @author DreamJM
* @see RequestException
*/
public class UnauthorizedException extends RequestException {
public UnauthorizedException(int code) {
super(code);
}
public UnauthorizedException(int code, Object[] params) {
super(code, params);
}
public UnauthorizedException(int code, String msg) {
super(code, msg);
}
public UnauthorizedException(int code, Throwable cause) {
super(code, cause);
}
public UnauthorizedException(int code, Throwable cause, Object[] params) {
super(code, cause, params);
}
public UnauthorizedException(int code, String msg, Throwable throwable) {
super(code, msg, throwable);
}
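    // Usage sketch (hypothetical error-code constant and message):
    //
    //   throw new UnauthorizedException(40101, "access token expired");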
@Override
public HttpStatus getStatus() {
return HttpStatus.UNAUTHORIZED;
}
} |
// Defines a label.
// After calling this function, do not call free() on the label name
// (ownership passes to the symbol table, or it is freed here).
int insert_label(char* name, int address, int line, uchar type) {
int i = -1;
if ((i = label_index(name)) != -1) {
if (symtab[i].usage != LAB_UNDEFINED)
parsererror("Label already defined in line %d.",symtab[i].line);
else {
symtab[i].address = address;
symtab[i].line = line;
symtab[i].type = type;
symtab[i].usage = LAB_USED;
}
free(name);
}
else {
symtab[symtab_cnt].name = name;
symtab[symtab_cnt].address = address;
symtab[symtab_cnt].line = line;
symtab[symtab_cnt].type = type;
symtab[symtab_cnt].usage = LAB_DEFINED;
i = symtab_cnt++;
}
return i;
} |
/**
* Created by Administrator on 18/04/16.
*/
@Entity
public class UserEntity implements Serializable {
@Id
@GenericGenerator(name = "uuid",strategy = "uuid")
@GeneratedValue(generator = "uuid")
private String id;
private String account;
private String name;
private Instant lastLoginTime;
private Instant createTime;
private String lastLoginIp;
private String password;
public String getId() {
return id;
}
public UserEntity setId(String id) {
this.id = id;
return this;
}
public String getAccount() {
return account;
}
public UserEntity setAccount(String account) {
this.account = account;
return this;
}
public String getName() {
return name;
}
public UserEntity setName(String name) {
this.name = name;
return this;
}
public Instant getLastLoginTime() {
return lastLoginTime;
}
public UserEntity setLastLoginTime(Instant lastLoginTime) {
this.lastLoginTime = lastLoginTime;
return this;
}
public Instant getCreateTime() {
return createTime;
}
public UserEntity setCreateTime(Instant createTime) {
this.createTime = createTime;
return this;
}
public String getLastLoginIp() {
return lastLoginIp;
}
public UserEntity setLastLoginIp(String lastLoginIp) {
this.lastLoginIp = lastLoginIp;
return this;
}
public String getPassword() {
return password;
}
public UserEntity setPassword(String password) {
this.password = password;
return this;
}
} |
# coding:utf-8
import sys
import math
import time
#import numpy as np
import collections
from collections import deque
import queue
import copy
#X = str(input()).split()
#a = [int(x) for x in input().split()]
HW = str(input()).split()
H = int(HW[0])
W = int(HW[1])
N = int(input())
Gr = [[0]*W for i in range(H)]
A = [int(x) for x in input().split()]
countH = 0
countW = 0
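# Paint A[i] cells with color i+1 in row-major order; combined with the
# snake-order printout below, each color's cells stay edge-connected.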
for i in range(N):
for j in range(A[i]):
Gr[countH][countW] = i+1
countW += 1
if(countW==W):
countH += 1
countW = 0
#print(Gr)
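# Print rows in boustrophedon (snake) order: even rows left-to-right,
# odd rows reversed.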
for i in range(H):
if(i%2 == 0):
L=[str(a) for a in Gr[i]]
L=' '.join(L)
print(L)
else:
L=[str(a) for a in reversed(Gr[i])]
L=' '.join(L)
print(L)
|
from oldowan.mtconvert import seq2sites
from oldowan.mtconvert import sites2seq
from oldowan.mtconvert import str2sites
from oldowan.polymorphism import Polymorphism
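# Each test below round-trips one haplotype: parse the site string, rebuild
# the control-region sequence for positions 16000-16569, then re-derive the
# sites from that sequence and assert they match the originals.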
def test_haplotype_1810():
sites = str2sites('16093C 16183d 16193.1C 16193.2C 16249C')
seq = sites2seq(sites, region=range(16000,16570))
rts = seq2sites(seq) # rts: round trip sites
print 'EXP: %s' % sites
print 'OBS: %s' % rts
assert sites == rts
def test_haplotype_2236():
sites = str2sites('16126C 16163G 16185.1T 16185.2T 16189d 16294T 16519C')
seq = sites2seq(sites, region=range(16000,16570))
rts = seq2sites(seq) # rts: round trip sites
print 'EXP: %s' % sites
print 'OBS: %s' % rts
assert sites == rts
def test_haplotype_2911():
sites = str2sites('16051G 16129C 16182d 16183d 16193.1C 16193.2C 16362C 16519C')
seq = sites2seq(sites, region=range(16000,16570))
rts = seq2sites(seq) # rts: round trip sites
print 'EXP: %s' % sites
print 'OBS: %s' % rts
assert sites == rts
def test_haplotype_3070():
sites = str2sites('16093C 16183d 16184d 16191.1T 16191.2T 16270T')
seq = sites2seq(sites, region=range(16000,16570))
rts = seq2sites(seq) # rts: round trip sites
print 'EXP: %s' % sites
print 'OBS: %s' % rts
assert sites == rts
def test_haplotype_3805():
sites = str2sites('16183d 16193.1C 16193.2C 16218T 16519C')
seq = sites2seq(sites, region=range(16000,16570))
rts = seq2sites(seq) # rts: round trip sites
print 'EXP: %s' % sites
print 'OBS: %s' % rts
assert sites == rts
def test_haplotype_4826():
sites = str2sites('16172C 16183d 16193.1C 16193.2C 16223T 16320T 16519C')
seq = sites2seq(sites, region=range(16000,16570))
rts = seq2sites(seq) # rts: round trip sites
print 'EXP: %s' % sites
print 'OBS: %s' % rts
assert sites == rts
def test_haplotype_4827():
sites = str2sites('16172C 16183d 16193.1C 16193.2C 16223T 16320T')
seq = sites2seq(sites, region=range(16000,16570))
rts = seq2sites(seq) # rts: round trip sites
print 'EXP: %s' % sites
print 'OBS: %s' % rts
assert sites == rts
|
As reporters and parents pressed Towson University for details about a phone that was discovered recording swim team members in a pool locker room in October, campus officials focused on limiting the flow of information, according to emails reviewed by The Baltimore Sun.
According to the emails provided in a public records request, a parent wrote to campus officials the day the phone was discovered, saying their daughter relayed "disturbing" information about "inappropriate behavior of the diving coach" and asking for more information. That parent followed up the next morning: "If there is any truth in this matter, I would hope I would hear something from the school before reading about it in the news."
The university provided dozens of emails to The Sun and other news organizations in response to the requests. The emails, between swim coaches and university administrators, cover the week after the phone was discovered in the women's team locker room in Burdick Hall on Oct. 16.
Maureen Mead, who was the team's diving coach, was indicted in Baltimore County Circuit Court on Nov. 23 on charges of interception of communication, peeping Tom and altering evidence. She is on paid administrative leave.
Mead's husband, head coach Pat Mead, and an assistant coach, Adrienne Phillips, are no longer listed as team coaches on the team's website. Another assistant, Jake Shrum, has been named interim head coach, and the university brought in Tim Perkins to coach the divers.
The day after the phone was discovered in the locker room, reporters began calling and emailing the university's police department and communications staff, seeking information.
All of the requests were funneled to Marina Cooper, deputy chief of staff to the university's interim president, Timothy J.L. Chandler. Cooper responded with a brief statement with few details about the incident.
Cooper sent out a second media update several days later, adding only that the recording was limited to the team locker room, which has restricted access.
"Prepare for impact — just went out," Cooper wrote to members of the communications staff after sending the update.
Ray Feldmann, a spokesman for the university, responded to remind the others to caution against speculation. "I know that may be frustrating to some who want answers in under 60 minutes because that's what happens on CSI. But that's not the reality we're dealing with," he wrote.
Feldmann added that "at some point we need to do on-camera TV interviews, but I respect that we're probably not there yet."
Cooper replied: "Yes, agreed on all fronts — and yes not there yet. Everyone has been good and quiet."
University officials also drafted a statement to members of the university's Board of Visitors and a statement that swim team members could send to their parents. Neither statement offered substantial information about the incident or the investigation.
In the days after the phone's discovery, some athletes and parents wrote emails expressing frustration.
"I do not like how this is being handled," one team member wrote to Debbie Seeberger, an official in the president's office who had met with the team.
Another team member wrote to Seeberger: "I am in the video and would like to meet with you to express my concern about the situation. ... I, along with my teammates, do not think that this is being handled in the way that it should be."
A parent wrote to Pat Mead complaining about a lack of information. "I do have concerns related to the poor communication from the University to parents. ... We recognize that there is an ongoing investigation, however, communication is needed. Parents should not have to find out news from social media," the parent wrote.
Some swim team parents were supportive.
"We are thinking of you, Maureen and the kids," one parent wrote to Pat Mead. "I am sure that taking care of your family is not easy right now and we are so sorry that this is happening."
Another wrote to athletic director Tim Leonard: "Pat is known to be a no-nonsense coach who expects the most from his swimmers and holds them to a high standard. ... He works the kids very hard and my daughter believes this is how he can take average swimmers and make them great swimmers."
On Friday, the university named Kim E. Schatzel, previously of Eastern Michigan University, as its new president.
[email protected]
twitter.com/pwoodreporter |
/**
* For compatibility with Java 6, where Socket isn't Closeable
*/
public static void closeQuietly(Socket closeable) {
try {
if (closeable != null) {
closeable.close();
}
        } catch (IOException ioe) {
            // Deliberately ignored: "quietly" means close failures are swallowed.
        }
} |
    def send(self) -> Dict:
        """Validate the accumulated tag operations and send them to the API."""
payload = {}
if not self.audience:
raise ValueError("An audience is required to modify tags")
payload["audience"] = self.audience
if not self.add_group and not self.remove_group and not self.set_group:
raise ValueError("An add, remove, or set field was not set")
if self.set_group:
if self.add_group or self.remove_group:
raise ValueError(
'A "set" tag request cannot contain "add" or "remove" fields'
)
payload["set"] = self.set_group
if self.add_group:
payload["add"] = self.add_group
if self.remove_group:
payload["remove"] = self.remove_group
body = json.dumps(payload)
response = self._airship._request(
"POST", body, self.url, "application/json", version=3
)
return response.json() |
import React from 'react';
const Footer = () => {
return (
<footer className="p-10 footer bg-base-200 text-base-content footer-center">
<div className="grid grid-flow-col gap-4">
<a href="/" className="link link-hover">
About us
</a>
<a href="/" className="link link-hover">
Contact
</a>
<a href="/" className="link link-hover">
Jobs
</a>
<a href="/" className="link link-hover">
Press kit
</a>
</div>
<div>
<div className="grid grid-flow-col gap-4">
<a href="/">
<svg
xmlns="http://www.w3.org/2000/svg"
width={24}
height={24}
viewBox="0 0 24 24"
className="fill-current"
>
<path d="M24 4.557c-.883.392-1.832.656-2.828.775 1.017-.609 1.798-1.574 2.165-2.724-.951.564-2.005.974-3.127 1.195-.897-.957-2.178-1.555-3.594-1.555-3.179 0-5.515 2.966-4.797 6.045-4.091-.205-7.719-2.165-10.148-5.144-1.29 2.213-.669 5.108 1.523 6.574-.806-.026-1.566-.247-2.229-.616-.054 2.281 1.581 4.415 3.949 4.89-.693.188-1.452.232-2.224.084.626 1.956 2.444 3.379 4.6 3.419-2.07 1.623-4.678 2.348-7.29 2.04 2.179 1.397 4.768 2.212 7.548 2.212 9.142 0 14.307-7.721 13.995-14.646.962-.695 1.797-1.562 2.457-2.549z" />
</svg>
</a>
<a href="/">
<svg
xmlns="http://www.w3.org/2000/svg"
width={24}
height={24}
viewBox="0 0 24 24"
className="fill-current"
>
<path d="M19.615 3.184c-3.604-.246-11.631-.245-15.23 0-3.897.266-4.356 2.62-4.385 8.816.029 6.185.484 8.549 4.385 8.816 3.6.245 11.626.246 15.23 0 3.897-.266 4.356-2.62 4.385-8.816-.029-6.185-.484-8.549-4.385-8.816zm-10.615 12.816v-8l8 3.993-8 4.007z" />
</svg>
</a>
<a href="/">
<svg
xmlns="http://www.w3.org/2000/svg"
width={24}
height={24}
viewBox="0 0 24 24"
className="fill-current"
>
<path d="M9 8h-3v4h3v12h5v-12h3.642l.358-4h-4v-1.667c0-.955.192-1.333 1.115-1.333h2.885v-5h-3.808c-3.596 0-5.192 1.583-5.192 4.615v3.385z" />
</svg>
</a>
</div>
</div>
<div>
<p>Copyright © 2021 - All rights reserved by CinematicCow</p>
</div>
</footer>
);
};
export default Footer;
|
/**
* Contains details of machines detected at a given cloud (ComputeService),
* and records claims made against those machines via this pool.
* <p>
* Machine instances themselves are persisted and rescanned as new instances of this class are created.
* Claims however are specific to this instance of the class, i.e. <b>not</b> persisted.
* <p>
* This class is believed to be thread-safe.
* Refreshes to the remote detected machines are synchronized on the pool instance.
* Details of detected and claimed machines are also synchronized on the pool instance.
* (If it is necessary to claim machines whilst the pool is being rescanned,
* we can investigate a more sophisticated threading model.
 * Access to some fields is clearly independent and uses a tighter synchronization
 * strategy, e.g. templates.
* Synchronization of fields within a synch block on the class instance
* is permitted, but not the other way round,
* and synching on multiple fields is also not permitted.)
* <p>
* Callers wishing to guarantee results of e.g. ensureUnclaimed remaining available
* can synchronize on this class for the duration that they wish to have that guarantee
* (at the cost, of course, of any other threads being able to access this pool).
* <p>
 * If underlying provisioning/destroying operations fail, the pool
 * may currently be left in an unknown state.
 * If more robustness is needed this can be added.
*
* @deprecated since 0.6.0; never used in production setting, and thus of dubious value; best avoided as unlikely to be supported in future versions
*/
@Deprecated
public class MachinePool {
private static final Logger log = LoggerFactory.getLogger(MachinePool.class);
protected final ComputeService computeService;
final AtomicBoolean refreshNeeded = new AtomicBoolean(true);
final List<ReusableMachineTemplate> templates = new ArrayList<ReusableMachineTemplate>();
String poolName = null;
/** all machines detected, less those in the black list */
volatile MachineSet detectedMachines = new MachineSet();
volatile MachineSet matchedMachines = new MachineSet();
volatile MachineSet claimedMachines = new MachineSet();
volatile MachineSet blacklistedMachines = new MachineSet();
public MachinePool(ComputeService computeService) {
this.computeService = computeService;
}
protected synchronized void init() {
if (!refreshNeeded.get()) return;
refresh();
}
public void setPoolName(String poolName) {
        if (this.poolName != null)
            log.warn("Changing pool name of "+this+" (from "+this.poolName+" to "+poolName+") is discouraged.");
this.poolName = poolName;
}
/** pool name is used as a group/label by jclouds, for convenience only;
* it has no special properties for detecting matching instances
* (use explicit tags on the templates, for that).
* defaults to name of pool class and user name.
* callers should set pool name before getting, if using a custom name. */
public synchronized String getPoolName() {
if (poolName==null)
poolName = getClass().getSimpleName()+"-"+System.getProperty("user.name");
return poolName;
}
/** refreshes the pool of machines from the server (finding all instances matching the registered templates) */
public synchronized void refresh() {
refreshNeeded.set(false);
Set<? extends ComputeMetadata> computes = computeService.listNodes();
Set<NodeMetadata> nodes = new LinkedHashSet<NodeMetadata>();
for (ComputeMetadata c: computes) {
if (c instanceof NodeMetadata) {
nodes.add((NodeMetadata)c);
} else {
// TODO should we try to fetch more info?
log.warn("MachinePool "+this+" ignoring non-Node record for remote machine: "+c);
}
}
MachineSet allNewDetectedMachines = new MachineSet(nodes);
MachineSet newDetectedMachines = filterForAllowedMachines(allNewDetectedMachines);
MachineSet oldDetectedMachines = detectedMachines;
MachineSet newMatchedMachines = new MachineSet();
detectedMachines = newDetectedMachines;
MachineSet appearedMachinesIncludingBlacklist = allNewDetectedMachines.removed(oldDetectedMachines);
MachineSet appearedMachines = filterForAllowedMachines(appearedMachinesIncludingBlacklist);
if (appearedMachinesIncludingBlacklist.size()>appearedMachines.size())
if (log.isDebugEnabled()) log.debug("Pool "+this+", ignoring "+(appearedMachinesIncludingBlacklist.size()-appearedMachines.size())+" disallowed");
int matchedAppeared = 0;
for (NodeMetadata m: appearedMachines) {
if (m.getStatus() != NodeMetadata.Status.RUNNING) {
if (log.isDebugEnabled())
log.debug("Pool "+this+", newly detected machine "+m+", not running ("+m.getStatus()+")");
} else {
Set<ReusableMachineTemplate> ts = getTemplatesMatchingInstance(m);
if (!ts.isEmpty()) {
matchedAppeared++;
newMatchedMachines = newMatchedMachines.added(new MachineSet(m));
if (log.isDebugEnabled())
log.debug("Pool "+this+", newly detected machine "+m+", matches pool templates "+ts);
} else {
if (log.isDebugEnabled())
log.debug("Pool "+this+", newly detected machine "+m+", does not match any pool templates");
}
}
}
if (matchedAppeared>0) {
log.info("Pool "+this+" discovered "+matchedAppeared+" matching machines (of "+appearedMachines.size()+" total new; "+newDetectedMachines.size()+" total including claimed and unmatched)");
} else {
if (log.isDebugEnabled())
log.debug("Pool "+this+" discovered "+matchedAppeared+" matching machines (of "+appearedMachines.size()+" total new; "+newDetectedMachines.size()+" total including claimed and unmatched)");
}
matchedMachines = newMatchedMachines;
}
protected MachineSet filterForAllowedMachines(MachineSet input) {
return input.removed(blacklistedMachines);
}
// TODO template registry and claiming from a template could be a separate responsibility
protected ReusableMachineTemplate registerTemplate(ReusableMachineTemplate template) {
registerTemplates(template);
return template;
}
protected void registerTemplates(ReusableMachineTemplate ...templatesToReg) {
synchronized (templates) {
for (ReusableMachineTemplate template: templatesToReg)
templates.add(template);
}
}
protected ReusableMachineTemplate newTemplate(String name) {
return registerTemplate(new ReusableMachineTemplate(name));
}
public List<ReusableMachineTemplate> getTemplates() {
List<ReusableMachineTemplate> result;
synchronized (templates) { result = ImmutableList.copyOf(templates); }
return result;
}
/** all machines matching any templates */
public MachineSet all() {
init();
return matchedMachines;
}
/** machines matching any templates which have not been claimed */
public MachineSet unclaimed() {
init();
synchronized (this) {
return matchedMachines.removed(claimedMachines);
}
}
/** returns all machines matching the given criteria (may be claimed) */
@SuppressWarnings("unchecked")
public MachineSet all(Predicate<NodeMetadata> criterion) {
// To avoid generics complaints in callers caused by varargs, overload here
return all(new Predicate[] {criterion});
}
/** returns all machines matching the given criteria (may be claimed) */
public MachineSet all(Predicate<NodeMetadata> ...ops) {
return new MachineSet(Iterables.filter(all(), compose(ops)));
}
/** returns unclaimed machines matching the given criteria */
@SuppressWarnings("unchecked")
public MachineSet unclaimed(Predicate<NodeMetadata> criterion) {
// To avoid generics complaints in callers caused by varargs, overload here
return unclaimed(new Predicate[] {criterion});
}
/** returns unclaimed machines matching the given criteria */
public MachineSet unclaimed(Predicate<NodeMetadata> ...criteria) {
return new MachineSet(Iterables.filter(unclaimed(), compose(criteria)));
}
/** creates machines if necessary so that this spec exists (may already be claimed however)
* returns a set of all matching machines, guaranteed non-empty
* (but possibly some are already claimed) */
public MachineSet ensureExists(ReusableMachineTemplate template) {
return ensureExists(1, template);
}
public synchronized void addToBlacklist(MachineSet newToBlacklist) {
setBlacklist(blacklistedMachines.added(newToBlacklist));
}
/** replaces the blacklist set; callers should generally perform a refresh()
* afterwards, to trigger re-detection of blacklisted machines
*/
public synchronized void setBlacklist(MachineSet newBlacklist) {
blacklistedMachines = newBlacklist;
detectedMachines = detectedMachines.removed(blacklistedMachines);
matchedMachines = matchedMachines.removed(blacklistedMachines);
}
/** creates machines if necessary so that this spec exists (may already be claimed however);
* returns a set of all matching machines, of size at least count (but possibly some are already claimed).
* (the pool can change at any point, so this set is a best-effort but may be out of date.
* see javadoc comments on this class.) */
public MachineSet ensureExists(int count, ReusableMachineTemplate template) {
MachineSet current;
current = all(matching(template));
if (current.size() >= count)
return current;
//have to create more
MachineSet moreNeeded = create(count-current.size(), template);
return current.added(moreNeeded);
}
/** creates machines if necessary so that this spec can subsequently be claimed;
* returns all such unclaimed machines, guaranteed to be non-empty.
* (the pool can change at any point, so this set is a best-effort but may be out of date.
* see javadoc comments on this class.) */
public MachineSet ensureUnclaimed(ReusableMachineTemplate template) {
return ensureUnclaimed(1, template);
}
/** creates machines if necessary so that this spec can subsequently be claimed;
* returns a set of at least count unclaimed machines */
public MachineSet ensureUnclaimed(int count, ReusableMachineTemplate template) {
MachineSet current;
current = unclaimed(matching(template));
if (current.size() >= count)
return current;
//have to create more
MachineSet moreNeeded = create(count-current.size(), template);
return current.added(moreNeeded);
}
public Set<ReusableMachineTemplate> getTemplatesMatchingInstance(NodeMetadata nm) {
Set<ReusableMachineTemplate> result = new LinkedHashSet<ReusableMachineTemplate>();
for (ReusableMachineTemplate t: getTemplates()) {
if (matching(t).apply(nm)) {
result.add(t);
}
}
return result;
}
/** creates the given number of machines of the indicated template */
public MachineSet create(int count, ReusableMachineTemplate template) {
Set<? extends NodeMetadata> nodes;
try {
Template t = template.newJcloudsTemplate(computeService);
if (log.isDebugEnabled()) log.debug("Creating "+count+" new instances of "+t);
nodes = computeService.createNodesInGroup(getPoolName(), count, t);
} catch (RunNodesException e) {
throw Throwables.propagate(e);
}
MachineSet result = new MachineSet(nodes);
registerNewNodes(result, template);
return result;
}
protected void registerNewNodes(MachineSet result, ReusableMachineTemplate template) {
for (NodeMetadata m: result) {
Set<ReusableMachineTemplate> ts = getTemplatesMatchingInstance(m);
if (ts.isEmpty()) {
log.error("Pool "+this+", created machine "+m+" from template "+template+", but no pool templates match!");
} else {
if (log.isDebugEnabled())
log.debug("Pool "+this+", created machine "+m+" from template "+template+", matching templates "+ts);
}
}
synchronized (this) {
detectedMachines = detectedMachines.added(result);
matchedMachines = matchedMachines.added(result);
}
}
/** claims the indicated number of machines with the indicated spec, creating if necessary */
public MachineSet claim(int count, ReusableMachineTemplate t) {
init();
Set<NodeMetadata> claiming = new LinkedHashSet<NodeMetadata>();
while (claiming.size() < count) {
MachineSet mm = ensureUnclaimed(count - claiming.size(), t);
for (NodeMetadata m : mm) {
synchronized (this) {
if (claiming.size() < count && !claimedMachines.contains(m)) {
claiming.add(m);
claimedMachines = claimedMachines.added(new MachineSet(m));
}
}
}
}
MachineSet result = new MachineSet(claiming);
return result;
}
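    // Typical lifecycle sketch (hypothetical; assumes a ReusableMachineTemplate
    // has been registered with this pool via registerTemplate()):
    //
    //   MachinePool pool = new MachinePool(computeService);
    //   MachineSet claimed = pool.claim(2, template); // provisions if required
    //   // ... use the machines ...
    //   pool.unclaim(claimed);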
/** claims the indicated set of machines;
* throws exception if cannot all be claimed;
* returns the set passed in if successful */
public MachineSet claim(MachineSet set) {
init();
synchronized (this) {
MachineSet originalClaimed = claimedMachines;
claimedMachines = claimedMachines.added(set);
MachineSet newlyClaimed = claimedMachines.removed(originalClaimed);
if (newlyClaimed.size() != set.size()) {
//did not claim all; unclaim and fail
claimedMachines = originalClaimed;
MachineSet unavailable = set.removed(newlyClaimed);
throw new IllegalArgumentException("Could not claim all requested machines; failed to claim "+unavailable);
}
return newlyClaimed;
}
}
public int unclaim(MachineSet set) {
init();
synchronized (this) {
MachineSet originalClaimed = claimedMachines;
claimedMachines = claimedMachines.removed(set);
return originalClaimed.size() - claimedMachines.size();
}
}
public int destroy(final MachineSet set) {
init();
synchronized (this) {
detectedMachines = detectedMachines.removed(set);
matchedMachines = matchedMachines.removed(set);
claimedMachines = claimedMachines.removed(set);
}
Set<? extends NodeMetadata> destroyed = computeService.destroyNodesMatching(new Predicate<NodeMetadata>() {
@Override
public boolean apply(NodeMetadata input) {
return set.contains(input);
}
});
synchronized (this) {
//in case a rescan happened while we were destroying
detectedMachines = detectedMachines.removed(set);
matchedMachines = matchedMachines.removed(set);
claimedMachines = claimedMachines.removed(set);
}
return destroyed.size();
}
} |
Thrombotic Microangiopathy and Hypothermia in an HIV-positive Patient: Importance of Cytomegalovirus Infection
Cytomegalovirus (CMV) infection is a well-known and frequent complication in HIV-infected patients. We report a case of thrombotic microangiopathy and severe hypothermia in a 21-year-old HIV-positive woman. CMV infection was diagnosed by PCR. The symptoms resolved completely after ganciclovir therapy, which supports the role of CMV as the causative agent.
# Targets Bokeh (assumed from the Span/BoxAnnotation names); the import and
# the style defaults below were missing from the original and are added so
# the function is self-contained.
from bokeh.models import BoxAnnotation, Span

def render_ranges(dim_minimums, dim_maximums,
                  line_width=1, line_color="black",
                  fill_alpha=0.1, fill_color="navy"):
renderers = []
left = None
right = None
bottom = None
top = None
if dim_minimums[0] is not None:
left = dim_minimums[0]
renderers.append(
Span(location=left, dimension='height', line_width=line_width, line_color=line_color)
)
if dim_maximums[0] is not None:
right = dim_maximums[0]
renderers.append(
Span(location=right, dimension='height', line_width=line_width, line_color=line_color)
)
if len(dim_minimums) > 1:
if dim_minimums[1] is not None:
bottom = dim_minimums[1]
renderers.append(
Span(location=bottom, dimension='width', line_width=line_width, line_color=line_color)
)
if dim_maximums[1] is not None:
top = dim_maximums[1]
renderers.append(
Span(location=top, dimension='width', line_width=line_width, line_color=line_color)
)
mid_box = BoxAnnotation(
left=left,
right=right,
bottom=bottom,
top=top,
fill_alpha=fill_alpha,
fill_color=fill_color
)
renderers.append(mid_box)
return renderers |
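A minimal call-site sketch, assuming the Bokeh import and the default style arguments added above:

from bokeh.plotting import figure, show

p = figure(width=400, height=400)
# Shade the region bounded by x in [1, 4] and y in [0, 2].
for renderer in render_ranges(dim_minimums=[1, 0], dim_maximums=[4, 2]):
    p.add_layout(renderer)
show(p)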
import matplotlib.pyplot as plt
import matplotlib.colors as plt_colors
import numpy as np
import os
import glob
import multiprocessing
from dc2g.util import get_object_goal_names, get_training_testing_houses
dir_path, _ = os.path.split(os.path.dirname(os.path.realpath(__file__)))
dataset = "house3d"
# dataset = "driveways_icra19"
object_goal_names = get_object_goal_names(dataset)
room_goal_names = []
training_houses, testing_houses = get_training_testing_houses(dataset)
def apply_mask(world_id, mode):
map_name_and_id = "world" + world_id
mask_filenames = "{dir_path}/training_data/{dataset}/masks/{mode}/transformed/*.png".format(dataset=dataset, dir_path=dir_path, mode=mode)
num_masks = len(glob.glob(mask_filenames))
semantic_filename = "{dir_path}/training_data/{dataset}/full_semantic/{mode}/{map_name_and_id}.png".format(dataset=dataset, map_name_and_id=map_name_and_id, dir_path=dir_path, mode=mode)
if not os.path.isfile(semantic_filename):
return
semantic_array = plt.imread(semantic_filename)
for object_goal_name in object_goal_names:
c2g_filename = "{dir_path}/training_data/{dataset}/full_c2g/{mode}/{map_name_and_id}-{goal_name}.png".format(dataset=dataset, map_name_and_id=map_name_and_id, dir_path=dir_path, mode=mode, goal_name=object_goal_name)
if not os.path.isfile(c2g_filename):
continue
c2g_array = plt.imread(c2g_filename)
for mask_index in range(num_masks):
mask_id = str(mask_index).zfill(3)
mask_filename = "{dir_path}/training_data/{dataset}/masks/{mode}/transformed/{mask_id}.png".format(dataset=dataset, mode=mode, mask_id=mask_id, dir_path=dir_path)
mask = plt.imread(mask_filename)
actually_apply_mask(c2g_array.copy(), semantic_array.copy(), mask, mask_id, dataset, object_goal_name, mode, map_name_and_id)
print(map_name_and_id)
def actually_apply_mask(c2g_array, semantic_array, mask, mask_id, dataset, object_goal_name, mode, map_name_and_id):
masked_semantic_filename = "{dir_path}/training_data/{dataset}/masked_semantic/{mode}/{map_name_and_id}_{mask_id}.png".format(dataset=dataset, mode=mode, map_name_and_id=map_name_and_id, mask_id=mask_id, dir_path=dir_path)
masked_c2g_filename = "{dir_path}/training_data/{dataset}/masked_c2g/{mode}/{map_name_and_id}_{mask_id}-{goal_name}.png".format(goal_name=object_goal_name, dataset=dataset, mode=mode, map_name_and_id=map_name_and_id, mask_id=mask_id, dir_path=dir_path)
if not os.path.isfile(masked_semantic_filename):
semantic_array[mask[:,:,0] == 1] = 0
semantic_array = semantic_array[:,:,:3]
plt.imsave(masked_semantic_filename, semantic_array)
if not os.path.isfile(masked_c2g_filename):
c2g_array[mask[:,:,0] == 1] = 0 # mask out the unobserved regions
c2g_array = c2g_array[:,:,:3]
# re-scale the gray intensity of the un-masked region
hsv = plt_colors.rgb_to_hsv(c2g_array)
grayscale_inds = np.where(hsv[:, :, 1] < 0.3)
max_intensity = np.max(c2g_array[grayscale_inds])
if max_intensity > 0:
c2g_array[grayscale_inds] /= max_intensity
plt.imsave(masked_c2g_filename, c2g_array)
def apply_mask_training(world_id):
apply_mask(world_id, "train")
def apply_mask_testing(world_id):
apply_mask(world_id, "test")
if __name__ == "__main__":
    pool = multiprocessing.Pool(max(2 * multiprocessing.cpu_count() - 2, 1))  # oversubscribe for I/O-bound work, but always keep at least one worker
print("\n\n -------------------- \n Applying Training masks \n -------------------- \n\n")
pool.map(apply_mask_training, training_houses)
print("\n\n -------------------- \n Applying Testing masks \n -------------------- \n\n")
pool.map(apply_mask_testing, testing_houses)
print("--- All done ---") |
// RubetekIOS-CPP.framework/Versions/A/Headers/libnet/rubetek/utility/load_binary_file.hpp
#pragma once
#include <string>
#include <fstream>
#include <stdexcept> // for std::runtime_error (was missing)
#include <rubetek/essence/buffer.hpp>
namespace rubetek {
namespace utility {
buffer load_binary_file(std::string const& file_name);
}}
namespace rubetek {
namespace utility {
inline buffer load_binary_file(std::string const& file_name)
{
buffer buf;
buf.reserve(10 * 1024 * 1024);
std::ifstream ifile(file_name.c_str(), std::ios::binary);
if (!ifile.good()) throw std::runtime_error("can't open file: " + file_name);
buffer block(1024 * 1024);
do
{
ifile.read(reinterpret_cast<char*>(block.data()), block.size());
buf.insert(buf.end(), block.begin(), block.begin() + static_cast<std::size_t>(ifile.gcount()));
}
while (static_cast<buffer::size_type>(ifile.gcount()) == block.size());
    return buf; // plain return enables NRVO; std::move here would inhibit copy elision
}
}}
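A brief usage sketch; the file name is illustrative, and the include path plus the assumption that buffer is the vector-like type from rubetek/essence/buffer.hpp (exposing size()) are not confirmed by this header:

#include <exception>
#include <iostream>
#include <rubetek/utility/load_binary_file.hpp> // the header above; path assumed

int main()
{
    try
    {
        rubetek::buffer data = rubetek::utility::load_binary_file("firmware.bin");
        std::cout << "read " << data.size() << " bytes\n";
    }
    catch (std::exception const& e)
    {
        std::cerr << e.what() << '\n';
        return 1;
    }
}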
|
/*
* Copyright 2017 The University of Oklahoma.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "hudlatencygraphimpl.h"
#include "qmath.h"
namespace Soro {
namespace MissionControl {
HudLatencyGraphImpl::HudLatencyGraphImpl(QQuickItem *parent) : QQuickPaintedItem(parent)
{
_mode = "vertical";
START_TIMER(_updateTimerId, 20);
}
QString HudLatencyGraphImpl::mode() const {
return _mode;
}
void HudLatencyGraphImpl::setMode(QString mode) {
_mode = mode;
}
int HudLatencyGraphImpl::latency() const {
return _latency;
}
void HudLatencyGraphImpl::setLatency(int latency) {
if (qAbs(_latency - latency) > _latencyThreshold) {
_latency = latency;
}
}
float HudLatencyGraphImpl::value() const {
return _value;
}
void HudLatencyGraphImpl::setValue(float value) {
_value = value;
}
int HudLatencyGraphImpl::latencyThreshold() const {
return _latencyThreshold;
}
void HudLatencyGraphImpl::setLatencyThreshold(int threshold) {
_latencyThreshold = threshold;
}
void HudLatencyGraphImpl::paint(QPainter *painter) {
qint64 now = QDateTime::currentDateTime().toMSecsSinceEpoch();
QMap<qint64, float>::const_iterator i = _history.constBegin();
if (_mode == "horizontal") {
int blobSize = height() / 4;
// Draw bottom blob
int startBlobY = (height() / 2) + ((height() / 2 - blobSize / 2) * _value);
painter->setBrush(QBrush(Qt::white));
painter->setPen(Qt::NoPen);
painter->setRenderHint(QPainter::Antialiasing);
painter->drawEllipse(0,
startBlobY - (blobSize / 2),
blobSize,
blobSize);
// Draw top blob
float endValue = nearestValue(QDateTime::currentDateTime().toMSecsSinceEpoch());
int endBlobY = (height() / 2) + ((height() / 2 - blobSize / 2) * endValue);
painter->setBrush(QBrush(Qt::white));
painter->setPen(Qt::NoPen);
painter->setRenderHint(QPainter::Antialiasing);
painter->drawEllipse(width() - blobSize,
endBlobY - (blobSize / 2),
blobSize,
blobSize);
// Draw graph
QPen pen;
pen.setColor(QColor("#88ffffff"));
pen.setWidth(blobSize / 5);
painter->setPen(pen);
painter->setBrush(Qt::NoBrush);
QPainterPath path;
path.moveTo(width() - blobSize / 2, endBlobY);
while (i != _history.constEnd()) {
path.lineTo(width() - (blobSize / 2) - ((width() - 2 * blobSize) * ((float)(i.key() - now) / (float)_latency)),
(height() / 2) + (height() / 2 - blobSize / 2) * i.value());
i++;
}
path.lineTo(0, startBlobY);
painter->drawPath(path);
}
else if (_mode == "vertical") {
int blobSize = width() / 4;
// Draw bottom blob
int startBlobX = (width() / 2) + ((width() / 2 - blobSize / 2) * _value);
painter->setBrush(QBrush(Qt::white));
painter->setPen(Qt::NoPen);
painter->setRenderHint(QPainter::Antialiasing);
painter->drawEllipse(startBlobX - (blobSize / 2),
height() - blobSize,
blobSize,
blobSize);
// Draw top blob
float endValue = nearestValue(QDateTime::currentDateTime().toMSecsSinceEpoch());
int endBlobX = (width() / 2) + ((width() / 2 - blobSize / 2) * endValue);
painter->setBrush(QBrush(Qt::white));
painter->setPen(Qt::NoPen);
painter->setRenderHint(QPainter::Antialiasing);
painter->drawEllipse(endBlobX - (blobSize / 2),
0,
blobSize,
blobSize);
// Draw graph
QPen pen;
pen.setColor(QColor("#88ffffff"));
pen.setWidth(blobSize / 5);
painter->setPen(pen);
painter->setBrush(Qt::NoBrush);
QPainterPath path;
path.moveTo(endBlobX, blobSize / 2);
while (i != _history.constEnd()) {
path.lineTo((width() / 2) + (width() / 2 - blobSize / 2) * i.value(),
(blobSize / 2) + ((height() - 2 * blobSize) * ((float)(i.key() - now) / (float)_latency)));
i++;
}
path.lineTo(startBlobX, height() - blobSize / 2);
painter->drawPath(path);
}
}
float HudLatencyGraphImpl::nearestValue(qint64 time) {
QList<qint64> keys = _history.keys();
int left = 0, right = keys.length() - 1;
if (right == -1) return 0;
while (right - left > 1) {
qint64 m = keys.at((left + right) / 2);
if (m > time) {
right = (left + right) / 2;
}
else if (m < time) {
left = (left + right) / 2;
}
else return _history.value(m);
}
qint64 ml = keys.at(left);
qint64 mr = keys.at(right);
if (qAbs(ml - time) > qAbs(mr - time)) {
return _history.value(mr);
}
return _history.value(ml);
}
void HudLatencyGraphImpl::timerEvent(QTimerEvent *e) {
if (e->timerId() == _updateTimerId) {
// Prune value map
qint64 now = QDateTime::currentDateTime().toMSecsSinceEpoch();
// Remove values in the past
QMap<qint64, float>::iterator i = _history.begin();
while ((i != _history.end()) && (i.key() <= now)) {
i = _history.erase(i);
}
// Remove values farther in the future than our latency
qint64 lastKey;
while (!_history.empty() && ((lastKey = _history.lastKey()) > now + _latency)) {
_history.remove(lastKey);
}
_history.insert((now + _latency), _value);
// Invalidate
update();
}
}
} // namespace MissionControl
} // namespace Soro
|
//package Practise.Round267Div2;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.StringTokenizer;
public class FedorAndEssay {
static HashMap<String,Integer> map;
static ArrayList<Integer> g[];
static ArrayList<Node> how;
static Node dp[];
@SuppressWarnings("unchecked")
public static void main(String args[] ) throws Exception {
BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
PrintWriter w = new PrintWriter(System.out);
int m = Integer.parseInt(br.readLine()),count = -1;
map = new HashMap<String,Integer>();
how = new ArrayList<Node>();
int id[] = new int[m];
StringTokenizer st1 = new StringTokenizer(br.readLine());
for(int i=0;i<m;i++){
String nxt = st1.nextToken().toLowerCase();
if(map.containsKey(nxt))
id[i] = map.get(nxt);
else{
map.put(nxt, ++count);
id[i] = count;
int r = 0;
int s = nxt.length();
for(int j=0;j<s;j++)
if(nxt.charAt(j) == 'r')
r++;
how.add(new Node(r,s,count));
}
}
g = new ArrayList[300001];
for(int i=0;i<g.length;i++)
g[i] = new ArrayList<Integer>();
int n = Integer.parseInt(br.readLine());
for(int i=0;i<n;i++){
StringTokenizer st2 = new StringTokenizer(br.readLine());
String u = st2.nextToken().toLowerCase();
String v = st2.nextToken().toLowerCase();
int l,r;
if(map.containsKey(u))
l = map.get(u);
else{
map.put(u, ++count);
l = count;
int s = u.length();
int cc = 0;
for(int j=0;j<s;j++)
if(u.charAt(j) == 'r')
cc++;
how.add(new Node(cc,s,count));
}
if(map.containsKey(v))
r = map.get(v);
else{
map.put(v, ++count);
r = count;
int s = v.length();
int cc = 0;
for(int j=0;j<s;j++)
if(v.charAt(j) == 'r')
cc++;
how.add(new Node(cc,s,count));
}
g[r].add(l);
}
long rans = 0, lans = 0;
dp = new Node[count+1];
Collections.sort(how);
for(int i=0;i<=count;i++){
if(dp[how.get(i).id] == null){
dp[how.get(i).id] = how.get(i).copy();
dfs(how.get(i),how.get(i).id);
}
}
for(int i=0;i<m;i++){
rans += dp[id[i]].rCount;
lans += dp[id[i]].length;
}
w.println(rans + " " + lans);
w.close();
}
static void dfs(Node root,int curr){
int s = g[curr].size();
for(int i=0;i<s;i++){
if(dp[g[curr].get(i)] == null){
dp[g[curr].get(i)] = root.copy();
dp[g[curr].get(i)].id = g[curr].get(i);
dfs(root,g[curr].get(i));
}
}
}
static public class Node implements Comparable<Node>{
int rCount,length,id;
Node(int r,int l,int i){
rCount = r;
length = l;
id = i;
}
public int compareTo(Node o){
if(rCount != o.rCount)
return Integer.compare(rCount, o.rCount);
return Integer.compare(length, o.length);
}
public Node copy(){
return new Node(rCount,length,id);
}
public String toString(){
return id + " " + rCount + " " + length;
}
}
} |
/**
* Created by Cicinnus on 2017/1/27.
*/
public class WaitMovieMultiAdapter extends BaseMultiItemQuickAdapter<WaitMovieBean.DataBean.ComingBean, BaseViewHolder> {
private TrailerRecommendAdapter mTrailerRecommendAdapter;
public WaitMovieMultiAdapter() {
super(null);
addItemType(BaseConstant.TYPE_WAIT_DIVIDER,R.layout.layout_wait_movie_divider);
addItemType(BaseConstant.TYPE_WAIT_NORMAL,R.layout.item_wait_movie);
addItemType(BaseConstant.TYPE_WAIT_TRAILER,R.layout.layout_wait_movie_trailer_recommend);
}
public void setTrailerAdapter(TrailerRecommendAdapter adapter){
mTrailerRecommendAdapter = adapter;
}
@Override
protected void convert(BaseViewHolder helper, WaitMovieBean.DataBean.ComingBean item) {
switch (helper.getItemViewType()) {
case BaseConstant.TYPE_WAIT_NORMAL:
                // The image URL cannot be used as-is and must be transformed first
                String originUrl = item.getImg();
                String imgUrl = originUrl.replace("/w.h/", "/") + "@171w_240h_1e_1c_1l"; // the suffix encodes the requested image size
GlideManager.loadImage(mContext, imgUrl, (ImageView) helper.getView(R.id.iv_wait_movie));
helper.setText(R.id.tv_wait_movie_name, item.getNm())
.setText(R.id.tv_wait_movie_desc, item.getScm())
.setText(R.id.tv_wait_movie_wish, String.format("%s人想看", item.getWish()))
.setText(R.id.tv_wait_movie_major, String.format("主演:%s", item.getStar()));
TextView tv_wish = helper.getView(R.id.tv_wait_movie_wish);
Spannable spannable = new SpannableString(tv_wish.getText());
spannable.setSpan(new ForegroundColorSpan(mContext.getResources().getColor(R.color.text_yellow)), 0, tv_wish.getText().toString().indexOf("人想看"), Spanned.SPAN_EXCLUSIVE_EXCLUSIVE);
tv_wish.setText(spannable);
break;
case BaseConstant.TYPE_WAIT_DIVIDER:
                // section divider: shows the coming-soon title
helper.setText(R.id.tv_divider,item.getComingTitle());
break;
}
}
} |
package mixin
import (
"entgo.io/ent"
"entgo.io/ent/schema/field"
"entgo.io/ent/schema/index"
_mixin "entgo.io/ent/schema/mixin"
)
type TenantId struct {
_mixin.Schema
}
// Fields of the TenantId.
func (TenantId) Fields() []ent.Field {
return []ent.Field{
field.Uint64("tenant_id").
Comment("租户ID").
Default(0),
}
}
// Indexes of the TenantId.
func (TenantId) Indexes() []ent.Index {
return []ent.Index{
index.Fields("tenant_id"),
}
}
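A short sketch of how this mixin is typically attached to an ent schema; ExampleUser is hypothetical, and in practice the schema would live in its own package and import this one:

// Illustrative only: shown in-package to stay self-contained.
type ExampleUser struct {
	ent.Schema
}

// Mixin attaches tenant_id (with its index) to every ExampleUser row.
func (ExampleUser) Mixin() []ent.Mixin {
	return []ent.Mixin{
		TenantId{},
	}
}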
|
/*
* Copyright (c) Microsoft Corporation.
* Licensed under the MIT license.
*/
package testcases
import (
"fmt"
"go/token"
"github.com/dave/dst"
"github.com/pkg/errors"
"github.com/Azure/azure-service-operator/v2/tools/generator/internal/astbuilder"
"github.com/Azure/azure-service-operator/v2/tools/generator/internal/astmodel"
"github.com/Azure/azure-service-operator/v2/tools/generator/internal/conversions"
"github.com/Azure/azure-service-operator/v2/tools/generator/internal/functions"
)
// ResourceConversionTestCase represents a test that verifies we can convert from one resource to the hub resource
// (based on our conversion graph), and back again, with no loss of data (lossless conversion via the PropertyBag).
// This will be a multistep conversion, building on the PropertyAssignment functions.
type ResourceConversionTestCase struct {
testName string // The name of this particular test
subject astmodel.TypeName // The subject this test is going to exercise
toFn *functions.ResourceConversionFunction // The function to convert TO our hub instance
fromFn *functions.ResourceConversionFunction // The function to convert FROM our hub instance
idFactory astmodel.IdentifierFactory // a reference to our common factory for creating identifiers
}
var _ astmodel.TestCase = &ResourceConversionTestCase{}
// NewResourceConversionTestCase creates a new test case for the specified resource
func NewResourceConversionTestCase(
name astmodel.TypeName,
resourceType *astmodel.ResourceType,
idFactory astmodel.IdentifierFactory) (*ResourceConversionTestCase, error) {
result := &ResourceConversionTestCase{
subject: name,
idFactory: idFactory,
}
conversionImplementation, ok := resourceType.FindInterface(astmodel.ConvertibleInterface)
if !ok {
return nil, errors.Errorf("expected %s to implement conversions.Convertible including ConvertTo() and ConvertFrom()", name)
}
// Find ConvertTo and ConvertFrom functions from the implementation
for _, implementationFunction := range conversionImplementation.Functions() {
if fn, ok := implementationFunction.(*functions.ResourceConversionFunction); ok {
if fn.Direction() == conversions.ConvertFrom {
result.fromFn = fn
} else if fn.Direction() == conversions.ConvertTo {
result.toFn = fn
}
}
}
// Fail fast if something goes wrong
if result.fromFn == nil {
return nil, errors.Errorf("expected to find function ConvertFrom() on %s", name)
}
if result.toFn == nil {
return nil, errors.Errorf("expected to find function ConvertTo() on %s", name)
}
if !astmodel.TypeEquals(result.fromFn.Hub(), result.toFn.Hub()) {
return nil, errors.Errorf(
"expected ConvertFrom(%s) and ConvertTo(%s) on %s to have the same parameter type",
result.fromFn.Hub(),
result.toFn.Hub(),
name)
}
result.testName = fmt.Sprintf(
"%s_WhenConvertedToHub_RoundTripsWithoutLoss",
name.Name())
return result, nil
}
// Name returns the unique name of this test case
func (tc *ResourceConversionTestCase) Name() string {
return tc.testName
}
// References returns the set of types to which this test case refers.
func (tc *ResourceConversionTestCase) References() astmodel.TypeNameSet {
return astmodel.NewTypeNameSet(
tc.subject,
tc.toFn.Hub())
}
// RequiredImports returns a set of the package imports required by this test case
func (tc *ResourceConversionTestCase) RequiredImports() *astmodel.PackageImportSet {
result := astmodel.NewPackageImportSet()
// Standard Go Packages
result.AddImportsOfReferences(astmodel.OSReference, astmodel.TestingReference)
// Cmp
result.AddImportsOfReferences(astmodel.CmpReference, astmodel.CmpOptsReference)
// Gopter
result.AddImportsOfReferences(astmodel.GopterReference, astmodel.GopterGenReference, astmodel.GopterPropReference)
// Other References
result.AddImportOfReference(astmodel.DiffReference)
result.AddImportOfReference(astmodel.PrettyReference)
result.AddImportOfReference(tc.toFn.Hub().PackageReference)
return result
}
// AsFuncs renders the current test case and any supporting methods as Go abstract syntax trees
// subject is the name of the type under test
// codeGenerationContext contains reference material to use when generating
func (tc *ResourceConversionTestCase) AsFuncs(receiver astmodel.TypeName, codeGenerationContext *astmodel.CodeGenerationContext) []dst.Decl {
return []dst.Decl{
tc.createTestRunner(codeGenerationContext),
tc.createTestMethod(receiver, codeGenerationContext),
}
}
// Equals determines if this TestCase is equal to another one
func (tc *ResourceConversionTestCase) Equals(other astmodel.TestCase, override astmodel.EqualityOverrides) bool {
fn, ok := other.(*ResourceConversionTestCase)
if !ok {
return false
}
return tc.testName == fn.testName &&
tc.subject.Equals(fn.subject, override) &&
tc.toFn.Equals(fn.toFn, override) &&
tc.fromFn.Equals(fn.fromFn, override)
}
// createTestRunner generates the AST for the test runner itself
//
// parameters := gopter.DefaultTestParameters()
// parameters.MaxSize = 10
// properties := gopter.NewProperties(parameters)
// properties.Property("...", prop.ForAll(RunTestForX, XGenerator())
//	properties.TestingRun(t, gopter.NewFormatedReporter(false, 240, os.Stdout))
//
func (tc *ResourceConversionTestCase) createTestRunner(codegenContext *astmodel.CodeGenerationContext) dst.Decl {
const (
parametersLocal = "parameters"
propertiesLocal = "properties"
propertyMethod = "Property"
testingRunMethod = "TestingRun"
)
gopterPackage := codegenContext.MustGetImportedPackageName(astmodel.GopterReference)
osPackage := codegenContext.MustGetImportedPackageName(astmodel.OSReference)
propPackage := codegenContext.MustGetImportedPackageName(astmodel.GopterPropReference)
testingPackage := codegenContext.MustGetImportedPackageName(astmodel.TestingReference)
t := dst.NewIdent("t")
// t.Parallel()
declareParallel := astbuilder.InvokeExpr(t, "Parallel")
// parameters := gopter.DefaultTestParameters()
defineParameters := astbuilder.ShortDeclaration(
parametersLocal,
astbuilder.CallQualifiedFunc(gopterPackage, "DefaultTestParameters"))
// parameters.MaxSize = 10
configureMaxSize := astbuilder.QualifiedAssignment(
dst.NewIdent(parametersLocal),
"MaxSize",
token.ASSIGN,
astbuilder.IntLiteral(10))
// properties := gopter.NewProperties(parameters)
defineProperties := astbuilder.ShortDeclaration(
propertiesLocal,
astbuilder.CallQualifiedFunc(gopterPackage, "NewProperties", dst.NewIdent(parametersLocal)))
// partial expression: description of the test
testName := astbuilder.StringLiteralf("Round trip from %s to hub returns original", tc.subject.Name())
testName.Decs.Before = dst.NewLine
// partial expression: prop.ForAll(RunTestForX, XGenerator())
propForAll := astbuilder.CallQualifiedFunc(
propPackage,
"ForAll",
dst.NewIdent(tc.idOfTestMethod()),
astbuilder.CallFunc(idOfGeneratorMethod(tc.subject, tc.idFactory)))
propForAll.Decs.Before = dst.NewLine
// properties.Property("...", prop.ForAll(RunTestForX, XGenerator())
defineTestCase := astbuilder.InvokeQualifiedFunc(
propertiesLocal,
propertyMethod,
testName,
propForAll)
	// properties.TestingRun(t, gopter.NewFormatedReporter(false, 240, os.Stdout))
createReporter := astbuilder.CallQualifiedFunc(
gopterPackage,
"NewFormatedReporter",
dst.NewIdent("false"),
astbuilder.IntLiteral(240),
astbuilder.Selector(dst.NewIdent(osPackage), "Stdout"))
runTests := astbuilder.InvokeQualifiedFunc(propertiesLocal, testingRunMethod, t, createReporter)
// Define our function
fn := astbuilder.NewTestFuncDetails(
testingPackage,
tc.testName,
declareParallel,
defineParameters,
configureMaxSize,
defineProperties,
defineTestCase,
runTests)
return fn.DefineFunc()
}
// createTestMethod generates the AST for a method to run a single test of round trip conversion
//
// var hub OtherType
// err := subject.ConvertTo(&hub)
// if err != nil {
// return err.Error()
// }
//
// var result OurType
// err = result.ConvertFrom(&hub)
// if err != nil {
// return err.Error()
// }
//
// match := cmp.Equal(subject, actual, cmpopts.EquateEmpty())
// if !match {
// result := diff.Diff(subject, actual);
// return result
// }
//
// return ""
//
func (tc *ResourceConversionTestCase) createTestMethod(
subject astmodel.TypeName,
codegenContext *astmodel.CodeGenerationContext) dst.Decl {
const (
errId = "err"
hubId = "hub"
actualId = "actual"
actualFmtId = "actualFmt"
matchId = "match"
subjectId = "subject"
subjectFmtId = "subjectFmt"
copiedId = "copied"
resultId = "result"
)
cmpPackage := codegenContext.MustGetImportedPackageName(astmodel.CmpReference)
cmpoptsPackage := codegenContext.MustGetImportedPackageName(astmodel.CmpOptsReference)
prettyPackage := codegenContext.MustGetImportedPackageName(astmodel.PrettyReference)
diffPackage := codegenContext.MustGetImportedPackageName(astmodel.DiffReference)
// copied := subject.DeepCopy()
assignCopied := astbuilder.ShortDeclaration(
copiedId,
astbuilder.CallQualifiedFunc(subjectId, "DeepCopy"))
assignCopied.Decorations().Before = dst.NewLine
astbuilder.AddComment(&assignCopied.Decorations().Start, "// Copy subject to make sure conversion doesn't modify it")
// var hub OtherType
declareOther := astbuilder.LocalVariableDeclaration(
hubId,
tc.toFn.Hub().AsType(codegenContext),
"// Convert to our hub version")
declareOther.Decorations().Before = dst.EmptyLine
// err := subject.ConvertTo(&hub)
assignTo := astbuilder.ShortDeclaration(
errId,
astbuilder.CallQualifiedFunc(
copiedId,
tc.toFn.Name(),
astbuilder.AddrOf(dst.NewIdent(hubId))))
// if err != nil { return err.Error() }
assignToFailed := astbuilder.ReturnIfNotNil(
dst.NewIdent(errId),
astbuilder.CallQualifiedFunc("err", "Error"))
// var result OurType
declareResult := astbuilder.LocalVariableDeclaration(
actualId,
subject.AsType(codegenContext),
"// Convert from our hub version")
declareResult.Decorations().Before = dst.EmptyLine
// err = result.ConvertFrom(&hub)
assignFrom := astbuilder.SimpleAssignment(
dst.NewIdent(errId),
astbuilder.CallQualifiedFunc(
actualId,
tc.fromFn.Name(),
astbuilder.AddrOf(dst.NewIdent(hubId))))
// if err != nil { return err.Error() }
assignFromFailed := astbuilder.ReturnIfNotNil(
dst.NewIdent(errId),
astbuilder.CallQualifiedFunc("err", "Error"))
// match := cmp.Equal(subject, actual, cmpopts.EquateEmpty())
equateEmpty := astbuilder.CallQualifiedFunc(cmpoptsPackage, "EquateEmpty")
compare := astbuilder.ShortDeclaration(
matchId,
astbuilder.CallQualifiedFunc(cmpPackage, "Equal",
dst.NewIdent(subjectId),
dst.NewIdent(actualId),
equateEmpty))
compare.Decorations().Before = dst.EmptyLine
astbuilder.AddComment(&compare.Decorations().Start, "// Compare actual with what we started with")
// actualFmt := pretty.Sprint(actual)
declareActual := astbuilder.ShortDeclaration(
actualFmtId,
astbuilder.CallQualifiedFunc(prettyPackage, "Sprint", dst.NewIdent(actualId)))
// subjectFmt := pretty.Sprint(subject)
declareSubject := astbuilder.ShortDeclaration(
subjectFmtId,
astbuilder.CallQualifiedFunc(prettyPackage, "Sprint", dst.NewIdent(subjectId)))
// result := diff.Diff(subject, actual)
declareDiff := astbuilder.ShortDeclaration(
resultId,
astbuilder.CallQualifiedFunc(diffPackage, "Diff", dst.NewIdent(subjectFmtId), dst.NewIdent(actualFmtId)))
// return result
returnDiff := astbuilder.Returns(dst.NewIdent(resultId))
// if !match {
// result := diff.Diff(subject, actual);
// return result
// }
prettyPrint := astbuilder.SimpleIf(
astbuilder.NotExpr(dst.NewIdent(matchId)),
declareActual,
declareSubject,
declareDiff,
returnDiff)
// return ""
ret := astbuilder.Returns(astbuilder.StringLiteral(""))
ret.Decorations().Before = dst.EmptyLine
// Create the function
fn := &astbuilder.FuncDetails{
Name: tc.idOfTestMethod(),
Body: astbuilder.Statements(
assignCopied,
declareOther,
assignTo,
assignToFailed,
declareResult,
assignFrom,
assignFromFailed,
compare,
prettyPrint,
ret),
}
fn.AddParameter("subject", tc.subject.AsType(codegenContext))
fn.AddComments(fmt.Sprintf(
"tests if a specific instance of %s round trips to the hub storage version and back losslessly",
tc.subject.Name()))
fn.AddReturns("string")
return fn.DefineFunc()
}
func (tc *ResourceConversionTestCase) idOfTestMethod() string {
return tc.idFactory.CreateIdentifier(
fmt.Sprintf("RunResourceConversionTestFor%s", tc.subject.Name()),
astmodel.Exported)
}
|
import os
import unittest
from example import example
_SOURCE_ID = "abc123"
_SOURCE_URL = "https://some.url"
_PARSED_CASE = (
{
"caseReference": {
"sourceId": _SOURCE_ID,
"sourceUrl": _SOURCE_URL,
},
"location": {
"query": "Some district, Some country",
},
"events": [
{
"name": "confirmed",
"dateRange":
{
"start": "03/06/2020Z",
"end": "03/06/2020Z",
},
},
],
})
class ExampleTest(unittest.TestCase):
def test_parse(self):
current_dir = os.path.dirname(__file__)
sample_data_file = os.path.join(current_dir, "sample_data.csv")
result = example.parse_cases(sample_data_file, _SOURCE_ID, _SOURCE_URL)
self.assertEqual(next(result), _PARSED_CASE)
|
use super::*;
use crate::{
config::{
ConfigParseError,
Control as CfgControl,
ControlMode,
GatherMode,
OptInOut,
OptInOutMode,
},
guild::*,
server::Label,
watchcat::*,
};
use serenity::{
client::*,
framework::standard::{macros::command, Args, CommandResult},
model::prelude::*,
};
use std::sync::Arc;
#[command]
#[aliases("see-config")]
#[description = "Nya! (See all the ways I'm acting for this server!)"]
#[owner_privilege]
pub async fn see_config(ctx: &Context, msg: &Message, _args: Args) -> CommandResult {
let guild = match msg.guild(&ctx.cache).await {
Some(c) => c,
None => {
return confused(&ctx, &msg).await;
},
};
let gs = {
let data = ctx.data.read().await;
Arc::clone(data.get::<GuildStates>().unwrap())
};
let reply_txt = if let Some(state) = gs.get(&guild.id) {
let lock = state.read().await;
lock.to_message()
} else {
"Hiss... (I couldn't find any relevant info for your server...)".into()
};
check_msg(msg.author.dm(&ctx, |m| m.content(reply_txt)).await);
Ok(())
}
#[command]
#[aliases("log-to")]
#[description = "Mrowr... (Tell me where to stash any mail that goes missing...)"]
#[owner_privilege]
pub async fn log_to(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult {
let out_chan = parse_chan_mention(&mut args);
if out_chan.is_none() {
return confused(&ctx, msg).await;
}
let out_chan = out_chan.unwrap();
// Get the guild ID.
let guild_id = match msg.guild(&ctx.cache).await {
Some(c) => c.id,
None => {
return confused(&ctx, msg).await;
},
};
watchcat(&ctx, guild_id, WatchcatCommand::SetChannel(out_chan)).await;
check_msg(
msg.channel_id
.say(&ctx.http, "Mrowrorr! (I'll keep you nyotified!)")
.await,
);
Ok(())
}
#[command]
#[aliases("felyne-prefix")]
#[description = "Nrowr? (How should I know that folks want to talk to me?)"]
#[owner_privilege]
pub async fn felyne_prefix(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult {
let new_prefix = args.single::<String>();
if new_prefix.is_err() {
return confused(&ctx, msg).await;
}
let new_prefix = new_prefix.unwrap();
// Get the guild ID.
let guild_id = match msg.guild(&ctx.cache).await {
Some(c) => c.id,
None => {
return confused(&ctx, msg).await;
},
};
let gs = {
let data = ctx.data.read().await;
Arc::clone(data.get::<GuildStates>().unwrap())
};
if let Some(state) = gs.get(&guild_id) {
let mut lock = state.write().await;
lock.set_custom_prefix(new_prefix.clone()).await;
}
check_msg(
msg.channel_id
.say(
&ctx.http,
format!("Listening to nyew prefix: {}", &new_prefix),
)
.await,
);
Ok(())
}
#[command]
#[aliases("admin-ctl-mode")]
#[description = "Mrrewr? (Who gets to boss me around, all the time?)"]
#[owner_privilege]
pub async fn admin_ctl_mode(ctx: &Context, msg: &Message, args: Args) -> CommandResult {
ctl_mode_basis(ctx, msg, args, true).await
}
#[command]
#[aliases("ctl-mode")]
#[description = "Mrrewr? (Who gets to tell me when to hunt?)"]
#[owner_privilege]
pub async fn ctl_mode(ctx: &Context, msg: &Message, args: Args) -> CommandResult {
ctl_mode_basis(ctx, msg, args, false).await
}
async fn ctl_mode_basis(
ctx: &Context,
msg: &Message,
mut args: Args,
do_for_admin: bool,
) -> CommandResult {
match CfgControl::parse(&mut args) {
Ok(Some(cm)) => {
let gs = {
let datas = ctx.data.read().await;
Arc::clone(datas.get::<GuildStates>().unwrap())
};
if let Some(g_id) = msg.guild_id {
if let Some(gs) = gs.get(&g_id) {
if do_for_admin {
let mut gs_lock = gs.write().await;
gs_lock.set_admin_control_mode(cm).await;
} else {
let mut gs_lock = gs.write().await;
gs_lock.set_voice_control_mode(cm).await;
}
check_msg(
msg.channel_id
.say(
&ctx.http,
format!(
"Now accepting{} commands from: {:?}",
if do_for_admin { " admin" } else { "" },
&cm,
),
)
.await,
);
}
}
// new mode
},
Ok(None) => {
check_msg(
msg.channel_id
.say(
&ctx.http,
format!("I support the modes: {:?}", &ControlMode::LABEL_LIST),
)
.await,
);
},
Err(e) => {
check_msg(msg.channel_id.say(&ctx.http, match e {
ConfigParseError::ArgTake => {
"Uhh, this shouldn't have happened. Report this to FelixMcFelix#2443?"
},
ConfigParseError::BadMode => {
"Mrowr?! That's an illegal mode! Use this commyand without any extra info to see valid chyoices."
},
ConfigParseError::IllegalRole => {
"Myeh? That role doesn't look valid to me: make sure it's a valid mention or ID!"
},
ConfigParseError::MissingRole => {
"Try that command again, with a role mention or ID!"
},
}).await);
},
}
Ok(())
}
#[command]
#[aliases("server-opt")]
#[description = "Mrrewr? (Do you want me to help model how people talk?)"]
#[owner_privilege]
pub async fn server_opt(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult {
match OptInOut::parse(&mut args) {
Ok(Some(om)) =>
if let Some(g_id) = msg.guild_id {
let gs = {
let data = ctx.data.read().await;
Arc::clone(data.get::<GuildStates>().unwrap())
};
if let Some(state) = gs.get(&g_id) {
let mut lock = state.write().await;
lock.set_server_opt(om).await;
}
check_msg(
msg.channel_id
.say(&ctx.http, format!("Voice stats measurement: {:?}", &om,))
.await,
);
},
Ok(None) => {
check_msg(
msg.channel_id
.say(
&ctx.http,
format!("I support the modes: {:?}", &OptInOutMode::LABEL_LIST),
)
.await,
);
},
Err(e) => {
check_msg(msg.channel_id.say(&ctx.http, match e {
ConfigParseError::ArgTake => {
"Uhh, this shouldn't have happened. Report this to FelixMcFelix#2443?"
},
ConfigParseError::BadMode => {
"Mrowr?! That's an illegal mode! Use this commyand without any extra info to see valid chyoices."
},
ConfigParseError::IllegalRole => {
"Myeh? That role doesn't look valid to me: make sure it's a valid mention or ID!"
},
ConfigParseError::MissingRole => {
"Try that command again, with a role mention or ID!"
},
}).await);
},
}
Ok(())
}
#[command]
#[aliases("server-ack")]
#[description = "Mraww? (If I'm measuring how folks talk, should I credit this place?)"]
#[owner_privilege]
pub async fn server_ack(ctx: &Context, msg: &Message, args: Args) -> CommandResult {
if let Some(g_id) = msg.guild_id {
let new_str = args.rest().trim();
let ack = if !new_str.is_empty() {
new_str.to_string()
} else if let Some(g) = msg.guild(ctx).await {
g.name.clone()
} else {
"".into()
};
let gs = {
let data = ctx.data.read().await;
Arc::clone(data.get::<GuildStates>().unwrap())
};
if let Some(state) = gs.get(&g_id) {
let mut lock = state.write().await;
lock.set_custom_ack(ack.to_string()).await;
}
check_msg(
msg.channel_id
.say(&ctx.http, format!("Crediting this server as: {:?}", ack))
.await,
);
}
Ok(())
}
#[command]
#[aliases("remove-server-ack")]
#[description = "Mya!? (You don't want to be credited anymore?)"]
#[owner_privilege]
pub async fn remove_server_ack(ctx: &Context, msg: &Message, _args: Args) -> CommandResult {
if let Some(g_id) = msg.guild_id {
let gs = {
let data = ctx.data.read().await;
Arc::clone(data.get::<GuildStates>().unwrap())
};
if let Some(state) = gs.get(&g_id) {
let mut lock = state.write().await;
lock.remove_custom_ack().await;
}
check_msg(
msg.channel_id
.say(&ctx.http, "No longer crediting this server...".to_string())
.await,
);
}
Ok(())
}
#[command]
#[aliases("server-label")]
#[description = "Mraww? (If I'm measuring how folks talk, what kinda place is this?)"]
#[owner_privilege]
pub async fn server_label(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult {
match Label::parse(&mut args) {
Ok(Some(label)) =>
if let Some(g_id) = msg.guild_id {
let gs = {
let data = ctx.data.read().await;
Arc::clone(data.get::<GuildStates>().unwrap())
};
if let Some(state) = gs.get(&g_id) {
let mut lock = state.write().await;
lock.set_label(label).await;
}
check_msg(
msg.channel_id
.say(&ctx.http, format!("Server label set as: {:?}", &label))
.await,
);
},
Ok(None) => {
check_msg(
msg.channel_id
.say(
&ctx.http,
format!("I support the labels: {:?}", &Label::LABEL_LIST),
)
.await,
);
},
Err(e) => {
check_msg(msg.channel_id.say(&ctx.http, match e {
ConfigParseError::ArgTake => {
"Uhh, this shouldn't have happened. Report this to FelixMcFelix#2443?"
},
ConfigParseError::BadMode => {
"Mrowr?! That's an illegal label! Use this commyand without any extra info to see valid chyoices."
},
ConfigParseError::IllegalRole => {
"Myeh? That role doesn't look valid to me: make sure it's a valid mention or ID!"
},
ConfigParseError::MissingRole => {
"Try that command again, with a role mention or ID!"
},
}).await);
},
}
Ok(())
}
#[command]
#[aliases("server-unlabel")]
#[description = "Mya!? (You want me to forget what kinda place this server is?)"]
#[owner_privilege]
pub async fn server_unlabel(ctx: &Context, msg: &Message, _args: Args) -> CommandResult {
let guild = match msg.guild(&ctx.cache).await {
Some(c) => c,
None => {
return confused(&ctx, &msg).await;
},
};
let gs = {
let data = ctx.data.read().await;
Arc::clone(data.get::<GuildStates>().unwrap())
};
if let Some(state) = gs.get(&guild.id) {
let mut lock = state.write().await;
lock.remove_label().await;
}
Ok(())
}
#[command]
#[aliases("gather-mode")]
#[description = "Mraww? (If I'm measuring how folks talk, when should I listen in?)"]
#[owner_privilege]
pub async fn gather_mode(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult {
match GatherMode::parse(&mut args) {
Ok(Some(gm)) =>
if let Some(g_id) = msg.guild_id {
let gs = {
let data = ctx.data.read().await;
Arc::clone(data.get::<GuildStates>().unwrap())
};
if let Some(state) = gs.get(&g_id) {
let mut lock = state.write().await;
lock.set_gather(gm).await;
}
check_msg(
msg.channel_id
.say(&ctx.http, format!("Server gather-mode set as: {:?}", &gm))
.await,
);
},
Ok(None) => {
check_msg(
msg.channel_id
.say(
&ctx.http,
format!("I support the labels: {:?}", &GatherMode::LABEL_LIST),
)
.await,
);
},
Err(e) => {
check_msg(msg.channel_id.say(&ctx.http, match e {
ConfigParseError::ArgTake => {
"Uhh, this shouldn't have happened. Report this to FelixMcFelix#2443?"
},
ConfigParseError::BadMode => {
"Mrowr?! That's an illegal label! Use this commyand without any extra info to see valid chyoices."
},
ConfigParseError::IllegalRole => {
"Myeh? That role doesn't look valid to me: make sure it's a valid mention or ID!"
},
ConfigParseError::MissingRole => {
"Try that command again, with a role mention or ID!"
},
}).await);
},
}
Ok(())
}
|
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package principal;
import java.sql.Connection;
import java.sql.DriverManager;
import javax.swing.JOptionPane;
/**
*
* @author fabio
*/
public class Conectar {
Connection conect = null;
public Connection conexao() {
try {
Class.forName("com.mysql.jdbc.Driver");
conect = DriverManager.getConnection("jdbc:mysql://localhost/"
+ "sistemapdv21", "fabio", "mysql2021");
} catch (Exception e) {
JOptionPane.showMessageDialog(null, "Error en la conexión" + e);
}
return conect;
}
} |
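A hedged usage sketch; the query is illustrative and error handling is minimal:

// Illustrative only: note that conexao() returns null on failure,
// which would cause a NullPointerException below.
try (Connection con = new Conectar().conexao();
     java.sql.Statement st = con.createStatement();
     java.sql.ResultSet rs = st.executeQuery("SELECT 1")) {
    while (rs.next()) {
        System.out.println(rs.getInt(1));
    }
} catch (java.sql.SQLException e) {
    e.printStackTrace();
}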
nums = list(map(int, input().strip().split()))
mmax = 0
for i in range(14):
score = 0
cur = nums[i]
if cur == 0:
continue
for j in range(14):
dist = j - i
if j <= i:
dist = 14 - i + j
if cur % 14 < dist:
add = cur // 14
else:
add = cur // 14 + 1
if j == i:
tmp = add
else:
tmp = nums[j] + add
if tmp % 2 == 0:
score += tmp
#print(add)
#print(score)
mmax = max(mmax, score)
print(mmax)
|
/**
* Find the kth largest element in an unsorted array. Note that it is the kth largest element in the sorted order,
* not the kth distinct element.
*
* @author Haoming Chen
* Created on 2019/11/6
*/
public class KthLargestElementInAnArray_215 {
public int ret;
public int k;
public static void main(String[] args) {
int[] nums = {1};
int k = 1;
KthLargestElementInAnArray_215 kthLargestElementInAnArray_215 = new KthLargestElementInAnArray_215();
System.out.println(kthLargestElementInAnArray_215.findKthLargest(nums, k));
}
public int findKthLargest(int[] nums, int k) {
this.k = k;
quickSortBase(nums, 0, nums.length - 1);
return ret;
}
    private void quickSortBase(int[] nums, int l, int r) {
        if (l > r) {
            return;
        }
        int pivot = getPivot(nums, l, r);
        if (nums.length - pivot == k) {
            ret = nums[pivot];
            return;
        }
        // Quickselect: only the partition that can contain the kth largest
        // element (index nums.length - k) needs further work; recursing into
        // both sides, as a full quicksort would, is unnecessary here.
        if (pivot > nums.length - k) {
            quickSortBase(nums, l, pivot - 1);
        } else {
            quickSortBase(nums, pivot + 1, r);
        }
    }
private int getPivot(int[] nums, int l, int r) {
int lt = l;
for (int i = l + 1; i <= r; i++) {
if (nums[i] < nums[l]) {
lt++;
swap(lt, i, nums);
}
}
swap(lt, l, nums);
return lt;
}
private void swap(int l, int r, int[] arr) {
int temp = arr[l];
arr[l] = arr[r];
arr[r] = temp;
}
} |
from collections import Counter,defaultdict,deque
from heapq import heappop,heappush,heapify
import sys,bisect,math,itertools,fractions,pprint
sys.setrecursionlimit(10**8)
mod = 10**9+7
INF = float('inf')
def inp(): return int(sys.stdin.readline())
def inpl(): return list(map(int, sys.stdin.readline().split()))
def err():
print('Impossible')
quit()
n,m = inpl()
g = [list(input()) for _ in range(n)]
su = 0
for i in range(n):
for j in range(m):
if g[i][j] == '#':
su += 1
if g[0][0] == '.' or su != n+m-1:
err()
for fl in itertools.combinations(range(n+m-2), n-1):  # only membership in fl matters, so combinations avoids redundant orderings
# print(fl)
now = [0,0]
for i in range(n+m-2):
if i in fl:
now[0] += 1
else:
now[1] += 1
y,x = now
if g[y][x] == '.':
break
else:
print('Possible')
quit()
err()
|
/**
* Mediates between RecipesFragment and Data Repository,
* fetching necessary data for the RecipesFragment to display
*/
public class RecipesViewModel extends ViewModel {
private final DataRepository mRepository;
@SuppressWarnings("unchecked")
@Inject
public RecipesViewModel(DataRepository repository) {
this.mRepository = repository;
}
@VisibleForTesting
public LiveData<List<RecipeWithStepsAndIngredients>> getRecipes() {
return mRepository.getRecipes();
}
public int getRecipeIdPreference() {
return mRepository.getRecipeIdPreference();
}
public void setRecipeIdPreference(int recipeId) {
mRepository.setRecipeIdPreference(recipeId);
}
} |
/* vim: set sw=8 ts=8 sts=8 expandtab: */
#include "Ewl_Test.h"
#include "ewl_test_private.h"
#include "ewl_window.h"
#include "ewl_button.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
static int test_constructor(char *buf, int len);
static int test_title_set_get(char *buf, int len);
static int test_name_set_get(char *buf, int len);
static int test_class_set_get(char *buf, int len);
static int test_borderless_set_get(char *buf, int len);
static int test_dialog_set_get(char *buf, int len);
static int test_fullscreen_set_get(char *buf, int len);
static int test_skip_taskbar_set_get(char *buf, int len);
static int test_skip_pager_set_get(char *buf, int len);
static int test_urgent_set_get(char *buf, int len);
static int test_modal_set_get(char *buf, int len);
static int test_keyboard_grab_set_get(char *buf, int len);
static int test_pointer_grab_set_get(char *buf, int len);
static int test_override_set_get(char *buf, int len);
/*
* This set of tests is targeted at window
*/
Ewl_Unit_Test window_unit_tests[] = {
{"constructor", test_constructor, NULL, -1, 0},
{"title set/get", test_title_set_get, NULL, -1, 0},
{"name set/get", test_name_set_get, NULL, -1, 0},
{"class set/get", test_class_set_get, NULL, -1, 0},
{"borderless set/get", test_borderless_set_get, NULL, -1, 0},
{"dialog set/get", test_dialog_set_get, NULL, -1, 0},
{"fullscreen set/get", test_fullscreen_set_get, NULL, -1, 0},
{"skip_taskbar set/get", test_skip_taskbar_set_get, NULL, -1, 0},
{"skip_pager set/get", test_skip_pager_set_get, NULL, -1, 0},
{"urgent set/get", test_urgent_set_get, NULL, -1, 0},
{"modal set/get", test_modal_set_get, NULL, -1, 0},
{"keyboard_grab set/get", test_keyboard_grab_set_get, NULL, -1, 0},
{"pointer_grab set/get", test_pointer_grab_set_get, NULL, -1, 0},
{"override set/get", test_override_set_get, NULL, -1, 0},
{NULL, NULL, NULL, -1, 0}
};
/*
* Test the default values of a newly created window
*/
static int
test_constructor(char *buf, int len)
{
Ewl_Widget *win;
const char *name;
int ret = 0;
win = ewl_window_new();
if (!EWL_WINDOW_IS(win))
{
LOG_FAILURE(buf, len, "window is not of the type WINDOW");
goto DONE;
}
name = ewl_window_title_get(EWL_WINDOW(win));
if (name)
{
LOG_FAILURE(buf, len, "default title is '%s'", name);
goto DONE;
}
name = ewl_window_name_get(EWL_WINDOW(win));
if (name)
{
LOG_FAILURE(buf, len, "default name is '%s'", name);
goto DONE;
}
name = ewl_window_class_get(EWL_WINDOW(win));
if (name)
{
LOG_FAILURE(buf, len, "default class is '%s'", name);
goto DONE;
}
if (ewl_window_borderless_get(EWL_WINDOW(win)))
{
LOG_FAILURE(buf, len, "window is borderless");
goto DONE;
}
if (ewl_window_dialog_get(EWL_WINDOW(win)))
{
LOG_FAILURE(buf, len, "window is a dialog");
goto DONE;
}
if (ewl_window_fullscreen_get(EWL_WINDOW(win)))
{
LOG_FAILURE(buf, len, "window is fullscreen");
goto DONE;
}
if (ewl_window_skip_taskbar_get(EWL_WINDOW(win)))
{
LOG_FAILURE(buf, len, "window has the \'skip taskbar\' flag");
goto DONE;
}
if (ewl_window_skip_pager_get(EWL_WINDOW(win)))
{
LOG_FAILURE(buf, len, "window has the \'skip pager\' flag");
goto DONE;
}
if (ewl_window_urgent_get(EWL_WINDOW(win)))
{
LOG_FAILURE(buf, len, "window has the \'urgent\' flag");
goto DONE;
}
if (ewl_window_leader_foreign_get(EWL_WINDOW(win)))
{
LOG_FAILURE(buf, len, "window has a foreign leader set");
goto DONE;
}
if (ewl_window_leader_get(EWL_WINDOW(win)))
{
LOG_FAILURE(buf, len, "window has a leader set");
goto DONE;
}
if (ewl_window_modal_get(EWL_WINDOW(win)))
{
LOG_FAILURE(buf, len, "window is modal");
goto DONE;
}
if (ewl_window_keyboard_grab_get(EWL_WINDOW(win)))
{
LOG_FAILURE(buf, len, "window grabs keyboard");
goto DONE;
}
if (ewl_window_pointer_grab_get(EWL_WINDOW(win)))
{
LOG_FAILURE(buf, len, "window grabs pointer");
goto DONE;
}
if (ewl_window_override_get(EWL_WINDOW(win)))
{
LOG_FAILURE(buf, len, "window is an override redirect window");
goto DONE;
}
if (!ewl_embed_render_get(EWL_EMBED(win)))
{
LOG_FAILURE(buf, len, "window doesn't render itself");
goto DONE;
}
ret = 1;
DONE:
ewl_widget_destroy(win);
return ret;
}
/*
* Set a string to a new window title and retrieve it again
*/
static int
test_title_set_get(char *buf, int len)
{
Ewl_Widget *win;
const char *title;
int ret = 0;
win = ewl_window_new();
title = ewl_window_title_get(EWL_WINDOW(win));
if (title)
snprintf(buf, len, "default title set to '%s'", title);
else {
ewl_window_title_set(EWL_WINDOW(win), "A title");
title = ewl_window_title_get(EWL_WINDOW(win));
if (strcmp(title, "A title"))
snprintf(buf, len, "incorrect title set");
else {
ewl_window_title_set(EWL_WINDOW(win), "");
title = ewl_window_title_get(EWL_WINDOW(win));
if (title)
snprintf(buf, len, "non-empty title set");
else
ret = 1;
}
}
ewl_widget_destroy(win);
return ret;
}
/*
* Set a string to a new window name and retrieve it again
*/
static int
test_name_set_get(char *buf, int len)
{
Ewl_Widget *win;
const char *name;
int ret = 0;
win = ewl_window_new();
name = ewl_window_name_get(EWL_WINDOW(win));
if (name)
snprintf(buf, len, "default name set to '%s'", name);
else {
ewl_window_name_set(EWL_WINDOW(win), "A name");
name = ewl_window_name_get(EWL_WINDOW(win));
if (strcmp(name, "A name"))
snprintf(buf, len, "incorrect name set");
else {
ewl_window_name_set(EWL_WINDOW(win), "");
name = ewl_window_name_get(EWL_WINDOW(win));
if (name)
snprintf(buf, len, "non-empty name set");
else
ret = 1;
}
}
ewl_widget_destroy(win);
return ret;
}
/*
* Set a string to a new window class and retrieve it again
*/
static int
test_class_set_get(char *buf, int len)
{
Ewl_Widget *win;
const char *class;
int ret = 0;
win = ewl_window_new();
class = ewl_window_class_get(EWL_WINDOW(win));
if (class)
snprintf(buf, len, "default class set to '%s'", class);
else {
ewl_window_class_set(EWL_WINDOW(win), "A class");
class = ewl_window_class_get(EWL_WINDOW(win));
if (strcmp(class, "A class"))
snprintf(buf, len, "incorrect class set");
else {
ewl_window_class_set(EWL_WINDOW(win), "");
class = ewl_window_class_get(EWL_WINDOW(win));
if (class)
snprintf(buf, len, "non-empty class set");
else
ret = 1;
}
}
ewl_widget_destroy(win);
return ret;
}
/*
* Set a window as borderless and check that the value is set.
*/
static int
test_borderless_set_get(char *buf, int len)
{
Ewl_Widget *win;
int ret = 0;
win = ewl_window_new();
ewl_window_borderless_set(EWL_WINDOW(win), TRUE);
if (ewl_window_borderless_get(EWL_WINDOW(win)))
ret = 1;
else
snprintf(buf, len, "setting borderless TRUE failed");
ewl_window_borderless_set(EWL_WINDOW(win), FALSE);
if (ewl_window_borderless_get(EWL_WINDOW(win)))
snprintf(buf, len, "setting borderless FALSE failed");
ewl_widget_destroy(win);
return ret;
}
/*
* Set a window as a dialog and check that the value is set.
*/
static int
test_dialog_set_get(char *buf, int len)
{
Ewl_Widget *win;
int ret = 0;
win = ewl_window_new();
if (!ewl_window_dialog_get(EWL_WINDOW(win))) {
ewl_window_dialog_set(EWL_WINDOW(win), TRUE);
if (ewl_window_dialog_get(EWL_WINDOW(win))) {
ewl_window_dialog_set(EWL_WINDOW(win), FALSE);
if (!ewl_window_dialog_get(EWL_WINDOW(win)))
ret = 1;
else
snprintf(buf, len, "dialog unset failed");
}
else
snprintf(buf, len, "dialog set failed");
}
else
snprintf(buf, len, "default dialog set");
ewl_widget_destroy(win);
return ret;
}
/*
* Set a window as a fullscreen and check that the value is set.
*/
static int
test_fullscreen_set_get(char *buf, int len)
{
Ewl_Widget *win;
int ret = 0;
win = ewl_window_new();
if (!ewl_window_fullscreen_get(EWL_WINDOW(win))) {
ewl_window_fullscreen_set(EWL_WINDOW(win), TRUE);
if (ewl_window_fullscreen_get(EWL_WINDOW(win))) {
ewl_window_fullscreen_set(EWL_WINDOW(win), FALSE);
if (!ewl_window_fullscreen_get(EWL_WINDOW(win)))
ret = 1;
else
snprintf(buf, len, "fullscreen unset failed");
}
else
snprintf(buf, len, "fullscreen set failed");
}
else
snprintf(buf, len, "default fullscreen set");
ewl_widget_destroy(win);
return ret;
}
/*
* Set a window as a skip_taskbar and check that the value is set.
*/
static int
test_skip_taskbar_set_get(char *buf, int len)
{
Ewl_Widget *win;
int ret = 0;
win = ewl_window_new();
if (!ewl_window_skip_taskbar_get(EWL_WINDOW(win))) {
ewl_window_skip_taskbar_set(EWL_WINDOW(win), TRUE);
if (ewl_window_skip_taskbar_get(EWL_WINDOW(win))) {
ewl_window_skip_taskbar_set(EWL_WINDOW(win), FALSE);
if (!ewl_window_skip_taskbar_get(EWL_WINDOW(win)))
ret = 1;
else
snprintf(buf, len, "skip_taskbar unset failed");
}
else
snprintf(buf, len, "skip_taskbar set failed");
}
else
snprintf(buf, len, "default skip_taskbar set");
ewl_widget_destroy(win);
return ret;
}
/*
* Set a window as a skip_pager and check that the value is set.
*/
static int
test_skip_pager_set_get(char *buf, int len)
{
Ewl_Widget *win;
int ret = 0;
win = ewl_window_new();
if (!ewl_window_skip_pager_get(EWL_WINDOW(win))) {
ewl_window_skip_pager_set(EWL_WINDOW(win), TRUE);
if (ewl_window_skip_pager_get(EWL_WINDOW(win))) {
ewl_window_skip_pager_set(EWL_WINDOW(win), FALSE);
if (!ewl_window_skip_pager_get(EWL_WINDOW(win)))
ret = 1;
else
snprintf(buf, len, "skip_pager unset failed");
}
else
snprintf(buf, len, "skip_pager set failed");
}
else
snprintf(buf, len, "default skip_pager set");
ewl_widget_destroy(win);
return ret;
}
/*
* Set a window as a urgent and check that the value is set.
*/
static int
test_urgent_set_get(char *buf, int len)
{
Ewl_Widget *win;
int ret = 0;
win = ewl_window_new();
if (!ewl_window_urgent_get(EWL_WINDOW(win))) {
ewl_window_urgent_set(EWL_WINDOW(win), TRUE);
if (ewl_window_urgent_get(EWL_WINDOW(win))) {
ewl_window_urgent_set(EWL_WINDOW(win), FALSE);
if (!ewl_window_urgent_get(EWL_WINDOW(win)))
ret = 1;
else
snprintf(buf, len, "urgent unset failed");
}
else
snprintf(buf, len, "urgent set failed");
}
else
snprintf(buf, len, "default urgent set");
ewl_widget_destroy(win);
return ret;
}
/*
* Set a window as a modal and check that the value is set.
*/
static int
test_modal_set_get(char *buf, int len)
{
Ewl_Widget *win;
int ret = 0;
win = ewl_window_new();
if (!ewl_window_modal_get(EWL_WINDOW(win))) {
ewl_window_modal_set(EWL_WINDOW(win), TRUE);
if (ewl_window_modal_get(EWL_WINDOW(win))) {
ewl_window_modal_set(EWL_WINDOW(win), FALSE);
if (!ewl_window_modal_get(EWL_WINDOW(win)))
ret = 1;
else
snprintf(buf, len, "modal unset failed");
}
else
snprintf(buf, len, "modal set failed");
}
else
snprintf(buf, len, "default modal set");
ewl_widget_destroy(win);
return ret;
}
/*
* Set a window as a keyboard_grab and check that the value is set.
*/
static int
test_keyboard_grab_set_get(char *buf, int len)
{
Ewl_Widget *win;
int ret = 0;
win = ewl_window_new();
if (!ewl_window_keyboard_grab_get(EWL_WINDOW(win))) {
ewl_window_keyboard_grab_set(EWL_WINDOW(win), TRUE);
if (ewl_window_keyboard_grab_get(EWL_WINDOW(win))) {
ewl_window_keyboard_grab_set(EWL_WINDOW(win), FALSE);
if (!ewl_window_keyboard_grab_get(EWL_WINDOW(win)))
ret = 1;
else
snprintf(buf, len, "keyboard_grab unset failed");
}
else
snprintf(buf, len, "keyboard_grab set failed");
}
else
snprintf(buf, len, "default keyboard_grab set");
ewl_widget_destroy(win);
return ret;
}
/*
* Set a window as a pointer_grab and check that the value is set.
*/
static int
test_pointer_grab_set_get(char *buf, int len)
{
Ewl_Widget *win;
int ret = 0;
win = ewl_window_new();
if (!ewl_window_pointer_grab_get(EWL_WINDOW(win))) {
ewl_window_pointer_grab_set(EWL_WINDOW(win), TRUE);
if (ewl_window_pointer_grab_get(EWL_WINDOW(win))) {
ewl_window_pointer_grab_set(EWL_WINDOW(win), FALSE);
if (!ewl_window_pointer_grab_get(EWL_WINDOW(win)))
ret = 1;
else
snprintf(buf, len, "pointer_grab unset failed");
}
else
snprintf(buf, len, "pointer_grab set failed");
}
else
snprintf(buf, len, "default pointer_grab set");
ewl_widget_destroy(win);
return ret;
}
/*
* Set a window as a override and check that the value is set.
*/
static int
test_override_set_get(char *buf, int len)
{
Ewl_Widget *win;
int ret = 0;
win = ewl_window_new();
if (!ewl_window_override_get(EWL_WINDOW(win))) {
ewl_window_override_set(EWL_WINDOW(win), TRUE);
if (ewl_window_override_get(EWL_WINDOW(win))) {
ewl_window_override_set(EWL_WINDOW(win), FALSE);
if (!ewl_window_override_get(EWL_WINDOW(win)))
ret = 1;
else
snprintf(buf, len, "override unset failed");
}
else
snprintf(buf, len, "override set failed");
}
else
snprintf(buf, len, "default override set");
ewl_widget_destroy(win);
return ret;
}
|
/**
 * Deliver the given input to the listener as a positive prompt result.
 * @param input the text entered by the user
 */
private void promptPositive(String input) {
if (promptListener != null) {
promptListener.OnPrompt(input);
}
} |
// ent/schema/repository.go
package schema
import (
"time"
"entgo.io/ent"
"entgo.io/ent/schema/edge"
"entgo.io/ent/schema/field"
"github.com/google/uuid"
)
// Repository holds the schema definition for the Repository entity.
type Repository struct {
ent.Schema
}
// Fields of the Repository.
func (Repository) Fields() []ent.Field {
return []ent.Field{
field.UUID("id", uuid.UUID{}).Default(func() uuid.UUID { return uuid.Must(uuid.NewRandom()) }),
field.String("github_id").MaxLen(255).NotEmpty(),
field.String("owner").MaxLen(255).NotEmpty(),
field.String("name").MaxLen(255).NotEmpty(),
field.Text("description").Optional(),
field.Int64("total_pr").Default(0).NonNegative(),
field.Int64("total_issue").Default(0).NonNegative(),
field.Bool("get_pull_request").Comment("PR情報を取得したかどうか(0:未取得 1:取得済み)").Default(false),
field.Bool("get_issue").Comment("Issue情報を取得したかどうか(0:未取得 1:取得済み)").Default(false),
field.Time("created_at").Default(time.Now),
field.Time("updated_at").Default(time.Now),
field.Time("pushed_at").Default(time.Now),
}
}
// Edges of the Repository.
func (Repository) Edges() []ent.Edge {
return []ent.Edge{
edge.To("pull_requests", PullRequest.Type).StorageKey(edge.Column("repository_id")),
edge.To("issues", Issue.Type).StorageKey(edge.Column("repository_id")),
}
}
|
/**
 * @format
 * @file compiler - the compile entry point
 */
import { BaseTransformer, ITransformPlugin } from './transformer/base';
import { JSXGenerator } from './generator/jsx-gen';
import { ImgGenerator } from './generator/img-gen';
import { Parser } from './parser';
interface ICompilerOptions {
generator: JSXGenerator | ImgGenerator;
content: string;
name: string;
plugins: ITransformPlugin[];
}
export const compiler = (options: ICompilerOptions) => {
const parser = new Parser(options.content);
const info = parser.process();
const transformer = new BaseTransformer({
info,
plugins: options.plugins,
});
const transformed = transformer.process();
return options.generator.process({
name: options.name,
...transformed,
});
};
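// Usage sketch (illustrative only, not from the original file): the exact
// constructor arguments of JSXGenerator live in ./generator/jsx-gen, so the
// call below only shows the shape of the options object that compiler expects.
//
//   const jsx = compiler({
//     generator: new JSXGenerator(),
//     content: '<svg>...</svg>',
//     name: 'IconArrow',
//     plugins: [],
//   });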
|
// Bucket is a domain model that describes bucket entity
public class Bucket {
@Expose
@SerializedName("id")
public String id;
@Expose
@SerializedName("name")
public String name;
@Expose
@SerializedName("created")
public String created;
@Expose
@SerializedName("hash")
public long hash;
@Expose
@SerializedName("isDecrypted")
public boolean isDecrypted;
@Expose
@SerializedName("isStarred")
public boolean isStarred;
} |
import java.lang.reflect.Method;

import org.aspectj.lang.JoinPoint;
import org.aspectj.lang.annotation.Before;
import org.aspectj.lang.annotation.Pointcut;
import org.aspectj.lang.reflect.MethodSignature;

/**
 * @author woxigousade
 * @date 2021/8/19
 */
public class RequestSentinelAspect {
@Pointcut("execution(* com.gousade..*.controller.*.*(..))")
public void requestSentinelPointcut() {
}
@Before("requestSentinelPointcut()")
public void requestSentinel(JoinPoint joinPoint) {
MethodSignature signature = (MethodSignature) joinPoint.getSignature();
Method method = signature.getMethod();
//rateLimiterService.checkLimiter(method, joinPoint.getArgs().hashCode());
}
} |
An investigation of VLSI interconnect failure due to subtractive metal defects
Early failures in VLSI metal interconnects due to subtractive defects are analyzed using a competing risks model. The model considers concurrent failure mechanisms and predicts a bimodal cumulative failure distribution for test structures containing intentional defects. Experimental data for three different metallizations show that, if the defect mechanism dominates as the cause of early failures, the distribution is bimodal. Test structures consist of 1000-µm-long by 3-µm-wide metal stripes. Defect test structures contain semicircular defects located midway along the stripe which remove 50% and 80% of the stripe width, respectively. All tests are performed at 200°C and at a current density in the 10^6 A/cm^2 range. Stress voids occurring at grain boundaries are modeled experimentally as test structures with defects located at grain boundaries, and the results show that void/grain-boundary interaction produces a dominant failure mechanism and results in significant early failures. |
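For reference, the competing-risks construction the abstract relies on can be written compactly (standard textbook form, not quoted from the paper): if the defect-driven early-failure mechanism and the intrinsic wearout mechanism act independently with cumulative failure distributions F_d(t) and F_w(t), the observed cumulative distribution is F(t) = 1 - [1 - F_d(t)][1 - F_w(t)], which shows the bimodal shape described above when the two mechanisms act on well-separated time scales.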
import logging
import os
import hydra
import pandas as pd
import requests
from omegaconf import DictConfig
def download_csv_file(url_path_to_csv_file: str) -> bytes:
    """
    Given a URL that links to a CSV file, download the file and return
    its raw contents as bytes.
    """
logger = logging.getLogger(__name__)
if not url_path_to_csv_file.startswith("http"):
url_path_to_csv_file = f"http://{url_path_to_csv_file}"
response = requests.get(url_path_to_csv_file, verify=False)
csv_content = response.content
logger.info(f"Downloaded CSV data from: {url_path_to_csv_file}")
return csv_content
def process_raw_data(raw_data_filepath: str, processed_data_filepath: str) -> None:
"""
Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger = logging.getLogger(__name__)
raw_data = pd.read_csv(raw_data_filepath, skiprows=1)
    for column in ["Volume USD", "unix", "symbol"]:
        try:
            raw_data.drop(column, axis=1, inplace=True)
        except KeyError as e:
            logger.warning(
                f"Unable to drop column from the raw dataframe: {column} | {e}")
raw_data = raw_data.rename(
{"date": "TimeStamp", "Volume ETH": "Volume_ETH", "close": "CurrentClose"},
axis=1,
)
raw_data["TimeStamp"] = pd.to_datetime(raw_data["TimeStamp"])
target = raw_data["CurrentClose"].shift(-1)
raw_data["NextClose"] = target
raw_data.dropna(inplace=True, axis=0)
raw_data.set_index("TimeStamp", inplace=True)
raw_data.to_csv(processed_data_filepath, index=None)
return None
@hydra.main(config_path="../../configs/data", config_name="data")
def main(cfg: DictConfig):
csv_data = download_csv_file(url_path_to_csv_file=cfg.web_url)
logging.info(f"csv_data type: {type(csv_data)}")
required_directories = [
cfg.raw_file_directory,
cfg.processed_file_directory,
]
for directory in required_directories:
if not os.path.isdir(directory):
os.makedirs(directory)
raw_data_filepath = os.path.join(cfg.raw_file_directory, "raw_data.csv")
processed_data_filepath = os.path.join(
cfg.processed_file_directory, "processed_data.csv"
)
    with open(raw_data_filepath, "wb") as csv_file:
        csv_file.write(csv_data)
process_raw_data(
raw_data_filepath=raw_data_filepath,
processed_data_filepath=processed_data_filepath,
)
logging.info(f"Raw Data File: {raw_data_filepath}")
logging.info(f"Processed Data File: {processed_data_filepath}")
if __name__ == "__main__":
log_fmt = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
logging.basicConfig(level=logging.INFO, format=log_fmt)
main()
logging.info("Done!")
|
from typing import Any, Optional


def notify(state: Any, notification_name: Optional[str] = None):
    pass |
from typing import Callable, Iterator, TypeVar

T = TypeVar("T")


def generator_chain(initial_data: T, *factories: Callable[[T], Iterator[T]]) -> Iterator[T]:
    """Chain generator factories depth-first, backtracking when one is exhausted."""
generator_count = len(factories)
if generator_count == 0:
yield initial_data
return
generators = [None] * generator_count
next_data = initial_data
generator_index = 0
while True:
try:
while generator_index < generator_count:
if generators[generator_index] is None:
generators[generator_index] = factories[generator_index](next_data)
next_data = next(generators[generator_index])
generator_index += 1
yield next_data
generator_index -= 1
except StopIteration:
generators[generator_index] = None
generator_index -= 1
if generator_index < 0:
break |
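# A minimal usage sketch for generator_chain above (hypothetical factories,
# not from the original source): each factory receives the value produced so
# far and yields candidates for the next stage; the chain walks depth-first
# and backtracks when a stage is exhausted.
def plus_one_and_two(n):
    yield n + 1
    yield n + 2

def times_ten(n):
    yield n * 10

# 0 branches to {1, 2}; each branch is then multiplied by ten.
assert list(generator_chain(0, plus_one_and_two, times_ten)) == [10, 20]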
For someone with such a painfully distinctive style and voice, Mark Kozelek’s songwriting sure has evolved a lot over the years since the early days of Red House Painters. In recent years the slowcore founding father’s transformation has been especially evident, and nowhere is that truer than “Ben’s My Friend,” the closing track from the forthcoming Sun Kil Moon album Benji. Kozelek has always leaned toward the plainspoken as a lyricist — my favorite song of his, 2003’s “Glenn Tipton,” is basically a series of detail-laden short stories from a serial killer’s point of view — but in recent years he’s taken a turn for the hyperliteral and autobiographical, with songs such as “Sunshine In Chicago” functioning as play-by-play travelogues. That’s truer than ever on “Ben’s My Friend.” As with the other songs we’ve heard from Benji, especially “Richard Ramirez Died Today Of Natural Causes,” Kozelek’s adopted the syllable-cramming cadence of his trusted muse Modest Mouse’s Isaac Brock, but this time out Kozelek’s traded Brock’s quirky, effects-laden guitar squeals for saxophone-infused midtempo lounge music out of Destroyer’s Kaputt. It exists in an entirely different sonic universe from the stone-faced melodrama of Red House Painters’ “Have You Forgotten” or even the spare acoustic plucking of Sun Kil Moon’s Modest Mouse covers album. There are a lot of ways to sing a sad song with an acoustic guitar!
Kozelek’s latest sonic realm is the background for a story about Kozelek going to see the Postal Service. He feels old amongst a crowd of 8,000 twentysomethings, and tinges of jealousy spring up when he thinks back to when he first met Ben Gibbard at a festival in Spain in 2000, when his band was outdrawing Gibbard’s. Hearing a line like “The other night, I saw the Postal Service/ Ben’s my friend, but getting there was the worst” in a song rather than an interview is the indie rock equivalent of Curb Your Enthusiasm, an inside look at the relational strains and tensions in a world most of us rarely glimpse. Somehow, though, I’d be willing to wager the frustrated and wistful feelings Kozelek bares here are a lot more true-to-life than a fictionalized Larry David farce. Stream this strange and beautiful song below.
Sun Kil Moon – “Ben’s My Friend”
Benji is out 2/4 via Caldo Verde. |
// bartuatabek/katamino: src/main/java/ModeSelectionController.java
import javafx.fxml.FXML;
import javafx.fxml.FXMLLoader;
import javafx.fxml.Initializable;
import javafx.scene.input.MouseEvent;
import javafx.scene.layout.AnchorPane;
import kataminoLongButton.KataminoLongButton;
import java.io.IOException;
import java.net.URL;
import java.util.ResourceBundle;
public class ModeSelectionController implements Initializable {
@FXML
private KataminoLongButton kataminoArcadeButton;
@FXML
private KataminoLongButton kataminoCustomButton;
@FXML
private AnchorPane root;
@Override
public void initialize(URL location, ResourceBundle resources) {
kataminoArcadeButton.setButtonName("Classic (Arcade) Mode");
kataminoCustomButton.setButtonName("Custom Board Mode");
}
@FXML
public void arcadeSelected(MouseEvent event) throws IOException {
AnchorPane pane = FXMLLoader.load(getClass().getResource("playerSelection.fxml"));
root.getChildren().setAll(pane);
}
@FXML
public void customSelected(MouseEvent event) throws IOException {
AnchorPane pane = FXMLLoader.load(getClass().getResource("customMenu.fxml"));
root.getChildren().setAll(pane);
}
@FXML
public void backButtonClicked(MouseEvent event) throws IOException {
AnchorPane pane = FXMLLoader.load(getClass().getResource("mainMenu.fxml"));
root.getChildren().setAll(pane);
}
}
|
def edit_list_node(self, title):
itemlist = self.gui.tree.get_subitems(self.item)
maxlen = len(itemlist)
modified = False
edt, newitemlist = self.gui.show_dialog(gui.ListDialog, title, itemlist)
if edt:
for ix, item in enumerate(newitemlist):
if ix < maxlen:
if item != itemlist[ix]:
modified = True
self.gui.tree.set_itemtext(itemlist[ix], item)
else:
modified = True
newnode = self.gui.tree.add_to_parent(item, self.item)
if self.item.text(0) == 'rules':
ruletype = None
for rtype, name in [(x, y[0]) for x, y in ed.RTYPES.items()]:
if name == item:
ruletype = rtype
break
if ruletype is None:
continue
for name in sorted([x for x in ed.init_ruledata(ruletype)]):
subnode = self.gui.tree.add_to_parent(name, newnode)
        test = len(newitemlist)
        if test < maxlen:
            modified = True
            # remove leftover subitems beyond the new list, last first,
            # down to and including index `test`
            for ix in range(maxlen - 1, test - 1, -1):
                self.gui.tree.remove_subitem(self.item, ix)
return modified |
/**
* @author: 肖学进
* @date: 2018年6月24日 上午11:58:18
*/
package com.jinlong.system.web.controller.main; |
News
With just hours until polls close in California, the crucial Democratic presidential contest between Hillary Clinton and Bernie Sanders appears to be tightening. On the Republican side, the unopposed presumptive GOP nominee Donald Trump is trying to show that he can consolidate the Republican electorate behind his candidacy.
Both make for interesting contests, albeit for completely different reasons.
The results of the online exit poll show Hillary leading in absentee votes by 10 points.
As a core part of the Capitol Weekly Absentee Voter Exit Poll, we sought to develop a tool that would allow us to survey enough of the early electorate to track support at the statewide level for each candidate and obtain similar results from each of California’s 53 congressional districts. We seek to give our readers a sense of which districts are solid for a candidate and where the battlegrounds lie, based solely on absentee voters who have returned ballots.
With more than 35,000 respondents overall who have completed the survey, we definitely have the size and depth to provide both a Republican and a Democratic presidential exit poll of absentee voters broken down by congressional district.
Here are our findings, subject to some caveats described below.
Democrats: A Late Surge, or More of the Same?
Both Hillary Clinton and Bernie Sanders can look at the polling and find challenges. See Online Survey Results Here.
The results of the online exit poll show Hillary leading in absentee votes by 10 points. This does not predict that she is going to win by that margin, but it gives us a sense of the current state of the race based on ballots already cast, and the starting point for each campaign as polls open at 7 a.m. on Tuesday morning.
Statewide Result
Hillary Clinton 55
Bernie Sanders 45
This result is based on at least 21,554 respondents, weighted by geography, party registration, age, ethnicity and gender to match the voters who have already cast ballots as of June 4, according to Political Data Inc.
The online version provides a congressional-level breakdown, is weighted to age and will continue to collect survey responses through Election Day, accounting for some variations.
Much like traditional exit polls this year, we found that groups with stronger support for Sanders (younger, more male) were over-represented among our respondent pool relative to the demographics of the actual universe of voters who had returned their ballots.
While topline results can be informative, it is deeper within the data that we see some of the trends that have defined this contest for months. As the following two charts show, income and age are key factors for each campaign. Clinton’s strongest support is older and higher income; Sanders’ younger and less affluent.
Additionally, Clinton is showing a greater lead among women who have returned their absentee ballots, along with a nearly 40% lead among African Americans.
The current strongholds for each candidate reflect these variables. They are areas that can easily be identified as seats where income, age and ethnicity come into play and offer a distinctive character.
San Diego County, thus far among people who have voted, appears to be the greatest stronghold for Clinton.
We also find Clinton getting 55%-to-58% of the early vote in the Bay Area congressional districts and narrower margins in the LA-area. Sanders’ best regions are the far North Coast and the Central Coast. In the Central Valley, we found great disparity on a district-by-district basis.
Best Performing Hillary Clinton Districts
CA36 Palm Desert (Raul Ruiz- D) 71%
CA51 San Diego (Juan Vargas- D) 68%
CA52 La Jolla (Scott Peters- D) 65%
CA43 Los Angeles (Maxine Waters- D) 64%
CA53 San Diego (Susan Davis- D) 63%

Best Performing Bernie Sanders Districts
CA24 Santa Barbara (Lois Capps- D) 59%
CA10 Atwater (Jeff Denham- R) 59%
CA1 Richvale (Doug LaMalfa- R) 63%
CA20 Carmel (Sam Farr- D) 63%
CA40 Los Angeles (Lucille Roybal-Allard- D) 67%
Turnout appears high on the Democratic side, with 2.8 million votes cast and reported to Political Data Inc as of the Saturday before the election.
For comparison, the Saturday prior to the 2014 General Election had 2.5 million, in the 2012 Primary it was 2 million, and in the 2008 Presidential Primary there were only 1.8 million votes cast by that time.
This high turnout comes on the heels of a massive new voter registration with more than 2.3 million new and re-registrations.
Each of these factors could be the kindling and the spark prior to Tuesday’s presidential Primary election.
But, at the same time, the electorate that has turned out so far appears to be very similar to the trends we have seen in the past.
The turnout of independent voters, those with no party preference, appears low, as one would expect in a primary election.
And because of a problem with these voters getting Democratic ballots, some strong Sanders supporters might be getting lost in a paper shuffle. Thus, the independent share of the Democratic presidential primary electorate will be considerably lower than the overall independent primary vote – a factor for which not all recent polls have accounted.
If all independent absentee voters had received Democratic presidential primary ballots automatically, we suspect the Clinton advantage would have been narrowed a bit. Our poll identified around 800 No Party Preference absentee voters who wanted to vote in the Democratic primary but mailed in their ballot without having made the needed request. They favored Sanders by around a 3:2 margin.
Added to this is the fact that young voters, those who accounted for a majority of the new voter registration surge, only account for 10% of the votes returned thus far.
At the same time, Latinos, a strong part of the Clinton coalition in other states, also appear to be participating at about half-strength (typical for primaries), and polling suggests that Sanders is cutting into, or taking the lead with, these voters – as they are disproportionately younger.
Sanders has maintained that he will win the state even though the demographics are not a strong match for the states in which he’s been strongest. These results indicate it will require a primary election day voter turnout considerably larger than any we’ve seen recently, among groups who historically are the least likely primary voters.
A Republican Attempt to Consolidate
On the Republican side, we can use this tool to better understand Trump’s ability to consolidate different kinds of Republican voters – from rural to urban to suburban districts, and dive into support from different income brackets, ages, gender and ethnicities. See the Online Survey Results here.
Statewide, as expected, the vast majority of Republicans are supporting Trump, with some voters casting protest votes for candidates no longer in the race, or stating that they wrote in another name, with Marco Rubio earning the spot as the top write-in.
Statewide Result
Donald Trump 80
Ted Cruz 6
John Kasich 11
Ben Carson 2
Jim Gilmore 1
This result is weighted for age and gender, and based on the results as of Saturday, June 4. The online version is only weighted by age and will continue to update as additional voters complete surveys, accounting for some variation.
The Trump map shows strength in the northern and central portions of the state where the Republicans are white and rural, some urban seats where the Republicans are blue-collar working class whites. His weaknesses are in areas with highly educated white voters, like parts of the Bay Area and areas where there are heavy minority populations, but where working class white voters have left and some high income pockets remain.
A closer look at the districts where Trump is out-performing Mitt Romney reveals that they are some of the districts with the whitest and lowest income residents, areas with high unemployment, and potentially more disaffected Republicans who have felt left out of the state’s economic recovery.
Trump Congressional-Level Results
BEST PERFORMING
CA44 Los Angeles (Janice Hahn- D) 88%
CA36 Los Angeles (Tony Cardenas- D) 87%
CA43 Santa Ana (Loretta Sanchez- D) 85%
CA29 Palm Desert (Raul Ruiz- D) 83%
CA22 Tulare (Devin Nunes- R) 82%

WORST PERFORMING
CA12 San Francisco (Nancy Pelosi- D) 53%
CA37 Los Angeles (Karen Bass- D) 60%
CA13 Oakland (Barbara Lee- D) 61%
CA11 Concord (Mark DeSaulnier- D) 63%
CA19 San Jose (Zoe Lofgren- D) 63%
It is also possible that the districts where he is doing best are those with the most conservative Republican voters, while those with more centrist or even liberal views are casting protest votes, or choosing to not vote at all. As the survey shows, this ideological divide is the greatest we found in the survey.
The real test for Trump is whether he can use this Tuesday’s primary election as proof that he can consolidate voters behind his candidacy.
Looking at past Republican and Democratic presidential primaries, the most recent comparison might be the 2012 Mitt Romney campaign.
In that election, Ron Paul had suspended his campaign while voters in this state were already casting ballots, and at the time of the primary he was making one final push with grassroots supporters attempting to give him a win that could restart a serious candidacy. Despite some opposition, Romney still won 79.5% of the vote.
More similar to the current situation: In 1988 George H. W. Bush had hit the delegate threshold to win the nomination and earned 83% of the vote against Bob Dole and Pat Robertson who had already suspended their campaigns.
Then, four years later, Bush came to California facing another challenge from Pat Robertson, and in this election he only obtained 74% of the vote, despite having already locked up the nomination.
This second election, and his inability to consolidate Republican voters, foreshadowed his general election loss to Bill Clinton.
Currently, we have Trump slightly exceeding the Romney vote percentage, but below the 1988 successful vote percentage of the elder Bush.
Trump’s campaign will have to spin the evening’s result, whatever it is, into a story of a consolidating electorate while the Democrats are likely going to be pointing to the large protest vote, and lack of support in some key districts, as a sign that he cannot unite the GOP going into the fall.
—
Ed’s Note: Updates to reflect increase in respondents to absentee voter survey to 35,000 overall, 4th graf; updates respondents on Democratic side to 21,554. Pollster Jonathan Brown, a regular contributor to Capitol Weekly’s CA120 column, is the president of Sextant Strategies. Paul Mitchell, vice president of Political Data Inc., and Alan Nigel Yan, an intern from UC Berkeley, assisted with this story. |
import java.util.*;
import java.io.*;
/**
*
* @author i
*/
public class Task784E {
/**
* @param args the command line arguments
*/
public static void main(String[] args) {
Scanner in = new Scanner(new BufferedReader(new InputStreamReader(System.in)));
boolean a = in.nextByte() == 1;
boolean b = in.nextByte() == 1;
boolean c = in.nextByte() == 1;
boolean d = in.nextByte() == 1;
boolean a1 = a ^ b;
boolean a2 = c | d;
boolean a3 = c & b;
boolean a4 = d ^ a;
boolean x = a1 & a2;
boolean y = a3 | a4;
System.out.println((x ^ y) ? 1 : 0);
}
}
|
package main
import (
"encoding/json"
"fmt"
)
// PrettyPrint .. well it prints things pretty see..
func PrettyPrint(v interface{}) (err error) {
b, err := json.MarshalIndent(v, "", " ")
if err == nil {
fmt.Println(string(b))
}
return
}
// AtlasServers struct contains all the knowledge about unofficial and official
// servers
type AtlasServers struct {
Official []Realm `json:"official"`
Unofficial []Realm `json:"unofficial"`
}
// Realm contains a name (such as NAPVE) and a collection of grid servers
type Realm struct {
RealmName string `json:"realm"`
Grids []Grids `json:"grids"`
}
// Grids is a struct for a grid server (such as A1) containing the QueryInfo and
// Config structs
type Grids struct {
Grid string `json:"Grid"`
Info `json:"info"`
Config `json:"config"`
Players []Player
}
// Player object with steam name and time in zone
type Player struct {
PlayerName string `json:"PlayerName"`
PlayTime string `json:"PlayTime"`
}
// Config struct contains all the information we will need to interact with a
// server/grid
type Config struct {
AtlasIP string `env:"ATLASIP" envDefault:"172.16.58.3" json:"AtlasIP"` // ATLASIP=172.16.58.3
//AtlasMaxPlayers int `env:"ATLASMAXPLAYERS" envDefault:"10"` // MAXPLAYERS=10
AtlasGamePort int `env:"ATLASGAMEPORT1" envDefault:"27005" json:"AtlasGamePort"` // GAMEPORT1=27005
AtlasGamePortAlt int `env:"ATLASGAMEPORT2" envDefault:"27006" json:"AtlasGamePortAlt"` // GAMEPORT2=27006
AtlasQueryPort string `env:"ATLASQUERYPORT" envDefault:"27015" json:"AtlasQueryPort"` // ATLASQUERYPORT=27015
AtlasRCONPort string `env:"ATLASRCONPORT" envDefault:"27025" json:"AtlasRCONPort"` // RCONPORT=27025
AtlasSeamlessPort int `env:"ATLASSEAMLESSPORT" envDefault:"27020" json:"AtlasSeamlessPort"` // SEAMLESSPORT=27020
AtlasAdminPass string `env:"ATLASADMINPASS" envDefault:"changeme" json:"AtlasAdminPass"` // ADMINPASS=<PASSWORD>
//AtlasRCON bool `env:"ATLASRCON" envDefault:false` // RCON=false
//AtlasResPlayers int `env:"ATLASRESPLAYERS" envDefault:"0"` // RESPLAYERS=0
// SLOG=-log
// ALLHOME=-ForceAllHomeServer
// MAP=Ocean
// SVRX=0
// SVRY=0
// Home string `env:"HOME"`
// Port int `env:"PORT" envDefault:"3000"`
// IsProduction bool `env:"PRODUCTION"`
// Hosts []string `env:"HOSTS" envSeparator:":"`
// Duration time.Duration `env:"DURATION"`
// TempFolder string `env:"TEMP_FOLDER" envDefault:"${HOME}/tmp" envExpand:"true"`
}
// Info contains all the information returned from a QueryServer request to an
// Atlas Server
type Info struct {
Name string `json:"Name"` // NAME: Atlas_D6 - (v16.14)
Map string `json:"Map"` // MAP: Ocean
Folder string `json:"Folder"` // FOLDER: atlas
Game string `json:"Game"` // GAME: ATLAS
ID uint16 `json:"ID"` // ID: 0
Players byte `json:"Players"` // PLAYERS: 26
MaxPlayers byte `json:"MaxPlayers"` // MAXPLAYERS: 150
Bot byte `json:"Bot"` // BOTS: 0
ServerType byte `json:"ServerType"` // SERVERTYPE: d
Environment byte `json:"Environment"` // ENVIRONMENT: w
Visibility byte `json:"Visibility"` // VISIBILITY: 0
Vac byte `json:"Vac"` // VAC: 1
Version string `json:"Version"` // VERSION: 172.16.17.32
Port uint16 `json:"Port"` // PORT: 5759
KeyWords string `json:"KeyWords"` // KEYWORDS: @,OWNINGID:90122942757731332,OWNINGNAME:90122942757731332,NUMOPENPUBCONN:124,P2PADDR:90122942757731332,P2PPORT:5759,NONATLAS_i:0
Ping int `json:"Ping"` // PING: 96
}
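// Usage sketch (illustrative, not in the original file): decode a servers
// document and dump it with PrettyPrint; "data" is a hypothetical []byte
// holding the JSON payload.
//
//	var servers AtlasServers
//	if err := json.Unmarshal(data, &servers); err != nil {
//		// handle the error
//	}
//	_ = PrettyPrint(servers) // returns the MarshalIndent error, if any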
|
/// Create a string reader from a raw reader.
pub fn new(inner: R, path: &'a Path) -> Self {
Self {
inner: BufReader::new(inner).lines(),
path,
line: 0,
}
} |
// tiropas/java-game-server
package org.menacheri.jetclient.communication;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelFuture;
import org.jboss.netty.channel.ChannelFutureListener;
import org.menacheri.jetclient.communication.MessageSender.Reliable;
import org.menacheri.jetclient.event.Events;
import org.menacheri.jetclient.event.Event;
/**
* A class that transmits messages reliably to remote machines/vm's. Internally
* this class uses Netty tcp {@link Channel} to transmit the message.
*
* @author <NAME>
*
*/
public class NettyTCPMessageSender implements Reliable
{
private boolean isClosed = false;
private final Channel channel;
private static final DeliveryGuaranty DELIVERY_GUARANTY = DeliveryGuaranty.DeliveryGuarantyOptions.RELIABLE;
public NettyTCPMessageSender(Channel channel)
{
super();
this.channel = channel;
}
@Override
public Object sendMessage(Object message)
{
return channel.write(message);
}
@Override
public DeliveryGuaranty getDeliveryGuaranty()
{
return DELIVERY_GUARANTY;
}
public Channel getChannel()
{
return channel;
}
public synchronized void close()
{
if (isClosed)
return;
ChannelFuture closeFuture = channel.close();
closeFuture.awaitUninterruptibly();
if (!closeFuture.isSuccess())
{
System.err.println("TCP channel " + channel.getId()
+ " did not close successfully");
}
isClosed = true;
}
/**
 * Writes an event, typically the {@link Events}.CLOSE event, to the client, flushes
* all the pending writes and closes the channel.
*
* @param closeEvent
*/
public void close(Event closeEvent)
{
closeAfterFlushingPendingWrites(channel, closeEvent);
}
/**
* This method will write an event to the channel and then add a close
* listener which will close it after the write has completed.
*
* @param channel
* @param event
*/
public void closeAfterFlushingPendingWrites(Channel channel, Event event)
{
if (channel.isConnected())
{
channel.write(event).addListener(ChannelFutureListener.CLOSE);
}
else
{
System.err.println("Unable to write the Event :" + event
+ " to socket as channel is ot connected");
}
}
@Override
public String toString()
{
String channelId = "TCP channel with Id: ";
if (null != channel)
{
channelId += channel.getId().toString();
}
else
{
channelId += "0";
}
String sender = "Netty " + channelId;
return sender;
}
}
|
/**
* abstract class to share common attributes and methods between getter and setter
* @author Christian Chevalley
*
*/
public abstract class ConfigHandler {
protected XmlObject docroot;
protected String namespace;
protected String queryexp;
public ConfigHandler(XmlObject root){
this.docroot = root;
if (docroot != null){
this.namespace = root.getDomNode().getNamespaceURI();
if (namespace == null)
this.queryexp = "$this/";
else
this.queryexp = "declare namespace ns = '"+namespace+"'; ";
}
}
/**
* can be used to set a new doc root<p>
* @param root a document
*/
public void setDocRoot(XmlObject root){
this.docroot = root;
if (docroot != null){
this.namespace = root.getDomNode().getNamespaceURI();
if (namespace == null)
this.queryexp = "$this/";
else
this.queryexp = "declare namespace ns = '"+namespace+"'; ";
}
}
} |
// generate the V2 key from the index fields defined
// in the schema.
void ObjectId::setV2Key(const ManagementObject& object)
{
stringstream oname;
oname << object.getPackageName() << ":" << object.getClassName() << ":" << object.getKey();
v2Key = oname.str();
} |
def _get_include_info(self, play_ds, basedir, existing_vars={}):
new_vars = existing_vars.copy()
tokens = split_args(play_ds.get('include', ''))
for t in tokens[1:]:
try:
(k,v) = unquote(t).split("=", 1)
new_vars[k] = template(basedir, v, new_vars)
        except ValueError as e:
raise errors.AnsibleError('included playbook variables must be in the form k=v, got: %s' % t)
return (new_vars, unquote(tokens[0])) |
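# A worked example of the parsing above (hypothetical values): the first
# token is taken as the included file name and each later k=v token is
# templated into the returned variable dict.
#
#   _get_include_info({'include': 'setup.yml user=alice port=22'}, basedir)
#   -> ({'user': 'alice', 'port': '22'}, 'setup.yml')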
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.yarn.failover;
import org.apache.flink.yarn.YarnTestBase;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.util.EnumSet;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* JobMaster failover test batch case with finished tasks.
*/
public class BatchWithFinishedTaskITCase extends YarnJobMasterFailoverTestBase {
private static final Logger LOG = LoggerFactory.getLogger(BatchWithFinishedTaskITCase.class);
private static final String logDir = "jm-failover-BatchWithFinishedTaskITCase";
@BeforeClass
public static void setup() {
YarnJobMasterFailoverTestBase.startHighAvailabilityService();
YARN_CONFIGURATION.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class);
YARN_CONFIGURATION.set(YarnTestBase.TEST_CLUSTER_NAME_KEY, logDir);
YARN_CONFIGURATION.setInt(YarnConfiguration.NM_PMEM_MB, 4096);
startYARNWithConfig(YARN_CONFIGURATION);
}
@Test
public void test() throws Exception {
final Pattern jmCheckerBefore = Pattern.compile("Map.*switched from DEPLOYING to RUNNING");
final Pattern jmCheckerAfter = Pattern.compile("Job.*switched from state RECONCILING to RUNNING.");
final Pattern tmChecker = Pattern.compile("(Source|Sink).*switched from RUNNING to (FAILED|CANCELLING)");
final Runner runner = startSession();
waitUntilCondition(() -> getRunningContainers() >= 2, TIMEOUT.fromNow());
final YarnClient yarnClient = getYarnClient();
Assert.assertNotNull(yarnClient);
Assert.assertEquals(1, yarnClient.getApplications(EnumSet.of(YarnApplicationState.RUNNING)).size());
final ApplicationReport report1 = yarnClient.getApplications(EnumSet.of(YarnApplicationState.RUNNING)).get(0);
Assert.assertEquals(1, report1.getCurrentApplicationAttemptId().getAttemptId());
final ApplicationId id = report1.getApplicationId();
submitJob("org.apache.flink.yarn.failover.BatchCaseWithFinishedTask", "batch-with-finished-task");
waitUntilCondition(
() -> {
final File jmLog = findFile("..", (dir, name) ->
name.contains("jobmanager.log") && dir.getAbsolutePath().contains("_01_")
&& dir.getAbsolutePath().contains(logDir)
&& dir.getAbsolutePath().contains(fmt.format(id.getId())));
if (jmLog != null) {
final String jmLogText = FileUtils.readFileToString(jmLog);
final Matcher m = jmCheckerBefore.matcher(jmLogText);
// match 2 times, all Map nodes running
return m.find() && m.find();
}
return false;
},
TIMEOUT.fromNow());
// trigger kill
killJobMaster();
waitUntilCondition(
() -> 2 == yarnClient.getApplicationReport(id).getCurrentApplicationAttemptId().getAttemptId(),
TIMEOUT.fromNow());
Assert.assertEquals(report1.getTrackingUrl(), yarnClient.getApplicationReport(id).getTrackingUrl());
waitUntilCondition(
() -> {
final File jmLog = findFile("..", (dir, name) ->
name.contains("jobmanager.log") && dir.getAbsolutePath().contains("_02_")
&& dir.getAbsolutePath().contains(logDir)
&& dir.getAbsolutePath().contains(fmt.format(id.getId())));
if (jmLog != null) {
final String jmLogText = FileUtils.readFileToString(jmLog);
final Matcher m = jmCheckerAfter.matcher(jmLogText);
return m.find();
}
return false;
}, TIMEOUT.fromNow());
final File tmLog = findFile("..", (dir, name) ->
name.contains("taskmanager.log") && dir.getAbsolutePath().contains("_01_000003")
&& dir.getAbsolutePath().contains(logDir)
&& dir.getAbsolutePath().contains(fmt.format(id.getId())));
Assert.assertNotNull(tmLog);
final Matcher m = tmChecker.matcher(FileUtils.readFileToString(tmLog));
// no failover
Assert.assertFalse(m.find());
yarnClient.killApplication(id);
runner.sendStop();
// wait for the thread to stop
runner.join();
}
}
|
Onion pakoda recipe or onion pakora recipe with step by step photos. Onion pakoda are deep fried onion fritters with Indian spices. These onion fritters made from gram flour are Indian street food or snack loved by all.
Onion pakoda recipe is made with chickpea flour, rice flour, salt, carom seeds, chili powder. Pakora is a deep fried snack made with different vegetables. In onion pakoda recipe onion is used as main ingredient. Here in Tamil nadu onion pakora, onion bajji, milagai bajji, masal vada, medu vada, onion samosa are all time favorite street foods. These crispy spicy onion pakoda are eternal favorite among Indians.
Onion pakoda is easy to prepare in 10 minutes. You can easily make this snack for sudden guests. These onion pakora are perfect tea time snack too. A perfect onion pakoda platter with good masala tea is ideal for cold winters. We love onion pakora and I make them when we crave some special snack.
Here I am sharing this perfect onion pakoda recipe that is crispy and flavorful. This onion pakora recipe does not soak up oil. Also since I have added some carom seeds these pakodas are comparatively light on tummy. I usually add some rice flour while making pakoda with onion or any other vegetable. Adding rice flour makes the pakoda crunchy and also retains the crunch. If you want to store the pakoda for 2-3 days then do use rice flour. You can also use semolina or corn flour instead of rice flour.
Few alternate recipe for onion pakoda is to use wheat flour instead of chickpea flour. Onion pakoda without gram flour does taste good but different. If you prefer gluten free pakoda then gram flour is the best choice.
To make onion pakoda recipe, big onions are used. Peel and slice them as thin as possible. I like to use a generous amount of green chili and curry leaves in my onion pakora recipe. In some recipes fennel seeds are also added. If you want to avoid oil, these onion pakoras can be baked. Just spread them as a single layer on a well-greased cookie sheet. Give a quick spray of oil. Bake for 15-20 minutes at 180C. I have tried baking cashew pakodas this way. I guess the same will work for this recipe too.
Serve onion pakoda with tea, coffee. You can also use this pakoda to make kadhi pakodi.
If you are looking for more snack recipes, do check gobi pakoda, cashew pakoda and cabbage pakoda recipes.
How to make onion pakoda recipe
Heat oil for deep frying.
In a mixing bowl, take sliced onions, chopped green chilies, crushed ginger, curry leaves. Mix well. Squeeze the onions gently. Set aside for 5 minutes.
Add rice flour, gram flour, carom seeds, salt and mix well.
Take around 3 teaspoons of hot oil and add to this mixture. Now sprinkle 2-3 teaspoons of water at a time and make a tight dough-like mixture.
Now take some dough and add it to the hot oil in a sprinkling motion. Deep fry until golden brown on medium flame. Do not add too many pakoras at a time. There should be enough space for the pakodas. Also do not cook on high flame.
Fry until the sizzling sound is reduced and pakodas are golden and crispy.
Drain excess oil on a kitchen tissue or strainer. Serve onion pakoda hot with tea.
Onion pakoda recipe card below:
Onion pakoda recipe | Vengaya pakoda recipe | Onion fritters

Prep Time: 10 mins | Cook Time: 20 mins | Total Time: 30 mins
Onion pakoda recipe - crispy and spicy Indian onion fritters or pakoda made with onions and gram flour.
Course: Snack | Cuisine: Indian | Servings: 4 | Calories: 212 kcal

Ingredients (1 cup = 250 ml)
3 big onions, sliced
1/2 cup gram flour
1/2 cup rice flour
3 green chilies, chopped
15 curry leaves
1/2 inch ginger, peeled and crushed
1/2 teaspoon chili powder
1/4 teaspoon carom seeds
Salt to taste
Oil

Instructions
1. Heat oil for deep frying.
2. In a mixing bowl, take sliced onions, chopped green chilies, crushed ginger and curry leaves. Mix well. Squeeze the onions gently. Set aside for 5 minutes.
3. Add rice flour, gram flour, carom seeds and salt, and mix well.
4. Take around 3 teaspoons of hot oil and add to this mixture. Now sprinkle 2-3 teaspoons of water at a time and make a tight dough-like mixture.
5. Take some dough and add it to the hot oil in a sprinkling motion. Deep fry until golden brown on medium flame. Do not add too many pakoras at a time; there should be enough space for the pakodas. Do not cook on high flame.
6. Fry until the sizzling sound is reduced and the pakodas are golden and crispy.
7. Drain excess oil on a kitchen tissue or strainer. Serve onion pakoda hot with tea.
Sharing is caring! |
export const AtilaIO = ({ className }: { className: string }) => (
<svg
viewBox="0 270 300 150"
role="img"
aria-label="Atila.io logo"
className={className}
>
<g stroke="none" strokeWidth="1" fill="none" fillRule="evenodd">
<g fillRule="nonzero">
<g transform="translate(0.000000, 280.000000)" fill="#fff">
<path
d="M176.7,26.7 L176.7,21.4 L180.2,21.4 L180.2,26.7 L176.7,26.7 Z M176.7,124 L176.7,33.8 L180.2,33.8 L180.2,124 L176.7,124 Z"
id="XMLID_1744_"
></path>
<path
d="M199.7,1.8 L203.2,1.8 L203.2,110.7 C203.2,114.1 204.2,116.7 206.3,118.7 C208.4,120.7 211.2,121.7 214.6,121.7 C215.9,121.7 217.4,121.6 219.2,121.3 C221,121 222.6,120.6 224.2,120.2 L225.4,123 C223.8,123.6 221.8,124.1 219.4,124.5 C217,124.9 215,125.1 213.4,125.1 C209.3,125.1 206,123.8 203.5,121.2 C200.9,118.6 199.7,115.1 199.7,110.9 L199.7,1.8 L199.7,1.8 Z"
id="XMLID_1742_"
></path>
<g transform="translate(0.000000, 1.000000)">
<polygon
id="XMLID_1740_"
points="107.6 124.3 55.6 5.2 3.8 124.3 0 124.3 53.7 0.9 57.5 0.9 111.2 124.3 107.6 124.3"
></polygon>
</g>
<g transform="translate(119.000000, 2.000000)">
<path
d="M42.5,118.3 C40.8,119.5 39.2,120.3 37.7,120.9 C36.2,121.5 34.5,122 32.6,122.6 C30.6,123.1 28.4,123.4 26,123.4 C21.8,123.4 18.1,122.1 15,119.6 C11.9,117 10.3,113.7 10.3,109.5 L10.3,35.5 L0.7,35.5 L0.7,32 L10.3,32 L10.3,0.4 L13.8,0.4 L13.8,32 L48.4,32 L48.4,35.5 L13.8,35.5 L13.8,109.5 C14,112.9 15.4,115.5 17.8,117.3 C20.2,119.1 23,120 26,120 C28,120 29.8,119.8 31.6,119.4 C33.3,119 34.9,118.5 36.2,118 C37.5,117.5 38.6,117 39.3,116.5 C40.1,116 40.5,115.7 40.6,115.6 L42.5,118.3 Z"
id="XMLID_1737_"
></path>
</g>
<g transform="translate(223.000000, 47.000000)">
<path
d="M33.5,0.7 C15.2,0.7 0.3,18.1 0.3,39.6 C0.3,61.1 15.2,78.5 33.5,78.5 C51.8,78.5 66.7,61.1 66.7,39.6 C66.7,18.1 51.8,0.7 33.5,0.7 Z M33.5,74.6 C17,74.6 3.6,58.9 3.6,39.6 C3.6,20.3 17,4.6 33.5,4.6 C50,4.6 63.4,20.3 63.4,39.6 C63.4,58.9 50,74.6 33.5,74.6 Z"
id="XMLID_1733_"
></path>
<rect x="64" y="1" width="3.5" height="76.1"></rect>
</g>
<rect
x="51.6"
y="89.4"
width="64.4"
height="3.5"
id="a-stroke"
fill="#fff"
></rect>
</g>
</g>
</g>
</svg>
)
|
/**
* Generic error handler, called with the appropriate error code and
* the same closure specified at the creation of the message queue.
* Not every message queue implementation supports an error handler.
*
* @param cls closure, a `struct GNUNET_TESTBED_Controller *`
* @param error error code
*/
static void
mq_error_handler (void *cls,
enum GNUNET_MQ_Error error)
{
GNUNET_log (GNUNET_ERROR_TYPE_ERROR,
"Encountered MQ error: %d\n",
error);
GNUNET_SCHEDULER_shutdown ();
} |
/**
* Tries to communicate with Sensor to start or stop a connection
*/
public void handleMessage(Message message) {
int type = message.arg1;
if (type == START_CONNECTION) {
connectToSensorAndReply((Intent) message.obj);
} else if (type == STOP_CONNECTION) {
stopSensorConnection((Looper) message.obj);
} else if (type == SEND_MESSAGE) {
MagpieEvent ev = processSensorMessage(message);
mSensorService.sendEvent(ev);
}
} |
Commit message: Remove import for graphite responder

Before:
class Responder(object):
def support(message):
pass
def generate(message):
pass
def on_start(self, consumer):
return False
def support(self, request):
return request.content[0:len(self.name())] == self.name()
class Response(object):
def __init__(self, content):
self.content = content
self.tags = []
self.command = ""
def __str__(self):
return self.content
class StreamResponse(Response):
def __init__(self, content):
self.is_completed = False
self.content = content
def stop(self):
self.is_completed = True
def handle(self, request, consumer):
self.is_completed = True
def __str__(self):
return "<StreamResponse>"
from rageface import RagefaceResponder
from flowdock import FlowdockWhoisResponder
from math import MathResponder
from wat import WatResponder
from xkcd import XkcdResponder
from bigbro import BigbroResponder
from ascii import AsciiResponder
from ninegag import NineGagResponder
from link import LinkResponder
from reminder import ReminderResponder
from status import StatusResponder
from help import HelpResponder
from remote import RemoteResponder
from monitor import MonitorResponder
from process import ProcessResponder
from so import SoResponder
from jira_responder import JiraResponder
from graphite import GraphiteResponder
After:
class Responder(object):
def support(message):
pass
def generate(message):
pass
def on_start(self, consumer):
return False
def support(self, request):
return request.content[0:len(self.name())] == self.name()
class Response(object):
def __init__(self, content):
self.content = content
self.tags = []
self.command = ""
def __str__(self):
return self.content
class StreamResponse(Response):
def __init__(self, content):
self.is_completed = False
self.content = content
def stop(self):
self.is_completed = True
def handle(self, request, consumer):
self.is_completed = True
def __str__(self):
return "<StreamResponse>"
from rageface import RagefaceResponder
from flowdock import FlowdockWhoisResponder
from math import MathResponder
from wat import WatResponder
from xkcd import XkcdResponder
from bigbro import BigbroResponder
from ascii import AsciiResponder
from ninegag import NineGagResponder
from link import LinkResponder
from reminder import ReminderResponder
from status import StatusResponder
from help import HelpResponder
from remote import RemoteResponder
from monitor import MonitorResponder
from process import ProcessResponder
from so import SoResponder
from jira_responder import JiraResponder
|
/**
 * make a copy of the criteria
 * @param includeGroupBy if true, copy the group-by clause
 * @param includeOrderBy if true, copy the order-by clause
 * @param includePrefetchedRelationships if true, copy the prefetched relationships
 * @return a copy of the criteria
 */
public Criteria copy(boolean includeGroupBy, boolean includeOrderBy, boolean includePrefetchedRelationships)
{
Criteria copy = new Criteria();
copy.m_criteria = new Vector(this.m_criteria);
copy.m_negative = this.m_negative;
if (includeGroupBy)
{
copy.groupby = this.groupby;
}
if (includeOrderBy)
{
copy.orderby = this.orderby;
}
if (includePrefetchedRelationships)
{
copy.prefetchedRelationships = this.prefetchedRelationships;
}
return copy;
} |
import {argsToAttrs} from "@utils/attributes";
import "@elements/widgets/tooltips/right";
import {Kind as TooltipKind} from "@elements/widgets/tooltips/right";
import {Padding} from "@elements/widgets/tooltips/right";
export default {
title: "Widgets / Tooltips"
}
interface Args {
kind: TooltipKind,
contents: string,
padding:Padding,
}
const DEFAULT_ARGS:Args = {
kind: "success",
contents: "hello",
padding:"small",
}
export const Right = (props?:Partial<Args>) => {
props = props ? {...DEFAULT_ARGS, ...props} : DEFAULT_ARGS;
const {contents, ...tooltipProps} = props
return `<tooltip-right ${argsToAttrs(tooltipProps)}>${contents}</tooltip-right>`;
}
Right.args = DEFAULT_ARGS;
Right.argTypes = {
kind: {
control: {
type: 'inline-radio',
options: ["error", "success", "plain"]
}
}
} |
The Ryazan miracle (or Ryazan affair; Russian names: рязанское чудо "Ryazan miracle", рязанский эксперимент "Ryazan experiment", рязанская авантюра "Ryazan venture") was a scandal resulting from a propaganda campaign in support of the Soviet planned economy and organised by the Communist Party Committee of Ryazan Oblast in 1959. It involved promising and then faking the over-fulfilment of the production plan for meat and milk in Ryazan oblast.[1][2]
Context
On May 22, 1957, Nikita Khrushchev, the Soviet leader at that time, made a speech at a regional meeting of Soviet agroindustry representatives, in which he aired his famous slogan "Catch up and overtake America" (Догнать и перегнать Америку). In this speech Khrushchev promised to overtake the United States in terms of major economic indicators and to complete building communism by 1980. One of the goals stated in this speech was to triple the amount of meat produced in the Soviet Union within the following three years. However, implementation of this goal remained far from the target. A year after the promise, production had not grown and the USSR was still experiencing food shortages.[3] Khrushchev expressed his discontent and, towards the end of 1958, the Central Committee of CPSU issued a circular to obkoms, regional party committees on the oblast level, to take "decisive action" to ensure improvements in meat production in 1959.[4]
Affair
Alexei Larionov, the first secretary of the Ryazan Obkom (the effective head of the region in the Soviet system), announced a very ambitious goal of tripling the amount of meat produced in the region within the next year. The promise, in spite of being unrealistic, was confirmed at the regional party conference. On October 12, 1958, Larionov delivered the promise to Khrushchev in person, who became excited by the initiative.[5] On January 9, 1959, the promise was published in Pravda, the official party newspaper at the time. The publication was rushed by Khrushchev in spite of objections from the Agricultural Department of the Central Committee. The challenge was met by several other regions including Stavropol and Krasnodar. Even before starting its ambitious programme, the Ryazan region received several awards. In February 1959, the region was awarded with the Order of Lenin.[4]
In order to meet the promise, the region had to slaughter all the bovine herd of 1959, as well as a considerable part of its dairy stock. In addition, all cattle reared by kolkhoz farmers in their private households were appropriated "temporarily". As the collected amount was still not enough to meet the target, obkom had to buy meat in neighbouring regions by reallocating funds from other sources, such as the purchase of agricultural tools and construction. On December 16, 1959, Ryazan obkom was able to announce that the region delivered 150,000 tons of meat to the state, which was three times the amount delivered the previous year. On top of this, the regional authorities promised to deliver 180,000 tons the next year.[6]
On December 27, 1959, the success was announced by Khrushchev himself at the CPSU Plenum "On further development of agricultural production" (О дальнейшем развитии сельскохозяйственного производства). Also in December, Larionov was awarded the title of Hero of Socialist Labour.[5]
However, in 1960 production of meat in Ryazan oblast plummeted to 30,000 tons, since mass slaughter had reduced the number of cattle by 65% in comparison to the level of 1958. To make matters worse, kolkhoz farmers whose private cattle were "temporarily" appropriated the year before refused to work the kolkhoz land. This halved the amount of grain produced in Ryazan oblast. By the fall of 1960, it became impossible to hide the affair. In September 1960, Larionov was dismissed from his post, stripped of the title of Hero of Labour. On October 10, 1960, he committed suicide.[7]
Aftermath
Similar events happening on a smaller scale in other regions of the Soviet Union resulted in a statewide drop in agricultural production. Around the same time Khrushchev was obsessed with growing maize and forced its widespread planting. Some party leaders in north-west Russia and the Baltic republics were also eager to report that they were following the party line, even though maize does not grow well in northern regions.
All these events dealt a blow to Khrushchev's image in the Soviet Union. His slogan "Catch up and overtake America" was widely mocked in jokes. The events contributed to his final fall from power in 1964.
See also
The Great Leap Forward in China and the resulting Great Chinese Famine of 1959-1960, which stemmed from similar practices of agricultural planning. |
/**
* Author: Sam O'Leary
* Email: [email protected]
* Created: 28/11/13
* Revision: 2
* Revision History:
* 1: 28/11/13
* 2: 30/11/13
*
* Description:
* This class handles the opening, closing, inserting and retrieving of data and information from the SQLite
* Database.
*/
public class GameDB {
private SQLiteDatabase db;
private final Context context;
private final MyDBhelper dbhelper; // Responsible for the creation of tables.
public GameDB(Context c) {
context = c;
dbhelper = new MyDBhelper(context, Constants.DATABASE_NAME, null, Constants.DATABASE_VERSION);
}
/**
* Closes the connection to the Database
*/
public void close(){
db.close();
}
/**
* Attempts to open a connection to a Database to write to. If this fails an exception is thrown and an attempt to
* connect to a Database to read from is made.
* @throws SQLiteException
*/
public void open() throws SQLiteException {
try {
db = dbhelper.getWritableDatabase();
} catch(SQLiteException e) {
System.out.println("open database exception caught");
Log.v("open database exception caught", e.getMessage());
db = dbhelper.getReadableDatabase();
}
}
/**
* This method, insertInfo(), is called once information has been retrieved and prepared elsewhere and is now ready
* to be inserted into the Database.
*
* @param gamesPlayed
* Number of games played by the user.
* @param highScore
     *            Highest Score achieved.
* @param xp
* Total experience points earned.
* @param level
* Current skill level of the user.
* @return
* A return value of anything but -1 is good, -1 means an exception has occurred and the insert was unsuccessful.
*/
public long insertInfo(int gamesPlayed, int highScore, int xp, int level) {
try {
ContentValues newTaskValue = new ContentValues();
newTaskValue.put(Constants.GAMES_PLAYED, gamesPlayed);
newTaskValue.put(Constants.HIGH_SCORE, highScore);
newTaskValue.put(Constants.XP, xp);
newTaskValue.put(Constants.LEVEL, level);
return db.insert(Constants.TABLE_NAME, null, newTaskValue);
} catch(SQLiteException e) {
System.out.println("Insert into database exception caught");
Log.v("Insert into database exception caught", e.getMessage());
return -1;
}
}
/**
* This method, getInfo(), queries the Database and returns all the information it contains.
*
* @return
* Returns the Cursor containing the information retrieved.
*/
public Cursor getInfo() {
Cursor c = db.query(Constants.TABLE_NAME, null, null, null, null, null, null);
return c;
}
/**
* This method, dropTable(), is called when the user resets their profile. The table is dropped, or deleted, and all
* the information gathered up to that point is lost.
*/
public void dropTable() {
db.execSQL("DROP TABLE IF EXISTS " + Constants.TABLE_NAME);
dbhelper.onCreate(db);
}
} |
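A minimal usage sketch for GameDB above (illustrative; `context` stands for any valid Android Context and the numbers are placeholders):

    GameDB gameDB = new GameDB(context);
    gameDB.open();
    gameDB.insertInfo(12, 980, 4500, 3); // gamesPlayed, highScore, xp, level
    Cursor stats = gameDB.getInfo();
    // read columns via the Constants keys, then clean up
    stats.close();
    gameDB.close();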
Delegate outcomes; not actions by Evan Leybourn on
The difference between leadership and management
There’s a particular form of leadership that has difficulty in delegating. I’m not talking about micro-managers (although they would fall into this category as well), but those who delegate actions. The “I need you to put a presentation together by Friday” type of manager.
This management model shows an implicit lack of trust. And while it may not be verbalised (or even thought of in this way), trust between managers and their staff is demonstrated in how we interact with each other.
Rather, leaders should be delegating outcomes - and leave the choice and implementation of the relevant actions to their staff.
For every “I need a presentation on X” there is a “We need to make our clients aware of the new products.”
For every “I need you to sell this car” there is a “We need to increase sales by 10% this quarter.”
For every “I need you to upgrade Microsoft Office to 2013” there is a “I need you to maintain all systems and ensure they are up to date.”
If you do not trust your staff, either because they have proven themselves incompetent (as in the literal definition of not-competent) or they are new, then certainly be specific. But if, as an agile leader, you’re hiring people smarter than you, let them do their job. And if, as should happen, they find a better way to do the job, your act of delegating should not limit them.
Do you delegate actions or outcomes? Why? Let me know below. |
// src/main.rs
/*
* Copyright (c) 2017 <NAME>
*
* SPDX-License-Identifier: MIT
*/
use std::cmp;
use std::env;
// Used for error and debug logging
use env_logger::Env;
use log::{debug, error, info};
// Used to do command line parsing
use std::path::PathBuf;
use structopt::clap::{arg_enum, crate_name, crate_version};
use structopt::StructOpt;
// Load the real functionality
use git_mirror::do_mirror;
use git_mirror::provider::{GitHub, GitLab, Provider};
use git_mirror::MirrorOptions;
use std::process::exit;
arg_enum! {
#[derive(Debug)]
enum Providers {
GitLab,
GitHub
}
}
/// command line options
#[derive(StructOpt, Debug)]
#[structopt(name = "git-mirror")]
struct Opt {
/// Provider to use for fetching repositories
#[structopt(
long = "provider",
short = "p",
default_value = "GitLab",
possible_values = &Providers::variants(),
case_insensitive = true
)]
provider: Providers,
/// URL of the instance to get repositories from
#[structopt(
long = "url",
short = "u",
default_value_ifs(&[
("provider", Some("GitLab"), "https://gitlab.com"),
("provider", Some("GitHub"), "https://api.github.com"),
])
)]
url: String,
/// Name of the group to check for repositories to sync
#[structopt(long = "group", short = "g")]
group: String,
/// Directory where the local clones are stored
#[structopt(long = "mirror-dir", short = "m", default_value = "./mirror-dir")]
mirror_dir: PathBuf,
/// Verbosity level
#[structopt(short, long, parse(from_occurrences))]
verbose: u8,
/// Use http(s) instead of SSH to sync the GitLab repository
#[structopt(long)]
http: bool,
/// Only print what to do without actually running any git commands
#[structopt(long)]
dry_run: bool,
/// Number of concurrent mirror jobs
#[structopt(short = "c", long, default_value = "1")]
worker_count: usize,
    /// Location where to store metrics for consumption by
    /// Prometheus node exporter's text file collector
#[structopt(long)]
metric_file: Option<PathBuf>,
/// Location where to store the Junit XML report
#[structopt(long)]
junit_report: Option<PathBuf>,
/// Git executable to use
#[structopt(long, default_value = "git")]
git_executable: String,
/// Private token or Personal access token to access the GitLab or GitHub API
#[structopt(long, env = "PRIVATE_TOKEN")]
private_token: Option<String>,
/// Default refspec used to mirror repositories, can be overridden per project
#[structopt(long)]
refspec: Option<Vec<String>>,
/// Remove the local working repository after pushing. This requires a full re-clone on the next run.
#[structopt(long)]
remove_workrepo: bool,
/// Fail on sync task error. If set the executable will exit with 1 if any sync task failed.
#[structopt(long)]
fail_on_sync_error: bool,
}
impl From<Opt> for MirrorOptions {
fn from(opt: Opt) -> MirrorOptions {
MirrorOptions {
mirror_dir: opt.mirror_dir,
dry_run: opt.dry_run,
worker_count: opt.worker_count,
metrics_file: opt.metric_file,
junit_file: opt.junit_report,
git_executable: opt.git_executable,
refspec: opt.refspec,
remove_workrepo: opt.remove_workrepo,
fail_on_sync_error: opt.fail_on_sync_error,
}
}
}
fn main() {
    // Set up the command line parser
    let opt = Opt::from_args();
    let env_log_level = match cmp::min(opt.verbose, 4) {
        4 => "git_mirror=trace",
        3 => "git_mirror=debug",
        2 => "git_mirror=info",
        1 => "git_mirror=warn",
        _ => "git_mirror=error",
    };
    env_logger::Builder::from_env(Env::default().default_filter_or(env_log_level)).init();
    // Log the parsed options only after the logger is initialized;
    // otherwise the message would be silently dropped.
    debug!("{:#?}", opt);
// Run OpenSSL probing on all platforms even the ones not using it
openssl_probe::init_ssl_cert_env_vars();
let provider: Box<dyn Provider> = match opt.provider {
Providers::GitLab => Box::new(GitLab {
url: opt.url.to_owned(),
group: opt.group.to_owned(),
use_http: opt.http,
private_token: opt.private_token.to_owned(),
recursive: true,
}),
Providers::GitHub => Box::new(GitHub {
url: opt.url.to_owned(),
org: opt.group.to_owned(),
use_http: opt.http,
private_token: opt.private_token.to_owned(),
useragent: format!("{}/{}", crate_name!(), crate_version!()),
}),
};
let opts: MirrorOptions = opt.into();
match do_mirror(provider, &opts) {
Ok(_) => {
info!("All done");
}
Err(e) => {
error!("Error occured: {}", e);
exit(e.into());
}
};
}
|
/**
* Iterator backed by a specific array.
*/
public static class Iterator<T>
implements java.util.Iterator<T> {
private final T[] dataArray;
private int position = 0;
/**
* Base constructor.
* <p>
* Note: the array is not cloned, changes to it will affect the values returned by next().
* </p>
*
* @param dataArray array backing the iterator.
*/
public Iterator(T[] dataArray) {
this.dataArray = dataArray;
}
    @Override
    public boolean hasNext() {
        return position < dataArray.length;
    }

    @Override
    public T next() {
        if (position == dataArray.length) {
            // Fully qualified so this fragment compiles without an extra import.
            throw new java.util.NoSuchElementException("Out of elements: " + position);
        }
        return dataArray[position++];
    }

    @Override
    public void remove() {
        throw new UnsupportedOperationException("Cannot remove element from an Array.");
    }
} |
Tri-State Spectrum Sensing and Erasure-Injected Probabilistic Inference for Cognitive Radios
Cooperation can significantly improve the diversity order and hence the spectrum sensing accuracy in cognitive radio systems. Since cooperation inevitably introduces communication overhead, the question arises as to how, and how much, cooperation should be induced to attain the low-hanging fruit without being excessive and overshadowing the gain. Based on the topology graph, this paper proposes a distributed tri-state probabilistic inference mechanism for cooperative sensing. Conventional decision fusion strategies pull together all the local information (e.g. yes or no for some hypothesis) in the neighborhood, irrespective of its quality. The new idea in tri-state decision fusion is that if a cognitive radio is rather unsure (up to a threshold) about its sensing result, then instead of sending out this information (which may well be useless anyway), it might as well remain silent, staying in the third state of "erasure" to save energy (and bandwidth). An information-theoretic analysis is conducted to determine the optimal threshold that maximizes the data-rate to energy ratio. Extensive simulations confirm the advantages of the tri-state information dissemination strategy.
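Below is a minimal sketch of the tri-state local rule described in the abstract, assuming a log-likelihood-ratio (LLR) based local detector; the threshold tau, the LLR values, and the majority-vote fusion are illustrative assumptions rather than the paper's exact formulation.

import numpy as np

def tri_state_decision(llr, tau):
    """Map a local log-likelihood ratio to one of three states.

    Returns +1 ("occupied"), -1 ("vacant"), or 0 ("erasure": the radio is
    too unsure, so it stays silent and saves energy and bandwidth).
    """
    if llr > tau:
        return +1
    if llr < -tau:
        return -1
    return 0  # erasure: nothing is transmitted

def fuse(decisions):
    """Fuse the neighborhood reports, ignoring erased (silent) radios."""
    active = [d for d in decisions if d != 0]
    if not active:
        return 0  # no radio was confident enough to report
    return 1 if sum(active) > 0 else -1

# Example: three confident radios and two unsure ones.
llrs = np.array([2.3, -0.4, 1.8, 0.2, 3.1])
reports = [tri_state_decision(v, tau=1.0) for v in llrs]
print(reports, "->", fuse(reports))   # [1, 0, 1, 0, 1] -> 1
 |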
import { Skill } from '../../index';
import { PlayerStatus } from '../../../player';
import { BloodMagicAffectedEvent } from './index';
export default class BloodMagicSkill extends Skill {
id = 'blood-magic-skill';
  name = 'Blood Magic';
  description = '*One-time skill* During the attack phase, if the attacker is below 10% of max health, set both players to the average of their previous health values.';
data = {
affected: false,
};
  affectTiming = PlayerStatus.beforeAttack;
run() {
if (this.data.affected) {
return;
}
const ownerPlayer = this.battleField.getPlayer(this.playerId);
if (ownerPlayer.health.internalValue > ownerPlayer.health.defaultValue * 0.1) {
return;
}
this.battleField.eventRegistry.registerEvent(new BloodMagicAffectedEvent(), this.playerId);
const oppositePlayer = this.battleField.getOppositePlayer(this.playerId);
const averageHP = (
ownerPlayer.health.internalValue
+ oppositePlayer.health.internalValue
) / 2;
ownerPlayer.health.value = averageHP;
oppositePlayer.health.value = averageHP;
    this.data.affected = true; // mark this one-time skill as used
}
}
|
/**
* <p>
* This is a spring based implementation of the {@link JobExecutor} using spring abstraction {@link TaskExecutor} for performing background task execution.
* </p>
* <p>
 * The idea behind this implementation is to externalize the configuration of the task executor, so it can leverage application-server-managed thread pools, for example via the CommonJ API.
 * The use of unmanaged threads in application servers is discouraged by the Java EE spec.
* </p>
*
* @author Pablo Ganga
* @deprecated use {@link org.flowable.spring.job.service.SpringAsyncExecutor}
*/
@Deprecated
public class SpringAsyncExecutor extends org.flowable.spring.job.service.SpringAsyncExecutor {
public SpringAsyncExecutor() {
}
public SpringAsyncExecutor(TaskExecutor taskExecutor, SpringRejectedJobsHandler rejectedJobsHandler) {
super(taskExecutor, rejectedJobsHandler);
}
@Override
public SpringRejectedJobsHandler getRejectedJobsHandler() {
return (SpringRejectedJobsHandler) super.getRejectedJobsHandler();
}
    /**
     * Required Spring-injected {@link RejectedJobsHandler} implementation that will be used when jobs are rejected by the task executor.
     *
     * @param rejectedJobsHandler the handler to invoke for rejected jobs
     * @deprecated use {@link #setRejectedJobsHandler(org.flowable.spring.job.service.SpringRejectedJobsHandler)}
     */
@Deprecated
public void setRejectedJobsHandler(SpringRejectedJobsHandler rejectedJobsHandler) {
super.setRejectedJobsHandler(rejectedJobsHandler);
}
} |
from collections import Counter

import numpy as np
from scipy.cluster import hierarchy


def cluster_matrix(distance_df, M, squareM, locations_to_index, index_to_locations, method, distance_threshold, criterion):
    # Note: M is kept for API compatibility but is not used below.
    # Hierarchical clustering; `linkage` expects a condensed distance matrix,
    # so convert with `squareform(squareM)` first if squareM is in square form.
    clustering = hierarchy.linkage(squareM, method=method)
    # Cut the dendrogram at the given threshold to get flat cluster labels.
    clusters = hierarchy.fcluster(clustering, t=distance_threshold, criterion=criterion)
    C = Counter(clusters)  # cluster label -> number of member locations
    N = len(locations_to_index)
clustered_locations = []
cluster_ids = set(clusters)
cluster_labels = {}
cluster_sizes = []
cluster_size_by_locations = {}
for c in sorted(cluster_ids):
clustered_locations.append([])
cluster_sizes.append(C[c])
for location_index in range(N):
c = clusters[location_index]
clustered_locations[c-1].append(location_index)
location_name = index_to_locations[location_index]
cluster_labels[location_name] = c
cluster_size_by_locations[location_index] = C[c]
clustered_locations_list = []
for cindex in range(len(clustered_locations)):
clustered_locations_list = clustered_locations_list + clustered_locations[cindex]
    # Reorder the pairwise distances so that members of the same cluster are
    # adjacent; each symmetric pair is looked up and filled only once.
    clustered_Matrix = np.zeros((N, N))
    for i in range(N):
location_index_i = clustered_locations_list[i]
location_i = index_to_locations[location_index_i]
for j in range(N):
location_index_j = clustered_locations_list[j]
location_j = index_to_locations[location_index_j]
if clustered_Matrix[i][j] == 0:
dist = float(distance_df[(distance_df.location_1 == location_i) & (distance_df.location_2 == location_j)].distance.values[0])
clustered_Matrix[i][j] = dist
clustered_Matrix[j][i] = dist
return clustered_Matrix, clusters, clustered_locations, clustered_locations_list, cluster_labels, cluster_ids, cluster_sizes, cluster_size_by_locations, clustering |
package main
import "github.com/astaxie/beego"
// main starts the beego HTTP server with its default configuration.
func main() {
beego.Run()
} |
/*
* zcrypt 2.1.0
*
* Copyright IBM Corp. 2001, 2006
* Author(s): Robert Burroughs
* Eric Rossman ([email protected])
*
* Hotplug & misc device support: Jochen Roehrig ([email protected])
* Major cleanup & driver split: Martin Schwidefsky <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _ZCRYPT_ERROR_H_
#define _ZCRYPT_ERROR_H_
#include <linux/atomic.h>
#include "zcrypt_debug.h"
#include "zcrypt_api.h"
/**
* Reply Messages
*
* Error reply messages are of two types:
* 82: Error (see below)
* 88: Error (see below)
* Both type 82 and type 88 have the same structure in the header.
*
* Request reply messages are of three known types:
* 80: Reply from a Type 50 Request (see CEX2A-RELATED STRUCTS)
* 84: Reply from a Type 4 Request (see PCICA-RELATED STRUCTS)
* 86: Reply from a Type 6 Request (see PCICC/PCIXCC/CEX2C-RELATED STRUCTS)
*
*/
struct error_hdr {
unsigned char reserved1; /* 0x00 */
unsigned char type; /* 0x82 or 0x88 */
unsigned char reserved2[2]; /* 0x0000 */
unsigned char reply_code; /* reply code */
unsigned char reserved3[3]; /* 0x000000 */
};
#define TYPE82_RSP_CODE 0x82
#define TYPE88_RSP_CODE 0x88
#define REP82_ERROR_MACHINE_FAILURE 0x10
#define REP82_ERROR_PREEMPT_FAILURE 0x12
#define REP82_ERROR_CHECKPT_FAILURE 0x14
#define REP82_ERROR_MESSAGE_TYPE 0x20
#define REP82_ERROR_INVALID_COMM_CD 0x21 /* Type 84 */
#define REP82_ERROR_INVALID_MSG_LEN 0x23
#define REP82_ERROR_RESERVD_FIELD 0x24 /* was 0x50 */
#define REP82_ERROR_FORMAT_FIELD 0x29
#define REP82_ERROR_INVALID_COMMAND 0x30
#define REP82_ERROR_MALFORMED_MSG 0x40
#define REP82_ERROR_INVALID_DOMAIN_PRECHECK 0x42
#define REP82_ERROR_RESERVED_FIELDO 0x50 /* old value */
#define REP82_ERROR_WORD_ALIGNMENT 0x60
#define REP82_ERROR_MESSAGE_LENGTH 0x80
#define REP82_ERROR_OPERAND_INVALID 0x82
#define REP82_ERROR_OPERAND_SIZE 0x84
#define REP82_ERROR_EVEN_MOD_IN_OPND 0x85
#define REP82_ERROR_RESERVED_FIELD 0x88
#define REP82_ERROR_INVALID_DOMAIN_PENDING 0x8A
#define REP82_ERROR_TRANSPORT_FAIL 0x90
#define REP82_ERROR_PACKET_TRUNCATED 0xA0
#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0
#define REP88_ERROR_MODULE_FAILURE 0x10
#define REP88_ERROR_MESSAGE_TYPE 0x20
#define REP88_ERROR_MESSAGE_MALFORMD 0x22
#define REP88_ERROR_MESSAGE_LENGTH 0x23
#define REP88_ERROR_RESERVED_FIELD 0x24
#define REP88_ERROR_KEY_TYPE 0x34
#define REP88_ERROR_INVALID_KEY 0x82 /* CEX2A */
#define REP88_ERROR_OPERAND 0x84 /* CEX2A */
#define REP88_ERROR_OPERAND_EVEN_MOD 0x85 /* CEX2A */
static inline int convert_error(struct zcrypt_queue *zq,
struct ap_message *reply)
{
struct error_hdr *ehdr = reply->message;
int card = AP_QID_CARD(zq->queue->qid);
int queue = AP_QID_QUEUE(zq->queue->qid);
switch (ehdr->reply_code) {
case REP82_ERROR_OPERAND_INVALID:
case REP82_ERROR_OPERAND_SIZE:
case REP82_ERROR_EVEN_MOD_IN_OPND:
case REP88_ERROR_MESSAGE_MALFORMD:
case REP82_ERROR_INVALID_DOMAIN_PRECHECK:
case REP82_ERROR_INVALID_DOMAIN_PENDING:
// REP88_ERROR_INVALID_KEY // '82' CEX2A
// REP88_ERROR_OPERAND // '84' CEX2A
// REP88_ERROR_OPERAND_EVEN_MOD // '85' CEX2A
/* Invalid input data. */
ZCRYPT_DBF(DBF_WARN,
"device=%02x.%04x reply=0x%02x => rc=EINVAL\n",
card, queue, ehdr->reply_code);
return -EINVAL;
case REP82_ERROR_MESSAGE_TYPE:
// REP88_ERROR_MESSAGE_TYPE // '20' CEX2A
		/*
		 * Sending a message of the wrong type is a bug in the
		 * device driver. Send an error message, disable the device
		 * and then repeat the request.
		 */
atomic_set(&zcrypt_rescan_req, 1);
zq->online = 0;
pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
card, queue);
ZCRYPT_DBF(DBF_ERR,
"device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
card, queue, ehdr->reply_code);
return -EAGAIN;
case REP82_ERROR_TRANSPORT_FAIL:
case REP82_ERROR_MACHINE_FAILURE:
// REP88_ERROR_MODULE_FAILURE // '10' CEX2A
/* If a card fails disable it and repeat the request. */
atomic_set(&zcrypt_rescan_req, 1);
zq->online = 0;
pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
card, queue);
ZCRYPT_DBF(DBF_ERR,
"device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
card, queue, ehdr->reply_code);
return -EAGAIN;
default:
zq->online = 0;
pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
card, queue);
ZCRYPT_DBF(DBF_ERR,
"device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
card, queue, ehdr->reply_code);
return -EAGAIN; /* repeat the request on a different device. */
}
}
#endif /* _ZCRYPT_ERROR_H_ */
|
def print_line(self, text, indent=0, leader="- "):
    """Print `text` indented by `indent` spacer units and prefixed with `leader`."""
    assert isinstance(indent, int)
    assert indent >= 0
    assert isinstance(leader, str)
    print(self.spacer * indent + leader + text)
// loadCert loads a single pem-encoded certificate into the given pool.
func loadCert(pool *x509.CertPool, pem []byte) error {
if ok := pool.AppendCertsFromPEM(pem); !ok {
return fmt.Errorf("failed to parse PEM")
}
return nil
} |
// Start brings up the server so that it listens at the port specified by
// RPCPort and starts accepting connections and incoming protobuf messages.
func (rpcs *RealRaftProtobufServer) Start() error {
go rpcs.loop()
startupErrChan := make(chan error)
rpcs.commandChannel <- &startServer{
rpcPort: rpcs.RPCPort,
errChan: startupErrChan,
}
return <-startupErrChan
} |
Implantable Medical Devices (IMDs) are without a doubt a godsend for many people out there, but a team of researchers from Leuven, Belgium, and Birmingham, UK, has just demonstrated that hacking into these devices is a piece of cake, even for cybercriminals without advanced skills.
And it goes without saying that once they break into a smart heart device, they have its full control, and can even kill the host with the press of a button.
Specifically, the researchers explain in a paper called “On the (in)security of the Latest Generation Implantable Cardiac Defibrillators and How to Secure Them” that all these smart heart devices rely on a proprietary wireless communication system, in most of the cases a long-range RF channel, that can be compromised by hackers without even being anywhere around them.
Once they intercept the wireless connection between the monitors and the implanted devices, hackers can launch various attacks, including reverse engineering and DDoS attacks, to compromise the security systems (if any) and take control of the device.
No security at all
And what’s worse is that most popular IMDs have very weak or no security at all, which makes them easy to hack even for attackers without advanced technology or skills.
“We want to emphasise that reverse-engineering was possible by only using a black-box approach. Our results demonstrated that security-by-obscurity is a dangerous design approach that often conceals negligent designs,” the research shows.
The researchers launched two different types of attacks, one of which was specifically designed to show that an attacker who manages to break into an IMD has full control of its functions.
“Our first attack consisted on keeping the ICD alive while the ICD is in ‘standby’ mode by repeatedly sending a message over the long-range communication channel. The goal of this attack was to drain the ICD’s battery life, or to enlarge this time window to send the necessary malicious messages to compromise the patient’s safety,” the research shows.
In order to deal with these risks, the researchers explain that jamming the signal is the only effective solution in the short term; in the long term, putting the device into a standby mode after communication ends is the best way to remain secure.
Trainable Projected Gradient Detector for Sparsely Spread Code Division Multiple Access
Sparsely spread code division multiple access (SCDMA) is a promising non-orthogonal multiple access technique for future wireless communications. In this paper, we propose a novel trainable multiuser detector called the sparse trainable projected gradient (STPG) detector, which is based on the notion of deep unfolding. In the STPG detector, trainable parameters are embedded in a projected gradient descent algorithm and can be trained by standard deep learning techniques such as back propagation and stochastic gradient descent. Advantages of the detector are its low computational cost and small number of trainable parameters, which enable us to treat massive SCDMA systems. In particular, its computational cost is smaller than that of a conventional belief propagation (BP) detector, while the STPG detector exhibits nearly the same detection performance as a BP detector. We also propose a scalable joint learning of signature sequences and the STPG detector for signature design. Numerical results show that the joint learning improves multiuser detection performance, particularly in the low SNR regime.
I. INTRODUCTION
Non-orthogonal multiple access (NOMA) is a key ingredient of recent multiple access techniques for fifth generation (5G) mobile networks. By allocating several users to the same resource block, NOMA techniques realize high spectral efficiency and low latency even when a network is massively connected. Additionally, in future multiple access communications, overloaded access is considered unavoidable because of spectral resource limitations. NOMA techniques are expected to deal with such overloaded systems, in which the number of active transmitters is larger than the signal dimension.
Code division multiple access (CDMA) is an orthogonal multiple access (OMA) system in which n active users communicate with a base station (BS) simultaneously by spreading their signals with signature sequences (or spreading sequences) of length m. Although the orthogonality of signature sequences ensures reasonable multiuser detection performance if m ≥ n, detection performance drops in overloaded cases (m < n). Sparsely spread CDMA (SCDMA) is a promising NOMA technique based on CDMA. In SCDMA, data streams are modulated by randomly generated signature sequences that contain a small number of non-zero elements. The BS receives the superimposed signals with additive noise and tries to detect the data streams from multiple users. Compared with conventional CDMA, the sparse signature sequences in SCDMA allow low-complexity detection using a linear-time algorithm such as belief propagation (BP). Moreover, as a NOMA system, SCDMA potentially achieves reasonable detection performance even in overloaded cases.
Recent studies on SCDMA have mainly focused on the design of detectors and signature sequences. As described above, BP is a detector well suited to the sparse structure of SCDMA, and it exhibits nearly optimal performance as predicted theoretically. The computational complexity of the BP detector, however, rapidly increases with the signature sparsity and the constellation size of transmit signals. Since practical SCDMA systems use sufficiently large values of these parameters, we need to reduce the computational cost for faster multiuser detection. Signature design is another crucial issue because detection performance depends on the superimposed signals spread by the signature sequences. A signature matrix family that improves BP detection performance has been proposed in prior work. Recently, alternative approaches for related SCMA systems have been proposed that design signature sequences and a detector jointly using autoencoders. Although learned autoencoders provide signature sequences with reasonable performance, their high training cost is a drawback because they contain a large number of training parameters. In summary, a desirable detector and signature design should possess both high scalability for large systems and good adaptability to practical SCDMA systems with high signature sparsity, large signal constellations, and/or overloaded access.
The rapid development of deep learning (DL) techniques has stimulated the design of wireless communication systems. Recently, deep unfolding, proposed by Gregor and LeCun, has attracted great interest as another DL-based approach in addition to the end-to-end approach. In deep unfolding, the signal-flow graph of an existing iterative algorithm is expanded into a deep network architecture in which some parameters, such as step-size parameters, are embedded. These embedded parameters are treated as trainable parameters to tune the behavior of the algorithm. Learning the trainable parameters is accomplished by standard supervised learning techniques such as back propagation and stochastic gradient descent (SGD) if the original algorithm consists of differentiable processes. An advantage of deep unfolding is that the number of trainable parameters is much smaller than in conventional deep neural networks, which leads to a fast and stable training process and high scalability. Deep unfolding has been applied to various topics in signal processing and wireless communications: sparse signal recovery, massive MIMO detection, signal detection for clipped OFDM systems, and trainable decoders for LDPC codes.
In this paper, we propose a trainable multiuser detector and signature design with high scalability and adaptability to overloaded SCDMA systems. In order to resolve the scalability issue of multiuser detection, we first introduce a novel SCDMA multiuser detector called the sparse trainable projected gradient (STPG) detector. The STPG detector is based on a projected gradient descent algorithm whose gradient can be computed efficiently. Combined with the deep unfolding technique, this yields a trainable detector with reasonable detection performance, high scalability, and adaptability to practical SCDMA systems. In addition, a scalable DL-based SCDMA signature design is proposed by learning a signature matrix and the STPG detector simultaneously. In the proposed method, the values of non-zero elements in a signature matrix and the trainable parameters of the detector are jointly trained to improve detection performance based on an estimate from a temporal signature matrix and detector. Compared with existing DL-based approaches, the proposed method can be trained on huge systems.
The outline of the paper is as follows. Section II describes the system model and the conventional BP detector. In Section III, we propose the STPG detector for SCDMA multiuser detection and compare its detection performance in large systems with that of a BP detector. Section IV describes signature design based on the STPG detector and demonstrates the resulting performance improvement. Section V summarizes the paper.
II. SYSTEM MODEL AND BP DETECTOR
We first introduce the SCDMA system model and the conventional BP detector.
A. SCDMA system model
We consider an uplink SCDMA system where n active users with a single antenna try to transmit their messages to a BS by using signature sequences of length m. The ratio β := n/m is called the overloaded factor. From the definition, β > 1 indicates that the system is overloaded, i.e., m < n. We assume that the ratio β is a constant number. Each user has a BPSK-modulated signal x_i ∈ {+1, −1} (i = 1, ..., n) as transmit data. In addition, users have their own signature sequences a_i = (a_{1,i}, ..., a_{m,i})^T ∈ R^m. Then, the BS receives the superimposed signal

    y = \sum_{i=1}^{n} a_i x_i + w,    (1)

where w is a noise vector and y ∈ R^m is the received signal at the BS. Letting A := (a_1, ..., a_n) ∈ R^{m×n} be the signature matrix, (1) can be rewritten as

    y = Ax + w,    (2)

where x := (x_1, ..., x_n)^T. In conventional CDMA systems, we assume orthogonality of the signature sequences, i.e., a_i^T a_j = 0 for any i ≠ j. Instead, SCDMA systems require sparsity of the signature sequences, so that the number of non-zero elements in each signature sequence is constant with respect to n and m.
We consider the following typical SCDMA system. First, we assume an AWGN channel. Second, each row of the signature matrix A is assumed to have k non-zero entries; k is called the signature sparsity in this paper. We also assume that the signature matrix is normalized such that \|A\|_F^2 = km, where \|·\|_F denotes the Frobenius norm. Under these assumptions, the signal-to-noise ratio (SNR) of the system, defined by n_0 := E_x \|Ax\|_2^2 / E_w \|w\|_2^2, is calculated as n_0 = k/σ^2, where σ^2 is the noise variance per symbol. Equivalently, the SCDMA model for a given SNR n_0 is defined by

    y = Ax + \sqrt{k/n_0}\, w_0,    (3)

where w_0 is an i.i.d. Gaussian random vector with zero mean and unit variance. We consider a multiuser detector and signature design for this system model.
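For concreteness, the following numpy sketch generates a sparse signature matrix with k non-zero entries per row and random ±1 weights (matching the simulation settings of Sec. III-C) and produces a received vector according to (3). The simple row-wise random placement of non-zeros is an illustrative stand-in for Gallager's construction used later in the paper, so the column weights are only approximately regular.

import numpy as np

def make_signature(m, n, k, rng):
    """Sparse signature matrix: k non-zeros per row, weights in {+1, -1}.

    Unlike Gallager's construction used in the paper's simulations, this
    sketch does not enforce an exactly regular column weight k' = km/n.
    """
    A = np.zeros((m, n))
    for j in range(m):
        cols = rng.choice(n, size=k, replace=False)
        A[j, cols] = rng.choice([+1.0, -1.0], size=k)
    return A  # ||A||_F^2 = km holds automatically since entries are +-1

def received_signal(A, k, snr, rng):
    """Model (3): y = A x + sqrt(k / n0) w0 with BPSK x and Gaussian w0."""
    m, n = A.shape
    x = rng.choice([+1.0, -1.0], size=n)
    w0 = rng.standard_normal(m)
    return A @ x + np.sqrt(k / snr) * w0, x

rng = np.random.default_rng(0)
A = make_signature(m=1000, n=1200, k=6, rng=rng)    # overloaded: beta = 1.2
y, x = received_signal(A, k=6, snr=10 ** (11 / 10), rng=rng)   # 11 dB SNR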
B. BP detector
We briefly describe the BP detector, a standard multiuser detector for SCDMA.
The recursive equations of BP are constructed on a factor graph whose nodes are the variables x and y and whose edges are set according to the non-zero elements of A. The message U_{j→i}(x) (x ∈ {+1, −1}) is a message from a chip node y_j to a symbol node x_i, and V_{i→j}(x) is a message from a symbol node x_i to a chip node y_j. The BP recursive formulas are given by

    V_{i→j}(x) = \frac{1}{Z_{i→j}} \prod_{j'∈∂i\setminus j} U_{j'→i}(x),    (4)

    U_{j→i}(x) = \frac{1}{Z_{j→i}} \sum_{x_{∂j\setminus i}} \exp\!\left( -\frac{\left(y_j − a_{j,i}x − \sum_{i'∈∂j\setminus i} a_{j,i'} x_{i'}\right)^2}{2σ^2} \right) \prod_{i'∈∂j\setminus i} V_{i'→j}(x_{i'}),    (5)

where Z_{i→j} and Z_{j→i} are normalization constants, and ∂i := {j ∈ {1, ..., m} | a_{j,i} ≠ 0} and ∂j := {i ∈ {1, ..., n} | a_{j,i} ≠ 0} are the neighboring node sets on the factor graph. After T_{BP} iterations, the probability that the ith transmit signal takes the value x is estimated by

    P_i(x) = \frac{1}{Z_i} \prod_{j∈∂i} U_{j→i}(x),    (6)

where Z_i is a normalization constant. Finally, the ith transmit signal is detected as x̂_i = argmax_{x∈{+1,−1}} P_i(x). The computational cost of the BP detector is O(k^2 2^{k−1} n) because (5) contains a sum over all possible combinations of the k−1 transmit signals x_{∂j\setminus i}. Similarly, if the detector is applied to a system with a higher-order modulation of size |M|, the computational cost of the BP detector is O(k^2 |M|^{k−1} n). This rapid increase with respect to k and |M| is a drawback of the BP detector.
III. STPG DETECTOR
In this section, we propose a trainable multiuser detector for SCDMA using the idea of deep unfolding.
A. Structure of STPG detector
Deep unfolding is an efficient DL-based approach that borrows the structure of an existing iterative algorithm. Here, we employ a gradient descent-based detector rather than a message-passing algorithm such as BP.
The maximum-likelihood (ML) estimator for the SCDMA system (3) is formulated as

    x̂ = argmin_{x ∈ {+1,−1}^n} \|y − Ax\|_2^2.    (7)

This ML estimator is formally similar to that for MIMO signal detection and is computationally intractable for large n. Alternatively, a projected gradient descent (PG) algorithm can be used to solve (7) approximately by replacing the constraint x ∈ {+1, −1}^n with the relaxed constraint x ∈ [−1, +1]^n. The recursive formulas of the PG are given by

    r_t = s_t + γ A^T (y − A s_t),
    s_{t+1} = tanh(α r_t),

where s_0 = 0 is the initial vector. The first equation is called the gradient step because r_t is updated by a gradient descent method with step size γ > 0. The second equation is called the projection step, with the element-wise soft projection function tanh(·). The softness parameter α controls the shape of the soft projection function; in the large-α limit, the function becomes a step function, which is the original projection onto {+1, −1}^n. The detection performance of the PG is expected to depend on the choice of the parameters γ and α. A disadvantage of the plain PG is that the parameter values must be searched carefully to obtain reasonable performance.
To introduce the STPG detector, we replace the parameter γ with an iteration-dependent γ_t. The proposed STPG detector is thus defined by

    r_t = s_t + γ_t A^T (y − A s_t),
    s_{t+1} = tanh(α r_t),

where {γ_t}_{t=1}^{T} and α are regarded as trainable parameters. The architecture of the tth iteration of the STPG detector is shown in Fig. 1. Note that, although the trainable parameter α could also be made iteration-dependent (α_t), a single trainable parameter α is used here to reduce the number of trainable parameters. The total number of trainable parameters is therefore T + 1 over T iterations, which is constant with respect to n and m. This leads to high scalability and stable convergence in the training process.
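A minimal PyTorch sketch of the STPG recursion above follows; {γ_t} and α are the only trainable parameters, as in the text. The class and variable names are ours, and the signature matrix is kept dense for brevity (a sparse representation would preserve the O(kn) cost of the gradient step). Detection afterwards is x̂ = sign(s_T), as in Sec. III-C.

import torch
import torch.nn as nn

class STPGDetector(nn.Module):
    """Deep-unfolded projected gradient detector with T iterations."""

    def __init__(self, T, gamma_init=0.01, alpha_init=2.0):
        super().__init__()
        # One trainable step size per unfolded iteration, plus a shared alpha.
        self.gamma = nn.Parameter(torch.full((T,), gamma_init))
        self.alpha = nn.Parameter(torch.tensor(alpha_init))
        self.T = T

    def forward(self, y, A, num_itr=None):
        """y: (batch, m) received signals, A: (m, n) signature matrix."""
        num_itr = self.T if num_itr is None else num_itr
        s = torch.zeros(y.shape[0], A.shape[1], device=y.device)
        for t in range(num_itr):
            # Gradient step: only A and A^T appear, so sparse A keeps it O(kn).
            r = s + self.gamma[t] * (y - s @ A.t()) @ A
            # Projection step: element-wise soft projection via tanh.
            s = torch.tanh(self.alpha * r)
        return s  # soft estimate s_T; detect with sign(s_T)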
It is also emphasized that the STPG detector uses A^T in the gradient step, whereas a similar MIMO detector called the TPG-detector uses the pseudo-inverse matrix U := (A^T A)^{−1} A^T or U_η := (I + ηA^T A)^{−1} A^T with a trainable parameter η. This change reduces the computational complexity of the detector. In particular, the sparse structure of the signature matrix A in SCDMA enables us to calculate all matrix-vector products in O(n) time. On the other hand, even though A is a sparse matrix, a matrix-vector product involving U or U_η takes O(n^2) operations because these matrices are dense in general. The details of the computational cost are described in the next subsection.
B. Computational complexity
A crucial property of SCDMA is the low computational cost of multiuser detection. Here, we count the number of additions and multiplications of the STPG and BP detectors in each iteration step. For simplicity, we neglect the calculation of nonlinear functions such as tanh(·) in the STPG detector and exp(·) in the BP detector. Table I shows the number of operations per iteration as a function of n, β, and k. In addition, we show the values for n = m = 1200 (β = 1) and k = 2, 4, 6 for comparison. It is found that both detectors are linear-time algorithms with respect to n. In particular, the use of A^T in the gradient step helps the STPG detector reduce its complexity.
We also find that the STPG detector requires fewer operations than the BP detector in terms of the signature sparsity k. In fact, the STPG detector performs O(kn) additions/multiplications in each iteration. On the other hand, the BP detector needs O(k^2 2^{k−1} n) operations, as discussed in Sec. II-B. As shown in Fig. 3, the constant k should be large enough to ensure reasonable detection performance, which results in the rapid increase of the BP computational cost.
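As a rough back-of-the-envelope check of these scalings (constant factors ignored, so only indicative): with k = 6 and BPSK, the BP update costs on the order of k^2 · 2^{k−1} = 36 × 32 = 1152 operations per user per iteration, against roughly k = 6 for the STPG gradient step, a ratio of about 192. This is consistent with the "more than a hundred times" gap quoted from Tab. I in Sec. III-E.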
It is noteworthy that the gap in computational complexity in terms of k widens further if we consider higher-order modulations. For a constellation of size |M|, the number of operations of the BP detector is O(k^2 |M|^{k−1} n) while that of the STPG detector remains O(kn). This is a strong point of the STPG detector for practical SCDMA systems.
C. Simulation settings
In the following subsections, we compare the proposed STPG detector with the original PG and the BP detector in terms of multiuser detection performance.
In the numerical simulations, we consider a massive SCDMA system with n = 1200 active users. A signature matrix A is randomly generated by the element-wise product A = H ⊙ W, where H ∈ {0, 1}^{m×n} is a mask matrix and W ∈ R^{m×n} is a weight matrix. In the numerical simulations, each weight of W is uniformly chosen from {+1, −1}. The mask matrix H is randomly generated by Gallager's construction so that its row and column weights are exactly equal to k and k' = km/n (∈ N), respectively. For the PG and STPG detectors, we set T = 30 as the number of iterations. The STPG detector is implemented in PyTorch 1.2. Initial values of the trainable parameters are set to γ_t = 0.01 (t = 1, ..., T) and α = 2. In the training process of the STPG detector, we use mini-batch training by back propagation and SGD. In addition, the use of incremental training is crucial to avoid a vanishing-gradient problem and obtain reasonable results. In incremental training, we begin by learning the trainable parameters γ_1, α assuming that T = 1. This is called the first generation of training. After the first generation is finished, we next train the parameters γ_1, γ_2, α as if T = 2, using the trained values of γ_1, α as their initial values. Learning these generations is repeated in an incremental manner until T reaches the desired value. In the following simulations, we use 100 mini-batches of size 200. We use the Adam optimizer with a learning rate of 0.0005. The training process of the detector is executed for each SNR.
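The incremental training schedule above can be written as a short loop. The sketch below assumes the STPGDetector module and the signature matrix from the earlier sketches; the squared-error loss is our assumption (the paper does not state the loss function explicitly), while the batch sizes and Adam learning rate follow the text.

import torch
import numpy as np

def incremental_training(detector, A, k, snr, seed=0):
    """Train gamma_1..gamma_t and alpha generation by generation."""
    rng = np.random.default_rng(seed)
    A_t = torch.tensor(A, dtype=torch.float32)
    opt = torch.optim.Adam(detector.parameters(), lr=0.0005)
    loss_fn = torch.nn.MSELoss()
    m, n = A.shape
    for t in range(1, detector.T + 1):      # t-th generation
        for _ in range(100):                # 100 mini-batches of size 200
            x = torch.tensor(rng.choice([-1.0, 1.0], size=(200, n)),
                             dtype=torch.float32)
            w = torch.randn(200, m)
            y = x @ A_t.t() + (k / snr) ** 0.5 * w   # model (3)
            opt.zero_grad()
            s = detector(y, A_t, num_itr=t)  # unfold only t iterations
            loss_fn(s, x).backward()
            opt.step()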
Multiuser detection performance is measured by the bit error rate (BER). Since the outputs s_T of the PG and STPG detectors are continuous values, the sign function sign(x) = 1 (x ≥ 0) and −1 (x < 0) is applied to the outputs. Thus, the detected signal is given by x̂ = sign(s_T).
D. Acceleration of convergence in STPG
We first compare the STPG detector with the original PG to demonstrate the advantages of learning parameters by deep unfolding. Figure 2 shows the BER performance of both detectors at different SNRs. In the original PG, we choose γ = 0.01 and α = 2, corresponding to the initial values of the STPG detector. We find that the STPG detector exhibits better performance than the PG. For example, when the SNR is 11 dB, the BER of the STPG detector (T = 30) is about 1.0 × 10^{−3} while that of the PG is about 5.1 × 10^{−2}. In addition, when SNR = 14 dB, the STPG detector shows fast convergence to a fixed point compared with the PG. These results indicate that training a constant number of parameters in the PG leads to better detection performance and fast convergence to a fixed point. Detection performance improvement and convergence acceleration are crucial advantages of deep unfolding, as shown in other signal detection problems.
E. Performance comparison to BP detector
Next, we compare the STPG detector with a conventional BP detector. Figure 3 shows the multiuser detection performance of the STPG (T = 30) and BP (T_{BP} = 30) detectors with n = 1200 active users and signature sequence length m = 1200. Since n = m (β = 1), reasonable detection performance is expected when using proper signature sequences. In fact, the two detectors exhibit nearly the same performance when k = 6. When k = 2, however, the overall BER performance of both detectors decreases, and the STPG detector is inferior to the BP detector. This suggests that a sufficiently large k is preferable for reliable multiuser detection, which leads to a rapid increase of the computational cost of the BP detector. When k = 6, the computational cost of the BP detector is more than a hundred times as high as that of the STPG detector, as shown in Tab. I. Figure 4 shows the multiuser detection performance for different overloaded factors β when n = 1200 and k = 6. In the overloaded case where β = 1.2 (m = 1000), the two detectors exhibit similar BER performance. Although the overloaded system suffers about 1 dB performance degradation compared with the β = 1 case, both algorithms successfully detect transmit signals in the high SNR regime, which is an advantage of SCDMA as a NOMA scheme. In overloaded systems, the computational cost of a detector is still crucial because the signature sparsity k should be sufficiently large, as in the β = 1 case.
In summary, the STPG detector shows detection performance similar to that of the BP detector even in an overloaded case. From the discussion in Sec. III-B, we can conclude that the STPG detector has an advantage in computational cost for sufficiently large signature sparsity k.
IV. SIGNATURE DESIGN WITH STPG DETECTOR
As described in Sec. III, a trained STPG detector shows reasonable SCDMA multiuser detection performance with low computational complexity. Moreover, a signature matrix A can be trained jointly with the STPG detector. In this section, we propose a new signature design based on learning a signature matrix and the STPG detector simultaneously.
A. Joint learning of signature matrix and STPG detector
The structure of deep unfolding enables us to train the weights of a signature matrix A by back propagation and SGD. The signature design procedure with the STPG detector is shown in Alg. 1.

[Algorithm 1: joint learning of the signature matrix and the STPG detector; each training step performs (i) masking and normalization of A, (ii) mini-batch generation, and (iii) a parameter update, over B mini-batches per generation.]

Each update of the trainable parameters consists of three steps: (i) calculating a temporal A, (ii) generating training data, and (iii) updating the trainable parameters. In step (i), the signature matrix is modified to satisfy the sparsity and normalization conditions that might have been broken by the previous parameter update. In line 4 of Alg. 1, the signature sparsity k of A is recovered by multiplying A, as updated in the last training step, by the masking matrix H. The normalization condition \|A\|_F = \sqrt{km} is restored in line 5. In step (ii), a mini-batch for a parameter update is generated according to the system model (3) with the temporal A. Then, in step (iii), the trainable parameters including A are updated to reduce the loss value calculated from the mini-batch and the STPG detector with t iterations. This training step belongs to the tth generation of incremental training.
Due to the sparse signature sequences and the architecture of the STPG detector, the substantial number of trainable parameters is km + T + 1 in total. This enables sufficiently fast joint learning. In fact, the training process finishes within 20 minutes on a PC with an NVIDIA GeForce RTX 2080 Ti GPU and an Intel Core i9-9900KF CPU (3.6 GHz).
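One training step of Alg. 1 might look as follows. The masking by H, the normalization \|A\|_F = \sqrt{km}, and the three steps (i)-(iii) come from the text; the loss and the joint optimizer are the assumptions of the earlier sketches, and `opt` is assumed to have been built over the detector's parameters plus `A_param`.

import torch

def joint_learning_step(A_param, H, detector, opt, k, snr, t, batch_size=200):
    """One step of Alg. 1: (i) mask+normalize A, (ii) make a batch, (iii) update."""
    m, n = A_param.shape
    with torch.no_grad():
        A_param *= H                                  # (i) restore sparsity
        A_param *= (k * m) ** 0.5 / A_param.norm()    # (i) ||A||_F = sqrt(km)
    x = torch.randint(0, 2, (batch_size, n)).float() * 2 - 1   # (ii) BPSK batch
    w = torch.randn(batch_size, m)
    y = x @ A_param.t() + (k / snr) ** 0.5 * w        # (ii) model (3)
    opt.zero_grad()
    s = detector(y, A_param, num_itr=t)               # t-th generation
    torch.nn.functional.mse_loss(s, x).backward()
    opt.step()                       # (iii) update A, gamma, and alpha jointly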
B. Multiuser detection performance
Now we evaluate the multiuser detection performance of the STPG detector with and without learning of the signature matrix.
In the training process of joint learning, we increase the number of mini-batches to 1000 because the number of trainable parameters increases. A mask matrix with signature sparsity k = 6 is generated according to Gallager's method. Initial values of the weights of A are set to one, and signature matrices are trained independently for each SNR. Other conditions follow the descriptions in Sec. III-C. For the STPG detector with fixed A, the weights of A are randomly and uniformly chosen from {+1, −1}. Figure 5 shows the BER performance of the STPG detectors with and without learning a signature matrix, for overloaded factors β = 1 and 1.2 with n = 1200. It is found that tuning A largely improves detection performance in the low SNR regime in both cases. When β = 1 and BER = 1.0 × 10^{−2}, the gain of learning a signature matrix is about 0.9 dB. On the other hand, the gain vanishes as the SNR increases. Especially in the case of β = 1.2, joint learning shows worse detection performance than the STPG detector with a fixed signature matrix in the high SNR regime. This is because the gain of signature design is expected to be small, and training the detector is sensitive to perturbations of the signature matrix, when the noise level is relatively small. Improving the joint learning method in the high SNR regime is left as future work. These results suggest that the proposed signature design with the STPG detector improves multiuser detection performance with reasonable training costs, especially in the low SNR regime.
V. CONCLUDING REMARKS
In this paper, we proposed a trainable SCDMA multiuser detector called the STPG detector. Applying the notion of deep unfolding to a computationally efficient PG detector, the STPG detector contains a constant number of trainable parameters which can be trained by standard deep learning techniques. An advantage of the STPG detector is its low computational cost, which is proportional to the number of active users. Moreover, compared with a conventional BP detector, the STPG detector has lower computational complexity with respect to the signature sparsity k and the signal constellation size, while its detection performance is fairly close to that of a BP detector. In addition, we demonstrated a DL-based signature design using the STPG detector. Numerical results show that the joint learning method improves multiuser detection performance, especially in the low SNR regime, with reasonable training costs.
import numpy as np
from PIL import Image


def _save_annotation(annotation_np, filename):
    # Convert the label array to 8-bit and write it to disk via PIL.
    pil_image = Image.fromarray(annotation_np.astype(dtype=np.uint8))
    pil_image.save(filename)
// src/res/res_table.h (from wangziqi2013/Android-Dalvik-Analysis)
#pragma once
#ifndef _RES_TABLE_H
#define _RES_TABLE_H
#include "common.h"
#include "package_group.h"
#include "res_base.h"
namespace wangziqi2013 {
namespace android_dalvik_analysis {
/*
* class ResourceTable - Represents binary resource table
*/
class ResourceTable : public ResourceBase {
// type declarations
private:
/*
* class TableHeader - The resource table header which is at the beginning of
* the table
*/
class TableHeader {
public:
CommonHeader common_header;
// Number of packages included in this resource table
uint32_t package_count;
} BYTE_ALIGNED;
/*
* class PackageHeader - Package header that records metadata about package
*/
class PackageHeader {
public:
CommonHeader common_header;
// Base package's ID (also in the res identifier)
// If 0 then it is not a base package; it is always 0x7F for application
// package type
uint32_t id;
// UTF-16 encoded name of the package
// The length is fixed 256 bytes encoded in UTF16 though UTF16 itself
// is a variable length encoding scheme
unsigned char name_utf16[128 * sizeof(char16_t)];
// An offset from the beginning of this struct to the string pool for types
uint32_t type_string_pool_offset;
// Index into string pool to indicate the last type visible
uint32_t last_public_type;
// Same - an offset from this struct as the key string pool
uint32_t key_string_pool_offset;
// The last index of a key that is visible
uint32_t last_public_key;
} BYTE_ALIGNED;
/*
* class TypeSpecHeader - Type specification header
*
* This header is followed by an array of uint32_t integers denoting the
* configuration of resources available for a certain resource of this
 * type. There is exactly one entry for each resource instance
*/
class TypeSpecHeader {
public:
CommonHeader common_header;
// Low 8 bits of a 32 bit integer represents the type id
uint8_t id;
uint8_t zero1;
uint16_t zero2;
// Number of entries after this struct
uint32_t entry_count;
// The pointer to the first entry (it does not consume struct space)
uint32_t data[0];
} BYTE_ALIGNED;
/*
* class TypeHeader - Header of the type instance chunk
*/
class TypeHeader {
public:
CommonHeader common_header;
// This is the ID of the type being described here
uint8_t type_id;
uint8_t zero1;
uint16_t zero2;
// Number of resource value instances inside this chunk
uint32_t entry_count;
// The offset to the starting address of the data part
// Between this structure and the header is an array of uint32_t
// that records the offset of each instance into the data part
uint32_t data_offset;
// This structure records the config of this group of values
// i.e. they are used under common configurations
TypeConfig config;
} BYTE_ALIGNED;
// We need the pointer type before it is declared
class TypeSpec;
/*
* class ResourceEntryField - If a resource entry is a complicated one then
* this represents fields inside the resource entry
*/
class ResourceEntryField {
public:
/*
* Res_MAKEINTERNAL() - Makes special name field values for attr types
*/
#define Res_MAKEINTERNAL(entry) (0x01000000 | (entry&0xFFFF))
// Special values for 'name' when defining attribute resources.
enum {
// This entry holds the attribute's type code.
ATTR_TYPE = Res_MAKEINTERNAL(0),
// For integral attributes, this is the minimum value it can hold.
ATTR_MIN = Res_MAKEINTERNAL(1),
// For integral attributes, this is the maximum value it can hold.
ATTR_MAX = Res_MAKEINTERNAL(2),
// Localization of this resource can be encouraged or required with
// an aapt flag if this is set
ATTR_L10N = Res_MAKEINTERNAL(3),
// for plural support, see android.content.res.PluralRules#attrForQuantity(int)
ATTR_OTHER = Res_MAKEINTERNAL(4),
ATTR_ZERO = Res_MAKEINTERNAL(5),
ATTR_ONE = Res_MAKEINTERNAL(6),
ATTR_TWO = Res_MAKEINTERNAL(7),
ATTR_FEW = Res_MAKEINTERNAL(8),
ATTR_MANY = Res_MAKEINTERNAL(9),
};
#undef Res_MAKEINTERNAL
// Bit mask of allowed types, for use with ATTR_TYPE.
enum {
// No type has been defined for this attribute, use generic
// type handling. The low 16 bits are for types that can be
// handled generically; the upper 16 require additional information
// in the bag so can not be handled generically for TYPE_ANY.
TYPE_ANY = 0x0000FFFF,
// Attribute holds a references to another resource.
TYPE_REFERENCE = 1<<0,
// Attribute holds a generic string.
TYPE_STRING = 1<<1,
// Attribute holds an integer value. ATTR_MIN and ATTR_MIN can
// optionally specify a constrained range of possible integer values.
TYPE_INTEGER = 1<<2,
// Attribute holds a boolean integer.
TYPE_BOOLEAN = 1<<3,
// Attribute holds a color value.
TYPE_COLOR = 1<<4,
// Attribute holds a floating point value.
TYPE_FLOAT = 1<<5,
// Attribute holds a dimension value, such as "20px".
TYPE_DIMENSION = 1<<6,
// Attribute holds a fraction value, such as "20%".
TYPE_FRACTION = 1<<7,
// Attribute holds an enumeration. The enumeration values are
// supplied as additional entries in the map.
TYPE_ENUM = 1<<16,
// Attribute holds a bitmask of flags. The flag bit values are
// supplied as additional entries in the map.
TYPE_FLAGS = 1<<17
};
// Enum of localization modes, for use with ATTR_L10N.
enum {
L10N_NOT_REQUIRED = 0,
L10N_SUGGESTED = 1
};
// This field has different interpretations
ResourceId name;
ResourceValue value;
} BYTE_ALIGNED;
/*
* class ResourceEntry - Represents resource entry in the body of type chunk
*/
class ResourceEntry {
public:
// The length of only this structure (i.e. not responsible for
// data after this structure)
uint16_t entry_length;
/*
* enum Flags
*/
enum Flags : uint16_t {
// This flag decides how the following data is organized
// For a simple entry the following data is just a ResourceValue instance
// Otherwise it is followed by a mapping descriptor and several maps
// to form a composite value
COMPLEX = 0x0001,
PUBLIC = 0x0002,
// If set, this is a weak resource and may be overridden by strong
// resources of the same name/type. This is only useful during
// linking with other resource tables.
FLAG_WEAK = 0x0004,
};
// As defined above
Flags flags;
// A string into key string table of the package denoting the name of the
// resource entry
uint32_t key;
// Different interpretation of the following bytes depending on
// whether the type is complex or not
// Note that the following structure is not counted as the length in
// the common header
union {
// This struct is used only if the resource is a complex one
struct {
// The resource ID of its parent which refers to another resource
// 0x00000000 if there is no parent
ResourceId parent_id;
// The number of key-value pairs after the body
uint32_t entry_count;
// This is the starting address of entry fields, there are
// entry_count of them
ResourceEntryField field_data[0];
} BYTE_ALIGNED;
// If the resource is not a complex one then use this one
ResourceValue value;
} BYTE_ALIGNED;
/*
* IsComplex() - Whether the resource entry is composite
*/
inline bool IsComplex() const {
return flags & Flags::COMPLEX;
}
/*
* IsPublic() - Returns true if the entry is in the public name space
*/
inline bool IsPublic() const {
return flags & Flags::PUBLIC;
}
} BYTE_ALIGNED;
class Package;
/*
* class Type - Represents a certain type of resource and all its contents
*/
class Type {
private:
// This is the number of bytes we use to initialize the buffer
static constexpr size_t INIT_BUFFER_LENGTH = 16UL;
public:
// If the offset table entry has value like this then the entry
// does not exist
// Note that this is 32 bit
static constexpr uint32_t ENTRY_NOT_PRESENT = 0xFFFFFFFF;
public:
// Original pointer to the header
TypeHeader *header_p;
// A pointer to the spec object (not the header!)
TypeSpec *type_spec_p;
// This stores readable names of the type
// Note that we need to specify a length for buffer objects because by
// default the buffer uses 64 KB internal storage on initialization
// Note that this name does not have any type information, so when
// use this to create directory we need to prepend the directory name
//
// Note: This is not always a valid C-String. If there is no special
// configuration then it is empty buffer with length being 0
//
// Note 2: If this buffer is empty then it means this is the default
// type config. Sometimes if we could not find a specific type config
// we just resort to the default
Buffer readable_name;
// Number of entries in this type table
size_t entry_count;
// This points to the offset table
uint32_t *offset_table;
// Points to the resource entry indexed by the offset table
unsigned char *data_p;
/*
* Constructor
*/
Type() :
header_p{nullptr},
type_spec_p{nullptr},
readable_name{INIT_BUFFER_LENGTH},
entry_count{0UL},
offset_table{nullptr},
data_p{nullptr}
{}
/*
* HasDefaultTypeConfig() - Whether the type config of this type object
* is the default one
*
* We use the size of the readable name as an indicator
*/
inline bool HasDefaultTypeConfig() const {
return readable_name.GetLength() == 0UL;
}
/*
* IsEntryPresent() - Whether an entry is present or not
*/
inline bool IsEntryPresent(uint16_t entry_id) const {
assert(entry_id < entry_count);
return offset_table[entry_id] != ENTRY_NOT_PRESENT;
}
/*
* GetEntryPtr() - Given the entry ID, return a pointer to the entry
*/
inline ResourceEntry *GetEntryPtr(size_t entry_id) {
assert(entry_id < entry_count);
// Lookup the offset and get the pointer to it
return reinterpret_cast<ResourceEntry *>(data_p +
offset_table[entry_id]);
}
// Entry point of all write xml functions. It dispatches control
// based on the base type
void WriteXml();
static constexpr const char *RES_PATH = "res";
static constexpr const char *VALUE_PATH_PREFIX = "values";
static constexpr const char *XML_SUFFIX = ".xml";
/*
* SwitchToValuesDir() - Switch to the values directory and opens the
* file for current base type
*
* This function takes care of possible postfix of "values"; The CWD
* is not changed after returning. If file open or directory operations
* returns error then exception is thrown
*/
FILE *SwitchToValuesDir(const char *file_name) {
// Save current directory first to get back after we have finished this
const char *cwd = FileUtility::GetCwd();
// Enters 'res' first
FileUtility::CreateOrEnterDir(RES_PATH);
// The total length we need is "values-" plus the length of the readable
// name; if the readable name is empty, the dash after "values" is omitted.
// We need 1 more byte for '\0' and 1 byte for the possible '-'
size_t value_path_length = strlen(VALUE_PATH_PREFIX) + \
readable_name.GetLength() + \
2;
Buffer value_path{value_path_length};
value_path.Append("values");
// If there is a special name then append them also after the dash
if(readable_name.GetLength() != 0UL) {
value_path.Append('-');
value_path.Append(readable_name);
}
// Make it a valid C string
value_path.Append('\0');
// And then enters the dir or creates it if first time
FileUtility::CreateOrEnterDir(value_path.GetCharData());
FILE *fp = FileUtility::OpenFile(file_name, "w");
// Frees current directory after switching back
FileUtility::EnterDir(cwd);
delete[] cwd;
return fp;
}
// For attribute type resources, prints its allowed format by interpreting
// the value of the first field
void PrintAttrFormat(Buffer *buffer_p, uint32_t format);
// Writes attr and enum flags for type attribute
void WriteAttrEnumFlags(FILE *fp,
Buffer *buffer_p,
uint32_t format,
ResourceEntryField *field_p,
uint32_t entry_count);
// Prints other possible fields in type attribute
size_t PrintAttrOther(ResourceEntryField *field_p,
ResourceEntry *entry_p,
ResourceTable *table_p,
Buffer *buffer_p,
uint32_t format_mask);
// Writes attribute as a XML file
void WriteAttrXml(const char *file_name);
/*
* HasNonStringDrawableEntry() - Whether the current type object contains
* any non-string drawable entry
*
* If not return false and the caller should not create an XML file for it
*/
bool HasNonStringDrawableEntry() {
// We only print non-string entry
size_t printable_entry_count = 0UL;
for(size_t i = 0;i < entry_count;i++) {
// Skip non-existing entries
if(IsEntryPresent(i) == false) {
continue;
}
// This is the pointer to resource entry field
ResourceEntry *entry_p = GetEntryPtr(i);
// We don't know how to deal with complex drawable entry
if(entry_p->IsComplex() == true) {
ReportError(INVALID_DRAWABLE_ENTRY, i);
} else if(entry_p->value.type != ResourceValue::DataType::STRING) {
printable_entry_count++;
}
}
// If the entry has nothing to print just return without creating the
// path
if(printable_entry_count == 0UL) {
#ifndef NDEBUG
dbg_printf("Skip resource type \"");
type_spec_p->GetBaseTypeName()->WriteToFile(stderr);
if(HasDefaultTypeConfig() == false) {
fputc('-', stderr);
readable_name.WriteToFile(stderr);
}
fprintf(stderr, "\" because it has no non-string entry\n");
#endif
return false;
}
// There is something to print
return true;
}
// Writes non-string valued drawables into a XML file
void WriteDrawableXml(const char *file_name);
// Store common array tag here
static constexpr const char *ARRAY_TAG_LIST[] = {
"string-array",
"integer-array",
"array",
};
// Valid indices inside ARRAY_TAG_LIST
static constexpr int STRING_ARRAY_TYPE_INDEX = 0;
static constexpr int INTEGER_ARRAY_TYPE_INDEX = 1;
static constexpr int OTHER_ARRAY_TYPE_INDEX = 2;
// Writes array into XML file
void WriteArrayXml(const char *file_name);
// Writes booleans into an XML file
void WriteBoolXml(const char *file_name);
// Writes IDs into an XML file
void WriteIdXml(const char *file_name);
// Writes styles into an XML file
void WriteStyleXml(const char *file_name);
// Writes strings into an XML file
void WriteStringXml(const char *file_name);
// Writes dimensions into an XML file
void WriteDimenXml(const char *file_name);
// Writes the color tag line into file
// Implemented in the CPP file
void WriteColorTagLine(ResourceTable *table_p,
Package *package_p,
ResourceEntry *entry_p,
Buffer *buffer_p,
FILE *fp);
// Writes color type information into an XML file
void WriteColorXml(const char *file_name);
// Writes integer type information into an XML file
void WriteIntegerXml(const char *file_name);
// Writes fraction type information into an XML file
void WriteFractionXml(const char *file_name);
// Writes plurals type information into an XML file
void WritePluralsXml(const char *file_name);
// Process layout type xmls
void ProcessLayoutXml();
// Process animation XMLs
void ProcessAnimXml();
// Process XML type files
void ProcessXmlTypeXml();
// Processes raw type files
void ProcessRawType();
// Process XML files inside animator type
void ProcessAnimatorXml();
// Process XML files inside interpolator type
void ProcessInterpolatorXml();
// Process XML files inside mipmap type
void ProcessMipmapXml();
// Process XML files inside transition type
void ProcessTransitionXml();
// Process XML files inside menu type
void ProcessMenuXml();
};
/*
* class TypeSpec - General type specification on configurations
*/
class TypeSpec {
static constexpr size_t BASE_TYPE_NAME_INIT_LENGTH = 16UL;
public:
// This points to the type spec header
TypeSpecHeader *header_p;
// This points to the containing package instance (not the package header)
Package *package_p;
// Type ID - begins at 1, and 0 means invalid (so whenever we use this
// to probe the string table we need to decrease it by 1)
uint32_t type_id;
// Number of entries in the table
size_t entry_count;
// Pointer to the configuration table about configurations of different
// value instances (i.e. a bit field indicating which kind of resources
// are available)
uint32_t *config_table;
// Type values of different configurations
std::vector<Type> type_list;
// ASCII representation of the type name
// This is used by many routines to identify a type because type ID
// does not identify types across packages
Buffer base_type_name;
/*
* Constructor
*/
TypeSpec() :
header_p{nullptr},
package_p{nullptr},
entry_count{0UL},
config_table{nullptr},
type_list{},
base_type_name{BASE_TYPE_NAME_INIT_LENGTH}
{}
/*
* GetBaseTypeName() - Returns the base type name buffer pointer
*/
inline Buffer *GetBaseTypeName() {
return &base_type_name;
}
/*
* GetDefaultConfigType() - Returns the Type object pointer if it has
* a default config
*
* If default config type does not exist then return nullptr which is
* usually treated as an error
*/
Type *GetDefaultConfigType() {
for(size_t i = 0;i < type_list.size();i++) {
if(type_list[i].HasDefaultTypeConfig() == true) {
return &type_list[i];
}
}
// Not found
return nullptr;
}
/*
* GetConfigType() - Given a configuration, returns its type pointer
*
* This function loops through all available type objects and matches
* the type config object. If the object is not found, nullptr is returned
*/
Type *GetConfigType(const TypeConfig &type_config) {
for(size_t i = 0;i < type_list.size();i++) {
if(type_config == type_list[i].header_p->config) {
return &type_list[i];
}
}
return nullptr;
}
};
/*
* class Package - Represents internals of a package
*/
class Package {
public:
// Pointer to the package header
PackageHeader *header_p;
// These two are used to store headers
StringPoolHeader *type_string_pool_header_p;
StringPoolHeader *key_string_pool_header_p;
// Two string pools indicated in the header
StringPool type_string_pool;
StringPool key_string_pool;
// A list of type spec objects
std::vector<TypeSpec> type_spec_list;
// This points to the resource table
ResourceTable *table_p;
/*
* GetTypeCount() - Returns the number of types defined as resources
*
* Since the number of strings in the string pool defines all types
* we could treat this as the number of types
*/
size_t GetTypeCount() const {
return type_string_pool.string_count;
}
};
// Data members
private:
TableHeader *table_header_p;
// This is a mapping between package ID and the package pointer stored
// also inside this class. We have two access methods for packages:
// either through this mapping using the package ID or through the
// array of package objects
std::unordered_map<uint8_t, Package *> package_map;
// A list of packages
// We do reserve space for this vector such that only 1 malloc() is done
std::vector<Package> package_list;
public:
/*
* Constructor
*/
ResourceTable(unsigned char *p_raw_data_p,
size_t p_length,
bool p_own_data=false) :
ResourceBase{p_raw_data_p, p_length, p_own_data},
table_header_p{nullptr} {
// Check whether all header fields are valid; if not just exit
CommonHeader *next_header_p = VerifyTableHeader();
if(IsValidTable() == false) {
table_header_p = nullptr;
return;
}
while(next_header_p != nullptr) {
next_header_p = ParseNext(next_header_p);
}
assert(string_pool_header_p != nullptr);
return;
}
/*
* Destructor
*
   * Note that the base class destructor takes care of releasing the
   * data array if this instance owns it
*/
~ResourceTable() {
dbg_printf("Unregistering packages...\n");
// Unregister all packages from the package group here
for(Package &package : package_list) {
package_group.UnregisterPackage(package.header_p->id);
}
return;
}
// This is the Android system package ID
static constexpr uint8_t SYSTEM_PACKAGE_ID = 0x01;
/*
* GetResourceIdString() - Returns the string representation of a resource ID
*
* This is used in the resource file to refer to other resources. This
* function does not clear the buffer, so the buffer could contain some other
* contents before calling this function
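   *
   * For example, an entry in the system package may render as
   * "@android:string/ok" while an app-local entry renders as
   * "@string/app_name" (illustrative names; actual output depends on the
   * type and key string pools)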
*/
static void GetResourceIdString(ResourceId id,
const TypeConfig *type_config_p,
Buffer *buffer_p) {
// Maybe in the future we will need this
(void)type_config_p;
uint8_t package_id = id.package_id;
uint8_t type_id = id.type_id;
uint16_t entry_id = id.entry_id;
uint8_t type_index = type_id - 1;
uint16_t entry_index = entry_id;
// Since this function is static we always use the global package group
ResourceTable *table_p = package_group.GetResourceTable(package_id);
auto it = table_p->package_map.find(package_id);
if(it == table_p->package_map.end()) {
ReportError(PACKAGE_ID_NOT_FOUND, static_cast<uint32_t>(package_id));
}
// This is the package pointer found
Package *package_p = it->second;
// Then verify whether the type ID is correct
if(type_index >= package_p->GetTypeCount()) {
ReportError(INVALID_TYPE_ID, static_cast<uint32_t>(type_id));
}
// This is a pointer to the type spec header
TypeSpec *type_spec_p = &package_p->type_spec_list[type_index];
    // Then loop over the types in the type spec's list to find the most
    // appropriate one
for(Type &type : type_spec_p->type_list) {
if(entry_index >= type.entry_count) {
dbg_printf("Entry index is greater than the"
" entry count of type instance\n");
continue;
}
// Entry is not present - try next
if(type.IsEntryPresent(entry_index) == false) {
continue;
}
// This is the entry whose name we are using
ResourceEntry *entry_p = type.GetEntryPtr(entry_index);
if(package_id == SYSTEM_PACKAGE_ID) {
buffer_p->Append("@android:");
} else {
buffer_p->Append('@');
}
buffer_p->Append(*type.type_spec_p->GetBaseTypeName());
buffer_p->Append('/');
// Append the name of the entry as the last component
package_p->key_string_pool.AppendToBuffer(entry_p->key, buffer_p);
return;
}
ReportError(RESOURCE_ID_NOT_AVAILABLE, id.data);
return;
}
/*
* GetResourceIdBaseTypeName() - Returns a buffer indicating the type name
* of a certain resource ID
*
* Since type name is stored inside the TypeSpec class, we do not iterate
* over types, which is an optimization over the traditional
* GetResourceEntry()
*/
static Buffer *GetResourceIdBaseTypeName(ResourceId id) {
uint8_t package_id = id.package_id;
uint8_t type_id = id.type_id;
    // Type ID starts at 1 (0 is the invalid indicator), so the index is ID - 1
uint8_t type_index = type_id - 1;
// Since this function is static we always use the global package group
ResourceTable *table_p = package_group.GetResourceTable(package_id);
auto it = table_p->package_map.find(package_id);
if(it == table_p->package_map.end()) {
ReportError(PACKAGE_ID_NOT_FOUND, static_cast<uint32_t>(package_id));
}
// This is the package pointer found
Package *package_p = it->second;
// Then verify whether the type ID is correct
if(type_index >= package_p->GetTypeCount()) {
ReportError(INVALID_TYPE_ID, static_cast<uint32_t>(type_id));
}
// This is a pointer to the type spec header
TypeSpec *type_spec_p = &package_p->type_spec_list[type_index];
return &type_spec_p->base_type_name;
}
/*
* GetResourceEntry() - Returns the resource entry pointer given an ID
*
* Since each resource has a unique resource identifier we could locate them
* using the identifier and print its name. Whether or not the resource
* is a complex one does not matter since we return ResourceEntry pointer
*
* In the case that there are multiple resource types for different
* configurations, we need a type config object to match all possible
* configurations, and in the case none matches, just fall back to the
* default
*
   * Note that this function only searches the current resource table. If
* the package ID is not in the current resource table then we need to
* check external packages using package group instance
*
* This function also accepts an optional type pointer for getting the actual
* type instance being used.
*
* If type_config is nullptr then we do not match type and always use
* the default resource
*/
static ResourceEntry *GetResourceEntry(ResourceId id,
const TypeConfig *type_config_p,
Type **type_p_p = nullptr) {
uint8_t package_id = id.package_id;
uint8_t type_id = id.type_id;
uint16_t entry_id = id.entry_id;
uint8_t type_index = type_id - 1;
uint16_t entry_index = entry_id;
// Since this function is static we always use the global package group
ResourceTable *table_p = package_group.GetResourceTable(package_id);
auto it = table_p->package_map.find(package_id);
if(it == table_p->package_map.end()) {
ReportError(PACKAGE_ID_NOT_FOUND, static_cast<uint32_t>(package_id));
}
// This is the package pointer found
Package *package_p = it->second;
// Then verify whether the type ID is correct
if(type_index >= package_p->GetTypeCount()) {
ReportError(INVALID_TYPE_ID, static_cast<uint32_t>(type_id));
}
// This is a pointer to the type spec header
TypeSpec *type_spec_p = &package_p->type_spec_list[type_index];
    // If the type spec list has no elements then we know the type spec
    // has a problem
if(type_spec_p->type_list.size() == 0UL) {
ReportError(NO_TYPE_IN_TYPE_SPEC, id.data);
}
// Use this to indicate whether we have already used the default type
bool use_default_type = false;
// make it nullptr to distinguish the case that there is no type config
Type *type_p = nullptr;
// Match the configure index if it is supplied
if(type_config_p != nullptr) {
// Try to match the type config - if not found just use the default one
type_p = type_spec_p->GetConfigType(*type_config_p);
}
if(type_p == nullptr) {
dbg_printf("Type config not matched - using default config type\n");
// Get the default config type (usually has a buffer length of 0)
type_p = type_spec_p->GetDefaultConfigType();
use_default_type = true;
if(type_p == nullptr) {
ReportError(DEFAULT_CONFIG_TYPE_NOT_FOUND,
static_cast<uint32_t>(type_id));
}
}
// If entry ID is out of bound then just report error
if(entry_index >= type_p->entry_count) {
ReportError(INVALID_ENTRY_ID, entry_id);
} else if(type_p->IsEntryPresent(entry_index) == false) {
if(use_default_type == false) {
dbg_printf("Type config index matched but entry is not present"
" - using default config type\n");
type_p = type_spec_p->GetDefaultConfigType();
use_default_type = true;
if(type_p == nullptr) {
ReportError(DEFAULT_CONFIG_TYPE_NOT_FOUND,
static_cast<uint32_t>(type_id));
}
} else {
ReportError(ENTRY_NOT_PRESENT_IN_DEFAULT_TYPE,
static_cast<uint32_t>(type_id),
static_cast<uint32_t>(entry_id));
}
      // After falling back to the default type, first make sure the entry
      // ID is still in bounds before probing the presence bit field
      if(entry_index >= type_p->entry_count) {
        ReportError(INVALID_ENTRY_ID, entry_id);
      }
      // If the entry is still not present in the default type then
      // report error
      if(type_p->IsEntryPresent(entry_index) == false) {
        ReportError(ENTRY_NOT_PRESENT_IN_DEFAULT_TYPE,
                    static_cast<uint32_t>(type_id),
                    static_cast<uint32_t>(entry_id));
      }
}
// After this we have fetched the correct type object pointer and know
// that the entry ID is valid
// Also optionally output the type pointer actually being used because
// there is no backward pointer inside the resource entry object
if(type_p_p != nullptr) {
*type_p_p = type_p;
}
return type_p->GetEntryPtr(entry_index);
}
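/*
 * Illustrative call (a sketch): resolve an entry and also obtain the
 * type instance that actually served it
 *
 *   Type *used_type_p = nullptr;
 *   ResourceEntry *entry_p = \
 *     ResourceTable::GetResourceEntry(id, &config, &used_type_p);
 */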
/*
* IsValidTable() - Whether the resource table is valid after verification
*/
inline bool IsValidTable() const {
return table_header_p != nullptr;
}
/*
* VerifyTableHeader() - Check fields in the table header
*
* If all check pass then return the byte next to table header. Otherwise
* return nullptr.
*
* This function always sets table_header_p to the first byte of the stream.
* However if check fails the caller is responsible for setting the pointer
* to nullptr to indicate that the resource table is not valid
*
* This function has exactly the same structure as VerifyXmlHeader()
*/
CommonHeader *VerifyTableHeader() {
table_header_p = reinterpret_cast<TableHeader *>(raw_data_p);
if(table_header_p->common_header.type != ChunkType::RESOURCE_TABLE) {
dbg_printf("Resource table type 0x%X is wrong (expecting 0x%X)\n",
static_cast<uint32_t>(table_header_p->common_header.type),
static_cast<uint32_t>(ChunkType::RESOURCE_TABLE));
return nullptr;
} else if(table_header_p->common_header.header_length != \
sizeof(TableHeader)) {
dbg_printf("Resource table length 0x%X is wrong (expecting 0x%lX)\n",
table_header_p->common_header.header_length,
sizeof(TableHeader));
return nullptr;
} else if(table_header_p->common_header.total_length != length) {
dbg_printf("XML total length 0x%X is wrong (expecting 0x%lX)\n",
table_header_p->common_header.total_length,
length);
      // We require that the entire file is part of the resource table
      // Otherwise we are unable to decode the rest of it
return nullptr;
}
dbg_printf("Verified resource table header; package count = %u\n",
table_header_p->package_count);
// This serves as an optimization such that we only allocate once
// for the parsing process
package_list.reserve(table_header_p->package_count);
// Return the next byte and cast it as common header for later parsing
return reinterpret_cast<CommonHeader *>(
TypeUtility::Advance(table_header_p, sizeof(TableHeader)));
}
/*
* InitPackage() - Initialize a package object
*
   * This function initializes the type spec list with the proper number of
   * slots to hold type specs, since the number of type spec objects is
   * exactly the size of the type string pool
*/
void InitPackage(Package *package_p, PackageHeader *package_header_p) {
package_p->header_p = package_header_p;
// Also assign the resource table's instance with the package
package_p->table_p = this;
package_p->type_string_pool_header_p = \
reinterpret_cast<StringPoolHeader *>( \
TypeUtility::Advance(package_header_p,
package_header_p->type_string_pool_offset));
ConstructStringPool( \
reinterpret_cast<CommonHeader *>(package_p->type_string_pool_header_p),
&package_p->type_string_pool);
package_p->key_string_pool_header_p = \
reinterpret_cast<StringPoolHeader *>( \
TypeUtility::Advance(package_header_p,
package_header_p->key_string_pool_offset));
ConstructStringPool( \
reinterpret_cast<CommonHeader *>(package_p->key_string_pool_header_p),
&package_p->key_string_pool);
// This is done to only allocate exactly GetTypeCount() slots for type
// spec objects (i.e. string pool size)
package_p->type_spec_list.resize(package_p->GetTypeCount());
return;
}
/*
* DebugPrintPackageTypeString() - Prints all type strings in a package
*
* This function is meant for debugging
*/
void DebugPrintPackageTypeString(Package *package_p) {
dbg_printf(" Resource types: ");
if(package_p->GetTypeCount() > 0) {
package_p->type_string_pool.DebugPrint(0, "%s");
// Print out all types in debug output; if debug is turned off this will
// be optimized out
for(size_t i = 1;i < package_p->GetTypeCount();i++) {
fprintf(stderr, " | ");
package_p->type_string_pool.DebugPrint(i, "%s");
}
fprintf(stderr, "\n");
} else {
fprintf(stderr, "[None]\n");
}
return;
}
/*
* ParsePackage() - Parses the package header and push a package
* object to the package list
*
* This function locates all type specs inside the package and then parses
* all type headers after the type spec header. Each typespec stores metadata
* about a type and each type object stores the resource value under a certain
* configuration (e.g. language, screen resolution, etc.)
*/
void ParsePackage(CommonHeader *header_p) {
PackageHeader *package_header_p = \
reinterpret_cast<PackageHeader *>(header_p);
dbg_printf(" Package ID = 0x%02X\n", package_header_p->id);
    // Even in the Android runtime this is not taken care of, so ....
assert(package_header_p->type_string_pool_offset != 0);
assert(package_header_p->key_string_pool_offset != 0);
    // Construct a package object at the back of the vector
// This saves the cost of copying the object
package_list.emplace_back();
// This points to the package object we just inserted
Package *package_p = &package_list.back();
// Initialize members inside the package object
InitPackage(package_p, package_header_p);
    // Also put the package object into the package map (we do not support
    // non-base packages)
if(package_p->header_p->id == 0x00000000) {
ReportError(ONLY_SUPPORT_BASE_PACKAGE);
}
package_map[static_cast<uint8_t>(package_p->header_p->id)] = package_p;
// Also register the package within package group global object
// using the package ID as well as the resource table instance
package_group.RegisterPackage(package_header_p->id, this);
#ifndef NDEBUG
DebugPrintPackageTypeString(package_p);
#endif
// The first type spec chunk must be after the key string pool
// so use its total size to determine (hopefully string pool is aligned)
CommonHeader *type_spec_header_p = \
reinterpret_cast<CommonHeader *>( \
TypeUtility::Advance(
package_p->key_string_pool_header_p,
package_p->key_string_pool_header_p->common_header.total_length));
// Each type will have a type spec chunk, so just use the number of
// elements in type string pool
for(size_t i = 0;i < package_p->GetTypeCount();i++) {
uint32_t type_id = ParseTypeSpecHeader(type_spec_header_p, package_p);
// Use its length field to find the following type spec chunk
type_spec_header_p = \
TypeUtility::Advance(type_spec_header_p,
type_spec_header_p->total_length);
// Then loop to parse types
      // Note that here every time we speculate a type header we need to
// check whether the pointer is still in the range of the data area
while(IsValidPointer(type_spec_header_p) == true && \
type_spec_header_p->type == ChunkType::TYPE) {
ParseTypeHeader(type_spec_header_p, package_p, type_id);
type_spec_header_p = \
TypeUtility::Advance(type_spec_header_p,
type_spec_header_p->total_length);
}
}
return;
}
/*
* ParseTypeSpecHeader() - Parses type specification header and
* returns the type ID
*/
uint32_t ParseTypeSpecHeader(CommonHeader *header_p, Package *package_p) {
dbg_printf("Parsing TypeSpec chunk @ offset 0x%lX\n",
TypeUtility::GetPtrDiff(raw_data_p, header_p));
assert(header_p->type == ChunkType::TYPE_SPEC);
TypeSpecHeader *type_spec_header_p = \
reinterpret_cast<TypeSpecHeader *>(header_p);
// Get the type ID which also represents its position in the vector
    // NOTE: The real index is always 1 less than the recorded ID
uint32_t type_id = static_cast<uint32_t>(type_spec_header_p->id);
// It could not be 0 and also could not exceed the maximum
if(type_id == 0 || type_id > package_p->GetTypeCount()) {
ReportError(INVALID_TYPE_ID, type_id);
}
// This is the type spec object already allocated on the type spec list
TypeSpec *type_spec_p = &package_p->type_spec_list[type_id - 1];
    // Assert the type has never been seen before
assert(type_spec_p->header_p == nullptr);
assert(type_spec_p->type_list.size() == 0);
// Assign data members
type_spec_p->header_p = type_spec_header_p;
type_spec_p->package_p = package_p;
type_spec_p->type_id = type_id;
type_spec_p->entry_count = type_spec_header_p->entry_count;
type_spec_p->config_table = type_spec_header_p->data;
package_p->type_string_pool.AppendToBuffer(type_id - 1,
&type_spec_p->base_type_name);
return type_id;
}
/*
* DebugPrintAllTypeEntryNames() - Prints on stderr names of all entries in the
* type chunk body
*/
void DebugPrintAllTypeEntryNames(Package *package_p, Type *type_p) {
for(size_t i = 0;i < type_p->entry_count;i++) {
// Resource entry does not exist for current configuration
if(type_p->IsEntryPresent(i) == false) {
continue;
}
// This is a pointer to the resource entry
ResourceEntry *resource_entry_p = type_p->GetEntryPtr(i);
dbg_printf(" Name %lu = ", i);
Buffer buffer{128};
package_p->key_string_pool.AppendToBuffer(resource_entry_p->key, &buffer);
if(resource_entry_p->IsComplex() == true || \
resource_entry_p->IsPublic() == true) {
buffer.Append(" (");
}
// For complex types it has two more fields - parent resource ID and
// count of the key value pair that follows
if(resource_entry_p->IsComplex() == true) {
assert(resource_entry_p->entry_length == 16UL);
buffer.Append("COMPLEX ");
} else {
assert(resource_entry_p->entry_length == 8UL);
}
if(resource_entry_p->IsPublic() == true) {
buffer.Append("PUBLIC ");
}
if(resource_entry_p->IsComplex() == true || \
resource_entry_p->IsPublic() == true) {
// Eat back the last space character
buffer.Rewind(1);
buffer.Append(')');
}
buffer.Append('\n');
buffer.WriteToFile(stderr);
// In the next line print out the extra complex field
if(resource_entry_p->IsComplex() == true) {
dbg_printf(" * Parent ID = 0x%X; entry count = %u\n",
resource_entry_p->parent_id.data,
resource_entry_p->entry_count);
// This is the starting address of the array of ResourceEntryField
ResourceEntryField *entry_field_p = resource_entry_p->field_data;
// Loop through each entry to see its internal data
for(uint32_t j = 0;j < resource_entry_p->entry_count;j++) {
          // This is the current entry field being processed
ResourceEntryField *field_p = entry_field_p + j;
// Print out the 32 bit integer resource ID
dbg_printf(" "
"entry name = 0x%.8X; type = 0x%.4X, data = 0x%.8X\n",
field_p->name.data,
static_cast<uint32_t>(field_p->value.type),
field_p->value.data);
buffer.Reset();
// Append value but do not resolve reference
AppendResourceValueToBuffer(&field_p->value, &buffer, nullptr, false);
dbg_printf(" Printed value \"");
buffer.Append("\"\n");
buffer.WriteToFile(stderr);
// If the type ID is not attr then the field name must have
// a ATTR type ID
//if(type_p->type_spec_p->type_id != 0x01) {
// assert(field_p->name.type_id == 0x01);
//}
} // Loop through entry fields
} else {
dbg_printf(" "
"entry type = 0x%.4X, data = 0x%.8X\n",
static_cast<uint32_t>(resource_entry_p->value.type),
resource_entry_p->value.data);
buffer.Reset();
AppendResourceValueToBuffer(&resource_entry_p->value,
&buffer,
nullptr,
false);
dbg_printf(" Printed value \"");
buffer.Append("\"\n");
buffer.WriteToFile(stderr);
} // If is complex type then ... else ...
} // for resource entry for the current type
return;
}
/*
   * DebugWriteTypeXml() - Writes an XML file for a given type
*/
void DebugWriteTypeXml(Type *type_p) {
type_p->WriteXml();
return;
}
/*
* ParseTypeHeader() - Parses type header
*
* Note that the ID of the type recorded in the type spec header is
* passed in to verify that the type header has the same ID
*/
void ParseTypeHeader(CommonHeader *header_p,
Package *package_p,
uint32_t type_id) {
TypeHeader *type_header_p = \
reinterpret_cast<TypeHeader *>(header_p);
// This points to the type spec object
TypeSpec *type_spec_p = &package_p->type_spec_list[type_id - 1];
// Construct a new type object in-place and grab a pointer to it
type_spec_p->type_list.emplace_back();
Type *type_p = &type_spec_p->type_list.back();
type_p->header_p = type_header_p;
type_p->type_spec_p = type_spec_p;
type_header_p->config.GetName(&type_p->readable_name);
type_p->entry_count = type_header_p->entry_count;
// The offset table is just located after the header
// Note that since we are not certain about the length of the config
// structure, currently just use header length to determine the end
// of the header
type_p->offset_table = reinterpret_cast<uint32_t *>( \
TypeUtility::Advance(type_header_p,
type_header_p->common_header.header_length));
// This is the base address of offset_table and its offset itself is
// specified by a header field
type_p->data_p = reinterpret_cast<unsigned char *>( \
TypeUtility::Advance(type_header_p,
type_header_p->data_offset));
// The ID must match
assert(static_cast<uint32_t>(type_header_p->type_id) == type_id);
return;
}
/*
* IsValidPointer() - Checks whether a pointer is still in the range of the
* data area
*/
inline bool IsValidPointer(void *p) const {
return TypeUtility::GetPtrDiff(raw_data_p, p) < length;
}
/*
* ParseNext() - Parse the contents of the resource table
*
* This function has the same structure as the one in class BinaryXml
*
* If we have reached the end then just return nullptr and that's it
*/
CommonHeader *ParseNext(CommonHeader *next_header_p) {
assert(next_header_p != nullptr);
if(IsValidPointer(next_header_p) == false) {
return nullptr;
}
CommonHeader *ret_header_p = \
TypeUtility::Advance(next_header_p, next_header_p->total_length);
dbg_printf("Parsing header of type %u @ offset 0x%lX\n",
(uint32_t)next_header_p->type,
TypeUtility::GetPtrDiff(raw_data_p, next_header_p));
switch(next_header_p->type) {
case ChunkType::RESOURCE_TABLE: {
ReportError(UNEXPECTED_START_OF_RESOURCE_TABLE);
break;
}
case ChunkType::STRING_POOL: {
ParseStringPool(next_header_p);
break;
}
case ChunkType::PACKAGE: {
// This function also needs to recognize the end of data
ParsePackage(next_header_p);
break;
}
default: {
ReportError(UNKNOWN_CHUNK_TYPE,
static_cast<uint32_t>(next_header_p->type),
(size_t)next_header_p - (size_t)raw_data_p);
break;
}
} // switch type
return ret_header_p;
}
/*
* DebugPrintAll() - Prints everything in this resource table
*/
void DebugPrintAll() {
for(Package &package : package_list) {
for(TypeSpec &type_spec : package.type_spec_list) {
TypeSpecHeader *type_spec_header_p = type_spec.header_p;
dbg_printf("Type id = %u; entry_count = %u (type name = ",
static_cast<uint32_t>(type_spec_header_p->id),
static_cast<uint32_t>(type_spec_header_p->entry_count));
Buffer buffer{128};
package.type_string_pool.AppendToBuffer(type_spec.type_id - 1, &buffer);
buffer.Append(")\n");
buffer.WriteToFile(stderr);
        // Here we check whether every type spec has a default config type
bool default_found = false;
for(Type &type : type_spec.type_list) {
dbg_printf(" Type config name = ");
if(type.readable_name.GetLength() == 0UL) {
fprintf(stderr, "[Default]\n");
default_found = true;
} else {
type.readable_name.WriteToFile(stderr);
fputc('\n', stderr);
}
dbg_printf(" Entry count = %lu\n", type.entry_count);
DebugPrintAllTypeEntryNames(&package, &type);
}
// Print as a warning
if(default_found == false) {
dbg_printf("*** No default type found for typespec (ID = %u)!\n",
type_spec.type_id);
}
}
}
return;
}
/*
* DebugWriteXml() - Writes XML into the corresponding file under /res folder
*/
void DebugWriteXml() {
for(Package &package : package_list) {
for(TypeSpec &type_spec : package.type_spec_list) {
for(Type &type : type_spec.type_list) {
type.WriteXml();
}
}
}
return;
}
};
} // namespace android_dalvik_analysis
} // namespace wangziqi2013
#endif
|
import {
  Component,
  ChangeDetectionStrategy,
  EventEmitter,
  OnInit
} from '@angular/core';
@Component({
selector: 'ng-demo-component',
changeDetection: ChangeDetectionStrategy.OnPush,
templateUrl: 'template.html'
})
export class DemoComponent implements OnInit {
  ngOnInit() {
    // Arrow function keeps the component's lexical `this`
    this.anotherContext.emitter.subscribe(() => {
      alert('hi from emitter');
    });
  }
myContext = { $implicit: 'World', localSk: 'Svet' };
name: string = 'jimmy';
anotherContext = {
name: 'jimmy',
income: 1800,
skills: ['c++', 'javascript'],
eventClicked: function() {
alert('event click');
},
emitter: new EventEmitter()
};
}
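// Illustrative template usage (a sketch; template.html is assumed to render
// an ng-template with these context objects):
//
//   <ng-container *ngTemplateOutlet="tpl; context: myContext"></ng-container>
//   <!-- inside the template: {{ $implicit }} / {{ localSk }} -->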
|
/* Sequential File Operations for Device Log.
*/
static inline void *devlog_get_idx(struct devlog_info *dinfo, loff_t pos)
{
if (pos > dinfo->nentries)
return NULL;
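	/* Bias by one so that a valid position 0 is not returned as NULL,
	 * which the seq_file iterator would treat as end-of-sequence.
	 */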
return (void *)(uintptr_t)(pos + 1);
} |
/**
* Listen for events on a file descriptor
*
* @param fd File descriptor
* @param flags Wanted event flags
* @param fh Event handler
* @param arg Handler argument
*
* @return 0 if success, otherwise errorcode
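 *
 * Note: passing flags=0 and fh=NULL de-registers the descriptor; this is
 * presumably how fd_close() stops listening on an fd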
*/
int fd_listen(int fd, int flags, fd_h *fh, void *arg)
{
struct re *re = re_get();
int err = 0;
DEBUG_INFO("fd_listen: fd=%d flags=0x%02x\n", fd, flags);
if (fd < 0) {
DEBUG_WARNING("fd_listen: corrupt fd %d\n", fd);
return EBADF;
}
if (flags || fh) {
err = poll_setup(re);
if (err)
return err;
}
if (fd >= re->maxfds) {
if (flags) {
DEBUG_WARNING("fd_listen: fd=%d flags=0x%02x"
" - Max %d fds\n",
fd, flags, re->maxfds);
}
return EMFILE;
}
if (re->fhs) {
re->fhs[fd].flags = flags;
re->fhs[fd].fh = fh;
re->fhs[fd].arg = arg;
}
re->nfds = max(re->nfds, fd+1);
switch (re->method) {
#ifdef HAVE_POLL
case METHOD_POLL:
err = set_poll_fds(re, fd, flags);
break;
#endif
#ifdef HAVE_EPOLL
case METHOD_EPOLL:
if (re->epfd <= 0)
return EBADFD;
err = set_epoll_fds(re, fd, flags);
break;
#endif
default:
break;
}
if (err) {
if (flags && fh) {
fd_close(fd);
DEBUG_WARNING("fd_listen: fd=%d flags=0x%02x (%m)\n",
fd, flags, err);
}
}
return err;
} |
CPEC will not bring automatic prosperity for Pakistan
Recently, a WhatsApp message based on a write-up went viral, creating ambiguities about CPEC and its impact on Pakistan. It claimed that, rather than a game changer, the project was game over for Pakistan. It is hardly the first time that such ambiguities have been spread; we have seen similar tactics employed in pursuit of a hidden agenda. For a better understanding of the project, here are my counter-arguments to the concerns raised.
1) Ninety-one per cent of the income from Gwadar port will go to China and only 9% will come to Pakistan: Developing and running a port is not like building a house or running a shop; it needs huge investment along with a number of liabilities. Hence, if Pakistan gets around 10% of the net income without having spent a single penny, that is a great deal indeed. A strategic resource would be developed with someone else’s money but will ultimately remain Pakistan’s. Development of Gwadar will generate significant economic activity not only for Pakistan but also for this deprived area, which lacks even basic necessities. Do you think Pakistan would have been able to develop this port without any external help in the next three to four decades? If not, then rest assured, it is the best possible proposition with minimum risk.
2) Chinese companies will get preferential treatment and tax exemptions, making it impossible for local companies to compete and opening the market to a commercial invasion: This statement itself is misleading, as no long-term CPEC plans have been disclosed yet. Only power and infrastructure projects were given tax rebates, and only because we are suffering from chronic power shortages. Does anyone remember when the last power project was installed in Pakistan before CPEC? There is another aspect of this tax rebate: it reduces the construction cost of these projects, which in turn results in lower electricity tariffs.
3) Money for the road network comes from Pakistan and hence, we are paying for the roads which China will use to export its products: This statement has two major flaws. Firstly, the roads built under CPEC will remain Pakistan’s property through the National Highway Authority and will generate revenue from toll tax and other commercial activities that will take place along these roads; let’s leave out the social impacts and other benefits for now. Secondly, Chinese companies will build these roads with money pouring in from China at a concessionary rate that is in Pakistan’s favour. Pakistani companies are free to use this infrastructure for their exports or other logistics needs.
4) We will have to repay the loans taken for the energy projects: This is a completely false and misleading statement, as out of the initial $46 billion, $30 billion is for energy projects that will be installed on a BOOT (Build-Own-Operate-Transfer) basis. The Chinese company behind each power project will be responsible for financing and repayment. Pakistan will buy the electricity, which will be supplied for both commercial and domestic use. No loan has been taken by the government for any energy or power project so far. On the other hand, Chinese companies investing in power projects will receive returns on their investment, and there is nothing wrong in that.
5) There is no guaranteed profit: Considering that the circular debt in Pakistan is huge, would you install even a 1kW power generator for the government at your own expense? The answer is no. In that case, if China is going to resolve one of Pakistan’s mega problems, why would one want to ruin it with false arguments?
6) India is buying solar electricity for four cents/kWh, while we will buy coal-based electricity for eight cents/kWh: First of all, one can’t compare coal prices with solar prices. Also consider the exchange rates of the Pakistani and Indian currencies, and then try to understand the energy mix of the two countries. India already produces 59% of its electricity from coal, while before CPEC the share of coal in our energy mix was only 2%. Another problem is that solar-based electricity can only be used till noon, and industries can’t stop functioning after that. Every country that has shifted to renewable energy first produced enough electricity through reliable sources such as coal to bear the burden of major demand.
7) Pakistan’s ‘East India Company’: When the East India Company invaded the subcontinent, its main motive was money. India’s GDP was more than 25% of the world’s GDP at that time, and Britain’s was less than 5%. Now China’s GDP is the second largest in the world, while ours barely registers. So why would they colonise Pakistan?
Lastly, CPEC will not bring automatic prosperity for Pakistan. God helps those who help themselves; if we want economic prosperity and development, we will have to devise our path ourselves. CPEC will provide an opportunity for everyone. I met an industrialist last month who wants to start a mega project next year, for which he will need 5,000 qualified engineers. But this will only happen if we have the infrastructure and electricity for it. You can blame the government for many things, but CPEC is not one of them. Our industrialists are well aware of the opportunities, and I receive multiple queries every week from different businessmen regarding these opportunities and the possibility of increasing exports once the project is complete. Believing in Pakistan is the only way forward.
Published in The Express Tribune, September 8th, 2017.
Like Opinion & Editorial on Facebook, follow @ETOpEd on Twitter to receive all updates on all our daily pieces.
Read full story |
#include<iostream>
#include<stdio.h>
#include<cstdlib>
using namespace std;
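// For each queried value z, M[z] is its position in the permutation; l
// accumulates the total steps when scanning from the left end and r the
// total when scanning from the right end.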
int main()
{
int A[100008],M[100008],i,k,n,z;
long long int l=0,r=0;
scanf("%d",&n);
for(i=1;i<=n;i++)
{
scanf("%d",&A[i]);
M[A[i]]=i;
}
scanf("%d",&k);
while(k--)
{
scanf("%d",&z);
l=l+M[z];
r=r+ (n+1)-M[z];
}
cout << l << " " << r << endl;
return 0;
}
|
def add_text_to_file(self, text):
    """Type ``text`` into the page's text area and return that element."""
    code_input_element = self.get_text_area()
code_input_element.send_keys(text)
return code_input_element |
LOS ANGELES -- The UCLA Bruins stormed the cardinal logo at midfield, posing for photos and whipping blue flags through the crisp evening air. They moved the party up the Coliseum tunnel, roaring and stomping and thrusting their helmets aloft.
Southern California undoubtedly could hear the celebration in its locker room. The Trojans can't do anything about it for a year.
"UCLA runs L.A. now," Brett Hundley said.
Hundley passed for 208 yards and rushed for two touchdowns, leading No. 22 UCLA past No. 23 USC 35-14 Saturday night and winning the crosstown showdown for the second straight season.
Linebacker Myles Jack and defensive end Eddie Vanderdoes also rushed for touchdowns as the Bruins (9-3, 6-3 Pac-12) earned their first win at the Coliseum since 1997, retaining the Victory Bell with their third victory over USC (9-4, 6-3) in 15 years -- and UCLA's biggest margin of victory over its rival since 1970.
The Bruins never let it get tight in the second half, holding down the revitalized Trojans with sound work on both sides of the ball. UCLA's raucous cheering section dominated the Coliseum with noise in the fourth quarter, and the Bruins took over midfield at the final gun.
"Oh, we run this town," said Bruins defensive end Cassius Marsh, who killed two late USC drives with back-to-back sacks and a fumble recovery. "It's solidified. We're 2-0. We won this game. It's our city now. They can come try to get it next year."
The crosstown rivals had rarely been so evenly matched heading into their annual meeting. UCLA and USC hadn't been next to each other in the AP rankings for the game since 1976, and both schools are headed to bowl games after solid seasons.
The result only revealed just how much has changed in Los Angeles football since the Trojans beat UCLA 50-0 on this field just two years ago.
"You win two in a row in this town, and things start to change," UCLA coach Jim Mora said. "If I'm a high school player, I want to play at UCLA. ... There's nothing like tonight. You don't get that in the NFL. That was more exciting than the dang Super Bowl."
Javorius Allen rushed for 123 yards and a score for the Trojans, who had won five straight in their revitalized season under interim coach Ed Orgeron. Cody Kessler passed for 174 yards and hit Xavier Grimble with a TD pass for USC, but its defense couldn't handle Hundley, who rushed for 80 yards.
USC had chances late, but went scoreless in the fourth quarter. After Jack recovered a fumble early in the quarter, Anthony Barr swatted the ball out of Kessler's hand from behind with 6 minutes left, and Marsh fell on the fumble near midfield. UCLA's Paul Perkins ran in the clinching TD from 8 yards out with 3:50 to play.
"I think it was the worst performance we've had since we started back together," Orgeron said. "We started off very slow, weren't able to run or pass the ball. No excuses. We just didn't play well."
The game didn't help the unlikely cause of Orgeron, who had emerged as a legitimate candidate for the full-time USC job by revitalizing a stagnant program after replacing Lane Kiffin in late September.
Orgeron got the Trojans back into the national rankings, shocked No. 5 Stanford earlier this month, and made football fun again at USC -- but he also lost to Notre Dame and UCLA, the Trojans' two biggest rivals. Athletic director Pat Haden hasn't said when he'll pick a full-time coach.
Marqise Lee had six catches for 69 yards in what's expected to be the final home game for the USC junior, who won the Biletnikoff Award last year before struggling with injuries all season long. Lee claimed he hasn't thought about his decision to turn pro, and won't decide until after USC's bowl game.
"I'm upset that we lost, but when I think about the overall expectation, you can't really be all that mad," Lee said. "We left some things on the field tonight, but we've come a long way when nobody gave us a chance."
Emotions ran high even two hours before the game, when USC's players and coaches gathered in a large circle at midfield in what's become a tradition under Orgeron. The Trojans exchanged angry words with several Bruins attempting to warm up on the field, but security and coaches prevented anything but insults from being thrown.
Jack scored UCLA's first points with a 3-yard TD run. UCLA added an 80-yard scoring drive early in the second quarter ending in a TD run by the 305-pound Vanderdoes, who committed to USC and Notre Dame before switching from the Irish to UCLA earlier this year.
The Trojans replied with an 11-yard TD run by Allen, who has seized the top job at Tailback U. from injured Silas Redd and Tre Madden.
UCLA's offense was stopped on fourth down at the USC 12 late in the first half, but the Bruins went up 21-7 early in the second half on an 11-yard TD draw by Hundley, who went in untouched.
The Trojans responded with a 22-yard TD pass to Grimble, but the Bruins got another huge kick return from Ishmael Adams and scored less than three minutes later. Hundley took it in on a 5-yard run, beating USC's Devon Kennard to the corner for his second TD. |
package cmd
import (
"archive/tar"
"archive/zip"
"bytes"
"compress/gzip"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"strings"
)
var (
osMap = map[string]string{
"darwin": "mac",
"linux": "linux",
"windows": "windows",
}
archMap = map[string]string{
"amd64": "64bit",
"386": "32bit",
"arm": "arm",
}
)
type asset struct {
ID int `json:"id"`
Name string `json:"name"`
ContentType string `json:"content_type"`
}
func (a *asset) download() (*bytes.Reader, error) {
downloadURL := fmt.Sprintf("https://api.github.com/repos/exercism/cli/releases/assets/%d", a.ID)
req, err := http.NewRequest("GET", downloadURL, nil)
if err != nil {
return nil, err
}
// https://developer.github.com/v3/repos/releases/#get-a-single-release-asset
req.Header.Set("Accept", "application/octet-stream")
res, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}
defer res.Body.Close()
bs, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, err
}
return bytes.NewReader(bs), nil
}
type release struct {
Location string `json:"html_url"`
TagName string `json:"tag_name"`
Assets []asset `json:"assets"`
}
func (r *release) Version() string {
return strings.TrimPrefix(r.TagName, "v")
}
const installFlag = os.O_RDWR | os.O_CREATE | os.O_TRUNC
func installTgz(source *bytes.Reader, dest string) error {
gr, err := gzip.NewReader(source)
if err != nil {
return err
}
defer gr.Close()
tr := tar.NewReader(gr)
for {
hdr, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
// Move the old version to a backup path that we can recover from
// in case the upgrade fails
destBackup := dest + ".bak"
if _, err := os.Stat(dest); err == nil {
os.Rename(dest, destBackup)
}
fileCopy, err := os.OpenFile(dest, installFlag, hdr.FileInfo().Mode())
if err != nil {
os.Rename(destBackup, dest)
return err
}
defer fileCopy.Close()
if _, err = io.Copy(fileCopy, tr); err != nil {
os.Rename(destBackup, dest)
return err
} else {
os.Remove(destBackup)
}
}
return nil
}
func installZip(source *bytes.Reader, dest string) error {
zr, err := zip.NewReader(source, int64(source.Len()))
if err != nil {
return err
}
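	// Every entry is written to the same destination path, so the archive
	// is expected to contain just the single CLI binary.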
for _, f := range zr.File {
fileCopy, err := os.OpenFile(dest, installFlag, f.Mode())
if err != nil {
return err
}
defer fileCopy.Close()
rc, err := f.Open()
if err != nil {
return err
}
defer rc.Close()
_, err = io.Copy(fileCopy, rc)
if err != nil {
return err
}
}
return nil
}
func fetchLatestRelease(client http.Client) (*release, error) {
resp, err := client.Get("https://api.github.com/repos/exercism/cli/releases/latest")
if err != nil {
return nil, err
}
defer resp.Body.Close()
var rel release
if err := json.NewDecoder(resp.Body).Decode(&rel); err != nil {
return nil, err
}
return &rel, nil
}
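// Illustrative wiring of the pieces above (a sketch; the exact asset naming
// convention is an assumption based on the osMap/archMap tables):
//
//	rel, err := fetchLatestRelease(http.Client{})
//	if err != nil { /* handle */ }
//	want := fmt.Sprintf("%s-%s", osMap[runtime.GOOS], archMap[runtime.GOARCH])
//	for _, a := range rel.Assets {
//		if strings.Contains(a.Name, want) {
//			rd, _ := a.download()
//			_ = installTgz(rd, destPath) // or installZip for .zip assets
//		}
//	}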
|
# NOTE: the app-level types referenced below (AppDescriptor, AppEngineConfig,
# AppConnection, Env, EventDescriptor, ServerConfig, AppSettings) are assumed
# to be defined elsewhere in this package; only stdlib imports are added here.
from dataclasses import dataclass, field
from typing import Dict, List, Optional


@dataclass
class AppConfig:
"""
App Configuration container
"""
app: AppDescriptor
engine: AppEngineConfig = field(default_factory=AppEngineConfig)
app_connections: Dict[str, AppConnection] = field(default_factory=dict)
env: Env = field(default_factory=dict)
events: Dict[str, EventDescriptor] = field(default_factory=dict)
server: Optional[ServerConfig] = None
plugins: List[AppDescriptor] = field(default_factory=list)
settings: AppSettings = field(default_factory=dict)
def app_key(self):
return self.app.app_key()
def __post_init__(self):
for event in self.events.values():
if event.config.stream.compression is None:
event.config.stream.compression = self.engine.default_stream_compression
if event.config.stream.serialization is None:
event.config.stream.serialization = self.engine.default_stream_serialization |
// RemoveAll element(s) from an array that are in remove array
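//
// Illustrative use (assuming Update wraps a bson update document):
//
//	RemoveAll("tags", []interface{}{"draft", "tmp"})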
func RemoveAll(field string, array []interface{}) Update {
return Update(&bson.M{
"$pullAll": bson.M{
field: array,
},
})
} |
# -*- coding: utf-8 -*-
import cv2
import datetime
def resize_with_pad(image, height, width):
def get_padding_size(image):
h, w, _ = image.shape
longest_edge = max(h, w)
top, bottom, left, right = (0, 0, 0, 0)
if h < longest_edge:
dh = longest_edge - h
top = dh // 2
bottom = dh - top
elif w < longest_edge:
dw = longest_edge - w
left = dw // 2
right = dw - left
else:
pass
return top, bottom, left, right
top, bottom, left, right = get_padding_size(image)
BLACK = [0, 0, 0]
    constant = cv2.copyMakeBorder(
        image, top, bottom, left, right,
        cv2.BORDER_CONSTANT, value=BLACK)
    # cv2.resize expects dsize as (width, height), not (height, width)
    resized_image = cv2.resize(constant, (width, height))
return resized_image
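# Illustrative use (a sketch): letterbox an arbitrary frame into a square
# thumbnail, e.g. thumb = resize_with_pad(frame, 64, 64)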
def debug(msg):
print("DEBUG:{} [{}]".format(
msg, datetime.datetime.now().strftime("%Y%m%d_%H%M%S_%f"))
)
|
/**
* Wizard for edit/create stream spec
*/
public class EditStreamWizard extends Wizard implements IWorkbenchWizard {
// wizard pages
BasicSettingPage basicPage;
AdvancedSettingPage advancedPage;
private boolean editMode; // New or edit mode
private IP4Connection connection;
public boolean isEditMode(){
return editMode;
}
// editing mode variables
private IStream orgStream;
private String snapShot;
// new mode variables
private IStream editStream;
boolean rootFolderChanged; // whether stream root folder is changed manually.
// stream locked and message.
private boolean streamLocked;
private String streamLockMessage;
public String getStreamLockMessage() {
return streamLockMessage;
}
public boolean isStreamLocked(){
return streamLocked;
}
public EditStreamWizard(IP4Stream stream, boolean editMode) {
super();
this.editMode=editMode;
setWindowTitle(MessageFormat.format(Messages.EditStreamWizard_StreamWizardTitle,(editMode?Messages.EditStreamWizard_Edit:Messages.EditStreamWizard_New)));
connection = stream.getConnection();
if(editMode){
orgStream=(IStream) stream.getAdapter(IStream.class);
}else{
orgStream=StreamUtil.createNewStream(stream);
orgStream.setDescription(MessageFormat.format(Messages.EditStreamWizard_CreatedBy, connection.getUser()));
}
editStream=StreamUtil.copyStream(orgStream);
snapShot=ReflectionToStringBuilder.reflectionToString(editStream, new P4LogUtils.RecursiveToStringStyle(-1));
initDialogSettings();
setNeedsProgressMonitor(true);
setStreamLockFlag(stream);
}
private void setStreamLockFlag(IP4Stream stream) {
String owner = stream.getStreamSummary().getOwnerName();
String editor = stream.getConnection().getUser();
        // Locked for this editor unless the editor is the owner or the stream is not locked
        streamLocked = stream.getStreamSummary().getOptions().isLocked() && !editor.equals(owner);
streamLockMessage=MessageFormat.format(Messages.EditStreamWizard_LockedBy,owner);
}
public EditStreamWizard(IP4Connection conn) {
super();
this.editMode=false;
connection = conn;
orgStream=StreamUtil.createNewStream(null);
orgStream.setOwnerName(connection.getUser());
orgStream.setDescription(MessageFormat.format(Messages.EditStreamWizard_CreatedBy, connection.getUser()));
editStream=StreamUtil.copyStream(orgStream);
snapShot=ReflectionToStringBuilder.reflectionToString(editStream, new P4LogUtils.RecursiveToStringStyle(-1));
initDialogSettings();
setNeedsProgressMonitor(true);
}
private void initDialogSettings() {
}
@Override
public IDialogSettings getDialogSettings() {
IDialogSettings dialogSettings = PerforceUIPlugin.getPlugin().getDialogSettings();
IDialogSettings section = dialogSettings.getSection("EditStreamsWizard");//$NON-NLS-1$
if (section == null) {
section = dialogSettings.addNewSection("EditStreamsWizard");//$NON-NLS-1$
}
setDialogSettings(section);
return section;
}
public void addPages() {
basicPage = new BasicSettingPage();
addPage(basicPage);
advancedPage = new AdvancedSettingPage();
addPage(advancedPage);
}
/**
* @see IWorkbenchWizard#init(IWorkbench, IStructuredSelection)
*/
public void init(IWorkbench workbench, IStructuredSelection selection) {
}
public boolean canFinish() {
return super.canFinish() && modelChanged() && !StreamUtil.isStreamEmpty(getStream());
}
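    /**
     * Detects edits by comparing the reflective string snapshot taken at
     * construction time against the current state of the edited stream.
     */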
private boolean modelChanged() {
String newShot = ReflectionToStringBuilder.reflectionToString(editStream, new P4LogUtils.RecursiveToStringStyle(-1));
return !snapShot.equals(newShot);
}
@Override
public boolean performFinish() {
for(IWizardPage page:getPages()){
if(page instanceof AbstractEditStreamWizardPage){
((AbstractEditStreamWizardPage)page).saveSettings();
}
}
if(isEditMode()){
boolean doit=true;
Type orgType = orgStream.getType();
Type editType = editStream.getType();
if(orgType==Type.TASK && orgType!=editType){
doit=MessageDialog.openQuestion(null, Messages.EditStreamWizard_ConvertOrphanWarn, Messages.EditStreamWizard_ConvertOrphanToMain);
}
if(doit){
try {
connection.updateStream(editStream);
StreamUtil.updateStream(editStream,orgStream);
} catch (Exception e) {
MessageDialog.openError(null, Messages.EditStreamWizard_Error, e.getLocalizedMessage());
return false;
}
}else
return false;
}else{
try {
getConnection().createStream(editStream);
if(basicPage.isPopulate()){
populateStream(editStream);
}
} catch (Exception e) {
MessageDialog.openError(null, Messages.EditStreamWizard_Error, e.getLocalizedMessage());
return false;
}
}
return true;
}
private void populateStream(IStream stream) {
if(stream.getParent()!=null)
connection.populateStream(stream);
else{
PopulateAction action = new PopulateAction();
action.populate(connection, stream.getStream()+"/..."); //$NON-NLS-1$
}
}
public IP4Connection getConnection() {
return connection;
}
public IStream getStream() {
return editStream;
}
public IStream getOriginalStream() {
return orgStream;
}
@Override
public boolean needsProgressMonitor() {
return true;
}
public static IStatus validate(IP4Stream stream){
return ValidationStatus.ok();
}
} |
// Remove removes the loop device.
func (d *Device) Remove() error {
if err := d.ensureOpen(); err != nil {
return err
}
err := unix.IoctlSetInt(int(d.dev.Fd()), unix.LOOP_CLR_FD, 0)
if err != nil {
return err
}
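	// Close our handle before asking loop-control to delete the device;
	// a still-open file descriptor would keep it busy and the removal
	// could fail with EBUSY.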
if err := d.Close(); err != nil {
return fmt.Errorf("failed to close device: %w", err)
}
if err := unix.IoctlSetInt(int(loopControlFd.Fd()), unix.LOOP_CTL_REMOVE, int(d.num)); err != nil {
return err
}
return nil
} |
/*
* File: SigmoidKernel.java
* Authors: <NAME>
* Company: Sandia National Laboratories
* Project: Cognitive Foundry
*
* Copyright July 19, 2007, Sandia Corporation. Under the terms of Contract
* DE-AC04-94AL85000, there is a non-exclusive license for use of this work by
* or on behalf of the U.S. Government. Export of this program may require a
* license from the United States Government. See CopyrightHistory.txt for
* complete details.
*
*/
package gov.sandia.cognition.learning.function.kernel;
import gov.sandia.cognition.annotation.CodeReview;
import gov.sandia.cognition.math.matrix.Vectorizable;
import gov.sandia.cognition.util.AbstractCloneableSerializable;
/**
* The <code>SigmoidKernel</code> class implements a sigmoid kernel based on the
* hyperbolic tangent. The kernel it computes is:
*
* tanh(kappa * (x dot y) + c)
*
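 * For example, for identical unit vectors (x dot y = 1) with kappa = 0.5
 * and c = 1.0 the kernel evaluates to tanh(1.5), roughly 0.905.
 *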
* @author <NAME>
* @since 2.0
*/
@CodeReview(
reviewer="<NAME>",
date="2009-07-08",
changesNeeded=false,
comments={
"Made clone call super.clone.",
"Looks fine otherwise."
}
)
public class SigmoidKernel
extends AbstractCloneableSerializable
implements Kernel<Vectorizable>
{
/** The default value for kappa is {@value}. */
public static final double DEFAULT_KAPPA = 1.0;
/** The default value for the constant is {@value}. */
public static final double DEFAULT_CONSTANT = 0.0;
/** The kappa value to multiply times the dot product. */
protected double kappa;
/** The constant used in the sigmoid. */
protected double constant;
/**
* Creates a new instance of SigmoidKernel with default values of 1.0 for
* kappa and 0.0 for the constant.
*/
public SigmoidKernel()
{
this(DEFAULT_KAPPA, DEFAULT_CONSTANT);
}
/**
* Creates a new instance of SigmoidKernel from its two needed parameters:
* kappa and a constant. The kernel it evaluates is:
*
* tanh(kappa * (x dot y) + c)
*
* @param kappa The value multiplied by the dot product of the two vectors
* before it is passed to the hyperbolic tangent function.
* @param constant The constant inside of the sigmoid kernel.
*/
public SigmoidKernel(
final double kappa,
final double constant)
{
super();
this.setKappa(kappa);
this.setConstant(constant);
}
/**
* Creates a new copy of a SigmoidKernel.
*
* @param other The SigmoidKernel to copy.
*/
public SigmoidKernel(
final SigmoidKernel other)
{
this(other.getKappa(), other.getConstant());
}
@Override
public SigmoidKernel clone()
{
return (SigmoidKernel) super.clone();
}
/**
* Evaluates the sigmoid kernel between the two given vectors, which is:
*
* tanh(kappa * (x dot y) + c)
*
* @param x The first vector.
* @param y The second vector.
* @return The result of the sigmoid kernel: tanh(kappa * (x dot y) + c)
*/
public double evaluate(
final Vectorizable x,
final Vectorizable y)
{
final double product =
x.convertToVector().dotProduct(y.convertToVector());
return Math.tanh(this.kappa * product + this.constant);
}
/**
* Gets kappa, the value multiplied by the dot product of the two vectors
* before it is passed to the hyperbolic tangent function.
*
* @return The kappa value for the sigmoid kernel.
*/
public double getKappa()
{
return this.kappa;
}
/**
* Sets kappa, the value multiplied by the dot product of the two vectors
* before it is passed to the hyperbolic tangent function.
*
* @param kappa The kappa value for the sigmoid kernel.
*/
public void setKappa(
final double kappa)
{
this.kappa = kappa;
}
/**
* Gets the constant inside of the sigmoid kernel.
*
* @return The constant term used for the sigmoid kernel.
*/
public double getConstant()
{
return this.constant;
}
/**
* Sets the constant inside of the sigmoid kernel.
*
* @param constant The constant term used for the sigmoid kernel.
*/
public void setConstant(
final double constant)
{
this.constant = constant;
}
}
|
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.Arrays;
public class EducationReform {
public static void main(String[] args) throws IOException {
BufferedReader r = new BufferedReader(new InputStreamReader(System.in));
String s = r.readLine();
String[] sp = s.split("[ ]+");
		n = Integer.parseInt(sp[0]);
		m = Integer.parseInt(sp[1]);
		k = Integer.parseInt(sp[2]);
arr = new Subject[m];
for (int i = 0; i < m; i++) {
arr[i] = new Subject(r.readLine(),i+1);
}
Arrays.sort(arr);
boolean can = false;
int orgIndex = -1;
int count=0;
long taken=-1;
int lastTaken=-1;
int index = -1;
int lastShift = -1;
long maxRes = -1;
for(int i=0;i<m;i++){
for(long ex=arr[i].a;ex<=arr[i].b;ex++){
long dif=ex-arr[i].a;
if(ex+go(i+1, 1, i,(int)dif )>maxRes){
maxRes=ex+go(i+1, 1, i,(int)dif );
can=true;
orgIndex=arr[i].index;
count=1;
taken=ex;
lastTaken=i;
index=i+1;
lastShift = (int) dif;
}
}
}
if(!can)
System.out.println("NO");
else{
System.out.println("YES");
System.out.println(orgIndex+" "+taken);
loop: while(count<n){
if(arr[index].c>arr[lastTaken].c){
long lastHomeWork = arr[lastTaken].a + lastShift;
long w1 = -inf ,w2=-inf,w3=-inf;
if (lastHomeWork + k >= arr[index].a && lastHomeWork + k <= arr[index].b) {
long dif = lastHomeWork + k - arr[index].a;
w1=lastHomeWork+k+go(index + 1, count + 1, index, (int) dif);
}
if (lastHomeWork * k >= arr[index].a && lastHomeWork * k <= arr[index].b) {
long dif = (lastHomeWork * k) - arr[index].a;
w2=lastHomeWork*k+go(index + 1, count + 1, index, (int) dif);
}
w3=go(index + 1, count, lastTaken, lastShift);
long max = Math.max(w1, Math.max(w2, w3));
if(max==w1){
orgIndex=arr[index].index;
long dif = lastHomeWork + k - arr[index].a;
lastTaken = index;
index++;
count++;
lastShift = (int)dif;
taken = arr[lastTaken].a+lastShift;
System.out.println(orgIndex+" "+taken);
continue loop;
}
else if(max==w2){
long dif = (lastHomeWork * k) - arr[index].a;
orgIndex=arr[index].index;
lastTaken = index;
index++;
count++;
lastShift = (int)dif;
taken = arr[lastTaken].a+lastShift;
System.out.println(orgIndex+" "+taken);
continue loop;
}
else if(max==w3){
index++;
continue loop;
}
}
else{
index++;
continue loop;
}
}
}
}
static int n, m, k;
static Subject[] arr;
static Long[][][][] dp =new Long[52][52][52][102];
static long inf = Long.MAX_VALUE/8;
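	// Memoized search: dp[index][count][lastTaken][lastShift] caches the best
	// achievable sum from subject index onward, given that count subjects have
	// been taken, the last being lastTaken at offset lastShift above its lower bound.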
public static long go(int index, int count, int lastTaken, int lastShift) {
if (count == n)
return 0;
if (index == m)
return -inf;
if(dp[index][count][lastTaken][lastShift]!=null)
return dp[index][count][lastTaken][lastShift];
long w1 = -inf ,w2=-inf,w3=-inf;
if(arr[index].c>arr[lastTaken].c){
long lastHomeWork = arr[lastTaken].a + lastShift;
if (lastHomeWork + k >= arr[index].a && lastHomeWork + k <= arr[index].b) {
long dif = lastHomeWork + k - arr[index].a;
w1= lastHomeWork+k+go(index + 1, count + 1, index, (int) dif);
}
if (lastHomeWork * k >= arr[index].a && lastHomeWork * k <= arr[index].b) {
long dif = (lastHomeWork * k) - arr[index].a;
w2= lastHomeWork*k+go(index + 1, count + 1, index, (int) dif);
}
}
w3= go(index + 1, count, lastTaken, lastShift);
return dp[index][count][lastTaken][lastShift]=Math.max(w1,Math.max(w2,w3));
}
static class Subject implements Comparable<Subject> {
long a, b;
int c,index;
public Subject(String s,int i) {
String[] sp = s.split("[ ]+");
			a = Long.parseLong(sp[0]);
			b = Long.parseLong(sp[1]);
			c = Integer.parseInt(sp[2]);
index=i;
}
@Override
public int compareTo(Subject arg0) {
return c - arg0.c;
}
}
}
|
export { default as ScrollSync } from "./ScrollSync";
export { default as ScrollSyncNode } from "./ScrollSyncNode";
export default {};
|
package de.agilecoders.wicket.core.markup.html.bootstrap.dialog;
import java.util.ArrayList;
import java.util.List;
import org.apache.wicket.AttributeModifier;
import org.apache.wicket.Component;
import org.apache.wicket.MarkupContainer;
import org.apache.wicket.ajax.AjaxEventBehavior;
import org.apache.wicket.ajax.AjaxRequestTarget;
import org.apache.wicket.ajax.attributes.AjaxRequestAttributes;
import org.apache.wicket.markup.ComponentTag;
import org.apache.wicket.markup.MarkupStream;
import org.apache.wicket.markup.head.IHeaderResponse;
import org.apache.wicket.markup.head.OnDomReadyHeaderItem;
import org.apache.wicket.markup.html.WebMarkupContainer;
import org.apache.wicket.markup.html.basic.Label;
import org.apache.wicket.markup.html.form.Form;
import org.apache.wicket.markup.html.list.ListItem;
import org.apache.wicket.markup.html.list.ListView;
import org.apache.wicket.markup.html.panel.GenericPanel;
import org.apache.wicket.markup.resolver.IComponentResolver;
import org.apache.wicket.model.IModel;
import org.apache.wicket.model.Model;
import org.apache.wicket.util.string.Strings;
import de.agilecoders.wicket.core.markup.html.bootstrap.behavior.BootstrapResourcesBehavior;
import de.agilecoders.wicket.core.markup.html.bootstrap.behavior.CssClassNameAppender;
import de.agilecoders.wicket.core.util.Attributes;
import de.agilecoders.wicket.jquery.JQuery;
/**
* A component providing a bootstrap modal containing a Wicket {@link Form} for which
* footer buttons can be used to submit form.
*
* @author reiern70
*/
public class ModalWithForm<T> extends GenericPanel<T> implements IModal {
public static final String BUTTON_MARKUP_ID = "button";
private final WebMarkupContainer header;
private final IModel<Boolean> show = Model.of(false);
private final IModel<Boolean> fadein = Model.of(true);
private final IModel<Boolean> keyboard = Model.of(true);
private final Label headerLabel;
private final List<Component> buttons = new ArrayList<Component>();
private final WebMarkupContainer footer;
private final IModel<Boolean> useCloseHandler = Model.of(false);
private final AjaxEventBehavior closeBehavior;
private final Form<?> form;
/**
* Transparent form.
*
* @author reiern70
*/
    private static class TransparentForm extends Form<Void> implements IComponentResolver {
        public TransparentForm(String id) {
super(id);
}
@Override
public Component resolve(MarkupContainer container,
MarkupStream markupStream, ComponentTag tag) {
Component resolvedComponent = getParent().get(tag.getId());
if (resolvedComponent != null && getPage().wasRendered(resolvedComponent))
{
                /*
                 * This means the parent container has a tag in its own markup
                 * homonymous with this grandchild tag. The parent container
                 * will render it itself, so it must not be resolved to the
                 * grandchild here.
                 */
return null;
}
return resolvedComponent;
}
}
/**
* Constructor.
*
* @param markupId The non-null id of this component
*/
public ModalWithForm(final String markupId) {
this(markupId, null);
}
/**
* Constructor.
*
* @param id The non-null id of this component
* @param model The component's body model
*/
public ModalWithForm(String id, IModel<T> model) {
super(id, model);
setOutputMarkupId(true);
setOutputMarkupPlaceholderTag(true);
form = new TransparentForm("form");
add(form);
footer = new WebMarkupContainer("footer");
header = new WebMarkupContainer("header");
header.add(headerLabel = new Label("header-label", ""));
headerLabel.setOutputMarkupId(true);
footer.add(new ListView<Component>("buttons", buttons) {
@Override
protected void populateItem(ListItem<Component> item) {
item.add(item.getModelObject());
}
});
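// server-side hook for Bootstrap's "hidden.bs.modal" event; only attached
// in onInitialize() when useCloseHandler has been enabled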
closeBehavior = new AjaxEventBehavior("hidden.bs.modal") {
@Override
protected void onEvent(final AjaxRequestTarget target) {
handleCloseEvent(target);
}
@Override
protected void updateAjaxAttributes(AjaxRequestAttributes attributes) {
super.updateAjaxAttributes(attributes);
attributes.setEventPropagation(AjaxRequestAttributes.EventPropagation.BUBBLE);
}
};
form.add(footer);
add(header);
BootstrapResourcesBehavior.addTo(this);
}
@Override
protected void onComponentTag(ComponentTag tag) {
super.onComponentTag(tag);
checkComponentTag(tag, "div");
Attributes.addClass(tag, "modal");
Attributes.set(tag, "tabindex", "-1");
// ARIA
Attributes.set(tag, "role", "dialog");
Attributes.set(tag, "aria-labelledby", headerLabel.getMarkupId());
Attributes.set(tag, "aria-hidden", "true");
}
/**
* Hook to react on the modal close event.
*
* @param target The current {@link AjaxRequestTarget}
*/
protected void onClose(final AjaxRequestTarget target) {}
/**
* Adds a className to the form.
*
* @param className The class name to add
* @return This
*/
public ModalWithForm<T> addFormCssClass(String className) {
return addFormCssClass(Model.of(className));
}
/**
* Adds a className to the form.
*
* @param className The class name to add
* @return This
*/
public ModalWithForm<T> addFormCssClass(IModel<String> className) {
form.add(new CssClassNameAppender(className));
return this;
}
/**
* Sets the header label text.
*
* @param label The header label
* @return This
*/
public ModalWithForm<T> header(IModel<String> label) {
headerLabel.setDefaultModel(label);
setHeaderVisible(true);
return this;
}
/**
* Sets the header label text and whether model strings should be escaped.
*
* @param label The header label
* @param escapeMarkup True if model strings should be escaped
* @return This
*/
public ModalWithForm<T> header(final IModel<String> label, final boolean escapeMarkup) {
headerLabel.setDefaultModel(label);
headerLabel.setEscapeModelStrings(escapeMarkup);
return this;
}
/**
* Sets whether the footer and any children are visible.
*
* @param visible True if footer and any children should be visible
* @return This
*/
public ModalWithForm<T> setFooterVisible(final boolean visible) {
footer.setVisible(visible);
return this;
}
/**
* Sets whether the header and any children are visible.
*
* @param visible True if header and any children should be visible
* @return This
*/
public ModalWithForm<T> setHeaderVisible(final boolean visible) {
header.setVisible(visible);
return this;
}
/**
* Sets whether the close handler is used or not. Default is false.
*
* @param useCloseHandler True if close handler should be used
* @return This
*/
public final ModalWithForm<T> setUseCloseHandler(final boolean useCloseHandler) {
this.useCloseHandler.setObject(useCloseHandler);
return this;
}
/**
* Sets the initial visibility of the modal dialog.
*
* @param show Whether to show the dialog or not
* @return This
*/
public ModalWithForm<T> show(boolean show) {
this.show.setObject(show);
return this;
}
/**
* Append dialog close/hide JavaScript to the current AJAX request target.
*
* @param target The current {@link AjaxRequestTarget}
* @return This
*/
public ModalWithForm<T> appendCloseDialogJavaScript(final AjaxRequestTarget target) {
target.appendJavaScript(createActionScript(getMarkupId(true), ModalAction.Action.hide));
return this;
}
/**
* A short alias for {@link ModalWithForm#appendCloseDialogJavaScript}
* @param target The current {@link AjaxRequestTarget}
* @return This
*/
public ModalWithForm<T> close(final AjaxRequestTarget target) {
return appendCloseDialogJavaScript(target);
}
/**
* Append dialog show JavaScript to the current AJAX request target.
*
* @param target The current {@link AjaxRequestTarget}
* @return This
*/
public ModalWithForm<T> appendShowDialogJavaScript(final AjaxRequestTarget target) {
target.appendJavaScript(createActionScript(getMarkupId(true), ModalAction.Action.show));
return this;
}
/**
* A short alias for {@link ModalWithForm#appendShowDialogJavaScript}
* @param target The current {@link AjaxRequestTarget}
* @return This
*/
public ModalWithForm<T> show(final AjaxRequestTarget target) {
return appendShowDialogJavaScript(target);
}
/**
* creates an action script to open/close the dialog on the client side.
*
* @param markupId The component's markup id
* @param action Possible values: show/hide
* @return new script.
*/
protected String createActionScript(final String markupId, final ModalAction.Action action) {
return JQuery.$("#"+markupId).chain(ModalAction.action(action)).get();
}
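/**
* Adds the Bootstrap data-toggle/href attributes that make the given
* component open this modal when clicked.
*
* @param component The component that should trigger the modal
* @return This
*/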
public ModalWithForm<T> addOpenerAttributesTo(final Component component) {
component.add(new AttributeModifier("data-toggle", "modal"));
component.add(new AttributeModifier("href", "#" + getMarkupId(true)));
return this;
}
/**
* adds a close button with specific label
*
* @param label The label of close button
* @return this instance
*/
public ModalWithForm<T> addCloseButton(final IModel<String> label) {
BootstrapModalCloseButton button = new BootstrapModalCloseButton(label);
return addButton(button);
}
/**
* adds a close button with default label ("Close")
*
* @return this instance
*/
public ModalWithForm<T> addCloseButton() {
return addCloseButton(Model.of("Close"));
}
/**
* adds a button to footer section.
*
* @param button Button to add to footer
* @return this instance.
*/
public ModalWithForm<T> addButton(final Component button) {
if (!BUTTON_MARKUP_ID.equals(button.getId())) {
throw new IllegalArgumentException(
String.format("Invalid button markup id. Must be '%s'.", BUTTON_MARKUP_ID));
}
buttons.add(button);
return this;
}
@Override
protected void onInitialize() {
super.onInitialize();
if (useCloseHandler.getObject()) {
add(closeBehavior);
}
}
/**
* handles the close event.
*
* @param target The current {@link AjaxRequestTarget}
*/
private void handleCloseEvent(final AjaxRequestTarget target) {
if (isVisible()) {
onClose(target);
appendCloseDialogJavaScript(target);
}
}
@Override
protected void onConfigure() {
super.onConfigure();
if (useFadein()) {
add(new CssClassNameAppender("fade"));
}
if (Strings.isEmpty(headerLabel.getDefaultModelObjectAsString())) {
// there must be at least one character inside the header to prevent
// layout problems.
headerLabel.setDefaultModelObject(" ");
headerLabel.setEscapeModelStrings(false);
}
footer.setVisible(buttons.size() > 0);
}
@Override
public void renderHead(IHeaderResponse response) {
super.renderHead(response);
response.render(OnDomReadyHeaderItem.forScript(createInitializerScript(getMarkupId(true))));
}
/**
* creates the initializer script of the modal dialog.
*
* @param markupId The component's markup id
* @return initializer script
*/
private String createInitializerScript(final String markupId) {
return addCloseHandlerScript(markupId, createBasicInitializerScript(markupId));
}
/**
* creates the basic initialization script of the modal dialog.
* Override this to pass in your custom initialization, add event handlers, etc.
*
* @param markupId markup id
* @return initializer script
* @see #createInitializerScript
*/
protected String createBasicInitializerScript(final String markupId) {
return "$('#" + markupId + "').modal({keyboard:" + useKeyboard() + ", show:" + showImmediately() + "})";
}
/**
* adds close handler to initializer script, if use of close handler has been defined.
*
* @param markupId markup id
* @param script base script to which the handler is appended
* @return close handler script
*/
private String addCloseHandlerScript(final String markupId, final String script) {
if (useCloseHandler.getObject()) {
return script + ";$('#" + markupId + "').on('hidden.bs.modal', function () { "
+ " Wicket.Ajax.ajax({'u':'" + closeBehavior.getCallbackUrl() + "','c':'" + markupId + "'});"
+ "})";
}
return script;
}
/**
* @return true, if fade in animation is activated
*/
protected final boolean useFadein() {
return fadein.getObject();
}
/**
* @return true, if keyboard usage is activated
*/
protected final boolean useKeyboard() {
return keyboard.getObject();
}
/**
* @return true, if modal dialog should be shown after initialization
*/
protected final boolean showImmediately() {
return show.getObject();
}
/**
* Whether to fade in/fade out the modal dialog or not
*
* @param fadein true, if dialog should be animated
* @return This
*/
public final ModalWithForm<T> setFadeIn(boolean fadein) {
this.fadein.setObject(fadein);
return this;
}
/**
* Whether to enable keyboard interaction like ESC to close the dialog.
*
* @param keyboard true, if keyboard interaction is enabled
* @return This
*/
public final ModalWithForm<T> setUseKeyboard(boolean keyboard) {
this.keyboard.setObject(keyboard);
return this;
}
@Override
public Component getModal() {
return this;
}
}
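/*
* Usage sketch (hedged): wiring a ModalWithForm into a page. The wicket:ids
* "modal" and "name", and the "openLink" component, are hypothetical and
* must match your own markup; only BUTTON_MARKUP_ID ("button") is fixed by
* this class.
*
* ModalWithForm<Void> modal = new ModalWithForm<>("modal");
* modal.header(Model.of("Edit name"))
*      .addCloseButton()
*      .setUseCloseHandler(true);
* // children added to the modal are resolved through the transparent form:
* modal.add(new TextField<String>("name", Model.of("")));
* add(modal);
* modal.addOpenerAttributesTo(openLink); // any link/button opens the dialog
*/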
|
import os
from astropy.io import fits
from trm import cline
from trm.cline import Cline
import hipercam as hcam
__all__ = [
"hlog2fits",
]
###############################################
#
# hlog2fits -- convert reduce log files to FITS
#
###############################################
def hlog2fits(args=None):
"""``hlog2fits log [origin dir]``
Converts a |hiper| ASCII log into a FITS file. As well as a modest (~40%)
reduction in file size (the ASCII logs are already written fairly
efficiently), the resulting file is much faster to read than the ASCII log,
which may matter for very large logs [a test on a 78,000-frame file took
12.9 seconds to read as ASCII versus 1.9 seconds as FITS]. The FITS log is
also easier to understand than the ASCII file, but it does not carry all of
the header information, so it is not a full replacement. At the moment no
significant header information is transferred beyond the CCD names. Each
CCD appears as a single binary table, starting at the second HDU (HDU 1 if
you number them 0, 1, 2, ...). The result can be read back with
:meth:`hipercam.hlog.Hlog.from_fits`.
Parameters:
log : str
name of the log file (should end .log). The output FITS file
will have the same root name but end .fits. The routine will abort
if there is a pre-existing file of the same name.
origin : str [hidden]
'h' or 'u' depending upon whether the log file was created with
the hipercam or old ultracam pipeline. Defaults to 'h'.
dir : str [hidden]
directory for the output; defaults to the present working directory
NB Because of the danger of over-writing raw data (which also ends in
.fits), this routine will not over-write pre-existing files. You
should delete clashing files if you really want to proceed.
"""
command, args = cline.script_args(args)
# get input section
with Cline("HIPERCAM_ENV", ".hipercam", command, args) as cl:
# register parameters
cl.register("log", Cline.LOCAL, Cline.PROMPT)
cl.register("origin", Cline.LOCAL, Cline.HIDE)
cl.register("dir", Cline.LOCAL, Cline.HIDE)
# get inputs
log = cl.get_value(
"log",
'name of log file from "reduce" to convert to FITS',
cline.Fname("red", hcam.LOG),
)
cl.set_default('origin','h')
origin = cl.get_value(
"origin", "h(ipercam) or u(ltracam) pipeline?", "h",
lvals=["h", "u"]
)
cl.set_default('dir','.')
dir = cl.get_value(
"dir", "directory for output", ".",
)
root = os.path.splitext(os.path.basename(log))[0]
oname = os.path.join(dir, root + ".fits")
if os.path.exists(oname):
raise hcam.HipercamError(
f"A file called {oname} already exists and"
" will not be over-written; aborting"
)
# Read in the ASCII log
if origin == "h":
hlg = hcam.hlog.Hlog.rascii(log)
elif origin == "u":
hlg = hcam.hlog.Hlog.fulog(log)
else:
raise hcam.HipercamError(f"unrecognised origin = {origin}")
print(f"Loaded ASCII log = {log}")
# Generate HDU list
# First the primary HDU (no data)
phdr = fits.Header()
phdr["LOGFILE"] = (os.path.basename(log), "Original log file")
phdu = fits.PrimaryHDU(header=phdr)
hdul = [
phdu,
]
# Now a BinTable for each CCD
for cnam in sorted(hlg):
hdr = fits.Header()
hdr["CCDNAME"] = (cnam, "CCD name")
hdul.append(
fits.BinTableHDU(
hlg[cnam], header=hdr,
name=f"CCD {cnam}"
)
)
hdul = fits.HDUList(hdul)
# finally write to disk
print(f"Writing to disk in file = {oname}")
hdul.writeto(oname)
print(f"Converted {log} to {oname}")
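# Usage sketch (hedged): reading the converted file back. The file name
# "run0012.fits" is hypothetical; Hlog.from_fits is the reader referenced
# in the docstring, and plain astropy access to the per-CCD binary tables
# should also work:
#
#     import hipercam as hcam
#     hlg = hcam.hlog.Hlog.from_fits("run0012.fits")
#
#     from astropy.io import fits
#     with fits.open("run0012.fits") as hdul:
#         tab = hdul["CCD 1"].data  # binary table for a CCD named "1"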
|