max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content | score | int_score |
---|---|---|---|---|---|---|
run.py | SDJustus/skip-ganomaly | 0 | 12796951 | <filename>run.py
"""
TRAIN SKIP/GANOMALY
Example: Run the following command from the terminal.
    python run.py \
--dataset cifar10 \
--abnormal_class airplane \
--display \
"""
##
# LIBRARIES
import time
from options import Options
from lib.data.dataloader import load_data
from lib.models import load_model
import torch
import numpy as np
from lib.models.skipganomaly import seed
##
def main():
""" Training
"""
opt = Options().parse()
opt.print_freq = opt.batchsize
seed(opt.manualseed)
print("Seed:", str(torch.seed()))
if opt.phase == "inference":
opt.batchsize=1
data = load_data(opt)
model = load_model(opt, data)
if opt.phase == "inference":
model.inference()
else:
if opt.path_to_weights:
model.test()
else:
train_start = time.time()
model.train()
train_time = time.time() - train_start
            print(f'Train time: {train_time} secs')
if __name__ == '__main__':
main()
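# A minimal sketch of what the imported `seed` helper is assumed to do; the
# real implementation lives in lib.models.skipganomaly (not shown here):
#
#     def seed(seed_value):
#         """Seed all relevant RNGs so runs are reproducible."""
#         import random
#         random.seed(seed_value)
#         np.random.seed(seed_value)
#         torch.manual_seed(seed_value)
#         torch.cuda.manual_seed_all(seed_value)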
| 2.890625 | 3 |
misago/misago/users/api/ranks.py | vascoalramos/misago-deployment | 2 | 12796952 | from rest_framework import mixins, viewsets
from ..models import Rank
from ..serializers import RankSerializer
class RanksViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):
serializer_class = RankSerializer
queryset = Rank.objects.filter(is_tab=True).order_by("order")
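# A hedged wiring sketch (not part of this file): a read-only viewset like
# this is typically registered with a DRF router in a urls module, e.g.:
#
#     from rest_framework.routers import DefaultRouter
#
#     router = DefaultRouter()
#     router.register(r"ranks", RanksViewSet, basename="rank")
#     urlpatterns = router.urls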
| 1.898438 | 2 |
bot/models/party.py | maxsaltonstall/letters-with-strangers | 3 | 12796953 | <reponame>maxsaltonstall/letters-with-strangers<filename>bot/models/party.py
import logging, uuid, random
from .dictionary import Dictionary
from .player import Player
from .util.string_util import StringUtil
from .util.datastore import save_party, load_party, disband_party
class Party:
def __init__(self, party_id: str = ''):
if party_id:
self.party_id = party_id
self.state = load_party(self.party_id)
else:
self.state = {}
self.state["members"] = []
self.party_id = uuid.uuid4().hex
self.save_state()
logging.info(f"initialized party {self.party_id}: {str(self.state['members'])}")
def save_state(self):
save_party(self.party_id, self.state)
def load_state(self):
self.state = load_party(self.party_id)
    def get_id(self) -> str:  # party ids are uuid4 hex strings
return self.party_id
def add_members(self, members) -> str:
members_added = []
already_partying_members = []
for member in members:
player = Player(member)
if player.get_party_id() and player.get_party_id() != self.get_id():
# player is in another party
already_partying_members.append(player.get_mention_tag())
else:
if player.get_id() not in self.state["members"]:
self.state["members"].append(player.get_id())
player.set_party_id(self.get_id())
members_added.append(player.get_mention_tag())
self.save_state()
msg = ""
if len(members_added):
msg += f"Added {StringUtil.readable_list(members_added)} to your party. "
if len(already_partying_members):
msg += f"Couldn't add {StringUtil.readable_list(already_partying_members)} -- they're already in another party!"
return msg
def remove_member(self, member_id: int):
if self.state and "members" in self.state:
self.state["members"].remove(member_id)
self.save_state()
if len(self.get_members()) <= 1:
disband_party(self.get_id())
else:
# this is a corrupted party; purge it
disband_party(self.get_id())
def get_members(self) -> list:
return self.state["members"]
def get_members_as_string(self) -> str:
player_names = []
for player_id in self.get_members():
player = Player.load(player_id)
player_names.append(player.get_mention_tag())
return StringUtil.readable_list(player_names)
def get_letters(self) -> list:
party_letters = set()
for player_id in self.get_members():
player = Player.load(player_id)
for letter in player.get_letters():
party_letters.add(letter)
list_party_letters = list(party_letters)
list_party_letters.sort()
return list_party_letters
def make_word(self, word: str, dictionary: Dictionary) -> str:
msg = ""
if not dictionary.check_word(word):
logging.info(f"Word '{word}' not found in dictionary {dictionary}")
return f"Sorry, the word '{word}' isn't in my vocabulary!"
letters = list(word)
missing_letters = []
for letter in letters:
if letter not in self.get_letters():
missing_letters.append(letter)
missing_letters = list(set(missing_letters))
missing_letters.sort()
if len(missing_letters):
return f"unable to spell the word {word}; you don't have the letter(s) {StringUtil.readable_list(missing_letters, 'bold')}"
# Calculate points and money gained
party_size = len(self.get_members())
word_size = len(word)
word_points = (word_size - 3) * (word_size - 2) + party_size # add xp/score/points based on length of word and size of party
word_money = party_size * party_size + word_size
word_event_chance = (word_size * 2 + party_size)
if random.randint(1, 1000) < word_event_chance:
msg += f"You got a random event, with chance {word_event_chance} out of 1000\n"
for player_id in self.get_members():
player = Player.load(player_id)
player_leveled_up = player.add_points_and_check_for_levelup(word_points)
if player_leveled_up:
msg += f"{player.get_mention_tag()} is now level {player_leveled_up}! :rocket:\n"
player.add_money(word_money)
for letter in letters: # give each player xp for each letter in word
player.add_letter_xp(letter, 1)
player.remove_letters(letters)
msg += f"you formed the word '{word}'\n{'everyone' if len(self.get_members()) > 1 else 'and'} scored {word_points} points and received {word_money} glyphs\n"
if party_size <= 1:
disband_party(self.party_id)
return msg
def __str__(self):
return f"Party members: {self.get_members_as_string()}"
| 2.71875 | 3 |
venv/Lib/site-packages/plotnine/tests/test_position.py | EkremBayar/bayar | 0 | 12796954 | import string
import numpy as np
import pandas as pd
import pytest
from plotnine import (ggplot, aes, geom_point, geom_jitter, geom_bar,
geom_col, geom_boxplot, geom_text, geom_rect,
after_stat, position_dodge, position_dodge2,
position_jitter, position_jitterdodge,
position_nudge, position_stack, theme)
from plotnine.positions.position import position
from plotnine.exceptions import PlotnineError
n = 6
m = 10
random_state = np.random.RandomState(1234567890)
df1 = pd.DataFrame({'x': [1, 2, 1, 2],
'y': [1, 1, 2, 2]})
df2 = pd.DataFrame({'x': np.repeat(range(n+1), range(n+1)),
'z': np.repeat(range(n//2), range(3, n*2, 4))})
df3 = pd.DataFrame({
'x': random_state.choice(['A', 'B'], n*m),
'y': random_state.randint(0, 20, n*m),
'c': random_state.choice([False, False, True, False], n*m)
})
random_state.seed(1234567890)
_theme = theme(subplots_adjust={'right': 0.85})
def test_jitter():
df1 = pd.DataFrame({'x': [1, 2, 1, 2],
'y': [1, 1, 2, 2]})
p = (ggplot(df1, aes('x', 'y')) +
geom_point(size=10) +
geom_jitter(size=10, color='red', random_state=random_state) +
geom_jitter(size=10, color='blue', width=0.1,
height=0.1, random_state=random_state))
assert p + _theme == 'jitter'
with pytest.raises(PlotnineError):
geom_jitter(position=position_jitter(), width=0.1)
def test_nudge():
p = (ggplot(df1, aes('x', 'y')) +
geom_point(size=10) +
geom_point(size=10, color='red',
position=position_nudge(.25, .25)))
assert p + _theme == 'nudge'
def test_stack():
p = (ggplot(df2, aes('factor(z)')) +
geom_bar(aes(fill='factor(x)'), position='stack'))
assert p + _theme == 'stack'
def test_stack_negative():
df = df1.copy()
_loc = df.columns.get_loc
df.iloc[0, _loc('y')] *= -1
df.iloc[len(df)-1, _loc('y')] *= -1
p = (ggplot(df)
+ geom_col(aes('factor(x)', 'y', fill='factor(y)'),
position='stack')
+ geom_text(aes('factor(x)', 'y', label='y'),
position=position_stack(vjust=0.5))
)
assert p + _theme == 'stack-negative'
def test_fill():
p = (ggplot(df2, aes('factor(z)')) +
geom_bar(aes(fill='factor(x)'), position='fill'))
assert p + _theme == 'fill'
def test_dodge():
p = (ggplot(df2, aes('factor(z)')) +
geom_bar(aes(fill='factor(x)'), position='dodge'))
assert p + _theme == 'dodge'
def test_dodge_preserve_single():
df1 = pd.DataFrame({'x': ['a', 'b', 'b'],
'y': ['a', 'a', 'b']})
p = (ggplot(df1, aes('x', fill='y')) +
geom_bar(position=position_dodge(preserve='single')))
assert p + _theme == 'dodge_preserve_single'
def test_dodge_preserve_single_text():
df1 = pd.DataFrame({'x': ['a', 'b', 'b', 'b'],
'y': ['a', 'a', 'b', 'b']})
d = position_dodge(preserve='single', width=0.9)
p = (ggplot(df1, aes('x', fill='y'))
+ geom_bar(position=d)
+ geom_text(
aes(y=after_stat('count'), label=after_stat('count')),
stat='count',
position=d,
va='bottom')
)
assert p + _theme == 'dodge_preserve_single_text'
def test_dodge2():
p = (ggplot(df3, aes('x', 'y', color='c')) +
geom_boxplot(position='dodge2', size=2))
assert p + _theme == 'dodge2'
def test_dodge2_varwidth():
p = (ggplot(df3, aes('x', 'y', color='c')) +
geom_boxplot(
position=position_dodge2(preserve='single'),
varwidth=True,
size=2)
)
assert p + _theme == 'dodge2_varwidth'
def test_jitterdodge():
df = pd.DataFrame({
'x': np.ones(n*2),
'y': np.repeat(np.arange(n), 2),
'letters': np.repeat(list(string.ascii_lowercase[:n]), 2)})
position = position_jitterdodge(random_state=random_state)
p = (ggplot(df, aes('x', 'y', fill='letters')) +
geom_point(size=10, fill='black') +
geom_point(size=10, position=position))
assert p + _theme == 'jitterdodge'
def test_position_from_geom():
geom = geom_point(position='jitter')
assert isinstance(position.from_geom(geom), position_jitter)
geom = geom_point(position='position_jitter')
assert isinstance(position.from_geom(geom), position_jitter)
geom = geom_point(position=position_jitter())
assert isinstance(position.from_geom(geom), position_jitter)
geom = geom_point(position=position_jitter)
assert isinstance(position.from_geom(geom), position_jitter)
def test_dodge_empty_data():
empty_df = pd.DataFrame({'x': [], 'y': []})
p = (ggplot(df1, aes('x', 'y'))
+ geom_point()
+ geom_rect(
empty_df,
aes(xmin='x', xmax='x+1', ymin='y', ymax='y+1'),
position='dodge')
)
p.draw_test()
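# Note: the `assert p + _theme == '<name>'` pattern above relies on the test
# harness in plotnine's test suite (conftest machinery, not shown here)
# rendering the plot and comparing it against a stored baseline image of that
# name -- an assumption based on the test style, not defined in this file.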
| 2.09375 | 2 |
bibstuff/bibfile.py | cpitclaudel/bibstuff | 9 | 12796955 | <reponame>cpitclaudel/bibstuff
"""
:mod:`bibstuff.bibfile`: High level BibTeX file interface
---------------------------------------------------------
Provides two classes, BibFile and BibEntry for accessing the parts of a bibtex
database. BibFile inherits from ``simpleparse.dispatchprocessor``. To fill a
BibFile instance, bfi, call bibgrammar.Parse(src, bfi).
:copyright: <NAME> and <NAME>, see AUTHORS
:license: MIT (see LICENSE)
:requires: Python 2.4+
:TODO: make this framework more general,
perhaps along the lines of the btparse library
in `btOOL <http://www.gerg.ca/software/btOOL/>`_
:TODO: add support for bibtexparser (BSD)
https://github.com/sciunto-org/python-bibtexparser
"""
__docformat__ = "restructuredtext en"
__authors__ = ["<NAME>", "<NAME>"]
__version__ = '1.13'
__needs__ = '2.4'
# options:
# __strict__ = False allows empty citekeys
__strict__ = False # should we be strict with bibtex format?
####################### IMPORTS #####################################
# import from standard library
import re, logging
bibfile_logger = logging.getLogger('bibstuff_logger')
# import dependencies
from simpleparse import dispatchprocessor as spdp
from simpleparse.dispatchprocessor import dispatch, DispatchProcessor, getString, lines
#bibstuff imports
# from . import bibgrammar
#####################################################################
############### GLOBAL VARIABLES ##################################
months_en = ('January','February','March','April','May','June',
'July','August','September','October','November','December')
monthslower_en = [m.lower() for m in months_en]
monthmacros_en = [m[:3] for m in monthslower_en]
MONTH_DICT = dict( zip(monthmacros_en, months_en) )
#####################################################################
class BibEntry(dict):
"""
Stores a single bibliographic entry.
Provides a dictionary interface to the fields:
field keys are case-insensitive and fields are stored
in the order added.
:note: 2006-08-10 use 'citekey' instead of 'key' since BibTeX allows a 'key' field
:note: 2008-03-29 'entry_type' instead of 'type' since BibTeX allows a 'type' field
"""
def __init__(self,*args,**kwargs):
dict.__init__(self,*args,**kwargs)
self._fields = []
def __repr__(self):
"""return string representation of entry
"""
stringrep = '@%s{%s,\n' % (self.entry_type.upper() , self.citekey)
try:
mlen = max( len(key_str) for key_str in self._fields ) # for pretty format
except ValueError: #no fields (not a true entry)
mlen = 0
            bibfile_logger.warning("Entry apparently has no fields.")
field_list = []
for key in self._fields:
addbraces = True
addquotes = False
#spacer = ' '*(mlen - len(key) )
val = self[key]
#handle crossref
if key == 'crossref':
try: val = val['citekey'] #might be an entry
except TypeError: pass #->must be a string
elif key == 'journal':
if val.isalpha() and val.islower(): #:TODO: allow punctuation!!
addbraces = False #i.e., assume it is a macro
elif key == 'month':
# always use month macros if possible
if val.lower() in monthslower_en + monthmacros_en:
val = val[:3].lower()
addbraces = False
elif key in ("year","number","volume","chapter"):
try:
addbraces = not int(val)
                except (ValueError, TypeError):  # val may be non-numeric
pass
if '@' in val: # need to protect '@'
addquotes = True
if addquotes:
val = '"' + val + '"'
elif addbraces:
val = "{" + val + "}"
field_list.append(" %-*s = %s" % (mlen, key, val))
stringrep += ",\n".join(field_list)
stringrep += '\n}\n'
return stringrep
def __setitem__(self, key, val):
key = key.lower()
dict.__setitem__(self, key, val)
if key == "key":
bibfile_logger.info(
"Setting 'key' as an entry *field*. (Recall 'citekey' holds the entry id.)")
if key not in self._fields and key not in ["citekey","entry_type"] and val:
self._fields.append(key)
def __getitem__(self, field): #field is usually a BibTeX field but can be a citekey
field = field.lower()
if field == "key":
bibfile_logger.info(
"Seeking 'key' as an entry *field*. (Recall 'citekey' holds the entry id.)")
try:
result = dict.__getitem__(self, field)
#:TODO: rethink this decision (but it is used for formatting)
#:note: 20080331 changed KeyError to return '' instead of None
except KeyError:
crossref = self.get('crossref', '')
if isinstance(crossref, self.__class__):
result = crossref[field]
else:
result = ''
#:note: 20080331 add handling of month macros
if field == 'month' and result in monthmacros_en:
result = MONTH_DICT[result]
return result
def __delitem__(self,key) :
key = key.lower()
try:
dict.__delitem__(self, key)
except KeyError:
pass
try:
self._fields.remove(key)
except ValueError:
pass
def set_entry_type(self, val):
self["entry_type"] = val.lower() #:note: entry_type stored as lowercase
def get_entry_type(self):
return self["entry_type"]
entry_type = property(get_entry_type, set_entry_type, None, "property: 'entry_type'")
def set_citekey(self, val):
self["citekey"] = val
def get_citekey(self):
return self["citekey"]
citekey = property(get_citekey,set_citekey,None,"property: 'citekey'")
def get_fields(self):
return self._fields
def set_fields(self, lst):
self._fields = lst
fields = property(get_fields, set_fields, None, "property: 'fields'")
def search_fields(self, string_or_compiled, field='', ignore_case=True):
"""Find regular expression in entry.
Return MatchObject if string_or_compiled found in entry else None. If
field is omitted, search is through all fields.
:note: used by BibFile's find_re method, which is used in turn by bibsearch.py
:Parameters:
`string_or_compiled` : string to compile or compiled regex
pattern for searching
`field` : string
field to search in self (default: search all fields)
"""
if isinstance(string_or_compiled, str):
if ignore_case:
reo = re.compile(string_or_compiled, re.MULTILINE | re.IGNORECASE)
else:
reo = re.compile(string_or_compiled, re.MULTILINE)
else: #must have a compiled regular expression
reo = string_or_compiled
if not field: #->try all fields (but not citekey)
for f in self.get_fields():
found = reo.search( self[f] )
if found: break # no need to check more fields
# :note: CAN test 'field in self' (even though an entry will not raise
#KeyError! see TODO above) BUT do not test 'field in self' bc want test
#for empty fields below
elif self[field]:
found = reo.search( self[field] )
else:
if field in self:
bibfile_logger.info("Empty field %s in entry\n%s.\n."%(self,field))
found = None
return found
def format_names(self, names_formatter):
"""return formatted BibName-object if possible else raw name
:type `names_formatter`: NamesFormatter
:note: called by CitationManager in format_citation
:note: 2006-08-08 no longer sets a `_names` attribute
:TODO: add default name_template useful for .bib files?
"""
bibfile_logger.debug("BibEntry.format_names: arg is:"+str(names_formatter))
names = self.get_names() #get a BibName instance (or possibly, a string)
#keep string if stuck with it
if isinstance(names,str):
result = names
else: #assume a BibName instance
#ask BibName instance to format itself (and it asks a NamesFormatter to do it)
result = names.format(names_formatter)
bibfile_logger.debug("BibEntry.format_names result = "+str(result))
return result
def get_names(self, entry_formatter=None, try_fields=None):
"""return (BibName-object if possible else string)
:note: 2006-08-09 matching change to `make_names`, no longer sets `self._names`
"""
if entry_formatter is None:
if not try_fields:
try_fields = ['author','editor','organization']
return self.make_names(entry_formatter, try_fields=try_fields)
def make_names(self, entry_formatter=None, try_fields=None):
"""return (BibName-object if possible else string)
(from "raw" names).
:change: 2006-08-02 altered to return BibName instance and not set _names
:note: self returns None if field missing (-> no KeyError)
:note: this method introduces the only dependence on simpleparse (via bibname)
:TODO: return BibName instance for each available name field??
:Parameters:
- `entry_formatter`: EntryFormatter instance to provide style information
        - `try_fields`: list of field names to try sequentially; first non-empty field -> name
"""
# importing bibname here to avoid recursive import
from bibstuff import bibname #ai: shd move all bibname into here? possibly
if entry_formatter is None:
for field in try_fields:
raw_names = self[field]
if raw_names:
break
else:
raw_names, field = entry_formatter.pick_raw_names(self,try_fields)
return bibname.BibName(raw_names,from_field=field) #names are in a BibName object
def format_with(self, entry_formatter):
bibfile_logger.debug("BibEntry.format_with: arg is:"+str(entry_formatter))
#ask the EntryFormatter to do it
return entry_formatter.format_entry(self)
# A default label style for citekeys created by make_citekey()
# first max_names names included, then etal
citekey_label_style1 = dict(
name_template = 'v{_}_|l{}', # "van_der_Meer" or "van_DerStadt"
max_names = 2,
name_name_sep = '+',
etal = 'etal',
anonymous = 'anon',
lower_name = False,
article = "%(names)s-%(year)s",
book = "%(names)s-%(year)s",
misc = "%(names)s-%(year)s",
default_type = "%(names)s-%(year)s",
)
#style2 shd be rst compatible
# citekey_label_style2 = dict(
# name_first = 'l{_}',
# name_other = 'l{_}',
# max_names = 2,
# use_max_names = False,
# name_name_sep = ('.','.'),
# etal = '',
# lower_name = True,
# anonymous = 'anon',
# article = "%(names)s-%(year)s-%(jrnl)s",
# book = "%(names)s-%(year)s",
# misc = "%(names)s-%(year)s",
# default_type = "%(names)s-%(year)s",
# )
def make_citekey(self, used_citekeys = [], style = citekey_label_style1):
"""Create and return a new citekey based on the entry's data. This is for
creating predictable and useful citekey (labels) for BibEntry objects.
This is not integrated with the citation styles in bibstuff.bibstyles;
but it serves a very different purpose. This is to create consistent
citation keys that are easy to type and guess and that are valid BibTeX
citation keys.
:Parameters:
- used_citekeys : list
a list of the already taken citation keys
so that the function can avoid duplicates (by adding a,b,c,d... etc)
        - style : dict
            The format of the citekey is determined by a `label_style` (see below)
:Returns: string
the citation key (label)
Example:
The label style is a dict with the following fields::
citekey_label_style1 = dict(
name_template = 'v{_}_|l{}', # see NameFormatter class
max_names = 2,
name_name_sep = "+",
etal = 'etal',
anonymous = 'anon',
lower_name = False,
article = "%(names)s-%(year)s",
book = "%(names)s-%(year)s",
misc = "%(names)s-%(year)s",
default_type = "%(names)s-%(year)s")
:TODO: Strip LaTeX accent characters from names when making label
"""
from .bibstyles.shared import NameFormatter
from string import ascii_lowercase
format_dict = {}
entry_type = self.entry_type.lower()
try:
label_template = style[entry_type]
except KeyError:
label_template = style['default_type']
name_template = style['name_template']
max_names = style['max_names']
name_name_sep = style['name_name_sep']
lower_name = style['lower_name']
etal = style['etal']
# first, make names
name_formatter = NameFormatter(template = name_template)
names_dicts = self.get_names().get_names_dicts()
# make list of 'v_|l' last names, which can possibly have multiple
# tokens (e.g., two piece last names)
ls = [name_formatter.format_name(name_dict) for name_dict in names_dicts]
if len(ls) > max_names:
ls = ls[:max_names] + [etal]
names = name_name_sep.join(ls)
if lower_name:
names = names.lower()
format_dict['names'] = names
year = self['year'] or '????'
format_dict['year'] = year
if entry_type == "article":
jrnl = self['journal']
jrnl = ''.join(jrnl.split()).lower() # keep macro
jrnl = jrnl.replace("journal","j",1)
format_dict['jrnl'] = jrnl # short form, no spaces
# make unique result: if needed, append suffix b or c or d... to year
sfx = ''; c = 1
# while result+sfx in used_citekeys:
while label_template%format_dict in used_citekeys:
sfx = ascii_lowercase[c%26]*(1+c//26) # :note: lowercase since
# BibTeX does not
# distinguish case
format_dict['year'] = year+sfx
c += 1
result = label_template%format_dict
return result
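# Worked example for make_citekey(), assuming an article by
# "John Smith and Jane Jones" (2004) and the default style above:
# the name template yields "Smith" and "Jones", joined by '+' up to
# max_names, so the citekey is "Smith+Jones-2004"; a colliding entry
# with the same names and year would get "Smith+Jones-2004b", then
# "Smith+Jones-2004c", and so on.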
# ----------------------------------------------------------
# Bibfile
# -------
# Data storage for bibtex file
# ----------------------------------------------------------
class BibFile( DispatchProcessor ):
"""Stores parsed bibtex file. Access entries by key.
:note: a BibFile object should simply *store* .bib file parts
(a list of entries and a macro map) and provide access
to these parts
"""
def __init__(self) :
self.entries = []
self._macroMap = {}
def get_entrylist(self, citekeys, discard=True):
"""Return list, the BibEntry instances that were found
(and None for entries not found, unless discarded).
"""
if not citekeys:
bibfile_logger.warning("get_entrylist: No keys provided; returning empty cited-entry list.")
return []
temp = [ (key,self.get_entry_by_citekey(key)) for key in citekeys ]
bad_keys = [pair[0] for pair in temp if not pair[1]]
if bad_keys and discard:
bibfile_logger.warning("Database entries not found for the following keys:\n"+"\n".join(bad_keys))
if discard:
result = [pair[1] for pair in temp if pair[1]]
else: #keep None when occurs in entry list
result = [pair[1] for pair in temp]
#attach cross references
for entry in result:
if entry:
crossref = entry.get('crossref', None)
if isinstance(crossref, str):
crossref = self.get_entry_by_citekey(crossref)
if crossref:
entry['crossref'] = crossref
return result
def get_entry_by_citekey(self, citekey):
"""Return entry or None."""
for entry in self.entries:
if entry.citekey == citekey:
return entry
"""PRODUCTION FUNCTIONS:
for parsing, must provide a function for each production name.
"""
def string(self, tuple4, buffer ):
"""Return a string, stripping leading and trailing markers"""
(tag,start,stop,subtags) = tuple4
return buffer[start+1:stop-1]
def number(self, tuple4, buffer ):
"""return a number as a string"""
(tag,start,stop,subtags) = tuple4
return buffer[start:stop]
def entry_type( self, tuple4, buffer ):
"""Return the entry type"""
(tag,start,stop,subtags) = tuple4
return getString((tag,start,stop,subtags), buffer)
def citekey( self, tuple4, buffer ):
"""Return the entry's citekey"""
(tag,start,stop,subtags) = tuple4
return getString((tag,start,stop,subtags), buffer)
# macro name
def name(self, tuple4, buffer ):
"""Return lookup on name or name if not in map."""
(tag,start,stop,subtags) = tuple4
return self._macroMap.get(buffer[start:stop],buffer[start:stop])
def field(self, tuple4, buffer ):
"""Process a bibentry field and return tuple of name, value."""
(tag,start,stop,subtags) = tuple4
        value = ''
        for t in subtags[1][3]:
            if t:
                value += dispatch(self, t, buffer)  # concatenate hashed-together strings
        return (dispatch(self, subtags[0], buffer), value)
def entry( self, tuple4, buffer ):
"""Process the bibentry and its children.
"""
(tag,start,stop,subtags) = tuple4
entry = BibEntry()
entry.entry_type = dispatch(self, subtags[0], buffer)
entry.citekey = dispatch(self, subtags[1], buffer)
for field in subtags[2][3] :
#bibfile_logger.debug("entry: ready to add field: "+str(dispatch(self, field, buffer)))
k,v = dispatch(self, field, buffer)
#:note: entry will force k to lowercase
entry[k] = v
self.entries.append(entry)
def macro( self, tuple4, buffer ):
"""Process a macro entry and add macros to macro map"""
(tag,start,stop,subtags) = tuple4
        name, value = dispatch(self, subtags[0], buffer)
"""
the_type = getString(subtags[0], buffer)
if the_type.upper() != 'STRING' :
# it looks like a macro, but is not: could be a regular entry with no key
lineno = lines(0, start, buffer)+1
bibfile_logger.warning("Entry at line %d has macro syntax, but entry_type is %s" % (lineno , the_type))
if not __strict__: # we can add a dummy key and treat this entry as a regular entry
entry = BibEntry()
entry.entry_type = dispatch(self, subtags[0], buffer)
entry.citekey = 'KEY' # dummy key -- or should we be strict?
for field in subtags[1][3] :
k,v = dispatch(self, field, buffer)
#:note: entry will force k to lowercase
entry[k] = v
self.entries.append(entry)
bibfile_logger.warning("Dummy key added to entry at line %d" % lineno)
else : # otherwise it is really a macro entry
for field in subtags[1][3]:
name, str = dispatch(self, field, buffer)
self._macroMap[name] = str
"""
        self._macroMap[name] = value
def preamble( self, tuple4, buffer ):
"""Process the given production and it's children"""
(tag,start,stop,subtags) = tuple4
the_type = getString(subtags[0], buffer)
lineno = lines(0,start,buffer)+1
if the_type.upper() != 'PREAMBLE' :
bibfile_logger.warning("Entry at line %d has preamble syntax but entry_type is %s" % (lineno,the_type))
else :
bibfile_logger.warning("Preamble entry on line %d:" % lineno + "\n" + buffer[start:stop])
def comment_entry(self, tuple4, buffer):
"""Process the given production and it's children"""
(tag,start,stop,subtags) = tuple4
the_type = getString(subtags[0], buffer)
lineno = spdp.lines(0, start, buffer) + 1
if the_type.upper() != 'COMMENT' :
bibfile_logger.warning("""Entry at line %d has comment syntax
but entry_type is %s:
Details: %s""" % (lineno, the_type, getString(subtags[1], buffer)))
else :
bibfile_logger.info("Comment entry on line %d:" % lineno + " " + getString(subtags[1], buffer))
def search_entries(self, string_or_compiled, field='', ignore_case=True):
"""Return list of matching entries.
Search for regular expression in the fields of each entry.
If field is omitted, search is through all fields.
:note: used by bibsearch.py
:Parameters:
- `string_or_compiled` : string to compile or compiled regex
pattern for searching
- `field` : string
field to search in self (default: search all fields)
"""
if isinstance(string_or_compiled, str):
if ignore_case:
reo = re.compile(string_or_compiled, re.MULTILINE | re.IGNORECASE)
else:
reo = re.compile(string_or_compiled, re.MULTILINE)
else: #->must have a compiled regular expression
reo = string_or_compiled
"""
Find regex in bib_entry.
If field is omitted, search is through all fields.
:note: used by bibsearch.py
"""
ls = [entry for entry in self.entries
if entry.search_fields(string_or_compiled=reo, field=field, ignore_case=ignore_case)]
return ls
# self test
# -------------------------
# usage: bibfile.py DATABASE_FILE
# if __name__ == "__main__":
# import sys
# if len(sys.argv) > 1 :
# src = open(sys.argv[1]).read()
# bfile = BibFile()
# bibgrammar.Parse(src, bfile)
# for entry in bfile.entries :
# print entry
# else :
# print "self test usage: bibfile.py DATABASE_FILE"
| 2.15625 | 2 |
tea_admin/tea_admin/doctype/labour_information/test_labour_information.py | nivedita05/Tea-Admin | 0 | 12796956 | <reponame>nivedita05/Tea-Admin<gh_stars>0
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
test_records = frappe.get_test_records('Labour Information')
class TestLabourInformation(unittest.TestCase):
#from erpnext.stock.get_item_details import get_item_details
#from frappe import MandatoryError
    def make_employee(self, emp_id):
        name = frappe.db.get_value("Labour Information", {"emp_id": emp_id})
        if not name:
test_record = {
"doctype": "Labour Information",
"title": "_Test Labour Information",
"category": "STAFF",
"name1":"abc",
"sirdar":"aaa",
"status":"Permanent",
"gender1":"Male",
"doj":"2016-08-19",
"book_code":"STAFF",
"emp_id":"001",
"garden": "Ghatia Tea Estate"
}
            return frappe.get_doc(test_record).insert()
        return frappe.get_doc("Labour Information", name)
    def test_employee_name(self):  # "test_" prefix so unittest discovers and runs it
employee = self.make_employee("001")
self.assertEqual(employee.name1, "abc")
res = frappe.get_list("Labour Information", filters=[["Labour Information", "name1", "like", "abc%"]], fields=["emp_id", "name1"])
        self.assertEqual(len(res), 1)
| 2.484375 | 2 |
setup.py | deepakunni3/golr-schema-generator | 0 | 12796957 | from setuptools import setup, find_packages
NAME = 'golr-schema-generator'
DESCRIPTION = 'GOlr Schema Generator'
URL = 'https://github.com/deepakunni3/golr-schema-generator'
AUTHOR = '<NAME>'
EMAIL = '<EMAIL>'
REQUIRES_PYTHON = '>=3.7.0'
VERSION = '0.0.1'
LICENSE = 'BSD3'
REQUIRED = [
'PyYAML>=5.3'
]
EXTRAS = {
'test': ['pytest']
}
setup(
name=NAME,
version=VERSION,
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
description=DESCRIPTION,
long_description=open('README.md').read(),
license=LICENSE,
packages=find_packages(),
keywords='Solr GOlr golr-schema',
classifiers=[
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3'
],
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True
)
| 1.1875 | 1 |
jqp/cli.py | abranjith/jqp | 0 | 12796958 | import json
import click
from tokenizer import get_grouped_tokens, TokenName
NULL = "null"
#from click documentation to support alias command
class AliasedGroup(click.Group):
def get_command(self, ctx, cmd_name):
rv = click.Group.get_command(self, ctx, cmd_name)
if rv is not None:
return rv
matches = [x for x in self.list_commands(ctx)
if x.startswith(cmd_name)]
if not matches:
return None
elif len(matches) == 1:
return click.Group.get_command(self, ctx, matches[0])
ctx.fail('Too many matches: %s' % ', '.join(sorted(matches)))
@click.command()
@click.option("--expression", "-e", type=click.STRING, help="jq style expression to search in the json", required=True)
@click.option("--file", "-f", type=click.File("r"), help="File with valid json content", required=True)
def cli(expression, file):
all_tokens = [g for g in get_grouped_tokens(expression)]
validate_tokens(all_tokens, expression)
json_obj = get_json(file)
result = jq_parser(json_obj, all_tokens)
result = json.dumps(result, indent=4)
click.echo(result)
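# Hedged invocation sketch (the exact expression grammar lives in
# tokenizer.py, which is not shown here):
#
#     python cli.py -e "users[0].name" -f data.json
#
# would look up the "name" key of the first element of the top-level
# "users" array and print it as indented JSON; keys are matched
# case-insensitively (see retrieve_token_from_json below).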
def jq_parser(json_obj, tokens):
if not (json_obj and json_obj != NULL and tokens):
return json_obj
    if len(tokens) == 1:
token = tokens[0]
return retrieve_token_from_json(json_obj, token)
first_token = tokens[0]
remaining_tokens = tokens[1:]
if isinstance(json_obj, list):
result = []
for obj in json_obj:
r = retrieve_token_from_json(obj, first_token)
if r and r != NULL:
result.append(jq_parser(r, remaining_tokens))
else:
result.append(NULL)
index = _get_index(first_token)
if index is None:
return result
if index >= len(result):
raise click.ClickException(f"Bad index {index}. There are only {len(result)} elements in the array")
return result[index]
elif isinstance(json_obj, dict):
r = retrieve_token_from_json(json_obj, first_token)
return jq_parser(r, remaining_tokens)
def retrieve_token_from_json(json_obj, token):
if not (json_obj and json_obj != NULL and token):
return json_obj
index = _get_index(token)
if isinstance(json_obj, list):
result = []
for obj in json_obj:
#this is probably the only case for a valid json
if isinstance(obj, dict):
#case insensitive
obj = {k.strip().lower() : v for k,v in obj.items()}
result.append(obj.get(token[0].value.strip().lower(), NULL))
if index is None:
return result
if index >= len(result):
raise click.ClickException(f"Bad index {index}. There are only {len(result)} elements in the array")
return result[index]
elif isinstance(json_obj, dict):
#case insensitive
json_obj = {k.strip().lower() : v for k,v in json_obj.items()}
val = json_obj.get(token[0].value.strip().lower(), NULL)
if isinstance(val, list):
if index is None:
return val
if index >= len(val):
raise click.ClickException(f"Bad index {index}. There are only {len(val)} elements in the array")
return val[index]
return val
def get_json(fp):
try:
return json.load(fp)
except Exception as ex:
raise click.ClickException(str(ex))
def validate_tokens(all_tokens, expression):
if not all_tokens or len(all_tokens) == 0:
raise click.ClickException(f"{expression} is a bad expression")
for g in all_tokens:
if not g:
raise click.ClickException(f"{expression} is a bad expression. Currently not supporting unix style multiple dots (such as .. etc)")
if len(g) == 1:
if not ( g[0].name == TokenName.KEY ):
message = str(g[0])
raise click.ClickException(f"{message} is a bad token. Currently supports either plain key or key with one index (in case of array)")
elif len(g) == 2:
if not ( g[0].name == TokenName.KEY and g[1].name == TokenName.INDEX):
message = str(g[0]) + ", " + str(g[1])
raise click.ClickException(f"{message} is a bad token. Currently supports either plain key or key with one index (in case of array)")
elif len(g) > 2:
message = ", ".join([str(r) for r in g])
raise click.ClickException(f"{message} is a bad token. Currently supports either plain key or key with one index (in case of array)")
def _get_index(token):
if not token or len(token) <= 1:
return None
t = token[1]
if t.name == TokenName.INDEX:
if t.value.strip().isdecimal():
return int(t.value.strip())
else:
raise click.ClickException(f"{t.value} is a bad value where a numeric index of >= 0 is expected")
    return None
| 2.703125 | 3 |
config.py | andela-oadeniran/bucket_list_app | 0 | 12796959 | import os
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
MAIN_DB_URL = os.path.join(BASE_DIR, 'bucketlist.sqlite')
TEST_DB_URL = os.path.join(BASE_DIR, 'test.sqlite')
class BaseConfig(object):
'''
The class holds base config for each environment
'''
SECRET_KEY = os.getenv('SECRET_KEY', 'This should be changed')
    SQLALCHEMY_DATABASE_URI = os.getenv(
        'DATABASE_URI', 'sqlite:///' + MAIN_DB_URL)  # default to the local SQLite database when DATABASE_URI is unset
SQLALCHEMY_TRACK_MODIFICATIONS = False
ERROR_404_HELP = False
DEBUG = False
TESTING = False
class DevelopmentConfig(BaseConfig):
'''
configuration for the development environment
'''
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + MAIN_DB_URL
DEBUG = True
DEVELOPMENT = True
class TestingConfig(BaseConfig):
'''
config when testing
'''
TESTING = True
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + TEST_DB_URL
CSRF_ENABLED = False
class StagingConfig(BaseConfig):
DEVELOPMENT = True
DEBUG = True
class ProductionConfig(BaseConfig):
'''
config for when in production
'''
DEBUG = False
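# Hedged usage sketch: with Flask, one of the classes above would typically
# be selected at app start-up, e.g.:
#
#     app.config.from_object('config.ProductionConfig')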
| 2.328125 | 2 |
parentopticon/db/test_model.py | EliRibble/parentopticon | 0 | 12796960 | import datetime
import os
from typing import List, Optional
import unittest
from parentopticon.db import test_utilities
from parentopticon.db.model import ColumnInteger, ColumnText, Model
class ModelTests(test_utilities.DBTestCase):
"Test all of our logic around the model class."
class MyTable(Model):
COLUMNS = {
"id": ColumnInteger(autoincrement=True, primary_key=True),
"count": ColumnInteger(),
"name": ColumnText(null=True),
}
def _makerows(self, names: Optional[List[str]] = None):
"Make a few rows. Useful for many tests."
names = names or ["foo", "bar", "baz"]
return {
ModelTests.MyTable.insert(self.db, count=(i+1)*2, name=name)
for i, name in enumerate(names)
}
def setUp(self):
super().setUp()
self.db.execute_commit_return(ModelTests.MyTable.create_statement())
self.db.execute_commit_return(ModelTests.MyTable.truncate_statement())
def test_create_statement(self):
"Can we get a proper create table clause?"
result = ModelTests.MyTable.create_statement()
expected = "\n".join((
"CREATE TABLE IF NOT EXISTS MyTable (",
"count INTEGER,",
"id INTEGER PRIMARY KEY AUTOINCREMENT,",
"name TEXT",
");",
))
self.assertEqual(result, expected)
def test_insert(self):
"Can we insert a row into a table?"
rowid = ModelTests.MyTable.insert(self.db, count=3, name="foobar")
found = self.db.execute("SELECT count, name FROM MyTable").fetchall()
self.assertEqual(len(found), 1)
def test_get(self):
"Can we get a row from the table?"
rowid = ModelTests.MyTable.insert(self.db, count=3, name="foobar")
result = ModelTests.MyTable.get(self.db, rowid)
self.assertEqual(result.id, rowid)
self.assertEqual(result.count, 3)
self.assertEqual(result.name, "foobar")
def test_get_none(self):
"Can we get None when the row does not exist?"
result = ModelTests.MyTable.get(self.db, -1)
self.assertIs(result, None)
def test_list_all(self):
"Can we get several rows from the table?"
rowids = self._makerows()
results = ModelTests.MyTable.list(self.db)
self.assertEqual({result.id for result in results}, rowids)
def test_list_some(self):
"Can we get several rows from the table with a where clause?"
rowids = self._makerows()
results = ModelTests.MyTable.list_where(self.db, where="count >= 4")
self.assertEqual({result.count for result in results}, {4, 6})
def test_list_with_none(self):
"Can we get a list where an item is NULL?"
rowids = self._makerows(names=["foo", None, "bar"])
results = ModelTests.MyTable.list(self.db, name=None)
self.assertEqual({result.count for result in results}, {4})
def test_search_not_found(self):
"Can we search and not find something?"
results = ModelTests.MyTable.search(self.db, name="sir-not-appearing")
self.assertIs(results, None)
def test_search_one(self):
"Can we search and find a single row?"
rowids = self._makerows()
results = ModelTests.MyTable.search(self.db, name="foo")
self.assertEqual(results.name, "foo")
self.assertEqual(results.count, 2)
def test_search_many(self):
"Do we error when we have multiple matches?"
self._makerows(names=["foo", "foo", "bar"])
with self.assertRaises(ValueError):
ModelTests.MyTable.search(self.db, name="foo")
def test_search_with_none(self):
"Do we properly search for NULL columns?"
self._makerows(names=["foo", None, "bar"])
results = ModelTests.MyTable.search(self.db, name=None)
self.assertEqual(results.name, None)
self.assertEqual(results.count, 4)
def test_update(self):
"Can we update a row with update()?"
rows = self._makerows(names=["foo"])
row_id = list(rows)[0]
ModelTests.MyTable.update(self.db, row_id, name="biff")
results = ModelTests.MyTable.get(self.db, row_id)
self.assertEqual(results.name, "biff")
def test_update_multiple(self):
"Can we update a row with multiple values?"
rows = self._makerows(names=["foo"])
row_id = list(rows)[0]
ModelTests.MyTable.update(self.db, row_id, name="biff", count=100)
results = ModelTests.MyTable.get(self.db, row_id)
self.assertEqual(results.count, 100)
self.assertEqual(results.name, "biff")
| 2.9375 | 3 |
babilim/data/specialized_readers/data_downloader.py | penguinmenac3/babilim | 1 | 12796961 | <reponame>penguinmenac3/babilim<filename>babilim/data/specialized_readers/data_downloader.py
import os
import urllib.request
from zipfile import ZipFile
def download_zip(root_dir: str, url: str) -> None:
"""
Download a zip from a url and extract it into a data folder.
    Useful for downloading small datasets.
    The zip file gets extracted into root_dir; it is recommended to set root_dir to your dataset folder when you download a dataset.
:param root_dir: The root directory where to extract all.
:param url: The url from which to download the zip archive.
"""
dataset_name = url.split("/")[-1]
assert ".zip" == dataset_name[-4:]
dataset_name = dataset_name[:-4]
if not os.path.exists(os.path.join(root_dir, dataset_name)):
urllib.request.urlretrieve(url, "{}.zip".format(dataset_name))
# Create a ZipFile Object and load sample.zip in it
with ZipFile('{}.zip'.format(dataset_name), 'r') as zipObj:
# Extract all the contents of zip file in current directory
zipObj.extractall(root_dir)
os.remove("{}.zip".format(dataset_name))
else:
print("Using buffered data.")
| 3.859375 | 4 |
kiv_bit_rsa/hash/__init__.py | miroslavkrysl/kiv-bit-rsa | 0 | 12796962 | """Hashing module
A simple module for hashing.
Contains base class :py:class:`Hash` from which all
hash implementations inherit.
For now contains only MD5 hash class :py:class:`Md5`
"""
from .hash import Hash
from .md5 import Md5
__all__ = ["Hash", "Md5"]
| 3.28125 | 3 |
Numpy/Zeros and Ones.py | Code-With-Aagam/python-hackerrank | 3 | 12796963 | <reponame>Code-With-Aagam/python-hackerrank
import numpy
numbers = tuple(map(int, input().split()))
print(numpy.zeros(numbers, dtype=int), numpy.ones(numbers, dtype=int), sep='\n')  # the builtin int replaces the numpy.int alias, which was removed from NumPy
| 3.3125 | 3 |
plaso/formatters/utmpx.py | CNR-ITTIG/plasodfaxp | 1 | 12796964 | # -*- coding: utf-8 -*-
"""The UTMPX binary file event formatter."""
from plaso.formatters import interface
from plaso.formatters import manager
from plaso.lib import errors
class UtmpxSessionFormatter(interface.ConditionalEventFormatter):
"""Formatter for an UTMPX session event."""
DATA_TYPE = u'mac:utmpx:event'
FORMAT_STRING_PIECES = [
u'User: {user}',
u'Status: {status}',
u'Computer Name: {computer_name}',
u'Terminal: {terminal}']
FORMAT_STRING_SHORT_PIECES = [u'User: {user}']
SOURCE_LONG = u'UTMPX session'
SOURCE_SHORT = u'LOG'
# 9, 10 and 11 are only for Darwin and IOS.
_STATUS_TYPES = {
0: u'EMPTY',
1: u'RUN_LVL',
2: u'BOOT_TIME',
3: u'OLD_TIME',
4: u'NEW_TIME',
5: u'INIT_PROCESS',
6: u'LOGIN_PROCESS',
7: u'USER_PROCESS',
8: u'DEAD_PROCESS',
9: u'ACCOUNTING',
10: u'SIGNATURE',
11: u'SHUTDOWN_TIME'}
def GetMessages(self, unused_formatter_mediator, event_object):
"""Determines the formatted message strings for an event object.
Args:
formatter_mediator: the formatter mediator object (instance of
FormatterMediator).
event_object: the event object (instance of EventObject).
Returns:
A tuple containing the formatted message string and short message string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter.
"""
if self.DATA_TYPE != event_object.data_type:
raise errors.WrongFormatter(u'Unsupported data type: {0:s}.'.format(
event_object.data_type))
event_values = event_object.GetValues()
status_type = event_values.get(u'status_type', None)
if status_type is not None:
event_values[u'status'] = self._STATUS_TYPES.get(
status_type, u'{0:d}'.format(status_type))
else:
event_values[u'status'] = u'N/A'
return self._ConditionalFormatMessages(event_values)
manager.FormattersManager.RegisterFormatter(UtmpxSessionFormatter)
| 2.375 | 2 |
log.py | k1k9/job-offer-fetcher | 0 | 12796965 | <filename>log.py
#!/usr/bin/env python3
#
# Title: job-offer-fetcher
# Description: This part of script is responsible for saving script behaviors into log file
# Author: @k1k9
# License: MIT
#
from time import localtime as ltime
def saveLog(content, tag="INFO"):
''' Save information (log) into log file '''
# Config
    logDate = '{0:02d}/{1:02d}/{2} {3:02d}:{4:02d}:{5:02d}'.format(ltime().tm_mday, ltime().tm_mon,
        ltime().tm_year, ltime().tm_hour, ltime().tm_min, ltime().tm_sec)
    log = '[{0}]\t{1}\t\t{2}\n'.format(logDate, tag.upper(), content)
    with open('logs.txt', 'a+') as file:  # context manager ensures the handle is closed
        file.write(log)
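# Hedged usage sketch:
#
#     saveLog("Fetched 12 new offers", "info")
#
# appends a line like "[03/07/2024 14:05:09]\tINFO\t\tFetched 12 new offers"
# to logs.txt in the working directory.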
def saveError(path, error):
''' Save error into log file like normal log '''
    saveLog('In {0} error: {1}'.format(path, error), 'error')
| 3.140625 | 3 |
mousemorph/mousemorph.py | dancebean/mousemorph | 20 | 12796966 | <filename>mousemorph/mousemorph.py
#!/usr/bin/python
"""MouseMorph, an automatic mouse MR image processor
This is the base MouseMorph program. Run it with Python.
Usage
python mousemorph.py --help
Orient brains to standard space
-------------------------------
python mousemorph.py orient <arguments>
Non-uniformity correct brains
-----------------------------
python mousemorph.py nuc <arguments>
"""
# To do
# -----
# 1. if -i has more than one argument following it, use as a list of input directories (or files) and combine all input files into the same list. Likewise for other directory arguments.
# corresponding == FSL FLIRT's "secondary"
# adapt to accept -corr [dir] [filter]
# if input is a single image and there's no filter here, all images in dir will be oriented as per the single input
# if input is a single image and there is a filter here, all images in dir matching the filter will be oriented as per the single input
# if input is a directory and there's no filter here, corresponding images in dir will be oriented
# if input is a directory and there is a filter here, corresponding images in dir which also matching the filter will be oriented
# Replace mm_multi.py with: run mousemorph.py multi action1 [-arg1 -arg2 -arg3 'param3'] action2 [-arg1 'param1' -arg2] action3 [-arg1 -arg2]
# Run any script from this one
import os
import sys
import glob
import time
import argparse
import subprocess
from itertools import chain
from datetime import datetime
# import test_go
import mm_functions as mmfn
__author__ = '<NAME> (PhD student, CMIC & CABI, UCL, UK), <EMAIL>'
__created__ = '2015-06-28'
def notify_start():
"""Give the user some helpful notes at the launch of the script."""
time_format = '%H:%M:%S %p on %b %d, %Y (%Z)'
time_start = time.strftime(time_format)
mmfn.alert_user("Start time is {0} ...".format(time_start))
return time_start
def notify_complete(time_start=None, log_location=''):
time_format = '%H:%M:%S %p on %b %d, %Y (%Z)'
time_stop = time.strftime(time_format)
time_diff = datetime.strptime(time_stop, time_format) - datetime.strptime(time_start, time_format)
mmfn.alert_user("Completion time is {0}. Took {1}.".format(time_stop, time_diff))
return
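# Hedged invocation sketches, mirroring the module docstring; arguments after
# the command name are parsed by the matching *_args method (orient_args,
# nuc_args, ...) on the MouseMorph class below:
#
#     python mousemorph.py orient -i ./brains -o ./oriented -ow
#     python mousemorph.py nuc -i ./brains --iterations 200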
class MouseMorph(object):
"""Define all the necessary arguments passed to MouseMorph programs.
"""
def __init__(self):
command_parser = argparse.ArgumentParser("argp_mousemorph", description = "MouseMorph, an automatic mouse MR image processor.",
usage = """mousemorph <command> [<arguments>], where <command> can be any of: \n\textract, \n\torient, \n\tnuc, \n\tintstan, \n\t...""")
command_parser.add_argument('command', help="MouseMorph program to be run.")
first_arg = command_parser.parse_args(sys.argv[1:2])
if not hasattr(self, first_arg.command):
print("command '{0}' not recognised.".format(first_arg.command))
command_parser.print_help()
sys.exit(1)
# Top-level parser with universal arguments
top_parser = argparse.ArgumentParser("MouseMorph user input parser", description = "MouseMorph, an automatic mouse MR image processor.",
usage = "mousemorph <command> [<arguments>]")
# Input and output
top_parser.add_argument('-i', '--input', dest='input', metavar='<directory> or <file path>', help='Input file (NIfTI-1, *.nii.gz or *.nii) or directory [current]')
top_parser.add_argument('-o', '--output', dest='output', metavar='<directory>', help='Output directory [input directory]')
top_parser.add_argument('-onp', '--out_name_prepend', metavar='<string>', help='Prepend this string to output name', default="")
top_parser.add_argument('-ona', '--out_name_append', metavar='<string>', help='Append this string to output name', default="")
top_parser.add_argument('-uz', '--unzip', action='store_true', help='Uncompressed output files [compressed]')
top_parser.add_argument('-no', '--no_output', dest='no_output', action='store_true', help='Don\'t save output files [False]')
top_parser.add_argument('-ow', '--overwrite', action='store_true', help='Overwrite existing output files [skip]')
top_parser.add_argument('-dt', '--delete_temp', action='store_true', help='Delete temp files upon completion [False]')
# Mask arguments
top_parser.add_argument('-m', '--mask', dest='mask', metavar='<mask directory> or <mask file>', help='Mask directory or file', required=False)
top_parser.add_argument('-mf', '--mask_filter', dest='mn_filter', metavar='<filter>', help="If mask is a directory, filter files ['']", default='')
# Filters
top_parser.add_argument('-if', '--in_filter', dest='in_filter', metavar='<filter>', help="If input is a directory, filter files ['']", default='')
# Processing options
top_parser.add_argument('-ds', '--downsample', dest='downsample', metavar='<factor>', help='Downsampling factor [0, off]\n\t(Downsampling input files may speed up processing, at the expense of accuracy.)', default=0, type=float)
top_parser.add_argument('-par', '--parallel', dest='parallel', action="store_true", help='Use multiple cores to process in parallel using multiprocessing [off]')
top_parser.add_argument('-rev', '--reverse_input', dest='reverse_input', action="store_true", help='Run through input files in reverse order')
top_parser.add_argument('-v', '--verbose', action="store_true", help="Verbose output")
# Add command-specific arguments
parser = getattr(self, first_arg.command + '_args')(top_parser)
self.args = parser.parse_args(sys.argv[2:])
# Sanitise arguments & directories
self.args = self.sanitise_arguments(self.args)
if self.args.input:
# Pre-populate a list of relevant files.
self.args.input_files_list = mmfn.get_files_list(self.args.input_directory, self.args.input_name_filter, self.args.in_ext_filter)
if self.args.reverse_input:
self.args.input_files_list = self.args.input_files_list[::-1]
# Run
print("MouseMorph {0} will be run with arguments: \n\t{1}".format(first_arg.command, vars(self.args)))
time_start = notify_start()
getattr(self, first_arg.command)()
notify_complete(time_start)
# def sanitise_arguments(self, args):
# """Windows: only necessary if the user has supplied directories ending with a "\" (os.sep), which argparse assumes was intentional and adds the final user quote to the end of the string. The user shouldn't supply that "\", really, but just in case..."""
# for name in args.__dict__.keys():
# try:
# s = getattr(args, name)
# setattr(args, name, s.rstrip(os.sep).rstrip('"'))
# except AttributeError:
# pass
# else:
# break
# return args
def add_arg_csv(self, parser, req=False):
parser.add_argument('-csv', '--csv', dest='csv_path', metavar='<.CSV file path>', help='Path to .CSV file', required=req)
parser.add_argument('-col', '--column', dest='column', metavar='<int int ... int>', nargs='+', help='Column number(s) [0 1]', default=1, type=int, required=req)
return parser
def add_arg_mask(self, parser, req=False):
# Mask arguments
parser.add_argument('-m', '--mask', dest='mask', metavar='<mask directory> or <mask file>', help='Mask directory or file', required=req)
parser.add_argument('-mf', '--mask_filter', dest='mn_filter', metavar='<filter>', help="If mask is a directory, filter files ['']", default='')
return parser
def add_arg_list(self, parser, req=False):
parser.add_argument('-l', '--list', dest='list', metavar='<directory> or <.CSV file path>', help='Either a directory containing files whose names are to be matched, or a .CSV file whose Nth column will be used as a list of names [current]', required=req)
parser.add_argument('-lf', '--list_filter', dest='list_filter', metavar='<string>', help="String used to filter list input ['*']", default='*')
return parser
def bsi_args(self, parser):
"""These are the somewhat-unique requirements for BSI."""
parser.add_argument('-b', '--baseline', dest='baseline_path', metavar='<baseline NIfTI file path>', help='Full path of the baseline NIfTI image', required=True)
parser.add_argument('-r', '--repeat', dest='repeat_path', metavar='<repeat NIfTI file path>', help='Full path of the repeat NIfTI image', required=True)
parser.add_argument('-bm', '--baseline_mask', dest='baseline_mask_path', metavar='<baseline mask NIfTI file path>', help='Full path of the baseline NIfTI image mask', required=True)
parser.add_argument('-rm', '--repeat_mask', dest='repeat_mask_path', metavar='<repeat mask NIfTI file path>', help='Full path of the repeat NIfTI image mask', required=True)
return parser
def orient_args(self, parser):
# Add specific arguments
parser.add_argument('-at','--atlas', dest='atlas', metavar='<atlas>', help='Atlas directory containing NIfTIs, or a single file.', required=False)
parser.add_argument('-corr','--corresponding', dest='corresponding', metavar='<corresponding>', help='NIfTI-1 file, or directory of files, to be oriented in the same manner as their correspondingly-named files in input_directory. (As per "secondary" in FSL FLIRT.)')
parser.add_argument('-res','--resample', dest='resample', action='store_true', help='Also resample output files.')
parser.add_argument('--allsame', dest='allsame', action='store_true', help='Flag to indicate that all brains are in approximately the same initial orientation. Only the first image will be compared with an atlas and the rest will have the same gross orientation applied. Final minor corrections will be performed individually.')
parser.add_argument('--allpa', dest='allpa', action='store_true', help='Check all 12 possible principle axis orientations, in case AP is not the greatest dimension.')
# parser.add_argument('--allpa', dest='allpa', action='store_true', help='Check all 12 possible principle axis orientations, in case AP is not the greatest dimension.')
return parser
def nuc_args(self, parser):
# Add specific arguments
parser.add_argument('-its','--iterations', dest='iterations', metavar='<iterations>', help='Iterations to run [200]', default=200, type=int)
parser.add_argument('-fwhm', dest='fwhm', metavar='<fwhm>', help='Full width, half maximum [0.15]', default=0.15, type=float)
parser.add_argument('-ss', dest='subsample', metavar='<factor>', help='Subsampling factor [4]', default=4, type=int)
parser.add_argument('-nlevels', dest='nlevels', metavar='<nlevels>', help='Number of levels [4]', default=4, type=int)
parser.add_argument('-conv', dest='convergence', metavar='<convergence>', help='Convergence threshold [0.001]', default=0.001, type=float)
parser.add_argument('-nhb', dest='nhistbins', metavar='<nhistbins>', help='Number of histogram bins [256]', default=256, type=int)
return parser
def tails_type(self, str):
acceptable = ['one', 'two']
if str not in acceptable:
raise argparse.ArgumentTypeError("--tails argument must be 'one' or 'two' (default is two, if omitted)")
else:
return str
def power_args(self, parser):
parser.add_argument('--power', dest='power', metavar='<0 < float < 1>', help='Desired power, 1-beta [0.8]', default=0.8, type=float)
parser.add_argument('--significance', dest='significance', metavar='<0 < float < 1>', help='Desired significance level, alpha [0.05]', default=0.05, type=float)
parser.add_argument('--detect_difference', dest='detect_difference', metavar='<0 < float < 1>', help='Fractional difference from the control mean to detect', default=0.25, type=float)
parser.add_argument('--tails', dest='tails', metavar='<string>', help='Tails, one or two [two]', default='two', type=self.tails_type)
parser = self.add_arg_csv(parser, req=True)
parser.add_argument('--group', dest='csv_group_filter', metavar='<string>', help='Control group name filter [*]', default='')
return parser
def pair_args(self, parser):
parser = self.add_arg_list(parser, req=False)
parser.add_argument('-col', '--column', dest='column', metavar='<int>', help='Column number [0]', default=0, type=int, required=False)
parser.add_argument('-i2','--input_2', dest='input_2', metavar='<directory> or <file path>', help='Second input directory containing NIfTIs, or a single file.', required=True)
parser.add_argument('-fn', '--function', dest='function_name', metavar='<function>', help='MouseMorph function to run on each file', required=False)
return parser
def loop_args(self, parser):
parser.add_argument('-fn', '--function', dest='function_name', metavar='<function>', help='MouseMorph function to run on each file', required=False)
return parser
def seg_EM_args(self, parser):
# parser = self.add_arg_mask(parser, req=True)
parser.add_argument('-t', '--tpm', dest='tpm', metavar='<tpm directory> or <tpm file>', help='TPM directory or file', required=False)
parser.add_argument('-tf', '--tpm_filter', dest='tn_filter', metavar='<filter>', help="If tpm is a directory, filter files ['']", default='')
parser.add_argument('--priors4D', dest='priors4D', action='store_true', help='Use this flag if the priors are all single 4D NIfTIs rather than individual files per class', required=False)
parser.add_argument('--nopriors', dest='nopriors', metavar='<int>', help='Number of classes (no TPM inputs)', type=int)
parser.add_argument('--mrf_beta', dest='mrf_beta', metavar='<0 < float < 1>', help='MRF prior strength [0.4]', default=0.4, type=float)
parser.add_argument('--max_iter', dest='max_iter', metavar='<int>', help='Maximum number of iterations [100]', default=100, type=int)
parser.add_argument('--rf_rel', dest='rf_rel', metavar='<0 < float < 1>', help='Prior relaxation factor [0.5]', default=0.5, type=float)
parser.add_argument('--rf_gstd', dest='rf_gstd', metavar='<float>', help='Prior gaussian regularization [2.0]', default=2.0, type=float)
return parser
def sanitise_arguments(self, args):
""" """
args.in_ext_filter = '.nii*'
if not args.unzip:
args.ext = '.nii.gz'
else:
args.ext = '.nii'
if args.input:
if os.path.isdir(args.input):
# print (" Input {0} is a directory ...".format(args.input))
args.input_name_filter = '*' + args.in_filter + '*' # use wildcards if provided a directory alone
args.input_name_filter_exact = args.in_filter
args.input_directory = os.path.normpath(os.path.join(args.input))
elif os.path.isfile(args.input):
# print (" Input {0} is a file ...".format(args.input))
# Get the filename, removing path and 1+ extensions
args.input_name_filter = os.path.basename(args.input).split(os.extsep)[0]
args.input_directory = os.path.dirname(args.input)
else:
raise Exception("Input not recognised or does not exist: {0}".format(args.input))
else:
args.input_directory = os.getcwd()
if not args.no_output:
if args.output:
if os.path.isdir(args.output):
# print (" Output {0} is a directory ...".format(args.input))
args.output_directory = os.path.normpath(os.path.join(args.output))
else:
print "Specified output ({0}) is not a directory; creating it ...".format(args.output)
args.output_directory = os.path.normpath(os.path.join(args.output))
mmfn.check_create_directories([args.output_directory])
else:
print "No output directory specified. Setting to input directory ({0}) in case it is required.".format(args.input_directory)
args.output_directory = args.input_directory
if hasattr(args, 'input_2'):
if args.input_2:
if os.path.isdir(args.input_2):
# print (" Input 2 {0} is a directory ...".format(args.input))
args.input_name_filter_2 = '*' + args.in_filter + '*' # use wildcards if provided a directory alone
args.input_name_filter_exact_2 = args.in_filter
args.input_directory_2 = os.path.normpath(os.path.join(args.input_2))
elif os.path.isfile(args.input_2):
# print (" Input 2 {0} is a file ...".format(args.input))
# Get the filename, removing path and 1+ extensions
args.input_name_filter_2 = os.path.basename(args.input_2).split(os.extsep)[0]
args.input_directory_2 = os.path.dirname(args.input_2)
else:
raise Exception("Input 2 not recognised or does not exist: {0}".format(args.input_2))
else:
args.input_directory_2 = os.getcwd()
if hasattr(args, 'mask'):
if args.mask:
args.mask = os.path.normpath(args.mask)
if os.path.isdir(args.mask):
args.mask_name_filter = '*' + args.mn_filter + '*' # use wildcards if provided a directory alone
args.mask_name_filter_exact = args.mn_filter
args.mask_directory = os.path.normpath(os.path.join(args.mask))
elif os.path.isfile(args.mask):
# Get the filename, removing path and 1+ extensions
args.mask_name_filter = os.path.basename(args.mask).split(os.extsep)[0]
args.mask_directory = os.path.dirname(args.mask)
else:
args.mask_directory = None
if hasattr(args, 'tpm'):
if args.tpm:
args.tpm = os.path.normpath(args.tpm)
if os.path.isdir(args.tpm):
args.tpm_name_filter = '*' + args.tn_filter + '*' # use wildcards if provided a directory alone
args.tpm_name_filter_exact = args.tn_filter
args.tpm_directory = os.path.normpath(os.path.join(args.tpm))
elif os.path.isfile(args.tpm):
# Get the filename, removing path and 1+ extensions
args.tpm_name_filter = os.path.basename(args.tpm).split(os.extsep)[0]
args.tpm_directory = os.path.dirname(args.tpm)
else:
args.tpm_directory = None
# Either get a list of strings as file names from a directory, or from a given column of a .CSV file
if hasattr(args, 'list'):
if args.list:
args.list = os.path.normpath(args.list)
if os.path.isdir(os.path.normpath(args.list)):
args.list_names = mmfn.get_names_list(args.list, args.list_filter, extension=args.in_ext_filter)
# elif os.path.isfile(os.path.normpath(args.list)):
# args.column
return args
# Methods which actually run the command the user asked for
def nuc(self):
import mm_nuc_n4
mm_nuc_n4.go(self.args)
def orient(self):
import mm_orient
mm_orient.go(self.args)
def pair(self):
import mm_pair
mm_pair.go(self.args)
def power(self):
import mm_powercalc
mm_powercalc.go(self.args)
def loop(self):
import mm_loop
mm_loop.go(self.args)
def seg_EM(self):
import mm_seg_EM_group
mm_seg_EM_group.go(self.args)
def main():
mm = MouseMorph()
# print("{0}".format(mm.__dict__))
print("{0}".format(mm.args))
if __name__ == '__main__':
main()
# End | 3.265625 | 3 |
kdcrf/tests/test_template.py | fagonzalezo/sklearn-kdcrf | 0 | 12796967 | import pytest
import numpy as np
from sklearn.datasets import load_iris
from numpy.testing import assert_array_equal
from numpy.testing import assert_allclose
from .._kdclassifier import KDClassifierRF
from .._RBFSamplerORF import RBFSamplerORF
from .._RBFSamplerSORF import RBFSamplerSORF
@pytest.fixture
def data():
return load_iris(return_X_y=True)
def test_KDClassifierRF(data):
X, y = data
clf = KDClassifierRF()
assert hasattr(clf, 'approx')
assert hasattr(clf, 'normalize')
assert hasattr(clf, 'gamma')
assert hasattr(clf, 'n_components')
for approx in ['rff+','rff', 'lrff', 'lrff+', 'exact']:
clf = KDClassifierRF(approx=approx)
clf.fit(X, y)
assert hasattr(clf, 'classes_')
assert hasattr(clf, 'Xtrain_')
if clf.approx != 'exact':
assert hasattr(clf, 'rbf_sampler_')
y_pred = clf.predict(X)
assert y_pred.shape == (X.shape[0],)
def test_KDClassifierORF(data):
X, y = data
for approx in ['rff+','rff', 'lrff', 'lrff+', 'exact']:
clf = KDClassifierRF(approx=approx, sampler=RBFSamplerORF())
clf.fit(X, y)
assert hasattr(clf, 'classes_')
assert hasattr(clf, 'Xtrain_')
if clf.approx != 'exact':
assert hasattr(clf, 'rbf_sampler_')
y_pred = clf.predict(X)
assert y_pred.shape == (X.shape[0],)
def test_KDClassifierSORF(data):
X, y = data
for approx in ['rff+','rff', 'lrff', 'lrff+', 'exact']:
clf = KDClassifierRF(approx=approx, sampler=RBFSamplerSORF())
clf.fit(X, y)
assert hasattr(clf, 'classes_')
assert hasattr(clf, 'Xtrain_')
if clf.approx != 'exact':
assert hasattr(clf, 'rbf_sampler_')
y_pred = clf.predict(X)
assert y_pred.shape == (X.shape[0],)
| 2.28125 | 2 |
docs_build/tutorials_templates/data_management/data_versioning/mds.py | dataloop-ai/sdk_examples | 3 | 12796968 | def section1():
"""
# Data Versioning
Dataloop's powerful data versioning provides you with unique tools for data management - clone, merge, slice & dice your files, to create multiple versions for various applications. Sample use cases include:
    * Golden training sets management
    * Reproducibility (dataset training snapshot)
    * Experimentation (creating subsets from different kinds)
    * Task/Assignment management
    * Data Version "Snapshot" - Use our versioning feature as a way to save data (items, annotations, metadata) before any major process. For example, a snapshot can serve as a roll-back mechanism to original datasets in case of any error without losing the data.
## Clone Datasets
Cloning a dataset creates a new dataset with the same files as the original. Files are actually a reference to the original binary and not a new copy of the original, so your cloud data remains safe and protected. When cloning a dataset, you can add a destination dataset, remote file path, and more...
"""
def section2():
"""
## Merge Datasets
Dataset merging outcome depends on how similar or different the datasets are.
* Cloned Datasets - items, annotations, and metadata will be merged. This means that you will see annotations from different datasets on the same item.
* Different datasets (not clones) with similar recipes - items will be summed up, which will cause duplication of similar items.
* Datasets with different recipes - Datasets with different default recipes cannot be merged. Use the 'Switch recipe' option on dataset level (3-dots action button) to match recipes between datasets and be able to merge them.
"""
| 2.984375 | 3 |
applied_python/applied_python/lib/python2.7/site-packages/ansible/runner/lookup_plugins/dict.py | mith1979/ansible_automation | 1 | 12796969 | <gh_stars>1-10
# (c) 2014, <NAME> <<EMAIL>>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.utils import safe_eval
import ansible.utils as utils
import ansible.errors as errors
def flatten_hash_to_list(terms):
ret = []
for key in terms:
ret.append({'key': key, 'value': terms[key]})
return ret
class LookupModule(object):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
def run(self, terms, inject=None, **kwargs):
terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
if not isinstance(terms, dict):
raise errors.AnsibleError("with_dict expects a dict")
return flatten_hash_to_list(terms)
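
# Illustrative sketch (not part of Ansible): `with_dict` hands the task a dict,
# and flatten_hash_to_list reshapes it into the items the loop iterates over.
# Example (dict ordering may vary on Python 2):
#   flatten_hash_to_list({'alice': {'uid': 1}, 'bob': {'uid': 2}})
#   -> [{'key': 'alice', 'value': {'uid': 1}},
#       {'key': 'bob', 'value': {'uid': 2}}]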
| 2.03125 | 2 |
2015/day_14/reindeers.py | ceronman/AdventOfCode2015 | 4 | 12796970 | from itertools import cycle, islice
import re
distances = {}
for line in open('input.txt'):
m = re.match(r'(\w+) can fly (\d+) km/s for (\d+) seconds, '
r'but then must rest for (\d+) seconds.', line)
reindeer, speed, time, rest = m.groups()
distances[reindeer] = cycle([int(speed)] * int(time) + [0] * int(rest))
print(max(sum(islice(seconds, 0, 2503)) for seconds in distances.values()))
| 3.046875 | 3 |
bliss/urls.py | jugovich/teresajugovich | 0 | 12796971 | <reponame>jugovich/teresajugovich
from django.conf.urls import include, url
from dolove import views
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
admin.autodiscover()
urlpatterns = [
url(r'^$', views.home, name='home'),
url(r'about$', views.about, name='about'),
url(r'doula_description$', views.doula_description, name='doula_description'),
url(r'doula_services$', views.doula_services, name='doula_services'),
url(r'photo_gallery$', views.photo_gallery, name='photo_gallery'),
url(r'photo_price$', views.photo_price, name='photo_price'),
url(r'yoga_class$', views.yoga_class, name='yoga_class'),
url(r'yoga_locations$', views.yoga_locations, name='yoga_locations'),
url(r'yoga_schedule$', views.yoga_schedule, name='yoga_schedule'),
url(r'yoga_price$', views.yoga_price, name='yoga_price'),
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', include(admin.site.urls))
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| 1.859375 | 2 |
Day-32/Birthday_Wisher/main.py | MihirMore/100daysofcode-Python | 4 | 12796972 | import datetime as dt
import pandas
import random
import smtplib
MY_EMAIL = "your_email"
PASSWORD = "<PASSWORD>"
today = dt.datetime.now()
today_tuple = (today.month, today.day)
data = pandas.read_csv("birthdays.csv")
birthday_dict = {(data_row.month, data_row.day): data_row for (index, data_row) in data.iterrows()}
if today_tuple in birthday_dict:
birthday_person = birthday_dict[today_tuple]
file_path = f"letter_templates/letter_{random.randint(1, 3)}.txt"
with open(file_path) as letter:
contents = letter.read()
contents = contents.replace("[NAME]", birthday_person["name"])
with smtplib.SMTP_SSL("smtp.gmail.com") as connection:
connection.login(MY_EMAIL, PASSWORD)
connection.sendmail(from_addr=MY_EMAIL, to_addrs=birthday_person["email"], msg=f"Subject: Happy Birthday! \n\n"
f"{contents}") | 3.296875 | 3 |
pyodesys/convergence.py | slayoo/pyodesys | 82 | 12796973 | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
import warnings
from math import exp
import numpy as np
def fit_factory(discard=1):
def fit(x, y):
p = np.polyfit(x, y, 1)
v = np.polyval(p, x)
e = np.abs(y - v)
        drop_idxs = np.argsort(e)[-discard:]  # indices of the `discard` largest residuals
return np.polyfit(np.delete(x, drop_idxs),
np.delete(y, drop_idxs), 1)
return fit
def integrate_tolerance_series(odesys, atols, rtols, x, y0, params=(),
fit=lambda x, y: np.polyfit(x, y, 1), val=np.polyval, **kwargs):
"""
Parameters
----------
odesys : :class:`ODESys`
atols : array_like
Positive, monotonically increasing 1D array.
rtols : array_like
Positive, monotonically increasing 1D array.
x : array_like
Passed on to ``odesys.integrate`` for first set of tolerances.
(subsequent calls will use xout from first integration).
y0 : array_like
Passed on to ``odesys.integrate``.
params : array_like
Passed on to ``odesys.integrate``.
fit : callable
val : callable
\\*\\*kwargs:
Passed on to ``odesys.integrate``.
Returns
-------
result0 : Result
results : list of Result instances
extra : dict
errest : 2D array of error estimates for result0.yout
"""
if atols is None:
atols = rtols
if rtols is None:
rtols = atols
atols, rtols = map(np.asarray, (atols, rtols))
if atols.ndim != 1:
raise NotImplementedError("Assuming 1-dimensional array")
if atols.shape != rtols.shape:
raise ValueError("atols & rtols need to be of same length")
if 'atol' in kwargs or 'rtol' in kwargs:
raise ValueError("Neither atol nor rtol are allowed in kwargs")
if not np.all(atols > 0) or not np.all(rtols > 0):
raise ValueError("atols & rtols need to > 0")
if not np.all(np.diff(atols) > 0) or not np.all(np.diff(rtols) > 0):
raise ValueError("atols & rtols need to obey strict positive monotonicity")
if atols.size < 4:
raise ValueError("Pointless doing linear interpolation on less than 3 points")
if atols.size < 6:
warnings.warn("Statistics will be (very) shaky when doing linear "
"interpolation on less than 5 points.")
ntols = atols.size
result0 = odesys.integrate(x, y0, params, atol=atols[0], rtol=rtols[0], **kwargs)
results = [odesys.integrate(result0.xout, y0, params, atol=atols[i], rtol=rtols[i], **kwargs)
for i in range(1, ntols)]
errest = []
for ix, vx in enumerate(result0.xout):
diffs = np.array([result0.yout[ix, :] - r.yout[ix, :] for r in results])
tols = np.array([atol + rtol*np.abs(r.yout[ix, :]) for r, atol, rtol in
zip([result0] + results, atols, rtols)])
ln_tols = np.log(tols).astype(np.float64)
ln_absd = np.log(np.abs(diffs)).astype(np.float64)
yerrs = []
for iy in range(result0.yout.shape[-1]):
if np.all(diffs[:, iy] == 0):
yerrs.append(0)
else:
p = fit(ln_tols[1:, iy], ln_absd[:, iy])
yerrs.append(exp(val(p, ln_tols[0, iy])))
errest.append(yerrs)
return result0, results, {'errest': np.array(errest)}
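
# Hedged usage sketch: assumes `odesys` is an existing pyodesys ODESys instance
# whose integrate() accepts the arguments forwarded above; the tolerance ladder
# below is illustrative (strictly increasing, length >= 6 to avoid the warning):
#
#   atols = rtols = [10**-i for i in range(10, 4, -1)]
#   res0, others, extra = integrate_tolerance_series(
#       odesys, atols, rtols, [0, 1], [1.0], params=(0.5,),
#       fit=fit_factory(discard=1))
#   extra['errest']  # per-point, per-component error estimate for res0.yout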
| 2.8125 | 3 |
supports/pyload/src/pyload/plugins/accounts/PremiumizeMe.py | LuckyNicky/pycrawler | 1 | 12796974 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import json
from ..base.multi_account import MultiAccount
class PremiumizeMe(MultiAccount):
__name__ = "PremiumizeMe"
__type__ = "account"
__version__ = "0.30"
__status__ = "testing"
__pyload_version__ = "0.5"
__config__ = [
("mh_mode", "all;listed;unlisted", "Filter hosters to use", "all"),
("mh_list", "str", "Hoster list (comma separated)", ""),
("mh_interval", "int", "Reload interval in hours", 12),
]
__description__ = """Premiumize.me account plugin"""
__license__ = "GPLv3"
__authors__ = [
("<NAME>", "<EMAIL>"),
("GammaC0de", "nitzo2001[AT]yahoo[DOT]com"),
]
# See https://www.premiumize.me/static/api/api.html
API_URL = "https://api.premiumize.me/pm-api/v1.php"
def api_respond(self, method, user, password, **kwargs):
get_params = {"method": method, "params[login]": user, "params[pass]": password}
for key, val in kwargs.items():
get_params["params[{}]".format(key)] = val
json_data = self.load(self.API_URL, get=get_params)
return json.loads(json_data)
def grab_hosters(self, user, password, data):
res = self.api_respond("hosterlist", user, password)
if res["status"] != 200:
return []
return res["result"]["tldlist"]
def grab_info(self, user, password, data):
validuntil = None
trafficleft = None
premium = False
res = self.api_respond("accountstatus", user, password)
if res["status"] == 200:
validuntil = float(res["result"]["expires"])
# TODO: Remove `>> 10` in 0.6.x
trafficleft = max(0, res["result"]["trafficleft_bytes"] >> 10)
if res["result"]["type"] != "free":
premium = True
return {
"validuntil": validuntil,
"trafficleft": trafficleft,
"premium": premium,
}
def signin(self, user, password, data):
res = self.api_respond("accountstatus", user, password)
if res["status"] != 200:
self.fail_login(res["statusmessage"])
| 2.203125 | 2 |
catalog/urls.py | gcrsaldanha/sms | 0 | 12796975 | <gh_stars>0
from django.urls import path
from . import views
urlpatterns = [
path('item/', views.ItemListCreate.as_view()),
]
| 1.4375 | 1 |
setup.py | tsurusekazuki/Create_Package | 0 | 12796976 | # -*- coding: utf-8 -*-
# Learn more: https://github.com/kennethreitz/setup.py
import os
from typing import List  # needed for the `-> List` annotation below

from setuptools import setup, find_packages
def read_requirements() -> List:
"""Parse requirements from requirements.txt."""
reqs_path = os.path.join('.', 'requirements.txt')
with open(reqs_path, 'r') as f:
requirements = [line.rstrip() for line in f]
return requirements
# Reconstructed from the referenced kennethreitz template so `readme` and
# `license` below are defined (assumes README.rst and LICENSE files exist).
with open('README.rst') as f:
    readme = f.read()

with open('LICENSE') as f:
    license = f.read()

setup(
name='sample',
version='0.1.0',
description='Sample package for Python-Guide.org',
long_description=readme,
author='<NAME>',
author_email='<EMAIL>',
install_requires=read_requirements(),
url='test',
license=license,
packages=find_packages(exclude=('tests', 'docs'))
)
| 1.90625 | 2 |
tests/test_models.py | kalev/flatpak-indexer | 6 | 12796977 | from flatpak_indexer.models import FlatpakBuildModel, ImageModel, ImageBuildModel, RegistryModel
IMAGE1 = {
"Annotations": {"key1": "value1"},
"Architecture": "amd64",
"Digest": "sha256:baabaa",
"Labels": {"key2": "value2"},
"MediaType": "application/vnd.docker.distribution.manifest.v2+json",
"OS": "linux",
"Tags": ["tag1"],
'PullSpec': 'candidate-registry.fedoraproject.org/baobab@sha256:12345'
}
IMAGE2 = {
"Annotations": {"key1": "value1"},
"Architecture": "ppc64le",
"Digest": "sha256:beebee",
"Labels": {"key2": "value2"},
"MediaType": "application/vnd.docker.distribution.manifest.v2+json",
"OS": "linux",
"Tags": ["tag2"]
}
LIST1 = {
"Digest": "sha256:booboo",
"MediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
"Images": [IMAGE1, IMAGE2],
"Tags": ["latest"],
}
REGISTRY = {
"Repositories": [
{
"Name": "aisleriot",
"Images": [
IMAGE1,
IMAGE2,
],
"Lists": [
LIST1
],
}
]
}
IMAGE_BUILD = {
'BuildId': 12345,
'Nvr': 'testrepo-1.2.3-1',
'Source': 'git://src.fedoraproject.org/flatpaks/baobab#BAOBAB_GIT_DIGEST',
'CompletionTime': '2020-07-31T16:26:22+00:00',
'UserName': 'jdoe',
'Images': [IMAGE1]
}
FLATPAK_BUILD = {
'BuildId': 12345,
'Nvr': 'testrepo-1.2.3-1',
'Source': 'git://src.fedoraproject.org/flatpaks/baobab#BAOBAB_GIT_DIGEST',
'CompletionTime': '2020-07-31T16:26:22+00:00',
'UserName': 'jdoe',
'Images': [IMAGE1],
'ModuleBuilds': ['baobab-1.2.3-3020190603102507'],
'PackageBuilds': ['baobab-1.2.3-1'],
}
def test_registry_model():
model = RegistryModel.from_json(REGISTRY)
json = model.to_json()
assert json == REGISTRY
def test_registry_model_add_image():
model = RegistryModel.from_json(REGISTRY)
image = ImageModel.from_json(IMAGE1)
model.add_image('aisleriot2', image)
assert model.repositories['aisleriot2'].images[image.digest] == image
def test_image_build_repository():
image = ImageBuildModel.from_json(IMAGE_BUILD)
assert image.repository == 'baobab'
def test_image_build_from_json():
image = ImageBuildModel.from_json(IMAGE_BUILD)
assert isinstance(image, ImageBuildModel)
flatpak = ImageBuildModel.from_json(FLATPAK_BUILD)
assert isinstance(flatpak, FlatpakBuildModel)
| 1.734375 | 2 |
IMTool/IMUtil.py | naong2/MayaPython | 0 | 12796978 | <reponame>naong2/MayaPython
#-*- coding: utf-8 -*-
import sys, os
import maya.cmds as cmds
import maya.mel as mel
from inspect import getsourcefile
from os.path import abspath
import maya.app.general.resourceBrowser as resourceBrowser
# Functions
#############################################################################
def convertSlash(_path):
    u'''
    Converts backslashes to forward slashes in a file path pasted straight from Windows.
    Alternatively, prefix the path literal with r.
    '''
return _path.replace("\\","/")
def clearOutput():
u'''
Clear History for Script Editor
'''
cmds.scriptEditorInfo(clearHistory=True)
def forPrint(_list):
u'''
    Simply prints each item of the list.
'''
for item in _list:
print item
# Get a list of files
def get_dirfiles(_path):
file_list = []
for r, d, f in os.walk(_path):
for file in f:
            # only when paths match (i.e. pick only files directly in this folder, not subfolders)
if r == _path:
file_list.append(os.path.join(r,file).replace('\\','/'))
return file_list
def get_dirfiles_sub(_path):
'''
    Includes subfolders
'''
file_list = []
for r, d, f in os.walk(_path):
for file in f:
file_list.append(os.path.join(r,file).replace('\\','/'))
return file_list
# Paths
#############################################################################
def getPathMayaFile():
u'''
    Absolute path of the current Maya scene file
'''
    # Get the full path including the file name
return cmds.file(q=True, sn=True)
def getNameMayaFile():
u'''
    Name of the current Maya scene file
'''
    # Get the file name: split the full path on '/' and take the last element (unicode)
return cmds.file(q=True, sn=True).split('/')[-1]
def getPathFindFile(_filename):
u'''
    Returns the path if the file exists in MAYA_SCRIPT_PATH, otherwise None.
    Subfolders under the script directories are not searched.
'''
    # scriptPath = os.environ['MAYA_SCRIPT_PATH']  # alternative way to read the environment variable
scriptPaths = mel.eval('getenv "MAYA_SCRIPT_PATH"').split(";")
if not scriptPaths:
return None
for path in scriptPaths:
testPath = path +'/' + _filename
if os.path.exists(testPath):
return testPath
return None
def get_maya_AppDir():
'''
C:/Users/rationalcat/Documents/maya/
:return:
'''
return cmds.internalVar(userAppDir=True)
def getPathDocumentMaya():
u'''
    * Also works well from DOS
C:/Users/rationalcat/Documents/maya/
'''
if os.environ.get('MAYA_APP_DIR'):
maya_app_dir = os.environ['MAYA_APP_DIR']
return maya_app_dir
if os.environ.get('HOME'):
home = os.environ['HOME']
else:
home = os.environ['USERPROFILE']
return os.path.realpath(os.path.join(home, 'Documents/maya'))
def getPathDocumentMaya_Add(path):
u'''
    * Also works well from DOS
    Returns the path of the user's Documents/maya folder
'''
if os.environ.get('MAYA_APP_DIR'):
maya_app_dir = os.environ['MAYA_APP_DIR']
if path != '':
return os.path.join(maya_app_dir, path)
else:
return maya_app_dir
if os.environ.get('HOME'):
home = os.environ['HOME']
else:
home = os.environ['USERPROFILE']
if path != '':
return os.path.join(os.path.realpath(os.path.join(home, 'Documents/maya')),path)
else:
return os.path.realpath(os.path.join(home, 'Documents/maya'))
def getPathMaya2018Script():
u'''
    Returns the maya/2018/scripts path
'''
scriptPaths = mel.eval('getenv "MAYA_SCRIPT_PATH"').split(";")
for path in scriptPaths:
if path.find('markingMenus') != -1:
return path.replace('prefs/markingMenus','scripts')
def getPathIMTool():
u'''
C:/Users/rationalcat/Documents/maya/scripts/IMTool/utility.py
abspath(getsourcefile(lambda:0)).replace("\\","/")
'''
#scriptPath = scriptPath.replace('/IMUtility/common.py','')
return abspath(getsourcefile(lambda:0)).replace("\\","/").replace('/IMUtil.py','')
def get_module_dir_path():
'''
    The IMTool folder
:return:
'''
return (os.path.dirname(__file__).replace("\\","/"))
# Info
#############################################################################
def getMayaVersion():
u'''
    Returns the Maya version.
    An int value such as 2018.
'''
return int(cmds.about(v = True).split("-")[0].split(" ")[0])
def get_resource_path():
resource_browser = resourceBrowser.resourceBrowser()
resource_path = resource_browser.run()
return resource_path
| 2.359375 | 2 |
but/trades/apps.py | yevgnenll/but | 4 | 12796979 | from django.apps import AppConfig
class AppConfigTrade(AppConfig):
name = "trades"
def ready(self):
from trades.signals.post_save import post_save_hashid
| 1.65625 | 2 |
set3/crack_MT19937_seed.py | nahgil2614/cryptopals | 0 | 12796980 | import time
import random
from MT19937 import seed_mt
from MT19937 import extract_number
def delay( seconds ):  # unlike time.sleep(), this busy-wait loop can be interrupted with Ctrl+C
start = time.time()
while time.time() - start < seconds:
pass
def main():
start_time = time.time()
print('Pending...')
delay(random.randint(40,1000))
timestamp = int(time.time())
seed_mt(timestamp)
rand = extract_number()
delay(random.randint(40,1000))
print('\nFirst output of the RNG: ' + str(rand))
print('\nNow I will try to discover the seed the program has taken')
print('given the fact that I know it used MT19937!')
print('\nNow cracking...')
test = int(time.time())
seed_mt(test)
first = extract_number()
while first != rand:
test -= 1
seed_mt(test)
first = extract_number()
print('Haha, the time seed is ' + str(test) + ', isn\'t it?')
if test == timestamp:
print('Congratulation! You have broken my super-insecure randomness using timestamp!')
else:
print('Huh? That\'s all you have? The real timestamp is ' + str(timestamp))
print('Poor you.')
time_elapsed = time.time() - start_time
print('Time elapsed: ' + str(time_elapsed))
if __name__ == '__main__':
main()
| 3.03125 | 3 |
guild/commands/main_impl.py | guildai/guild-cli | 63 | 12796981 | <reponame>guildai/guild-cli
# Copyright 2017-2022 RStudio, PBC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from guild import cli
from guild import config
from guild import log
from guild import util
def main(args):
_init_logging(args)
config.set_cwd(_cwd(args))
config.set_guild_home(_guild_home(args))
_apply_guild_patch()
_register_cmd_context_handlers()
def _init_logging(args):
log_level = args.log_level or logging.INFO
log.init_logging(log_level)
log.disable_noisy_loggers(log_level)
def _cwd(args):
return _validated_dir(args.cwd)
def _guild_home(args):
return _validated_dir(args.guild_home, abs=True, create=True, guild_nocopy=True)
def _validated_dir(path, abs=False, create=False, guild_nocopy=False):
path = os.path.expanduser(path)
if abs:
path = os.path.abspath(path)
if not os.path.exists(path):
if create:
util.ensure_dir(path)
else:
cli.error("directory '%s' does not exist" % path)
if not os.path.isdir(path):
cli.error("'%s' is not a directory" % path)
if guild_nocopy:
util.ensure_file(os.path.join(path, ".guild-nocopy"))
return path
def _apply_guild_patch():
"""Look in config cwd for guild_patch.py and load if exists."""
patch_path = os.path.join(config.cwd(), "guild_patch.py")
if os.path.exists(patch_path):
from guild import python_util
python_util.exec_script(patch_path)
def _register_cmd_context_handlers():
"""Register command context handlers.
Command context handlers can be used to respond to start and stop
of Guild commands.
Currently Guild supports one handler type - socket notification of
command info. This can be used to monitor Guild commands by
setting the `GUILD_CMD_NOTIFY_PORT` env var to a port of a socket
server. See `guild.cmd_notify` for details.
"""
_maybe_register_cmd_notify()
def _maybe_register_cmd_notify():
port = _try_cmd_notify_port()
if port:
from guild import cmd_notify
cmd_notify.init_cmd_context_handler(port)
def _try_cmd_notify_port():
port = os.getenv("GUILD_CMD_NOTIFY_PORT")
if not port:
return None
try:
return int(port)
except ValueError:
raise SystemExit(
"invalid value for GUILD_CMD_NOTIFY_PORT %r: must "
"be a valid numeric port" % port
)
| 1.867188 | 2 |
demos/python/sdk_wireless_camera_control/tests/test_wifi_commands.py | hypoxic/OpenGoPro | 1 | 12796982 | <gh_stars>1-10
# test_wifi_commands.py/Open GoPro, Version 1.0 (C) Copyright 2021 GoPro, Inc. (http://gopro.com/OpenGoPro).
# This copyright was auto-generated on Tue May 18 22:08:51 UTC 2021
from pathlib import Path
import pytest
from open_gopro.wifi_commands import WifiCommands, WifiSettings, WifiCommunicator
from open_gopro import params
@pytest.fixture
def wifi():
class Communicator(WifiCommunicator):
def __init__(self):
self.commands = WifiCommands(self)
self.settings = WifiSettings(self)
def get(self, url: str):
return url
def stream_to_file(self, url: str, file: Path):
return url, file
yield Communicator()
def test_get_with_no_params(wifi):
url = wifi.commands.set_third_party_client_info()
assert url == "gp/gpControl/command/set_client_info"
def test_get_with_params(wifi):
zoom = 99
url = wifi.commands.set_digital_zoom(zoom)
assert url == f"gopro/camera/digital_zoom?percent={zoom}"
def test_get_binary(wifi):
file = wifi.commands.download_file(camera_file="test_file", local_file=Path("local_file"))
assert file.name == "local_file"
| 2.25 | 2 |
sources/praline/client/project/pipeline/stages/load_clang_format.py | dansandu/praline | 0 | 12796983 | from praline.client.project.pipeline.stage_resources import StageResources
from praline.client.project.pipeline.stages.stage import stage
from praline.client.repository.remote_proxy import RemoteProxy
from praline.common.file_system import FileSystem, join
from typing import Any, Dict
clang_format_style_file_contents = """\
Language: Cpp
AccessModifierOffset: -4
AlignTrailingComments: true
AllowShortBlocksOnASingleLine: false
AllowShortFunctionsOnASingleLine: None
AlwaysBreakTemplateDeclarations: true
BreakBeforeBraces: Allman
ColumnLimit: 120
ConstructorInitializerAllOnOneLineOrOnePerLine: true
FixNamespaceComments: false
IndentWidth: 4
PointerAlignment: Left
ReflowComments: true
SortIncludes: true
SortUsingDeclarations: true
SpaceAfterTemplateKeyword: false
SpacesInAngles: false
UseTab: Never
"""
class ClangFormatConfigurationError(Exception):
pass
def predicate(file_system: FileSystem, program_arguments: Dict[str, Any], configuration: Dict[str, Any]):
return not program_arguments['global']['skip_formatting']
@stage(requirements=[['project_directory']],
output=['clang_format_style_file', 'clang_format_executable'],
predicate=predicate)
def load_clang_format(file_system: FileSystem, resources: StageResources, cache: Dict[str, Any], program_arguments: Dict[str, Any], configuration: Dict[str, Any], remote_proxy: RemoteProxy):
if 'clang-format-executable-path' in configuration:
clang_format_executable = configuration['clang-format-executable-path']
if not file_system.is_file(clang_format_executable):
raise ClangFormatConfigurationError(f"user supplied clang-format '{clang_format_executable}' is not a file")
else:
clang_format_executable = file_system.which('clang-format')
if clang_format_executable is None:
raise ClangFormatConfigurationError("coudn't find clang-format in path -- either supply it in the praline-client.config file or add it to the path environment variable")
project_directory = resources['project_directory']
resources['clang_format_executable'] = clang_format_executable
resources['clang_format_style_file'] = clang_format_style_file = join(project_directory, '.clang-format')
file_system.create_file_if_missing(clang_format_style_file, clang_format_style_file_contents)
| 1.90625 | 2 |
peacecorps/contenteditor/forms.py | cmc333333/peacecorps-site | 8 | 12796984 | <reponame>cmc333333/peacecorps-site
import logging
from django import forms
from django.contrib.admin.forms import AdminAuthenticationForm
from django.contrib.auth.forms import (
AdminPasswordChangeForm, PasswordChangeForm, UserCreationForm)
from django.utils.translation import ugettext_lazy as _
from contenteditor import models
class LoggingAuthenticationForm(AdminAuthenticationForm):
"""Override login form to log attempts"""
def clean(self):
logger = logging.getLogger("peacecorps.login")
try:
cleaned = super(LoggingAuthenticationForm, self).clean()
if cleaned.get('password'):
logger.info("%s successfully logged in",
self.cleaned_data['username'])
else:
logger.warn("Failed login attempt for %s",
self.cleaned_data.get('username'))
return cleaned
except forms.ValidationError:
logger.warn("Failed login attempt for %s",
self.cleaned_data.get('username'))
raise
class StrictUserCreationForm(UserCreationForm):
password1 = forms.CharField(
label=_("Password"), widget=forms.PasswordInput, help_text=_("""
Enter a password. Requirements include: at least 20 characters,
at least one uppercase letter, at least one lowercase letter, at
least one number, and at least one special character.
"""))
def clean_password1(self):
"""Adds to the default password validation routine in order to enforce
stronger passwords"""
password = self.cleaned_data['password1']
errors = models.password_errors(password)
# If password_validator returns errors, raise an error, else proceed.
if errors:
raise forms.ValidationError('\n'.join(errors))
else:
return password
class StrictAdminPasswordChangeForm(AdminPasswordChangeForm):
"""Password form for editing a user"""
password1 = forms.CharField(
label=_("Password"), widget=forms.PasswordInput, help_text=_("""
Enter a password. Requirements include: at least 20 characters,
at least one uppercase letter, at least one lowercase letter, at
least one number, and at least one special character.
"""))
def clean_password1(self):
"""Adds to the default password validation routine in order to enforce
stronger passwords"""
password = self.cleaned_data['password1']
errors = models.password_errors(password)
# Also check that this is a new password
if self.user.check_password(self.cleaned_data['password1']):
errors.append("Must not reuse a password")
# If password_validator returns errors, raise an error, else proceed.
if errors:
raise forms.ValidationError('\n'.join(errors))
else:
return password
def save(self):
user = super(StrictAdminPasswordChangeForm, self).save()
user.extra.password_expires = models.expires()
user.extra.save()
return user
class StrictPasswordChangeForm(PasswordChangeForm):
"""Password form residing at /admin/password_change"""
new_password1 = forms.CharField(
label=_("New password"), widget=forms.PasswordInput, help_text=_("""
Enter a password. Requirements include: at least 20 characters,
at least one uppercase letter, at least one lowercase letter, at
least one number, and at least one special character.
"""))
def clean_new_password1(self):
"""Adds to the default password validation routine in order to enforce
stronger passwords"""
password = self.cleaned_data['<PASSWORD>']
errors = models.password_errors(password)
# Also check that this is a new password
if self.user.check_password(self.cleaned_data['<PASSWORD>1']):
errors.append("Must not reuse a password")
# If password_validator returns errors, raise an error, else proceed.
if errors:
raise forms.ValidationError('\n'.join(errors))
else:
return password
def save(self):
user = super(StrictPasswordChangeForm, self).save()
user.extra.password_expires = models.expires()
user.extra.save()
return user
| 2.4375 | 2 |
tests/test_slack_neuron.py | royto/slack_neuron | 0 | 12796985 | import unittest
from kalliope.core.NeuronModule import MissingParameterException
from kalliope.neurons.slack.slack import Slack
class TestSlack(unittest.TestCase):
def setUp(self):
self.slack_token="<PASSWORD>"
self.channel = "kalliochannel"
self.message = "kalliomessage"
def testParameters(self):
def run_test(parameters_to_test):
with self.assertRaises(MissingParameterException):
Slack(**parameters_to_test)
# empty
parameters = dict()
run_test(parameters)
# missing message
parameters = {
"slack_token": self.slack_token,
"channel": self.channel,
}
run_test(parameters)
# missing slack_token
parameters = {
"channel": self.channel,
"message": self.message
}
run_test(parameters)
# missing channel
parameters = {
"slack_token": self.slack_token,
"message": self.message
}
run_test(parameters) | 2.640625 | 3 |
lib/googlecloudsdk/api_lib/spanner/instance_configs.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2 | 12796986 | # -*- coding: utf-8 -*- #
# Copyright 2016 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Spanner instanceConfigs API helper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.command_lib.ai import errors
from googlecloudsdk.command_lib.util.args import labels_util
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
import six
def Get(config):
"""Get the specified instance config."""
client = apis.GetClientInstance('spanner', 'v1')
msgs = apis.GetMessagesModule('spanner', 'v1')
ref = resources.REGISTRY.Parse(
config,
params={'projectsId': properties.VALUES.core.project.GetOrFail},
collection='spanner.projects.instanceConfigs')
req = msgs.SpannerProjectsInstanceConfigsGetRequest(
name=ref.RelativeName())
return client.projects_instanceConfigs.Get(req)
def List():
"""List instance configs in the project."""
client = apis.GetClientInstance('spanner', 'v1')
msgs = apis.GetMessagesModule('spanner', 'v1')
req = msgs.SpannerProjectsInstanceConfigsListRequest(
parent='projects/'+properties.VALUES.core.project.GetOrFail())
return list_pager.YieldFromList(
client.projects_instanceConfigs,
req,
field='instanceConfigs',
batch_size_attribute='pageSize')
def Delete(config, etag=None, validate_only=False):
"""Delete an instance config."""
client = apis.GetClientInstance('spanner', 'v1')
msgs = apis.GetMessagesModule('spanner', 'v1')
ref = resources.REGISTRY.Parse(
config,
params={'projectsId': properties.VALUES.core.project.GetOrFail},
collection='spanner.projects.instanceConfigs')
req = msgs.SpannerProjectsInstanceConfigsDeleteRequest(
name=ref.RelativeName(), etag=etag, validateOnly=validate_only)
return client.projects_instanceConfigs.Delete(req)
def Create(config,
display_name,
base_config,
replicas,
validate_only,
labels=None,
etag=None):
"""Create instance configs in the project."""
client = apis.GetClientInstance('spanner', 'v1')
msgs = apis.GetMessagesModule('spanner', 'v1')
project_ref = resources.REGISTRY.Create(
'spanner.projects', projectsId=properties.VALUES.core.project.GetOrFail)
config_ref = resources.REGISTRY.Parse(
config,
params={'projectsId': properties.VALUES.core.project.GetOrFail},
collection='spanner.projects.instanceConfigs')
replica_info = []
for replica in replicas:
# TODO(b/399093071): Change type to ReplicaInfo.TypeValueValuesEnum instead
# of str.
replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.TYPE_UNSPECIFIED
if replica['type'] == 'READ_ONLY':
replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.READ_ONLY
elif replica['type'] == 'READ_WRITE':
replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.READ_WRITE
elif replica['type'] == 'WITNESS':
replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.WITNESS
replica_info.append(
msgs.ReplicaInfo(location=replica['location'], type=replica_type))
# TODO(b/399093071): Implement --replicas-file option.
labels_message = {}
if labels is not None:
labels_message = msgs.InstanceConfig.LabelsValue(additionalProperties=[
msgs.InstanceConfig.LabelsValue.AdditionalProperty(
key=key, value=value) for key, value in six.iteritems(labels)
])
instance_config = msgs.InstanceConfig(
name=config_ref.RelativeName(),
displayName=display_name,
baseConfig=base_config,
labels=labels_message,
replicas=replica_info)
if etag:
instance_config.etag = etag
req = msgs.SpannerProjectsInstanceConfigsCreateRequest(
parent=project_ref.RelativeName(),
instanceConfigId=config,
instanceConfig=instance_config,
validateOnly=validate_only)
return client.projects_instanceConfigs.Create(req)
def Patch(args):
"""Update an instance config."""
client = apis.GetClientInstance('spanner', 'v1')
msgs = apis.GetMessagesModule('spanner', 'v1')
ref = resources.REGISTRY.Parse(
args.config,
params={'projectsId': properties.VALUES.core.project.GetOrFail},
collection='spanner.projects.instanceConfigs')
instance_config = msgs.InstanceConfig(name=ref.RelativeName())
update_mask = []
if args.display_name is not None:
instance_config.displayName = args.display_name
update_mask.append('display_name')
if args.etag is not None:
instance_config.etag = args.etag
def GetLabels():
req = msgs.SpannerProjectsInstanceConfigsGetRequest(name=ref.RelativeName())
return client.projects_instanceConfigs.Get(req).labels
labels_update = labels_util.ProcessUpdateArgsLazy(
args, msgs.InstanceConfig.LabelsValue, GetLabels)
if labels_update.needs_update:
instance_config.labels = labels_update.labels
update_mask.append('labels')
if not update_mask:
raise errors.NoFieldsSpecifiedError('No updates requested.')
req = msgs.SpannerProjectsInstanceConfigsPatchRequest(
name=ref.RelativeName(),
instanceConfig=instance_config,
updateMask=','.join(update_mask),
validateOnly=args.validate_only)
return client.projects_instanceConfigs.Patch(req)
| 1.890625 | 2 |
experiments/enumerate_params.py | falkben/fastapi_experiments | 4 | 12796987 | <reponame>falkben/fastapi_experiments<gh_stars>1-10
from enum import Enum
from typing import Optional
import uvicorn
from fastapi import FastAPI, Query
app = FastAPI()
class NameEnum(str, Enum):
empty = ""
bob = "bob"
doug = "doug"
@app.get("/hello")
async def hello(name: NameEnum = Query(NameEnum.empty, alias="first_name")):
return "hello " + name.value
animals_dict = {"ANT": "walk", "BEE": "flies", "CAT": "meows", "DOG": "barks"}
Animal = Enum("Animal", {k: k for k in animals_dict})
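# Note: this functional Enum call makes each member's name and value the same
# string, e.g. Animal.ANT.name == Animal.ANT.value == "ANT", so query values
# are validated directly against the keys of animals_dict.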
@app.get("/animal")
async def animal(anim: Optional[Animal] = Query(None)):
if anim is not None:
return anim.name
else:
return "not found"
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=8000)
| 2.90625 | 3 |
setup.py | guilleCoro/django-autocomplete | 1 | 12796988 | from setuptools import setup
import pkg_resources
import autocomplete
def get_metadata_version():
"""
Tries to get the version from the django_autocomplete.egg-info directory.
"""
try:
pkg = list(pkg_resources.find_distributions('.', True))[0]
except IndexError:
return autocomplete.__version__
return pkg.version
version = autocomplete.get_mercurial_version() or get_metadata_version()
setup(
name = 'django-autocomplete',
version = version,
description = 'autocomplete utilities for django',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'http://bitbucket.org/tyrion/django-autocomplete',
download_url = 'http://bitbucket.org/tyrion/django-autocomplete/downloads',
packages = ['autocomplete'],
include_package_data = True,
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'
],
)
| 1.84375 | 2 |
typhon/tests/plots/test_plots.py | SallyDa/typhon | 0 | 12796989 | # -*- coding: utf-8 -*-
"""Testing the functions in typhon.plots.
"""
import os
from typhon import plots
class TestPlots:
"""Testing the plot functions."""
def test_figsize(self):
"""Test golden ratio for figures sizes."""
ret = plots.figsize(10)
assert ret == (10, 6.1803398874989481)
def test_get_subplot_arrangement(self):
"""Test the determination of subplot arrangements."""
shape = plots.get_subplot_arrangement(8)
assert shape == (3, 3)
def test_get_available_styles(self):
"""Check matplotlib stylesheet paths.
        This test checks the consistency of the inputs
        and outputs of styles() and get_available_styles().
"""
style_paths = [
plots.styles(s) for s in plots.get_available_styles()]
assert all(os.path.isfile(s) for s in style_paths)
| 2.625 | 3 |
ckanext/heroslider/plugin.py | OpenGov-OpenData/ckanext-heroslider | 0 | 12796990 | # encoding: utf-8
import ckan.plugins as plugins
import ckan.plugins.toolkit as toolkit
import logging
try:
from ckan.common import config # CKAN 2.7 and later
except ImportError:
    from pylons import config  # CKAN 2.6 and earlier
log = logging.getLogger(__name__)
def dataset_count():
"""Return a count of all datasets"""
count = 0
result = toolkit.get_action('package_search')({}, {'rows': 1})
if result.get('count'):
count = result.get('count')
return count
def get_hero_images():
resources = []
try:
package_id = config.get('ckanext.heroslider.package_id', 'hero-slider-images')
result = toolkit.get_action('package_show')({}, {'id': package_id})
resource_list = result.get('resources')
for item in resource_list:
if item.get('format') in ['JPEG','PNG']:
if item.get('url'):
resources.append(item.get('url'))
    except Exception:
log.debug('Getting Hero images failed')
return resources
class HerosliderPlugin(plugins.SingletonPlugin):
plugins.implements(plugins.IConfigurer)
plugins.implements(plugins.ITemplateHelpers)
# IConfigurer
def update_config(self, config_):
toolkit.add_template_directory(config_, 'templates')
toolkit.add_public_directory(config_, 'public')
toolkit.add_resource('fanstatic', 'heroslider')
# ITemplateHelpers
def get_helpers(self):
return {
'hero_dataset_count': dataset_count,
'get_hero_images': get_hero_images,
}
| 2.109375 | 2 |
graphsignal/graphsignal_test.py | graphsignal/graphsignal | 84 | 12796991 | <filename>graphsignal/graphsignal_test.py
import unittest
import logging
import sys
from unittest.mock import patch, Mock
import graphsignal
logger = logging.getLogger('graphsignal')
class GraphsignalTest(unittest.TestCase):
def setUp(self):
if len(logger.handlers) == 0:
logger.addHandler(logging.StreamHandler(sys.stdout))
graphsignal.configure(api_key='k1', debug_mode=True)
def tearDown(self):
graphsignal.shutdown()
def test_configure(self):
self.assertEqual(graphsignal._get_config().api_key, 'k1')
self.assertEqual(graphsignal._get_config().debug_mode, True)
| 2.703125 | 3 |
intralinks/utils/associations.py | ilapi/intralinks-sdk-python | 3 | 12796992 | <filename>intralinks/utils/associations.py
"""
For educational purposes only
"""
def associate_users_and_groups(users, groups, group_members):
users_by_id = {u['id']:u for u in users}
groups_by_id = {g['id']:g for g in groups}
for u in users:
u['groups'] = []
for g in groups:
g['groupMembers'] = []
for m in group_members:
group = groups_by_id[m['workspaceGroupId']]
user = users_by_id[m['workspaceUserId']]
group['groupMembers'].append(user['id'])
user['groups'].append(group['id'])
for g in groups:
if g['groupMemberCount'] != len(g['groupMembers']):
raise Exception(g)
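
# Hedged mini-example with hypothetical data (not an Intralinks payload):
# membership rows are folded into both sides of the relation.
#   users = [{'id': 1}]
#   groups = [{'id': 9, 'groupMemberCount': 1}]
#   members = [{'workspaceGroupId': 9, 'workspaceUserId': 1}]
#   associate_users_and_groups(users, groups, members)
#   assert users[0]['groups'] == [9] and groups[0]['groupMembers'] == [1]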
class PathBuilder:
def __init__(self, objects):
self.objects = objects
self.objects_by_id = {o['id']:o for o in objects}
def get_object(self, object_id):
return self.objects_by_id[object_id]
def get_parent(self, o):
return self.get_object(o['parentId'])
def has_parent(self, o):
return 'parentId' in o and o['parentId'] != -1
def build_paths(self):
for o in self.objects:
self.__build_path_helper__(o)
def __build_path_helper__(self, o):
if 'ids' not in o:
parent_ids = []
parent_names = []
if self.has_parent(o):
parent = self.get_parent(o)
if 'children_ids' not in parent:
parent['children_ids'] = []
parent['children_ids'].append(o['id'])
self.__build_path_helper__(parent)
parent_ids = parent['ids']
parent_names = parent['names']
o['ids'] = parent_ids + [o['id']]
o['names'] = parent_names + [o['name']]
o['fullPath'] = '/'.join(o['names'])
def build_paths(*arg):
objects = []
for a in arg:
objects.extend(a)
PathBuilder(objects).build_paths()
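
# Illustrative sketch with hypothetical folder rows: PathBuilder walks parentId
# links upward, memoising 'ids'/'names' and joining names into 'fullPath'.
#   docs = [{'id': 1, 'name': 'root', 'parentId': -1},
#           {'id': 2, 'name': 'child', 'parentId': 1}]
#   build_paths(docs)
#   assert docs[1]['fullPath'] == 'root/child'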
| 2.671875 | 3 |
changecolor.py | VSanteriH/Python-Kivy-template | 0 | 12796993 | '''
This file is used to change icon colors from black to white, so the icon
color can be changed in code.
Icon images are from https://github.com/iconic/open-iconic/tree/master/png
'''
import os
from PIL import Image # https://pillow.readthedocs.io/en/stable/
def find_files():
""" Finds all files from icons folder that name ends with .png """
directory = os.fsencode( "icons/") # Gets folder where icons are located
for file in os.listdir(directory): # Gets every file from folder
filename = os.fsdecode(file)
if filename.endswith(".png"):
change_color("icons/" + filename)
continue
else:
continue
def change_color(image):
""" Changes every black pixel to white from image that was send to it. Skips transperent pixels """
newimage = Image.open(image)
    for x in range(newimage.size[0]):  # Go through every pixel of the image along the X axis
        for y in range(newimage.size[1]):  # and along the Y axis
            r,g,b,a = newimage.getpixel((x,y))  # Get the pixel's RGBA color
if r == 0 and g == 0 and b == 0 and a > 0: # If pixel is black and not transparent.
newimage.putpixel((x,y), (255,255,255,a)) # Change color to white. Keep transperency.
newimage.save(image) # Saves a file over the old one.
| 3.96875 | 4 |
src/reader/management/commands/batch_import_perseus.py | LukeMurphey/textcritical_net | 6 | 12796994 | from django.core.management.base import BaseCommand
from reader.importer.PerseusBatchImporter import PerseusBatchImporter
from reader.importer.batch_import import JSONImportPolicy
import os
import sys
class Command(BaseCommand):
help = "Imports all Perseus XML documents from a directory that match the import policy"
def add_arguments(self, parser):
parser.add_argument('-d', '--directory',
dest='directory',
help='The directory containing the files to import')
parser.add_argument('-o', '--overwrite',
action="store_true",
dest="overwrite",
default=False,
help="Overwrite and replace existing items")
parser.add_argument("-t", "--test",
action="store_true",
dest="test",
help="Output the import parameters for any works that would be imported")
def handle(self, *args, **options):
directory = options['directory']
if directory is None and len(args) > 0:
directory = args[0]
# Validate the arguments
if directory is None:
print("No directory was provided to import")
return
overwrite = options['overwrite']
if overwrite is None:
overwrite = False
elif overwrite in [True, False]:
pass # Already a boolean
elif overwrite.lower() in ["true", "1"]:
overwrite = True
else:
overwrite = False
test = options['test']
if test is None:
test = False
elif test in [True, False]:
pass # Already a boolean
elif test.lower() in ["true", "1"]:
test = True
else:
test = False
# Get the path to the import policy accounting for the fact that the command may be run outside of the path where manage.py resides
import_policy_file = os.path.join( os.path.split(sys.argv[0])[0], "reader", "importer", "perseus_import_policy.json")
selection_policy = JSONImportPolicy()
selection_policy.load_policy( import_policy_file )
perseus_batch_importer = PerseusBatchImporter(
perseus_directory= directory,
book_selection_policy = selection_policy.should_be_processed,
overwrite_existing = overwrite,
test = test)
if test:
print("Testing import for files from", directory)
else:
print("Importing files from", directory)
perseus_batch_importer.do_import()
if test:
print("Files from the", directory, "evaluated")
else:
print("Files from the", directory, "directory successfully imported")
| 2.234375 | 2 |
src/models/predict_model.py | MariaFogh/MLOps_Transformers | 1 | 12796995 | <reponame>MariaFogh/MLOps_Transformers<filename>src/models/predict_model.py
import torch
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification
from wandb_helpers import wandb_arg_parser
import wandb
def test_model():
"""
Tests the trained model using the small version of the evaluation dataset.
The accuracy and loss are calculated and logged using Wandb.
"""
input_filepath = "./data/processed"
model_path = "./models/finetuned_bert"
small_eval_dataset = torch.load(input_filepath + "/eval_small.pt")
print("The test set concists of")
print(small_eval_dataset)
config = wandb_arg_parser()
batch_size = config.batch_size
num_batches = 1000 / batch_size
    eval_dataloader = DataLoader(small_eval_dataset, batch_size=batch_size)  # was hardcoded to 8, ignoring the config value
model = AutoModelForSequenceClassification.from_pretrained(model_path)
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
model.to(device)
model.eval()
accuracy = 0.0
validation_loss = 0.0
for ite, batch in enumerate(eval_dataloader):
print(f"\tRunning batch {ite+1} of {num_batches}", end="\r")
batch = {k: v.to(device) for k, v in batch.items()}
with torch.no_grad():
outputs = model(**batch)
logits = outputs.logits
predictions = torch.argmax(logits, dim=-1)
accuracy += sum(predictions == batch["labels"]) / predictions.numel()
loss = outputs.loss
validation_loss += loss.item()
accuracy = 100 * (accuracy / num_batches)
wandb.log(
{
"validation_loss": validation_loss / num_batches,
"validation_accuracy": accuracy,
}
)
if __name__ == "__main__":
test_model()
| 2.546875 | 3 |
HeavyIonsAnalysis/TrackAnalysis/python/TrkAnalyzers_cff.py | flodamas/cmssw | 0 | 12796996 | <gh_stars>0
import FWCore.ParameterSet.Config as cms
from HeavyIonsAnalysis.TrackAnalysis.trackAnalyzer_cfi import *
anaTrack = ppTrack.clone(
trackPtMin = 0.49,
trackSrc = cms.InputTag("hiGeneralTracks"),
vertexSrc = cms.VInputTag('hiSelectedVertex'),
mvaSrc = cms.InputTag('hiGeneralTracks','MVAVals'),
pfCandSrc = cms.InputTag("particleFlowTmp"),
doMVA = False
)
pixelTrack = anaTrack.clone(
trackPtMin = 0.4,
trackSrc = cms.InputTag("hiConformalPixelTracks"),
qualityStrings = cms.untracked.vstring('highPurity'))
trackSequencesPbPb = cms.Sequence(anaTrack)
trackSequencesPP = cms.Sequence(ppTrack)
| 1.117188 | 1 |
DCP#2/Dcp2.py | Tsubanee/Algorithms-Practice | 0 | 12796997 | <reponame>Tsubanee/Algorithms-Practice<gh_stars>0
import numpy as np
def MultiForEachInd(arr):
new_arr = np.ones((len(arr), 1))
k = len(arr)
for i in range(k):
for j in range(k):
if j != i:
new_arr[j] *= arr[i]
return new_arr
#Solution with products of numbers before the i'th index and after i'th index
def products(arr):
#Generate prefix products
prefix_products = []
for num in arr:
if prefix_products:
prefix_products.append(prefix_products[-1] * num)
else:
prefix_products.append(num)
    #Generate suffix products
suffix_products = []
for num in reversed(arr):
if suffix_products:
suffix_products.append(suffix_products[-1] * num)
else:
suffix_products.append(num)
suffix_products = list(reversed(suffix_products))
#Generate result
result = []
for i in range(len(arr)):
if i == 0:
result.append(suffix_products[i+1])
elif i == len(arr) - 1:
result.append(prefix_products[i-1])
else:
result.append(prefix_products[i-1] * suffix_products[i+1])
return result
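
# Worked example: products([1, 2, 3, 4])
#   prefix products -> [1, 2, 6, 24]
#   suffix products -> [24, 24, 12, 4]
#   result          -> [24, 12, 8, 6]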
#Works only for arrays without 0
def vectorized_products(array):
arr_a = np.array(array)
arr_b = np.ones(len(array))
arr_b /= arr_a
arr_b *= np.prod(arr_a)
return arr_b
#Speed comparison between implementations (%timeit is an IPython magic; run in IPython/Jupyter)
test_array = [1,2,3,4,5,6,7]
%timeit products(test_array)
%timeit MultiForEachInd(test_array) | 3.515625 | 4 |
pool_service/tokens.py | sunil16/token_pool | 0 | 12796998 | <filename>pool_service/tokens.py
import random
import string
def randomStringDigits(stringLength=15):
"""Generate a random string of letters and digits """
lettersAndDigits = string.ascii_letters + string.digits
return ''.join(random.choice(lettersAndDigits) for i in range(stringLength))
def getTokens(number_of_token = 5):
token_list = []
token = {}
    for itr in range(int(number_of_token)):  # was range(1, n), which produced only n-1 tokens
token = { 'token': randomStringDigits(40), 'counter': 0, 'isAvailable': True, 'last_active_date_time': '', 'last_inactive_date_time': '' }
token_list.append(token)
return token_list
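
# Usage sketch:
#   pool = getTokens(5)                   # five token records
#   pool[0]['token']                      # 40-char alphanumeric string
#   all(t['isAvailable'] for t in pool)   # True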
| 3.265625 | 3 |
floga/analyzer/analyzer_fs.py | sudaning/Floga | 2 | 12796999 | <filename>floga/analyzer/analyzer_fs.py
# -*- coding: utf-8 -*-
import os
import time
import sys
from datetime import datetime
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
from base.base import PRINT, INPUT, getColor
if PY2:
from analyzer import LogAnalyzer
else:
from analyzer.analyzer import LogAnalyzer
# FS log analyzer
class FsLogAnalyzer(LogAnalyzer):
    __sessLogInfoDict = {}  # log info grouped by session
    # {session UUID: {log: {file index: {line number: log}},
    #                 callNumber: calling number,
    #                 result: {analysis result},
    #                 keyInfo: [(file index, line number, state type, (state transition info))]}
    # }
    __ignoreLinesDict = {}  # ignored lines {file index: {line number: log}}
ANALYZER_TYPE_FS = 'fs'
    # Session dictionary keys
SESS_FS_CALLNUMBER_DK = "callNumber"
    # Categories of extracted key information
SIGN_FLAG_CHAN = "chan proc"
SIGN_CHAN_CALLING = 'calling'
SIGN_CHAN_PROCEDDING = 'proceeding'
SIGN_CHAN_COMPLETE = 'completing'
SIGN_CHAN_TERMINATED = 'terminated'
SIGN_FLAG_CALL = "channel sm"
SIGN_CALL_HANGUP = 'HANGUP'
SIGN_FLAG_CS = "core sm"
SIGN_FLAG_RTP = "rtp"
SIGN_FLAG_CALLNUMBER = "callnumber"
SIGN_FLAG_HANGUP = "hangup_reason"
SIGN_FLAG_R_BYE = 'recv_bye'
SIGN_FLAG_S_BYE = "send_bye"
SIGN_FLAG_CANCEL = 'cancel'
SIGN_FLAG_R_INVITE = "recv_invite"
    # SIP methods
SIP_INVITE = 'INVITE'
SIP_CANCEL = 'CANCEL'
SIP_BYE = 'BYE'
    # match modes
MATCH_MOD_NORMAL = "normal"
MATCH_MOD_EXTEND = "extend"
MATCH_MOD_DETAILS = "details"
    # output file suffixes
OUTPUT_POSTFIX_LOG = ".log"
OUTPUT_POSTFIX_RESULT = ".result"
OUTPUT_POSTFIX_DETAILS = ".details"
def __init__(self):
self.__sessLogInfoDict = {}
self.__ignoreLinesDict = {}
if PY2:
return LogAnalyzer.__init__(self, self.ANALYZER_TYPE_FS)
else:
return super(FsLogAnalyzer, self).__init__(self.ANALYZER_TYPE_FS)
def getSessLogInfoDict(self):
"""获取会话信息字典
参数列表:
无
返回值:
会话信息字典
例如:
{UUID:{log:{文件索引:{行数:日志}}, callNumber:呼叫号码, result:分析结果, keyInfo:(文件索引,行数,状态类型,(状态迁移信息))}}
异常:
无
"""
return self.__sessLogInfoDict
def getIgnoreLinesDict(self):
"""获取忽略的行字典
在解析过程中,有些无法满足正则条件的日志行,无法解析其数据,则会填入此字典中
参数列表:
无
返回值:
忽略的行字典
例如:
{文件索引:{行数:日志}}
异常:
无
"""
return self.__ignoreLinesDict
def load(self, path, rl=False):
"""加载FS的日志
参数列表:
path:日志路径
rl:是否重新加载
返回值:
成功标志和错误信息 元组(bool, str)
异常:
无
"""
if PY2:
return LogAnalyzer.load(self, path, rl)
else:
return super(FsLogAnalyzer, self).load(path, rl)
def clear(self):
"""清理FS的日志
参数列表:
无
返回值:
成功标志和错误信息 元组(bool, str)
异常:
无
"""
self.__sessLogInfoDict = {}
self.__ignoreLinesDict = {}
return super(FsLogAnalyzer, self).clear()
def getSessInfo(self, UUID = "", key = ""):
"""清理FS的日志
参数列表:
UUID:会话的UUID
key:内部的字典名
返回值:
成功标志和错误信息 元组(bool, str)
异常:
无
"""
sessDict = self.getSessLogInfoDict()
if UUID:
if sessDict.get(UUID, False):
return UUID, sessDict[UUID].get(key, False)
else:
return UUID, None
else:
return [(UUID, sessDict[UUID].get(key, False)) for UUID in sessDict.keys()]
def getLogDict(self, UUID = ""):
"""获取日志字典
参数列表:
UUID:会话的UUID
返回值:
日志字典 参照__sessLogInfoDict定义
异常:
无
"""
return self.getSessInfo(UUID, self.SESS_LOG_DK)
def getCallNumber(self, UUID = ""):
"""获取呼叫号码
参数列表:
UUID:会话的UUID
返回值:
呼叫号码 str
异常:
无
"""
return self.getSessInfo(UUID, self.SESS_FS_CALLNUMBER_DK)
def getResultDict(self, UUID = ""):
"""获取结果字典
参数列表:
UUID:会话的UUID
返回值:
结果字典 {'conclusion':"", 'details':{}, 'note':""}
异常:
无
"""
return self.getSessInfo(UUID, self.SESS_RESULT_DK)
def getkeyInfoList(self, UUID = ""):
"""获取关键信息列表
参数列表:
UUID:会话的UUID
返回值:
关键信息 [(文件索引,行数,状态类型,(信息)),]
异常:
无
"""
return self.getSessInfo(UUID, self.SESS_KEYINFO_DK)
def getSignInfo(self, flag, context):
"""信令的收发方向(用于上层显示输出)
参数列表:
flag:keyInfoList中元组的‘状态类型’字段
context:keyInfoList中元组的‘信息’字段
返回值:
元组(FromModule, ToModule, Sign)
异常:
无
"""
if flag in [self.SIGN_FLAG_CHAN]:
if context[0] in [self.SIGN_CHAN_CALLING]:
return self.MOD_FS, self.MOD_OUTSIDE, self.SIP_INVITE
elif context[0] in [self.SIGN_CHAN_PROCEDDING]:
return self.MOD_OUTSIDE, self.MOD_FS, context[1]
elif context[0] in [self.SIGN_CHAN_COMPLETE]:
return self.MOD_OUTSIDE, self.MOD_FS, context[1]
elif context[0] in [self.SIGN_CHAN_TERMINATED]:
return self.MOD_OUTSIDE, self.MOD_FS, context[1]
elif flag in [self.SIGN_FLAG_R_BYE]:
return self.MOD_OUTSIDE, self.MOD_FS, self.SIP_BYE
elif flag in [self.SIGN_FLAG_CANCEL]:
return self.MOD_FS, self.MOD_OUTSIDE, self.SIP_CANCEL
elif flag in [self.SIGN_FLAG_S_BYE]:
return self.MOD_FS, self.MOD_OUTSIDE, self.SIP_BYE
else:
pass
return '', '', ''
    # collect log lines grouped by session
    def __sessCollect(self):
        """Group session logs by UUID.
        In FS logs each line starts with the session UUID (a 36-character
        string of digits/letters joined by '-', e.g.
        4541eb63-e5b0-49f0-8d2c-31e06078013f). This method reads every log
        line, groups it by UUID into a local dict keyed by UUID, whose value
        maps file index and line number to the log content. It also
        initializes the per-session fields (call number, analysis result,
        key info) used by the analyzer internally.
        Args:
            none
        Returns:
            dict of parsed session logs and dict of unparsable lines (dict, dict)
        Raises:
            none
        """
ignoreLinesDict = {}
sessLogInfoDict = {}
fileLen = len(self.getLines())
process = 0
for f, lines in enumerate(self.getLines()):
process = self.printProc(process, fileLen)
for i, line in enumerate(lines):
                # e.g.: 4541eb63-e5b0-49f0-8d2c-31e06078013f 2016-03-21 17:41:14.701532 [DEBUG] switch_core_state_machine.c:40 sofia/external/[email protected]:5080 Standard INIT
                # find the first space: the session UUID is on the left, the log message on the right
                pos = line.find(' ')
                line_len = len(line)
                # skip lines whose prefix is not a UUID (UUIDs are 36 chars with exactly 4 dashes)
                if pos == -1 or pos < 36 or line[0:pos].count('-') != 4:
                    if f not in ignoreLinesDict:
                        ignoreLinesDict[f] = {}
                    ignoreLinesDict[f][i] = line
                    continue
                # split the line into UUID and log message
sessUUID, sessLog = line[0:pos], line[pos + 1:-1]
                # file the log line under its UUID
if sessUUID in sessLogInfoDict:
if f not in sessLogInfoDict[sessUUID][self.SESS_LOG_DK]:
sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f] = {i:sessLog}
if sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] is None:
sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] = self.getLogTime(sessLog)
else:
sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f][i] = sessLog
if sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] is None:
sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] = self.getLogTime(sessLog)
else:
sessLogInfoDict[sessUUID] = {self.SESS_LOG_DK:{f:{i:sessLog}}, self.SESS_FS_CALLNUMBER_DK:"", \
self.SESS_RESULT_DK:{self.SESS_RESULT_CONCLUSION_DK:"", self.SESS_RESULT_DETAILS_DK:{}, self.SESS_RESULT_NOTE_DK:""}, \
self.SESS_KEYINFO_DK:[], self.SESS_START_TIME_DK:self.getLogTime(sessLog)}
else:
self.__sessLogInfoDict = sessLogInfoDict
self.__ignoreLinesDict = ignoreLinesDict
for sessUUID in sessLogInfoDict.keys():
if sessLogInfoDict[sessUUID][self.SESS_START_TIME_DK] is None:
                print(sessUUID, "\nstart time could not be parsed")
return sessLogInfoDict, ignoreLinesDict
    # extract the call number of each session
    def __getCallNumber(self):
        """Extract call numbers.
        After the session log dict is built, each session is scanned and the
        number segment is extracted with regular expressions, then written to
        the session's callNumber field. A sample source looks like
        (sofia/external/[email protected]:5080), where 6010 is the number.
        Args:
            none
        Returns:
            none
        Raises:
            none
        """
sessLogInfoDict = self.getSessLogInfoDict()
        # e.g. 2016-03-21 17:41:14.701532 [DEBUG] switch_core_state_machine.c:473 (sofia/external/[email protected]:5080) Running State Change CS_INIT
sessLen = len(sessLogInfoDict)
process = 0
for sessUUID in sessLogInfoDict.keys():
process = self.printProc(process, sessLen, widgetType = "percent")
for f in sessLogInfoDict[sessUUID][self.SESS_LOG_DK].keys():
flag = False
for l in sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f].keys():
                    # take one log line
sessLog = sessLogInfoDict[sessUUID][self.SESS_LOG_DK][f][l]
                    # regex match: use "(sofia/external/" as the leading keyword, ")" as the end and "@" as the separator to extract the number
                    # by default take the number from this line
res = self.reMatch("New Channel sofia\/(.*)\/(\d*)\@(.*?) \[", sessLog, 3)
if res:
sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] = res[1]
flag = True
                    # if the number was transferred, take the transferred number
res = self.reMatch("Dialplan: sofia\/(.*)\/(.*) Action transfer\((\d*) XML default\)", sessLog, 3)
if res:
sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] = res[2]
flag = True
break
res = self.reMatch("<(\d*)>->(\d*) in context", sessLog, 2)
if res:
sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] = res[1]
flag = True
break
if flag:
break
            # no number found; the log file format may have changed
else:
#print "Not find the call number. UUID:%s" % sessUUID
pass
else:
pass
    # collect the key information of each session
    def __sessKeyInfoCollect(self):
        """Collect per-session key information.
        After the session log dict is built, each session is scanned with
        regular expressions for state transitions and received messages, e.g.
        State Change CS_CONSUME_MEDIA -> CS_EXECUTE   core state machine transition -- CS class
        Callstate Change ACTIVE -> HANGUP             call state machine transition -- call class
        entering state [proceeding][180]              handling of a received response -- channel class
        AUDIO RTP [sofia/external/[email protected]:5080] 10.0.7.176 port 24776 -> 192.168.0.178 port 7076 codec: 18 ms: 20 -- RTP info class
        Hangup sofia/external/[email protected]:5080 [CS_CONSUME_MEDIA] [INCOMPATIBLE_DESTINATION] -- hangup reason class
        The extracted info is stored in the session's keyInfo list as tuples
        (file index, line no, match flag, extracted result).
        Args:
            none
        Returns:
            none
        Raises:
            none
        """
sessLogInfoDict = self.getSessLogInfoDict()
        # regular expressions to match
        reExpInfo = [
            ("State Change (.*) -> (.*)", 2, [], self.SIGN_FLAG_CS),  # state-transition logs
            ("entering state \[(.*)\]\[(.*)\]", 2, [], self.SIGN_FLAG_CHAN),  # received-message logs
            ("Callstate Change (.*) -> (.*)", 2, [], self.SIGN_FLAG_CALL),  # call-state logs
            ("receiving invite from (.*) version", 1, [], self.SIGN_FLAG_R_INVITE),
            ("AUDIO RTP \[(.*)\] (.*) port (\d+) -> (.*) port (\d+) codec: (\d+) ms: (\d+)", 7, [0], self.SIGN_FLAG_RTP),  # RTP channel info
            ("Flipping CID from \"(.*)\" \<(.*)\> to \"(.*)\" \<(.*)\>", 4, [], self.SIGN_FLAG_CALLNUMBER),  # call number
            ("952 Hangup (.*) \[(.*)\] \[(.*)\]", 3, [0], self.SIGN_FLAG_R_BYE),
            ("Hangup (.*) \[(.*)\] \[(.*)\]", 3, [0], self.SIGN_FLAG_HANGUP),
            ("Sending BYE to(.*)", 1, [0], self.SIGN_FLAG_S_BYE),
            ("Sending CANCEL to(.*)", 1, [0], self.SIGN_FLAG_CANCEL),
        ]
sessLen = len(sessLogInfoDict)
process = 0
for sessUUID in sessLogInfoDict.keys():
process = self.printProc(process, sessLen, widgetType = "percent", begin=0, end=50)
keyInfoList = []
logFileDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK]
fileList = sorted(logFileDict.items(), key=lambda logFileDict:logFileDict[0])
for f, logDict in fileList:
logList = sorted(logDict.items(), key=lambda logDict:logDict[0])
for line, log in logList:
for reExpr, expLen, dropPos, flag in reExpInfo:
res = self.reMatch(reExpr, log, expLen)
if res:
l = list(res)
for dPos in [x for x in sorted(dropPos, reverse=True) if dropPos and x < len(res)]:
try:
del l[dPos]
except Exception as Err:
                                    s = "%s %s %s" % (Err, reExpr, res)
PRINT(s)
raise
res = tuple(l)
keyInfoList.append((f, line, flag, res))
break
else:
sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK] = keyInfoList
def __match(self, keyInfoList, flag, param1 = "", param2 = "", f = -1, l = -1, mod="normal"):
        matches = [(i, x) for i, x in enumerate(keyInfoList) if x[2] == flag and \
            ((len(x[3]) >= 1 and param1.strip() == x[3][0].strip()) if param1 != "" else True) and \
            ((len(x[3]) >= 2 and param2.strip() == x[3][1].strip()) if param2 != "" else True) and \
            (x[0] >= f if f != -1 else True) and \
            (x[1] >= l if l != -1 else True)]
        if mod in [self.MATCH_MOD_NORMAL]:
            return any(matches)
        elif mod in [self.MATCH_MOD_EXTEND]:
            return (matches[0][1][0], matches[0][1][1], matches[0][0]) if any(matches) else False
        elif mod in [self.MATCH_MOD_DETAILS]:
            return matches[0][1][3] if any(matches) else False
        else:
            return False
def __matchCsStateChange(self, keyInfoList, fromState, toState):
"""CS状态变迁匹配
参数列表:
keyInfoList:关键信息列表
fromState:迁移前的状态
toState:迁移到的状态
返回值:
成功或失败 bool
异常:
无
"""
return self.__match(keyInfoList, self.SIGN_FLAG_CS, fromState, toState)
def __matchCallStateChange(self, keyInfoList, fromState, toState):
"""call状态变迁匹配
参数列表:
keyInfoList:关键信息列表
fromState:迁移前的状态
toState:迁移到的状态
返回值:
成功或失败 bool
异常:
无
"""
return self.__match(keyInfoList, self.SIGN_FLAG_CALL, fromState, toState)
def __fuzzyMatchChannelStateCode(self, keyInfoList, fuzzyCode):
"""通道状态码模糊匹配
模糊码以X代表一个任意数字位,例如4XX,则为匹配4开头应答码
参数列表:
keyInfoList:关键信息列表
fuzzyCode:模糊状态码
返回值:
匹配到的值
异常:
无
"""
codeList = []
for x in keyInfoList:
if x[2] == self.SIGN_FLAG_CHAN:
reExpr = "(" + fuzzyCode.replace("X","\\d").replace("x", "\\d") + ")"
res = self.reMatch(reExpr, x[3][1], 1)
res and codeList.append(x[3])
return codeList
def __matchChannelStateCode(self, keyInfoList, code):
"""通道状态码匹配
精确匹配状态码
参数列表:
keyInfoList:关键信息列表
code:状态码
返回值:
成功或失败 bool
异常:
无
"""
return self.__match(keyInfoList, self.SIGN_FLAG_CHAN, param2 = code)
def __matchChannelStateDesc(self, keyInfoList, desc):
"""通道状态描述匹配
匹配状态描述
参数列表:
keyInfoList:关键信息列表
desc:描述
返回值:
成功或失败 bool
异常:
无
"""
return self.__match(keyInfoList, self.SIGN_FLAG_CHAN, param1 = desc)
    # analyze the call flow of each session
    def __sessAnalysis(self):
        """Analyze sessions.
        Walks the state transitions of every session: first determine which
        transitions occurred, then match them against standard transition
        templates.
        Args:
            none
        Returns:
            none
        Raises:
            none
        """
sessLogInfoDict = self.getSessLogInfoDict()
sessLen = len(sessLogInfoDict)
process = 0
for sessUUID in sessLogInfoDict.keys():
process = self.printProc(process, sessLen, widgetType = "percent", begin=50, end=100)
keyInfoList = sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK]
#if sessUUID == "4befcdab-a4cc-4d6a-979f-bbff65d729b0":
# print("\n")
# for k in keyInfoList:
# print(k)
conclusion = ""
note = ""
detailsDict = {
"CS_NEW__CS_INIT": self.__matchCsStateChange(keyInfoList, "CS_NEW", "CS_INIT"),
"CS_INIT__CS_ROUTING": self.__matchCsStateChange(keyInfoList, "CS_INIT", "CS_ROUTING"),
"CS_ROUTING__CS_CONSUME_MEDIA": self.__matchCsStateChange(keyInfoList, "CS_ROUTING", "CS_CONSUME_MEDIA"),
"CS_CONSUME_MEDIA__CS_EXECUTE": self.__matchCsStateChange(keyInfoList, "CS_CONSUME_MEDIA", "CS_EXECUTE"),
"DOWN__RINGING": self.__matchCallStateChange(keyInfoList, "DOWN", "RINGING"),
"DOWN__EARLY": self.__matchCallStateChange(keyInfoList, "DOWN", "EARLY"),
"DOWN__ACTIVE": self.__matchCallStateChange(keyInfoList, "DOWN", "ACTIVE"),
"EARLY__RINGING": self.__matchCallStateChange(keyInfoList, "EARLY", "RINGING"),
"EARLY__ACTIVE": self.__matchCallStateChange(keyInfoList, "EARLY", "ACTIVE"),
"RINGING__ACTIVE": self.__matchCallStateChange(keyInfoList, "RINGING", "ACTIVE"),
"DOWN__HANGUP": self.__matchCallStateChange(keyInfoList, "DOWN", "HANGUP"),
"EARLY__HANGUP": self.__matchCallStateChange(keyInfoList, "EARLY", "HANGUP"),
"RINGING__HANGUP": self.__matchCallStateChange(keyInfoList, "RINGING", "HANGUP"),
"ACTIVE__HANGUP": self.__matchCallStateChange(keyInfoList, "ACTIVE", "HANGUP"),
"calling_0": self.__matchChannelStateDesc(keyInfoList, "calling"),
"proceeding_180": self.__matchChannelStateCode(keyInfoList, "180"),
"proceeding_183": self.__matchChannelStateCode(keyInfoList, "183"),
"completing_200": self.__matchChannelStateDesc(keyInfoList, "completing"),
"completed_200": self.__matchChannelStateDesc(keyInfoList, "completed"),
"ready_200": self.__matchChannelStateDesc(keyInfoList, "ready"),
"terminated_list": self.__fuzzyMatchChannelStateCode(keyInfoList, "4xx") + \
self.__fuzzyMatchChannelStateCode(keyInfoList, "5xx") + \
self.__fuzzyMatchChannelStateCode(keyInfoList, "6xx"),
}
            # signature state combinations for each call-flow case
case_calling_invite = {"CS_INIT__CS_ROUTING":True, "CS_ROUTING__CS_CONSUME_MEDIA":True, "calling_0":True,}
case_ringing_180 = {"proceeding_180":True,}
case_ringing_183 = {"proceeding_183":True,}
case_ringinged_180 = {"DOWN__RINGING":True,}
case_ringinged_183 = {"DOWN__EARLY":True,}
case_ringing_183_180 = {"DOWN__EARLY":True, "proceeding_183":True, "EARLY__RINGING":True, "proceeding_180":True,}
case_answer_invite = {"DOWN__ACTIVE":True, "completing_200":True, "ready_200":True,}
case_answerd_invite = {"DOWN__ACTIVE":True, "completed_200":True, "ready_200":True,}
case_answer_180 = {"RINGING__ACTIVE":True, "completing_200":True, "ready_200":True,}
case_answerd_180 = {"RINGING__ACTIVE":True, "completed_200":True, "ready_200":True,}
case_answer_183 = {"EARLY__ACTIVE":True, "completing_200":True, "ready_200":True,}
case_answerd_183 = {"EARLY__ACTIVE":True, "completed_200":True, "ready_200":True,}
case_hangup_invite = {"DOWN__HANGUP":True,}
case_hangup_180 = {"RINGING__HANGUP":True,}
case_hangup_183 = {"EARLY__HANGUP":True,}
case_hangup_acitve = {"ACTIVE__HANGUP":True,}
case_r_183 = {"proceeding_183":True,}
# invite->
if self.caseMatch(detailsDict, case_calling_invite) or self.__match(keyInfoList, self.SIGN_FLAG_R_INVITE):
conclusion = "OK"
note = "[CALLING" + (self.__match(keyInfoList, self.SIGN_FLAG_R_INVITE) and "(R)" or "(S)")
# invite-> 200<-
if self.caseMatch(detailsDict, case_answer_invite):
note += " -> TALKING"
# invite-> 200<- bye<->
if self.caseMatch(detailsDict, case_hangup_acitve):
note += " -> HANGUP" + (self.__match(keyInfoList, self.SIGN_FLAG_S_BYE) and "(S)" or "(R)")
# invite-> (bye-> or 错误应答<-)
elif self.caseMatch(detailsDict, case_hangup_invite):
if self.caseMatch(detailsDict, case_r_183):
note += " -> RINGING(183)"
note += " -> HANGUP" + (self.__match(keyInfoList, self.SIGN_FLAG_S_BYE) and "(S)" or "(R)")
else:
# invite-> (183<- or 180<-)
if self.caseMatch(detailsDict, case_ringing_180) or self.caseMatch(detailsDict, case_ringing_183) or self.caseMatch(detailsDict, case_ringing_183_180) or \
self.caseMatch(detailsDict, case_ringinged_180) or self.caseMatch(detailsDict, case_ringinged_183):
note += " -> RINGING"
# invite-> (183<- or 180<-) 200<-
if self.caseMatch(detailsDict, case_answer_180) or self.caseMatch(detailsDict, case_answerd_180) or \
self.caseMatch(detailsDict, case_answer_183) or self.caseMatch(detailsDict, case_answerd_183):
note += " -> TALKING"
# invite-> (183<- or 180<-) 200<- bye<->
if self.caseMatch(detailsDict, case_hangup_acitve):
note += " -> HANGUP" + (self.__match(keyInfoList, self.SIGN_FLAG_S_BYE) and "(S)" or "(R)")
# invite-> (183<- or 180<-) 错误应答<-
elif self.caseMatch(detailsDict, case_hangup_180) or self.caseMatch(detailsDict, case_hangup_183):
note += " -> HANGUP" + (self.__match(keyInfoList, self.SIGN_FLAG_S_BYE) and "(S)" or "(R)")
            # determine the hangup reason
res = self.__match(keyInfoList, self.SIGN_FLAG_HANGUP, mod = self.MATCH_MOD_DETAILS)
st, reason = res if res else ("", "")
if reason:
note += "{[" + st + "]" + reason + "}"
if reason not in ["NORMAL_CLEARING", "MANAGER_REQUEST"]:
conclusion = "ERROR"
else:
res = self.__match(keyInfoList, self.SIGN_FLAG_R_BYE, mod = self.MATCH_MOD_DETAILS)
st, reason = res if res else ("", "")
if reason:
note += "{[" + st + "]" + reason + "}"
if reason not in ["NORMAL_CLEARING", "MANAGER_REQUEST"]:
conclusion = "ERROR"
if detailsDict["terminated_list"]:
conclusion = "ERROR"
note += "(recv %s)" % detailsDict["terminated_list"][0][1]
else:
conclusion = "WARNING"
note += "[NOT COMPLETE"
note += "]"
sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_NOTE_DK] = note
sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_DETAILS_DK] = detailsDict
sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK] = conclusion
# print "\n", sessLogInfoDict[sessUUID]["callNumber"], sessLogInfoDict[sessUUID]["result"]["conclusion"], note,#, "\n", keyInfoList, "\n",detailsDict, "\n"
    # analyze session logs
def __analysis(self):
self.__sessKeyInfoCollect()
self.__sessAnalysis()
    # run the full analysis pipeline
    def run(self, mode = "Normal"):
        # time.clock() was removed in Python 3.8; fall back to perf_counter
        clock = time.clock if hasattr(time, 'clock') else time.perf_counter
        time1 = clock()
        s = "正在收集会话信息..."
        PRINT(s, end='')
        self.__sessCollect()
        time2 = clock()
        s = "OK (耗时:%.2f秒)" % (time2 - time1)
        PRINT(s, color='green')
        s = "正在提取号码..."
        PRINT(s, end='')
        self.__getCallNumber()
        time3 = clock()
        s = "OK (耗时:%.2f秒)" % (time3 - time2)
        PRINT(s, color='green')
        s = "正在分析会话过程..."
        PRINT(s, end='')
        self.__analysis()
        time4 = clock()
        s = "OK (耗时:%.2f秒)" % (time4 - time3)
        PRINT(s, color='green')
        return True, ""
    # get the list of session UUIDs
def getSessUUIDList(self):
sessLogInfoDict = self.getSessLogInfoDict()
return sessLogInfoDict.keys()
    # get the list of call numbers
def getCallNumberList(self):
sessLogInfoDict = self.getSessLogInfoDict()
return [sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] for sessUUID in sessLogInfoDict.keys() if sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK]]
    # show the list of session UUIDs
def showSessUUIDList(self):
sessUUIDList = self.getSessUUIDList()
self.printList(sessUUIDList, 4, "UUID列表:", "总数:%d" % len(sessUUIDList))
    # show the list of call numbers
def showCallNumberList(self):
        # call numbers
callNumberList = self.getCallNumberList()
tmp = set(callNumberList)
self.printList(tmp, 8, "呼叫号码列表:", "总数:%d" % len(tmp))
        # duplicated call numbers
dupl = self.findDupl(callNumberList)
len(dupl) and self.printList(dupl, 8, "重复的号码:", "总数:%d" % len(dupl))
    # ---------------------------------------------- show detailed analysis results ----------------------------------------------
def __showDetailsHeader(self, sessUUID = "", callNumber = "", conclusion = ""):
return ""
def getDetails(self, sessUUID = "", targConclusion = "", mode = "normal"):
sessLogInfoDict = self.getSessLogInfoDict()
if not sessLogInfoDict.get(sessUUID, False):
return ""
conclusion = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK]
if targConclusion.upper() not in conclusion.upper():
return ""
logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK]
keyInfoList = sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK]
if not logDict or not keyInfoList:
return ""
res = self.__match(keyInfoList, self.SIGN_FLAG_CALLNUMBER, mod = self.MATCH_MOD_DETAILS)
disFrom, numberFrom, disTo, numberTo = res if res else ("","","","")
callTime = "%s" % self.getLogTime(logDict.get(keyInfoList[0][0], {}).get(keyInfoList[0][1], ""))
res = self.__match(keyInfoList, self.SIGN_FLAG_RTP, mod = self.MATCH_MOD_DETAILS)
locIp, locPort, RmtIp, RmtPort, audioPayLoad, audioPTime = res if res else ("","","","","","")
note = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_NOTE_DK]
s = ""
if mode in ['normal']:
s += "-" * 160 + "\n"
s += "\n" + "{0:*^160}".format(" 基本信息 ") + "\n\n"
s += "%-16s: %-s\n" % ("呼叫开始时间", callTime)
s += "%-16s: %-s\n" % ("UUID", sessUUID)
if numberFrom:
s += "%-16s: %-s\n" % ("显示号码", numberFrom)
s += "%-16s: %-s\n" % ("呼叫号码", numberTo or sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK])
if locIp and RmtIp:
s += "%-16s: %s:%s:%s -> %s:%s:%s (%s:%s %s:%s)\n" % ("媒体信息", "本端地址", locIp, locPort, "远端地址", RmtIp, RmtPort, "Payload", audioPayLoad, "ptime", audioPTime)
res = self.__match(keyInfoList, self.SIGN_FLAG_HANGUP, mod = self.MATCH_MOD_DETAILS)
reason = res[1] if res else ""
if reason:
res = self.__match(keyInfoList, self.SIGN_FLAG_CHAN, param1 = self.SIGN_CHAN_TERMINATED, mod = self.MATCH_MOD_DETAILS)
s += "%-16s: %s\n" % ("挂断原因", res[1] if res else reason)
else:
res = self.__match(keyInfoList, self.SIGN_FLAG_R_BYE, mod = self.MATCH_MOD_DETAILS)
reason = res[1] if res else ""
if reason:
res = self.__match(keyInfoList, self.SIGN_FLAG_CHAN, param1 = self.SIGN_CHAN_TERMINATED, mod = self.MATCH_MOD_DETAILS)
s += "%-16s: %s\n" % ("挂断原因", res[1] if res else reason)
signTimePrev = None
signTimeThis = None
if mode in ['normal']:
s += "%-16s: %-s\n" % ("结果", conclusion)
s += "%-16s: %-s\n" % ("消息流", self.showNote(note))
s += "\n" + "{0:*^160}".format(" 消息交互详情 ") + "\n\n"
s += "%-4s %-35s %-16s %-16s %s\n\n" % ("序号","信令时间", "源日志行号", "消息类型", "详情")
l = []
for i, k in enumerate(keyInfoList):
signTime = "%s" % self.getLogTime(logDict.get(k[0], {}).get(k[1], ""))
res = self.reMatch("(\\d{4})-(\\d{1,2})-(\\d{1,2}) (\\d{2}):(\\d{2}):(\\d{2}).(\\d{6})", signTime, 7)
if res:
signTimePrev = signTimeThis
signTimeThis = datetime(int(res[0]), int(res[1]), int(res[2]), int(res[3]), int(res[4]), int(res[5]))
if signTimePrev and (signTimeThis - signTimePrev).seconds > 4:
s += "{0:^40}".format(" ↑ ") + "\n"
s += "%s \n" % getColor("{0:^40}".format("时差:" + str((signTimeThis - signTimePrev).seconds) + "s", color="red", need=True))
s += "{0:^40}".format(" ↓ ") + "\n"
if k[0] not in l:
s += self.getPathEx(k[0]) + "\n"
l.append(k[0])
s += "%02d. %-35s %-16s %-16s %s\n" % (i + 1, signTime, str(k[1]), str(k[2]), str(k[3]))
else:
s += "\n"
return s
def __showDetailsBody(self, sessUUID = "", targConclusion = ""):
s = self.getDetails(sessUUID, targConclusion)
if s:
PRINT(s)
return s
def __showDetailsTail(self, count, sessUUID = "", callNumber = "", conclusion = ""):
s = "-" * 160 + "\n"
s += "\n总数:%d" % count
PRINT(s)
return s
def __showDetails(self, sessUUID = "", callNumber = "", conclusion = ""):
sessLogInfoDict = self.getSessLogInfoDict()
        # header
self.__showDetailsHeader()
        # body
count = 0
if sessUUID:
            # filter by callNumber if one was given
if (callNumber == sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber else True) and \
self.__showDetailsBody(sessUUID, conclusion):
count += 1
else:
total = len(sessLogInfoDict)
flag = False
sessList = sorted(sessLogInfoDict.items(), key=lambda sessLogInfoDict:sessLogInfoDict[1][self.SESS_START_TIME_DK])
for i, (sessUUID, context) in enumerate(sessList):
                # filter by callNumber if one was given
if (callNumber == context[self.SESS_FS_CALLNUMBER_DK] if callNumber else True) and \
self.__showDetailsBody(sessUUID, conclusion):
count += 1
continueRet, flag = self.inputContinue(i, count, total, flag, self.__showDetailsHeader)
if not continueRet:
break
        # footer
self.__showDetailsTail(count)
return count
    # look up logs by UUID and show the detailed analysis
def showDetails(self, sessUUID = "", callNumber = "", conclusion = ""):
return self.__showDetails(sessUUID = sessUUID, callNumber = callNumber, conclusion = conclusion)
    # ---------------------------------------------- show analysis results ----------------------------------------------
def __showAnalysisResultHeader(self, targConclusion=""):
s = "%-30s %-36s %-30s %-7s %-s\n" % ("呼叫开始时间", "UUID", "呼叫号码", "结果", "备注")
PRINT(s)
def __getAnalysisResultBody(self, sessUUID, targConclusion = "", show = True):
sessLogInfoDict = self.getSessLogInfoDict()
s = ""
conclusion = ""
if sessLogInfoDict.get(sessUUID, False):
logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK]
keyInfoList = sessLogInfoDict[sessUUID][self.SESS_KEYINFO_DK]
if not keyInfoList or not logDict:
return s, conclusion
callTime = "%s" % self.getLogTime(logDict.get(keyInfoList[0][0]).get(keyInfoList[0][1]))
callNumber = sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK]
conclusion = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_CONCLUSION_DK]
note = sessLogInfoDict[sessUUID][self.SESS_RESULT_DK][self.SESS_RESULT_NOTE_DK]
if targConclusion.upper() in conclusion.upper():
color = conclusion.upper() in ['ERROR'] and 'red' or \
conclusion.upper() in ['WARNING'] and 'yellow' or \
conclusion.upper() in ['OK'] and 'green'
conclusion = getColor("{0:<7}".format(conclusion), color = color)
s += "%-30s %-36s %-30s %-7s %-s\n" % (callTime, sessUUID, callNumber or getColor("{0:<20}".format("null"), color='gray', need=show), conclusion, note)
return s, conclusion
def __showAnalysisResultBody(self, sessUUID, targConclusion = ""):
s, c = self.__getAnalysisResultBody(sessUUID, targConclusion)
if s:
PRINT(s)
return s
def __showAnalysisResultTail(self, count, targConclusion=""):
s = "\n总数:%d" % count
PRINT(s)
def __showResult(self, sessUUID = "", callNumber = "", conclusion = ""):
sessLogInfoDict = self.getSessLogInfoDict()
        # header
self.__showAnalysisResultHeader(conclusion)
        # body
count = 0
if sessUUID:
            # a given callNumber is treated as a filter
if (callNumber == sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber else True) \
and self.__showAnalysisResultBody(sessUUID, conclusion):
count += 1
else:
total = len(sessLogInfoDict)
flag = False
sessList = sorted(sessLogInfoDict.items(), key=lambda sessLogInfoDict:sessLogInfoDict[1][self.SESS_START_TIME_DK])
for i, (sessUUID, context) in enumerate(sessList):
                # a given callNumber or UUID is treated as a filter
if (callNumber == context[self.SESS_FS_CALLNUMBER_DK] if callNumber else True) \
and self.__showAnalysisResultBody(sessUUID, conclusion):
count += 1
                # paginate output and ask whether to continue
continueRet, flag = self.inputContinue(i, count, total, flag, self.__showAnalysisResultHeader, conclusion)
if not continueRet:
break
        # footer
self.__showAnalysisResultTail(count, conclusion)
return count
def showResult(self, sessUUID = "", callNumber = "", conclusion = ""):
return self.__showResult(sessUUID = sessUUID, callNumber = callNumber, conclusion = conclusion)
    # ---------------------------------------------- write summary results to file ----------------------------------------------
def __getOutputResultHeader(self):
s = "%-30s %-36s %-30s %-6s %s\n" % ("呼叫开始时间", "UUID", "呼叫号码", "结果", "备注")
return s
def __getOutputResultTail(self, warningCount, errorCount, okCount):
s = "%s:%d\n%s:%d\n%s:%d\n%s:%d\n" % ("总计", errorCount + okCount + warningCount, "告警", warningCount, "失败", errorCount, "成功", okCount)
return s
def __outputReslut(self, outputPath, sessUUID = "", callNumber = "", conclusion = "", fileName = ""):
sessLogInfoDict = self.getSessLogInfoDict()
        # build the output directory name from the source log file names
if not fileName:
#fileNames = "_".join([os.path.split(p)[-1] for p in self.getPath()])
fileNames = sessUUID + callNumber + conclusion + "_tmp"
fileName = "Result" + fileNames + self.OUTPUT_POSTFIX_RESULT
context = ""
warningCount, errorCount, okCount = 0, 0, 0
        # write to file
if sessUUID:
if (callNumber == sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber else True):
s, c = self.__getAnalysisResultBody(sessUUID, conclusion, show=False)
context += s
if s and c.upper() in ['ERROR']:
errorCount += 1
elif s and c.upper() in ['WARNING']:
warningCount += 1
elif s and c.upper() in ['OK']:
okCount += 1
else:
for sessUUID in sessLogInfoDict.keys():
if (callNumber == sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK] if callNumber else True):
s, c = self.__getAnalysisResultBody(sessUUID, conclusion, show=False)
context += s
if s and c.upper() in ['ERROR']:
errorCount += 1
elif s and c.upper() in ['WARNING']:
warningCount += 1
elif s and c.upper() in ['OK']:
okCount += 1
if context:
context = self.__getOutputResultHeader() + context
context += self.__getOutputResultTail(warningCount, errorCount, okCount)
if self.outputEx(outputPath, fileName, context):
return 1, outputPath, [fileName]
else:
return 0, outputPath, []
else:
return 0, outputPath, []
    # write summary analysis results to file
def outputReslut(self, outputPath, sessUUID = "", callNumber = "", conclusion = "", fileName = ""):
return self.__outputReslut(outputPath, sessUUID = sessUUID, callNumber = callNumber, conclusion = conclusion, fileName = fileName)
    # ---------------------------------------------- write raw logs to file ----------------------------------------------
def __getOutputHeader(self, logDict, callNumber, sessUUID):
s = "呼叫号码:%s\nUUID:%s\n" % (callNumber, sessUUID)
return s
def __outputOriginLog(self, outputPath, sessUUID = "", callNumber = "", name = ""):
sessLogInfoDict = self.getSessLogInfoDict()
newPath = outputPath
        fileNameList = []  # list of written files
        # if a UUID is given (write a single file)
if sessUUID:
if sessLogInfoDict.get(sessUUID, False):
logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK]
            # filter by number if one was given
c = sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK]
if (callNumber == c if callNumber else True):
fileName = name or ((callNumber or c) + "__" + sessUUID + self.OUTPUT_POSTFIX_LOG)
if self.output(logDict, newPath, fileName, self.__getOutputHeader(logDict, c, sessUUID)):
fileNameList.append(fileName)
        # no UUID given (possibly multiple output files)
else:
            # choose the output dir: the given name if any, otherwise derive it from the source log file names
if not name:
#fileNames = "_".join([os.path.split(p)[-1] for p in self.getPath()])
fileNames = sessUUID + callNumber + "_tmp"
newPath = os.path.join(outputPath, fileNames)
else:
newPath = os.path.join(outputPath, name)
            # create the output dir (recreate it if it exists)
self.makeDir(newPath)
for sessUUID in sessLogInfoDict.keys():
logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK]
                # filter by number if one was given
c = sessLogInfoDict[sessUUID][self.SESS_FS_CALLNUMBER_DK]
if (callNumber == c if callNumber else True):
fileName = (callNumber or c) + "__" + sessUUID + self.OUTPUT_POSTFIX_LOG
if self.output(logDict, newPath, fileName, self.__getOutputHeader(logDict, c, sessUUID)):
fileNameList.append(fileName)
return len(fileNameList), newPath, fileNameList
def outputOriginLog(self, outputPath, sessUUID = "", callNumber = "", name = ""):
return self.__outputOriginLog(outputPath, sessUUID = sessUUID, callNumber = callNumber, name = name)
    # ---------------------------------------------- write detailed results to file ----------------------------------------------
def __outputDetails(self, outputPath, fileName = "", callNumber = "", sessUUID = "", targConclusion=""):
fileNameList = []
sessLogInfoDict = self.getSessLogInfoDict()
newPath = outputPath
if sessUUID:
sessDict = sessLogInfoDict.get(sessUUID, False)
if not sessDict:
return len(fileNameList), newPath, fileNameList
if sessDict[self.SESS_FS_CALLNUMBER_DK] == callNumber if callNumber else True:
logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK]
newFileName = fileName or (sessDict[self.SESS_FS_CALLNUMBER_DK] + "__" + sessUUID + "__" + targConclusion + self.OUTPUT_POSTFIX_DETAILS)
if self.outputEx(newPath, newFileName, self.getDetails(sessUUID, targConclusion)):
fileNameList.append(newFileName)
else:
if not callNumber:
                # build the output directory name from the source log file names
if not fileName:
#orgLogFileNames = "_".join([os.path.split(p)[-1] for p in self.getPath()])
orgLogFileNames = callNumber + sessUUID + targConclusion + "_tmp"
newPath = os.path.join(outputPath, orgLogFileNames)
else:
newPath = os.path.join(outputPath, fileName)
            # create the output dir
if not self.makeDir(newPath):
return len(fileNameList), newPath, fileNameList
for sessUUID in sessLogInfoDict.keys():
sessDict = sessLogInfoDict[sessUUID]
if sessDict[self.SESS_FS_CALLNUMBER_DK] == callNumber if callNumber else True:
logDict = sessLogInfoDict[sessUUID][self.SESS_LOG_DK]
newFileName = sessDict[self.SESS_FS_CALLNUMBER_DK] + "__" + sessUUID + "__" + targConclusion + self.OUTPUT_POSTFIX_DETAILS
if self.outputEx(newPath, newFileName, self.getDetails(sessUUID, targConclusion)):
fileNameList.append(newFileName)
return len(fileNameList), newPath, fileNameList
def outputDetails(self, outputPath, fileName = "", sessUUID = "", callNumber = "", conclusion = ""):
return self.__outputDetails(outputPath, fileName = fileName, sessUUID = sessUUID, callNumber = callNumber, targConclusion = conclusion)
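# Minimal usage sketch (illustrative; in this project the analyzer is driven
# by the surrounding CLI, and the log path below is hypothetical):
# analyzer = FsLogAnalyzer()
# ok, err = analyzer.load('/var/log/freeswitch/freeswitch.log')
# if ok:
#     analyzer.run()
#     analyzer.showResult(conclusion='ERROR')   # list failed calls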
| 2 | 2 |
examples/boost_python/demo3_numpy/demo3.py | davidcortesortuno/finmag | 10 | 12797000 | <reponame>davidcortesortuno/finmag<filename>examples/boost_python/demo3_numpy/demo3.py
from __future__ import print_function  # Python 2/3 compatible print
import numpy as np
import demo3_module
a = np.array([[1, 2], [3, 4]], dtype=float)
print "Trace of a:", demo3_module.trace(a)
| 2.25 | 2 |
lib/mvc/notification_window/controller.py | realmar/gnome-quota-indicator | 0 | 12797001 | <reponame>realmar/gnome-quota-indicator<gh_stars>0
"""Controller of the notification window."""
from lib.mvc.bases import ControllerBase
from lib.mvc.notification_window.model import NotificationWindowModel
from lib.mvc.notification_window.view import NotificationWindowView
from lib.exception_feedback import add_default_exception_handling
from functools import partial
class NotificationWindowController(ControllerBase):
"""Controller of the notification window."""
def __init__(self, app):
"""Ctor of NotificationWindowController."""
super().__init__(app, NotificationWindowModel, NotificationWindowView)
self.view.register_open_usage_event(
partial(self.view.app.quota_window.view.cb_show, 0, 0))
self.view.initialize()
| 1.84375 | 2 |
setup.py | Daiwv/bittrex_autotrader | 1 | 12797002 | <filename>setup.py
from distutils.core import setup
version = '0.0.6'
setup(
name = 'bittrex_autotrader',
packages = ['bittrex_autotrader'],
version = version,
description = 'Bittrex currency exchange autotrading script in a nutshell.',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/nuxy/bittrex_autotrader',
    download_url = 'https://github.com/nuxy/bittrex_autotrader/archive/{0}.tar.gz'.format(version),
keywords = ['trading-bot', 'api-client', 'cryptocurrency', 'bittrex'],
classifiers = [],
)
| 1.328125 | 1 |
smlb/optimizers/random_optimizer.py | CitrineInformatics/smlb | 6 | 12797003 | """An "optimizer" that draws random samples.
Scientific Machine Learning Benchmark
A benchmark of regression models in chem- and materials informatics.
2019-2020, Citrine Informatics.
"""
from typing import Optional, Any
from smlb import (
params,
Random,
RandomVectorSampler,
VectorSpaceData,
Optimizer,
TrackedTransformation,
)
class RandomOptimizer(Optimizer, Random):
"""Draws random samples.
Parameters:
num_samples: the number of random samples to draw
domain: optional domain from which to draw values. If not provided, then the
optimization domain is taken to be that of `data` parameter passed to `optimize()`.
rng: pseudo-random number generator
"""
def __init__(self, num_samples: int, domain: Optional[Any] = None, rng=None, **kwargs):
super().__init__(rng=rng, **kwargs)
self._num_samples = params.integer(num_samples, above=0)
self._sampler = RandomVectorSampler(size=self._num_samples, domain=domain, rng=rng)
def _minimize(self, data: VectorSpaceData, function_tracker: TrackedTransformation):
"""Generate num_samples random samples and evaluate them."""
samples = self._sampler.apply(data)
function_tracker.apply(samples)
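# Usage sketch (illustrative; assumes an smlb VectorSpaceData `data` and a
# TrackedTransformation `tracker` wrapping the objective to minimize):
# opt = RandomOptimizer(num_samples=100, rng=0)
# opt.optimize(data, tracker)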
| 3.21875 | 3 |
models/windows81_general_configuration.py | MIchaelMainer/msgraph-v10-models-python | 1 | 12797004 | <reponame>MIchaelMainer/msgraph-v10-models-python<filename>models/windows81_general_configuration.py
# -*- coding: utf-8 -*-
'''
# Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information.
#
# This file was generated and any changes will be overwritten.
'''
from __future__ import unicode_literals
from ..model.internet_site_security_level import InternetSiteSecurityLevel
from ..model.site_security_level import SiteSecurityLevel
from ..model.required_password_type import RequiredPasswordType
from ..model.windows_user_account_control_settings import WindowsUserAccountControlSettings
from ..one_drive_object_base import OneDriveObjectBase
class Windows81GeneralConfiguration(OneDriveObjectBase):
def __init__(self, prop_dict={}):
self._prop_dict = prop_dict
@property
def accounts_block_adding_non_microsoft_account_email(self):
"""
Gets and sets the accountsBlockAddingNonMicrosoftAccountEmail
Returns:
bool:
The accountsBlockAddingNonMicrosoftAccountEmail
"""
if "accountsBlockAddingNonMicrosoftAccountEmail" in self._prop_dict:
return self._prop_dict["accountsBlockAddingNonMicrosoftAccountEmail"]
else:
return None
@accounts_block_adding_non_microsoft_account_email.setter
def accounts_block_adding_non_microsoft_account_email(self, val):
self._prop_dict["accountsBlockAddingNonMicrosoftAccountEmail"] = val
@property
def apply_only_to_windows81(self):
"""
Gets and sets the applyOnlyToWindows81
Returns:
bool:
The applyOnlyToWindows81
"""
if "applyOnlyToWindows81" in self._prop_dict:
return self._prop_dict["applyOnlyToWindows81"]
else:
return None
@apply_only_to_windows81.setter
def apply_only_to_windows81(self, val):
self._prop_dict["applyOnlyToWindows81"] = val
@property
def browser_block_autofill(self):
"""
Gets and sets the browserBlockAutofill
Returns:
bool:
The browserBlockAutofill
"""
if "browserBlockAutofill" in self._prop_dict:
return self._prop_dict["browserBlockAutofill"]
else:
return None
@browser_block_autofill.setter
def browser_block_autofill(self, val):
self._prop_dict["browserBlockAutofill"] = val
@property
def browser_block_automatic_detection_of_intranet_sites(self):
"""
Gets and sets the browserBlockAutomaticDetectionOfIntranetSites
Returns:
bool:
The browserBlockAutomaticDetectionOfIntranetSites
"""
if "browserBlockAutomaticDetectionOfIntranetSites" in self._prop_dict:
return self._prop_dict["browserBlockAutomaticDetectionOfIntranetSites"]
else:
return None
@browser_block_automatic_detection_of_intranet_sites.setter
def browser_block_automatic_detection_of_intranet_sites(self, val):
self._prop_dict["browserBlockAutomaticDetectionOfIntranetSites"] = val
@property
def browser_block_enterprise_mode_access(self):
"""
Gets and sets the browserBlockEnterpriseModeAccess
Returns:
bool:
The browserBlockEnterpriseModeAccess
"""
if "browserBlockEnterpriseModeAccess" in self._prop_dict:
return self._prop_dict["browserBlockEnterpriseModeAccess"]
else:
return None
@browser_block_enterprise_mode_access.setter
def browser_block_enterprise_mode_access(self, val):
self._prop_dict["browserBlockEnterpriseModeAccess"] = val
@property
def browser_block_java_script(self):
"""
Gets and sets the browserBlockJavaScript
Returns:
bool:
The browserBlockJavaScript
"""
if "browserBlockJavaScript" in self._prop_dict:
return self._prop_dict["browserBlockJavaScript"]
else:
return None
@browser_block_java_script.setter
def browser_block_java_script(self, val):
self._prop_dict["browserBlockJavaScript"] = val
@property
def browser_block_plugins(self):
"""
Gets and sets the browserBlockPlugins
Returns:
bool:
The browserBlockPlugins
"""
if "browserBlockPlugins" in self._prop_dict:
return self._prop_dict["browserBlockPlugins"]
else:
return None
@browser_block_plugins.setter
def browser_block_plugins(self, val):
self._prop_dict["browserBlockPlugins"] = val
@property
def browser_block_popups(self):
"""
Gets and sets the browserBlockPopups
Returns:
bool:
The browserBlockPopups
"""
if "browserBlockPopups" in self._prop_dict:
return self._prop_dict["browserBlockPopups"]
else:
return None
@browser_block_popups.setter
def browser_block_popups(self, val):
self._prop_dict["browserBlockPopups"] = val
@property
def browser_block_sending_do_not_track_header(self):
"""
Gets and sets the browserBlockSendingDoNotTrackHeader
Returns:
bool:
The browserBlockSendingDoNotTrackHeader
"""
if "browserBlockSendingDoNotTrackHeader" in self._prop_dict:
return self._prop_dict["browserBlockSendingDoNotTrackHeader"]
else:
return None
@browser_block_sending_do_not_track_header.setter
def browser_block_sending_do_not_track_header(self, val):
self._prop_dict["browserBlockSendingDoNotTrackHeader"] = val
@property
def browser_block_single_word_entry_on_intranet_sites(self):
"""
Gets and sets the browserBlockSingleWordEntryOnIntranetSites
Returns:
bool:
The browserBlockSingleWordEntryOnIntranetSites
"""
if "browserBlockSingleWordEntryOnIntranetSites" in self._prop_dict:
return self._prop_dict["browserBlockSingleWordEntryOnIntranetSites"]
else:
return None
@browser_block_single_word_entry_on_intranet_sites.setter
def browser_block_single_word_entry_on_intranet_sites(self, val):
self._prop_dict["browserBlockSingleWordEntryOnIntranetSites"] = val
@property
def browser_require_smart_screen(self):
"""
Gets and sets the browserRequireSmartScreen
Returns:
bool:
The browserRequireSmartScreen
"""
if "browserRequireSmartScreen" in self._prop_dict:
return self._prop_dict["browserRequireSmartScreen"]
else:
return None
@browser_require_smart_screen.setter
def browser_require_smart_screen(self, val):
self._prop_dict["browserRequireSmartScreen"] = val
@property
def browser_enterprise_mode_site_list_location(self):
"""
Gets and sets the browserEnterpriseModeSiteListLocation
Returns:
str:
The browserEnterpriseModeSiteListLocation
"""
if "browserEnterpriseModeSiteListLocation" in self._prop_dict:
return self._prop_dict["browserEnterpriseModeSiteListLocation"]
else:
return None
@browser_enterprise_mode_site_list_location.setter
def browser_enterprise_mode_site_list_location(self, val):
self._prop_dict["browserEnterpriseModeSiteListLocation"] = val
@property
def browser_internet_security_level(self):
"""
Gets and sets the browserInternetSecurityLevel
Returns:
:class:`InternetSiteSecurityLevel<onedrivesdk.model.internet_site_security_level.InternetSiteSecurityLevel>`:
The browserInternetSecurityLevel
"""
if "browserInternetSecurityLevel" in self._prop_dict:
if isinstance(self._prop_dict["browserInternetSecurityLevel"], OneDriveObjectBase):
return self._prop_dict["browserInternetSecurityLevel"]
else :
self._prop_dict["browserInternetSecurityLevel"] = InternetSiteSecurityLevel(self._prop_dict["browserInternetSecurityLevel"])
return self._prop_dict["browserInternetSecurityLevel"]
return None
@browser_internet_security_level.setter
def browser_internet_security_level(self, val):
self._prop_dict["browserInternetSecurityLevel"] = val
@property
def browser_intranet_security_level(self):
"""
Gets and sets the browserIntranetSecurityLevel
Returns:
:class:`SiteSecurityLevel<onedrivesdk.model.site_security_level.SiteSecurityLevel>`:
The browserIntranetSecurityLevel
"""
if "browserIntranetSecurityLevel" in self._prop_dict:
if isinstance(self._prop_dict["browserIntranetSecurityLevel"], OneDriveObjectBase):
return self._prop_dict["browserIntranetSecurityLevel"]
else :
self._prop_dict["browserIntranetSecurityLevel"] = SiteSecurityLevel(self._prop_dict["browserIntranetSecurityLevel"])
return self._prop_dict["browserIntranetSecurityLevel"]
return None
@browser_intranet_security_level.setter
def browser_intranet_security_level(self, val):
self._prop_dict["browserIntranetSecurityLevel"] = val
@property
def browser_logging_report_location(self):
"""
Gets and sets the browserLoggingReportLocation
Returns:
str:
The browserLoggingReportLocation
"""
if "browserLoggingReportLocation" in self._prop_dict:
return self._prop_dict["browserLoggingReportLocation"]
else:
return None
@browser_logging_report_location.setter
def browser_logging_report_location(self, val):
self._prop_dict["browserLoggingReportLocation"] = val
@property
def browser_require_high_security_for_restricted_sites(self):
"""
Gets and sets the browserRequireHighSecurityForRestrictedSites
Returns:
bool:
The browserRequireHighSecurityForRestrictedSites
"""
if "browserRequireHighSecurityForRestrictedSites" in self._prop_dict:
return self._prop_dict["browserRequireHighSecurityForRestrictedSites"]
else:
return None
@browser_require_high_security_for_restricted_sites.setter
def browser_require_high_security_for_restricted_sites(self, val):
self._prop_dict["browserRequireHighSecurityForRestrictedSites"] = val
@property
def browser_require_firewall(self):
"""
Gets and sets the browserRequireFirewall
Returns:
bool:
The browserRequireFirewall
"""
if "browserRequireFirewall" in self._prop_dict:
return self._prop_dict["browserRequireFirewall"]
else:
return None
@browser_require_firewall.setter
def browser_require_firewall(self, val):
self._prop_dict["browserRequireFirewall"] = val
@property
def browser_require_fraud_warning(self):
"""
Gets and sets the browserRequireFraudWarning
Returns:
bool:
The browserRequireFraudWarning
"""
if "browserRequireFraudWarning" in self._prop_dict:
return self._prop_dict["browserRequireFraudWarning"]
else:
return None
@browser_require_fraud_warning.setter
def browser_require_fraud_warning(self, val):
self._prop_dict["browserRequireFraudWarning"] = val
@property
def browser_trusted_sites_security_level(self):
"""
Gets and sets the browserTrustedSitesSecurityLevel
Returns:
:class:`SiteSecurityLevel<onedrivesdk.model.site_security_level.SiteSecurityLevel>`:
The browserTrustedSitesSecurityLevel
"""
if "browserTrustedSitesSecurityLevel" in self._prop_dict:
if isinstance(self._prop_dict["browserTrustedSitesSecurityLevel"], OneDriveObjectBase):
return self._prop_dict["browserTrustedSitesSecurityLevel"]
else :
self._prop_dict["browserTrustedSitesSecurityLevel"] = SiteSecurityLevel(self._prop_dict["browserTrustedSitesSecurityLevel"])
return self._prop_dict["browserTrustedSitesSecurityLevel"]
return None
@browser_trusted_sites_security_level.setter
def browser_trusted_sites_security_level(self, val):
self._prop_dict["browserTrustedSitesSecurityLevel"] = val
@property
def cellular_block_data_roaming(self):
"""
Gets and sets the cellularBlockDataRoaming
Returns:
bool:
The cellularBlockDataRoaming
"""
if "cellularBlockDataRoaming" in self._prop_dict:
return self._prop_dict["cellularBlockDataRoaming"]
else:
return None
@cellular_block_data_roaming.setter
def cellular_block_data_roaming(self, val):
self._prop_dict["cellularBlockDataRoaming"] = val
@property
def diagnostics_block_data_submission(self):
"""
Gets and sets the diagnosticsBlockDataSubmission
Returns:
bool:
The diagnosticsBlockDataSubmission
"""
if "diagnosticsBlockDataSubmission" in self._prop_dict:
return self._prop_dict["diagnosticsBlockDataSubmission"]
else:
return None
@diagnostics_block_data_submission.setter
def diagnostics_block_data_submission(self, val):
self._prop_dict["diagnosticsBlockDataSubmission"] = val
@property
def password_block_picture_password_and_pin(self):
"""
Gets and sets the passwordBlockPicturePasswordAndPin
Returns:
bool:
The passwordBlockPicturePasswordAndPin
"""
if "passwordBlockPicturePasswordAndPin" in self._prop_dict:
return self._prop_dict["passwordBlockPicturePasswordAndPin"]
else:
return None
@password_block_picture_password_and_pin.setter
def password_block_picture_password_and_pin(self, val):
self._prop_dict["passwordBlockPicturePasswordAndPin"] = val
@property
def password_expiration_days(self):
"""
Gets and sets the passwordExpirationDays
Returns:
int:
The passwordExpirationDays
"""
if "passwordExpirationDays" in self._prop_dict:
return self._prop_dict["passwordExpirationDays"]
else:
return None
@password_expiration_days.setter
def password_expiration_days(self, val):
self._prop_dict["passwordExpirationDays"] = val
@property
def password_minimum_length(self):
"""
Gets and sets the passwordMinimumLength
Returns:
int:
The passwordMinimumLength
"""
if "passwordMinimumLength" in self._prop_dict:
return self._prop_dict["passwordMinimumLength"]
else:
return None
@password_minimum_length.setter
def password_minimum_length(self, val):
self._prop_dict["passwordMinimumLength"] = val
@property
def password_minutes_of_inactivity_before_screen_timeout(self):
"""
Gets and sets the passwordMinutesOfInactivityBeforeScreenTimeout
Returns:
int:
The passwordMinutesOfInactivityBeforeScreenTimeout
"""
if "passwordMinutesOfInactivityBeforeScreenTimeout" in self._prop_dict:
return self._prop_dict["passwordMinutesOfInactivityBeforeScreenTimeout"]
else:
return None
@password_minutes_of_inactivity_before_screen_timeout.setter
def password_minutes_of_inactivity_before_screen_timeout(self, val):
self._prop_dict["passwordMinutesOfInactivityBeforeScreenTimeout"] = val
@property
def password_minimum_character_set_count(self):
"""
Gets and sets the passwordMinimumCharacterSetCount
Returns:
int:
The passwordMinimumCharacterSetCount
"""
if "passwordMinimumCharacterSetCount" in self._prop_dict:
return self._prop_dict["passwordMinimumCharacterSetCount"]
else:
return None
@password_minimum_character_set_count.setter
def password_minimum_character_set_count(self, val):
self._prop_dict["passwordMinimumCharacterSetCount"] = val
@property
def password_previous_password_block_count(self):
"""
Gets and sets the passwordPreviousPasswordBlockCount
Returns:
int:
The passwordPreviousPasswordBlockCount
"""
if "passwordPreviousPasswordBlockCount" in self._prop_dict:
return self._prop_dict["passwordPreviousPasswordBlockCount"]
else:
return None
@password_previous_password_block_count.setter
def password_previous_password_block_count(self, val):
self._prop_dict["passwordPreviousPasswordBlockCount"] = val
@property
def password_required_type(self):
"""
Gets and sets the passwordRequiredType
Returns:
:class:`RequiredPasswordType<onedrivesdk.model.required_password_type.RequiredPasswordType>`:
The passwordRequiredType
"""
if "passwordRequiredType" in self._prop_dict:
if isinstance(self._prop_dict["passwordRequiredType"], OneDriveObjectBase):
return self._prop_dict["passwordRequiredType"]
else :
self._prop_dict["passwordRequiredType"] = RequiredPasswordType(self._prop_dict["passwordRequiredType"])
return self._prop_dict["passwordRequiredType"]
return None
@password_required_type.setter
def password_required_type(self, val):
self._prop_dict["passwordRequiredType"] = val
@property
def password_sign_in_failure_count_before_factory_reset(self):
"""
Gets and sets the passwordSignInFailureCountBeforeFactoryReset
Returns:
int:
The passwordSignInFailureCountBeforeFactoryReset
"""
if "passwordSignInFailureCountBeforeFactoryReset" in self._prop_dict:
return self._prop_dict["passwordSignInFailureCountBeforeFactoryReset"]
else:
return None
@password_sign_in_failure_count_before_factory_reset.setter
def password_sign_in_failure_count_before_factory_reset(self, val):
self._prop_dict["passwordSignInFailureCountBeforeFactoryReset"] = val
@property
def storage_require_device_encryption(self):
"""
Gets and sets the storageRequireDeviceEncryption
Returns:
bool:
The storageRequireDeviceEncryption
"""
if "storageRequireDeviceEncryption" in self._prop_dict:
return self._prop_dict["storageRequireDeviceEncryption"]
else:
return None
@storage_require_device_encryption.setter
def storage_require_device_encryption(self, val):
self._prop_dict["storageRequireDeviceEncryption"] = val
@property
def updates_require_automatic_updates(self):
"""
Gets and sets the updatesRequireAutomaticUpdates
Returns:
bool:
The updatesRequireAutomaticUpdates
"""
if "updatesRequireAutomaticUpdates" in self._prop_dict:
return self._prop_dict["updatesRequireAutomaticUpdates"]
else:
return None
@updates_require_automatic_updates.setter
def updates_require_automatic_updates(self, val):
self._prop_dict["updatesRequireAutomaticUpdates"] = val
@property
def user_account_control_settings(self):
"""
Gets and sets the userAccountControlSettings
Returns:
:class:`WindowsUserAccountControlSettings<onedrivesdk.model.windows_user_account_control_settings.WindowsUserAccountControlSettings>`:
The userAccountControlSettings
"""
if "userAccountControlSettings" in self._prop_dict:
if isinstance(self._prop_dict["userAccountControlSettings"], OneDriveObjectBase):
return self._prop_dict["userAccountControlSettings"]
else :
self._prop_dict["userAccountControlSettings"] = WindowsUserAccountControlSettings(self._prop_dict["userAccountControlSettings"])
return self._prop_dict["userAccountControlSettings"]
return None
@user_account_control_settings.setter
def user_account_control_settings(self, val):
self._prop_dict["userAccountControlSettings"] = val
@property
def work_folders_url(self):
"""
Gets and sets the workFoldersUrl
Returns:
str:
The workFoldersUrl
"""
if "workFoldersUrl" in self._prop_dict:
return self._prop_dict["workFoldersUrl"]
else:
return None
@work_folders_url.setter
def work_folders_url(self, val):
self._prop_dict["workFoldersUrl"] = val
| 1.945313 | 2 |
deplodocker/formatter.py | RCheese/deplodocker | 5 | 12797005 | try:
import orjson as json
except ImportError:
import json
import toml
import yaml
def format_requirements(data: dict) -> str:
    """Render {group: {package: version}} as a requirements.txt-style string."""
    result = []
    for group, values in data.items():
        result.append(f"### {group.upper()}\n")
        for package, version in values.items():
            result.append(f"{package}=={version}\n")
    return "".join(result)
def formatter(data: dict, format: str) -> str:
if format == "json":
proxy = json.dumps(data)
if isinstance(proxy, bytes):
proxy = proxy.decode()
return proxy
elif format == "toml":
return toml.dumps(data)
elif format == "yaml":
return yaml.dump(data)
elif format == "requirements.txt":
return format_requirements(data)
else:
raise TypeError(f"Invalid format {format}")
| 2.734375 | 3 |
launch/launchLESHIT.py | mcaldana/CubismUP_3D | 8 | 12797006 | #!/usr/bin/env python3
import os, numpy as np, argparse
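# Empirical fits for forced homogeneous isotropic turbulence. etaFit is the
# Kolmogorov length eps^(-1/4) * nu^(3/4); relFit and lambdaFit estimate the
# Taylor-scale Reynolds number and Taylor microscale (fit coefficients are
# assumed to come from companion DNS data).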
def relFit(nu, eps): return 7.33972668 * np.power(eps, 1/6.0) / np.sqrt(nu)
def etaFit(nu, eps): return np.power(eps, -0.25) * np.power(nu, 0.75)
def lambdaFit(nu, eps): return 5.35507603 * np.power(eps,-1/6.0) * np.sqrt(nu);
def runspec(nu, eps, run, cs=None):
if cs is not None:
return "HITBND_LES_EXT2pi_EPS%.03f_NU%.04f_CS%.02f_RUN%d" \
% (eps, nu, run, cs)
else:
return "HITBND_DNS_EXT2pi_EPS%.03f_NU%.04f_RUN%d" \
% (eps, nu, run)
def getSettings(nu, eps, cs):
if cs is not None:
options = '-sgs SSM -cs %f -bpdx 4 -bpdy 4 -bpdz 4 -CFL 0.1 ' % cs
else:
options = '-bpdx 12 -bpdy 12 -bpdz 12 -CFL 0.02 '
tAnalysis = np.sqrt(nu / eps)
return options + '-extentx 6.2831853072 -dump2D 0 -dump3D 0 ' \
'-tdump 1 -BC_x periodic -BC_y periodic -BC_z periodic ' \
'-spectralIC fromFit -initCond HITurbulence -tAnalysis %f ' \
'-compute-dissipation 1 -nprocsx 1 -nprocsy 1 -nprocsz 1 ' \
'-spectralForcing 1 -tend 100 -keepMomentumConstant 1 ' \
'-analysis HIT -nu %f -energyInjectionRate %f ' \
% (tAnalysis, nu, eps)
def launchEuler(nu, eps, run):
runname = runspec(nu, eps, run)
print(runname)
tAnalysis = np.sqrt(nu / eps)
os.system("export NU=%f \n export EPS=%f \n export TANALYSIS=%f \n " \
"echo $NU $EPS \n ./launchEuler.sh settingsHIT_DNS.sh %s " \
% (nu, eps, tAnalysis, runname) )
def launchDaint(nCases, les):
SCRATCH = os.getenv('SCRATCH')
HOME = os.getenv('HOME')
f = open('HIT_sbatch','w')
f.write('#!/bin/bash -l \n')
if les: f.write('#SBATCH --job-name=LES_HIT \n')
else: f.write('#SBATCH --job-name=DNS_HIT \n')
f.write('#SBATCH --time=24:00:00 \n')
f.write('#SBATCH --output=out.%j.%a.txt \n')
f.write('#SBATCH --error=err.%j.%a.txt \n')
f.write('#SBATCH --constraint=gpu \n')
f.write('#SBATCH --account=s929 \n')
f.write('#SBATCH --array=0-%d \n' % (nCases-1))
#f.write('#SBATCH --partition=normal \n')
#f.write('#SBATCH --ntasks-per-node=1 \n')
f.write('ind=$SLURM_ARRAY_TASK_ID \n')
if les:
f.write('RUNDIRN=`./launchLESHIT.py --LES --case ${ind} --printName` \n')
f.write('OPTIONS=`./launchLESHIT.py --LES --case ${ind} --printOptions` \n')
else:
f.write('RUNDIRN=`./launchLESHIT.py --case ${ind} --printName` \n')
f.write('OPTIONS=`./launchLESHIT.py --case ${ind} --printOptions` \n')
f.write('mkdir -p %s/CubismUP3D/${RUNDIRN} \n' % SCRATCH)
f.write('cd %s/CubismUP3D/${RUNDIRN} \n' % SCRATCH)
f.write('cp %s/CubismUP_3D/bin/simulation ./exec \n' % HOME)
f.write('export OMP_NUM_THREADS=12 \n')
f.write('export CRAY_CUDA_MPS=1 \n')
f.write('srun --ntasks 1 --ntasks-per-node=1 ./exec ${OPTIONS} \n')
f.close()
os.system('sbatch HIT_sbatch')
if __name__ == '__main__':
parser = argparse.ArgumentParser(
        description = "Launch or configure DNS/LES homogeneous isotropic turbulence runs.")
parser.add_argument('--printName', dest='printName',
action='store_true', help="Only print run name.")
parser.set_defaults(printName=False)
parser.add_argument('--printOptions', dest='printOptions',
action='store_true', help="Only print run options.")
parser.set_defaults(printOptions=False)
parser.add_argument('--launchDaint', dest='launchDaint',
action='store_true', help="Only print run options.")
parser.set_defaults(launchDaint=False)
parser.add_argument('--launchEuler', dest='launchEuler',
action='store_true', help="Only print run options.")
parser.set_defaults(launchEuler=False)
parser.add_argument('--LES', dest='LES', action='store_true',
help="Triggers LES modeling.")
parser.set_defaults(LES=False)
parser.add_argument('--case', type = int, default = -1,
help="Simulation case.")
args = parser.parse_args()
if args.LES: rangeles = np.linspace(0.16, 0.24, 9)
else: rangeles = [None]
NUS, EPS, RUN, CSS = [], [], [], []
h = 2 * np.pi / 16 / 12
for nu in np.logspace(np.log10(0.002), np.log10(0.02), 16) :
for eps in np.logspace(np.log10(0.01), np.log10(2.0), 16) :
if relFit(nu, eps) > 100 or relFit(nu, eps) < 20: continue
if lambdaFit(nu, eps) > 0.1 * 2 * np.pi: continue
if etaFit(nu, eps) > h or etaFit(nu, eps) < h/8: continue
for les in rangeles :
for i in [0, 1, 2] :
NUS,EPS,RUN,CSS = NUS+[nu], EPS+[eps], RUN+[i], CSS+[les]
nCases = len(NUS)
#print('Defined %d cases' % nCases)
if args.launchDaint: launchDaint(nCases, args.LES)
if args.case < 0: cases = range(nCases)
else: cases = [args.case]
for i in cases:
if args.printOptions:
print( getSettings(NUS[i], EPS[i], CSS[i]) )
if args.printName:
print( runspec(NUS[i], EPS[i], RUN[i], CSS[i]) )
if args.launchEuler:
launchEuler(NUS[i], EPS[i], RUN[i])
#for nu in [0.002, 0.004, 0.008] :
# for eps in [0.02, 0.04, 0.08, 0.16, 0.32] :
# tke0 = 2.77578963 * np.power(eps, (2.0/3.0) )
# for scal in [2, 3] :
# ext = scal * np.pi
# os.system("\
# export NU=%f \n\
# export EPS=%f \n\
# export TKE0=%f \n\
# export EXT=%f \n\
# echo $NU $EPS $TKE0 $EXT \n\
# ./launchEuler.sh settingsHIT_DNS.sh HIT_DNS_EXT%dpi_EPS%.02f_NU%.03f"
# % (nu, eps, tke0, ext, scal, eps, nu))
#for nu in [0.001, 0.002, 0.004, 0.008, 0.016] :
# for eps in [0.02, 0.04, 0.08, 0.16, 0.32, 0.64] :
| 2.078125 | 2 |
eden/iterated_maximum_subarray.py | smautner/EDeN | 42 | 12797007 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import numpy as np
def find_smallest_positive(alist):
# find first positive value
minpos = -1
for x in alist:
if x > 0:
minpos = x
break
if minpos > 0:
# find smallest positive value
for x in alist:
if x > 0 and x < minpos:
minpos = x
return minpos
def rebase_to_smallest_positive(alist):
base = find_smallest_positive(alist)
if base == -1:
return None
else:
return [x - base for x in alist]
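# e.g. rebase_to_smallest_positive([3, -1, 5]) -> [0, -4, 2]: the smallest
# positive entry becomes zero, so a later maximum-subarray pass can find new optima.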
def compute_maximum_subarray(score_vector=None):
begin_temp = begin = end = 0
start_val = score_vector[0]
max_ending_here = max_so_far = start_val
for pos, x in enumerate(score_vector[1:], 1):
if max_ending_here < 0:
max_ending_here = x
begin_temp = pos
else:
max_ending_here = max_ending_here + x
if max_ending_here > max_so_far:
max_so_far = max_ending_here
begin = begin_temp
end = pos
return begin, end
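# e.g. compute_maximum_subarray([-1.0, 2.0, 3.0, -4.0, 1.0]) -> (1, 2): the
# maximum-sum window covers positions 1..2 (inclusive) with sum 5.0.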
def compute_iterated_maximum_subarray(seq=None, score=None, min_subarray_size=None, max_subarray_size=None, output='minimal', margin=1):
original_score = score
while True:
# find (begin,end) of subarray in each element
begin, end = compute_maximum_subarray(score_vector=score)
# check that the retrieved subarray is larger than min_subarray_size
if end - begin < min_subarray_size - 1:
break
else:
# extract maximum subarray
# NOTE: in order to account for border effects we expand on the left and on the right by 'margin'
first = max(0, begin - margin)
            # NOTE: we return + 1 for the rightmost position to be compliant with the 'one after the end' semantics
last = min(len(seq), end + margin + 1)
subarray = seq[first: last]
subarray_size = len(subarray)
if max_subarray_size == -1 or subarray_size <= max_subarray_size:
# store data
acc = 0
for x in original_score[begin: end + 1]:
acc += x
if output == 'minimal':
subarray = {'subarray_string': ''.join(subarray)}
else:
subarray = {'subarray_string': ''.join(subarray), 'subarray': subarray, 'begin': first,
'end': last, 'size': subarray_size, 'seq': seq, 'score': acc}
yield subarray
            if max_subarray_size != -1 and subarray_size > max_subarray_size:
# if the subarray is too large then rebase the score list, i.e. offset by the smallest positive value
score = rebase_to_smallest_positive(score)
if score is None:
break
else:
# remove current subarray by zeroing importance values of subarray
score[first: last] = [0.0] * subarray_size
# iterate after removal of current subarray
def extract_sequence_and_score(graph=None):
# make dict with positions as keys and lists of ids as values
pos_to_ids = defaultdict(list)
for u in graph.nodes():
        if 'position' not in graph.node[u]:  # every node must carry a 'position' attribute
raise Exception('Missing "position" attribute in node:%s %s' % (u, graph.node[u]))
else:
pos = graph.node[u]['position']
# accumulate all node ids
pos_to_ids[pos] += [u]
# extract sequence of labels and importances
seq = [None] * len(pos_to_ids)
score = [0] * len(pos_to_ids)
for pos in sorted(pos_to_ids):
ids = pos_to_ids[pos]
labels = [graph.node[u].get('label', 'N/A') for u in ids]
# check that all labels for the same position are identical
assert(sum([1 for label in labels if label == labels[0]]) == len(labels)
), 'ERROR: non identical labels referring to same position: %s %s' % (pos, labels)
seq[pos] = labels[0]
# average all importance score for the same position
importances = [graph.node[u].get('importance', 0) for u in ids]
score[pos] = np.mean(importances)
return seq, score
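# Hedged usage sketch (assumes a networkx 1.x graph carrying the 'position',
# 'label' and 'importance' node attributes this function reads):
# import networkx as nx
# g = nx.Graph()
# g.add_node(0, position=0, label='A', importance=0.2)
# g.add_node(1, position=1, label='C', importance=0.9)
# extract_sequence_and_score(g)  # -> (['A', 'C'], [0.2, 0.9])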
def compute_max_subarrays_sequence(seq=None, score=None, min_subarray_size=None, max_subarray_size=None, output='minimal', margin=1):
# extract subarrays
for subarray in compute_iterated_maximum_subarray(seq=seq, score=score, min_subarray_size=min_subarray_size, max_subarray_size=max_subarray_size, output=output, margin=margin):
yield subarray
def compute_max_subarrays(graph=None, min_subarray_size=None, max_subarray_size=None, output='minimal', margin=1):
seq, score = extract_sequence_and_score(graph)
for subarray in compute_max_subarrays_sequence(seq=seq, score=score, min_subarray_size=min_subarray_size, max_subarray_size=max_subarray_size, output=output, margin=margin):
yield subarray
| 3.03125 | 3 |
_unittests/ut_sklapi/test_onnx_helper.py | Exlsunshine/mlprodict | 0 | 12797008 | <filename>_unittests/ut_sklapi/test_onnx_helper.py
"""
@brief test log(time=2s)
"""
import unittest
from logging import getLogger
import numpy
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Binarizer, StandardScaler, OneHotEncoder
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import FloatTensorType
from skl2onnx.helpers.onnx_helper import load_onnx_model, save_onnx_model
from skl2onnx.helpers.onnx_helper import select_model_inputs_outputs
from skl2onnx.helpers.onnx_helper import enumerate_model_node_outputs
from pyquickhelper.pycode import ExtTestCase
class TestOnnxHelper(ExtTestCase):
def setUp(self):
logger = getLogger('skl2onnx')
logger.disabled = True
def get_model(self, model):
try:
import onnxruntime
assert onnxruntime is not None
except ImportError:
return None
from onnxruntime import InferenceSession
session = InferenceSession(save_onnx_model(model))
return lambda X: session.run(None, {'input': X})[0]
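    # The callable above runs the serialized model through onnxruntime,
    # feeding the array under the graph input named 'input'; it is None when
    # onnxruntime is missing, so the tests below bail out early in that case.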
def test_onnx_helper_load_save(self):
model = make_pipeline(StandardScaler(), Binarizer(threshold=0.5))
X = numpy.array([[0.1, 1.1], [0.2, 2.2]])
model.fit(X)
model_onnx = convert_sklearn(
model, 'binarizer', [('input', FloatTensorType([1, 2]))])
filename = "temp_onnx_helper_load_save.onnx"
save_onnx_model(model_onnx, filename)
model = load_onnx_model(filename)
list(enumerate_model_node_outputs(model))
new_model = select_model_inputs_outputs(model, 'variable')
self.assertTrue(new_model.graph is not None) # pylint: disable=E1101
        tr1 = self.get_model(model)
        tr2 = self.get_model(new_model)
        if tr1 is None or tr2 is None:  # onnxruntime unavailable: skip the runtime check
            return
X = X.astype(numpy.float32)
X1 = tr1(X)
X2 = tr2(X)
self.assertEqual(X1.shape, (2, 2))
self.assertEqual(X2.shape, (2, 2))
def test_onnx_helper_load_save_init(self):
model = make_pipeline(Binarizer(), OneHotEncoder(
sparse=False), StandardScaler())
X = numpy.array([[0.1, 1.1], [0.2, 2.2], [0.4, 2.2], [0.2, 2.4]])
model.fit(X)
model_onnx = convert_sklearn(
model, 'pipe3', [('input', FloatTensorType([1, 2]))])
filename = "temp_onnx_helper_load_save.onnx"
save_onnx_model(model_onnx, filename)
model = load_onnx_model(filename)
list(enumerate_model_node_outputs(model))
new_model = select_model_inputs_outputs(model, 'variable')
self.assertTrue(new_model.graph is not None) # pylint: disable=E1101
        tr1 = self.get_model(model)
        tr2 = self.get_model(new_model)
        if tr1 is None or tr2 is None:  # onnxruntime unavailable: skip the runtime check
            return
X = X.astype(numpy.float32)
X1 = tr1(X)
X2 = tr2(X)
self.assertEqual(X1.shape, (4, 2))
self.assertEqual(X2.shape, (4, 2))
if __name__ == "__main__":
unittest.main()
| 2.46875 | 2 |
recipes/Python/286229_Remove_control_character_M_opened_html/recipe-286229.py | tdiprima/code | 2,023 | 12797009 | <reponame>tdiprima/code
import string
from sgmllib import SGMLParser  # base class used below (Python 2 stdlib)

class Stripper(SGMLParser):
    ...
    def handle_data(self, data):
        # drop carriage returns (the ^M control character) from parsed data
        data = string.replace(data, '\r', '')
        ...
| 2.3125 | 2 |
mmdet/models/bbox_heads/convfc_bbox_head_MH.py | Gitgigabyte/mmd | 1 | 12797010 | import torch.nn as nn
from ..registry import HEADS
from ..utils import ConvModule
from .bbox_head import BBoxHead
import torch
import torch.nn.functional as F
import mmcv
from mmdet.core import mask_target, mask_bg_target, force_fp32, bbox_target, bbox_overlaps
from ..losses import accuracy
from ..builder import build_loss
@HEADS.register_module
class ConvFCBBoxHead_MH(BBoxHead):
"""More general bbox head, with shared conv and fc layers and two optional
separated branches.
/-> cls convs -> cls fcs -> cls
shared convs -> shared fcs
\-> reg convs -> reg fcs -> reg
""" # noqa: W605
def __init__(self,
num_shared_convs=0,
num_shared_fcs=0,
num_cls_convs=0,
num_cls_fcs=0,
num_reg_convs=0,
num_reg_fcs=0,
mask_channels=256,
using_mask = True,
with_IoU = False,
conv_out_channels=256,
fc_out_channels=1024,
proto_combine='con',
feature_reduce=False,
# mask_conv=3,
conv_cfg=None,
norm_cfg=None,
using_bg=False,
using_refine=True,
loss_iou = dict(type='MSELoss', loss_weight=0.5),
*args,
**kwargs):
super(ConvFCBBoxHead_MH, self).__init__(*args, **kwargs)
assert (num_shared_convs + num_shared_fcs + num_cls_convs +
num_cls_fcs + num_reg_convs + num_reg_fcs > 0)
if num_cls_convs > 0 or num_reg_convs > 0:
assert num_shared_fcs == 0
if not self.with_cls:
assert num_cls_convs == 0 and num_cls_fcs == 0
if not self.with_reg:
assert num_reg_convs == 0 and num_reg_fcs == 0
self.num_shared_convs = num_shared_convs
self.using_mask = using_mask
self.num_shared_fcs = num_shared_fcs
self.num_cls_convs = num_cls_convs
self.num_cls_fcs = num_cls_fcs
self.num_reg_convs = num_reg_convs
self.num_reg_fcs = num_reg_fcs
self.conv_out_channels = conv_out_channels
self.fc_out_channels = fc_out_channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.using_bg = using_bg
self.using_refine = using_refine
self.with_IoU = with_IoU
self.mask_channels = mask_channels
self.proto_combine = proto_combine
self.feature_reduce = feature_reduce
if with_IoU:
self.iou_loss = build_loss(loss_iou)
# self.hint_conv = ConvModule(self.mask_channels, self.mask_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg)
# add shared convs and fcs
if self.proto_combine == 'None':
if self.feature_reduce:
self.reduce_con = ConvModule(self.in_channels, conv_out_channels - mask_channels, 1, conv_cfg=conv_cfg,
norm_cfg=norm_cfg)
else:
combine_channels = self.in_channels + self.mask_channels if proto_combine == 'con' else self.in_channels
self.combine = ConvModule(combine_channels, conv_out_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg)
# self.mask_conv = nn.ModuleList()
# for i in range(mask_conv):
# conv_m = ConvModule(1, 1, 3, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg)
# self.mask_conv.append(conv_m)
self.shared_convs, self.shared_fcs, last_layer_dim = \
self._add_conv_fc_branch(
self.num_shared_convs, self.num_shared_fcs, self.in_channels,
True)
self.shared_out_channels = last_layer_dim
# add cls specific branch
self.cls_convs, self.cls_fcs, self.cls_last_dim = \
self._add_conv_fc_branch(
self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)
# add reg specific branch
self.reg_convs, self.reg_fcs, self.reg_last_dim = \
self._add_conv_fc_branch(
self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)
if self.num_shared_fcs == 0 and not self.with_avg_pool:
if self.num_cls_fcs == 0:
self.cls_last_dim *= self.roi_feat_area
if self.num_reg_fcs == 0:
self.reg_last_dim *= self.roi_feat_area
self.relu = nn.ReLU(inplace=True)
# reconstruct fc_cls and fc_reg since input channels are changed
if self.with_cls:
self.fc_cls = nn.Linear(self.cls_last_dim, self.num_classes)
if self.with_reg:
out_dim_reg = (4 if self.reg_class_agnostic else 4 *
self.num_classes)
self.fc_reg = nn.Linear(self.reg_last_dim, out_dim_reg)
if self.with_IoU:
self.IoU_reg = nn.Linear(self.reg_last_dim, self.num_classes)
def _add_conv_fc_branch(self,
num_branch_convs,
num_branch_fcs,
in_channels,
is_shared=False):
"""Add shared or separable branch
convs -> avg pool (optional) -> fcs
"""
last_layer_dim = in_channels
# add branch specific conv layers
branch_convs = nn.ModuleList()
if num_branch_convs > 0:
for i in range(num_branch_convs):
conv_in_channels = (
last_layer_dim if i == 0 else self.conv_out_channels)
branch_convs.append(
ConvModule(
conv_in_channels,
self.conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
last_layer_dim = self.conv_out_channels
# add branch specific fc layers
branch_fcs = nn.ModuleList()
if num_branch_fcs > 0:
# for shared branch, only consider self.with_avg_pool
# for separated branches, also consider self.num_shared_fcs
if (is_shared
or self.num_shared_fcs == 0) and not self.with_avg_pool:
last_layer_dim *= self.roi_feat_area
for i in range(num_branch_fcs):
fc_in_channels = (
last_layer_dim if i == 0 else self.fc_out_channels)
branch_fcs.append(
nn.Linear(fc_in_channels, self.fc_out_channels))
last_layer_dim = self.fc_out_channels
return branch_convs, branch_fcs, last_layer_dim
def init_weights(self):
super(ConvFCBBoxHead_MH, self).init_weights()
for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs]:
for m in module_list.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
nn.init.constant_(m.bias, 0)
# @force_fp32(apply_to=('cls_score', 'bbox_pred'))
# def loss(self,
# cls_score,
# bbox_pred,
# labels,
# label_weights,
# bbox_targets,
# bbox_weights,
# reduction_override=None):
# losses = dict()
# if cls_score is not None:
# avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)
# losses['loss_cls_refine'] = self.loss_cls(
# cls_score,
# labels,
# label_weights,
# avg_factor=avg_factor,
# reduction_override=reduction_override)
# losses['acc_refine'] = accuracy(cls_score, labels)
# if bbox_pred is not None:
# pos_inds = labels > 0
# if self.reg_class_agnostic:
# pos_bbox_pred = bbox_pred.view(bbox_pred.size(0), 4)[pos_inds]
# else:
# pos_bbox_pred = bbox_pred.view(bbox_pred.size(0), -1,
# 4)[pos_inds, labels[pos_inds]]
# losses['loss_bbox_refine'] = self.loss_bbox(
# pos_bbox_pred,
# bbox_targets[pos_inds],
# bbox_weights[pos_inds],
# avg_factor=bbox_targets.size(0),
# reduction_override=reduction_override)
# return losses
    # TODO: add IoU target acquisition and loss calculation
    def get_iou_target(self, sampling_result, bbox_pred, bbox_target):
        pos_proposals = [res.pos_bboxes for res in sampling_result]
        pos_assigned_gt_inds = [
            res.pos_assigned_gt_inds for res in sampling_result
        ]
        # bbox_overlaps()
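        # Hedged sketch of the missing step (assumes mmdet's
        # bbox_overlaps(bboxes1, bboxes2, is_aligned=True) signature): decode
        # bbox_pred against the proposals, then
        #   ious = bbox_overlaps(decoded_pred_boxes, gt_boxes, is_aligned=True)
        # with `ious` used as the regression target for IoU_pred via self.iou_loss.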
def get_mask_target(self, sampling_results, gt_masks, rcnn_train_cfg):
# pos_proposals = [res.pos_bboxes for res in sampling_results]
# pos_assigned_gt_inds = [
# res.pos_assigned_gt_inds for res in sampling_results
# ]
# mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,
# gt_masks, rcnn_train_cfg)
proposals = [res.bboxes for res in sampling_results]
assigned_gt_inds = [
res.inds for res in sampling_results
]
mask_targets = mask_target(proposals, assigned_gt_inds,
gt_masks, rcnn_train_cfg)
mask_bg_targets = mask_bg_target(proposals, gt_masks, rcnn_train_cfg)
return mask_targets, mask_bg_targets
# def get_target(self, sampling_results, gt_bboxes, gt_labels,
# rcnn_train_cfg):
# pos_proposals = [res.pos_bboxes for res in sampling_results]
# neg_proposals = [torch.tensor([]) for res in sampling_results]
# pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results]
# pos_gt_labels = [res.pos_gt_labels for res in sampling_results]
# reg_classes = 1 if self.reg_class_agnostic else self.num_classes
# cls_reg_targets = bbox_target(
# pos_proposals,
# neg_proposals,
# pos_gt_bboxes,
# pos_gt_labels,
# rcnn_train_cfg,
# reg_classes,
# target_means=self.target_means,
# target_stds=self.target_stds)
# return cls_reg_targets
def forward(self, x, mask_pred):
# shared part
if self.using_mask:
# for conv in self.mask_conv:
# mask_pred = conv(mask_pred)
# mask_pred = self.hint_conv(mask_pred)
if self.proto_combine == 'con':
x = torch.cat([x, mask_pred], dim=1)
x = self.combine(x)
elif self.proto_combine == 'sum':
x = x + mask_pred
x = self.combine(x)
else:
x = self.reduce_con(x)
x = torch.cat([x, mask_pred], dim=1)
if self.num_shared_convs > 0:
for conv in self.shared_convs:
x = conv(x)
if self.num_shared_fcs > 0:
if self.with_avg_pool:
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
for fc in self.shared_fcs:
x = self.relu(fc(x))
# separate branches
x_cls = x
x_reg = x
for conv in self.cls_convs:
x_cls = conv(x_cls)
if x_cls.dim() > 2:
if self.with_avg_pool:
x_cls = self.avg_pool(x_cls)
x_cls = x_cls.view(x_cls.size(0), -1)
for fc in self.cls_fcs:
x_cls = self.relu(fc(x_cls))
for conv in self.reg_convs:
x_reg = conv(x_reg)
if x_reg.dim() > 2:
if self.with_avg_pool:
x_reg = self.avg_pool(x_reg)
x_reg = x_reg.view(x_reg.size(0), -1)
for fc in self.reg_fcs:
x_reg = self.relu(fc(x_reg))
cls_score = self.fc_cls(x_cls) if self.with_cls else None
bbox_pred = self.fc_reg(x_reg) if self.with_reg else None
if self.with_IoU:
IoU_pred = self.IoU_reg(x_reg)
return cls_score, bbox_pred, IoU_pred
return cls_score, bbox_pred
@HEADS.register_module
class SharedFCBBoxHead_MH(ConvFCBBoxHead_MH):
def __init__(self, num_fcs=2, fc_out_channels=1024, *args, **kwargs):
assert num_fcs >= 1
super(SharedFCBBoxHead_MH, self).__init__(
num_shared_convs=0,
num_shared_fcs=num_fcs,
num_cls_convs=0,
num_cls_fcs=0,
num_reg_convs=0,
num_reg_fcs=0,
fc_out_channels=fc_out_channels,
*args,
**kwargs)
| 1.84375 | 2 |
services/mongoDbService/matchProvider.py | DanielTal87/Football-Management-Tool | 0 | 12797011 | #!/usr/bin/python3
from services.loggerServices.loggerService import LoggerService
logger = LoggerService().logger
def create_match(self, data):
return self.client.FMT["matches"].insert_one(data).inserted_id
def update_match(self, data):
    # pymongo's update_one requires separate filter and update documents and
    # returns an UpdateResult; upserted_id is None when the match already existed
    return self.client.FMT["matches"].update_one(data, {"$set": data}, upsert=True).upserted_id
def find_match(self, data):
return self.client.FMT["matches"].find_one(data)
def parse_match_from_request(request):
return {
"home_team": request.get("home_team"),
"away_team": request.get("away_team"),
"date": request.get("date")
}
def parse_ended_match_from_request(request):
match = parse_match_from_request(request)
match["score"] = request.get("score").replace(' ', '')
return match
def parse_ended_match_to_db(request):
parsed_match = {
"home_team": request.get("home_team"),
"away_team": request.get("away_team"),
"date": request.get("date"),
"score": request.get("score")
}
home_team_score = parsed_match["score"].split('-')[0]
away_team_score = parsed_match["score"].split('-')[1]
# parse match result
if home_team_score == away_team_score:
parsed_match['is_draw'] = True
parsed_match['team_won_score'] = home_team_score
else:
parsed_match['is_draw'] = False
        if int(home_team_score) > int(away_team_score):
parsed_match['team_won'] = parsed_match['home_team']
parsed_match['team_won_score'] = home_team_score
parsed_match['team_lost'] = parsed_match['away_team']
parsed_match['team_lose_score'] = away_team_score
else:
parsed_match['team_won'] = parsed_match['away_team']
parsed_match['team_won_score'] = away_team_score
parsed_match['team_lost'] = parsed_match['home_team']
parsed_match['team_lose_score'] = home_team_score
return parsed_match
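# e.g. a request whose score is '2-1' marks home_team as team_won with
# team_won_score '2'; equal scores set is_draw instead of a winner.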
def parse_match_from_db(request):
return {
"id": str(request.get("_id")),
"home_team": request.get("home_team"),
"away_team": request.get("away_team"),
"date": request.get("date", None),
"score": request.get("score", None),
"is_draw": request.get("is_draw", None),
"team_won": request.get("team_won", None),
"team_lost": request.get("team_lost", None)
}
| 2.875 | 3 |
cursoemvideoPy/Mundo1/ex012.py | BrCarlini/exPython | 0 | 12797012 print('========= Discount =========')
preco = float(input('Enter the product price: R$'))
calc = preco - ((5 / 100) * preco)
print(f'Price entered: R${preco}\nDiscount: 5%\nDiscounted price: R${calc}') | 3.875 | 4
asf_search/ASFSearchResults.py | gitter-badger/Discovery-asf_search | 57 | 12797013 | <gh_stars>10-100
from collections import UserList
from multiprocessing import Pool
import json
from asf_search import ASFSession
class ASFSearchResults(UserList):
def geojson(self):
return {
'type': 'FeatureCollection',
'features': [product.geojson() for product in self]
}
def __str__(self):
return json.dumps(self.geojson(), indent=2, sort_keys=True)
def download(self, path: str, session: ASFSession = None, processes=1) -> None:
"""
Iterates over each ASFProduct and downloads them to the specified path.
:param path: The directory into which the products should be downloaded.
:param session: The session to use, in most cases should be authenticated beforehand
:param processes: Number of download processes to use. Defaults to 1 (i.e. sequential download)
:return: None
"""
if session is None:
session = ASFSession()
if processes == 1:
for product in self:
product.download(path=path, session=session)
else:
pool = Pool(processes=processes)
args = [(product, path, session) for product in self]
pool.map(_download_product, args)
pool.close()
pool.join()
def _download_product(args):
product, path, session = args
product.download(path=path, session=session)
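# Hedged usage sketch (`results` stands for an ASFSearchResults built by the
# search API elsewhere in asf_search; the path is hypothetical):
# results.download(path='./downloads', processes=4)
# print(results.geojson()['type'])  # 'FeatureCollection'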
| 2.953125 | 3 |
eoxserver/services/subset.py | ESA-VirES/eoxserver | 1 | 12797014 | <reponame>ESA-VirES/eoxserver<filename>eoxserver/services/subset.py<gh_stars>1-10
#-------------------------------------------------------------------------------
# $Id$
#
# Project: EOxServer <http://eoxserver.org>
# Authors: <NAME> <<EMAIL>>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2013 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
import logging
from django.contrib.gis.geos import Polygon, LineString
from eoxserver.resources.coverages import crss
from eoxserver.services.exceptions import (
InvalidAxisLabelException, InvalidSubsettingException
)
__all__ = ["Subsets", "Trim", "Slice"]
logger = logging.getLogger(__name__)
class Subsets(list):
""" Convenience class to handle a variety of spatial and/or temporal
subsets.
"""
def __init__(self, iterable, crs=None, allowed_types=None):
""" Constructor. Allows to add set the initial subsets
"""
self.allowed_types = allowed_types if allowed_types is not None else (
Trim, Slice
)
# Do a manual insertion here to assure integrity
for subset in iterable:
self.append(subset)
self._crs = crs
# List API
def extend(self, iterable):
for subset in iterable:
self._check_subset(subset)
super(Subsets, self).append(subset)
def append(self, subset):
self._check_subset(subset)
super(Subsets, self).append(subset)
def insert(self, i, subset):
self._check_subset(subset)
super(Subsets, self).insert(i, subset)
# Subset related stuff
@property
def has_x(self):
return any(map(lambda s: s.is_x, self))
@property
def has_y(self):
return any(map(lambda s: s.is_y, self))
@property
def has_t(self):
return any(map(lambda s: s.is_temporal, self))
@property
def crs(self):
return self._crs
@crs.setter
def crs(self, value):
self._crs = value
@property
def srid(self):
""" Tries to find the correct integer SRID for the crs.
"""
crs = self.crs
if crs is not None:
srid = crss.parseEPSGCode(crs,
(crss.fromURL, crss.fromURN, crss.fromShortCode)
)
if srid is None and not crss.is_image_crs(crs):
raise InvalidSubsettingException(
"Could not parse EPSG code from URI '%s'" % crs
)
return srid
return None
def filter(self, queryset, containment="overlaps"):
if not len(self):
return queryset
qs = queryset
bbox = [None, None, None, None]
srid = self.srid
if srid is None:
srid = 4326
max_extent = crss.crs_bounds(srid)
tolerance = crss.crs_tolerance(srid)
for subset in self:
if isinstance(subset, Slice):
is_slice = True
value = subset.value
elif isinstance(subset, Trim):
is_slice = False
low = subset.low
high = subset.high
if subset.is_temporal:
if is_slice:
qs = qs.filter(
begin_time__lte=value,
end_time__gte=value
)
else:
if high is not None:
qs = qs.filter(
begin_time__lte=high
)
if low is not None:
qs = qs.filter(
end_time__gte=low
)
# check if the temporal bounds must be strictly contained
if containment == "contains":
if high is not None:
qs = qs.filter(
end_time__lte=high
)
if low is not None:
qs = qs.filter(
begin_time__gte=low
)
else:
if is_slice:
if subset.is_x:
line = LineString(
(value, max_extent[1]),
(value, max_extent[3])
)
else:
line = LineString(
(max_extent[0], value),
(max_extent[2], value)
)
line.srid = srid
if srid != 4326:
line.transform(4326)
qs = qs.filter(footprint__intersects=line)
else:
if subset.is_x:
bbox[0] = subset.low
bbox[2] = subset.high
else:
bbox[1] = subset.low
bbox[3] = subset.high
if bbox != [None, None, None, None]:
bbox = map(
lambda v: v[0] if v[0] is not None else v[1],
zip(bbox, max_extent)
)
bbox[0] -= tolerance; bbox[1] -= tolerance
bbox[2] += tolerance; bbox[3] += tolerance
logger.debug(
"Applying BBox %s with containment '%s'." % (bbox, containment)
)
poly = Polygon.from_bbox(bbox)
poly.srid = srid
if srid != 4326:
poly.transform(4326)
if containment == "overlaps":
qs = qs.filter(footprint__intersects=poly)
elif containment == "contains":
qs = qs.filter(footprint__within=poly)
return qs
def matches(self, eo_object, containment="overlaps"):
if not len(self):
return True
bbox = [None, None, None, None]
srid = self.srid
if srid is None:
srid = 4326
max_extent = crss.crs_bounds(srid)
tolerance = crss.crs_tolerance(srid)
footprint = eo_object.footprint
begin_time = eo_object.begin_time
end_time = eo_object.end_time
for subset in self:
if isinstance(subset, Slice):
is_slice = True
value = subset.value
elif isinstance(subset, Trim):
is_slice = False
low = subset.low
high = subset.high
if subset.is_temporal:
if is_slice:
if begin_time > value or end_time < value:
return False
elif low is None and high is not None:
if begin_time > high:
return False
elif low is not None and high is None:
if end_time < low:
return False
                elif low is not None and high is not None:
                    if begin_time > high or end_time < low:
                        return False
                # when both bounds are None the trim is unbounded and always matches
else:
if is_slice:
if subset.is_x:
                        line = LineString(
(value, max_extent[1]),
(value, max_extent[3])
)
else:
                        line = LineString(
(max_extent[0], value),
(max_extent[2], value)
)
line.srid = srid
if srid != 4326:
line.transform(4326)
if not line.intersects(footprint):
return False
else:
if subset.is_x:
bbox[0] = subset.low
bbox[2] = subset.high
else:
bbox[1] = subset.low
bbox[3] = subset.high
if bbox != [None, None, None, None]:
bbox = map(
lambda v: v[0] if v[0] is not None else v[1],
zip(bbox, max_extent)
)
bbox[0] -= tolerance; bbox[1] -= tolerance
bbox[2] += tolerance; bbox[3] += tolerance
logger.debug(
"Applying BBox %s with containment '%s'." % (bbox, containment)
)
poly = Polygon.from_bbox(bbox)
poly.srid = srid
if srid != 4326:
poly.transform(4326)
if containment == "overlaps":
if not footprint.intersects(poly):
return False
elif containment == "contains":
if not footprint.within(poly):
return False
return True
def _check_subset(self, subset):
if not isinstance(subset, Subset):
raise ValueError("Supplied argument is not a subset.")
if not isinstance(subset, self.allowed_types):
raise InvalidSubsettingException(
"Supplied subset is not allowed."
)
if self.has_x and subset.is_x:
raise InvalidSubsettingException(
"Multiple subsets for X-axis given."
)
if self.has_y and subset.is_y:
raise InvalidSubsettingException(
"Multiple subsets for Y-axis given."
)
if self.has_t and subset.is_temporal:
raise InvalidSubsettingException(
"Multiple subsets for time-axis given."
)
@property
def xy_bbox(self):
""" Returns the minimum bounding box for all X and Y subsets.
"""
bbox = [None, None, None, None]
for subset in self:
if subset.is_x:
if isinstance(subset, Trim):
bbox[0] = subset.low
bbox[2] = subset.high
else:
bbox[0] = bbox[2] = subset.value
elif subset.is_y:
if isinstance(subset, Trim):
bbox[1] = subset.low
bbox[3] = subset.high
else:
bbox[1] = bbox[3] = subset.value
return bbox
def bounding_polygon(self, coverage):
srid = coverage.srid
extent = coverage.extent
size_x, size_y = coverage.size
footprint = coverage.footprint
subset_srid = self.srid
if subset_srid is None:
bbox = list(extent)
else:
bbox = list(footprint.extent)
for subset in self:
if not isinstance(subset, Trim) or subset.is_temporal:
continue
if subset_srid is None:
# transform coordinates from imageCRS to coverages CRS
if subset.is_x:
if subset.low is not None:
l = max(float(subset.low) / float(size_x), 0.0)
bbox[0] = extent[0] + l * (extent[2] - extent[0])
if subset.high is not None:
l = max(float(subset.high) / float(size_x), 0.0)
bbox[2] = extent[0] + l * (extent[2] - extent[0])
elif subset.is_y:
if subset.low is not None:
l = max(float(subset.low) / float(size_y), 0.0)
bbox[1] = extent[3] - l * (extent[3] - extent[1])
if subset.high is not None:
l = max(float(subset.high) / float(size_y), 0.0)
bbox[3] = extent[3] - l * (extent[3] - extent[1])
else:
if subset.is_x:
if subset.low is not None:
bbox[0] = max(subset.low, bbox[0])
if subset.high is not None:
bbox[2] = min(subset.high, bbox[2])
if subset.is_y:
if subset.low is not None:
bbox[1] = max(subset.low, bbox[1])
if subset.high is not None:
bbox[3] = min(subset.high, bbox[3])
if subset_srid is None:
poly = Polygon.from_bbox(bbox)
poly.srid = srid
else:
poly = Polygon.from_bbox(bbox)
poly.srid = subset_srid
return poly
class Subset(object):
def __init__(self, axis):
axis = axis.lower()
if axis not in all_axes:
raise InvalidAxisLabelException(axis)
self.axis = axis
@property
def is_temporal(self):
return self.axis in temporal_axes
@property
def is_x(self):
return self.axis in x_axes
@property
def is_y(self):
return self.axis in y_axes
class Slice(Subset):
def __init__(self, axis, value):
super(Slice, self).__init__(axis)
self.value = value
def __repr__(self):
return "Slice: %s[%s]" % (self.axis, self.value)
class Trim(Subset):
def __init__(self, axis, low=None, high=None):
super(Trim, self).__init__(axis)
if low is not None and high is not None and low > high:
raise InvalidSubsettingException(
"Invalid bounds: lower bound greater than upper bound."
)
self.low = low
self.high = high
def __repr__(self):
return "Trim: %s[%s:%s]" % (
self.axis, self.low, self.high
)
temporal_axes = ("t", "time", "phenomenontime")
x_axes = ("x", "lon", "long")
y_axes = ("y", "lat")
z_axes = ("z", "height")
all_axes = temporal_axes + x_axes + y_axes + z_axes
def is_temporal(axis):
""" Returns whether or not an axis is a temporal one.
"""
return (axis.lower() in temporal_axes)
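# Hedged usage sketch (classes and axis labels as defined above; bounds and
# CRS URI are illustrative):
# subsets = Subsets([Trim("x", 10.0, 20.0), Trim("y", -5.0, 5.0)])
# subsets.crs = "http://www.opengis.net/def/crs/EPSG/0/4326"
# queryset = subsets.filter(queryset, containment="overlaps")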
| 1.304688 | 1 |
c2cwsgiutils/scripts/genversion.py | camptocamp/c2cwsgiutils | 5 | 12797015 | #!/usr/bin/env python3
import json
import logging
import os
import re
import subprocess # nosec
import sys
from typing import Dict, Optional, Tuple, cast
SRC_VERSION_RE = re.compile(r"^.*\(([^=]*)===?([^=]*)\)$")
VERSION_RE = re.compile(r"^([^=]*)==([^=]*)$")
LOG = logging.getLogger(__name__)
def _get_package_version(comp: str) -> Tuple[Optional[str], Optional[str]]:
"""
Parse plain and editable versions.
See test_genversion.py for examples.
"""
src_matcher = SRC_VERSION_RE.match(comp)
matcher = src_matcher or VERSION_RE.match(comp)
if matcher:
return cast(Tuple[str, str], matcher.groups())
else:
if len(comp) > 0 and not comp[:3] == "-e ":
print("Cannot parse package version: " + comp)
return None, None
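# For instance (hypothetical package names): _get_package_version("requests==2.31.0")
# returns ("requests", "2.31.0"), while an editable-style line ending in
# "(mypkg==0.1)" is caught by SRC_VERSION_RE and yields ("mypkg", "0.1").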
def _get_packages_version() -> Dict[str, str]:
result = {}
with open(os.devnull, "w", encoding="utf-8") as devnull:
for comp in (
subprocess.check_output(["python3", "-m", "pip", "freeze"], stderr=devnull) # nosec
.decode()
.strip()
.split("\n")
):
name, version = _get_package_version(comp)
if name is not None and version is not None:
result[name] = version
return result
def deprecated() -> None:
"""Run the command and print a deprecated notice."""
LOG.warning("c2cwsgiutils_genversion.py is deprecated; use c2cwsgiutils-genversion instead")
return main()
def main() -> None:
"""Run the command."""
if len(sys.argv) == 2:
git_tag = None
git_hash = sys.argv[1]
else:
git_tag = sys.argv[1]
git_hash = sys.argv[2]
report = {"main": {"git_hash": git_hash}, "packages": _get_packages_version()}
if git_tag is not None:
report["main"]["git_tag"] = git_tag
with open("versions.json", "w", encoding="utf-8") as file:
json.dump(report, file, indent=2)
if __name__ == "__main__":
main()
| 2.484375 | 2 |
src/constellix/domains/record/type/main.py | aperim/python-constellix | 0 | 12797016 | <filename>src/constellix/domains/record/type/main.py
"""A Record"""
import logging
_LOGGER = logging.getLogger(__name__)
class DomainRecord(object):
def __init__(self, protocol=None, domain=None):
super().__init__()
self.__protocol = protocol
self.__domain = domain
@property
def record_type(self):
try:
return self.__record_type
except AttributeError:
return None
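    # Note: nothing in this module assigns __record_type, so the fallback
    # above means record_type stays None unless other code sets the
    # name-mangled attribute.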
@property
def domain_id(self):
try:
return self.__domain.id
except AttributeError:
return None | 2.625 | 3 |
scripts/autoreduce/reduce_REF_L.py | neutrons/LiquidsReflectometer | 0 | 12797017 | <gh_stars>0
"""
Auto-reduction script for the Liquids Reflectometer
For reference:
Type 0: Normal sample data
Type 1: Direct beams for scaling factors
Type 2: Zero-attenuator direct beams
Type 3: Data that we don't need to treat
"""
import sys
import os
import json
import warnings
warnings.simplefilter('ignore')
if ("MANTIDPATH" in os.environ):
del os.environ["MANTIDPATH"]
sys.path.insert(0,"/opt/mantidnightly/bin")
sys.path.insert(1,"/opt/mantidnightly/lib")
import mantid
from mantid.simpleapi import *
try:
from sf_calculator import ScalingFactor
DIRECT_BEAM_CALC_AVAILABLE = True
logger.notice("sf_calculator available")
except Exception:
import scipy
logger.notice("Scaling factor calculation upgrade not available: scipy=%s" % scipy.__version__)
DIRECT_BEAM_CALC_AVAILABLE = False
event_file_path=sys.argv[1]
output_dir=sys.argv[2]
event_file = os.path.split(event_file_path)[-1]
# The legacy format is REF_L_xyz_event.nxs
# The new format is REF_L_xyz.nxs.h5
run_number = event_file.split('_')[2]
run_number = run_number.replace('.nxs.h5', '')
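# e.g. 'REF_L_123456.nxs.h5' and the legacy 'REF_L_123456_event.nxs' both
# yield run_number '123456' (the run id here is hypothetical).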
# Reduction options
#-------------------------------------------------------------------------
# Wavelength below which we don't need the absolute normalization
WL_CUTOFF = 10.0
# Default primary fraction range to be used if it is not defined in the template
PRIMARY_FRACTION_RANGE = [5, 290]
NORMALIZE_TO_UNITY = False
# Allowed values: ['DirectBeam', 'WithReference']
NORMALIZATION_TYPE = "WithReference"
#NORMALIZATION_TYPE = "DirectBeam"
# Allowed values: dict or ""
# D2O
REFL1D_PARS = json.dumps(dict(back_sld=6.4,
back_roughness=2.7,
front_sld=0,
layers=[],
scale=1.0,
background=0.0))
# Quartz
#REFL1D_PARS = json.dumps(dict(back_sld=4.09,
# back_roughness=4.28,
# front_sld=0,
# layers=[],
# scale=0.9169,
# background=3.753e-07))
#-------------------------------------------------------------------------
# Locate the template file
# If no template file is available, the automated reduction will generate one
template_file = ""
if os.path.isfile("template.xml"):
template_file = "template.xml"
elif os.path.isfile(os.path.join(output_dir, "template.xml")):
template_file = os.path.join(output_dir, "template.xml")
elif os.path.isfile("/SNS/REF_L/shared/autoreduce/template.xml"):
template_file = "/SNS/REF_L/shared/autoreduce/template.xml"
print("Using template: %s" % template_file)
# Run the auto-reduction
ws = LoadEventNexus(Filename=event_file_path)
# Check the measurement geometry
if ws.getRun().getProperty('BL4B:CS:ExpPl:OperatingMode').value[0] == 'Free Liquid':
NORMALIZATION_TYPE = "WithReference"
else:
NORMALIZATION_TYPE = "DirectBeam"
# Determine whether this is data or whether we need to compute scaling factors
data_type = ws.getRun().getProperty("data_type").value[0]
if data_type == 1 and DIRECT_BEAM_CALC_AVAILABLE:
logger.notice("Computing scaling factors")
sequence_number = ws.getRun().getProperty("sequence_number").value[0]
first_run_of_set = ws.getRun().getProperty("sequence_id").value[0]
incident_medium = ws.getRun().getProperty("incident_medium").value[0]
_fpath = os.path.join(output_dir, "sf_%s_%s_auto.cfg" % (first_run_of_set, incident_medium))
sf = ScalingFactor(run_list=range(first_run_of_set, first_run_of_set + sequence_number),
sort_by_runs=True,
tof_step=200,
medium=incident_medium,
slit_tolerance=0.06,
sf_file=_fpath)
sf.execute()
else:
logger.notice("Automated reduction")
output = LRAutoReduction(#Filename=event_file_path,
InputWorkspace=ws,
ScaleToUnity=NORMALIZE_TO_UNITY,
ScalingWavelengthCutoff=WL_CUTOFF,
PrimaryFractionRange=PRIMARY_FRACTION_RANGE,
OutputDirectory=output_dir,
SlitTolerance=0.06,
ReadSequenceFromFile=True,
OrderDirectBeamsByRunNumber=True,
TemplateFile=template_file, FindPeaks=False,
NormalizationType=NORMALIZATION_TYPE,
Refl1DModelParameters=REFL1D_PARS)
first_run_of_set=int(output[1])
#-------------------------------------------------------------------------
# Produce plot for the web monitor
default_file_name = 'REFL_%s_combined_data_auto.txt' % first_run_of_set
default_file_path = os.path.join(output_dir, default_file_name)
if os.path.isfile(default_file_path):
print("Loading %s" % os.path.join(output_dir, default_file_name))
reflectivity = LoadAscii(Filename=os.path.join(output_dir, default_file_name), Unit="MomentumTransfer")
plotting_ready = True
try:
from postprocessing.publish_plot import plot1d
except ImportError:
from finddata.publish_plot import plot1d, _determine_config_file, publish_plot
if _determine_config_file(None) is None:
plotting_ready = False
x = reflectivity.readX(0)
y = reflectivity.readY(0)
dy = reflectivity.readE(0)
dx = reflectivity.readDx(0)
if int(run_number) - first_run_of_set < 10:
for r in range(0, 10):
reduced_file_name = 'REFL_%s_%s_%s_auto.nxs' % (first_run_of_set, r+1, first_run_of_set+r)
reduced_file_path = os.path.join(output_dir, reduced_file_name)
if os.path.isfile(reduced_file_path):
# Look to see whether submitting the plot is enabled
if plotting_ready:
plot1d(first_run_of_set+r, [[x, y, dy, dx]], instrument='REF_L',
x_title=u"Q (1/A)", x_log=True,
y_title="Reflectivity", y_log=True, show_dx=False)
else:
plot_div = plot1d(first_run_of_set+r, [[x, y, dy, dx]], instrument='REF_L',
x_title=u"q (1/A)", x_log=True,
y_title="Reflectivity", y_log=True, show_dx=False, publish=False)
publish_plot('REF_L', first_run_of_set+r, files={'file': plot_div},
config="/SNS/REF_L/shared/.livedata.conf")
else:
plot1d(run_number, [[x, y, dy, dx]], instrument='REF_L',
x_title=u"Q (1/A)", x_log=True,
y_title="Reflectivity", y_log=True, show_dx=False)
| 2.328125 | 2 |
openerp/addons/document/content_index.py | ntiufalara/openerp7 | 3 | 12797018 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import os
import tempfile
from subprocess import Popen, PIPE
_logger = logging.getLogger(__name__)
class NhException(Exception):
pass
class indexer(object):
""" An indexer knows how to parse the content of some file.
Typically, one indexer should be instantiated per file
type.
Override this class to add more functionality. Note that
you should only override the Content or the File methods
that give an optimal result. """
def _getMimeTypes(self):
""" Return supported mimetypes """
return []
def _getExtensions(self):
return []
def _getDefMime(self, ext):
""" Return a mimetype for this document type, ideally the
closest to the extension ext. """
        mts = self._getMimeTypes()
        if len(mts):
            return mts[0]
return None
def indexContent(self, content, filename=None, realfile=None):
""" Use either content or the real file, to index.
Some parsers will work better with the actual
content, others parse a file easier. Try the
optimal.
"""
res = ''
try:
            if content is not None:
return self._doIndexContent(content)
except NhException:
pass
        if realfile is not None:
try:
return self._doIndexFile(realfile)
except NhException:
pass
fp = open(realfile,'rb')
try:
content2 = fp.read()
finally:
fp.close()
# The not-handled exception may be raised here
return self._doIndexContent(content2)
# last try, with a tmp file
if content:
try:
fname,ext = filename and os.path.splitext(filename) or ('','')
fd, rfname = tempfile.mkstemp(suffix=ext)
os.write(fd, content)
os.close(fd)
res = self._doIndexFile(rfname)
os.unlink(rfname)
return res
except NhException:
pass
raise NhException('No appropriate method to index file.')
def _doIndexContent(self, content):
raise NhException("Content cannot be handled here.")
def _doIndexFile(self, fpath):
raise NhException("Content cannot be handled here.")
def __repr__(self):
return "<indexer %s.%s>" %(self.__module__, self.__class__.__name__)
def mime_match(mime, mdict):
if mdict.has_key(mime):
return (mime, mdict[mime])
if '/' in mime:
mpat = mime.split('/')[0]+'/*'
if mdict.has_key(mpat):
return (mime, mdict[mpat])
return (None, None)
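# e.g. mime_match('text/plain', {'text/*': handler}) -> ('text/plain', handler):
# the exact type wins when present, otherwise the 'type/*' wildcard is tried.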
class contentIndex(object):
def __init__(self):
self.mimes = {}
self.exts = {}
def register(self, obj):
f = False
for mime in obj._getMimeTypes():
self.mimes[mime] = obj
f = True
for ext in obj._getExtensions():
self.exts[ext] = obj
f = True
if f:
_logger.debug('Register content indexer: %r.', obj)
if not f:
raise Exception("Your indexer should at least support a mimetype or extension.")
def doIndex(self, content, filename=None, content_type=None, realfname=None, debug=False):
fobj = None
fname = None
mime = None
if content_type and self.mimes.has_key(content_type):
mime = content_type
fobj = self.mimes[content_type]
elif filename:
bname,ext = os.path.splitext(filename)
if self.exts.has_key(ext):
fobj = self.exts[ext]
mime = fobj._getDefMime(ext)
if content_type and not fobj:
mime,fobj = mime_match(content_type, self.mimes)
if not fobj:
try:
if realfname :
fname = realfname
else:
try:
bname,ext = os.path.splitext(filename or 'test.tmp')
except Exception:
bname, ext = filename, 'tmp'
fd, fname = tempfile.mkstemp(suffix=ext)
os.write(fd, content)
os.close(fd)
pop = Popen(['file','-b','--mime',fname], shell=False, stdout=PIPE)
(result, _) = pop.communicate()
mime2 = result.split(';')[0]
_logger.debug('File gives us: %s', mime2)
# Note that the temporary file still exists now.
mime,fobj = mime_match(mime2, self.mimes)
if not mime:
mime = mime2
except Exception:
_logger.exception('Cannot determine mime type.')
try:
if fobj:
res = (mime, fobj.indexContent(content,filename,fname or realfname) )
else:
_logger.debug("Have no object, return (%s, None).", mime)
res = (mime, '')
except Exception:
_logger.exception("Cannot index file %s (%s).",
filename, fname or realfname)
res = (mime, '')
# If we created a tmp file, unlink it now
if not realfname and fname:
try:
os.unlink(fname)
except Exception:
_logger.exception("Cannot unlink %s.", fname)
return res
cntIndex = contentIndex()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 1.75 | 2 |
create_morf_wordmap.py | lallubharteja/KWS-Scripts | 27 | 12797019 | #!/usr/bin/env python3
from __future__ import print_function
import morfessor
import sys
import logging
def main(allowed_chars_file, model):
allowed_chars = {line.strip() for line in open(allowed_chars_file, encoding='utf-8') if len(line.strip()) == 1}
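    # NOTE: allowed_chars is built but never used below; presumably earlier
    # versions filtered the segmented words against this character set.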
model = morfessor.MorfessorIO().read_any_model(model)
for line in sys.stdin:
word = line.strip()
parts = model.viterbi_segment(word)[0]
print(word,end=' ')
print(" ".join(parts).replace("<unk>", "<UNK>"))
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
if len(sys.argv) != 3:
print("usage: python3 create_morf_wordmap.py <allowe-characters-file> <morfessor-model> < word-list")
print("e.g.: python3 create_morf_wordmap.py data/kws_prep/allowed_chars data/kws_prep/morf/model.bin < data/kws_prep/dev.words")
print("This script prints a stdout word and its morpheme constituents according to the morfessor.")
sys.exit(-1)
main(sys.argv[1],sys.argv[2])
| 3 | 3 |
data_utils/create_data.py | jsedoc/Tree_Learning | 0 | 12797020 | from __future__ import print_function
from create_tree import *
import numpy as np
import random
DATA_DIR = "../data/"
def curriculum_depth(i, num_examples, max_depth):
curriculum_max_depth= int((max_depth*i)/num_examples)
#print(i, curriculum_max_depth,)
if curriculum_max_depth > 0:
random_depth = 2 + np.random.randint(curriculum_max_depth)
else:
random_depth = 2
#print("DEPTH = ", random_depth)
return random_depth
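# e.g. curriculum_depth(250, 1000, 8) caps the curriculum depth at
# int(8*250/1000) = 2, so it returns 2 + randint(2), i.e. depth 2 or 3.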
def copy_t2t(depth):
my_tree = generate_data(depth-1)
change_nts(my_tree)
my_list = convert_to_list_inorder(my_tree,[])
infix_tree = ' '.join(str(e) for e in my_list)
#print my_tree
return ([infix_tree, infix_tree])
def create_examples(num_examples, max_depth, function):
data = []
for i in range(num_examples):
depth = max_depth
if np.random.randint(2) == 0:
depth = curriculum_depth(i, num_examples, max_depth)
data.append(function(depth))
return data
if __name__ == "__main__":
num_examples = 1000
max_depth = 5
data_subset = "train"
t2t_operation = "COPY"
seed = 0
#NOTE: we need both -- for reproducible trees...
    #np.random.seed(seed)
#random.seed(seed)
if t2t_operation == "COPY":
data = create_examples(num_examples,max_depth, function=copy_t2t)
trans = open(DATA_DIR + data_subset + '.copy', 'w')
elif t2t_operation == "RELABEL_1":
data = create_examples(num_examples,max_depth, function=copy_t2t)
trans = open(DATA_DIR + data_subset + '.copy', 'w')
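        # NOTE: currently identical to the COPY branch; presumably a
        # placeholder until a real relabelling transform exists.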
orig = open(DATA_DIR + data_subset + '.orig', 'w')
for i in range(num_examples):
print(data[i][0], file=orig)
print(data[i][1], file=trans)
#orig_vocab = open(DATA_DIR + 'vocab.train.orig', 'w')
#trans_vocab = open(DATA_DIR + 'vocab.train.copy', 'w')
#max_num = 256
#operators = ['+','-','*','/']
#for i in range(1, max_num+1):
# print >> orig_vocab, i, i
# print >> trans_vocab, i, i
#for i in range(len(operators)):
# print >> orig_vocab, operators[i], max_num+i+1
# print >> trans_vocab, operators[i], max_num+i+1
#print >> orig_vocab, '(', max_num + len(operators) + 1
#print >> orig_vocab, ')', max_num + len(operators) + 2
#print >> trans_vocab, '(', max_num + len(operators) + 1
#print >> trans_vocab, ')', max_num + len(operators) + 2
| 3.234375 | 3 |
setup.py | daconex/GroupMeNotifier | 1 | 12797021 | <reponame>daconex/GroupMeNotifier<filename>setup.py
from setuptools import setup
setup(
name='GroupMe Notifier',
version=0.1,
description='GroupMe Notifier is a tool for getting notifications on desktop',
author='<NAME>',
author_email='<EMAIL>',
url='http://daconex.me',
install_requires=['requests']
)
| 1.5625 | 2 |
GraphTheory/check_binary_search_tree.py | GeorgeLinut/CodingInterviewTipsAndTricks | 19 | 12797022 from sys import maxsize
def check_bst_util(root, min_value, max_value):
if root is None:
return True
if not (min_value < root.value < max_value):
return False
    return check_bst_util(root.left, min_value, root.value) and check_bst_util(root.right, root.value, max_value)
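# check_bst wraps the helper with unbounded limits; the strict inequality
# above means duplicate keys make the tree fail the BST check.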
def check_bst(root):
return check_bst_util(root, - maxsize, maxsize) | 3.390625 | 3 |
core.py | pygics-mod/page | 0 | 12797023 | <reponame>pygics-mod/page<gh_stars>0
# -*- coding: utf-8 -*-
'''
Created on 2017. 10. 25.
@author: HyechurnJang
'''
import os
import uuid
import types
import jinja2
from pygics import Lock, ContentType, export, rest
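# NOTE: pmd() and pwd() are used below without an explicit import; presumably
# pygics provides them (module path/name and module-directory helpers).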
def createVid(): return 'v-' + str(uuid.uuid4())
class Tag(dict):
def __init__(self, tag, **attrs):
dict.__init__(self, tag=tag, elems=[], attrs={})
for key, val in attrs.items(): self['attrs'][key.lower()] = val
def __len__(self, *args, **kwargs):
return self['elems'].__len__()
def __str__(self):
ret = '<%s' % self['tag']
for k, v in self['attrs'].items(): ret += ' %s="%s"' % (k, v)
ret += '>'
for elem in self['elems']: ret += unicode(elem)
ret += '</%s>' % self['tag']
return ret
#===========================================================================
# Attributes (a.k.a : event & links)
#===========================================================================
def attr(self, **attrs):
own_attrs = self['attrs']
for key, val in attrs.items():
key_low = key.lower()
own_attrs[key_low] = '%s %s' % (own_attrs[key_low], val) if key_low in own_attrs else val
return self
def __lshift__(self, opts):
if opts: return self.attr(**opts)
return self
def baseattr(self, **attrs):
own_attrs = self['attrs']
for key, val in attrs.items():
key_low = key.lower()
own_attrs[key_low] = '%s %s' % (val, own_attrs[key_low]) if key_low in own_attrs else val
return self
#===========================================================================
# Elements (a.k.a : children)
#===========================================================================
def html(self, *elems):
for elem in elems: self['elems'].append(elem)
return self
def __rshift__(self, elems):
if elems:
if isinstance(elems, tuple) or isinstance(elems, list): return self.html(*elems)
else: return self.html(*(elems,))
return self
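# Hedged usage sketch of the Tag DSL above:
# div = Tag('div', Class='row') >> (Tag('span') >> 'hello',)
# str(div) == '<div class="row"><span>hello</span></div>'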
class Cache:
_CACHE_DATA = {}
@classmethod
def getCache(cls, file_path):
if file_path in Cache._CACHE_DATA:
return Cache._CACHE_DATA[file_path]
else:
class CacheDescriptor(types.FileType):
def __init__(self, file_path):
with open(file_path, 'rb') as fd: self.data = fd.read()
self.file_path = file_path
@property
def name(self): return self.file_path
def read(self): return self.data
def close(self): return None
if not os.path.exists(file_path): raise Exception('could not find %s' % file_path)
cache = CacheDescriptor(file_path)
Cache._CACHE_DATA[file_path] = cache
return cache
class Static:
def __init__(self, url, static='static', cache=True):
mod_path, mod_name = pmd()
mod_name = mod_name.replace('.', '/')
if not url: self.url = '/%s' % mod_name
elif url[0] == '/': self.url = url
else: self.url = '/%s/%s' % (mod_name, url)
if not static: self.static = mod_path
elif static[0] == '/': self.static = '%s%s' % (mod_path, static)
else: self.static = '%s/%s' % (mod_path, static)
self._static_cache = cache
@export('GET', self.url)
def send_static(req, *argv):
path = '/'.join(argv)
file_path = '%s/%s' % (self.static, path)
if self._static_cache: return Cache.getCache(file_path)
else:
if not os.path.exists(file_path): raise Exception('could not find %s' % path)
return open(file_path, 'rb')
class Page:
def __init__(self,
url=None,
title='',
favicon='/page/static/image/favicon.ico',
static='static',
cache=True):
mod_path, mod_name = pmd()
mod_name = mod_name.replace('.', '/')
if not url: self.url = '/%s' % mod_name
elif url[0] == '/': self.url = url
else: self.url = '/%s/%s' % (mod_name, url)
if static[0] == '/': static = static[1:]
if not static: self.static_path = mod_path
else: self.static_path = '%s/%s' % (mod_path, static)
if not static: self.static_url = '%s/static' % self.url
elif self.url != '/': self.static_url = '%s/%s' % (self.url, static)
else: self.static_url = '/%s' % static
self._page_init = '/page/empty'
self._page_view = {}
self._page_title = title
self._page_favicon = favicon
self._page_meta_list = []
self._page_css_list = []
self._page_js_list = []
self._page_head = ''
self._page_header = ''
self._page_footer = ''
self._page_cache = cache
self._page_cache_data = {}
self._page_updated = True
self._page_lock = Lock()
self._page_rendered = None
with open(pwd() + '/template.html') as fd: self._page_template = jinja2.Template(fd.read())
@export('GET', self.url, content_type=ContentType.TextHtml)
def send_template(req): return self.__render__()
@export('GET', self.static_url)
def send_static(req, *argv):
path = '/'.join(argv)
file_path = '%s/%s' % (self.static_path, path)
if self._page_cache: return Cache.getCache(file_path)
else:
if not os.path.exists(file_path): raise Exception('could not find %s' % path)
return open(file_path, 'rb')
def __render__(self):
if self._page_updated:
self._page_lock.on()
self._page_rendered = self._page_template.render({
'init' : self._page_init,
'title' : self._page_title,
'favicon' : self._page_favicon,
'meta_list' : self._page_meta_list,
'css_list' : self._page_css_list,
'js_list' : self._page_js_list,
'head' : unicode(self._page_head),
'header' : unicode(self._page_header),
'footer' : unicode(self._page_footer)
})
self._page_rendered = self._page_rendered.encode('utf-8')
self._page_updated = False
self._page_lock.off()
return self._page_rendered
else:
return self._page_rendered
def meta(self, *meta_list):
self._page_lock.on()
for meta in meta_list:
meta_str = ' '
for key, val in meta.items(): meta_str += '%s="%s"' % (key, val)
self._page_meta_list.append(meta_str)
self._page_updated = True
self._page_lock.off()
return self
def css(self, *css_list):
self._page_lock.on()
for css in css_list: self._page_css_list.append(css)
self._page_updated = True
self._page_lock.off()
return self
def js(self, *js_list):
self._page_lock.on()
for js in js_list: self._page_js_list.append(js)
self._page_updated = True
self._page_lock.off()
return self
def head(self, html):
self._page_lock.on()
self._page_head = html
self._page_updated = True
self._page_lock.off()
return self
def header(self, html):
self._page_lock.on()
self._page_header = html
self._page_updated = True
self._page_lock.off()
return self
def footer(self, html):
self._page_lock.on()
self._page_footer = html
self._page_updated = True
self._page_lock.off()
return self
#===========================================================================
# View Definition
#===========================================================================
def init(self, method='r', **opts):
def wrapper(func):
crud = method.lower()
id = createVid()
name = func.__name__
url = '%s/%s' % (self.url if self.url != '/' else '', func.__name__)
self._page_view[name] = {'id' : id, 'name' : name, 'url' : url}
if 'r' in crud or '*' in crud:
@rest('GET', url, **opts)
def get(req, *argv, **kargs): return func(req, *argv, **kargs)
if 'c' in crud or '*' in crud:
@rest('POST', url, **opts)
def post(req, *argv, **kargs): return func(req, *argv, **kargs)
if 'u' in crud or '*' in crud:
@rest('PUT', url, **opts)
def put(req, *argv, **kargs): return func(req, *argv, **kargs)
if 'd' in crud or '*' in crud:
@rest('DELETE', url, **opts)
def delete(req, *argv, **kargs): return func(req, *argv, **kargs)
self._page_lock.on()
self._page_init = url
self._page_updated = True
self._page_lock.off()
            return func  # keep the decorated function usable at module level
        return wrapper
def view(self, method='r', **opts):
def wrapper(func):
crud = method.lower()
id = createVid()
name = func.__name__
url = '%s/%s' % (self.url if self.url != '/' else '', name)
self._page_view[name] = {'id' : id, 'name' : name, 'url' : url}
if 'r' in crud or '*' in crud:
@rest('GET', url, **opts)
def get(req, *argv, **kargs): return func(req, *argv, **kargs)
if 'c' in crud or '*' in crud:
@rest('POST', url, **opts)
def post(req, *argv, **kargs): return func(req, *argv, **kargs)
if 'u' in crud or '*' in crud:
@rest('PUT', url, **opts)
def put(req, *argv, **kargs): return func(req, *argv, **kargs)
if 'd' in crud or '*' in crud:
@rest('DELETE', url, **opts)
def delete(req, *argv, **kargs): return func(req, *argv, **kargs)
return wrapper
def getView(self, name, *path):
view = self._page_view[name]
return {'id' : view['id'], 'name' : name, 'url' : '%s/%s' % (view['url'], '/'.join(path)) if path else view['url']}
#===========================================================================
# View Functions
#===========================================================================
def patch(self, name, *argv):
view = self._page_view[name]
id = view['id']
url = '%s/%s' % (view['url'], '/'.join(argv)) if argv else view['url']
return Tag('script', Id=id, Page_Url=url).html(
'$(document).ready(function(){page_patch("%s")});' % id
)
def __call__(self, name, *argv):
return self.patch(name, *argv)
def reload(self, *names):
reload = []
for name in names:
reload.append(self._page_view[name]['id'])
return {'reload' : reload}
def __getitem__(self, names):
if isinstance(names, tuple) or isinstance(names, list): return self.reload(*names)
else: return self.reload(*(names,))
#===========================================================================
# Interactive Functions
#===========================================================================
class InteractiveTag(Tag):
def __init__(self, view, *argv):
Tag.__init__(self, 'script')
self._view_id = view['id']
            self._view_url = '%s/%s' % (view['url'], '/'.join(argv)) if argv else view['url']
self._event_id = createVid()
self._event_attr = {'class' : self._event_id, 'page_url' : self._view_url, 'page_view' : self._view_id}
def event(self): return self._event_attr
def get(self, name, *argv):
class Get(Page.InteractiveTag):
def __init__(self, view, *argv):
Page.InteractiveTag.__init__(self, view, *argv)
self.html('$(document).ready(function(){$(".%s").click(function(){page_get($(this));});});' % self._event_id)
return Get(self._page_view[name], *argv)
def post(self, name, *argv):
class Post(Page.InteractiveTag):
def __init__(self, view, *argv):
Page.InteractiveTag.__init__(self, view, *argv)
self._data_id = self._event_id + '-data'
self._data_attr = {'class' : self._data_id}
self._event_attr['page_data'] = self._data_id
self.html('$(document).ready(function(){$(".%s").click(function(){page_post($(this));});});' % self._event_id)
def data(self): return self._data_attr
return Post(self._page_view[name], *argv)
def put(self, name, *argv):
class Put(Page.InteractiveTag):
def __init__(self, view, *argv):
Page.InteractiveTag.__init__(self, view, *argv)
self._data_id = self._event_id + '-data'
self._data_attr = {'class' : self._data_id}
self._event_attr['page_data'] = self._data_id
self.html('$(document).ready(function(){$(".%s").click(function(){page_put($(this));});});' % self._event_id)
def data(self): return self._data_attr
return Put(self._page_view[name], *argv)
def delete(self, name, *argv):
class Delete(Page.InteractiveTag):
def __init__(self, view, *argv):
Page.InteractiveTag.__init__(self, view, *argv)
self.html('$(document).ready(function(){$(".%s").click(function(){page_delete($(this));});});' % self._event_id)
return Delete(self._page_view[name], *argv)
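    # --- hypothetical usage sketch (not part of the original framework) ---
    # Assuming a Page instance `page` with a view registered via @page.view()
    # under the name 'refresh', the interactive helpers are used roughly as:
    #
    #     script = page.get('refresh')                      # <script> wiring the click handler
    #     link = Tag('a', **script.event()).html('Reload')  # clickable element
    #
    # Both tags are emitted together; clicking the link issues a GET against
    # the view URL and patches the element whose id matches the view id.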
#===============================================================================
# Page Statics
#===============================================================================
Page(url='/page', cache=True)
@export('GET', '/page/empty', content_type=ContentType.AppJson)
def empty_page(req): return {'error' : 'Page Empty'}
@export('GET', '/favicon.ico', content_type=ContentType.AppStream)
def default_favicon(req, *argv): return Cache.getCache(pwd() + '/static/image/favicon.ico')
| 2.4375 | 2 |
pharmacognosy/users/__init__.py | skylifewww/pharmacognosy | 0 | 12797024 | <reponame>skylifewww/pharmacognosy<gh_stars>0
default_app_config = 'pharmacognosy.users.apps.Config'
| 1.109375 | 1 |
example.py | bekaertruben/ns-census-maximizer | 3 | 12797025 | <filename>example.py<gh_stars>1-10
import census_maximizer as cm
import matplotlib.pyplot as plt # if not installed, run `pip install matplotlib`
# CONFIG
USER = "<Insert nation name here>"
PASSWORD = "<<PASSWORD> here>"
CONTACT = "<Nationstates demands that the User Agent contain a method of contacting the Script's owner. An email address is fine>"
cm.init(CONTACT)
solver = cm.CensusMaximizer(USER, PASSWORD)
solver.adjust_weights(census = {
-1 : ("Wealth Gaps", "Obesity", "Crime", "Charmlessness", "Primitiveness", "Averageness", "Death Rate", "Taxation"),
0 : ("Rudeness", "Ignorance", "Corruption", "Government Size", "Political Apathy", "Authoritarianism"),
2 : ("Economy", "Political Freedom"),
3 : ("Civil Rights",),
}, policy={
"No Internet": -10
})
# solve issues for the nation:
solver.solve_issues()
# plot overall weighted census score over time (password is not required for this)
plt.plot(*solver.census_score_history())
plt.show()
| 3.15625 | 3 |
python/setuptools/example_without_dependency/jdhp_setuptools_demo/__init__.py | jeremiedecock/snippets | 23 | 12797026 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 <NAME> (http://www.jdhp.org)
"""
A great package example!
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
__version__ = "0.2"
__all__ = ['row_your_boat']
| 1.898438 | 2 |
f5/bigip/shared/test/functional/test_iapp.py | nghia-tran/f5-common-python | 272 | 12797027 | # Copyright 2015 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from distutils.version import LooseVersion
from requests.exceptions import HTTPError
pytestmark = pytest.mark.skipif(
LooseVersion(pytest.config.getoption('--release'))
< LooseVersion('12.0.0'),
reason='Needs v12 TMOS or greater to pass.'
)
@pytest.fixture(scope='function')
def iapp_lx(mgmt_root):
fake_iapp_name = 'foo-iapp.rpm'
sio = StringIO(80*'a')
ftu = mgmt_root.shared.file_transfer.uploads
ftu.upload_stringio(sio, fake_iapp_name, chunk_size=20)
yield fake_iapp_name
tpath_name = '/var/config/rest/downloads/{0}'.format(fake_iapp_name)
mgmt_root.tm.util.unix_rm.exec_cmd('run', utilCmdArgs=tpath_name)
@pytest.fixture(scope='function')
def pkg_task(mgmt_root, iapp_lx):
collection = mgmt_root.shared.iapp.package_management_tasks_s
task = collection.package_management_task.create(
operation='INSTALL',
packageFilePath='/var/config/rest/downloads/foo-iapp.rpm'
)
yield task
@pytest.fixture(scope='function')
def pkg_query_task(mgmt_root, iapp_lx):
collection = mgmt_root.shared.iapp.package_management_tasks_s
task = collection.package_management_task.create(
operation='QUERY'
)
yield task
class TestPackageManagementTasks(object):
def test_create_task(self, pkg_task):
assert pkg_task.operation == "INSTALL"
assert pkg_task.kind == \
'shared:iapp:package-management-tasks:iapppackagemanagementtaskstate' # NOQA
def test_load_no_task(self, mgmt_root):
with pytest.raises(HTTPError) as err:
collection = mgmt_root.shared.iapp.package_management_tasks_s
collection.package_management_task.load(
id='asdasdasd'
)
assert err.value.response.status_code == 404
def test_load(self, mgmt_root, pkg_task):
collection = mgmt_root.shared.iapp.package_management_tasks_s
resource = collection.package_management_task.load(id=pkg_task.id)
assert pkg_task.id == resource.id
assert pkg_task.selfLink == resource.selfLink
def test_exists(self, mgmt_root, pkg_task):
pid = str(pkg_task.id)
collection = mgmt_root.shared.iapp.package_management_tasks_s
exists = collection.package_management_task.exists(id=pid)
assert exists is True
def test_cancel(self, pkg_task):
pkg_task.cancel()
assert pkg_task.__dict__['canceled']
def test_delete(self, pkg_task):
pkg_task.cancel()
while True:
pkg_task.refresh()
if pkg_task.status in ['CANCELED', 'FAILED', 'FINISHED']:
pkg_task.delete()
break
assert pkg_task.__dict__['deleted']
def test_package_mgmt_tasks_collection(self, mgmt_root, iapp_lx):
col = mgmt_root.shared.iapp.package_management_tasks_s.get_collection()
assert isinstance(col, list)
assert len(col) > 0
def test_create_query_task(self, pkg_query_task):
assert pkg_query_task.operation == "QUERY"
assert pkg_query_task.kind == \
'shared:iapp:package-management-tasks:iapppackagemanagementtaskstate' # NOQA
| 1.617188 | 2 |
lesson-03/fractal_tree_v1.0.py | hemiaoio/pylearning | 1 | 12797028 | """
Function: fractal tree
Version: 1.0
Date: 2018/08/19
"""
import turtle
def draw_branch(branch_length, pen_size):
if(branch_length > 0):
turtle.forward(branch_length)
turtle.right(20)
draw_branch(branch_length-10, pen_size)
turtle.left(40)
draw_branch(branch_length-10, pen_size)
turtle.right(20)
turtle.backward(branch_length)
def main():
    # move the pen to its starting position
turtle.right(90)
turtle.penup()
turtle.forward(300)
turtle.pendown()
turtle.left(90)
turtle.left(90)
draw_branch(100, 5)
turtle.exitonclick()
if __name__ == '__main__':
main()
| 4.28125 | 4 |
renpy/preferences.py | GlebYoutuber/DDLC | 1 | 12797029 | <filename>renpy/preferences.py
# Copyright 2004-2017 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import renpy.audio
pad_bindings = {
"pad_leftshoulder_press" : [ "rollback", ],
"pad_lefttrigger_pos" : [ "rollback", ],
"pad_back_press" : [ "rollback", ],
"pad_guide_press" : [ "game_menu", ],
"pad_start_press" : [ "game_menu", ],
"pad_y_press" : [ "hide_windows", ],
"pad_rightshoulder_press" : [ "rollforward", ],
"pad_righttrigger_press" : [ "dismiss", "button_select" ],
"pad_a_press" : [ "dismiss", "button_select" ],
"pad_b_press" : [ "button_alternate" ],
"pad_dleft_press" : [ "focus_left", "bar_left" ],
"pad_leftx_neg" : [ "focus_left", "bar_left" ],
"pad_rightx_neg" : [ "focus_left", "bar_left" ],
"pad_dpright_press" : [ "focus_right", "bar_right" ],
"pad_leftx_pos" : [ "focus_right", "bar_right" ],
"pad_rightx_pos" : [ "focus_right", "bar_right" ],
"pad_dpup_press" : [ "focus_up", "bar_up" ],
"pad_lefty_neg" : [ "focus_up", "bar_up" ],
"pad_righty_neg" : [ "focus_up", "bar_up" ],
"pad_dpdown_press" : [ "focus_down", "bar_down" ],
"pad_lefty_pos" : [ "focus_down", "bar_down" ],
"pad_righty_pos" : [ "focus_down", "bar_down" ],
}
class Preferences(renpy.object.Object):
"""
Stores preferences that will one day be persisted.
"""
__version__ = 18
def after_upgrade(self, version):
if version < 1:
self.mute_volumes = 0
if version < 2:
self.using_afm_enable = False
if version < 3:
self.physical_size = None
if version < 4:
self.renderer = "auto"
self.performance_test = True
if version < 5:
self.language = None
if version < 6:
self.wait_voice = True
if version < 7:
self.voice_sustain = False
if version < 8:
self.mouse_move = False
if version < 9:
self.afm_after_click = False
if version < 11:
self.show_empty_window = True
if version < 13:
self.self_voicing = False
if version < 14:
self.emphasize_audio = False
if version < 15:
self.pad_enabled = True
if version < 17:
self.init_rollback_side()
if version < 18:
self.virtual_size = None
self.video_image_fallback = False
def __init__(self):
self.fullscreen = False
self.skip_unseen = False
self.text_cps = 0
self.afm_time = 0
self.afm_enable = True
self.using_afm_enable = False
self.voice_sustain = False
self.mouse_move = False
self.show_empty_window = True
# Should we wait for the voice to stop?
self.wait_voice = True
# Should we disengage auto-forward mode after a click?
self.afm_after_click = False
# 2 - All transitions.
# 1 - Only non-default transitions.
# 0 - No transitions.
self.transitions = 2
# Should video sprites always default to provided displayables if possible?
self.video_image_fallback = False
self.skip_after_choices = False
# Mixer channel info.
# A map from channel name to the current volume (between 0 and 1).
self.volumes = { }
# True if the channel should not play music. False
# otherwise. (Not used anymore.)
self.mute = { }
# Joystick mappings.
self.joymap = dict()
# The size of the window, or None if we don't know it yet.
self.physical_size = None
# The virtual size at the time self.physical_size was set.
self.virtual_size = None
# The graphics renderer we use.
self.renderer = "auto"
# Should we do a performance test on startup?
self.performance_test = True
# The language we use for translations.
self.language = None
# Should we self-voice?
self.self_voicing = False
# Should we emphasize audio?
self.emphasize_audio = False
# Is the gamepad enabled?
self.pad_enabled = True
self.init_rollback_side()
def init_rollback_side(self):
self.mobile_rollback_side = "disable"
self.desktop_rollback_side = "disable"
def set_volume(self, mixer, volume):
if volume != 0:
self.mute[mixer] = False
self.volumes[mixer] = volume
def get_volume(self, mixer):
if mixer not in self.volumes:
return 0.0
if self.mute.get(mixer, False):
return 0.0
return self.volumes[mixer]
def set_mute(self, mixer, mute):
self.mute[mixer] = mute
if (not mute) and (self.volumes.get(mixer, 1.0) == 0.0):
self.volumes[mixer] = 1.0
def get_mute(self, mixer):
if mixer not in self.volumes:
return False
return self.mute[mixer]
def init_mixers(self):
for i in renpy.audio.music.get_all_mixers():
self.volumes.setdefault(i, 1.0)
self.mute.setdefault(i, False)
def get_all_mixers(self):
return renpy.audio.music.get_all_mixers()
def __eq__(self, other):
return vars(self) == vars(other)
renpy.game.Preferences = Preferences
renpy.game.preferences = Preferences()
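# --- hypothetical usage sketch (not part of the original module) ---
# prefs = renpy.game.preferences
# prefs.init_mixers()                      # seed per-mixer volume/mute defaults
# prefs.set_volume("music", 0.5)
# assert prefs.get_volume("music") == 0.5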
| 1.71875 | 2 |
app/apps/core/migrations/0015_auto_20190426_0724.py | lawi21/escriptorium | 4 | 12797030 | # Generated by Django 2.1.4 on 2019-04-26 07:24
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0014_auto_20190417_1639'),
]
operations = [
migrations.AlterUniqueTogether(
name='documentpart',
unique_together={('order', 'document')},
),
]
| 1.375 | 1 |
LevelBuilder/sourceGenerator.py | joonamo/pocket-game | 0 | 12797031 | def write_tiledata(outfile, bank, tiles_data, name):
def write_tiles():
out = ""
for tile in tiles_data:
out += " // " + tile["name"] + "\n"
out += " " + ",".join(tile["hexdata"]) + ",\n"
return out
def write_palettes():
return ", ".join([hex(tile["palette_idx"]) for tile in tiles_data])
# write tiledata
with open(outfile + "_tiles.b" + bank + ".c", "w") as file:
file.write("\
#pragma bank " + bank + "\n\
\n\
void empty(void) __nonbanked {}\n\
__addressmod empty const CODE;\n\
\n\
const unsigned char " + name + "_tiles_data[] = {\n\
" + write_tiles() + "};\n\
\n\
const unsigned char " + name + "_tile_palettes[] = {\n\
" + write_palettes() + "\n\
};\n\
\n\
#include \"TilesInfo.h\"\n\
const struct TilesInfoInternal " + name + "_tiles_internal = {\n\
8, //width\n\
8, //height\n\
" + str(len(tiles_data)) + ", //num_tiles\n\
" + name + "_tiles_data, //tiles\n\
" + name + "_tile_palettes, //CGB palette\n\
};\n\
CODE struct TilesInfo " + name + "_tiles = {\n\
" + bank + ", //bank\n\
&" + name + "_tiles_internal, //data\n\
};\n\
")
# write tiledata header
with open(outfile + "_tiles.h", "w") as file:
file.write("\
#ifndef TILES_" + name + "_tiles_H\n\
#define TILES_" + name + "_tiles_H\n\
#include \"TilesInfo.h\"\n\
extern struct TilesInfo " + name + "_tiles;\n\
#endif\n\
")
def write_mapdata(
outfile,
bank,
    map_data,
    map_width,
map_height,
map_name,
collision_tiles,
collision_down_tiles):
def write_collision_tiles(collision_tiles):
if len(collision_tiles) == 0:
return "0"
return "{" + ", ".join(str(v) for v in collision_tiles) + ", 0}"
# write map
with open(outfile + ".b" + bank + ".c", "w") as file:
file.write("\
#pragma bank " + bank + "\n\
\n\
void empty(void) __nonbanked {}\n\
__addressmod empty const CODE;\n\
\n\
const unsigned char " + map_name + "_map[] = { \n\
" + map_data + " \n\
};\n\
#include \"" + map_name + "_tiles.h\"\n\
#include \"MapInfo.h\"\n\
#include \"types.h\"\n\
const struct MapInfoInternal " + map_name + "_internal = {\n\
" + map_name + "_map, //map\n\
" + map_width + ", //width\n\
" + map_height + ", //height\n\
0, //attributes\n\
&" + map_name + "_tiles, //tiles info\n\
};\n\
CODE struct MapInfo " + map_name + " = {\n\
3, //bank\n\
&" + map_name + "_internal, //data\n\
};\n\
CODE UINT8 " + map_name + "_collision_tiles[] = " + write_collision_tiles(collision_tiles) + ";\n\
CODE UINT8 " + map_name + "_collision_down_tiles[] = " + write_collision_tiles(collision_down_tiles) + ";\n\
\n\
")
# write map header
with open(outfile + ".h", "w") as file:
file.write("\
#ifndef MAP_" + map_name + "_H\n\
#define MAP_" + map_name + "_H\n\
#define mapWidth " + map_width + "\n\
#define mapHeight " + map_height + "\n\
#include \"MapInfo.h\"\n\
#include \"types.h\"\n\
extern unsigned char bank_" + map_name + ";\n\
extern struct MapInfo " + map_name + ";\n\
extern UINT8 " + map_name + "_collision_tiles;\n\
extern UINT8 " + map_name + "_collision_down_tiles;\n\
#endif\n\
")
def write_palette_data(outfile, bank, name, palettes):
def palette_or_zero(idx):
try:
return ", ".join([hex(v).rjust(6, " ") for v in palettes[idx]])
except:
return "0x0000, 0x0000, 0x0000, 0x0000"
if len(palettes) > 8:
raise ValueError("More than 8 palettes given!")
with open(outfile + "_palette.b" + bank + ".c", "w") as file:
file.write("\
#pragma bank " + bank + "\n\
\n\
void empty(void) __nonbanked {}\n\
__addressmod empty const CODE;\n\
\n\
#include \"types.h\"\n\
\n\
CODE UINT16 " + name + "_palette[] = {\n\
" + palette_or_zero(0) + ",\n\
" + palette_or_zero(1) + ",\n\
" + palette_or_zero(2) + ",\n\
" + palette_or_zero(3) + ",\n\
" + palette_or_zero(4) + ",\n\
" + palette_or_zero(5) + ",\n\
" + palette_or_zero(6) + ",\n\
" + palette_or_zero(7) + "\n\
};")
# write tiledata header
with open(outfile + "_palette.h", "w") as file:
file.write("\
#ifndef PALETTE_" + name + "_palette_H\n\
#define PALETTE_" + name + "_palette_H\n\
#include \"types.h\"\n\
extern UINT16 " + name + "_palette;\n\
#endif\n\
") | 3.28125 | 3 |
core_admin/des/migrations/0033_auto_20210722_1449.py | linea-it/tno | 0 | 12797032 | # Generated by Django 2.2.13 on 2021-07-22 14:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('des', '0032_auto_20210713_2127'),
]
operations = [
migrations.AlterField(
model_name='astrometryjob',
name='status',
field=models.IntegerField(choices=[(1, 'Idle'), (2, 'Running'), (3, 'Completed'), (4, 'Failed'), (5, 'Aborted'), (6, 'Warning'), (7, 'Launched')], default=1, verbose_name='Status'),
),
]
| 1.773438 | 2 |
tests/test_matrix.py | vladislav-miroshnikov/formal-lang-course | 0 | 12797033 | import pytest
from pyformlang.finite_automaton import NondeterministicFiniteAutomaton
from project import BooleanMatrices
@pytest.fixture
def nfa():
nfa = NondeterministicFiniteAutomaton()
nfa.add_transitions(
[
(0, "X", 1),
(0, "X", 2),
(1, "Y", 2),
(1, "Z", 1),
(2, "S", 3),
(3, "W", 4),
(4, "W", 0),
]
)
return nfa
@pytest.mark.parametrize(
"label,expected_nnz", [("X", 2), ("Y", 1), ("Z", 1), ("S", 1), ("W", 2)]
)
def test_nonzero(nfa, label, expected_nnz):
bm = BooleanMatrices(nfa)
actual_nnz = bm.bool_matrices[label].nnz
assert actual_nnz == expected_nnz
def test_symbols(nfa):
bm = BooleanMatrices(nfa)
actual_symbols = bm.bool_matrices.keys()
expected_symbols = nfa.symbols
assert actual_symbols == expected_symbols
@pytest.mark.parametrize(
"label,edges",
[
("X", [(0, 1), (0, 2)]),
("Y", [(1, 2)]),
("Z", [(1, 1)]),
("S", [(2, 3)]),
("W", [(3, 4), (4, 0)]),
],
)
def test_adjacency(nfa, label, edges):
bm = BooleanMatrices(nfa)
assert all(bm.bool_matrices[label][edge] for edge in edges)
def test_transitive_closure(nfa):
bm = BooleanMatrices(nfa)
tc = bm.make_transitive_closure()
assert tc.sum() == tc.size
| 2.328125 | 2 |
python-100-examples/test76.py | zengxianbin/Practice | 2 | 12797034 | #!/usr/local/bin/python3
# -*- coding: UTF-8 -*-
class Solution(object):
    def test76(self, n):
        '''
        Problem: write a function that computes 1/2 + 1/4 + ... + 1/n when the
        input n is even, and 1/1 + 1/3 + ... + 1/n when n is odd.
        '''
        start = 2 if n % 2 == 0 else 1
        return sum(1.0 / i for i in range(start, n + 1, 2))
if __name__ == "__main__":
solution = Solution()
    print(solution.test76(100))
| 3.09375 | 3 |
src/electric/main.py | electricdb/electric-cli | 0 | 12797035 | <filename>src/electric/main.py
"""ElectricDB command line interface utility.
Run `electric --help` for usage.
"""
import click
from .resources.auth import auth
# from .resources.account import account
# from .resources.database import database
from . import browser
from . import config
@click.group(cls=click.Group)
@click.option('--endpoint', metavar='URL', envvar='ELECTRIC_DATA_ENDPOINT',
default=config.default_endpoint(), show_default=True,
help='Web service API endpoint.')
@click.pass_context
def cli(ctx, endpoint):
"""ElectricDB - Low latency database hosting."""
ctx.obj = NotImplemented
@cli.command()
@click.pass_obj
def docs(obj):
"""Open the online documentation in a web browser."""
browser.open(config.documentation_url())
cli.add_command(auth)
# cli.add_command(account)
# cli.add_command(database)
| 2.28125 | 2 |
assign_rights/mixins/authmixins.py | RockefellerArchiveCenter/aquila | 0 | 12797036 | <gh_stars>0
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.urls import reverse_lazy
class LoggedInMixinDefaults(LoginRequiredMixin):
"""Sets basic login_url for mixin defaults."""
login_url = reverse_lazy("login")
class EditMixin(LoggedInMixinDefaults, UserPassesTestMixin):
"""Checks whether a user is either a superuser or in the edit group."""
def test_func(self):
return any([
self.request.user.is_superuser,
self.request.user.groups.filter(name='edit').exists()
])
class DeleteMixin(LoggedInMixinDefaults, UserPassesTestMixin):
"""Checks whether a user is either a superuser or in the delete group."""
def test_func(self):
return any([
self.request.user.is_superuser,
self.request.user.groups.filter(name='delete').exists()
])
| 2.3125 | 2 |
Modifier/interface/other.py | hamano0813/PoR | 4 | 12797037 | <filename>Modifier/interface/other.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from PySide6.QtWidgets import QGridLayout, QTextEdit
from widget import BackgroundFrame, ValueSpin, NameLabel
from parameter import DataSetting
# noinspection PyTypeChecker
class Other(BackgroundFrame):
def __init__(self, parent):
BackgroundFrame.__init__(self, parent)
self['所持金'] = ValueSpin(None, value=DataSetting()['所持金'])
self['奖励EX'] = ValueSpin(None, value=DataSetting()['奖励EX'])
self['所持金'].set_parent(None)
self['奖励EX'].set_parent(None)
text_edit = QTextEdit()
text_edit.setHtml('''
        <b>Modifications can cause serious errors; saving in time is a good habit</b><br>
        1. Supports the original Japanese version and the Chinese localization, on any Dolphin development build from 5.0 to date<br>
        2. When changing a character's class to an advanced class, remember to tick the "advanced class" hidden trait under skills<br>
        3. Model edits can in principle change whether a character holds a weapon, but odd 3D texture glitches may appear; use with caution<br>
        4. Changing an enemy to your side may make them join on chapter clear, but it easily breaks later chapters' rosters and forced deployments; use with caution<br>
        5. Weapon proficiency only takes effect when the class can use that weapon type; classes such as Paladin can tick hidden traits to equip every weapon type<br>
        6. A character's biorhythm is tied to battle count: the rhythm curve advances one step every 10 battles, with a 30-step cycle<br>
        7. Some hidden traits in the skill editor have no effect; modify untranslated hidden traits with caution unless you know what they mean<br>
<br>
<i>created by Hamano0813</i>
''')
text_edit.setReadOnly(True)
main_layout = QGridLayout()
main_layout.addWidget(NameLabel('所持金'), 0, 0, 1, 1)
main_layout.addWidget(self['所持金'], 0, 1, 1, 1)
main_layout.addWidget(NameLabel('奖励EX'), 1, 0, 1, 1)
main_layout.addWidget(self['奖励EX'], 1, 1, 1, 1)
main_layout.addWidget(text_edit, 2, 0, 1, 3)
main_layout.setSpacing(3)
self.setLayout(main_layout)
| 2.1875 | 2 |
quanttrader/event/__init__.py | qalpha/quanttrader | 135 | 12797038 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .event import *
from .backtest_event_engine import *
from .live_event_engine import * | 1.09375 | 1 |
autoio/mess_io/tests/test__read_pes.py | mobergd/autoio | 0 | 12797039 | <filename>autoio/mess_io/tests/test__read_pes.py
"""
tests pes reader
"""
import os
import numpy
import mess_io
PATH = os.path.dirname(os.path.realpath(__file__))
DATA_PATH = os.path.join(PATH, 'data')
DATA_NAME = 'rates.inp'
with open(os.path.join(DATA_PATH, DATA_NAME), 'r') as datfile:
INP_STR = datfile.read()
def test__pes():
""" tests mess_io.reader.pes
"""
# Test reading with removing any fake wells
energy_dct1, conn_lst1 = mess_io.reader.pes(
input_string=INP_STR,
read_fake=False)
ref_energy_dct1 = {
'P1': 0.0,
'P2': 3.22,
'B1': 13.23
}
ref_conn_lst1 = (
('P1', 'B1'),
('B1', 'P2')
)
assert set(energy_dct1.keys()) == set(ref_energy_dct1.keys())
assert all(numpy.isclose(energy_dct1[key], ref_energy_dct1[key])
for key in energy_dct1)
assert conn_lst1 == ref_conn_lst1
# Test reading the entire PES with fake wells
energy_dct2, conn_lst2 = mess_io.reader.pes(
input_string=INP_STR,
read_fake=True)
ref_energy_dct2 = {
'F1': -1.0,
'F2': 2.22,
'P1': 0.0,
'P2': 3.22,
'FRB1': 0.0,
'FPB1': 3.22,
'B1': 13.23
}
ref_conn_lst2 = (
('P1', 'FRB1'),
('FRB1', 'F1'),
('P2', 'FPB1'),
('FPB1', 'F2'),
('F1', 'B1'),
('B1', 'F2')
)
assert set(energy_dct2.keys()) == set(ref_energy_dct2.keys())
assert all(numpy.isclose(energy_dct2[key], ref_energy_dct2[key])
for key in energy_dct2)
assert conn_lst2 == ref_conn_lst2
if __name__ == '__main__':
test__pes()
| 2.359375 | 2 |
alipay/aop/api/domain/MultiStagePayLineInfo.py | antopen/alipay-sdk-python-all | 213 | 12797040 | <reponame>antopen/alipay-sdk-python-all<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class MultiStagePayLineInfo(object):
def __init__(self):
self._payment_amount = None
self._payment_idx = None
@property
def payment_amount(self):
return self._payment_amount
@payment_amount.setter
def payment_amount(self, value):
self._payment_amount = value
@property
def payment_idx(self):
return self._payment_idx
@payment_idx.setter
def payment_idx(self, value):
self._payment_idx = value
def to_alipay_dict(self):
params = dict()
if self.payment_amount:
if hasattr(self.payment_amount, 'to_alipay_dict'):
params['payment_amount'] = self.payment_amount.to_alipay_dict()
else:
params['payment_amount'] = self.payment_amount
if self.payment_idx:
if hasattr(self.payment_idx, 'to_alipay_dict'):
params['payment_idx'] = self.payment_idx.to_alipay_dict()
else:
params['payment_idx'] = self.payment_idx
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = MultiStagePayLineInfo()
if 'payment_amount' in d:
o.payment_amount = d['payment_amount']
if 'payment_idx' in d:
o.payment_idx = d['payment_idx']
return o
| 2.109375 | 2 |
scripts/helpers.py | mashaelalzaid/rare-diseases-SE | 0 | 12797041 | # -*- coding: utf-8 -*-
from scripts import tabledef
from flask import session
from sqlalchemy.orm import sessionmaker
from contextlib import contextmanager
import bcrypt
import sys, subprocess, ipaddress, time, datetime, json, os, csv, copy
from watson_developer_cloud import DiscoveryV1
EnvID="5aec3469-82f9-49cb-9718-e3d0526a85f7"
ColID="ccc5a579-296d-445f-a4cf-9fd81c536e8d"
ConfID="e813ec51-af96-422f-943c-65d776818292"
@contextmanager
def session_scope():
"""Provide a transactional scope around a series of operations."""
s = get_session()
s.expire_on_commit = False
try:
yield s
s.commit()
except:
s.rollback()
raise
finally:
s.close()
def get_session():
return sessionmaker(bind=tabledef.engine)()
def get_natural_language_query(query):
#with session_scope() as s:
print("query is"+query)
discovery = DiscoveryV1(version='2018-03-05', username="9e523dc4-1206-4898-a30f-faf75cd8526b", password="<PASSWORD>")
my_query = discovery.query(environment_id=EnvID, collection_id=ColID, query=query, passages='true', passages_count='1', count=1, highlight='true')
p_passage=my_query['passages'][0]["passage_text"]
p_score=my_query['passages'][0]["passage_score"]
p_id=my_query['passages'][0]["document_id"]
querylist = [p_passage,p_score,p_id]
return querylist
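
# --- hypothetical usage sketch (not part of the original module) ---
# Returns [passage_text, passage_score, document_id] for the best passage:
#
#     passage, score, doc_id = get_natural_language_query("rare disease symptoms")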
| 2.1875 | 2 |
billiards/billiards/views/match.py | zxkane/billiards | 0 | 12797042 | <reponame>zxkane/billiards<filename>billiards/billiards/views/match.py
# -*- coding: utf-8 -*-
# encoding: utf-8
'''
Created on Oct 22, 2013
@author: kane
'''
from StringIO import StringIO
import datetime
import json
from dateutil.relativedelta import relativedelta
from django.core import serializers
from django.core.exceptions import PermissionDenied
from django.db.models.aggregates import Max
from django.db.models.query_utils import Q
from django.http import HttpResponse
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.template.context import RequestContext
from django.utils import simplejson
import pytz
from billiards import settings
from billiards.commons import tojson
from billiards.models import Match, MatchEnroll, \
match_fields
from billiards.settings import TEMPLATE_ROOT, TIME_ZONE
def updateMatchJsonStrEnrollInfo(matchjsonstr, user, matchArray):
enrolledMatch = MatchEnroll.objects.filter(Q(match__in=matchArray) & Q(user__exact=user))
if len(enrolledMatch) > 0:
matches = simplejson.loads(matchjsonstr)
for enrollinfo in enrolledMatch:
for match in matches:
if enrollinfo.match.id == match['pk']:
match['fields']['enrolled'] = True
break
matchjsonstr = simplejson.dumps(matches)
return matchjsonstr
def updateMatchQuerySetEnrollInfo(matchQuerySet, user):
enrolledMatch = MatchEnroll.objects.filter(Q(match__in=matchQuerySet) & Q(user__exact=user))
if len(enrolledMatch) > 0:
for enrollinfo in enrolledMatch:
for match in matchQuerySet:
if enrollinfo.match.id == match.id:
setattr(match, 'enrolled', True)
break
return matchQuerySet
datefmt = "%Y-%m-%d"
def set_to_midnight(dt):
midnight = datetime.time(0)
localtz = pytz.timezone(settings.TIME_ZONE)
return localtz.localize(datetime.datetime.combine(dt.date(), midnight))
def getQueryCriteria(starttime, endtime):
return Q(starttime__gte=set_to_midnight(starttime)) & Q(status='approved') & Q(starttime__lt=set_to_midnight(endtime))
def getMatchByRequest(request, starttime = None, endtime = None, deltadays = 1):
if starttime == None:
starttimenative = datetime.datetime.today()
localtz = pytz.timezone(settings.TIME_ZONE)
starttime = localtz.localize(starttimenative)
try:
if request.GET.get('starttime') is not None:
starttime = set_to_midnight(datetime.datetime.utcfromtimestamp(float(request.GET.get('starttime'))))
except Exception:
pass
if endtime == None:
endtime = starttime + relativedelta(days=deltadays)
try:
if request.GET.get('endtime') is not None:
endtime = datetime.datetime.utcfromtimestamp(float(request.GET.get('endtime')))
except Exception:
pass
query = getQueryCriteria(starttime, endtime)
matches = Match.objects.filter(query).order_by('starttime')
return matches, starttime, endtime
def index(request, view = None):
starttime = None
intervals = 7
starttime2 = datetime.datetime.today()
endtime2 = starttime2 + relativedelta(days=intervals)
if 's' in request.GET:
try:
starttimeS = datetime.datetime.utcfromtimestamp(float(request.GET.get('s')))
if set_to_midnight(starttime2) <= set_to_midnight(starttimeS) <= set_to_midnight(endtime2):
starttime = starttimeS
except Exception:
pass
matches, starttime, endtime = getMatchByRequest(request, starttime)
if request.GET.get('f') == 'json':
jsonstr = tojson(matches, match_fields)
if request.user.is_authenticated():
jsonstr = updateMatchJsonStrEnrollInfo(jsonstr, request.user, matches)
return HttpResponse(jsonstr)
page = 'match.html'
query2 = getQueryCriteria(starttime2, endtime2)
matchCountSummary = dict()
rt = Match.objects.filter(query2)
for match in rt:
if match.starttime.strftime(datefmt) in matchCountSummary:
matchCountSummary[match.starttime.strftime(datefmt)] += 1
else:
matchCountSummary[match.starttime.strftime(datefmt)] = 1
topOneBonusSummary = Match.objects.values('starttime','bonus').filter(query2).filter(bonus=Match.objects.filter(query2).aggregate(Max('bonus'))['bonus__max'])
def ValuesQuerySetToDict(vqs):
return [{'bonus': item['bonus'], 'starttime': item['starttime'].strftime(datefmt)} for item in vqs]
return render_to_response(TEMPLATE_ROOT + page,
{'matches': matches, 'startdate': starttime2, 'enddate': endtime2,
'intervals': intervals, 'matchsummary': matchCountSummary, 'bonussummary': simplejson.dumps(ValuesQuerySetToDict(topOneBonusSummary)),
},
context_instance=RequestContext(request))
def getMatch(matchid):
return get_object_or_404(Match, pk=matchid, status='approved')
def detail(request, matchid):
match = getMatch(matchid)
if match.type == 2:
return redirect('activity_detail', matchid=matchid)
if request.GET.get('f') == 'json':
json_serializer = serializers.get_serializer("json")()
stream = StringIO()
json_serializer.serialize([match], fields=('id', 'poolroom', 'title', 'bonus', 'starttime', 'description'), ensure_ascii=False, stream=stream, indent=2, use_natural_keys=True)
jsonstr = stream.getvalue()
if request.user.is_authenticated():
jsonstr = updateMatchJsonStrEnrollInfo(jsonstr, request.user, [match])
return HttpResponse(jsonstr)
if request.user.is_authenticated():
match = updateMatchQuerySetEnrollInfo([match], request.user)[0]
return render_to_response(TEMPLATE_ROOT + 'match_detail.html', {'match': match},
context_instance=RequestContext(request))
def activity(request, matchid):
match = getMatch(matchid)
if match.type == 1:
return redirect('match_detail', matchid=matchid)
return render_to_response(TEMPLATE_ROOT + 'activity_detail.html', {'match': match},
context_instance=RequestContext(request))
def enroll(request, matchid):
if not request.user.is_authenticated():
raise PermissionDenied
match = getMatch(matchid)
if match.is_expired:
return HttpResponse(json.dumps({'rt': 3, 'msg': 'match is expired'}), content_type="application/json")
elif match.status != 'approved':
return HttpResponse(json.dumps({'rt': 4, 'msg': 'match is invalid'}), content_type="application/json")
elif match.type == 2:
return HttpResponse(json.dumps({'rt': 5, 'msg': 'can not enroll activity'}), content_type="application/json")
obj, created = MatchEnroll.objects.get_or_create(match=match, user=request.user,
defaults={'enrolltime': datetime.datetime.utcnow().replace(tzinfo=pytz.timezone(TIME_ZONE))})
if obj != False:
msg = {'rt': 2, 'msg': 'already enrolled'}
elif created != False:
msg = {'rt': 1, 'msg': 'enrolled'}
return HttpResponse(json.dumps(msg), content_type="application/json")
def redbull_2014_05(request):
if 'f' in request.GET and request.GET.get('f') == 'json':
redbull_matches = Match.objects.filter(Q(flags=Match.flags.redbull)).order_by('starttime')
json_serializer = serializers.get_serializer("json")()
stream = StringIO()
json_serializer.serialize(redbull_matches, fields=match_fields, ensure_ascii=False, stream=stream, indent=2, use_natural_keys=True)
jsonstr = stream.getvalue()
return HttpResponse(jsonstr)
return render_to_response(TEMPLATE_ROOT + 'redbull/match_poolroom.html', {},
context_instance=RequestContext(request))
def winners(request):
return render_to_response(TEMPLATE_ROOT + 'redbull/match_winners.html', context_instance=RequestContext(request))
| 2 | 2 |
data/dataset2ssv.py | TWTDIG/frechetrange | 5 | 12797043 | <filename>data/dataset2ssv.py
""" Importer for T-Drive Dataset
takes t-drive directory and creates SSV
"""
import numpy as np
import pandas as pd
import os
import sys
from tqdm import tqdm
from matplotlib import pyplot as plt
def character(indir, outfile):
print("Importing Character dataset")
print(" In-Dir: %s" % indir)
print("Out-File: %s" % outfile)
fileset = sorted([os.path.join(indir,x) for x in os.listdir(indir) if x.startswith("file-")])
print("==> Found %d files." % len(fileset))
of = open(outfile, "w")
of.write("x y id\n");
for idx,f in enumerate(tqdm(fileset)):
df = pd.read_table(f, sep=" ",header=None, skipinitialspace=True)
m = df.values
m = np.cumsum(m, axis=0) # the dataset reports a smoothed derivative. integrate to get a spatial object
m[:,-1] = np.ones(m.shape[0])*idx
np.savetxt(of,m)
def sanfrancisco(indir, outfile):
print("Importing from a San Francisco Directory")
print(" In-Dir: %s" % indir)
print("Out-File: %s" % outfile)
fileset = sorted([os.path.join(indir,x) for x in os.listdir(indir) if x.endswith(".plt")])
print("==> Found %d files." % len(fileset))
of = open(outfile, "w")
of.write("x y id\n");
for idx,f in enumerate(tqdm(fileset)):
df = pd.read_table(f, sep=" ",header=None, skipinitialspace=True)
m = df.values[:,range(3)] # remove last column
m[:,-1] = np.ones(m.shape[0])*idx
np.savetxt(of,m)
def geolife(indir, outfile):
print("Importing from GeoLife")
print(" In-Dir: %s" % indir)
print("Out-File: %s" % outfile)
# fileset = [os.path.join(root,f) for f in files for root, _, files in os.walk(indir)]
fileset=[]
for root,_,files in os.walk(indir):
fileset = fileset + [os.path.join(root, f) for f in files if f.endswith(".plt")]
fileset = sorted(fileset)
print("==> Found %d files." % len(fileset))
of = open(outfile, "w")
of.write("x y id\n");
for idx,f in enumerate(tqdm(fileset)):
# first skip the header
fd = open(f, "r");
header = [fd.readline() for _ in range(6)]
df = pd.read_csv(fd, header=None)
m = df.values[:,range(3)]
m[:,-1] = np.ones(m.shape[0])*idx
np.savetxt(of,m)
# drwxr-xr-x 3 wern_m3 1001 4096 Sep 16 2016 roma
# drwxr-xr-x 4 wern_m3 1001 4096 Jan 10 2017 sf_large
# drwxr-xr-x 11 wern_m3 1001 4096 Nov 5 2016 tdrive
def usage(msg):
print (msg)
sys.exit(-1)
if __name__=="__main__":
if (len(sys.argv) != 4):
usage("Run with 3 parameters: type, dir, outfilename")
cases = dict({
"character":character,
"sanfrancisco":sanfrancisco,
"geolife":geolife
})
if sys.argv[1] not in cases:
usage("Importer for %s not found." % sys.argv[1])
cases[sys.argv[1]](sys.argv[2],sys.argv[3])
| 2.703125 | 3 |
povary/apps/recipes/tasks.py | TorinAsakura/cooking | 0 | 12797044 | # -*- coding: utf-8 -*-
from celery.task import task
@task
def publish_recipe(recipe):
from recipes.models import Recipe
try:
recipe = Recipe.objects.get(id=recipe.id)
recipe.published=True
recipe.save()
except Recipe.DoesNotExist:
pass | 1.875 | 2 |
transmission_rpc/session.py | sc104501/transmission-rpc | 61 | 12797045 | # Copyright (c) 2018-2021 Trim21 <<EMAIL>>
# Copyright (c) 2008-2014 <NAME> <<EMAIL>>
# Licensed under the MIT license.
from typing import TYPE_CHECKING, Any, Dict, Tuple, Union, Generator
from typing_extensions import Literal
from transmission_rpc.lib_types import Field
if TYPE_CHECKING:
from transmission_rpc.client import Client
class Session:
"""
Session is a dict-like class holding the session data for a Transmission daemon.
    Access to the session fields is done through attributes.
The attributes available are the same as the session arguments in the
Transmission RPC specification, but with underscore instead of hyphen.
    For example, get ``'download-dir'`` with ``session.download_dir``.
.. code-block:: python
session = Client().get_session()
current = session.download_dir
there are also setter like ``Session().download_dir = '/path/to/download'``
.. code-block:: python
session = Client().get_session()
session.download_dir = '/path/to/new/download/dir'
if you want to batch update a session, call ``.update(data)``
.. code-block:: python
session = Client().get_session()
session.update({'k1': 'v1', "k2": "v2"})
    if you need to access the private ``Session()._fields``,
keys are stored with underscore.
"""
def __init__(self, client: "Client", fields: Dict[str, Any] = None):
self._client = client
self._fields: Dict[str, Field] = {}
if fields is not None:
self._update(fields)
def __getattr__(self, name: str) -> Any:
try:
return self._fields[name].value
except KeyError as e:
raise AttributeError(f"No attribute {name}") from e
def _set(self, key: str, value: Any, commit: bool = False) -> None:
key = key.replace("-", "_")
current_field = self._fields.get(key)
if current_field is None:
self._fields[key] = Field(value, True)
else:
if current_field.value != value:
self._fields[key] = Field(value, True)
if commit:
self._commit(key, value)
def __str__(self) -> str:
text = ""
max_length = max(len(x) for x in self._fields.keys()) + 1
for key, value in sorted(self._fields.items(), key=lambda x: x[0]):
text += f"{key.ljust(max_length)}: {value.value!r}\n"
return text
def _commit(self, key: str = None, value: Any = None) -> None:
"""submit all dirty field to client"""
dirty = {}
if key is not None and value is not None:
dirty[key] = value
else:
for k, v in self._fields.items():
if v.dirty:
dirty[k] = v.value
self._client.set_session(**dirty)
def _update(self, other: Union[Dict[str, Any], "Session"]) -> None:
if isinstance(other, dict):
for key, value in other.items():
self._set(key, value)
elif isinstance(other, Session):
for key, value in other._fields.items():
self._set(key, value.value)
else:
raise ValueError("Cannot update with supplied data")
def update(self, other: Union[Dict[str, Any], "Session"]) -> None:
"""
Update the session data from a Transmission JSON-RPC arguments dictionary
"""
self._update(other)
self._commit()
def keys(self) -> Generator[str, None, None]:
"""
session keys with underscore (eg: ``download_dir``)
"""
yield from self._fields.keys()
def values(self) -> Generator[Any, None, None]:
for value in self._fields.values():
yield value.value
def items(self) -> Generator[Tuple[str, Any], None, None]:
"""
iter key,value pair
hyphen in key is replace by underscore. (eg: ``'download_dir'``)
"""
for key, field in self._fields.items():
yield key, field.value
@property
def download_dir(self) -> str:
"""default download location
- rpc version 12
- transmission version 2.20
:return:
"""
return self.__getattr__("download_dir")
@download_dir.setter
def download_dir(self, location: str) -> None:
"""Enable/disable peer exchange."""
if isinstance(location, str) and location:
self._set("download_dir", location, True)
else:
raise TypeError(f"{location!r} if not a valid 'download-dir'")
@property
def version(self) -> str:
"""
- rpc version 3
- transmission version 1.41
"""
return self.__getattr__("version")
@property
def rpc_version(self) -> int:
"""
- rpc version 4
- transmission version 1.50
"""
return self.__getattr__("rpc_version")
@property
def peer_port(self) -> int:
"""Get the peer port.
- rpc version 5
- transmission version 1.60
"""
return self.__getattr__("peer_port")
@peer_port.setter
def peer_port(self, port: int) -> None:
"""Set the peer port.
- rpc version 5
- transmission version 1.60
"""
if isinstance(port, int):
self._set("peer_port", port, True)
else:
raise ValueError("Not a valid limit")
@property
def pex_enabled(self) -> bool:
"""Is peer exchange enabled
- rpc version 5
- transmission version 1.60"""
return self.__getattr__("pex_enabled")
@pex_enabled.setter
def pex_enabled(self, enabled: bool) -> None:
"""Enable/disable peer exchange."""
if isinstance(enabled, bool):
self._set("pex_enabled", enabled, True)
else:
raise TypeError("Not a valid type")
@property
def encryption(self) -> str:
return self.__getattr__("encryption")
@encryption.setter
def encryption(self, value: Literal["required", "preferred", "tolerated"]) -> None:
if value in {"required", "preferred", "tolerated"}:
self._set("encryption", value, commit=True)
else:
raise ValueError(
"Not a valid encryption, can only be one of ['required', 'preferred', 'tolerated']"
)
| 2.65625 | 3 |
Missions_to_Mars/scrape_mars.py | neilhsu70/web-scraping-challenge | 0 | 12797046 | <reponame>neilhsu70/web-scraping-challenge
from bs4 import BeautifulSoup
from splinter import Browser
import requests
import pymongo
import pandas as pd
import time
def scrape():
mars_news = 'https://mars.nasa.gov/news/'
mars_image = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
mars_twitter = 'https://twitter.com/marswxreport?lang=en'
mars_facts = 'https://space-facts.com/mars/'
mars_hemisphere = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
results = {}
# Mars News
response = requests.get(mars_news)
time.sleep(3)
news_pull = BeautifulSoup(response.text, 'lxml')
title = news_pull.find('div', class_= 'content_title')
news_title = title.a.text.strip()
paragraph = news_pull.find('div', class_= 'rollover_description_inner')
news_paragraph = paragraph.text.strip()
results['news_title'] = news_title
results['news_text'] = news_paragraph
# Mars Image
executable_path = {'executable_path': '/usr/local/bin/chromedriver'}
browser = Browser('chrome', **executable_path, headless=False)
browser.visit(mars_image)
browser.links.find_by_partial_text('FULL IMAGE').click()
browser.links.find_by_partial_text('more info').click()
featured_img_url = browser.find_by_css('.main_image').first['src']
results['image_url'] = featured_img_url
browser.quit()
# Mars Twitter
response = requests.get(mars_twitter)
tweet_pull = BeautifulSoup(response.text, 'lxml')
t = tweet_pull.find_all('p', class_ = 'TweetTextSize TweetTextSize--normal js-tweet-text tweet-text')
for i in t:
if 'InSight' in i.text:
i.a.decompose()
results['tweet'] = i.text
break
# Mars Facts
facts_html = pd.read_html(mars_facts)
facts_df = facts_html[0]
facts_df.columns = ['Description', 'Data']
facts_df.set_index('Description', inplace=True)
html_table = facts_df.to_html()
html_table = html_table.replace('\n', '')
results['table_html'] = html_table
# Mars Hemispheres
executable_path = {'executable_path': '/usr/local/bin/chromedriver'}
browser = Browser('chrome', **executable_path, headless=False)
browser.visit(mars_hemisphere)
for i in range(4):
link = browser.links.find_by_partial_text('Hemisphere')[i]
link.click()
title = browser.find_by_css('.title').first.text
url = browser.find_by_text('Sample').first['href']
results[title] = url
browser.back()
browser.quit()
return results
| 3.234375 | 3 |
kunquat/tracker/ui/views/sheet/trigger_renderer.py | cyberixae/kunquat | 0 | 12797047 | # -*- coding: utf-8 -*-
#
# Author: <NAME>, Finland 2014
#
# This file is part of Kunquat.
#
# CC0 1.0 Universal, http://creativecommons.org/publicdomain/zero/1.0/
#
# To the extent possible under law, Kunquat Affirmers have waived all
# copyright and related or neighboring rights to Kunquat.
#
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from config import *
class TriggerRenderer():
def __init__(self, config, trigger, notation):
assert trigger
self._config = config
self._trigger = trigger
self._notation = notation
self._setup_fields()
def get_field_count(self):
return len(self._fields)
def get_field_bounds(self, index):
offset = self._fields[index]['offset']
width = self._fields[index]['width']
return (offset, width)
def get_total_width(self):
return self._total_width
def draw_trigger(self, painter, include_line=True, select=False):
# Select colour based on event type
evtype = self._trigger.get_type()
if evtype == 'n+':
evtype_fg_colour = self._config['trigger']['note_on_colour']
elif evtype == 'h':
evtype_fg_colour = self._config['trigger']['hit_colour']
elif evtype == 'n-':
evtype_fg_colour = self._config['trigger']['note_off_colour']
else:
evtype_fg_colour = self._config['trigger']['default_colour']
# Set colours
painter.save()
if select:
height = self._config['tr_height']
painter.fillRect(
QRect(0, 0, self._total_width - 1, height - 1),
evtype_fg_colour)
painter.setPen(self._config['bg_colour'] if select else evtype_fg_colour)
# Draw fields
for i, field in enumerate(self._fields):
painter.drawText(
QPoint(field['offset'], self._baseline_offset),
field['text'])
painter.restore()
# Draw line only if not obscured by cursor
if include_line:
painter.save()
painter.setPen(evtype_fg_colour)
painter.drawLine(QPoint(0, 0), QPoint(self._total_width - 2, 0))
painter.restore()
def _make_field_data(self, offset, vis_text):
metrics = self._config['font_metrics']
return {
'offset': offset,
'width': metrics.boundingRect(vis_text).width(),
'text': vis_text,
}
def _get_note_vis_name(self, expr):
try:
cents = float(expr)
name = self._notation.get_full_name(cents)
except ValueError:
return expr
if name:
return name
else:
return expr
def _setup_fields(self):
evtype = self._trigger.get_type()
expr = self._trigger.get_argument()
metrics = self._config['font_metrics']
padding = self._config['trigger']['padding']
self._baseline_offset = metrics.tightBoundingRect('A').height()
self._fields = []
# Get field bounds
if evtype == 'n+':
note_name = self._get_note_vis_name(expr)
note_field = self._make_field_data(padding, note_name)
self._fields.append(note_field)
elif evtype == 'n-':
vis_text = u'══'
note_off_field = self._make_field_data(padding, vis_text)
self._fields.append(note_off_field)
else:
type_field = self._make_field_data(padding, evtype)
self._fields.append(type_field)
if expr != None:
if self._trigger.get_argument_type() == 'note':
vis_text = self._get_note_vis_name(expr)
else:
vis_text = expr
arg_field = self._make_field_data(
type_field['offset'] + type_field['width'] + padding,
vis_text)
self._fields.append(arg_field)
# Width
total_padding = padding * (len(self._fields) + 1)
self._total_width = sum(f['width'] for f in self._fields) + total_padding
| 1.6875 | 2 |
systest/testcases/vim/test_vim.py | ayoubbargueoui1996/osm-devops | 0 | 12797048 | <reponame>ayoubbargueoui1996/osm-devops
# Copyright 2017 Sandvine
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pytest
import time
@pytest.mark.vim
@pytest.mark.openstack
@pytest.mark.vmware
class TestClass(object):
def test_empty_vim(self,osm):
assert not osm.get_api().vim.list()
@pytest.fixture(scope='function')
def cleanup_test_add_vim_account(self,osm,request):
def teardown():
try:
for vim in osm.get_api().vim.list(False):
osm.get_api().vim.delete(vim['name'])
except:
pass
request.addfinalizer(teardown)
@pytest.mark.openstack
@pytest.mark.smoke
def test_add_vim_account(self,osm,openstack,cleanup_test_add_vim_account):
os_access = {}
vim_name = 'helloworld-os'
os_access['vim-url'] = 'https://169.254.169.245/'
os_access['vim-username'] = 'pytest2'
os_access['vim-password'] = '<PASSWORD>'
os_access['vim-tenant-name'] = 'pytest3'
os_access['vim-type'] = 'openstack'
os_access['description'] = 'a test vim'
assert not osm.get_api().vim.create(vim_name,os_access)
resp=osm.get_api().vim.get(vim_name)
assert resp['name'] == vim_name
assert resp['vim_type'] == 'openstack'
assert resp['vim_url'] == os_access['vim-url']
assert resp['vim_user'] == os_access['vim-username']
assert resp['vim_tenant_name'] == os_access['vim-tenant-name']
assert not osm.get_api().vim.delete(vim_name)
@pytest.mark.vmware
#@<EMAIL>
def test_add_vim_account_vmware(self,osm,vmware,cleanup_test_add_vim_account):
os_access = {}
vim_name = 'helloworld-vmware'
os_access['vim-url'] = 'https://169.254.169.245/'
os_access['vim-username'] = 'pytest2'
os_access['vim-password'] = '<PASSWORD>'
os_access['vim-tenant-name'] = 'pytest3'
os_access['vim-type'] = 'vmware'
os_access['description'] = 'a test vim'
assert not osm.get_api().vim.create(vim_name,os_access)
resp=osm.get_api().vim.get(vim_name)
assert resp['name'] == vim_name
assert resp['vim_type'] == 'vmware'
assert resp['vim_url'] == os_access['vim-url']
assert resp['vim_user'] == os_access['vim-username']
assert resp['vim_tenant_name'] == os_access['vim-tenant-name']
assert not osm.get_api().vim.delete(vim_name)
<EMAIL>
def test_add_multiple_accounts(self,osm,cleanup_test_add_vim_account):
os_access = {}
vims = [ {'name': 'testvim1', 'vim-type': 'openstack'}, {'name': 'testvim2','vim-type': 'vmware'} ]
os_access['vim-url'] = 'https://169.254.169.245/'
os_access['vim-username'] = 'pytest2'
os_access['vim-password'] = '<PASSWORD>'
os_access['vim-tenant-name'] = 'pytest3'
os_access['description'] = 'a test vim'
for vim in vims:
os_access['vim-type'] = vim['vim-type']
assert not osm.get_api().vim.create(vim['name'],os_access)
resp=osm.get_api().vim.get(vim['name'])
assert resp['name'] == vim['name']
assert resp['vim_type'] == vim['vim-type']
assert resp['vim_url'] == os_access['vim-url']
assert resp['vim_user'] == os_access['vim-username']
assert resp['vim_tenant_name'] == os_access['vim-tenant-name']
for vim in osm.get_api().vim.list(False):
osm.get_api().vim.delete(vim['name'])
| 1.960938 | 2 |
examples/simpy/bank.py | chrismurf/simulus | 2 | 12797049 | <gh_stars>1-10
"""This example is modified from the simpy's bank renege example; we
use the same settings as simpy so that we can get the same results."""
RANDOM_SEED = 42 # random seed for repeatability
NUM_CUSTOMERS = 5 # total number of customers
INTV_CUSTOMERS = 10.0 # mean time between new customers
MEAN_BANK_TIME = 12.0 # mean time in bank for each customer
MIN_PATIENCE = 1 # min customer patience
MAX_PATIENCE = 3 # max customer patience
import simulus
from random import seed, expovariate, uniform
def source():
for i in range(NUM_CUSTOMERS):
sim.process(customer, i)
sim.sleep(expovariate(1.0/INTV_CUSTOMERS))
def customer(idx):
arrive = sim.now
print('%7.4f Customer%02d: Here I am' % (arrive, idx))
patience = uniform(MIN_PATIENCE, MAX_PATIENCE)
_, timedout = sim.wait(counter, patience)
if timedout:
print('%7.4f Customer%02d: RENEGED after %6.3f' %
(sim.now, idx, sim.now-arrive))
else:
print('%7.4f Customer%02d: Waited %6.3f' %
(sim.now, idx, sim.now-arrive))
sim.sleep(expovariate(1.0/MEAN_BANK_TIME))
print('%7.4f Customer%02d: Finished' % (sim.now, idx))
counter.release()
print('Bank renege')
seed(RANDOM_SEED)
sim = simulus.simulator()
counter = sim.resource()
sim.process(source)
sim.run()
| 3.046875 | 3 |
env_interpretation/lazy_cartesian_product.py | GeorgianBadita/Dronem-gym-envirnoment | 5 | 12797050 | <reponame>GeorgianBadita/Dronem-gym-envirnoment
"""
@author: <NAME>
@email: <EMAIL>
@date: 04.04.2020 17:34
"""
from typing import List, Any
from bigfloat import *
class LazyCartesianProduct:
"""
Class for generating the lazy cartesian product
"""
def __init__(self, sets: List[List[Any]]):
self.__sets = sets
self.__max_size = 0
self.__factors = [0] * len(self.__sets)
self.__modulo = [0] * len(self.__sets)
self.__compute()
@property
def max_size(self):
return self.__max_size
def __compute(self) -> None:
fac = 1
self.__max_size = 1
for i in range(len(self.__sets) - 1, -1, -1):
items = len(self.__sets[i])
self.__max_size *= items
self.__factors[i] = fac
self.__modulo[i] = items
fac *= items
def get_nth_element(self, n: int) -> List[Any]:
if n < 0 or n >= self.__max_size:
raise ValueError("Invalid value of n")
res = []
for i in range(len(self.__sets)):
res.append(self.__sets[i][int(mod(div(BigFloat(n), BigFloat(self.__factors[i])), self.__modulo[i]))])
return res
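
# --- hypothetical usage sketch (not part of the original module) ---
# Enumerates every combination by index without materialising the product.
if __name__ == "__main__":
    product = LazyCartesianProduct([["a", "b"], [1, 2, 3], ["x", "y"]])
    for n in range(product.max_size):  # 2 * 3 * 2 = 12 combinations
        print(n, product.get_nth_element(n))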
| 2.859375 | 3 |