max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
sila_library/sila2lib/framework/std_features/SiLAService.py | lemmi25/sila2lib | 0 | 12787451 | <reponame>lemmi25/sila2lib<gh_stars>0
"""
________________________________________________________________________
:PROJECT: SiLA2_python
*SiLAService standard feature*
:details: The Feature each SiLA Server MUST implement. It is the entry point to a SiLA Server and helps to discover
the features it implements.
:file: SiLAService.py
:authors: <NAME>
<NAME>
:date: (creation) 2019-02-02
:date: (last modification) 2019-08-26
________________________________________________________________________
**Copyright**:
This file is provided "AS IS" with NO WARRANTY OF ANY KIND,
INCLUDING THE WARRANTIES OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
For further Information see LICENSE file that comes with this distribution.
________________________________________________________________________
"""
from typing import Dict, Any
__version__ = "0.2.0"
import os
import logging
import grpc
# importing protobuf and gRPC handler/stubs
from sila2lib.framework import SiLAFramework_pb2 as silaFW_pb2
from sila2lib.error_handling.server_err import SiLAValidationError
from . import SiLAService_pb2 as pb2
from . import SiLAService_pb2_grpc as pb2_grpc
# noinspection PyPep8Naming
class SiLAService(pb2_grpc.SiLAServiceServicer):
"""
The Feature each SiLA Server MUST implement. It is the entry point to a SiLA Server and helps to discover the
features it implements.
"""
#: The name of the server
server_name: str
#: Description of the server
server_description: str
#: Software version of the server
server_version: str
# Type of the server
server_type: str
#: The UUID of the server
server_UUID: str
# The vendors URL
vendor_URL: str
#: Dictionary of implemented features that are registered to the SiLAService provider
implemented_features: Dict[str, str]
def __init__(self, server_name: str, server_description: str, server_version: str,
server_type: str, server_UUID: str, vendor_URL: str):
"""SiLAService class initialiser"""
logging.debug("Initialising org.silastandard feature: SiLAService")
# Initialise class variables
self.implemented_features = {}
# Store the inputs
self.server_name = server_name
self.server_description = server_description
self.server_version = server_version
self.server_type = server_type
self.server_UUID = server_UUID
self.vendor_URL = vendor_URL
# read the feature definition (FDL) from the library
sila_service_fdl = os.path.join(os.path.dirname(__file__), '..', 'feature_definitions','org.silastandard', 'SiLAService.sila.xml')
self.registerFeature('SiLAService', sila_service_fdl)
def GetFeatureDefinition(self, request, context: grpc.ServicerContext) -> pb2.GetFeatureDefinition_Responses:
"""
Get all details on one Feature through the qualified Feature id.
:param request: gRPC request
request.QualifiedFeatureIdentifier: The qualified Feature identifier for which the Feature description
should be retrieved.
:param context: gRPC context.
"""
feature_id = request.QualifiedFeatureIdentifier.value
logging.debug('Feature definition for feature {feature_id} requested.'.format(feature_id=feature_id))
try:
return pb2.GetFeatureDefinition_Responses(
FeatureDefinition=silaFW_pb2.String(value=self.implemented_features[feature_id])
)
except KeyError:
logging.error('Feature {feature_id} not registered.'.format(feature_id=feature_id))
err = SiLAValidationError(parameter="QualifiedFeatureIdentifier.Identifier",
msg='Feature {feature_id} is unknown.'.format(feature_id=feature_id))
err.raise_rpc_error(context=context)
def SetServerName(self, request, context: grpc.ServicerContext) -> pb2.SetServerName_Responses:
"""
Sets a human readable name to the Server Name property.
:param request: gRPC request
request.ServerName: The human readable name to assign to the SiLA Server.
:param context: gRPC context.
"""
self.server_name = request.ServerName.value
logging.debug("Server name changed to {server_name}".format(server_name=self.server_name))
return pb2.SetServerName_Responses()
def Get_ServerName(self, request, context: grpc.ServicerContext) -> pb2.Get_ServerName_Responses:
"""
Human readable name of the SiLA Server.
:param request: gRPC request.
:param context: gRPC context.
:returns:
response.ServerName: Human readable name of the SiLA Server.
"""
logging.debug("Get_ServerName: {server_name}".format(server_name=self.server_name))
return pb2.Get_ServerName_Responses(ServerName=silaFW_pb2.String(value=self.server_name))
def Get_ServerType(self, request, context: grpc.ServicerContext) -> pb2.Get_ServerType_Responses:
"""
The type of Server this is. It is specified by the implementer of the server and is not unique.
:param request: gRPC request.
:param context: gRPC context.
:returns:
response.ServerType: The type of Server this is. It is specified by the implementer of the server
and is not unique.
"""
logging.debug("Get_ServerType: {}".format(self.server_type))
return pb2.Get_ServerType_Responses(ServerType=silaFW_pb2.String(value=self.server_type))
def Get_ServerUUID(self, request, context: grpc.ServicerContext) -> pb2.Get_ServerUUID_Responses:
"""
Globally unique identifier that identifies a SiLA Server. The Server UUID *must* be generated once and always
remain the same.
:param request: gRPC request.
:param context: gRPC context.
:returns:
response.ServerUUID: Globally unique identifier that identifies a SiLA Server. The Server UUID *must* be
generated once and always remain the same.
"""
logging.debug("Get_ServerUUID: {server_uuid}".format(server_uuid=self.server_UUID))
return pb2.Get_ServerUUID_Responses(ServerUUID=silaFW_pb2.String(value=self.server_UUID))
def Get_ServerDescription(self, request, context: grpc.ServicerContext) -> pb2.Get_ServerDescription_Responses:
"""
Description of the SiLA Server.
:param request: gRPC request.
:param context: gRPC context.
:returns:
response.ServerDescription: Description of the SiLA Server.
"""
logging.debug("Get_ServerDescription: {server_description}".format(server_description=self.server_description))
return pb2.Get_ServerDescription_Responses(ServerDescription=silaFW_pb2.String(value=self.server_description))
def Get_ServerVersion(self, request, context: grpc.ServicerContext) -> pb2.Get_ServerVersion_Responses:
"""
Returns the version of the SiLA Server. A "Major" and a "Minor" version number (e.g. 1.0) *must* be provided, a
Patch version number _may_ be provided. Optionally, an arbitrary text, separated by an underscore _may_ be
appended, e.g. “3.19.373_mighty_lab_devices”.
:param request: gRPC request
:param context: gRPC context
:returns:
response.ServerVersion: Returns the version of the SiLA Server. A "Major" and a "Minor" version number
(e.g. 1.0) *must* be provided, a Patch version number _may_ be provided. Optionally,
an arbitrary text, separated by an underscore _may_ be appended, e.g.
“3.19.373_mighty_lab_devices”.
"""
logging.debug("Get_ServerVersion: {server_version}".format(server_version=self.server_version))
return pb2.Get_ServerVersion_Responses(ServerVersion=silaFW_pb2.String(value=self.server_version))
def Get_ServerVendorURL(self, request, context: grpc.ServicerContext) -> pb2.Get_ServerVendorURL_Responses:
"""
Returns the URL to the website of the vendor or the website of the product of this SiLA Server.
:param request: gRPC request.
:param context: gRPC context.
:returns:
response.ServerVendorURL: Returns the URL to the website of the vendor or the website of the product of
this SiLA Server.
"""
logging.debug("Get_ServerVendorURL: {vendor_url}".format(vendor_url=self.vendor_URL))
return pb2.Get_ServerVendorURL_Responses(
ServerVendorURL=silaFW_pb2.String(value=self.vendor_URL)
)
def Get_ImplementedFeatures(self, request, context: grpc.ServicerContext) -> pb2.Get_ImplementedFeatures_Responses:
"""
Returns a list of qualified Feature identifiers of all implemented Features of this SiLA Server.
:param request: gRPC request
:param context: gRPC context
:returns:
response.ImplementedFeatures: Returns a list of qualified Feature identifiers of all implemented Features
of this SiLA Server.
"""
logging.debug("Get_ImplementedFeatures {feature_list}".format(
feature_list=', '.join(self.implemented_features.keys()))
)
feature_list = [
silaFW_pb2.String(value=feature_id)
for feature_id in
self.implemented_features
]
return pb2.Get_ImplementedFeatures_Responses(
ImplementedFeatures=feature_list
)
def registerFeature(self, feature_id: str, xml_fdl: str = None) -> None:
"""
Registers a new feature to the server so its FeatureDefinition can be provided to the client.
:param feature_id: Feature identifier to add.
:param xml_fdl: The feature definition file of the feature to register in XML format.
"""
if xml_fdl is not None:
with open(xml_fdl, 'r', encoding='utf-8') as file:
fdl = file.read()
else:
# no FDL input given
logging.error('For feature {feature_id} no FDL data has been provided!'.format(feature_id=feature_id))
fdl = '<xml>Feature Definition Missing for this feature.</xml>'
# compressing the FDL, by removing \n and \r
self.implemented_features[feature_id] = fdl.replace('\n', ' ').replace('\r', '')
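# --- Hedged usage sketch (added; not part of the original module) ---
# The servicer is normally constructed by the surrounding SiLA server code and
# attached to a gRPC server via the generated add_SiLAServiceServicer_to_server()
# helper (standard gRPC codegen naming). All values below are placeholders.
# sila_service = SiLAService(
#     server_name='ExampleServer', server_description='A demo SiLA server',
#     server_version='0.1', server_type='ExampleType',
#     server_UUID='00000000-0000-0000-0000-000000000000',
#     vendor_URL='https://example.org')
# sila_service.registerFeature('ExampleFeature', '/path/to/ExampleFeature.sila.xml')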
| 1.398438 | 1 |
nertivia4py/utils/get.py | bentettmar/nertivia4py | 3 | 12787452 | from . import server
from . import textchannel
from . import user
class Get:
@staticmethod
def get_server(id) -> server.Server:
"""
Gets a Nertivia server.
Args:
- id (int): The ID of the server.
Returns:
- server.Server: The server.
"""
return server.Server(id)
@staticmethod
def get_text_channel(id) -> textchannel.TextChannel:
"""
Gets a Nertivia text channel.
Args:
- id (int): The ID of the text channel.
Returns:
- textchannel.TextChannel: The text channel.
"""
return textchannel.TextChannel(id)
@staticmethod
def get_user(id) -> user.User:
"""
Gets a Nertivia user.
Args:
- id (int): The ID of the user.
Returns:
- user.User: The user.
"""
return user.User(id) | 2.890625 | 3 |
run_encoding.py | ddboline/roku_app | 1 | 12787453 | #!/usr/bin/python
'''
Script to record from roku device via WinTV HVR-1950
'''
from __future__ import (absolute_import, division, print_function, unicode_literals)
from time import sleep
from roku_app.run_encoding import run_encoding
if __name__ == '__main__':
try:
run_encoding()
except Exception as exc:
print('Caught exception %s' % exc)
sleep(10)
| 2.046875 | 2 |
main.py | bjnhur/pyHelloWorld | 0 | 12787454 | <reponame>bjnhur/pyHelloWorld<filename>main.py
def test_get_helloworld():
assert 'hello world' == get_helloworld()
def get_helloworld():
print('hello world')
return 'hello world'
def main():
print(get_helloworld())
if __name__ == '__main__':
main()
| 2.484375 | 2 |
marltoolbox/utils/exploration.py | tobiasbaumann1/amd | 0 | 12787455 | from gym.spaces import Discrete
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.models.tf.tf_action_dist import Categorical
from ray.rllib.models.torch.torch_action_dist import TorchCategorical
from ray.rllib.utils.annotations import override
from ray.rllib.utils.exploration.stochastic_sampling import StochasticSampling
from ray.rllib.utils.framework import TensorType
from ray.rllib.utils.framework import get_variable
from ray.rllib.utils.from_config import from_config
from ray.rllib.utils.schedules import Schedule, PiecewiseSchedule
from typing import Union
class SoftQSchedule(StochasticSampling):
"""Special case of StochasticSampling w/ Categorical and temperature param.
Returns a stochastic sample from a Categorical parameterized by the model
output divided by the temperature. Returns the argmax iff explore=False.
"""
def __init__(self, action_space, *, framework,
initial_temperature=1.0, final_temperature=0.0,
temperature_timesteps=int(1e5),
temperature_schedule=None, **kwargs):
"""Initializes a SoftQ Exploration object.
Args:
action_space (Space): The gym action space used by the environment.
temperature (Schedule): The temperature to divide model outputs by
before creating the Categorical distribution to sample from.
framework (str): One of None, "tf", "torch".
temperature_schedule (Optional[Schedule]): An optional Schedule object
to use (instead of constructing one from the given parameters).
"""
assert isinstance(action_space, Discrete)
super().__init__(action_space, framework=framework, **kwargs)
self.temperature_schedule = \
from_config(Schedule, temperature_schedule, framework=framework) or \
PiecewiseSchedule(
endpoints=[
(0, initial_temperature), (temperature_timesteps, final_temperature)],
outside_value=final_temperature,
framework=self.framework)
# The current timestep value (tf-var or python int).
self.last_timestep = get_variable(
0, framework=framework, tf_name="timestep")
self.temperature = self.temperature_schedule(self.last_timestep)
@override(StochasticSampling)
def get_exploration_action(self,
action_distribution: ActionDistribution,
timestep: Union[int, TensorType],
explore: bool = True):
cls = type(action_distribution)
assert cls in [Categorical, TorchCategorical]
self.last_timestep = timestep
# TODO This step changes the Q value, even when we are not exploring, create an issue
# Quick correction
if explore:
self.temperature = self.temperature_schedule(timestep if timestep is not None else self.last_timestep)
else:
self.temperature = 1.0
# Re-create the action distribution with the correct temperature applied.
dist = cls(
action_distribution.inputs,
self.model,
temperature=self.temperature)
# Delegate to super method.
return super().get_exploration_action(action_distribution=dist, timestep=timestep, explore=explore)
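# --- Hedged usage sketch (added; not from this repository) ---
# SoftQSchedule is an RLlib Exploration subclass, so it is normally referenced from a
# trainer's "exploration_config". The surrounding trainer setup and the hyperparameter
# values below are assumptions shown only for illustration.
EXAMPLE_EXPLORATION_CONFIG = {
    "exploration_config": {
        "type": SoftQSchedule,
        "initial_temperature": 1.0,
        "final_temperature": 0.1,
        "temperature_timesteps": int(1e5),
    },
}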
| 2.609375 | 3 |
flask_playground/routes/v1/errors.py | willianantunes/flask-playground | 8 | 12787456 | from flask import jsonify
from flask_playground.routes.exceps import ValidationError
from flask_playground.routes.v1 import api_v1_routes
@api_v1_routes.errorhandler(ValidationError)
def bad_request(e):
response = jsonify({"message": e.args[0]})
response.status_code = 400
return response
@api_v1_routes.app_errorhandler(404)
def not_found(e):
response = jsonify({"message": "Invalid resource URI"})
response.status_code = 404
return response
@api_v1_routes.errorhandler(405)
def method_not_supported(e):
response = jsonify({"message": "The method is not supported"})
response.status_code = 405
return response
@api_v1_routes.app_errorhandler(500)
def internal_server_error(e):
response = jsonify({"error": "Internal server error", "message": e.args[0]})
response.status_code = 500
return response
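# --- Hedged usage sketch (added; not part of the original module) ---
# The handlers above only fire when a matching error occurs inside a view. A
# hypothetical route on the same blueprint could trigger the ValidationError
# handler like this (route name and message are made up):
# @api_v1_routes.route("/validation-demo")
# def validation_demo():
#     raise ValidationError("the 'name' field is required")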
| 2.8125 | 3 |
radiomics/utility.py | RimeT/p3_radio | 0 | 12787457 | import logging
def get_logger(log_file=None, name='radiomics_logger'):
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
# stream handler will send message to stdout
formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
if log_file is not None:
fh = logging.FileHandler(log_file)
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
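# --- Hedged usage sketch (added for illustration) ---
# Running this module directly logs to stdout and to an assumed file path.
if __name__ == '__main__':
    demo_logger = get_logger('/tmp/radiomics_demo.log')  # hypothetical path
    demo_logger.info('radiomics logger initialised')
    demo_logger.debug('debug messages go to both the console and the file')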
| 2.6875 | 3 |
move_pdf.py | AnonymousRandomPerson/TranscriptionUtils | 0 | 12787458 | <filename>move_pdf.py<gh_stars>0
import os, shutil
from game_acronyms import *
parts_folder = os.path.join(os.sep, 'Users', 'chenghanngan', 'Documents', 'Music', 'Transcription', 'Parts')
scores_folder = os.path.join(os.sep, 'Users', 'chenghanngan', 'Documents', 'Music', 'Transcription', 'Scores')
for file in sorted(os.listdir(scores_folder)):
if file.endswith('.pdf'):
file_path = os.path.join(scores_folder, file)
instrument_index = file.rindex(' - ')
combined_name = file[:instrument_index]
game_acronym, track_name, game_name = split_track_name(combined_name)
long_name = '{} ({}){}'.format(track_name, game_name, file[instrument_index:])
parts_dir = os.path.join(parts_folder, combined_name)
if os.path.isdir(parts_dir):
dest_path = os.path.join(parts_dir, long_name)
print('Moving', file_path, 'to', dest_path)
shutil.move(file_path, dest_path)
else:
print('No parts folder found for', combined_name)
| 2.75 | 3 |
rr/forms/spadmin.py | UniversityofHelsinki/sp-registry | 0 | 12787459 | <filename>rr/forms/spadmin.py
import re
from django.core.validators import ValidationError
from django.forms import CharField, Form, EmailField, ModelChoiceField
from django.utils.translation import ugettext_lazy as _
from rr.models.email import Template
class SPAdminForm(Form):
"""
Form for sending email invites
"""
email = EmailField(label=_('Email where invitation is sent'))
template = ModelChoiceField(queryset=Template.objects.all(), required=False,
help_text=_('Using default template if none given.'))
def __init__(self, *args, **kwargs):
"""
Only show admin_notes field for superusers
"""
self.superuser = kwargs.pop('superuser', False)
super(SPAdminForm, self).__init__(*args, **kwargs)
if not self.superuser:
del self.fields['template']
class SPAdminGroupForm(Form):
"""
Form for adding admin groups
"""
group = CharField(label=_('Group name'),
help_text=_('6-32 characters, allowed characters are [a-z0-9-]. May not start or end with "-".'))
def clean_group(self):
group = self.cleaned_data['group']
if len(group) < 6:
raise ValidationError(_("Minimum length 6 characters."))
if len(group) > 32:
raise ValidationError(_("Maximum length 32 characters."))
pattern = re.compile("^([a-z0-9])([a-z0-9-])*([a-z0-9])$")
if not pattern.match(group):
raise ValidationError(_("Invalid characters in group name."))
return group
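# --- Hedged usage sketch (added; requires a configured Django settings module) ---
# Illustrates how the group validation above behaves; the group names are examples.
# form = SPAdminGroupForm(data={'group': 'grp-sp-registry'})
# form.is_valid()        # True: 6-32 chars, only [a-z0-9-], no leading/trailing '-'
# bad = SPAdminGroupForm(data={'group': '-short'})
# bad.is_valid()         # False: starts with '-' so the pattern check fails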
| 2.203125 | 2 |
train_identify_domain_h2o.py | gkovacs/tmi-browsing-behavior-prediction | 1 | 12787460 | <filename>train_identify_domain_h2o.py
#!/usr/bin/env python
# md5: c8864930a096ab388f167e1cd2c0a0ef
# coding: utf-8
import csv
import sys
import traceback
import os
data_version = int(sys.argv[1])
from tmilib import *
import h2o
import h2o.grid
h2o.init(port=int(os.environ.get('h2o_port', 54321)))
train_dataset = sdir_path('domainclass_cpn_train_v' + str(data_version) +'.csv')
def train_classifier(model_name):
model_file = sdir_path(model_name)
if path.exists(model_file):
print 'already exists', model_name
return
print model_name
#global train_dataset
#train_dataset = sdir_path('catdata_train_tensecond_v2.csv')
classifier = get_classifier()
print classifier
h2o.save_model(classifier, model_file)
classifier_algorithm = lambda: h2o.estimators.H2ORandomForestEstimator(build_tree_one_node=True)
def get_classifier():
classifier = classifier_algorithm() #h2o.estimators.H2ORandomForestEstimator(binomial_double_trees=True)
training_data = h2o.import_file(train_dataset)
test_data = h2o.import_file(train_dataset.replace('train', 'test'))
classifier.train(x=training_data.columns[1:], y=training_data.columns[0], training_frame=training_data, validation_frame=test_data)
return classifier
train_classifier('domainclass_cpn_v' + str(data_version) + '_randomforest_v1.h2o')
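# --- Hedged follow-up sketch (added; not part of the original script) ---
# h2o.save_model() returns the concrete path it wrote to, so a later session can
# reload the estimator and score the held-out frame. Variable names are illustrative.
# saved_path = h2o.save_model(classifier, model_file)   # inside train_classifier()
# reloaded = h2o.load_model(saved_path)
# predictions = reloaded.predict(h2o.import_file(train_dataset.replace('train', 'test')))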
| 2.4375 | 2 |
SocialHandler.py | stephensekula/navierstokes | 5 | 12787461 | <reponame>stephensekula/navierstokes<gh_stars>1-10
import abc
__metaclass__ = abc.ABCMeta
import sys
import os
import subprocess
import logging
import unicodedata
import commands
import re
import hashlib
import copy
import URLShortener
import chardet
from sets import Set
class SocialHandler(object):
def __init__(self):
# the list of messages gathered from this
# social network's feed (e.g. your feed)
self.messages = []
# a map of users (strings) from another network
# to users on this network (really, any old mapping
# of string you like - good for putting hyperlinks to
# users for discovery on other networks, or notification
# purposes.
self.usermap = {}
# debug flag
self.debug = False
# shorten URLs in message content?
self.do_url_shortening = False
# URL shortening config to use
self.urlShorteningConfig = {}
# time limit for considering posts in this service (seconds)
self.max_message_age = 3600
# set a "no share" keyword that, if present in a message, prevents NS from sharing the message
self.noshare_keyword = ""
# check that lynx is installed and accessible
lynx_check = ""
try:
lynx_check = subprocess.check_output(["lynx", "--help"])
except subprocess.CalledProcessError:
self.msg(3, self.texthandler("Lynx is required, but I cannot run it. Make sure it is installed and located in the PATH."))
pass
except OSError:
self.msg(3, self.texthandler("Lynx is required, but I cannot run it. Make sure it is installed and located in the PATH."))
pass
return
@abc.abstractmethod
def gather(self):
""" This method harvests posts from a social network """
@abc.abstractmethod
def write(self,message=unicode("","utf8")):
""" This method posts a message to a social network """
def append_message(self, message=unicode("","utf8")):
# safely append messages
if self.noshare_keyword != "":
if message.content.find(self.noshare_keyword) == -1:
self.messages.append(message)
pass
pass
else:
self.messages.append(message)
pass
return
def texthandler(self, text=unicode("","utf8")):
if not isinstance(text, unicode):
return text.decode('utf8', 'ignore')  # positional args: Python 2's str.decode() rejects keyword arguments
return text
def reshare_text(self, owner="someone"):
""" This method returns common text that can be used to
prepend to a reshared post"""
text = self.texthandler("RT from %s" % (owner))
return text
def msg(self,level=0,text=unicode("","utf8")):
level_text = self.texthandler("INFO")
message = self.texthandler("%s: %s" % (self.__class__.__name__, text))
if level == 0:
logging.info(message)
elif level == 1:
logging.warning(message)
elif level == 2:
logging.error(message)
elif level == 3:
logging.critical(message)
pass
#print "%s: [%s] %s" % (self.__class__.__name__, level_text, text)
if level > 2:
sys.exit()
return
def generate_id(self,text=unicode("","utf8")):
# generate an ID for a message from input text by generating
# an MD5 checksum from the text
try:
message_md5sum = hashlib.md5(text).hexdigest()
except UnicodeEncodeError:
message_md5sum = hashlib.md5(text.encode('utf-8')).hexdigest()
pass
return int(message_md5sum, 16)
def map_users(self, text=unicode("","utf8")):
new_text = text
for key in self.usermap:
new_text = new_text.replace(key, self.texthandler('<a href="%s">%s</a>'%(self.usermap[key][0],self.usermap[key][1])))
pass
return new_text
def which(self,program):
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def changeLinksToURLs(self, msg=unicode("","utf8")):
prefx = self.texthandler('<a href="')
linkClose = self.texthandler('">')
postfx = self.texthandler( '</a>')
new_msg = unicode("","utf8")
new_msg += msg
if not prefx in msg:
return new_msg
#<a href="http://www.thisisalink.com/foo/bar.html">this is some link text</a>
#to
#this is some link msg http://www.thisisalink.com/foo/bar.html
pos = 0
while True:
pos = new_msg.find(prefx,pos)
if pos < 0:
break
htmlText = new_msg[pos:new_msg.find(postfx,pos) + len(postfx)]
link = htmlText[htmlText.find(prefx)+len(prefx):htmlText.find(linkClose)]
linkmsg = htmlText[htmlText.find(linkClose)+len(linkClose):htmlText.find(postfx)]
outText = linkmsg + ' ' + link
if linkmsg == link:
outText = link
new_msg = new_msg.replace(htmlText, outText)
pass
return new_msg
def HTMLConvert(self, msg=unicode("","utf8") ):
msg_clean = self.changeLinksToURLs(msg)
pid = os.getpid()
htmlfile = open('/tmp/%d_msg.html' % (pid),'w')
try:
htmlfile.write( msg_clean )
except UnicodeEncodeError:
htmlfile.write( unicodedata.normalize('NFKD', msg_clean).encode('ascii','ignore') )
pass
htmlfile.close()
txt = commands.getoutput('/usr/bin/lynx --dump -width 2048 -nolist /tmp/%d_msg.html' % (pid))
os.system('rm -f /tmp/%d_msg.html' % (pid))
return txt
def TextToHtml(self, msg=unicode("","utf8") ):
# Convert links to HTML in a text message
# Relied on external tool, txt2html
# write message to file for conversion
pid = os.getpid()
text_file = open("/tmp/txt2html_%d.txt" % (pid), "w")
#text_file.write(msg.encode('utf8'))
text_file.write(self.texthandler(msg).encode('utf8'))
text_file.close();
# Convert using tool
html_message = unicode("","utf8")
try:
#html_message = unicode(subprocess.check_output(["txt2html", "--infile", "/tmp/txt2html_%d.txt" % (pid)]))
html_message = subprocess.check_output(["txt2html", "--infile", "/tmp/txt2html_%d.txt" % (pid)])
except subprocess.CalledProcessError:
print self.texthandler("There was a problem trying to call the txt2html program - make sure it is installed correctly.")
sys.exit(-1)
pass
# excerpt the content of the <body> tags
html_message = self.texthandler(html_message)
body_begin = html_message.find(u'<body>') + 6
body_end = html_message.find(u'</body>')
html_message = html_message[body_begin:body_end]
return html_message
def T2H_URLs(self, text=unicode("","utf8")):
html_text = ""
# Retrieves the urls from this text
found_urls = list(Set(re.findall(self.texthandler('(?:http[s]*://|www.)[^"\'<> ]+'), text, re.MULTILINE)))
if len(found_urls) == 0:
return self.texthandler(text)
# deep-copy the text and prepare for it to be mangled... politely.
html_text = copy.deepcopy(text)
url = unicode("","utf8")
for url in found_urls:
try:
html_text = html_text.replace(url, "<a href=\"%s\">%s</a>" % (url,url))
except UnicodeDecodeError:
url = url.encode('utf-8')
html_text = html_text.replace(url, "<a href=\"%s\">%s</a>" % (url,url))
pass
pass
return html_text
def ShortenURLs(self, text=unicode("","utf8")):
# convert all links in HTML to shortened links using a shortening service
# Get all unique URLs from this text string
found_urls = list(Set(re.findall(self.texthandler('(?:http[s]*://|www.)[^"\'<> ]+'), text, re.MULTILINE)))
if len(found_urls) == 0:
return self.texthandler(text)
url_shortener = URLShortener.URLShortener(self.urlShorteningConfig)
new_text = copy.deepcopy(text)
url = unicode("","utf8")
for url in found_urls:
shortened_url = url_shortener.shorten(url)
try:
new_text = new_text.replace(url, shortened_url)
except UnicodeDecodeError:
url = url.encode('utf-8')
shortened_url = shortened_url.encode('utf-8')
new_text = new_text.replace(url, shortened_url)
pass
pass
return new_text
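# --- Hedged example subclass (added; not part of the original module) ---
# SocialHandler is abstract, so a concrete network handler only has to supply
# gather() and write(); logging, URL shortening and the HTML helpers are inherited.
# This echo handler is purely illustrative.
class EchoHandler(SocialHandler):
    def gather(self):
        # A real handler would pull posts from its network here.
        return self.messages
    def write(self, message=unicode("", "utf8")):
        # A real handler would post to its network; this one just logs the message.
        self.msg(0, message)
        return True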
| 2.453125 | 2 |
ioloop/kqueue.py | mengzhuo/ioloop | 1 | 12787462 | <gh_stars>1-10
#!/usr/bin/env python
# encoding: utf-8
import select
from ioloop import READ, WRITE, ERROR
class KQueueLoop(object):
def __init__(self):
self.loop = select.kqueue()
self.active = {}
def register(self, fd, events):
if fd in self.active:
raise IOError('FD: %d in active' % fd)
self._control(fd, events, select.KQ_EV_ADD)
self.active[fd] = events
def unregister(self, fd):
events = self.active.pop(fd)
self._control(fd, events, select.KQ_EV_DELETE)
def modify(self, fd, events):
self.unregister(fd)
self.register(fd, events)
def _control(self, fd, events, flags):
kevents = []
if events & WRITE:
kevents.append(select.kevent(fd,
filter=select.KQ_FILTER_WRITE,
flags=flags))
if events & READ:
kevents.append(select.kevent(fd,
filter=select.KQ_FILTER_READ,
flags=flags))
[self.loop.control([x], 0) for x in kevents]
def poll(self, timeout=1):
kevents = self.loop.control(None, 1000, timeout)
events = {}
for kevent in kevents:
fd = kevent.ident
if kevent.filter == select.KQ_FILTER_READ:
events[fd] = events.get(fd,0) | READ
elif kevent.filter == select.KQ_FILTER_WRITE and not kevent.flags & select.KQ_EV_EOF:
events[fd] = events.get(fd,0) | WRITE
elif kevent.flags & select.KQ_EV_ERROR:
events[fd] = events.get(fd,0) | ERROR
return events.items()
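# --- Hedged usage sketch (added; runs only on BSD/macOS where kqueue exists) ---
# Registers a listening socket for read-readiness and polls once. The address and
# timeout are arbitrary example values.
if __name__ == '__main__':
    import socket
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listener.bind(('127.0.0.1', 0))
    listener.listen(5)
    loop = KQueueLoop()
    loop.register(listener.fileno(), READ)
    print(loop.poll(timeout=0.1))  # empty unless a client has already connected
    loop.unregister(listener.fileno())
    listener.close()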
| 2.34375 | 2 |
azure-mgmt-iothub/azure/mgmt/iothub/models/iot_hub_properties.py | CharaD7/azure-sdk-for-python | 0 | 12787463 | <filename>azure-mgmt-iothub/azure/mgmt/iothub/models/iot_hub_properties.py<gh_stars>0
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class IotHubProperties(Model):
"""The Iot Hub properties.
:param authorization_policies: The authorization rules.
:type authorization_policies: list of
:class:`SharedAccessSignatureAuthorizationRule
<azure.mgmt.iothub.models.SharedAccessSignatureAuthorizationRule>`
:param ip_filter_rules: The IP filter rules.
:type ip_filter_rules: list of :class:`IpFilterRule
<azure.mgmt.iothub.models.IpFilterRule>`
:param host_name: The name of the host.
:type host_name: str
:param event_hub_endpoints: The event hub endpoint properties.
:type event_hub_endpoints: dict
:param storage_endpoints: The list of storage end points where files can
be uploaded. Currently only one storage account can be configured.
:type storage_endpoints: dict
:param messaging_endpoints: The list of messaging end points configured.
:type messaging_endpoints: dict
:param enable_file_upload_notifications: The flag which indicates whether
file upload notification should be enabled. This is optional at iot hub
level. When enabled upload notifications will be available.
:type enable_file_upload_notifications: bool
:param cloud_to_device:
:type cloud_to_device: :class:`CloudToDeviceProperties
<azure.mgmt.iothub.models.CloudToDeviceProperties>`
:param comments: The comments.
:type comments: str
:param operations_monitoring_properties:
:type operations_monitoring_properties:
:class:`OperationsMonitoringProperties
<azure.mgmt.iothub.models.OperationsMonitoringProperties>`
:param features: The Capabilities/Features that need to be enabled for
the Hub. Possible values include: 'None', 'DeviceManagement'
:type features: str or :class:`Capabilities
<azure.mgmt.iothub.models.Capabilities>`
"""
_attribute_map = {
'authorization_policies': {'key': 'authorizationPolicies', 'type': '[SharedAccessSignatureAuthorizationRule]'},
'ip_filter_rules': {'key': 'ipFilterRules', 'type': '[IpFilterRule]'},
'host_name': {'key': 'hostName', 'type': 'str'},
'event_hub_endpoints': {'key': 'eventHubEndpoints', 'type': '{EventHubProperties}'},
'storage_endpoints': {'key': 'storageEndpoints', 'type': '{StorageEndpointProperties}'},
'messaging_endpoints': {'key': 'messagingEndpoints', 'type': '{MessagingEndpointProperties}'},
'enable_file_upload_notifications': {'key': 'enableFileUploadNotifications', 'type': 'bool'},
'cloud_to_device': {'key': 'cloudToDevice', 'type': 'CloudToDeviceProperties'},
'comments': {'key': 'comments', 'type': 'str'},
'operations_monitoring_properties': {'key': 'operationsMonitoringProperties', 'type': 'OperationsMonitoringProperties'},
'features': {'key': 'features', 'type': 'str'},
}
def __init__(self, authorization_policies=None, ip_filter_rules=None, host_name=None, event_hub_endpoints=None, storage_endpoints=None, messaging_endpoints=None, enable_file_upload_notifications=None, cloud_to_device=None, comments=None, operations_monitoring_properties=None, features=None):
self.authorization_policies = authorization_policies
self.ip_filter_rules = ip_filter_rules
self.host_name = host_name
self.event_hub_endpoints = event_hub_endpoints
self.storage_endpoints = storage_endpoints
self.messaging_endpoints = messaging_endpoints
self.enable_file_upload_notifications = enable_file_upload_notifications
self.cloud_to_device = cloud_to_device
self.comments = comments
self.operations_monitoring_properties = operations_monitoring_properties
self.features = features
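# --- Hedged usage sketch (added; not part of the generated SDK code) ---
# The class is a plain msrest Model, so it can be built directly from keyword
# arguments; the values here are illustrative only.
if __name__ == '__main__':
    demo_properties = IotHubProperties(
        enable_file_upload_notifications=True,
        comments='Example hub built from the SDK model')
    print(demo_properties.comments)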
| 1.695313 | 2 |
meiduo_mall/celery_tasks/sms/tasks.py | aprilchen918/meiduo_project2 | 0 | 12787464 | <reponame>aprilchen918/meiduo_project2<filename>meiduo_mall/celery_tasks/sms/tasks.py
# Define task
from celery_tasks.sms.twilio1.twilio_sms import Twilio
from . import constants
from celery_tasks.main import celery_app
# Use the task decorator so that Celery registers and recognizes this asynchronous task
@celery_app.task(name='send_sms_code')
def send_sms_code(mobile, sms_code):
"""
Asynchronous task that sends an SMS verification code.
:param mobile: phone number
:param sms_code: SMS code
:return: success: 0, failure: -1
"""
send_ret = Twilio().send_sms(sms_code, constants.SMS_CODE_REDIS_EXPIRES//60, mobile, constants.SEND_SMS_TEMPLATE_ID)
return send_ret | 2.1875 | 2 |
gedcom-to-map/kml/foliumExp.py | D-Jeffrey/gedcom-to-visualmap | 1 | 12787465 | import numpy as nump
import math
import random
import folium
# import simplekml as simplekml
from models.Line import Line
from models.Pos import Pos
import time
from gedcomoptions import gvOptions
from folium.plugins import FloatImage, AntPath, MiniMap, HeatMapWithTime
legend_file = 'legend.png'
lgd_txt = '<span style="color: {col};">{txt}</span>'
def dift(l):
d = ((random.random() * 0.001) - 0.0005)
#d = 0
if (l):
return (float(l)+d)
else:
return None
class MyMarkClusters:
def __init__(self, mymap, step):
self.pmarker = dict()
self.markercluster = dict()
self.mymap = mymap
self.step = step
def mark (self, spot, when=None):
if spot and spot.lat and spot.lon:
cnt = 1
if (when):
# TODO this is a range date hack
if type(when) == type (" "):
when = when[0:4]
when = int(when) - (int(when) % self.step)
markname = str(spot.lat)+ str(spot.lon) + str(when)
else:
markname = str(spot.lat)+","+ str(spot.lon)
if (markname in self.pmarker.keys()):
cnt = self.pmarker[markname][2]+1
self.pmarker[markname] = (spot.lat, spot.lon, cnt, when)
def checkmarker(self, lat, long, name):
if lat and long:
markname = str(lat)+","+ str(long)
if (self.pmarker[markname][2] == 1):  # only one hit at this spot, so no cluster is needed
return None
if (markname in self.markercluster.keys()):
return self.markercluster[markname]
else:
self.markercluster[markname] = folium.plugins.MarkerCluster(name).add_to(self.mymap)
return self.markercluster[markname]
class foliumExporter:
def __init__(self, gOptions : gvOptions):
self.file_name = gOptions.Result
self.max_line_weight = gOptions.MaxLineWeight
self.gOptions = gOptions
self.fm = folium.Map(location=[0, 0], zoom_start=2)
backTypes = ('Open Street Map', 'Stamen Terrain', 'CartoDB Positron', 'Stamen Toner', 'Stamen Watercolor', 'Cartodbdark_matter')
if (self.gOptions.MapStyle < 1 or self.gOptions.MapStyle > len(backTypes)):
self.gOptions.MapStyle = 3
for bt in range(0,4):
folium.raster_layers.TileLayer(backTypes[bt], name=backTypes[bt]).add_to(self.fm)
if (self.gOptions.mapMini):
folium.plugins.MiniMap(toggle_display=True).add_to(self.fm)
random.seed()
self.gOptions.step()
def setoptions(self):
return
def Done(self):
self.fm.save(self.file_name)
self.gOptions.stop()
# self.fm = None
def getFeatureGroup(self, thename, depth):
if not thename in self.fglastname:
self.fglastname[thename] = [folium.FeatureGroup(name= thename, show=False), 0, 0]
thefg = self.fglastname[thename][0]
self.fglastname[thename][1] += 1
self.fglastname[thename][2] = depth
return thefg
def export(self, main: Pos, lines: [Line], ntag =""):
SortByLast = (self.gOptions.GroupBy == 1)
SortByPerson = (self.gOptions.GroupBy == 2)
fm = self.fm
self.gOptions.step("Preparing")
self.fglastname = dict()
flr = folium.FeatureGroup(name= lgd_txt.format(txt= 'Relations', col='green'), show=False )
flp = folium.FeatureGroup(name= lgd_txt.format(txt= 'People', col='Black'), show=False )
mycluster = MyMarkClusters(fm, self.gOptions.HeatMapTimeStep)
""" *****************************
HEAT MAP Section
*****************************
"""
if self.gOptions.HeatMapTimeLine:
print("building clusters")
self.gOptions.step("Building Heatmap Clusters")
for line in lines:
if (self.gOptions.step()):
break
if (hasattr(line,'style') and line.style == 'Life'):
if line.human.birth and line.human.birth.pos:
mycluster.mark(line.human.birth.pos, line.human.birth.whenyear())
minyear = line.human.birth.whenyearnum()
else:
minyear = None
if line.human.death and line.human.death.when:
maxyear = line.human.death.whenyearnum(True)
else:
maxyear = None
for mids in (line.midpoints):
y = mids.whenyear()
if y:
if minyear:
minyear = min(int(y), minyear)
else:
minyear = int(y)
y = mids.whenyear(True)
if y:
if maxyear:
maxyear = max(int(y), maxyear)
else:
maxyear = int(y)
if minyear and maxyear:
activepos = Pos(None, None)
if line.human.birth and line.human.birth.pos:
(activepos.lat, activepos.lon) = (line.human.birth.pos.lat, line.human.birth.pos.lon)
for year in range(minyear,maxyear):
for mids in (line.midpoints):
if mids.whenyearnum() == year:
activepos = mids.pos
if activepos and activepos.lat and activepos.lon:
mycluster.mark(activepos, year)
if line.human.death and line.human.death.pos:
mycluster.mark(line.human.death.pos, line.human.death.whenyearnum())
years= []
for marker in mycluster.pmarker:
self.gOptions.step()
if type(mycluster.pmarker[marker][3]) == type(' '):
print (mycluster.pmarker[marker])
theyear = mycluster.pmarker[marker][3]
if theyear and not theyear in years:
years.append(theyear)
years.sort()
heat_data = [[] for _ in range (0,len(years))]
for mkyear in range(0,len(years)):
self.gOptions.step()
for markname in (mycluster.pmarker):
if years[mkyear] == mycluster.pmarker[markname][3]:
heat_data[mkyear].append([mycluster.pmarker[markname][0], mycluster.pmarker[markname][1], mycluster.pmarker[markname][2]])
#Normalize the data
mx=0
for i in range(len(heat_data)):
for j in range(len(heat_data[i])):
mx = max(mx, heat_data[i][j][2])
for i in range(len(heat_data)):
for j in range(len(heat_data[i])):
heat_data[i][j][2] = float(heat_data[i][j][2])/mx
hm = folium.plugins.HeatMapWithTime(heat_data,index = years , name= 'Heatmap', max_opacity=0.9, min_speed=1, speed_step=1, max_speed=25,
gradient={'0':'Navy', '0.25':'Blue','0.5':'Green', '0.75':'Yellow','1': 'Red'})
fm.add_child( hm)
else:
for line in lines:
self.gOptions.step()
mycluster.mark(line.a)
mycluster.mark(line.b)
if line.midpoints:
for mids in (line.midpoints):
mycluster.mark(mids.pos, None)
fg = folium.FeatureGroup(name= lgd_txt.format(txt= 'Heatmap', col='black'), show=(self.gOptions.HeatMap))
heat_data = []
for markname in (mycluster.pmarker):
self.gOptions.step()
heat_data.append([mycluster.pmarker[markname][0], mycluster.pmarker[markname][1], mycluster.pmarker[markname][2]])
hm = folium.plugins.HeatMap(heat_data,max_opacity=0.8, name= 'Heatmap')
fg.add_child(hm)
fm.add_child( fg)
# MAGIC HACK: append to Folium's default JS list so the map can use Font Awesome icons
# the closing quote/tag is intentionally missing so the injected attribute completes it
fm.default_js.append(['hack.js', 'https://use.fontawesome.com/releases/v5.15.4/js/all.js" data-auto-replace-svg="nest'])
""" *****************************
Line Drawing Section
*****************************
"""
i = 0
self.gOptions.step("Building lines")
for line in (list(filter (lambda line: hasattr(line,'style'), lines))):
self.gOptions.step()
i += 1
if ( line.style == 'Life'):
flc = flp
aicc = 'orange'
aici = 'child'
bicc = 'gray'
bici = 'cross'
lc = '#' + line.color.to_hexa()
da = []
ln = line.parentofhuman
g = ""
markertipname = "Life of " + line.name
fancyname = line.style + " of "+ line.parentofhuman
markhome = 'house'
else:
flc = flr
aicc = 'green'
aici = 'baby'
bicc = 'green'
lc = 'green'
da = [5,5]
if (line.style == 'father'):
lc = 'blue'
lc = '#2b8cbe'
bici = 'male'
bicc = 'blue'
if (line.style == 'mother'):
lc = 'pink'
bici = 'female'
bicc = 'pink'
ln = line.name
g = line.name.split(' ',2)[0]
markertipname = line.name + " " + line.style + " of "+ line.parentofhuman
fancyname = line.name + " " + line.style + " of "+ line.parentofhuman
fg = None
newfg = False
labelname = str(i) +' '+ ln
if (len(labelname) > 25): labelname = labelname[1:25] +"..."
gn = lgd_txt.format( txt=labelname, col= lc)
fm_line = []
bextra = "Born {}".format(line.human.birth.whenyear()) if line.human.birth and line.human.birth.when else ''
dextra = "Died {}".format(line.human.death.whenyear()) if line.human.death and line.human.death.when else ''
fancyname = fancyname + "<br>" + bextra +" "+ dextra if (bextra != '') or (dextra != '') else fancyname
if line.human.photo:
fancyname = fancyname + "<img src='{}' width='150'>".format(line.human.photo)
difta = diftb = None
if (line.a and line.a.lat and line.a.lon):
# color = father/mother, born = baby, male, female
difta = [dift(line.a.lat), dift(line.a.lon)]
if self.gOptions.MarksOn:
if self.gOptions.BornMark:
mk = folium.features.Marker(difta,tooltip=markertipname , popup=fancyname, opacity=.5, icon=folium.Icon(color=aicc,icon=aici, prefix='fa' ))
if SortByLast:
fg = self.getFeatureGroup(line.human.surname, line.prof)
if SortByPerson:
fg = self.getFeatureGroup(line.parentofhuman, line.prof)
if (not fg):
fg = folium.FeatureGroup(name= gn, show=False)
newfg = True
fg.add_child(mk)
# 'tombstone' or 'cross'
if (line.b and line.b.lat and line.b.lon):
diftb = [dift(line.b.lat), dift(line.b.lon)]
if self.gOptions.MarksOn:
mk = folium.features.Marker(diftb,tooltip =markertipname , popup=fancyname, opacity=.5,icon=folium.Icon(color=bicc,icon=bici, prefix='fa', extraClasses = 'fas'))
if SortByLast:
fg = self.getFeatureGroup(line.human.surname, line.prof)
if SortByPerson:
fg = self.getFeatureGroup(line.parentofhuman, line.prof)
if (not fg):
fg = folium.FeatureGroup(name= gn, show=False)
newfg = True
fg.add_child(mk)
if difta:
fm_line.append(tuple(difta))
if line.midpoints:
# Change line type
lc = "gray"
for mids in (line.midpoints):
midspot = tuple([dift(mids.pos.lat), dift(mids.pos.lon)])
fm_line.append(midspot)
if self.gOptions.HomeMarker and fg:
if mids.what == 'home':
mker = 'home'
mkcolor = bicc
tip = mids.where
else:
mker = 'shoe-prints'
mkcolor = 'lightgray'
if mids.what:
tip = mids.what + ' ' + mids.where
else:
tip = '?? ' + mids.where
mk = folium.features.Marker(midspot,tooltip =tip, opacity=.5, icon=folium.Icon(color=mkcolor,icon=mker, prefix='fa', extraClasses = 'fas'))
fg.add_child(mk)
if diftb:
fm_line.append(tuple(diftb))
if (len(fm_line) > 1):
lcolor = line.color.to_hexa()
lcolor = lc
if line.prof:
lwidth = max(int(self.max_line_weight/math.exp(0.5*line.prof)), 2)
else:
lwidth = 1
if self.gOptions.UseAntPath:
if line.style == 'Life':
pl = folium.plugins.AntPath(fm_line, weight=lwidth, opacity=.7, tooltip=ln, popup=fancyname, color=lcolor, lineJoin='arcs')
else:
pl = folium.features.PolyLine(fm_line, color=lcolor, weight=lwidth, opacity=1, tooltip=ln, popup=fancyname, dash_array = da, lineJoin='arcs' )
else:
pl = folium.features.PolyLine(fm_line, color=lcolor, weight=lwidth, opacity=1, tooltip=ln, popup=fancyname, dash_array = da, lineJoin='arcs')
if (pl):
if SortByLast:
fg = self.getFeatureGroup(line.human.surname, line.prof)
if SortByPerson:
fg = self.getFeatureGroup(line.parentofhuman, line.prof)
if (not fg):
fg = folium.FeatureGroup(name= gn, show=False)
newfg = True
fg.add_child(pl)
print(f"Name:{line.human.name:30};\tParent:{line.parentofhuman:30};\tStyle:{line.style};\tfrom:{line.a}; to:{line.b}")
# Did we just create a feature group for this person?
if newfg:
fg.layer_name = fg.layer_name + " ({})".format(len(fm_line) + 1 if diftb else 0 + 1 if diftb else 0)
fm.add_child(fg)
for fgn in sorted(self.fglastname.keys(), key=lambda x: self.fglastname[x][2], reverse = False ):
# print ("]]{} : {}".format(fgn, fglastname[fgn][1]))
self.fglastname[fgn][0].layer_name = "{} : {}".format(fgn, self.fglastname[fgn][1])
fm.add_child(self.fglastname[fgn][0])
sc = False if self.gOptions.showLayerControl else True
folium.map.LayerControl('topleft', collapsed= sc).add_to(fm)
if main and main.birth and main.birth.pos and main.birth.pos.lat:
#TODO Look at MarkerClusters
if self.gOptions.MarkStarOn:
folium.Marker([dift(main.birth.pos.lat), dift(main.birth.pos.lon)], tooltip = main.name, opacity=0.5, icon=folium.Icon(color='lightred',icon='star', prefix='fa', iconSize = ['50%', '50%'])).add_to(fm)
else:
print ("No GPS locations to generate a map.")
# TODO Add a legend
# FloatImage(image_file, bottom=0, left=86).add_to(fm)
if SortByLast:
print ("Number of FG lastName: {}".format(len(self.fglastname)))
self.Done()
return
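# --- Hedged usage sketch (added; not part of the original module) ---
# The exporter is driven entirely by a gvOptions instance (defined in gedcomoptions.py),
# whose constructor arguments are not shown here, so this outline is assumption-heavy
# and kept as comments.
# opts = gvOptions()                    # assumed default construction
# opts.Result = 'family-map.html'       # assumed attribute, mirrored into self.file_name
# exporter = foliumExporter(opts)
# exporter.export(root_person, lines)   # root_person: Pos of the main person, lines: list of Line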
| 2.4375 | 2 |
src/pyrin/api/swagger/__init__.py | wilsonGmn/pyrin | 0 | 12787466 | # -*- coding: utf-8 -*-
"""
swagger package.
"""
from pyrin.packaging.base import Package
class SwaggerPackage(Package):
"""
swagger package class.
"""
NAME = __name__
COMPONENT_NAME = 'api.swagger.component'
CONFIG_STORE_NAMES = ['swagger']
DEPENDS = ['pyrin.api.router',
'pyrin.configuration']
| 1.335938 | 1 |
src/fortrace/utility/guesttime.py | dasec/ForTrace | 1 | 12787467 | import time
from datetime import datetime
def getGuestTime():
curr_datetime = datetime.now()
dt_string = curr_datetime.strftime("%d/%m/%Y")
ti_string = curr_datetime.strftime("%H:%M:%S")
return "{0} {1}".format(dt_string, ti_string)
def getGuestTimezone():
is_dst = time.daylight and time.localtime().tm_isdst > 0
utc_offset = - (time.altzone if is_dst else time.timezone)/3600
if utc_offset > 0:
marker = "+"
else:
marker = "-"
tzone = "UTC{0}{1}".format(marker, utc_offset)
return tzone
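# --- Hedged usage sketch (added for illustration) ---
# Prints something like "17/09/2023 14:05:33 UTC+2" depending on the host clock.
if __name__ == '__main__':
    print("{0} {1}".format(getGuestTime(), getGuestTimezone()))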
| 3.0625 | 3 |
vmServer.py | aivaturi/parallels-vm-manager | 1 | 12787468 | <reponame>aivaturi/parallels-vm-manager
#!/usr/bin/python
__author__ = '<NAME>'
__version__ = '0.1'
__license__ = 'FreeBSD'
import base64
import json
import logging
import logging.handlers
import os
import re
import sys
import subprocess
import xml.etree.ElementTree
from bottle import *
import prlsdkapi
"""
@package vmServer
@brief Entry point to vmManager server
In development turn on debug for stacktrace & reload for instantaneous reloads
@code python vmManager.py --debug @endcode
"""
reload = 0
if (len(sys.argv) > 1):
if (sys.argv[1] == '--debug'):
debug(True)
reload = 1
else:
debug(False)
# Logging settings for Rome Server
log = logging.getLogger('vmManager')
log.setLevel(logging.DEBUG)
log_file = './prlManager.log'
fileHandler = logging.FileHandler(log_file)
format = logging.Formatter("%(asctime)s %(levelname)s %(filename)s: %(funcName)s() %(lineno)d %(message)s")
fileHandler.setFormatter(format)
log.addHandler(fileHandler)
log.info("Initialized log for vmManager...")
vmManager = None
vmServer = None
class PrlVMManager:
""" Helper class that will perform operations on Parallels VMs """
def __init__(self):
# initialize the desktop sdk & login to the Parallels local service
prlsdkapi.init_desktop_sdk()
self.server = prlsdkapi.Server()
try:
# The call returns a prlsdkapi.Result object on success.
result = self.server.login_local('', 0, prlsdkapi.prlsdk.consts.PSL_NORMAL_SECURITY).wait()
log.debug("Logged in to Parallels service")
except prlsdkapi.PrlSDKError, e:
sys.exit("Login error: %s" % e)
def __del__(self):
# Log off and deinitialize the prlsdkapi library.
self.server.logoff()
prlsdkapi.deinit_sdk()
def searchVM(self, vm_to_find):
"""
This method will obtain a Vm object for the virtual machine specified by
its name.
@param vm_to_find: Name of the virtual machine to find. Can also be a
partial name (starts with the specified string)
@return <b><Object></b>: Returns a vm object
"""
log.debug("Entering searchVM()...")
try:
result = self.server.get_vm_list().wait()
except prlsdkapi.PrlSDKError, e:
log.error("Error: %s" % e)
log.debug("Exiting searchVM()...")
return
for i in range(result.get_params_count()):
vm = result.get_param_by_index(i)
vm_name = vm.get_name()
if vm_name.startswith(vm_to_find):
return vm
log.debug("Exiting searchVM()...")
return
def _getVMObjects(self):
# This is an internal method, which obtains the virtual machine list.
# getVMList is an asynchronous method that returns
# a prlsdkapi.Result object containing the list of virtual machines.
log.debug("Entering _getVMObjects()...")
job = self.server.get_vm_list()
result = job.wait()
log.debug("Exiting _getVMObjects()...")
return result
def getVMList(self):
"""
This method will find all the VMs that are available in Parallels &
return them as a list.
@return <b><List></b>: List of VM names.
"""
log.debug("Entering getVMList()...")
result = self._getVMObjects()
vm_list = []
for i in range(result.get_params_count()):
vm = result.get_param_by_index(i)
vm_list.append(vm.get_name())
log.debug(vm_list)
log.debug("Exiting getVMList()...")
return vm_list
def getTemplateList(self):
"""
This method will find all the VMs that are templates & return them as a list.
@return <b><List></b>: List of template names.
"""
log.debug("Entering getTemplateList()...")
result = self._getVMObjects()
template_list = []
for i in range(result.get_params_count()):
vm = result.get_param_by_index(i)
if (vm.is_template()):
template_list.append(vm.get_name())
log.debug(template_list)
log.debug("Exiting getTemplateList()...")
return template_list
def getVMListWithInfo(self):
"""
This method is similar to getVMList but will also gather all the VMs
relevant information like status, adapter information etc & return them
as a dictionary.
@return <b><Dictionary></b>: List of VMs and their relevant information.
"""
log.debug("Entering getVMListWithInfo()...")
result = self._getVMObjects()
# Iterate through the Result object parameters.
# Each parameter is an instance of the prlsdkapi.Vm class.
vm_list_info = {}
for i in range(result.get_params_count()):
vm = result.get_param_by_index(i)
vm_list_info[i] = self.getVMInfo(vm)
log.debug(vm_list_info)
log.debug("Exiting getVMListWithInfo()...")
return vm_list_info
def getVMInfo(self, vm):
"""
Given a vm object, it'll return all the information about that VM.
@param <b><Object></b>: prlsdapi vm object
@return <b><Dictionary></b>: VM's information as a dictionary.
"""
log.debug("Entering getVMInfo()...")
vm_info = {}
vm_config = vm.get_config()
vm_info["name"] = vm_config.get_name()
vm_info["status"] = self.getVMStatus(vm)
vm_info["os"] = self.getVMOSInfo(vm)
vm_info["network"] = self.getVMNetInfo(vm)
log.debug("Exiting getVMInfo()...")
return vm_info
def getVMStatus(self, vm):
"""
This method will determine the status of a VM.
@param <b><Object></b>: prlsdapi vm object
@return <b><String></b>: Status string; either "running", "suspended",
"stopped" or "paused"
"""
log.debug("Entering getVMStatus()...")
try:
state_result = vm.get_state().wait()
except prlsdkapi.PrlSDKError, e:
log.error("Error: %s" % e)
log.debug("Exiting getVMStatus()...")
return
# Now obtain the VmInfo object.
vm_info = state_result.get_param()
# Get the virtual machine state code.
state_code = vm_info.get_state()
state_desc = "unknown status"
# Translate the state code into a readable description.
# For the complete list of states, see the
# VMS_xxx constants in the Python API Reference guide.
if state_code == prlsdkapi.prlsdk.consts.VMS_RUNNING:
state_desc = "running"
elif state_code == prlsdkapi.prlsdk.consts.VMS_STOPPED:
state_desc = "stopped"
elif state_code == prlsdkapi.prlsdk.consts.VMS_PAUSED:
state_desc = "paused"
elif state_code == prlsdkapi.prlsdk.consts.VMS_SUSPENDED:
state_desc = "suspended"
log.debug("Exiting getVMStatus()...")
return state_desc
def getVMOSInfo(self, vm):
"""
This method will determine the OS that the VM is running. If it can't
determine the OS or its version, a generic prlsdkapi.prlsdk.consts
constant is returned.
@param <b><Object></b>: prlsdapi vm object
@return <b><Dictionary></b>: Dictionary with OS type & OS version.
"""
log.debug("Entering getVMOSInfo()...")
vm_config = vm.get_config()
# initialize our defaults
osType = ""
osVersion = ""
# Obtain the guest OS type and version.
# OS types are defined as PVS_GUEST_TYPE_xxx constants.
# For the complete list, see the documentation for
# the prlsdkapi.prlsdk.consts module or
# the Parallels Python API Reference guide.
os_type = vm_config.get_os_type()
if os_type == prlsdkapi.prlsdk.consts.PVS_GUEST_TYPE_WINDOWS:
osType = "Windows"
elif os_type == prlsdkapi.prlsdk.consts.PVS_GUEST_TYPE_LINUX:
osType = "Linux"
elif os_type == prlsdkapi.prlsdk.consts.PVS_GUEST_TYPE_MACOS:
osType = "Mac OS X"
else:
osType = "Other type (" + str(os_type) + ")"
# OS versions are defined as PVS_GUEST_VER_xxx constants.
# Here we assume that MACOS_LAST is Lion since there is no
# specific const declared for Lions, as of September, 2011
os_version = vm_config.get_os_version()
if os_version == prlsdkapi.prlsdk.consts.PVS_GUEST_VER_WIN_XP:
osVersion = "XP"
elif os_version == prlsdkapi.prlsdk.consts.PVS_GUEST_VER_WIN_WINDOWS7:
osVersion = "7"
elif os_version == prlsdkapi.prlsdk.consts.PVS_GUEST_VER_LIN_UBUNTU:
osVersion = "Ubuntu"
elif os_version == prlsdkapi.prlsdk.consts.PVS_GUEST_VER_LIN_FEDORA_5:
osVersion = "Fedora 5"
elif os_version == prlsdkapi.prlsdk.consts.PVS_GUEST_VER_MACOS_LAST:
osVersion = "Lion"
else:
osVersion = "Other version (" + str(os_version) + ")"
log.debug("Exiting getVMOSInfo()...")
return {"osType" : osType, "osVersion" : osVersion}
def getVMNetInfo(self, vm, g_username="", g_password=""):
"""
This method will find all the adapters of the Vm & get its relevant
information. This will first try to log in to the guest & find that info
from within the guest, if it can't do that then it'll try to determine
that information from outside the VM. You will get much better & detailed
info if you provide username & password of the guest VM.
@param <b><Object></b>: prlsdapi vm object
@param <b><String></b>: <optional> guest OS username
@param <b><String></b>: <optional> guest OS password
@return <b><Dictionary></b>: Dictionary with adapter details
"""
# First try getting the information from inside & if that returns empty
# try getting the information from outside
vm_net_adapters = self._getVMNetInfoFromInsideVM(vm, g_username, g_password)
if (not vm_net_adapters):
vm_net_adapters = self._getVMNetInfoFromOutsideVM(vm)
return vm_net_adapters
def _getVMNetInfoFromOutsideVM(self, vm):
"""
This method will find all the adapters of the Vm & list the MAC address,
IP (if running) & type of adapter. It uses ARP to determine the ip of
the adapter. All this information is gathered from outside the VM.
@note Also look at _getVMNetInfoFromInsideVM().
@param <b><Object></b>: prlsdapi vm object
@return <b><Dictionary></b>: Dictionary with "type" of adapter, its "mac"
address & assigned "ip" (only when OS is running)
"""
log.debug("Entering getVMNetInfoFromOutsideVM()...")
vm_config = vm.get_config()
vm_net_adapters = {}
# Obtain the network interface info.
# The vm.net_adapters sequence contains objects of type VmNetDev.
count = vm_config.get_net_adapters_count()
for n in range(count):
# set the defaulst, just to be sure
ip = ""
mac = ""
emulated_type = ""
net_adapter = None
type = ""
vm_net_adapters[n] = {}
net_adapter = vm_config.get_net_adapter(n)
emulated_type = net_adapter.get_emulated_type()
if emulated_type == prlsdkapi.prlsdk.consts.PNA_HOST_ONLY:
type = "host-only"
elif emulated_type == prlsdkapi.prlsdk.consts.PNA_SHARED:
type = "shared"
elif emulated_type == prlsdkapi.prlsdk.consts.PNA_BRIDGED_ETHERNET:
type = "bridged"
vm_net_adapters[n]["type"] = type
mac = str(net_adapter.get_mac_address())
vm_net_adapters[n]["mac"] = mac
# net_adapter.get_net_addresses() is supposed to provide us with
# the ip of the vm, but I couldn't get it to work with Lion. So
# this is the next best solution - using arp to figure out the
# ip. Also this is tailored only to work on OS X. BTW, we check this
# only if the vm status is running.
#
# Find the ip address from arp cache
# arp -a | grep mac_Address
if (self.getVMStatus(vm) == 'running'):
# Mac address format on OS X as used by arp is a little different.
# The tuples drop the leading 0. So we modify the string that we get
# the API to match the format of arp.
arp_mac = [mac[i:i+2] for i in range(0,len(mac),2)]
arp_mac = ':'.join([re.sub(r'0(\d)',r'\1',i) for i in arp_mac]).lower()
p1 = subprocess.Popen(["arp", "-a"], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", arp_mac], stdin=p1.stdout, stdout=subprocess.PIPE)
p1.stdout.close()
output = p2.communicate()[0]
m = re.match(r'^.*?\((.*?)\).*', output)
ip = m.group(1) if m else ""  # arp may not have an entry for this MAC yet
vm_net_adapters[n]["ip"] = ip
log.debug("Exiting getVMNetInfoFromOutsideVM()...")
return vm_net_adapters
def _getVMNetInfoFromInsideVM(self, vm, g_username, g_password):
"""
This method will try to log in & create a new session in the VM or bind
to an existing one and will find all the adapters of the Vm & list the
MAC address, IP/Subnet, type of adapter, gateway & DNS.
@note Also look at _getVMNetInfoFromOutsideVM().
@param <b><Object></b>: prlsdapi vm object
@param <b><String></b>: guest OS username
@param <b><String></b>: guest OS password
@return <b><Dictionary></b>: Dictionary with all the adapters info
"""
log.debug("Entering getVMNetInfoFromInsideVM()...")
vm_net_adapters = {}
# login to the guest & create a session
try:
vm_guest = vm.login_in_guest(g_username, g_password).wait().get_param()
except prlsdkapi.PrlSDKError, e:
log.error("Guest OS Login Error: %s" % e)
return
server_config = vm_guest.get_network_settings().wait().get_param()
count = server_config.get_net_adapters_count()
vm_net_adapters = {}
# Find all the adapters & collect their information
for n in range(count):
vm_net_adapters[n] = {}
type = ""
host_net = server_config.get_net_adapter(n)
emulated_type = host_net.get_net_adapter_type()
if emulated_type == prlsdkapi.prlsdk.consts.PNA_HOST_ONLY:
type = "host-only"
elif emulated_type == prlsdkapi.prlsdk.consts.PNA_SHARED:
type = "shared"
elif emulated_type == prlsdkapi.prlsdk.consts.PNA_BRIDGED_ETHERNET:
type = "bridged"
vm_net_adapters[n]["type"] = type
vm_net_adapters[n]["ip"] = host_net.get_net_addresses().get_item(0)
vm_net_adapters[n]["mac"] = host_net.get_mac_address()
dns_str_list = host_net.get_dns_servers()
dns_str_cnt = dns_str_list.get_items_count()
dns_str = []
for m in range(dns_str_cnt):
dns_str.append(dns_str_list.get_item(m))
vm_net_adapters[n]["dns"] = dns_str
vm_net_adapters[n]["gateway"] = host_net.get_default_gateway()
# Logout from our session
vm_guest.logout()
log.debug("Exiting getVMNetInfoFromInsideVM()...")
return vm_net_adapters
def startVM(self, vm):
"""
Starts a VM if it is not in "running" state.
        @param <b><Object></b>: prlsdkapi vm object
@return <b><String></b>: "started" if successfully started, otherwise
status as returned by getVMStatus().
"""
log.debug("Entering startVM()...")
# Check whether the vm is already running otherwise start it
status = self.getVMStatus(vm)
if (status != "running"):
try:
vm.start().wait()
status = 'started'
except prlsdkapi.PrlSDKError, e:
status = "Error: %s" % e
log.debug("Exiting startVM()...")
return status
def stopVM(self, vm, acpi):
"""
Stops a VM if it is in "running" state.
        @param <b><Object></b>: prlsdkapi vm object
@param <b><Boolean></b>: Whether to perform a graceful shutdown of VM's
OS using ACPI (if the OS supports it).
@return <b><String></b>: "stopped" if successfully stopped, otherwise
status as returned by getVMStatus().
"""
log.debug("Entering stopVM()...")
status = self.getVMStatus(vm)
if (status == "running"):
if (acpi):
try:
vm.stop(True).wait()
status = 'stopped'
except prlsdkapi.PrlSDKError, e:
status = "Error: %s" % e
else:
try:
vm.stop().wait()
status = 'stopped'
except prlsdkapi.PrlSDKError, e:
status = "Error: %s" % e
log.debug("Exiting stopVM()...")
return status
def switchToSnapshot(self, vm, snapshot_name):
"""
Switches to an existing snapshot.
        @param <b><Object></b>: prlsdkapi vm object.
@param <b><String></b>: Name of the snapshot to switch to.
        @return <b><String></b>: Reason string explaining what happened.
"""
log.debug("Entered switchToSnapshot()...")
xml_str = vm.get_snapshots_tree().wait().get_param_as_string()
log.debug(xml_str)
# ElementTree doesn't appreciate if you undeclare the prefix, so we
# gotta take care of it
xml_hack = 'xmlns:xsi="" vm_uuid="" xsi:noNamespaceSchemaLocation=""'
xml_str = re.sub(xml_hack, '', xml_str)
snaps_xml_obj = xml.etree.ElementTree.fromstring(xml_str)
snaps = snaps_xml_obj.findall(".//SavedStateItem")
guid = None
for i in range(len(snaps)):
            if (snaps[i].find("./Name").text == snapshot_name):
guid = snaps[i].get('guid')
break
log.debug(guid)
result = ""
if (guid):
try:
job = vm.switch_to_snapshot(guid)
job.wait()
ret_code = job.get_ret_code()
if (not ret_code):
result = "Switched to given snapshot"
else:
log.error(ret_code)
result = "Encountered an error while switching to snapshot"
except prlsdkapi.PrlSDKError, e:
log.error("Error: %s " % e)
result = "Caught an exception while switching to snapshot"
else:
result = "Snapshot not found"
log.debug("Exiting switchToSnapshot()...")
return result
def deployTemplate(self, template, new_name):
"""
Clones from a VM template & starts it.
        @param <b><Object></b>: prlsdkapi vm object which is a template.
@param <b><String></b>: Name of the new VM that'll be deployed.
@return <b><String></b>: Reason string explaining what happened.
"""
log.debug("Entered deployTemplate()...")
ret = ""
# First check that new_name is unique
vm_list = self.getVMList()
if (new_name not in vm_list):
# verify that template is indeed a template...
if (template.is_template()):
try:
log.debug("Clonning is in progress...")
template.clone(new_name, "", False).wait()
new_vm = self.searchVM(new_name)
status = self.startVM(new_vm)
if (status == 'started' or status == 'running'):
ret = "deployed"
log.debug("Deployed a new VM from template.")
except prlsdkapi.PrlSDKError, e:
ret = "Error: %s" % e
log.error(ret)
else:
ret = "Could not find a template with the given name"
else:
return "Another VM with the same name exists, provide a unique name for the new VM."
log.debug("Exiting deployTemplate()...")
return ret
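# --- usage sketch (illustrative only, never called): driving PrlVMManager directly,
# outside the HTTP layer. The VM name below is a placeholder, not taken from any
# real configuration. ---
def _example_manager_usage(manager):
    vm = manager.searchVM('Lion123')
    if vm:
        print(manager.getVMStatus(vm))   # e.g. 'running' or 'stopped'
        print(manager.startVM(vm))       # 'started' on success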
class ServerUtils:
""" A general utility class providing helper methods """
def screenshot(self):
"""
This method will take a screenshot of main monitor & return it if
successful or it'll return an empty string
@return <b><Binary></b>: Returns a jpg, if one was generated
"""
log.debug("Entering screenshot()...")
screen = '/tmp/screen.jpg'
image = None
        try:
            os.system('screencapture -m %s' % screen)
            image = open(screen, 'rb').read()
            log.debug("Generated screenshot")
        except:
            log.exception("Could not generate screenshot")
        finally:
            if os.path.exists(screen):
                os.remove(screen)
log.debug("Exiting screenshot()...")
return image
def parseJSONFromPOST(self):
"""
This method will parse the JSON input from the POST body & return a
python dict object
@return <b>parsed JSON object</b>: Could either be a python dictionary
or an array (depends on JSON sent)
"""
log.debug("Entered parseJSONFromPOST()...")
try:
data = json.loads(request.body.readline())
log.debug(data)
log.debug("Exiting parseJSONFromPOST()...")
return data
except ValueError:
log.error("Bad request: Exiting parseJSONFromPOST()...")
abort(400, 'Bad request: Could not decode request body,\
JSON expected.')
def osDetails(self):
"""
This method will return the OS details on which the server is running
@return <b><Dictionary></b>:
Returns 'osname' : Name of the OS
'osbit' : 32 or 64 bit OS
'osver' : version of the OS if available
"""
log.debug("Entering osDetails()...")
details = {}
cmd = 'sw_vers -productName'
try:
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
details['osname'] = p.stdout.read().rstrip('\n')
except:
e = sys.exc_info()[1]
log.error(e)
cmd = 'sw_vers -productVersion'
try:
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
details['osver'] = p.stdout.read().rstrip('\n')
except:
e = sys.exc_info()[1]
log.error(e)
cmd = 'uname -a | grep RELEASE_I386'
try:
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
if (p.stdout.read()):
details['osbit'] = 32
else:
details['osbit'] = 64
except:
e = sys.exc_info()[1]
log.error(e)
log.debug("Exiting osDetails()...")
return details
################################################################################
vmServer = Bottle()
vmManager = PrlVMManager()
################################################################################
# Routes
################################################################################
@vmServer.error(404)
def error404(error):
return 'Ooh, over there. Something shiny!'
@vmServer.get('/routes')
def routes():
"""
Returns list of routes available
Resource : <b>/routes</b>
Method : GET
"""
routes = ""
for route in vmServer.routes:
routes += route['rule']
return routes
@vmServer.get('/screenshot')
def screenshot():
"""
This method will take a screenshot of the main monitor & return
base64 encoded image back.
Resource : <b>/screenshot</b>
Method : GET
@return <b><JSONResponseObject></b>
"""
status = None
value = None
screen = None
    utils = ServerUtils()
image = utils.screenshot()
if (image):
status = 0
value = "Generated screenshot"
screen = base64.b64encode(image)
else:
status = 9
value = "Could not generate screenshot, check logs"
response.content_type = 'application/json; charset=utf-8'
ret = {'status':status, 'value':value, 'screen':screen}
return ret
@vmServer.get('/os')
def osInfo():
"""
This method will return the OS details vmManager is running on
Resource : <b>/os</b>
Method : GET
@return <b><JSONResponseObject></b>
"""
status = None
value = None
screen = None
    utils = ServerUtils()
details = utils.osDetails()
if (details):
status = 0
value = details
else:
status = 9
value = "Could not generate screenshot, check logs"
response.content_type = 'application/json; charset=utf-8'
ret = {'status':status, 'value':value, 'screen':screen}
return ret
@vmServer.get('/VM/list')
def vmList():
"""
Returns list of VMs on the local machine (including templates).
Resource : <b>/VM/list</b>
Method : GET
@return <b><JSONResponseObject></b>
"""
log.debug("Entered vmList()...")
value = None
status = None
list = vmManager.getVMList()
log.debug(list)
if (list):
value = list
status = 0
else:
status = 9
value = "Could not get list of VMs."
log.debug("Exiting vmList()...")
response.content_type = 'application/json; charset=utf-8'
return {'status':status, 'value':value}
@vmServer.get('/VM/listWithInfo')
def vmListWithInfo():
"""
Returns list of VMs on the local machine and their information
    Resource : <b>/VM/listWithInfo</b>
Method : GET
@return <b><JSONResponseObject></b>
"""
log.debug("Entered vmListAllWithInfo()...")
value = None
status = None
list = vmManager.getVMListWithInfo()
log.debug(list)
if (list):
value = list
status = 0
else:
status = 9
value = "Could not get list of VMs."
log.debug("Exiting vmList()...")
response.content_type = 'application/json; charset=utf-8'
return {'status':status, 'value':value}
@vmServer.get('/VM/:vmName/info')
def vmInfo(vmName):
"""
Returns information about the VM such as os, network, status etc
Resource : <b>/VM/:vmName/info</b>
Method : GET
@return <b><JSONResponseObject></b>
"""
log.debug("Entered vmInfo()...")
vm = vmManager.searchVM(vmName)
value = None
status = None
if (vm):
value = vmManager.getVMInfo(vm)
status = 0
else:
value = 'Could not find the given VM name'
status = 9
log.debug("Exiting vmInfo()...")
response.content_type = 'application/json; charset=utf-8'
return {'status' : status, 'value' : value}
@vmServer.get('/VM/:vmName/status')
def vmStatus(vmName):
"""
Returns status of the VM
Resource : <b>/VM/:vmName/status</b>
Method : GET
@return <b><JSONResponseObject></b>
"""
log.debug("Entered vmStatus()...")
vm = vmManager.searchVM(vmName)
value = None
status = None
if (vm):
value = vmManager.getVMStatus(vm)
status = 0
else:
value = 'Could not find the given VM name'
status = 9
log.debug("Exiting vmStatus()...")
response.content_type = 'application/json; charset=utf-8'
return {'status' : status, 'value' : value}
@vmServer.get('/VM/:vmName/os')
def vmOSInfo(vmName):
"""
Returns OS that VM is running
Resource : <b>/VM/:vmName/os</b>
Method : GET
@return <b><JSONResponseObject></b>
"""
log.debug("Entered vmOSInfo()...")
vm = vmManager.searchVM(vmName)
value = None
status = None
if (vm):
value = vmManager.getVMOSInfo(vm)
status = 0
else:
value = 'Could not find the given VM name'
status = 9
log.debug("Exiting vmOSInfo()...")
response.content_type = 'application/json; charset=utf-8'
return {'status' : status, 'value' : value}
@vmServer.get('/VM/:vmName/adapters')
@vmServer.post('/VM/:vmName/adapters')
def vmAdapterInfo(vmName):
"""
Returns the adapter(s) details of the VM
Resource : <b>/VM/:vmName/adapters</b>
Method : GET, doesn't log in to VM
Method : POST, logs in to the VM using the provided credentials
POST Data: json object with keys - 'username' & 'password' to log in to guest.
@return <b><JSONResponseObject></b>
"""
log.debug("Entered vmAdapterInfo()...")
value = None
status = None
username = ""
password = ""
utils = ServerUtils()
if ((request.method == 'POST') and request.body.readline()):
data = utils.parseJSONFromPOST()
if data.has_key('username'):
username = data["username"]
if data.has_key('password'):
password = data['password']
vm = vmManager.searchVM(vmName)
if (vm):
value = vmManager.getVMNetInfo(vm, username, password)
status = 0
else:
value = 'Could not find the given VM name'
status = 9
log.debug("Exiting vmAdapterInfo()...")
response.content_type = 'application/json; charset=utf-8'
return {'status' : status, 'value' : value}
@vmServer.put('/VM/:vmName/start')
def vmStart(vmName):
"""
Start the VM if it is not already running. This just refers to the VM &
not the OS running (or stopped) in it.
Resource : <b>/VM/:vmName/start</b>
    Method   : PUT
@return <b><JSONResponseObject></b>
"""
log.debug("Entered vmStart()...")
vm = vmManager.searchVM(vmName)
value = None
status = None
if (vm):
value = vmManager.startVM(vm)
status = 0
else:
value = 'Could not find the given VM name'
status = 9
log.debug("Exiting vmStart()...")
response.content_type = 'application/json; charset=utf-8'
return {'status' : status, 'value' : value}
@vmServer.put('/VM/:vmName/stop')
def vmStop(vmName):
"""
Stop the VM if it is running. This can also stop the OS.
    Resource : <b>/VM/:vmName/stop</b>
Method : PUT
POST Data: JSON object with key 'acpi' as true or false value.
@code curl -d"{\"acpi\":true}" -X PUT http://localhost:9898/VM/Lion123/stop
@return <b><JSONResponseObject></b>
"""
log.debug("Entered vmStart()...")
b_acpi = False
utils = ServerUtils()
if (request.body.readline()):
data = utils.parseJSONFromPOST()
if data.has_key('acpi'):
b_acpi = data["acpi"]
vm = vmManager.searchVM(vmName)
value = None
status = None
if (vm):
value = vmManager.stopVM(vm, b_acpi)
status = 0
else:
value = 'Could not find the given VM name'
status = 9
log.debug("Exiting vmStart()...")
response.content_type = 'application/json; charset=utf-8'
return {'status' : status, 'value' : value}
@vmServer.put('/VM/:vmName/switchToSnapshot')
def vmSwitchToSnapshot(vmName):
"""
    Switch to a given snapshot.
    Resource : <b>/VM/:vmName/switchToSnapshot</b>
Method : PUT
POST Data: JSON object with key 'snapshot' containing the name of a snapshot.
@return <b><JSONResponseObject></b>
"""
log.debug("Entered vmSwitchToSnapshot()...")
snapshot_name = None
utils = ServerUtils()
if (request.body.readline()):
data = utils.parseJSONFromPOST()
if data.has_key('snapshot'):
snapshot_name = data["snapshot"]
if (not snapshot_name):
abort(400, "Snopshot Name not provided, can't continue.")
vm = vmManager.searchVM(vmName)
value = None
status = None
if (vm):
value = vmManager.switchToSnapshot(vm, snapshot_name)
if (value == "Switched to given snapshot"):
status = 0
else:
status = 9
else:
value = 'Could not find the given VM name'
status = 9
log.debug("Exiting vmSwitchToSnapshot()...")
response.content_type = 'application/json; charset=utf-8'
return {'status' : status, 'value' : value}
@vmServer.get('/templates/list')
def vmListTemplates():
"""
Returns a list of all the VMs that are templates.
Resource : <b>/templates/list</b>
Method : GET
    @return <b><JSONResponseObject></b>
"""
log.debug("Entering vmListTemplates()...")
value = None
status = None
list = vmManager.getTemplateList()
log.debug(list)
if (list):
value = list
status = 0
else:
status = 9
value = "Could not get list of VMs."
log.debug("Exiting vmListTemplates()...")
response.content_type = 'application/json; charset=utf-8'
return {'status':status, 'value':value}
@vmServer.post('/templates/:templateName/deploy')
def vmDeployTemplate(templateName):
"""
Deploy a new virtual machine from an existing template
Resource : <b>/templates/:templateName/deploy</b>
Method : POST
POST Data: JSON object with key "new_name" providing a unique name for the new vm.
@return <b><JSONResponseObject></b>
"""
log.debug("Entered vmDeployTemplate()...")
new_name = None
utils = ServerUtils()
if (request.body.readline()):
data = utils.parseJSONFromPOST()
if data.has_key('new_name'):
new_name = data['new_name']
if (not new_name):
abort(400, "Snopshot Name not provided, can't continue.")
template = vmManager.searchVM(templateName)
value = None
status = None
    if (template and template.is_template()):
value = vmManager.deployTemplate(template, new_name)
if (value == "deployed"):
status = 0
else:
status = 9
else:
value = 'Could not find the given template'
status = 9
log.debug("Exiting vmDeployTemplate()...")
response.content_type = 'application/json; charset=utf-8'
return {'status' : status, 'value' : value}
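# --- client usage sketch (illustrative only, never called). Assumes the `requests`
# package is available and a server is already listening on localhost:9898; the VM
# and template names below are placeholders. ---
def _example_client_usage():
    import requests
    base = 'http://localhost:9898'
    print(requests.get(base + '/VM/list').json())
    requests.put(base + '/VM/Lion123/stop', json={'acpi': True})
    requests.post(base + '/templates/BaseTemplate/deploy', json={'new_name': 'NewVM1'})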
run(app=vmServer, host='0.0.0.0', port=9898, reloader=reload)
| 1.9375 | 2 |
tests/fixtures.py | hroncok/dist-chat | 1 | 12787469 | <filename>tests/fixtures.py
import pytest
from soucevi1_dist_chat.CNode import CNode
from soucevi1_dist_chat.CMessage import CMessage, MessageType
@pytest.fixture
def node_instance():
node = CNode(False, '127.0.0.1', '12345', '127.0.0.1', '4321', 'Jmeno')
return node
@pytest.fixture
def message_instance():
message = CMessage(sender_address='127.0.0.1',
sender_port='54321',
sender_name='Lojza',
time=78,
message_type=MessageType.user_message,
message_data='Ahoj')
return message
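# A minimal sketch of how these fixtures are typically consumed by a test. The
# attribute layout of CNode/CMessage is not shown in this file, so only the fixture
# types are checked here.
def test_fixture_types(node_instance, message_instance):
    assert isinstance(node_instance, CNode)
    assert isinstance(message_instance, CMessage)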
| 1.96875 | 2 |
tests/test_matrix_props/test_is_unitary.py | paniash/toqito | 76 | 12787470 | <filename>tests/test_matrix_props/test_is_unitary.py
"""Test is_unitary."""
import numpy as np
from toqito.matrix_props import is_unitary
from toqito.random import random_unitary
def test_is_unitary_random():
"""Test that random unitary matrix returns True."""
mat = random_unitary(2)
np.testing.assert_equal(is_unitary(mat), True)
def test_is_unitary_hardcoded():
"""Test that hardcoded unitary matrix returns True."""
mat = np.array([[0, 1], [1, 0]])
np.testing.assert_equal(is_unitary(mat), True)
def test_is_not_unitary():
"""Test that non-unitary matrix returns False."""
mat = np.array([[1, 0], [1, 1]])
np.testing.assert_equal(is_unitary(mat), False)
def test_is_not_unitary_matrix():
"""Test that non-unitary matrix returns False."""
mat = np.array([[1, 0], [1, 1]])
np.testing.assert_equal(is_unitary(mat), False)
def test_is_unitary_not_square():
"""Input must be a square matrix."""
mat = np.array([[-1, 1, 1], [1, 2, 3]])
np.testing.assert_equal(is_unitary(mat), False)
if __name__ == "__main__":
np.testing.run_module_suite()
| 3.0625 | 3 |
service_layer/abstract_services/manager_service.py | Alejandro-Fuste/Python_Javascript_ReimbursementApp | 0 | 12787471 | from abc import ABC, abstractmethod
from typing import List
from entities.manager import Manager
class ManagerService(ABC):
# for login purposes
@abstractmethod
def service_validate_manager(self, user_name: str, password: str) -> Manager:
pass
@abstractmethod
def service_get_all_managers(self) -> List[Manager]:
pass
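# --- minimal sketch of a concrete implementation. The data-access object used below
# is hypothetical; only the abstract interface above comes from the project. ---
class ManagerServiceImpl(ManagerService):
    def __init__(self, manager_dao):
        self.manager_dao = manager_dao  # assumed DAO exposing the two calls below
    def service_validate_manager(self, user_name: str, password: str) -> Manager:
        return self.manager_dao.validate_manager(user_name, password)
    def service_get_all_managers(self) -> List[Manager]:
        return self.manager_dao.get_all_managers()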
| 2.96875 | 3 |
Backend/aws lambdas/vYelper-sqs_Handler/sns_handler.py | dhruvarora2/Yelper | 0 | 12787472 | <reponame>dhruvarora2/Yelper
import logging
import boto3
# Initialize logger and set log level
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Initialize SNS client for the us-east-1 region
session = boto3.Session(
region_name="us-east-1"
)
sns_client = session.client('sns')
def send_message(phonenumber, suggestions):
# suggestions = ["Chipotle","MCD","x1","x2","x3"] #Separate these and add it to Message.
result = " "
for s in suggestions:
result = result + "\n " + s
message = "Here are your Top 5 Suggestions : " + result
print (message)
response = sns_client.publish(
Message = message,
PhoneNumber = phonenumber,
MessageAttributes={
'AWS.SNS.SMS.SenderID': {
'DataType': 'String',
'StringValue': 'DHRUV'
},
'AWS.SNS.SMS.SMSType': {
'DataType': 'String',
'StringValue': 'Promotional'
}
},
)
logger.info(response)
return 'OK' | 2.4375 | 2 |
Configuration/DataProcessing/python/Impl/__init__.py | Purva-Chaudhari/cmssw | 852 | 12787473 | <reponame>Purva-Chaudhari/cmssw
#!/usr/bin/env python3
"""
_Impl_
Scenario Implementations
"""
__all__ = []
| 0.863281 | 1 |
geophoto_api/views.py | amatmv/geophoto-backend | 1 | 12787474 | import boto3
import io
import base64
from PIL import Image
from django.contrib.auth.models import User
from django.contrib.auth import get_user_model
from django.core.files.uploadedfile import InMemoryUploadedFile, TemporaryUploadedFile
from rest_framework import generics, permissions
from rest_framework_jwt.settings import api_settings
from rest_framework.response import Response
from rest_framework.views import status
from geophoto.settings import AWS_S3_BASE_URL, AWS_STORAGE_BUCKET_NAME
from .decorators import validate_request_data_photo
from .serializers import PhotoSerializer, UserSerializer
from .models import *
User = get_user_model()
# Get the JWT settings
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
class ListUsers(generics.ListCreateAPIView):
"""
GET users/
"""
queryset = User.objects.all()
serializer_class = UserSerializer
permission_classes = (permissions.AllowAny,)
def get(self, request, *args, **kwargs):
try:
users = self.queryset.filter(**kwargs)
users_serialized = self.serializer_class(users, many=True).data
return Response(users_serialized)
except User.DoesNotExist:
return Response(
data={
"message": "User does not exist"
},
status=status.HTTP_404_NOT_FOUND
)
class ListSearchAround(generics.ListCreateAPIView):
"""
POST search_around/
"""
serializer_class = PhotoSerializer
permission_classes = (permissions.AllowAny,)
def post(self, request, *args, **kwargs):
dist = request.data.get('distance')
loc_lat = request.data.get('location_lat')
loc_lon = request.data.get('location_lon')
data = {}
if loc_lon and loc_lon and dist:
query = """
SELECT
uuid,
url,
title,
location AS point,
photo.user_id,
created_at,
prov.codiprov AS provincia,
mun.codimuni AS municipi,
comarca.codicomar AS comarca
FROM geophoto_api_photo photo
LEFT JOIN geophoto_api_provincia AS prov
ON ST_Contains(prov.geom, photo.location)
LEFT JOIN geophoto_api_municipi AS mun
ON ST_Contains(mun.geom, photo.location)
LEFT JOIN geophoto_api_comarca AS comarca
ON ST_Contains(comarca.geom, photo.location)
WHERE ST_DWithin(
ST_Transform(photo.location, 4326)::geography,
ST_SetSRID(ST_Makepoint({lon}, {lat}), 4326)::geography,
{dist}
)
ORDER BY ST_Distance(
ST_SetSRID(ST_MakePoint({lon}, {lat}), 4326),
ST_Transform(photo.location, 4326)
);
""".format(
lon=loc_lon,
lat=loc_lat,
dist=dist
)
rows = Photo.objects.raw(raw_query=query)
data = self.serializer_class(rows, many=True).data
return Response(data)
class ListWithinAround(generics.ListCreateAPIView):
"""
POST search_within/
"""
serializer_class = PhotoSerializer
permission_classes = (permissions.AllowAny,)
def get_photos_taken_in_provincia(self, name):
query = """
SELECT
uuid,
url,
title,
location AS point,
photo.user_id,
created_at,
prov.codiprov AS provincia,
mun.codimuni AS municipi,
comarca.codicomar AS comarca
FROM geophoto_api_photo AS photo
LEFT JOIN geophoto_api_provincia AS prov
ON ST_Contains(prov.geom, photo.location)
AND prov.nomprov ILIKE '%%{prov_name}%%'
LEFT JOIN geophoto_api_user u
ON u.id = photo.user_id
LEFT JOIN geophoto_api_municipi AS mun
ON ST_Contains(mun.geom, photo.location)
LEFT JOIN geophoto_api_comarca AS comarca
ON ST_Contains(comarca.geom, photo.location)
""".format(prov_name=name)
rows = Photo.objects.raw(raw_query=query)
response_data = self.serializer_class(rows, many=True).data
return response_data
def get_photos_taken_in_comarca(self, name):
query = """
SELECT
uuid,
url,
title,
location AS point,
photo.user_id,
created_at,
prov.codiprov AS provincia,
mun.codimuni AS municipi,
comarca.codicomar AS comarca
FROM geophoto_api_photo AS photo
JOIN geophoto_api_provincia AS prov
ON ST_Contains(prov.geom, photo.location)
JOIN geophoto_api_user u
ON u.id = photo.user_id
JOIN geophoto_api_municipi AS mun
ON ST_Contains(mun.geom, photo.location)
JOIN geophoto_api_comarca AS comarca
ON ST_Contains(comarca.geom, photo.location)
AND comarca.nomcomar ILIKE '%%{comarca_name}%%'
""".format(comarca_name=name)
rows = Photo.objects.raw(raw_query=query)
response_data = self.serializer_class(rows, many=True).data
return response_data
def get_photos_taken_in_municipi(self, name):
query = """
SELECT
uuid,
url,
title,
location AS point,
photo.user_id,
created_at,
prov.codiprov AS provincia,
mun.codimuni AS municipi,
comarca.codicomar AS comarca
FROM geophoto_api_photo AS photo
JOIN geophoto_api_provincia AS prov
ON ST_Contains(prov.geom, photo.location)
JOIN geophoto_api_user u
ON u.id = photo.user_id
JOIN geophoto_api_municipi AS mun
ON ST_Contains(mun.geom, photo.location)
AND mun.nommuni ILIKE '%%{mun_name}%%'
JOIN geophoto_api_comarca AS comarca
ON ST_Contains(comarca.geom, photo.location)
""".format(mun_name=name)
rows = Photo.objects.raw(raw_query=query)
response_data = self.serializer_class(rows, many=True).data
return response_data
def post(self, request, *args, **kwargs):
zone = request.data.get('zone')
zone_type = request.data.get('zone_type', '')
response_data = {}
if zone_type not in ('provincia', 'comarca', 'municipi'):
response_status = status.HTTP_400_BAD_REQUEST
else:
try:
if zone_type == 'provincia':
response_data = self.get_photos_taken_in_provincia(zone)
elif zone_type == 'comarca':
response_data = self.get_photos_taken_in_comarca(zone)
elif zone_type == 'municipi':
response_data = self.get_photos_taken_in_municipi(zone)
response_status = status.HTTP_200_OK
except Exception as e:
response_status = status.HTTP_500_INTERNAL_SERVER_ERROR
return Response(data=response_data, status=response_status)
class ListCreatePhotos(generics.ListCreateAPIView):
"""
GET photo/
POST photo/
"""
queryset = Photo.objects.all().order_by('-date_uploaded')
serializer_class = PhotoSerializer
permission_classes = (permissions.IsAuthenticated,)
@staticmethod
def get_bytesIO(data):
if isinstance(data, (InMemoryUploadedFile, TemporaryUploadedFile)):
photo_buf = io.BytesIO(data.file.read())
else:
b64_decoded = base64.b64decode(data)
photo_buf = io.BytesIO(b64_decoded)
return photo_buf
@staticmethod
def upload_s3_photo(photo_binary, key=None):
s3 = boto3.client('s3')
if key is None:
key = uuid.uuid4().hex[:6] + '.jpg'
s3.upload_fileobj(photo_binary, AWS_STORAGE_BUCKET_NAME, key)
url = "{aws_s3_url}{bucket_name}/{key}".format(
aws_s3_url=AWS_S3_BASE_URL,
bucket_name=AWS_STORAGE_BUCKET_NAME,
key=key
)
return url
@staticmethod
def generate_photo_name(photo_name):
return photo_name[:6] + '.jpg'
@validate_request_data_photo
def post(self, request, *args, **kwargs):
date_uploaded = datetime.today().strftime('%Y-%m-%d')
photo_file = request.data['photo']
bytes_data = self.get_bytesIO(photo_file)
exif_data = Photo.extract_exif_data(Image.open(bytes_data))
created_photo = None
try:
if not exif_data['location']:
lat = request.data.get('latitude', None)
lon = request.data.get('longitude', None)
exif_data['location'] = Photo.create_point(lat=lat, lon=lon)
if not exif_data['created_at']:
date = request.data.get('date', False)
exif_data['created_at'] = datetime.strptime(date, '%Y:%m:%d %H:%M:%S').strftime('%Y-%m-%d %H:%M:%S')
create_vals = {
'title': request.data["title"],
'date_uploaded': date_uploaded,
'user': request.user,
}
create_vals.update(exif_data)
created_photo = Photo.objects.create(**create_vals)
respose_data = {
'message': 'Photo posted successfully!'
}
response_status = status.HTTP_201_CREATED
except Exception as e:
respose_data = {
"message": "Internal server error."
}
response_status = status.HTTP_500_INTERNAL_SERVER_ERROR
print(e)
if created_photo is not None:
bytes_data = self.get_bytesIO(photo_file)
key = self.generate_photo_name(created_photo.uuid.hex)
url = self.upload_s3_photo(bytes_data, key=key)
created_photo.url = url
created_photo.save()
return Response(
data=respose_data,
status=response_status
)
def get(self, request, *args, **kwargs):
try:
            photos = self.queryset.filter(**kwargs)
            return Response(self.serializer_class(photos, many=True).data)
except Photo.DoesNotExist:
return Response(
data={
"message": "Photo does not exist"
},
status=status.HTTP_404_NOT_FOUND
)
class RegisterUsers(generics.CreateAPIView):
"""
POST auth/register/
"""
permission_classes = (permissions.AllowAny,)
def post(self, request, *args, **kwargs):
username = request.data.get("username", "")
password = request.data.get("password", "")
email = request.data.get("email", "")
if not username and not password and not email:
return Response(
data={
"message": "username, password and email is required to register a user"
},
status=status.HTTP_400_BAD_REQUEST
)
full_name = request.data.get('full_name', '')
new_user = User.objects.create_user(
username=username, password=password, email=email, full_name=full_name
)
return Response(
data=UserSerializer(new_user).data,
status=status.HTTP_201_CREATED
)
class ListUserPhotos(generics.ListAPIView):
"""
GET search_my_photos/
"""
queryset = Photo.objects.all().order_by('-date_uploaded')
serializer_class = PhotoSerializer
permission_classes = (permissions.IsAuthenticated,)
def post(self, request, *args, **kwargs):
try:
username = request.data.get('username', None)
query = """
SELECT
uuid,
url,
title,
location AS point,
photo.user_id,
created_at,
prov.codiprov AS provincia,
mun.codimuni AS municipi,
comarca.codicomar AS comarca
FROM geophoto_api_photo AS photo
JOIN geophoto_api_provincia AS prov
ON ST_Contains(prov.geom, photo.location)
LEFT JOIN geophoto_api_user u
ON u.id = photo.user_id
LEFT JOIN geophoto_api_municipi AS mun
ON ST_Contains(mun.geom, photo.location)
LEFT JOIN geophoto_api_comarca AS comarca
ON ST_Contains(comarca.geom, photo.location)
WHERE u.username ilike '{username}'
""".format(username=username)
rows = Photo.objects.raw(raw_query=query)
response_data = self.serializer_class(rows, many=True).data
return Response(response_data)
except Photo.DoesNotExist:
return Response(
data={
"message": "User does not exist"
},
status=status.HTTP_404_NOT_FOUND
)
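# --- wiring sketch (hypothetical urls.py content; the paths mirror the view
# docstrings above but are illustrative and not taken from the project) ---
def _example_urlpatterns():
    from django.urls import path
    return [
        path('users/', ListUsers.as_view()),
        path('auth/register/', RegisterUsers.as_view()),
        path('photo/', ListCreatePhotos.as_view()),
        path('search_around/', ListSearchAround.as_view()),
        path('search_within/', ListWithinAround.as_view()),
        path('search_my_photos/', ListUserPhotos.as_view()),
    ]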
| 1.945313 | 2 |
backend/ecs_tasks/delete_files/s3.py | guvenbz/amazon-s3-find-and-forget | 165 | 12787475 | import logging
from functools import lru_cache
from urllib.parse import urlencode, quote_plus
from boto_utils import fetch_job_manifest, paginate
from botocore.exceptions import ClientError
from utils import remove_none, retry_wrapper
logger = logging.getLogger(__name__)
def save(s3, client, buf, bucket, key, metadata, source_version=None):
"""
Save a buffer to S3, preserving any existing properties on the object
"""
# Get Object Settings
request_payer_args, _ = get_requester_payment(client, bucket)
object_info_args, _ = get_object_info(client, bucket, key, source_version)
tagging_args, _ = get_object_tags(client, bucket, key, source_version)
acl_args, acl_resp = get_object_acl(client, bucket, key, source_version)
extra_args = {
**request_payer_args,
**object_info_args,
**tagging_args,
**acl_args,
**{"Metadata": metadata},
}
logger.info("Object settings: %s", extra_args)
# Write Object Back to S3
logger.info("Saving updated object to s3://%s/%s", bucket, key)
contents = buf.read()
with s3.open("s3://{}/{}".format(bucket, key), "wb", **extra_args) as f:
f.write(contents)
s3.invalidate_cache() # TODO: remove once https://github.com/dask/s3fs/issues/294 is resolved
new_version_id = f.version_id
logger.info("Object uploaded to S3")
# GrantWrite cannot be set whilst uploading therefore ACLs need to be restored separately
write_grantees = ",".join(get_grantees(acl_resp, "WRITE"))
if write_grantees:
logger.info("WRITE grant found. Restoring additional grantees for object")
client.put_object_acl(
Bucket=bucket,
Key=key,
VersionId=new_version_id,
**{**request_payer_args, **acl_args, "GrantWrite": write_grantees,}
)
logger.info("Processing of file s3://%s/%s complete", bucket, key)
return new_version_id
@lru_cache()
def get_requester_payment(client, bucket):
"""
Generates a dict containing the request payer args supported when calling S3.
GetBucketRequestPayment call will be cached
:returns tuple containing the info formatted for ExtraArgs and the raw response
"""
request_payer = client.get_bucket_request_payment(Bucket=bucket)
return (
remove_none(
{
"RequestPayer": "requester"
if request_payer["Payer"] == "Requester"
else None,
}
),
request_payer,
)
@lru_cache()
def get_object_info(client, bucket, key, version_id=None):
"""
Generates a dict containing the non-ACL/Tagging args supported when uploading to S3.
HeadObject call will be cached
:returns tuple containing the info formatted for ExtraArgs and the raw response
"""
kwargs = {"Bucket": bucket, "Key": key, **get_requester_payment(client, bucket)[0]}
if version_id:
kwargs["VersionId"] = version_id
object_info = client.head_object(**kwargs)
return (
remove_none(
{
"CacheControl": object_info.get("CacheControl"),
"ContentDisposition": object_info.get("ContentDisposition"),
"ContentEncoding": object_info.get("ContentEncoding"),
"ContentLanguage": object_info.get("ContentLanguage"),
"ContentType": object_info.get("ContentType"),
"Expires": object_info.get("Expires"),
"Metadata": object_info.get("Metadata"),
"ServerSideEncryption": object_info.get("ServerSideEncryption"),
"StorageClass": object_info.get("StorageClass"),
"SSECustomerAlgorithm": object_info.get("SSECustomerAlgorithm"),
"SSEKMSKeyId": object_info.get("SSEKMSKeyId"),
"WebsiteRedirectLocation": object_info.get("WebsiteRedirectLocation"),
}
),
object_info,
)
@lru_cache()
def get_object_tags(client, bucket, key, version_id=None):
"""
Generates a dict containing the Tagging args supported when uploading to S3
GetObjectTagging call will be cached
:returns tuple containing tagging formatted for ExtraArgs and the raw response
"""
kwargs = {"Bucket": bucket, "Key": key}
if version_id:
kwargs["VersionId"] = version_id
tagging = client.get_object_tagging(**kwargs)
return (
remove_none(
{
"Tagging": urlencode(
{tag["Key"]: tag["Value"] for tag in tagging["TagSet"]},
quote_via=quote_plus,
)
}
),
tagging,
)
@lru_cache()
def get_object_acl(client, bucket, key, version_id=None):
"""
Generates a dict containing the ACL args supported when uploading to S3
GetObjectAcl call will be cached
:returns tuple containing ACL formatted for ExtraArgs and the raw response
"""
kwargs = {"Bucket": bucket, "Key": key, **get_requester_payment(client, bucket)[0]}
if version_id:
kwargs["VersionId"] = version_id
acl = client.get_object_acl(**kwargs)
existing_owner = {"id={}".format(acl["Owner"]["ID"])}
return (
remove_none(
{
"GrantFullControl": ",".join(
existing_owner | get_grantees(acl, "FULL_CONTROL")
),
"GrantRead": ",".join(get_grantees(acl, "READ")),
"GrantReadACP": ",".join(get_grantees(acl, "READ_ACP")),
"GrantWriteACP": ",".join(get_grantees(acl, "WRITE_ACP")),
}
),
acl,
)
def get_grantees(acl, grant_type):
prop_map = {
"CanonicalUser": ("ID", "id"),
"AmazonCustomerByEmail": ("EmailAddress", "emailAddress"),
"Group": ("URI", "uri"),
}
filtered = [
grantee["Grantee"]
for grantee in acl.get("Grants")
if grantee["Permission"] == grant_type
]
grantees = set()
for grantee in filtered:
identifier_type = grantee["Type"]
identifier_prop = prop_map[identifier_type]
grantees.add("{}={}".format(identifier_prop[1], grantee[identifier_prop[0]]))
return grantees
@lru_cache()
def validate_bucket_versioning(client, bucket):
resp = client.get_bucket_versioning(Bucket=bucket)
versioning_enabled = resp.get("Status") == "Enabled"
mfa_delete_enabled = resp.get("MFADelete") == "Enabled"
if not versioning_enabled:
raise ValueError("Bucket {} does not have versioning enabled".format(bucket))
if mfa_delete_enabled:
raise ValueError("Bucket {} has MFA Delete enabled".format(bucket))
return True
@lru_cache()
def fetch_manifest(manifest_object):
return fetch_job_manifest(manifest_object)
def delete_old_versions(client, input_bucket, input_key, new_version):
try:
resp = list(
paginate(
client,
client.list_object_versions,
["Versions", "DeleteMarkers"],
Bucket=input_bucket,
Prefix=input_key,
VersionIdMarker=new_version,
KeyMarker=input_key,
)
)
versions = [el[0] for el in resp if el[0] is not None]
delete_markers = [el[1] for el in resp if el[1] is not None]
versions.extend(delete_markers)
sorted_versions = sorted(versions, key=lambda x: x["LastModified"])
version_ids = [v["VersionId"] for v in sorted_versions]
errors = []
max_deletions = 1000
for i in range(0, len(version_ids), max_deletions):
resp = client.delete_objects(
Bucket=input_bucket,
Delete={
"Objects": [
{"Key": input_key, "VersionId": version_id}
for version_id in version_ids[i : i + max_deletions]
],
"Quiet": True,
},
)
errors.extend(resp.get("Errors", []))
if len(errors) > 0:
raise DeleteOldVersionsError(
errors=[
"Delete object {} version {} failed: {}".format(
e["Key"], e["VersionId"], e["Message"]
)
for e in errors
]
)
except ClientError as e:
raise DeleteOldVersionsError(errors=[str(e)])
def verify_object_versions_integrity(
client, bucket, key, from_version_id, to_version_id
):
def raise_exception(msg):
raise IntegrityCheckFailedError(msg, client, bucket, key, to_version_id)
conflict_error_template = "A {} ({}) was detected for the given object between read and write operations ({} and {})."
not_found_error_template = "Previous version ({}) has been deleted."
object_versions = retry_wrapper(client.list_object_versions)(
Bucket=bucket,
Prefix=key,
VersionIdMarker=to_version_id,
KeyMarker=key,
MaxKeys=1,
)
versions = object_versions.get("Versions", [])
delete_markers = object_versions.get("DeleteMarkers", [])
all_versions = versions + delete_markers
if not len(all_versions):
return raise_exception(not_found_error_template.format(from_version_id))
prev_version = all_versions[0]
prev_version_id = prev_version["VersionId"]
if prev_version_id != from_version_id:
conflicting_version_type = (
"delete marker" if "ETag" not in prev_version else "version"
)
return raise_exception(
conflict_error_template.format(
conflicting_version_type,
prev_version_id,
from_version_id,
to_version_id,
)
)
return True
def rollback_object_version(client, bucket, key, version, on_error):
""" Delete newly created object version as soon as integrity conflict is detected """
try:
return client.delete_object(Bucket=bucket, Key=key, VersionId=version)
except ClientError as e:
err_message = "ClientError: {}. Version rollback caused by version integrity conflict failed".format(
str(e)
)
on_error(err_message)
except Exception as e:
err_message = "Unknown error: {}. Version rollback caused by version integrity conflict failed".format(
str(e)
)
on_error(err_message)
class DeleteOldVersionsError(Exception):
def __init__(self, errors):
super().__init__("\n".join(errors))
self.errors = errors
class IntegrityCheckFailedError(Exception):
def __init__(self, message, client, bucket, key, version_id):
self.message = message
self.client = client
self.bucket = bucket
self.key = key
self.version_id = version_id
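# --- usage sketch (never executed here): the typical flow built from the helpers
# above. Assumes boto3/s3fs are configured with credentials; the bucket, key and
# version id are placeholders, and `buf` is any readable file-like buffer. ---
def _example_redaction_flow(s3fs_filesystem, buf):
    import boto3
    client = boto3.client("s3")
    bucket, key, source_version = "example-bucket", "data/part-0000.parquet", "abc123"
    validate_bucket_versioning(client, bucket)
    new_version = save(
        s3fs_filesystem, client, buf, bucket, key, {"redacted": "true"}, source_version
    )
    try:
        verify_object_versions_integrity(client, bucket, key, source_version, new_version)
        delete_old_versions(client, bucket, key, new_version)
    except IntegrityCheckFailedError:
        # a concurrent write slipped in between read and write; undo our new version
        rollback_object_version(client, bucket, key, new_version, on_error=logger.error)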
| 1.890625 | 2 |
libspn/graph/op/concat.py | pronobis/libspn | 22 | 12787476 | from itertools import chain
import tensorflow as tf
from libspn.graph.node import OpNode, Input
from libspn import utils
from libspn.inference.type import InferenceType
from libspn.exceptions import StructureError
from libspn.utils.serialization import register_serializable
@register_serializable
class Concat(OpNode):
"""An op node that concatenates all inputs into a single output tensor.
Args:
*inputs: Inputs of this node. See :meth:`~libspn.Input.as_input` for
possible values.
name (str): Name of the node.
"""
def __init__(self, *inputs, name="Concat"):
super().__init__(inference_type=InferenceType.MARGINAL, name=name)
self.set_inputs(*inputs)
def serialize(self):
data = super().serialize()
data['inputs'] = [(i.node.name, i.indices) for i in self._inputs]
return data
def deserialize(self, data):
super().deserialize(data)
self.set_inputs()
def deserialize_inputs(self, data, nodes_by_name):
super().deserialize_inputs(data, nodes_by_name)
self._inputs = tuple(Input(nodes_by_name[nn], i)
for nn, i in data['inputs'])
@property
def inputs(self):
return self._inputs
def set_inputs(self, *inputs):
"""Set the inputs of this node. If no arguments are given, all existing
inputs get disconnected.
Args:
*inputs (input_like): Inputs of this node. See
:meth:`~libspn.Input.as_input` for possible inputs.
"""
self._inputs = self._parse_inputs(*inputs)
def add_inputs(self, *inputs):
"""Add more inputs to this node.
Args:
*inputs (input_like): Inputs of this node. See
:meth:`~libspn.Input.as_input` for possible inputs.
"""
self._inputs = self._inputs + self._parse_inputs(*inputs)
@property
def _const_out_size(self):
return False
@utils.docinherit(OpNode)
def _compute_out_size(self, *input_out_sizes):
if not self._inputs:
raise StructureError("%s is missing inputs." % self)
return sum(self._gather_input_sizes(*input_out_sizes))
@utils.docinherit(OpNode)
def _compute_scope(self, *input_scopes):
if not self._inputs:
raise StructureError("%s is missing inputs." % self)
input_scopes = self._gather_input_scopes(*input_scopes)
return list(chain.from_iterable(input_scopes))
@utils.docinherit(OpNode)
def _compute_valid(self, *input_scopes):
if not self._inputs:
raise StructureError("%s is missing inputs." % self)
_, *input_scopes_ = self._gather_input_scopes(*input_scopes)
# If already invalid, return None
if any(s is None for s in input_scopes_):
return None
else:
return self._compute_scope(*input_scopes)
@utils.docinherit(OpNode)
@utils.lru_cache
def _compute_log_value(self, *input_tensors):
# Check inputs
if not self._inputs:
raise StructureError("%s is missing inputs." % self)
gathered_inputs = self._gather_input_tensors(*input_tensors)
# Concatenate inputs
return tf.concat(gathered_inputs, axis=1)
@utils.docinherit(OpNode)
def _compute_log_mpe_value(self, *input_tensors):
return self._compute_log_value(*input_tensors)
@utils.lru_cache
def _compute_log_mpe_path(self, counts, *input_values, use_unweighted=False):
# Check inputs
if not self._inputs:
raise StructureError("%s is missing inputs." % self)
# Split counts for each input
input_sizes = self.get_input_sizes(*input_values)
split = tf.split(counts, num_or_size_splits=input_sizes, axis=1)
return self._scatter_to_input_tensors(*[(t, v) for t, v in
zip(split, input_values)])
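# --- standalone illustration (not part of libspn's public API): the concat/split
# bookkeeping performed by _compute_log_value and _compute_log_mpe_path above. The
# tensors are stand-ins for gathered input values and backward-pass counts. ---
def _example_concat_split():
    value_a = tf.ones([4, 3])    # stand-in for gathered values of a 3-wide input
    value_b = tf.zeros([4, 2])   # stand-in for gathered values of a 2-wide input
    joined = tf.concat([value_a, value_b], axis=1)            # upward pass: [batch, 5]
    counts = tf.ones_like(joined)                             # stand-in for MPE counts
    counts_a, counts_b = tf.split(counts, num_or_size_splits=[3, 2], axis=1)
    return joined, counts_a, counts_b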
| 2.3125 | 2 |
bhs/bhs/envs/Element.py | rasorensen90/muzero-general | 0 | 12787477 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Wed May 29 09:37:43 2019
@author: RTS
"""
class Element:
def __init__(self, ID = None, inputElement = None, outputElement = None):
self.ID = ID
self.inputElements = [inputElement]
self.outputElements = [outputElement]
self.tote = None
self.cost = [1]
def setElements(self, inputElement, outputElement):
self.setInputElements(inputElement)
self.setOutputElements(outputElement)
def setInputElements(self, connector, inputElement):
self.inputElements[connector] = inputElement
def setOutputElements(self, connector, outputElement):
self.outputElements[connector] = outputElement
def push(self, tote):
if self.tote is None:
self.tote = tote
self.tote.element = self
else:
raise Exception('Target element was not empty')
def pull(self):
tote = self.tote
self.tote = None
return tote
def isReadyToRecieve(self, elem=None):
return self.tote == None
def isToteReady(self):
return self.tote is not None and not self.tote.moved
def move(self, control = 0): # default control is 0
if self.isToteReady():
# self.tote.counter += 1
# if self.tote.counter < 2:
# self.tote.moved = True
# return
if self.outputElements[control].isReadyToRecieve(self):
tote = self.pull()
tote.moved = True
# tote.counter = 0
self.outputElements[control].push(tote)
elif self.outputElements[control].tote is None or self.outputElements[control].tote.moved:
self.tote.moved = True
# print('Tote '+str(tote.ID)+' is moved from ' + str(self.ID) + ' to '+ str(self.outputElements[control].ID) + ' with control ' + str(control))
class Diverter(Element):
def __init__(self, ID, inputElement = None, outputElement1 = None, outputElement2 = None): # output elements should be given as a list
self.ID = ID
self.inputElements = [inputElement]
self.outputElements = [outputElement1, outputElement2]
self.tote = None
self.cost = [1,1]
self.forced_control = None
class Merger(Element):
def __init__(self, ID, inputElement1 = None, inputElement2 = None, outputElement = None): # input elements should be given as a list
self.ID = ID
self.inputElements = [inputElement1, inputElement2]
self.outputElements = [outputElement]
self.tote = None
self.cost = [1]
self.nextInputElementIdx = 0 #input 0 has first appearence priority
def isReadyToRecieve(self, elem):
if self.tote==None:
if all([inputElem.isToteReady() for inputElem in self.inputElements]):
if elem == self.inputElements[self.nextInputElementIdx]:
self.nextInputElementIdx = [1,0][self.nextInputElementIdx] # toggle nextInputElementIdx between 0 and 1
return True
else:
return False
else:
self.nextInputElementIdx = 0 # reset self.nextInputElementIdx
return True
else:
return False
class Toploader(Element):
def __init__(self, ID, inputElement = None, outputElement = None):
self.ID = ID
self.inputElements = [inputElement]
self.outputElements = [outputElement]
self.totes = []
self.tote = None
self.cost = [1]
def push(self, totes):
if not isinstance(totes, list):
totes = [totes]
for t in totes:
t.element = self
self.totes.append(t)
if self.tote is None:
self.tote = self.totes.pop(0)
for t in self.totes:
t.moved=True
def pull(self):
tote = self.tote
if self.totes != []:
self.tote = self.totes.pop(0)
self.tote.moved = True
for t in self.totes:
t.moved=True
else:
self.tote = None
return tote
# def isOccupied(self):
# return False # Ulimited space
# def isToteReady(self):
# return self.totes != []
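# --- minimal usage sketch. The real Tote class lives elsewhere in this package, so a
# stub with the attributes used above ('ID', 'element', 'moved') stands in for it. ---
class _ToteStub:
    def __init__(self, ID):
        self.ID = ID
        self.element = None
        self.moved = False
def _example_chain():
    # Wire a simple three-element chain: e1 -> e2 -> e3, all single-slot elements.
    e1, e2, e3 = Element(ID=1), Element(ID=2), Element(ID=3)
    e1.setOutputElements(0, e2)
    e2.setInputElements(0, e1)
    e2.setOutputElements(0, e3)
    e3.setInputElements(0, e2)
    tote = _ToteStub('T1')
    e1.push(tote)
    e1.move()                 # tote advances from e1 to e2 and is flagged as moved
    assert e2.tote is tote and e1.tote is None
    tote.moved = False        # a simulation tick would normally clear this flag
    e2.move()                 # tote advances from e2 to e3
    assert e3.tote is tote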
| 3.453125 | 3 |
Projects/Project1/regan/__init__.py | adelezaini/MachineLearning | 0 | 12787478 | <gh_stars>0
from .crossvalidation import cross_validation
from .bootstrap_biasvar import Confidence_Interval, bias_variance_complexity, bootstrap, bias_variance_analysis
from .regression import lasso_reg, ridge_reg, OLS_solver, FrankeFunction, create_X, Split_and_Scale, MSE, R2, Plot_FrankeFunction, create_xyz_dataset, plot_ols_complexity, Rolling_Mean
from .run_and_plot import run_plot_compare, compare_lmd_CV, compare_lmd_BS
| 1.46875 | 1 |
imix/data/vqadata/dataset_test.py | linxi1158/iMIX | 23 | 12787479 | <gh_stars>10-100
from collections import OrderedDict
from typing import Any, Dict
import collections
import numpy as np
import torch
class VQA2Dataset(torch.utils.data.Dataset):
def __init__(self, path):
super().__init__()
self._load_npy(path)
def _load_npy(self, path):
self.db = np.load(path, allow_pickle=True)
self.start_idx = 0
if type(self.db) == dict:
self.metadata = self.db.get('metadata', {})
self.data = self.db.get('data', [])
else:
# TODO: Deprecate support for this
self.metadata = {'version': 1}
self.data = self.db
# Handle old imdb support
if 'image_id' not in self.data[0]:
self.start_idx = 1
if len(self.data) == 0:
self.data = self.db
def init_processors(self):
# self.text_processor = Vocab
self.text_processor = None
def get(self, item):
feature_path = item.get(self.feature_key, None)
if feature_path is None:
feature_path = self._get_feature_path_based_on_image(item)
return self.from_path(feature_path)
def _get_feature_path_based_on_image(self, item):
image_path = self._get_attrs(item)[0]
feature_path = '.'.join(image_path.split('.')[:-1]) + '.npy'
return feature_path
def from_path(self, path):
assert isinstance(path, str)
if 'genome' in path and path.endswith('.npy'):
path = str(int(path.split('_')[-1].split('.')[0])) + '.npy'
features, infos = self._get_image_features_and_info(path)
item = {}
for idx, image_feature in enumerate(features):
item['image_feature_%s' % idx] = image_feature
if infos is not None:
# infos[idx].pop("cls_prob", None)
item['image_info_%s' % idx] = infos[idx]
return item
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return self.load_item(idx)
def load_item(self, idx):
sample_info = self.data[idx]
current_sample = Sample()
if 'question_tokens' in sample_info:
text_processor_argument = {
'tokens': sample_info['question_tokens'],
'text': sample_info['question_str'],
}
else:
text_processor_argument = {'text': sample_info['question']}
processed_question = self.text_processor(text_processor_argument)
current_sample.text = processed_question['text']
if 'input_ids' in processed_question:
current_sample.update(processed_question)
current_sample.question_id = torch.tensor(sample_info['question_id'], dtype=torch.int)
if isinstance(sample_info['image_id'], int):
current_sample.image_id = torch.tensor(sample_info['image_id'], dtype=torch.int)
else:
current_sample.image_id = sample_info['image_id']
current_sample.text_len = torch.tensor(len(sample_info['question_tokens']), dtype=torch.int)
if self._use_features is True:
features = self.get(sample_info)
if hasattr(self, 'transformer_bbox_processor'):
features['image_info_0'] = self.transformer_bbox_processor(features['image_info_0'])
current_sample.update(features)
# Add details for OCR like OCR bbox, vectors, tokens here
current_sample = self.add_ocr_details(sample_info, current_sample)
# Depending on whether we are using soft copy this can add
# dynamic answer space
current_sample = self.add_answer_info(sample_info, current_sample)
return current_sample
class BatchCollator:
def __init__(self, dataset_name, dataset_type):
self._dataset_name = dataset_name
self._dataset_type = dataset_type
def __call__(self, batch):
# Create and return sample list with proper name
# and type set if it is already not a sample list
# (case of batched iterators)
sample_list = batch
if (
# Check if batch is a list before checking batch[0]
# or len as sometimes batch is already SampleList
isinstance(batch, list) and len(batch) == 1 and isinstance(batch[0], SampleList)):
sample_list = batch[0]
elif not isinstance(batch, SampleList):
sample_list = SampleList(batch)
sample_list.dataset_name = self._dataset_name
sample_list.dataset_type = self._dataset_type
return sample_list
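# Sketch: plugging the collator into a PyTorch DataLoader. The dataset argument,
# dataset name/type strings and batch size are placeholders; only BatchCollator's
# signature above is taken from this file.
def _example_loader(dataset):
    from torch.utils.data import DataLoader
    collator = BatchCollator('vqa2', 'train')
    return DataLoader(dataset, batch_size=8, collate_fn=collator)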
class Sample(OrderedDict):
"""Sample represent some arbitrary data. All datasets in IMIX must return
an object of type ``Sample``.
Args:
init_dict (Dict): Dictionary to init ``Sample`` class with.
Usage::
>>> sample = Sample({"text": torch.tensor(2)})
>>> sample.text.zero_()
# Custom attributes can be added to ``Sample`` after initialization
>>> sample.context = torch.tensor(4)
"""
def __init__(self, init_dict=None):
if init_dict is None:
init_dict = {}
super().__init__(init_dict)
def __setattr__(self, key, value):
if isinstance(value, collections.abc.Mapping):
value = Sample(value)
self[key] = value
def __setitem__(self, key, value):
if isinstance(value, collections.abc.Mapping):
value = Sample(value)
super().__setitem__(key, value)
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def fields(self):
"""Get current attributes/fields registered under the sample.
Returns:
List[str]: Attributes registered under the Sample.
"""
return list(self.keys())
class SampleList(OrderedDict):
"""``SampleList`` is used to collate a list of ``Sample`` into a batch
during batch preparation. It can be thought of as a merger of list of Dicts
into a single Dict.
If ``Sample`` contains an attribute 'text' of size (2) and there are 10 samples in
list, the returned ``SampleList`` will have an attribute 'text' which is a tensor
of size (10, 2).
Args:
samples (type): List of ``Sample`` from which the ``SampleList``
will be created.
Usage::
>>> sample_list = [
Sample({"text": torch.tensor(2)}),
Sample({"text": torch.tensor(2)})
]
>>> sample_list.text
torch.tensor([2, 2])
"""
_TENSOR_FIELD_ = '_tensor_field'
def __init__(self, samples=None):
super().__init__(self)
if samples is None:
samples = []
if len(samples) == 0:
return
if self._check_and_load_dict(samples):
return
# If passed sample list was in form of key, value pairs of tuples
# return after loading these
if self._check_and_load_tuple(samples):
return
fields = samples[0].keys()
for field in fields:
if isinstance(samples[0][field], torch.Tensor):
size = (len(samples), *samples[0][field].size())
self[field] = samples[0][field].new_empty(size)
if self._get_tensor_field() is None:
self._set_tensor_field(field)
else:
self[field] = [None for _ in range(len(samples))]
for idx, sample in enumerate(samples):
# it should be a tensor but not a 0-d tensor
if (isinstance(sample[field], torch.Tensor) and len(sample[field].size()) != 0
and sample[field].size(0) != samples[0][field].size(0)):
raise AssertionError('Fields for all samples must be equally sized. '
'{} is of different sizes'.format(field))
self[field][idx] = self._get_data_copy(sample[field])
if isinstance(samples[0][field], collections.abc.Mapping):
self[field] = SampleList(self[field])
def _check_and_load_tuple(self, samples):
if isinstance(samples[0], (tuple, list)) and isinstance(samples[0][0], str):
for kv_pair in samples:
self.add_field(kv_pair[0], kv_pair[1])
return True
else:
return False
def _check_and_load_dict(self, samples):
if isinstance(samples, collections.abc.Mapping):
for key, value in samples.items():
self.add_field(key, value)
return True
else:
return False
def _fix_sample_type(self, samples):
if not isinstance(samples[0], Sample):
proper_samples = []
for sample in samples:
proper_samples.append(Sample(sample))
samples = proper_samples
return samples
def __setattr__(self, key, value):
self[key] = value
def __getattr__(self, key):
if key not in self:
raise AttributeError('Key {} not found in the SampleList. '
'Valid choices are {}'.format(key, self.fields()))
fields = self.keys()
if key in fields:
return self[key]
sample = Sample()
for field in fields:
sample[field] = self[field][key]
return sample
def get_device(self):
field_tensor = self._get_tensor_field()
return self[field_tensor].device
def get_item_list(self, key):
"""Get ``SampleList`` of only one particular attribute that is present
in the ``SampleList``.
Args:
key (str): Attribute whose ``SampleList`` will be made.
Returns:
SampleList: SampleList containing only the attribute value of the key
which was passed.
"""
sample = self[key]
return SampleList([sample])
def copy(self):
"""Get a copy of the current SampleList.
Returns:
SampleList: Copy of current SampleList.
"""
sample_list = SampleList()
fields = self.fields()
for field in fields:
sample_list.add_field(field, self[field])
return sample_list
def fields(self):
"""Get current attributes/fields registered under the SampleList.
Returns:
List[str]: list of attributes of the SampleList.
"""
return list(self.keys())
def get_fields(self, fields):
"""Get a new ``SampleList`` generated from the current ``SampleList``
but contains only the attributes passed in `fields` argument.
Args:
fields (List[str]): Attributes whose ``SampleList`` will be made.
Returns:
SampleList: SampleList containing only the attribute values of the fields
which were passed.
"""
current_fields = self.fields()
return_list = SampleList()
for field in fields:
if field not in current_fields:
raise AttributeError('{} not present in SampleList. '
'Valid choices are {}'.format(field, current_fields))
return_list.add_field(field, self[field])
return return_list
def get_field(self, field):
"""Get value of a particular attribute.
Args:
field (str): Attribute whose value is to be returned.
"""
return self[field]
def _get_data_copy(self, data):
# if isinstance(data, torch.Tensor):
# copy_ = data.clone()
# else:
# copy_ = deepcopy(data)
# return copy_
return data
def _get_tensor_field(self):
return self.__dict__.get(SampleList._TENSOR_FIELD_, None)
def _set_tensor_field(self, value):
self.__dict__[SampleList._TENSOR_FIELD_] = value
def get_batch_size(self):
"""Get batch size of the current ``SampleList``.
There must be a tensor
be a tensor present inside sample list to use this function.
Returns:
int: Size of the batch in ``SampleList``.
"""
tensor_field = self._get_tensor_field()
assert tensor_field is not None, 'There is no tensor yet in SampleList'
return self[tensor_field].size(0)
def add_field(self, field, data):
"""Add an attribute ``field`` with value ``data`` to the SampleList.
Args:
field (str): Key under which the data will be added.
data (object): Data to be added, can be a ``torch.Tensor``, ``list``
or ``Sample``
"""
fields = self.fields()
tensor_field = self._get_tensor_field()
if (len(fields) != 0 and isinstance(data, torch.Tensor) and len(data.size()) != 0 and tensor_field is not None
and data.size(0) != self[tensor_field].size(0)):
raise AssertionError('A tensor field to be added must '
'have same size as existing tensor '
'fields in SampleList. '
'Passed size: {}, Required size: {}'.format(len(data), len(self[tensor_field])))
if isinstance(data, collections.abc.Mapping):
self[field] = SampleList(data)
else:
self[field] = self._get_data_copy(data)
if isinstance(self[field], torch.Tensor) and tensor_field is None:
self._set_tensor_field(field)
def to(self, device, non_blocking=True):
"""Similar to ``.to`` function on a `torch.Tensor`. Moves all of the
tensors present inside the ``SampleList`` to a particular device. If an
attribute's value is not a tensor, it is ignored and kept as it is.
Args:
device (str|torch.device): Device on which the ``SampleList`` should
moved.
non_blocking (bool): Whether the move should be non_blocking. Default: True
Returns:
SampleList: a SampleList moved to the ``device``.
"""
fields = self.keys()
sample_list = self.copy()
if not isinstance(device, torch.device):
if not isinstance(device, str):
raise TypeError("device must be either 'str' or " "'torch.device' type, {} found".format(type(device)))
device = torch.device(device)
for field in fields:
if hasattr(sample_list[field], 'to'):
sample_list[field] = sample_list[field].to(device, non_blocking=non_blocking)
return sample_list
def pin_memory(self):
"""In custom batch object, we need to define pin_memory function so
that PyTorch can actually apply pinning.
This function just individually pins all of the tensor fields
"""
fields = self.keys()
for field in fields:
if hasattr(self[field], 'pin_memory'):
# This will also handle nested sample list recursively
self[field] = self[field].pin_memory()
return self
def to_dict(self) -> Dict[str, Any]:
"""Converts a sample list to dict, this is useful for TorchScript and
for other internal API unification efforts.
Returns:
Dict[str, Any]: A dict representation of current sample list
"""
sample_dict = {}
fields = self.keys()
for field in fields:
# Handle nested sample list recursively
if hasattr(self[field], 'to_dict'):
sample_dict[field] = self[field].to_dict()
else:
sample_dict[field] = self[field]
return sample_dict
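# Hedged usage sketch (added for illustration; it only relies on the methods defined in
# this class and on torch, which the module already imports). The field names below are
# placeholders, not keys used elsewhere in the codebase.
if __name__ == "__main__":
    demo = SampleList()
    demo.add_field("input_ids", torch.ones(4, 7, dtype=torch.long))  # first tensor fixes batch size
    demo.add_field("labels", torch.zeros(4, dtype=torch.long))       # size(0) must match (4)
    demo.add_field("raw_text", ["a", "b", "c", "d"])                 # non-tensor fields pass through
    print(demo.get_batch_size())   # 4
    print(sorted(demo.fields()))   # ['input_ids', 'labels', 'raw_text']
    print(type(demo.to_dict()))    # <class 'dict'>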
| 2.421875 | 2 |
frappe/desk/doctype/desk_page/desk_page.py | shubhamgupta123/frappe | 0 | 12787480 | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.modules.export_file import export_to_files
from frappe.model.document import Document
class DeskPage(Document):
def validate(self):
if (not (frappe.flags.in_install or frappe.flags.in_patch or frappe.flags.in_test or frappe.flags.in_fixtures)
and not frappe.conf.developer_mode):
frappe.throw(_("You need to be in developer mode to edit this document"))
def on_update(self):
export_to_files(record_list=[['Desk Page', self.name]], record_module=self.module)
| 1.921875 | 2 |
spira/yevon/geometry/nets/net_list.py | qedalab/spira | 10 | 12787481 | <filename>spira/yevon/geometry/nets/net_list.py
import networkx as nx
from spira.core.typed_list import TypedList
from spira.yevon.geometry.nets.net import __Net__, Net
from spira.core.parameters.variables import FloatParameter
from spira.core.parameters.descriptor import ParameterDescriptor
from spira.core.parameters.restrictions import RestrictType
class NetList(TypedList):
""" List containing nets for each metal plane in a cell. """
__item_type__ = __Net__
def __repr__(self):
if len(self._list) == 0:
            return 'NetList is empty'
return '\n'.join('{}'.format(k) for k in enumerate(self._list))
def __str__(self):
return self.__repr__()
def __getitem__(self, key):
if isinstance(key, int):
return self._list[key]
else:
return self.get_from_label(key)
def __delitem__(self, key):
for i in range(0, len(self._list)):
if self._list[i] is key:
return list.__delitem__(self._list, i)
def flat_copy(self, level = -1):
        el = self.__class__()
for e in self._list:
el += e.flat_copy(level)
return el
def move(self, position):
for c in self._list:
c.move(position)
return self
def move_copy(self, position):
T = self.__class__()
for c in self._list:
T.append(c.move_copy(position))
return T
def transform_copy(self, transformation):
T = self.__class__()
for c in self._list:
T.append(c.transform_copy(transformation))
return T
def transform(self, transformation):
for c in self._list:
c.transform(transformation)
return self
def disjoint(self):
graphs = [net.g for net in self._list]
net = Net(g=nx.disjoint_union_all(graphs))
return net
def connect_shared_nodes(self):
net = self.disjoint()
graphs = list(nx.connected_component_subgraphs(net.g))
net = Net(g=nx.disjoint_union_all(graphs))
return net
class NetListParameter(ParameterDescriptor):
__type__ = NetList
def __init__(self, default=[], **kwargs):
kwargs['default'] = self.__type__(default)
kwargs['restrictions'] = RestrictType([self.__type__])
super().__init__(**kwargs)
def __repr__(self):
return ''
def __str__(self):
return ''
def call_param_function(self, obj):
f = self.get_param_function(obj)
value = f(self.__type__(), 100)
if value is None:
value = self.__type__()
new_value = self.__cache_parameter_value__(obj, value)
return new_value
| 2.46875 | 2 |
typos.py | CesarSMx/Selenium-with-Python-course | 0 | 12787482 | <reponame>CesarSMx/Selenium-with-Python-course
import unittest
from pyunitreport import HTMLTestRunner
from selenium import webdriver
class Typos(unittest.TestCase):
@classmethod
def setUpClass(cls):
driver_path = r'/mnt/c/Users/cesar/Documents/desarrollo/Selenium/chromedriver.exe'
brave_path = r'C:\program Files\braveSoftware\brave-browser\application\brave.exe'
option = webdriver.ChromeOptions()
option.binary_location = brave_path
cls.driver = webdriver.Chrome(executable_path=driver_path, chrome_options=option)
driver = cls.driver
driver.implicitly_wait(10)
driver.get('https://the-internet.herokuapp.com/')
driver.find_element_by_link_text('Typos').click()
def test_find_typos(self):
driver = self.driver
correct_text = r"Sometimes you'll see a typo, other times you won't."
text_to_check = driver.find_element_by_xpath(r'//*[@id="content"]/div/p[2]').text
tries = 1
while text_to_check != correct_text:
driver.refresh()
tries += 1
text_to_check = driver.find_element_by_xpath(r'//*[@id="content"]/div/p[2]').text
        self.assertEqual(text_to_check, correct_text)
        print(f'It took {tries} tries to fix the typo.')
@classmethod
def tearDownClass(cls):
cls.driver.quit()
if __name__ == "__main__":
unittest.main(verbosity = 2) | 3.109375 | 3 |
NASA SPACEAPPS CHALLENGE/Solution/Software part/Astronomical Data and Python Libraries/pyephem/pyephem-3.7.6.0/ephem/tests/test_launchpad_236872.py | sahirsharma/Martian | 1 | 12787483 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import ephem
# See whether asking for the rising-time of Mars hangs indefinitely.
class Launchpad236872Tests(unittest.TestCase):
def runTest(self):
mars = ephem.Mars()
boston = ephem.city('Boston')
boston.date = ephem.Date('2008/5/29 15:59:16')
boston.next_rising(mars)
| 2.75 | 3 |
billing/migrations/0009_longer_product_code.py | jayvdb/django-customer-billing | 13 | 12787484 | # Generated by Django 2.0.1 on 2018-01-26 09:41
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('billing', '0008_charge_deleted_and_reverses'),
]
operations = [
migrations.AlterField(
model_name='charge',
name='product_code',
field=models.CharField(blank=True, db_index=True, help_text='Identifies the kind of product being charged or credited', max_length=10, validators=[django.core.validators.RegexValidator(message='Between 4 and 10 uppercase letters or digits', regex='^[A-Z0-9]{4,10}$')]),
),
]
| 1.992188 | 2 |
simulations/ngspice/clock.py | matthuszagh/fmcw | 14 | 12787485 | <filename>simulations/ngspice/clock.py
#!/usr/bin/env python
from skidl.pyspice import (
lib_search_paths,
Part,
generate_netlist,
SPICE,
gnd,
node,
no_files,
)
import numpy as np
import os
no_files()
spicelib = os.getenv("SPICELIB")
fname = ".data/adc-filter.dat"
lib_search_paths[SPICE].append(spicelib)
vac = Part("pyspice", "SINEV", amplitude=1)
rt = Part("pyspice", "R", value=49.9)
rb = Part("pyspice", "R", value=49.9)
c = Part("pyspice", "C", value=100e-12)
rload = Part("pyspice", "R", value=1e6)
vac["p"] += rt["p"]
vac["n"] += rb["p"]
rt["n"] += c["p"], rload["p"]
rb["n"] += c["n"], rload["n"], gnd
circ = generate_netlist(libs=spicelib)
sim = circ.simulator()
waveforms = sim.ac(
variation="dec",
number_of_points=100,
start_frequency=1,
stop_frequency=100e6,
)
freq = waveforms.frequency
vinp = waveforms[node(vac["p"])]
vinn = waveforms[node(vac["n"])]
voutp = waveforms[node(rload["p"])]
with open(fname, "w") as f:
f.write("{:<12} {:<12} {:<12}\n".format("freq", "vratio", "phase"))
for fr, vin, vout in zip(
freq.as_ndarray(),
vinp.as_ndarray() - vinn.as_ndarray(),
voutp.as_ndarray(),
):
arg = np.imag(vout / vin) / np.real(vout / vin)
db = 20 * np.log10(abs(vout) / abs(vin))
f.write(
"{:<12.2f} {:<12.5f} {:<12.5f}\n".format(fr, db, np.arctan(arg),)
)
| 2.296875 | 2 |
recap_agr/libs/ndcg.py | MirkoLenz/ReCAP-Argument-Graph-Retrieval | 0 | 12787486 | # Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# https://github.com/kmbnw/rank_metrics/blob/master/python/ndcg.py
from __future__ import absolute_import, annotations
import numpy as np
"""
Implementation of normalized discounted cumulative gain.
Handy for testing ranking algorithms.
https://en.wikipedia.org/wiki/Discounted_cumulative_gain
"""
def cum_gain(relevance):
"""
Calculate cumulative gain.
This ignores the position of a result, but may still be generally useful.
@param relevance: Graded relevances of the results.
@type relevance: C{seq} or C{numpy.array}
"""
if relevance is None or len(relevance) < 1:
return 0.0
return np.asarray(relevance).sum()
def dcg(relevance, alternate=True):
"""
Calculate discounted cumulative gain.
@param relevance: Graded and ordered relevances of the results.
@type relevance: C{seq} or C{numpy.array}
@param alternate: True to use the alternate scoring (intended to
place more emphasis on relevant results).
@type alternate: C{bool}
"""
if relevance is None or len(relevance) < 1:
return 0.0
rel = np.asarray(relevance)
p = len(rel)
if alternate:
# from wikipedia: "An alternative formulation of
# DCG[5] places stronger emphasis on retrieving relevant documents"
log2i = np.log2(np.asarray(range(1, p + 1)) + 1)
return ((np.power(2, rel) - 1) / log2i).sum()
else:
log2i = np.log2(range(2, p + 1))
return rel[0] + (rel[1:] / log2i).sum()
def idcg(relevance, alternate=True):
"""
Calculate ideal discounted cumulative gain (maximum possible DCG).
@param relevance: Graded and ordered relevances of the results.
@type relevance: C{seq} or C{numpy.array}
@param alternate: True to use the alternate scoring (intended to
place more emphasis on relevant results).
@type alternate: C{bool}
"""
if relevance is None or len(relevance) < 1:
return 0.0
# guard copy before sort
rel = np.asarray(relevance).copy()
rel.sort()
return dcg(rel[::-1], alternate)
def ndcg(relevance, nranks, alternate=True):
"""
Calculate normalized discounted cumulative gain.
@param relevance: Graded and ordered relevances of the results.
@type relevance: C{seq} or C{numpy.array}
@param nranks: Number of ranks to use when calculating NDCG.
Will be used to rightpad with zeros if len(relevance) is less
than nranks
@type nranks: C{int}
@param alternate: True to use the alternate scoring (intended to
place more emphasis on relevant results).
@type alternate: C{bool}
"""
if relevance is None or len(relevance) < 1:
return 0.0
if nranks < 1:
raise Exception("nranks < 1")
rel = np.asarray(relevance)
pad = max(0, nranks - len(rel))
# pad could be zero in which case this will no-op
rel = np.pad(rel, (0, pad), "constant")
# now slice downto nranks
rel = rel[0 : min(nranks, len(rel))]
ideal_dcg = idcg(rel, alternate)
if ideal_dcg == 0:
return 0.0
return dcg(rel, alternate) / ideal_dcg
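# Worked example (added for illustration): with the default alternate scoring, a ranking
# that puts the single relevant item first scores 1.0, while pushing it to the last of
# three positions scores ((2**1 - 1) / log2(4)) / 1 = 0.5. Run this module directly to
# check the numbers against the formulas implemented above.
if __name__ == "__main__":
    print(ndcg([1, 0, 0], 3))  # 1.0
    print(ndcg([0, 0, 1], 3))  # 0.5
    print(dcg([3, 2, 0]))      # (2**3 - 1)/log2(2) + (2**2 - 1)/log2(3) + 0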
| 2.0625 | 2 |
jobRun.py | kylezhao96/sqgh-ms-flask | 0 | 12787487 | <reponame>kylezhao96/sqgh-ms-flask
'''
@Author: hua
@Date: 2019-12-18 17:22:18
@description:
@LastEditors : hua
@LastEditTime : 2019-12-18 17:23:51
'''
import environment
environment.init("job")
from app import sched
#start the jobs
sched.start() | 1.21875 | 1 |
tests/isbndb_tests.py | bbengfort/isbndb-python | 4 | 12787488 | #!/usr/bin/env python
from isbndb import ISBNdbException
from isbndb.models import *
from isbndb.client import ISBNdbClient
from isbndb.catalog import *
from unittest import TestCase
ACCESS_KEY = "<KEY>"
class ISBNdbTest(TestCase):
    def setUp(self):
        self.client = ISBNdbClient(access_key=ACCESS_KEY)
    def tearDown(self):
pass
def test_connection(self):
catalog = BookCollection(self.client)
result = catalog.isbn('0210406240', results='authors')
if __name__ == "__main__":
from unittest import main
main( )
| 2.4375 | 2 |
mne_kit_gui/__init__.py | mne-tools/mne-kit-gui | 1 | 12787489 | """Convenience functions for opening GUIs."""
# Authors: <NAME> <<EMAIL>>
#
# License: BSD-3-Clause
import os
from mne.utils import verbose, get_config
from ._utils import _check_mayavi_version
from ._backend import _testing_mode
__version__ = '1.1.dev0'
def _initialize_gui(frame, view=None):
"""Initialize GUI depending on testing mode."""
if _testing_mode(): # open without entering mainloop
return frame.edit_traits(view=view), frame
else:
frame.configure_traits(view=view)
return frame
def fiducials(subject=None, fid_file=None, subjects_dir=None):
"""Set the fiducials for an MRI subject.
Parameters
----------
subject : str
Name of the mri subject.
fid_file : None | str
Load a fiducials file different form the subject's default
("{subjects_dir}/{subject}/bem/{subject}-fiducials.fif").
subjects_dir : None | str
Overrule the subjects_dir environment variable.
Returns
-------
frame : instance of FiducialsFrame
The GUI frame.
Notes
-----
All parameters are optional, since they can be set through the GUI.
The functionality in this GUI is also part of :func:`coregistration`.
"""
_check_mayavi_version()
from ._backend import _check_backend
_check_backend()
from ._fiducials_gui import FiducialsFrame
frame = FiducialsFrame(subject, subjects_dir, fid_file=fid_file)
return _initialize_gui(frame)
def kit2fiff():
"""Convert KIT files to the fiff format.
The recommended way to use the GUI is through bash with::
$ mne kit2fiff
Returns
-------
frame : instance of Kit2FiffFrame
The GUI frame.
"""
_check_mayavi_version()
from ._backend import _check_backend
_check_backend()
from ._kit2fiff_gui import Kit2FiffFrame
frame = Kit2FiffFrame()
return _initialize_gui(frame)
| 2.4375 | 2 |
nerdle/build_nerdles.py | bacross/nerdle_wordle | 0 | 12787490 | import nerdle_cfg
import re
import luigi
import d6tflow
import itertools
import pandas as pd
import numpy as np
#helper functions
def check_len_int(nerdle):
nerdle_str = ''.join(nerdle)
try:
return all(len(x)==len(str(int(x))) for x in re.split('\+|\-|\*|\/|==',nerdle_str))
except:
return False
def rt_is_num(nerdle):
rt_arr = nerdle[np.where(np.array(nerdle)=='==')[0][0]+1:]
test_str = ''.join(rt_arr)
return test_str.isnumeric()
def join_elems_of_tups(list_o_tups):
return list(map(lambda x: ''.join(x),list_o_tups))
def test_eval(nerdle):
test_str = ''.join(nerdle)
try:
return eval(test_str)
except:
return False
class buildNerdles(d6tflow.tasks.TaskPqPandas):
nerdle_len = luigi.IntParameter()
def run(self):
nerdle_len = self.nerdle_len
nerdles = list(itertools.combinations_with_replacement(nerdle_cfg.nerd_list,nerdle_len))
#TODO: Optimize second list comprehension using filter if possible
nerdles = list(filter(
lambda nerdle: ('==' in nerdle)&
bool(any(i in nerdle for i in [x for x in nerdle_cfg.nerd_op_list if x!="=="])),nerdles))
nerdle_ser = pd.Series(nerdles)
nerdle_df = pd.DataFrame(nerdle_ser)
nerdle_df.columns=['nerdle_combinations']
#for each nerdle combination create permutations
        nerdle_df['perms'] = nerdle_df['nerdle_combinations'].apply(itertools.permutations, args=(nerdle_len,))
# can't start or end with an equals sign and turns permutation tuples into a list
nerdle_df['perm_red_stend_equal'] = nerdle_df['perms'].apply(lambda y: filter(lambda x:(list(x)[0]!='==')&(list(x)[-1]!='=='),y))
# equal sign appears only once
nerdle_df['perm_equal_once'] = nerdle_df['perm_red_stend_equal'].apply(lambda y: filter(lambda x: x.count('==')==1,y))
# elements to the right of the equal sign must be a number
nerdle_df['right_equal_must_be_number'] = nerdle_df['perm_equal_once'].apply(lambda y: filter(lambda x: rt_is_num(x),y))
#length of string has to be 9
nerdle_df['len_check'] = nerdle_df['right_equal_must_be_number'].apply(lambda y: filter(lambda x: len(x)==nerdle_len,y))
#check that non operater numbers are of proper length
nerdle_df['non_op_num_check'] = nerdle_df['len_check'].apply(lambda y: filter(lambda x: check_len_int(x),y))
#check that string evals properly
nerdle_df['eval_check'] = nerdle_df['non_op_num_check'].apply(lambda y: filter(lambda x: test_eval(x),y))
self.save(nerdle_df)
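# Hedged usage sketch (added for illustration, not from the original project):
# d6tflow tasks are normally executed via d6tflow.run(); the nerdle_len value of 8 is an
# assumption for standard-size nerdles, and the saved DataFrame can afterwards be
# reloaded with buildNerdles(nerdle_len=8).outputLoad().
if __name__ == '__main__':
    d6tflow.run(buildNerdles(nerdle_len=8))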
| 2.53125 | 3 |
source/scl.py | manojtummala/SUP | 4 | 12787491 | <filename>source/scl.py
import click
import requests
from tabulate import tabulate
@click.command()
@click.option('--section', help='What you want to see? Just type the section')
@click.argument('section')
def get(section):
"""
Simple CLI for accessing sathyabama latest updates
"""
"""This return a particular topic from sathyabama site [like news and events]"""
url_format = 'https://sathyabama-api.herokuapp.com/{}'
# click.echo(id)
response = requests.get(url_format.format(section))
if not response:
click.echo('Section is not yet available.')
else:
if section=='news':
table = make_table(response)
print(tabulate(table, headers=["NEWS"],
tablefmt="fancy_grid"))
elif section=='events':
table = make_table1(response)
print(tabulate(table, headers=["EVENTS"],
tablefmt="fancy_grid"))
# click.echo(response.json())
def make_table(response):
res = response.json()
result = []
for item in res["list"]:
new = []
new.append(item['News'] + '\n' + item['Time'] + '\n' + '[Read More]: ' + item['URL'])
result.append(new)
return result
def make_table1(response):
res = response.json()
result = []
for item in res["list"]:
new = []
new.append(item['Event'] + '\n' + item['Time'] + '\n' + item['Venue'] + '\n' + '[Read More]: ' + item['URL'])
result.append(new)
return result
| 3.578125 | 4 |
Snake.py | daydreamjesse/snake | 0 | 12787492 | __author__ = 'grahamhub'
import pygame
import random
import sys
import time
# colors
black = (0, 0, 0)
white = (255, 255, 255)
blue = (35, 25, 255)
green = (35, 255, 25)
red = (255, 35, 25)
count = 0
# width/height of snake segments
seg_width = 15
seg_height = 15
# space between each segment
seg_margin = 3
# set initial speed
x_change = seg_width + seg_margin
y_change = 0
def play():
while True:
font = pygame.font.Font(None, 60)
font.set_bold(True)
title = font.render("Press Enter to Play", True, white)
titlepos = title.get_rect()
titlepos.centerx = screen.get_rect().centerx
titlepos.centery = screen.get_rect().centery
screen.blit(title, titlepos)
pygame.display.flip()
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
return True
def score():
while True:
font = pygame.font.Font(None, 60)
font.set_bold(True)
title = font.render("Your score was " + str(count) + "!", True, white)
titlepos = title.get_rect()
titlepos.centerx = screen.get_rect().centerx
titlepos.centery = screen.get_rect().centery
screen.blit(title, titlepos)
pygame.display.flip()
time.sleep(3)
break
def replay():
while True:
font = pygame.font.Font(None, 60)
font.set_bold(True)
title = font.render("Press Enter to Replay", True, white)
titlepos = title.get_rect()
titlepos.centerx = screen.get_rect().centerx
titlepos.centery = screen.get_rect().centery
screen.blit(title, titlepos)
pygame.display.flip()
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
return True
if event.key == pygame.K_ESCAPE:
sys.exit()
class Segment(pygame.sprite.Sprite):
# class to represent one segment of the snake
def __init__(self, x, y):
super(Segment, self).__init__()
# set height/width
self.image = pygame.Surface([seg_width, seg_height])
self.image.fill(white)
# starting pos(top left corner)
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
class Block(pygame.sprite.Sprite):
# class for blocks to collect
def __init__(self):
super(Block, self).__init__()
self.image = pygame.Surface([seg_width, seg_height])
self.image.fill(green)
self.rect = self.image.get_rect()
# spawning the block
def spawn(self):
self.rect.x = random.randrange(10, 790)
self.rect.y = random.randrange(10, 590)
class Enemy(pygame.sprite.Sprite):
def __init__(self):
super(Enemy, self).__init__()
self.image = pygame.Surface([seg_width, seg_height])
self.image.fill(red)
self.rect = self.image.get_rect()
def spawn(self):
self.rect.x = random.randrange(10, 790)
self.rect.y = random.randrange(10, 590)
pygame.init()
screen = pygame.display.set_mode([800, 600])
pygame.display.set_caption('Snake')
points = pygame.sprite.Group()
obstacles = pygame.sprite.Group()
allspriteslist = pygame.sprite.Group()
block = Block()
points.add(block)
# create the snake
snake_segs = []
for i in range(5):
x = 250 - (seg_width + seg_margin) * i
y = 30
segment = Segment(x, y)
snake_segs.append(segment)
allspriteslist.add(segment)
clock = pygame.time.Clock()
enemies = []
def addenemy():
enemy = Enemy()
enemy.spawn()
enemies.append(enemy)
obstacles.add(enemies)
obstacles.draw(screen)
# spawn the first block
block.spawn()
if play() is True:
while True:
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
# speed = a segment plus the margin
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
x_change = (seg_width + seg_margin) * -1
y_change = 0
if event.key == pygame.K_RIGHT:
x_change = (seg_width + seg_margin)
y_change = 0
if event.key == pygame.K_UP:
x_change = 0
y_change = (seg_height + seg_margin) * -1
if event.key == pygame.K_DOWN:
x_change = 0
y_change = (seg_height + seg_margin)
# so that the snake doesn't keep growing:
old_segment = snake_segs.pop()
allspriteslist.remove(old_segment)
# where the new segment will be:
x = snake_segs[0].rect.x + x_change
y = snake_segs[0].rect.y + y_change
segment = Segment(x, y)
# if out of bounds
if x > 800 or x < 0:
allspriteslist.empty()
screen.fill(black)
break
if y > 600 or y < 0:
allspriteslist.empty()
screen.fill(black)
break
# put new segment into list
snake_segs.insert(0, segment)
allspriteslist.add(segment)
screen.fill(blue)
points.draw(screen)
obstacles.draw(screen)
allspriteslist.draw(screen)
# check for collisions
blocks_hit = pygame.sprite.spritecollide(segment, points, False)
if blocks_hit:
snake_segs.append(segment)
allspriteslist.add(segment)
block.spawn()
points.add(block)
addenemy()
count += 1
endgame = pygame.sprite.spritecollide(segment, obstacles, True)
if endgame:
allspriteslist.empty()
screen.fill(black)
break
pygame.display.flip()
# set speed
clock.tick(10)
score()
screen.fill(black)
if replay() is True:
for i in range(count):
enemies.pop()
obstacles.empty()
snake_segs = []
for i in range(5):
x = 250 - (seg_width + seg_margin) * i
y = 30
segment = Segment(x, y)
snake_segs.append(segment)
allspriteslist.add(segment)
x_change = (seg_width + seg_margin)
y_change = 0
count -= count
| 3.140625 | 3 |
rbc/space/utils/base.py | rebuildingcode/hardware | 0 | 12787493 | <reponame>rebuildingcode/hardware
def get_polygon_label(content):
if hasattr(content, 'name'):
# get label for polygon-type objects with name attribute
label = content.name
else:
# otherwise default to using the area value as the label
# this supports shapely's Polygon object
label = f"AREA: {content.area}"
return label | 2.796875 | 3 |
hankonator/apps/hankonator.py | Fisent/hankonator_web | 0 | 12787494 | <reponame>Fisent/hankonator_web
from pyforms.basewidget import BaseWidget
from confapp import conf
from pyforms_web.controls.control_button import ControlButton
from pyforms_web.controls.control_list import ControlList
from pyforms_web.controls.control_text import ControlText
class SiteCrawlApp(BaseWidget):
UID = 'site-crawl-app'
TITLE = 'Site crawl'
LAYOUT_POSITION = conf.ORQUESTRA_HOME
ORQUESTRA_MENU = 'left'
ORQUESTRA_MENU_ICON = 'browser'
ORQUESTRA_MENU_ORDER = 0
def __init__(self, *args, **kwargs):
super(SiteCrawlApp, self).__init__(*args, **kwargs)
self._url = ControlText('Page url')
self._getlinks_btn = ControlButton('Get links', default=self.___getlinks_btn_evt, label_visible=False)
self._links_list = ControlList('Links list', horizontal_headers=['Found links'])
self.formset = ['_url', '_getlinks_btn', '_links_list']
def ___getlinks_btn_evt(self):
self._links_list.value = [
['Link1'],
['Link2']
] | 2.28125 | 2 |
subunit2sql/db/models.py | mtreinish/subunit2sql | 3 | 12787495 | <filename>subunit2sql/db/models.py
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo.db.sqlalchemy import models # noqa
import sqlalchemy as sa
from sqlalchemy.ext import declarative
BASE = declarative.declarative_base()
class SubunitBase(models.ModelBase):
"""Base class for Subunit Models."""
__table_args__ = {'mysql_engine': 'InnoDB'}
__table_initialized__ = False
def save(self, session=None):
from subunit2sql.db import api as db_api
super(SubunitBase, self).save(session or db_api.get_session())
def keys(self):
return self.__dict__.keys()
def values(self):
return self.__dict__.values()
def items(self):
return self.__dict__.items()
def to_dict(self):
d = self.__dict__.copy()
d.pop("_sa_instance_state")
return self.__dict__.copy()
class Test(BASE, SubunitBase):
__tablename__ = 'tests'
__table_args__ = (sa.Index('ix_id', 'id'),
sa.Index('ix_test_id', 'test_id'))
id = sa.Column(sa.String(36), primary_key=True,
default=lambda: str(uuid.uuid4()))
test_id = sa.Column(sa.String(256))
run_count = sa.Column(sa.Integer())
success = sa.Column(sa.Integer())
failure = sa.Column(sa.Integer())
run_time = sa.Column(sa.Float())
class Run(BASE, SubunitBase):
__tablename__ = 'runs'
__table_args__ = (sa.Index('ix_run_id', 'id'), )
id = sa.Column(sa.String(36), primary_key=True,
default=lambda: str(uuid.uuid4()))
skips = sa.Column(sa.Integer())
fails = sa.Column(sa.Integer())
passes = sa.Column(sa.Integer())
run_time = sa.Column(sa.Float())
artifacts = sa.Column(sa.Text())
class TestRun(BASE, SubunitBase):
__tablename__ = 'test_runs'
__table_args__ = (sa.Index('ix_test_run_test_id', 'test_id'),
sa.Index('ix_test_run_run_id', 'run_id'),
sa.UniqueConstraint('test_id', 'run_id',
name='ix_test_run_test_id_run_id'))
id = sa.Column(sa.String(36), primary_key=True,
default=lambda: str(uuid.uuid4()))
test_id = sa.Column(sa.String(36), sa.ForeignKey('tests.id'),
nullable=False)
run_id = sa.Column(sa.String(36), sa.ForeignKey('runs.id'), nullable=False)
status = sa.Column(sa.String(256))
start_time = sa.Column(sa.DateTime())
stop_time = sa.Column(sa.DateTime())
class RunMetadata(BASE, SubunitBase):
__tablename__ = 'run_metadata'
__table_args__ = (sa.Index('ix_run_metadata_run_id', 'run_id'),)
id = sa.Column(sa.String(36), primary_key=True,
default=lambda: str(uuid.uuid4()))
key = sa.Column(sa.String(255))
value = sa.Column(sa.String(255))
run_id = sa.Column(sa.String(36), sa.ForeignKey('runs.id'))
class TestRunMetadata(BASE, SubunitBase):
__tablename__ = 'test_run_metadata'
__table_args__ = (sa.Index('ix_test_run_metadata_test_run_id',
'test_run_id'),)
id = sa.Column(sa.String(36), primary_key=True,
default=lambda: str(uuid.uuid4()))
key = sa.Column(sa.String(255))
value = sa.Column(sa.String(255))
test_run_id = sa.Column(sa.String(36), sa.ForeignKey('test_runs.id'))
class TestMetadata(BASE, SubunitBase):
__tablename__ = 'test_metadata'
__table_args__ = (sa.Index('ix_test_metadata_test_id',
'test_id'),)
id = sa.Column(sa.String(36), primary_key=True,
default=lambda: str(uuid.uuid4()))
key = sa.Column(sa.String(255))
value = sa.Column(sa.String(255))
test_id = sa.Column(sa.String(36), sa.ForeignKey('tests.id'))
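# Illustrative note (added): these declarative models share BASE.metadata, so a
# hypothetical local setup could create the schema with plain SQLAlchemy, e.g.
#   engine = sa.create_engine('sqlite://')
#   BASE.metadata.create_all(engine)
# before persisting rows through the subunit2sql.db.api session helpers.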
| 2.34375 | 2 |
misago/misago/users/serializers/ban.py | vascoalramos/misago-deployment | 2 | 12787496 | from django.utils.translation import gettext as _
from rest_framework import serializers
from ...core.utils import format_plaintext_for_html
from ..models import Ban
__all__ = ["BanMessageSerializer", "BanDetailsSerializer"]
def serialize_message(message):
if message:
return {"plain": message, "html": format_plaintext_for_html(message)}
class BanMessageSerializer(serializers.ModelSerializer):
message = serializers.SerializerMethodField()
class Meta:
model = Ban
fields = ["message", "expires_on"]
def get_message(self, obj):
if obj.user_message:
message = obj.user_message
elif obj.check_type == Ban.IP:
message = _("Your IP address is banned.")
else:
message = _("You are banned.")
return serialize_message(message)
class BanDetailsSerializer(serializers.ModelSerializer):
user_message = serializers.SerializerMethodField()
staff_message = serializers.SerializerMethodField()
class Meta:
model = Ban
fields = ["user_message", "staff_message", "expires_on"]
def get_user_message(self, obj):
return serialize_message(obj.user_message)
def get_staff_message(self, obj):
return serialize_message(obj.staff_message)
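# Illustrative note (added): like any DRF serializer, these are typically used as
# BanMessageSerializer(ban).data, which yields {"message": {"plain": ..., "html": ...},
# "expires_on": ...} for a Ban instance; the actual call sites live elsewhere in Misago.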
| 2.171875 | 2 |
django_grpc_bus/apps.py | rameezarshad/django-grpc-framework | 2 | 12787497 | from django.apps import AppConfig
class DjangoGRPCBusConfig(AppConfig):
name = 'django_grpc_bus'
| 1.132813 | 1 |
nxted/files/test3.py | xlcteam/nxtIDE | 8 | 12787498 | <filename>nxted/files/test3.py
def main():
TextOut(0, LCD_LINE1, "test3")
Wait(2000) | 1.382813 | 1 |
py-numpy/numpy_test.py | DeercoderPractice/exp-code | 0 | 12787499 | #!/usr/bin/env python
import numpy as np
def test_concatenate():
x = np.array([11, 22])
y = np.array([18, 7, 6])
z = np.array([1, 3, 5])
    print(np.concatenate((x,y,z)))
def test_concatenate2():
x = np.array(range(24))
x = x.reshape((3,4,2))
y = np.array(range(100, 124))
y = y.reshape((3,4,2))
z0 = np.concatenate((x,y))
z1 = np.concatenate((x,y), axis = 1)
z2 = np.concatenate((x,y), axis = 2)
    print(z0)
    print(z1)
    print(z2)
def test_slicing():
x = np.array([2, 5, 18, 14, 4])
y = x[:, np.newaxis]
    print(x)
    print(y)
def test_ones():
    """Generate one/zero arrays with np.ones, np.zeros, and their *_like variants."""
    print("=============")
    x = np.ones((2, 3))
    print(x)
    y = np.ones((3, 4), dtype=int)  # add an explicit dtype for this one
    print(y)
    z = np.zeros((2, 4))
    print(z)
    x = np.array([2, 5, 8, 13, 14, 4])
    print(np.ones_like(x))
    print(np.zeros_like(x))
def main():
test_concatenate()
test_concatenate2()
test_slicing()
test_ones()
if __name__ == '__main__':
main()
| 3.78125 | 4 |
thermodynamics/mix/equilibrium.py | luis-zepeda/thermodynamics | 0 | 12787500 | from ..helpers import eos
from ..helpers import alfaFunctions
from ..helpers.eosHelpers import A_fun, B_fun, getCubicCoefficients, getMixFugacity,getMixFugacityCoef, dAdT_fun
from ..solvers.cubicSolver import cubic_solver
from ..helpers import temperatureCorrelations as tempCorr
from ..helpers import mixing_rules
from numpy import log, exp, sqrt,absolute, array,sum
from scipy.optimize import fsolve, newton, root
from scipy.integrate import quad
def solve_eos(t,p,tc,pc,acentric,liq_compositions,vap_compositions,kij,method='pr',alfa_function='alfa_peng_robinson',mixing_rule='van_der_waals',diagram=False,properties=False,heat_capacities=None):
# Vectorization
tc = array(tc)
pc= array(pc)
acentric = array(acentric)
liq_compositions=array(liq_compositions)
vap_compositions = array(vap_compositions)
kij = array(kij)
# Method selection
eos_fun = eos.selector(method)
u,w,omega_a,omega_b,L = eos_fun()
# Alfa function selection
alfa_fun = alfaFunctions.selector(alfa_function)
alfa= alfa_fun(t,tc,acentric)
Ai = A_fun(t,p,tc,pc,acentric,omega_a,alfa)
Bi = B_fun(t,p,tc,pc,omega_b)
# Mixing rules
mixing_rule_used = mixing_rules.selector(mixing_rule)
A_liq,B_liq,A_i_liq,Aij_liq,dAdT_liq = mixing_rule_used(liq_compositions,tc,acentric,kij,Ai,Bi,alfa,alfa_fun,t)
A_vap,B_vap,A_i_vap,Aij_vap,dAdT_vap = mixing_rule_used(vap_compositions,tc,acentric,kij,Ai,Bi,alfa,alfa_fun,t)
coefficients_liq = getCubicCoefficients(A_liq,B_liq,u,w)
coefficients_vap = getCubicCoefficients(A_vap,B_vap,u,w)
z_liq= cubic_solver(coefficients_liq,diagram,B_liq)
z_vap = cubic_solver(coefficients_vap,diagram,B_vap)
z_liq = z_liq[0] if isinstance(z_liq,tuple) else z_liq
z_vap = z_vap[1] if isinstance(z_vap,tuple) else z_vap
liq_fugacity_coef = getMixFugacityCoef(z_liq,A_liq,B_liq,A_i_liq,Bi,L)
vap_fugacity_coef = getMixFugacityCoef(z_vap,A_vap,B_vap,A_i_vap,Bi,L)
if(properties):
liq_fugacity = getMixFugacity(z_liq,A_liq,B_liq,A_i_liq,B_liq,L,liq_compositions,p)
vap_fugacity = getMixFugacity(z_vap,A_vap,B_vap,A_i_vap,B_vap,L,vap_compositions,p)
heat_capacities = array(heat_capacities)
ideal_enthalpies = get_ideal_enthalpy(heat_capacities,t)
ideal_entropies = get_ideal_entropy(heat_capacities,t,p)
dAdt = dAdT_fun(t,p,tc,pc,acentric,omega_a,alfa_fun)
enthalpy_liq = get_real_enthalpy(ideal_enthalpies,t,z_liq,A_liq,dAdt,B_liq,L)
enthalpy_vap = get_real_enthalpy(ideal_enthalpies,t,z_vap,A_vap,dAdt,B_vap,L)
entropy_liq = get_real_entropy(ideal_entropies,z_liq,A_liq,dAdt,B_liq,L)
entropy_vap = get_real_entropy(ideal_entropies,z_vap,A_vap,dAdt,B_vap,L)
response = {
"liq_fugacity":liq_fugacity,
"vap_fugacity":vap_fugacity,
"enthalpy_liq":enthalpy_liq,
"enthalpy_vap":enthalpy_vap,
"entropy_liq":entropy_liq,
"entropy_vap":entropy_vap,
"z_liq":z_liq,
"z_vap":z_vap,
"liq_compositions":liq_compositions,
"vap_compositions":vap_compositions
}
return response
return (liq_fugacity_coef,vap_fugacity_coef)
def bubble_temperature(t,p,tc,pc,acentric,liq_compositions,vap_compositions,kij,delta_t=0.1,method='pr',alfa_function='alfa_peng_robinson',mixing_rule='van_der_waals'):
liq_fugacity_coef,vap_fugacity_coef = solve_eos(t,p,tc,pc,acentric,liq_compositions,vap_compositions,kij,method,alfa_function,mixing_rule)
Ki = liq_fugacity_coef/vap_fugacity_coef
Sy = sum(Ki*liq_compositions)
E = log(Sy)
attempts=0
new_t=t
new_vap_compositions = vap_compositions
while(absolute(E) >= 1e-9):
if(attempts == 500):
            return 'Problem cannot be solved'
t0 = new_t + delta_t
liq_fugacity_coef0,vap_fugacity_coef0 = solve_eos(t0,p,tc,pc,acentric,liq_compositions,new_vap_compositions,kij,method,alfa_function,mixing_rule)
Ki0 = liq_fugacity_coef0/vap_fugacity_coef0
Sy0 = sum(Ki0*liq_compositions)
E0 = log(Sy0)
new_t = (new_t*t0*(E0-E))/(t0*E0-new_t*E)
Sy = sum(Ki*liq_compositions)
new_vap_compositions = (Ki*liq_compositions)/Sy
liq_fugacity_coef,vap_fugacity_coef = solve_eos(new_t,p,tc,pc,acentric,liq_compositions,new_vap_compositions,kij,method,alfa_function,mixing_rule)
Ki = liq_fugacity_coef/vap_fugacity_coef
Sy = sum(Ki*liq_compositions)
E=log(Sy)
attempts +=1
return(new_t,p,liq_compositions,new_vap_compositions)
def bubble_pressure(t,p,tc,pc,acentric,liq_compositions,vap_compositions,kij,delta_p=0.001,method='pr',alfa_function='alfa_peng_robinson',mixing_rule='van_der_waals'):
liq_fugacity_coef,vap_fugacity_coef = solve_eos(t,p,tc,pc,acentric,liq_compositions,vap_compositions,kij,method,alfa_function,mixing_rule)
Ki = liq_fugacity_coef/vap_fugacity_coef
Sy = sum(Ki*liq_compositions)
E = Sy -1
attempts=0
new_p=p
new_vap_compositions = vap_compositions
while(absolute(E) >= 1e-9):
if(attempts == 100):
            return 'Problem cannot be solved'
p0=new_p*(1+delta_p)
liq_fugacity_coef0,vap_fugacity_coef0 = solve_eos(t,p0,tc,pc,acentric,liq_compositions,new_vap_compositions,kij,method,alfa_function,mixing_rule)
Ki0 = liq_fugacity_coef0/vap_fugacity_coef0
Sy0 = sum(Ki0*liq_compositions)
E0=Sy0-1
new_p = (new_p*p0*(E0-E))/(p0*E0-new_p*E)
Sy = sum(Ki*liq_compositions)
new_vap_compositions = (Ki*liq_compositions)/Sy
liq_fugacity_coef,vap_fugacity_coef = solve_eos(t,new_p,tc,pc,acentric,liq_compositions,new_vap_compositions,kij,method,alfa_function,mixing_rule)
Ki = liq_fugacity_coef/vap_fugacity_coef
Sy = sum(Ki*liq_compositions)
E = Sy -1
attempts +=1
return(t,new_p,liq_compositions,new_vap_compositions)
def dew_temperature(t,p,tc,pc,acentric,liq_compositions,vap_compositions,kij,delta_t=0.1,method='pr',alfa_function='alfa_peng_robinson',mixing_rule='van_der_waals'):
liq_fugacity_coef,vap_fugacity_coef = solve_eos(t,p,tc,pc,acentric,liq_compositions,vap_compositions,kij,method,alfa_function,mixing_rule)
Ki = liq_fugacity_coef/vap_fugacity_coef
Sx = sum(vap_compositions/Ki)
E = log(Sx)
attempts=0
new_t=t
new_liq_compositions = liq_compositions
while(absolute(E) >= 1e-9):
if(attempts == 500):
            return 'Problem cannot be solved'
t0 = new_t + delta_t
liq_fugacity_coef0,vap_fugacity_coef0 = solve_eos(t0,p,tc,pc,acentric,new_liq_compositions,vap_compositions,kij,method,alfa_function,mixing_rule)
Ki0 = liq_fugacity_coef0/vap_fugacity_coef0
Sx0 = sum(vap_compositions/Ki0)
E0 = log(Sx0)
new_t = (new_t*t0*(E0-E))/(t0*E0-new_t*E)
Sx = sum(vap_compositions/Ki)
new_liq_compositions = vap_compositions/(Ki*Sx)
liq_fugacity_coef,vap_fugacity_coef = solve_eos(new_t,p,tc,pc,acentric,new_liq_compositions,vap_compositions,kij,method,alfa_function,mixing_rule)
Ki = liq_fugacity_coef/vap_fugacity_coef
Sx = sum(vap_compositions/Ki)
E = log(Sx)
attempts +=1
return(new_t,p,new_liq_compositions,vap_compositions)
def dew_pressure(t,p,tc,pc,acentric,liq_compositions,vap_compositions,kij,delta_p=0.001,method='pr',alfa_function='alfa_peng_robinson',mixing_rule='van_der_waals'):
liq_fugacity_coef,vap_fugacity_coef = solve_eos(t,p,tc,pc,acentric,liq_compositions,vap_compositions,kij,method,alfa_function,mixing_rule)
Ki = liq_fugacity_coef/vap_fugacity_coef
Sx = sum(vap_compositions/Ki)
E = Sx -1
attempts=0
new_p=p
new_liq_compositions = liq_compositions
while(absolute(E) >= 1e-9):
if(attempts == 100):
            return 'Problem cannot be solved'
p0=new_p*(1+delta_p)
liq_fugacity_coef0,vap_fugacity_coef0 = solve_eos(t,p0,tc,pc,acentric,new_liq_compositions,vap_compositions,kij,method,alfa_function,mixing_rule)
Ki0 = liq_fugacity_coef0/vap_fugacity_coef0
Sx0 = sum(vap_compositions/Ki0)
E0=Sx0-1
new_p = (new_p*p0*(E0-E))/(p0*E0-new_p*E)
Sx = sum(vap_compositions/Ki)
new_liq_compositions = vap_compositions/(Ki*Sx)
liq_fugacity_coef,vap_fugacity_coef = solve_eos(t,new_p,tc,pc,acentric,new_liq_compositions,vap_compositions,kij,method,alfa_function,mixing_rule)
Ki = liq_fugacity_coef/vap_fugacity_coef
Sx = sum(vap_compositions/Ki)
E = Sx -1
attempts +=1
return(t,new_p,new_liq_compositions,vap_compositions)
def flash(t,p,tc,pc,acentric,feed_compositions,liq_compositions,vap_compositions,v_f,kij,delta_p=0.0001,method='pr',alfa_function='alfa_peng_robinson',mixing_rule='van_der_waals'):
tau=1
while(absolute(tau)> 1e-5):
liq_fugacity_coef,vap_fugacity_coef = solve_eos(t,p,tc,pc,acentric,liq_compositions,vap_compositions,kij,method,alfa_function,mixing_rule)
Ki = liq_fugacity_coef/vap_fugacity_coef
S = sum((feed_compositions*(Ki-1))/(1+(v_f*(Ki-1))))
S0 = sum((-feed_compositions*(Ki-1)**2)/(1+v_f*(Ki-1))**2)
v_f = v_f-(S/S0)
liq_compositions0 = feed_compositions/(1+v_f*(Ki-1))
Sx=sum(liq_compositions0)
liq_compositions = liq_compositions0/Sx
vap_compositions0=liq_compositions0*Ki
Sy=sum(vap_compositions0)
vap_compositions=vap_compositions0/Sy
tau=sum(absolute(liq_compositions*liq_fugacity_coef-vap_compositions*vap_fugacity_coef))
return (t,p,feed_compositions,liq_compositions,vap_compositions,v_f)
def get_ideal_enthalpy(heat_capacities,t):
ideal_enthalpies = []
for cp in heat_capacities:
number, constants = cp
heat_capacity_equation = tempCorr.selector(number)
enthalpy,_ = quad(heat_capacity_equation,298,t,args=(constants,))
ideal_enthalpies.append(enthalpy)
return array(ideal_enthalpies)
def get_ideal_entropy(heat_capacities,t,p):
R=8.314
ideal_entropies = []
for cp in heat_capacities:
number,constants = cp
heat_capacity_equation = lambda t,constants :tempCorr.selector(number)(t,constants)/t
I,_ = quad(heat_capacity_equation,298,t,args=(constants,))
entropy = I - R*log(p)
ideal_entropies.append(entropy)
return array(ideal_entropies)
def get_real_enthalpy(ideal_enthalpies,t,z,A,dAdt,B,L):
R=8.314
enthalpies = ideal_enthalpies + R*t*(z-1+((dAdt-A)/B)*L(z,B))
return enthalpies
def get_real_entropy(ideal_entropies,z,A,dAdt,B,L):
R=8.314
entropies = ideal_entropies + R*(log(z-B)+dAdt/B*L(z,B))
return entropies
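# Hedged usage sketch (added for illustration only), showing the call signature of
# solve_eos; the numbers below are assumptions, not values from this project.
def _example_usage():
    """Illustrative only: equimolar binary flash of fugacity coefficients with the
    Peng-Robinson EOS and van der Waals mixing rules. The critical constants are rough
    methane/ethane values, the state point and zero kij are arbitrary, and units only
    need to be consistent between p/pc and t/tc. Accuracy of the result is not asserted.
    """
    tc = [190.6, 305.3]            # K
    pc = [45.99, 48.72]            # bar, same unit as p below
    acentric = [0.012, 0.100]
    kij = [[0.0, 0.0], [0.0, 0.0]]
    z = [0.5, 0.5]
    liq_coefs, vap_coefs = solve_eos(
        t=250.0, p=20.0, tc=tc, pc=pc, acentric=acentric,
        liq_compositions=z, vap_compositions=z, kij=kij,
        method='pr', alfa_function='alfa_peng_robinson',
        mixing_rule='van_der_waals')
    return liq_coefs, vap_coefs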
| 1.828125 | 2 |
bert/models/bert/cqa_supports.py | fanshiqing/DAPPLE | 50 | 12787501 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import math
import numpy as np
import tokenization
import six
import tensorflow as tf
from tensorflow import logging
class EvalResults(object):
def __init__(self, capacity):
self.metrics = {}
self.capacity = capacity
def add_dict(self, indict):
        for key, value in indict.items():
if key in self.metrics:
if len(self.metrics[key]) == self.capacity:
self.metrics[key].pop(0)
else:
self.metrics[key] = []
if isinstance(value, list):
self.metrics[key].append(value[-1])
else:
self.metrics[key].append(value)
def to_string(self):
res = ["%s:%.2f"%(key, np.mean(self.metrics[key]))
for key in self.metrics.keys()]
return " ".join(res)
class CQAExample(object):
"""A single training/test example."""
def __init__(self,
qas_id,
question_text,
doc_tokens,
orig_answer_text=None,
start_position=None,
end_position=None):
self.qas_id = qas_id
self.question_text = question_text
self.doc_tokens = doc_tokens
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
def __str__(self):
return self.__repr__()
def __repr__(self):
s = ""
s += "qas_id: %s" % (tokenization.printable_text(self.qas_id))
s += ", question_text: %s" % (
tokenization.printable_text(self.question_text))
s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
if self.start_position:
s += ", start_position: %d" % (self.start_position)
        if self.end_position:
s += ", end_position: %d" % (self.end_position)
return s
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputPretrainExample(object):
"""A single training/test example for pretrain task."""
def __init__(self, guid, input_ids, input_mask, segment_ids, masked_lm_positions,
masked_lm_ids, masked_lm_weights, next_sentence_labels):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.masked_lm_positions = masked_lm_positions
self.masked_lm_ids = masked_lm_ids
self.masked_lm_weights = masked_lm_weights
self.next_sentence_labels = next_sentence_labels
class InputCQAFeatures(object):
"""A single set of features of data."""
def __init__(self,
unique_id,
example_index,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
input_mask,
segment_ids,
start_position=None,
end_position=None):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.start_position = start_position
self.end_position = end_position
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
def read_tsv(input_file, quotechar=None):
"""Reads a tab separated value file."""
import csv
with tf.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
def read_examples_do_nothing(input_file, is_training):
"""do nothing but just return input_file, reserved for tfrecord data"""
return input_file
def read_textmatch_examples(input_file, is_training):
"""Creates examples for the training and dev sets."""
if is_training:
set_type = 'train'
else:
set_type = 'dev'
examples = []
for (i, line) in enumerate(read_tsv(input_file)):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[3])
text_b = tokenization.convert_to_unicode(line[4])
label = tokenization.convert_to_unicode(line[0])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def read_cikm_examples(input_file, is_training):
"""Creates examples for the training and dev sets."""
if is_training:
set_type = 'train'
else:
set_type = 'dev'
examples = []
lengths = []
for (i, line) in enumerate(read_tsv(input_file)):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
lengths.append(len(line[1].split()) + len(line[2].split()))
text_a = tokenization.convert_to_unicode(line[1])
text_b = tokenization.convert_to_unicode(line[2])
label = tokenization.convert_to_unicode(line[0])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
print('length', np.mean(lengths))
raise Exception
return examples
def read_review_examples(input_file, is_training):
"""Creates examples for the training and dev sets."""
fold_id = 9 # fold 9 for training, the rest for testing
if is_training:
set_type = 'train'
else:
set_type = 'dev'
examples = []
lengths = []
for (i, line) in enumerate(read_tsv(input_file)):
# if is_training:
# if int(line[1]) == fold_id:
# continue
# else:
# if int(line[1]) != fold_id:
# continue
if int(line[1]) != fold_id:
continue
lengths.append(len(line[2].split()))
# guid = "%s-%s" % (set_type, i)
# text_a = tokenization.convert_to_unicode(line[2])
# text_b = None
# label = tokenization.convert_to_unicode(line[0])
# examples.append(
# InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
print('length', np.mean(lengths))
raise Exception
return examples
def read_ae_examples(input_file, is_training):
"""Creates examples for the training and dev sets."""
if is_training:
set_type = 'train'
else:
set_type = 'dev'
examples = []
for (i, line) in enumerate(read_tsv(input_file)):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = ' '.join(tokenization.convert_to_unicode(line[0]).split('|'))
text_b = ' '.join(tokenization.convert_to_unicode(line[1]).split('|'))
if float(line[2]) > 0.5:
label = tokenization.convert_to_unicode('1')
else:
label = tokenization.convert_to_unicode('0')
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def read_pretrain_examples(input_file, is_training):
"""Creates examples for the training and dev sets."""
fold_id = 9 # fold 9 for training, the rest for testing
if is_training:
set_type = 'train'
else:
set_type = 'dev'
examples = []
for (i, line) in enumerate(read_tsv(input_file)):
tokens = line
if i < 3:
print(i, line)
if len(tokens) != 7:
print(len(tokens))
for (i, token) in enumerate(tokens):
print(i, token)
raise Exception
guid = "%s-%s" % (set_type, i)
# print(len(tokens[0].split(',')), len(tokens[1].split(',')),
# len(tokens[2].split(',')), len(tokens[3].split(',')),
# len(tokens[4].split(',')), len(tokens[5].split(',')),
# len(tokens[6].split(',')))
examples.append(InputPretrainExample(
guid=guid,
input_ids=[int(idx) for idx in tokens[0].split(',')],
input_mask=[int(idx) for idx in tokens[1].split(',')],
segment_ids=[int(idx) for idx in tokens[2].split(',')],
masked_lm_positions=[int(idx) for idx in tokens[3].split(',')],
masked_lm_ids=[int(idx) for idx in tokens[4].split(',')],
masked_lm_weights=[float(idx) for idx in tokens[5].split(',')],
next_sentence_labels=int(tokens[6])))
return examples
# def read_coqa_examples(input_file, is_training):
# """Read a CoQA json file into a list of CQAExample."""
# with tf.gfile.Open(input_file, "r") as reader:
# input_data = json.load(reader)["data"]
#
# def is_whitespace(c):
# if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
# return True
# return False
#
# examples = []
# for entry in input_data[:10]:
# paragraph_text = entry["story"]
# doc_tokens = []
# char_to_word_offset = []
# prev_is_whitespace = True
# for c in paragraph_text:
# if is_whitespace(c):
# prev_is_whitespace = True
# else:
# if prev_is_whitespace:
# doc_tokens.append(c)
# else:
# doc_tokens[-1] += c
# prev_is_whitespace = False
# char_to_word_offset.append(len(doc_tokens) - 1)
#
# ############################################################
# # convert the convasational QAs to squad format, with history
# ############################################################
#
# story_id = entry['id']
# questions = [(item['input_text'], story_id + str(item['turn_id'])) for item in entry['questions']] # [(question, question_id), ()]
# answers = [(item['span_text'], item['span_start']) for item in entry['answers']]
#
# qas = []
# for i, (question, answer) in enumerate(zip(questions, answers)):
# start_index = 0 if i - int(FLAGS.history) < 0 else i - int(FLAGS.history)
# end_index = i
# question_with_histories = ''
# # prepend historical questions and answers
# for each_question, each_answer in zip(questions[start_index: end_index], answers[start_index: end_index]):
# question_with_histories += each_question[0] + ' ' + each_answer[0] + ' '
# # add the current question
# question_with_histories += question[0]
# if answer[1] == -1:
# qas.append({'id': question[1], 'question': question_with_histories, 'answers': [{'answer_start': -1, 'text': "unknown"}]})
# else:
# qas.append({'id': question[1], 'question': question_with_histories, 'answers': [{'answer_start': answer[1], 'text': answer[0]}]})
#
# for qa in qas:
# qas_id = qa["id"]
# question_text = qa["question"]
# start_position = None
# end_position = None
# orig_answer_text = None
#
# # if is_training:
# # we read in the groundtruth answer bothing druing training and predicting, because we need to compute acc and f1 at predicting time.
# if len(qa["answers"]) != 1:
# raise ValueError(
# "For training, each question should have exactly 1 answer.")
# answer = qa["answers"][0]
# orig_answer_text = answer["text"]
# answer_offset = answer["answer_start"]
# answer_length = len(orig_answer_text)
# start_position = char_to_word_offset[answer_offset]
# end_position = char_to_word_offset[answer_offset + answer_length - 1]
# # Only add answers where the text can be exactly recovered from the
# # document. If this CAN'T happen it's likely due to weird Unicode
# # stuff so we will just skip the example.
# #
# # Note that this means for training mode, every example is NOT
# # guaranteed to be preserved.
# actual_text = " ".join(doc_tokens[start_position:(end_position + 1)])
# cleaned_answer_text = " ".join(
# tokenization.whitespace_tokenize(orig_answer_text))
# if actual_text.find(cleaned_answer_text) == -1:
# logging.warning("Could not find answer: '%s' vs. '%s'",
# actual_text, cleaned_answer_text)
# continue
#
# example = CQAExample(
# qas_id=qas_id,
# question_text=question_text,
# doc_tokens=doc_tokens,
# orig_answer_text=orig_answer_text,
# start_position=start_position,
# end_position=end_position)
# examples.append(example)
# return examples
def convert_examples_to_features_do_nothing(examples, tokenizer, max_seq_length,
doc_stride, max_query_length, is_training):
"""do nothing but just return examples, reserved for tfrecord data"""
return examples
def convert_examples_to_features_cqa(examples, tokenizer, max_seq_length,
doc_stride, max_query_length, is_training):
"""Loads a data file into a list of `InputBatch`s."""
unique_id = 1000000000
features = []
for (example_index, example) in enumerate(examples):
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
# if is_training:
        # we do this for both training and predicting, because we also need the start/end positions at testing time to compute acc and f1
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
start_position = None
end_position = None
if is_training:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
if (example.start_position < doc_start or
example.end_position < doc_start or
example.start_position > doc_end or example.end_position > doc_end):
continue
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
else:
                # when predicting, we do not throw out any doc span to prevent label leaking
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if example_index < 20:
logging.info("*** Example ***")
logging.info("unique_id: %s" % (unique_id))
logging.info("example_index: %s" % (example_index))
logging.info("doc_span_index: %s" % (doc_span_index))
logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
logging.info("token_to_orig_map: %s" % " ".join(
["%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)]))
logging.info("token_is_max_context: %s" % " ".join([
"%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context)
]))
logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logging.info(
"input_mask: %s" % " ".join([str(x) for x in input_mask]))
logging.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
if is_training:
answer_text = " ".join(tokens[start_position:(end_position + 1)])
logging.info("start_position: %d" % (start_position))
logging.info("end_position: %d" % (end_position))
logging.info(
"answer: %s" % (tokenization.printable_text(answer_text)))
features.append(
InputFeatures(
unique_id=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
start_position=start_position,
end_position=end_position))
unique_id += 1
return features
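# --- Illustrative sketch (not part of the original file) ---
# The sliding-window chunking used above is easiest to see on a tiny,
# hypothetical example: 10 document tokens, a window of 6, and a stride of 4.
def _demo_doc_spans():
    _DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
    spans = []
    start_offset, n_tokens, max_tokens_for_doc, doc_stride = 0, 10, 6, 4
    while start_offset < n_tokens:
        length = min(n_tokens - start_offset, max_tokens_for_doc)
        spans.append(_DocSpan(start=start_offset, length=length))
        if start_offset + length == n_tokens:
            break
        start_offset += min(length, doc_stride)
    # Two overlapping windows: tokens [0, 6) and tokens [4, 10).
    assert spans == [(0, 6), (4, 6)]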
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer, model_type='classification'):
"""Loads a data file into a list of `InputBatch`s."""
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if model_type == 'classification':
label_id = label_map[example.label]
else:
label_id = float(example.label)
if ex_index < 5:
logging.info("*** Example ***")
logging.info("guid: %s" % (example.guid))
logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logging.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logging.info("label: %s (id = %d)" % (example.label, label_id))
features.append(
InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id))
return features
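# --- Illustrative sketch (not part of the original file) ---
# A minimal, hypothetical sentence pair showing the [CLS]/[SEP] layout and the
# type_ids convention described in the comments above.
def _demo_segment_ids():
    tokens_a = ["is", "this", "jack"]
    tokens_b = ["no", "it", "is"]
    tokens = ["[CLS]"] + tokens_a + ["[SEP]"] + tokens_b + ["[SEP]"]
    segment_ids = [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)
    # [CLS] is this jack [SEP] no it is [SEP]
    #   0    0    0    0   0   1  1  1   1
    assert len(tokens) == len(segment_ids) == 9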
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
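# --- Illustrative sketch (not part of the original file) ---
# The truncation heuristic above always pops from the longer sequence, so a
# hypothetical 6/3 token split truncated to a total length of 7 ends up as 4/3.
def _demo_truncate_seq_pair():
    tokens_a = ["a"] * 6
    tokens_b = ["b"] * 3
    _truncate_seq_pair(tokens_a, tokens_b, 7)
    assert (len(tokens_a), len(tokens_b)) == (4, 3)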
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was <NAME> born?
# Context: The leader was <NAME> (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
# Question: What country is the top exporter of electronics?
# Context: The Japanese electronics industry is the largest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
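# --- Illustrative sketch (not part of the original file) ---
# A toy version of the "(1895-1943)." example above, using a stub tokenizer
# (plain whitespace split) in place of the real WordPiece tokenizer.
def _demo_improve_answer_span():
    class _StubTokenizer(object):
        def tokenize(self, text):
            return text.split()
    doc_tokens = ["(", "1895", "-", "1943", ")", "."]
    # The whitespace-tokenized answer span covers all six sub-tokens; the
    # improved span narrows it down to just "1895".
    assert _improve_answer_span(doc_tokens, 0, 5, _StubTokenizer(), "1895") == (1, 1)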
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
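# --- Illustrative sketch (not part of the original file) ---
# Reproduces the 'bought' example from the comment above: span B ("to the
# store and bought") gives 'bought' min(4, 0) = 0 context, span C ("and bought
# a gallon of") gives min(1, 3) = 1, so span C is the max-context span.
def _demo_check_is_max_context():
    _DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
    doc = "the man went to the store and bought a gallon of milk".split()
    span_b = _DocSpan(start=3, length=5)
    span_c = _DocSpan(start=6, length=5)
    position = doc.index("bought")  # token index 7
    assert not _check_is_max_context([span_b, span_c], 0, position)
    assert _check_is_max_context([span_b, span_c], 1, position)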
def write_predictions(all_examples, all_features, all_results, n_best_size,
max_answer_length, do_lower_case, output_prediction_file,
output_nbest_file):
"""Write final predictions to the json file."""
logging.info("Writing predictions to: %s" % (output_prediction_file))
logging.info("Writing nbest to: %s" % (output_nbest_file))
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, do_lower_case)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
assert len(nbest_json) >= 1
all_predictions[example.qas_id] = nbest_json[0]["text"]
all_nbest_json[example.qas_id] = nbest_json
with tf.gfile.GFile(output_prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
with tf.gfile.GFile(output_nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
def get_final_text(pred_text, orig_text, do_lower_case):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = <NAME>
# orig_text = <NAME>
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "<NAME>".
#
# Therefore, we have to apply a semi-complicated alignment heuristic between
# `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
if FLAGS.verbose_logging:
logging.info(
"Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if FLAGS.verbose_logging:
logging.info("Length not equal after stripping spaces: '%s' vs '%s'",
orig_ns_text, tok_ns_text)
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in six.iteritems(tok_ns_to_s_map):
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if FLAGS.verbose_logging:
logging.info("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if FLAGS.verbose_logging:
logging.info("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text
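# --- Illustrative sketch (not part of the original file) ---
# A hypothetical projection in the spirit of the comment above: the normalized
# prediction "new york" is mapped back onto the raw text "New York's" and the
# trailing "'s" is dropped. Assumes tokenization.BasicTokenizer splits punctuation.
def _demo_get_final_text():
    final = get_final_text("new york", "New York's", do_lower_case=True)
    assert final == "New York"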
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
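# Illustrative sketch (not part of the original file), with made-up logits:
# the two highest-scoring positions are returned in descending score order.
def _demo_get_best_indexes():
    assert _get_best_indexes([0.1, 2.0, 1.5, -3.0], n_best_size=2) == [1, 2]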
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
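# --- Illustrative sketch (not part of the original file) ---
# _compute_softmax subtracts the max logit before exponentiating, so large
# made-up logits do not overflow and the probabilities still sum to one.
def _demo_compute_softmax():
    probs = _compute_softmax([1000.0, 1001.0, 1002.0])
    assert abs(sum(probs) - 1.0) < 1e-9
    assert probs[2] > probs[1] > probs[0]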
def get_dict(train_batch):
b_input_ids = train_batch['input_ids']
b_input_mask = train_batch['input_mask']
b_segment_ids = train_batch['segment_ids']
b_labels = train_batch['label_id']
return b_input_ids,b_input_mask,b_segment_ids,b_labels
| 2.53125 | 3 |
collatz.py | TravelingMan/pyexperiments | 0 | 12787502 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Collatz
A fun math problem.
"""
def collatz(number):
if number % 2 == 0:
x = number // 2  # integer division keeps the sequence in whole numbers
print(x)
return x
else:
x = 3 * number + 1
print(x)
return x
def get_number():
try:
user_number = int(input('Enter a number: '))
x = collatz(user_number)
while x != 1:
x = collatz(x)
except ValueError:
print('Please enter a standard integer (whole number)')
get_number()
get_number()
| 3.9375 | 4 |
all_call/input.py | jbudis/dante | 4 | 12787503 | from argparse import ArgumentParser, RawDescriptionHelpFormatter
import all_call.train
import numpy as np
import json
import sys
import pandas as pd
import re
import os
from glob import glob
from arguments import yaml_reader
# default parameters for inference
DEFAULT_MODEL_PARAMS = (-0.0107736, 0.00244419, 0.0, 0.00440608)
DEFAULT_READ_DROP = (479.596, -21.4382)
DEFAULT_READ_DROP_REL = (1.18332, -0.0475454)
DEFAULT_FIT_FUNCTION = "linear"
# functions for training
fit_functions = {"const": all_call.train.const_rate, "linear": all_call.train.linear_rate, "n2": all_call.train.n2_rate, "exp": all_call.train.exp_rate}
def load_arguments():
"""
Loads all arguments and sets default values.
:return: argparse arguments
"""
parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('dir_structure', type=path_exists, help='Directory with multiple Dante results directories. '
'Each Dante directory has filled "all_profiles.txt" and "all_profiles.true" files.')
# training.add_argument('--model-fig', type=str, default=None, help="File to write .png file with comparison of models and train data. Suffix determines the type of image file.")
# parser.add_argument('--profiles', type=str, required=True, help="TSV file or .npy file with one or more profiles. Required.")
parser.add_argument('--output-params', type=convert_to_absolute, default=None, help="File with parameters of the model to save to. Default: dir_structure/params.txt")
parser.add_argument('--output-profile', type=convert_to_absolute, default=None, help="File, where to collect all the profiles. Default: dir_structure/all_profiles.txt")
parser.add_argument('--output-true', type=convert_to_absolute, default=None, help="File, where to collect all the true values. Default: dir_structure/all_profiles.true")
parser.add_argument('--input-true', type=convert_to_absolute, default=None, help="File, with all the true values. Default: collect from Dante predictions")
parser.add_argument('--config-dir', type=path_exists, default=None, help="Directory, where to save new config files. Default: without saving")
parser.add_argument('--fit-function', choices=fit_functions.keys(), default="linear", help="Function to approximate deletion rate of STRs. Default: linear")
parser.add_argument('-v', '--verbosity-level', type=int, choices=range(3), default=1, help="Level of verbosity, default 1.")
parser.add_argument('-p', '--prepare', action='store_true', help="Only prepare files, do not run training.")
# input_args.add_argument('-l', '--len_repeating', type=int, default=3, help="Length of the STR. Used for read drop modelling.")
args = parser.parse_args()
# check
if args.output_profile is None:
args.output_profile = '%s/all_profiles.txt' % args.dir_structure
if args.output_true is None:
args.output_true = '%s/all_profiles.true' % args.dir_structure
if args.output_params is None:
args.output_params = '%s/params.txt' % args.dir_structure
return args
def convert_to_absolute(path):
"""
Converts a path to an absolute path; does not check whether it exists.
:param path: str - path
:return: str - absolute path
"""
return os.path.abspath(path)
def path_exists(path):
"""
Checks if the supplied path exists.
:param path: str - path to a file or dir
:return: str - absolute path to a file or dir
"""
path = convert_to_absolute(path)
if not os.path.exists(path):
print('ERROR: %s does not exist' % path)
exit(-1)
return path
def crawl_dante(dir_structure):
"""
Crawl Dante dir and collect config, profile, and true_vals files
:param dir_structure: str - directory above the Dante directory structures, here we start the crawl
:return: list(str) x3 - list of paths to configs, profiles, and true values
"""
# read all configs
configs = glob('%s/*/config.yaml' % dir_structure)
good_configs = []
profiles = []
true_vals = []
# check if every config has its profiles and true_vals
for config in configs:
profile = '%s/all_profiles.txt' % os.path.dirname(config)
if not os.path.exists(profile):
print('WARNING: "%s" exists but "%s" does not!!' % (config, profile))
continue
true_val = '%s/all_profiles.true' % os.path.dirname(config)
if not os.path.exists(true_val):
print('WARNING: "%s" exists but "%s" does not!!' % (config, true_val))
continue
# all ok, write them:
good_configs.append(config)
profiles.append(profile)
true_vals.append(true_val)
return good_configs, profiles, true_vals
def get_name(path):
"""
Get directory name from path to config/profile/...
:param path: str - path
:return: str - directory name without blanks
"""
directory = path.split('/')[-2]
directory = directory.replace(' ', '_')
return directory
def update_config(config_path, save_dir, params_file):
"""
Create new config file with inputs from the outputs of Dante.
:param config_path: str - path to the config file
:param save_dir: str - directory where to save the new config
:param params_file: str - path to the parameter file to reference in the new config
:return: None
"""
# gather inputs:
directory = os.path.dirname(config_path)
inputs = glob('%s/*/annotations*' % directory)
inputs += glob('%s/*/filtered_primer*' % directory)
# read the old config:
config = yaml_reader.load_arguments(config_path)
# update the config with new inputs
config['inputs'] = []
for input in inputs:
config['inputs'].append({'path': input})
# update the config with new params
config['allcall']['param_file'] = params_file
# add "_retrained" to output dirs
config['general']['output_dir'] = '%s_retrained' % config['general']['output_dir']
# write it
name = get_name(config_path)
config_name = '%s/%s_config.yaml' % (save_dir, name)
yaml_reader.save_arguments(config, config_name)
def merge_profiles(profiles, output_file):
"""
Merge all profiles according to the name of dirs and output them.
:param profiles: list(str) - list of paths to profiles
:param output_file: str - output file for merged file
:return: pd.DataFrame - merged DataFrame with all data
"""
if len(profiles) == 0:
return None
# create empty dataframe
all_profiles = pd.DataFrame()
# and fill it
for profile in profiles:
name = get_name(profile)
# get the maximal number of columns:
max_cols = 0
with open(profile) as f:
for line in f:
max_cols = max(max_cols, line.count('\t'))
# write to aggregated file:
current = pd.read_csv(profile, sep='\t', header=None, names=['index'] + list(range(max_cols)), index_col=0, parse_dates=True, engine='python')
current.index = list(map(lambda x: '%s_%s' % (name, x), current.index))
all_profiles = pd.concat([all_profiles, current])
# fill not available data:
all_profiles = all_profiles.fillna(0)
all_profiles = all_profiles.applymap(lambda x: x if type(x) is str else str(int(x)))
all_profiles.sort_index(inplace=True)
# save it:
all_profiles.to_csv(output_file, sep='\t')
# return it
return all_profiles
def read_dante(filename):
"""
Read profiles from CSV from Dante.
:param filename: str - filename to read
:return: Pandas.DataFrame with read profiles or None if no read occurred
"""
# now try to load tsv file:
name = filename.split('/')[-2]
try:
profiles = pd.read_csv(filename, sep="\t", header=None, index_col=None, parse_dates=True)
except Exception:
return None
new_profiles = pd.DataFrame()
max_str = max(profiles.max(0)[1:]) + 2
if profiles is not None:
for column in profiles.columns[1:]:
vals = np.zeros(max_str, dtype=int)
for i, c in enumerate(profiles[column]):
vals[int(c)] += profiles.iloc[i][0]
new_profiles['%s_%d' % (name, column - 1)] = vals
if len(new_profiles.index) > 0:
profiles = new_profiles.transpose()
return profiles
def fix_profile_file(filename):
"""
Fix profile file to be able to read as a tsv.
:param filename: str - filename to fix
"""
# read the file
with open(filename) as f:
lines = f.readlines()
# find separator
sep = '\t' if len(lines[0].split('\t')) >= len(lines[0].split(None)) else None
# count the number of columns:
cols = np.zeros_like(lines, dtype=int)
for i, line in enumerate(lines):
cols[i] = len(line.split(sep))
# print with the highest number
max_cols = max(cols)
with open(filename, 'w') as f:
for i, line in enumerate(lines):
f.write(line.strip())
# append enough zeros
for _ in range(max_cols - cols[i]):
f.write('\t0')
f.write('\n')
def read_profiles(filename):
"""
Read profiles from CSV or from .npy file.
:param filename: str - filename to read
:return: Pandas.DataFrame with read profiles or None if no read occurred
"""
# first try to load numpy array
try:
profiles = np.load(filename)
except IOError:
profiles = None
if profiles is not None:
profiles = pd.DataFrame(data=profiles[np.newaxis], index=[int(filename.split('.')[0].split('/')[-1])])
# now try to load tsv file:
if profiles is None:
try:
fix_profile_file(filename)
profiles = pd.read_csv(filename, sep='\t', header=None, index_col=0, parse_dates=True)
except IOError:
profiles = None
return profiles
def read_true(filename):
"""
Read true values from json file or from .true file.
:param filename: str - json file to read
:return: dict - values read from the json file or None if no read occurred
"""
class WrongCountError(Exception):
pass
true_values = None
try:
with open(filename) as f:
true_values = json.load(f)
except Exception:
pass
if true_values is None:
try:
with open(filename) as f:
true_values = {}
for line in f:
split = line.split()
if len(split) == 3:
m = re.search(r'_\d+$', split[0])
name = split[0]
if m is None:
name += '_1'
true_values[name] = (int(split[1]), int(split[2]))
elif len(split) > 3:
raise WrongCountError("Wrong number of parsed elements (expected 3, got %d)" % len(split))
except Exception as e:
print('ERROR: ', e)
return None
return true_values
def read_params(filename):
"""
Reads all parameters written with write_params(print_all=True)
:param filename: str - filename to read parameters from, if None, load default params
:return: 4-tuple, 2-tuple, function - parameters for model, read count drop, and error function for model distributions
"""
if filename is None:
return DEFAULT_MODEL_PARAMS, DEFAULT_READ_DROP, DEFAULT_READ_DROP_REL, DEFAULT_FIT_FUNCTION
# read 2nd and last line of the file
with open(filename) as f:
lines = f.readlines()
fit_function = lines[1].strip().split()[1]
split = list(map(float, lines[-1].strip().split()))
if len(split) < 8:
print("ERROR: parameters were not read successfully, using defaults!", file=sys.stderr)
return DEFAULT_MODEL_PARAMS, DEFAULT_READ_DROP, DEFAULT_READ_DROP_REL, DEFAULT_FIT_FUNCTION
# extract parameters from last line of file
model_params = tuple(split[0:4])
read_drop_params = tuple(split[4:6])
read_drop_params_rel = tuple(split[6:8])
return model_params, read_drop_params, read_drop_params_rel, fit_function
| 2.65625 | 3 |
scripts/runner.py | kzbnb/numerical_bugs | 8 | 12787504 | import os
import sys
import argparse
parse = argparse.ArgumentParser()
parse.add_argument("--type", type=str,choices=['origin', 'grist',], help="run initial file or grist file")
parse.add_argument("--times", type=int, help="time to run code")
flags, unparsed = parse.parse_known_args(sys.argv[1:])
for i in range(flags.times):
# use the loop index in the log file name so the spawned runs do not overwrite each other's output
command = f"nohup python -u scripts/one_time_runner.py --type {flags.type} > {flags.type}_{i}.log 2>&1 &"
os.system(command) | 2.3125 | 2 |
main.py | Tigenzero/clutch-code-project | 0 | 12787505 | <filename>main.py
import logging
import logging.config
import os
from time import perf_counter
import argparse
from flask import Flask
from flask import request, jsonify
ADDRESS_ARG = 'address'
def parse_args():
parser = argparse.ArgumentParser(description='Python Service to accept addresses and return the states they reside in.')
parser.add_argument('api_key', type=str, help='Google API key necessary to use their geocoding API')
return parser.parse_args()
def start_flask(command_args):
flask_app = Flask(__name__, instance_relative_config=True)
flask_app.config.from_object('config')
flask_app.config['api_key'] = command_args.api_key
@flask_app.route('/search_address', methods=['GET'])
def geocode_address():
if ADDRESS_ARG in request.args:
address = request.args[ADDRESS_ARG]
else:
return "ERROR: Address not found. Please provide an address"
return "Nothing has been implemented!"
return flask_app
if __name__ == "__main__":
# Create Logs
logfile = os.path.join('logs', 'logging_file.log')
print(logfile)
if not os.path.isdir('logs'):
os.makedirs('logs')
logging.config.fileConfig('logging.conf', defaults={'logfile': logfile})
logging.debug("Starting main")
# Get args
args = parse_args()
print(args.api_key)
app = start_flask(args)
app.run()
print("app running")
| 3 | 3 |
pq.py | phamhm/sublime_motion | 0 | 12787506 | <reponame>phamhm/sublime_motion
import queue
myq = queue.PriorityQueue()
myq.put(10)
myq.put(9)
myq.put(3)
myq.put(2)
myq.put(99)
| 1.65625 | 2 |
final_project/machinetranslation/translator.py | lgomez9/xzceb-flask_eng_fr | 0 | 12787507 | <gh_stars>0
# LGomez Version
""" Translator module for final python project """
import json
import os
from ibm_watson import LanguageTranslatorV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from dotenv import load_dotenv
load_dotenv()
apikey = os.environ['apikey']
url = os.environ['url']
authenticator = IAMAuthenticator(apikey)
language_translator = LanguageTranslatorV3(version='2018-05-01', authenticator=authenticator)
language_translator.set_service_url(url)
# LGOMEZ E2F
def english_to_french(english_text):
""" Converts English text to French """
if english_text == '':
return ''
translation = language_translator.translate(text=english_text, model_id='en-fr').get_result()
french_text = translation['translations'][0]['translation']
return french_text
# LGOMEZ F2E
def french_to_english(french_text):
""" Converts French text to English """
if french_text == '':
return ''
translation = language_translator.translate(text=french_text, model_id='fr-en').get_result()
english_text = translation['translations'][0]['translation']
return english_text
| 2.890625 | 3 |
REST/python/Environments/create-environments.py | gdesai1234/OctopusDeploy-Api | 199 | 12787508 | <filename>REST/python/Environments/create-environments.py
import json
import requests
from urllib.parse import quote
octopus_server_uri = 'https://your.octopus.app/api'
octopus_api_key = 'API-YOURAPIKEY'
headers = {'X-Octopus-ApiKey': octopus_api_key}
def get_octopus_resource(uri):
response = requests.get(uri, headers=headers)
response.raise_for_status()
return json.loads(response.content.decode('utf-8'))
def post_octopus_resource(uri, body):
response = requests.post(uri, headers=headers, json=body)
response.raise_for_status()
return json.loads(response.content.decode('utf-8'))
def get_by_name(uri, name):
resources = get_octopus_resource(uri)
return next((x for x in resources['Items'] if x['Name'] == name), None)
space_name = 'Default'
environment_names = ['Development', 'Test', 'Staging', 'Production']
space = get_by_name('{0}/spaces?partialName={1}&skip=0&take=100'.format(octopus_server_uri, quote(space_name)), space_name)
for environment_name in environment_names:
existing_environment = get_by_name('{0}/{1}/environments?partialName={2}&skip=0&take=100'.format(octopus_server_uri, space['Id'], quote(environment_name)), environment_name)
if existing_environment is None:
print('Creating environment \'{0}\''.format(environment_name))
environment = {
'Name': environment_name
}
environment_resource = post_octopus_resource('{0}/{1}/environments'.format(octopus_server_uri, space['Id']), environment)
print('EnvironmentId: \'{0}\''.format(environment_resource['Id']))
else:
print('Environment \'{0}\' already exists. Nothing to create :)'.format(environment_name)) | 2.6875 | 3 |
mrcnn-app/ImageCollector/mrcnn-ImageCollector.py | B-C-WANG/AI-Apps | 7 | 12787509 | <filename>mrcnn-app/ImageCollector/mrcnn-ImageCollector.py
import os
import sys
import random
import math
import numpy as np
import skimage.io
import matplotlib
import matplotlib.pyplot as plt
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
import cv2
import time
class MaskRCNN():
def __init__(self,mask_rcnn_root):
# Root directory of the project
ROOT_DIR = mask_rcnn_root
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
sys.path.append(os.path.join(ROOT_DIR, "samples/coco/")) # To find local version
import coco
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
utils.download_trained_weights(COCO_MODEL_PATH)
class InferenceConfig(coco.CocoConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
config = InferenceConfig()
config.display()
# Create model object in inference mode.
self.model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
# Load weights trained on MS-COCO
self.model.load_weights(COCO_MODEL_PATH, by_name=True)
self.class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
'teddy bear', 'hair drier', 'toothbrush']
self.ax = []
self.plt = []
def devide_images(self,image_path_list):
image_save_index = 0
for image_path in image_path_list:
image = skimage.io.imread(image_path)
results = self.model.detect([image],verbose=1)
r = results[0]
rois = r["rois"]
N = rois.shape[0]
class_ids = r["class_ids"]
masks = r["masks"]
scores = r["scores"]
if not os.path.exists("output"):
os.mkdir("output")
for i in range(N):
if not np.any(rois[i]):continue
y1,x1,y2,x2 = rois[i]
#print(x1,x2,y1,y2)
#print(image.shape)
new_image = image[y1:y2,x1:x2,:]
#print(new_image.shape)
mask = masks[:,:,i]
label = self.class_names[class_ids[i]]
plt.imshow(new_image)
try:
plt.savefig("output\%s\%s%s.png"%(label,label,image_save_index))
image_save_index += 1
except FileNotFoundError:
os.mkdir("output\%s" % label)
plt.savefig("output\%s\%s%s.png" % (label, label, image_save_index))
image_save_index += 1
except:
continue
def tst():
# set MaskRCNN root: https://github.com/matterport/Mask_RCNN
temp = MaskRCNN("G:\wbc\GitHub\Mask_RCNN")
# images
temp.devide_images(["C:\\Users\Administrator\OneDrive - shanghaitech.edu.cn\图片\\"+ i for i in
[
"9247489789_132c0d534a_z.jpg",
"3132016470_c27baa00e8_z.jpg",
"3651581213_f81963d1dd_z.jpg"
]
]
)
if __name__ == '__main__':
tst() | 2.390625 | 2 |
scripts/rasterio/find_valid_tiff_files.py | PhilipeRLeal/xarray_case_studies | 1 | 12787510 | import pandas as pd
import numpy as np
import geopandas as gpd
import glob
import rasterio
import rasterio.mask
from shapely.geometry import box
import os
shp_file_path = r'C:\Doutorado\BD\IBGE\IBGE_Estruturas_cartograficas_Brasil\2017\Unidades_Censitarias\Setores_Censitarios\*shp'
gdf= gpd.read_file(glob.glob(shp_file_path)[0])
gdf_origin_bounds = gpd.GeoSeries(box(*gdf.total_bounds), crs=gdf.crs)
Para = gdf[gdf['CD_GEOCODM'].str.startswith('15')]
def get_bounds_from_gdf(gdf_bounds, epsg):
return gdf_bounds.to_crs(epsg)
main_dir = r'C:\Users\<NAME>\Downloads\temp\Global Impervious Surfaces products\Global Impervious Surfaces products'
ending = '*.tif*'
tiff_files = glob.glob( os.path.join(main_dir, ending))
print('Total number of files in directory: ', len(tiff_files))
# Filtering files outside the Main GDF:
valid_tiffs = []
for tiff_file_path in tiff_files:
with rasterio.open(tiff_file_path) as src:
#out_image, out_transform = rasterio.mask.mask(src, shapes, crop=True)
out_meta = src.meta
Bounds = box(*src.bounds)
gdf_bounds = get_bounds_from_gdf(gdf_origin_bounds, out_meta['crs'].to_epsg()).values
if (gdf_bounds.intersects(Bounds) or gdf_bounds.within(Bounds)
or gdf_bounds.contains(Bounds) or gdf_bounds.crosses(Bounds)
):
valid_tiffs.append(tiff_file_path)
print('Valid Total Files: ', len(valid_tiffs))
ref_dir = os.path.dirname(os.path.dirname(main_dir))
saving_paths = os.path.join(ref_dir, 'Valid_files.csv')
to_file = pd.Series(valid_tiffs, name='paths')
to_file.index.name = 'ID'
to_file.to_csv(saving_paths) | 2.625 | 3 |
src/libs/utils.py | bopopescu/covid-19-visualization | 0 | 12787511 | <reponame>bopopescu/covid-19-visualization<filename>src/libs/utils.py
#coding:utf8
import time
import datetime
import random
from src.libs.platform_version import IS_PYTHON2
import hashlib  # the old md5 module is unavailable in Python 3; hashlib provides md5
def LONG(n):
return long(n) if IS_PYTHON2 else int(n)
'''Get the current time as a string'''
def TM():
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
'''Get the current date as a string'''
def DT():
return time.strftime("%Y-%m-%d", time.localtime())
'''Get the current timestamp in milliseconds'''
def TMS():
return time.time() * 1000
'''Get the current timestamp in milliseconds (as an integer)'''
def TMSL():
return LONG(time.time() * 1000)
'''Get the current timestamp as a string'''
def TMSS():
return str(LONG(time.time() * 1000))
'''Current time as a digits-only string'''
def TMN():
return datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')[:-3]
'''Convert a datetime to a time string'''
def T2S(tm):
return time.strftime("%Y-%m-%d %H:%M:%S", tm.timetuple()) if tm else ''
'''Convert a timestamp to a time string'''
def TS2S(stamp):
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(stamp))
'''Convert a string to a time struct'''
def S2T(s):
return time.strptime(s, "%Y-%m-%d %H:%M:%S")
'''Convert a string to a datetime'''
def S2DT(s):
return datetime.datetime.strptime(s[:19], "%Y-%m-%d %H:%M:%S")
'''Convert a date string to a datetime'''
def S2D(s = ''):
if s == '': return datetime.datetime.now()
return datetime.datetime.strptime(s, "%Y-%m-%d")
def DT2TMS(dt):
if not dt: return None
return int(time.mktime(dt.timetuple())) * 1000
def F0(n):
n = str(int(n))
return n if len(n) > 1 else "0" + n
def F00(n):
n = str(int(n))
if len(n) < 3: n = "0" + n
return n if len(n) > 2 else "0" + n
def F000(n):
n = str(int(n))
for i in range(4 - len(n)): n = "0" + n
return n
def FLOAT(n):
if not n: return 0
return float(n)
def CardR4(c):
if not c: return "****"
return c[-4:]
# def TJS(stamp):
# return TS(stamp/1000)
def MU():
return int(time.strftime("%M")) % 10
def DateOffset(n = 0, dt = ''):
sf = '%Y%m%d' if len(dt) == 8 else '%Y-%m-%d'
dt = datetime.datetime.now() if dt == '' else datetime.datetime.strptime(dt, sf)
return str(dt + datetime.timedelta(days=n)).split(" ")[0]
'''params - seconds'''
def TimeOffset(n = 60, tm = ''):
sf = '%Y-%m-%d %H:%M:%S'
tm = datetime.datetime.now() if tm == '' else datetime.datetime.strptime(tm, sf)
return str(tm + datetime.timedelta(seconds=n))
'''params - seconds'''
def TimeOffsetS(n = 60, tm = ''):
sf = '%Y-%m-%d %H:%M:%S'
tm = datetime.datetime.now() if tm == '' else datetime.datetime.strptime(tm, sf)
st = (tm + datetime.timedelta(seconds=n)).strftime(sf)
return LONG(time.mktime(time.strptime(st, sf)) * 1000)
def date_less(t1, t2):
if not t1 or not t2: return False
if type(t1) != datetime.datetime: t1 = S2DT(t1)
if type(t2) != datetime.datetime: t2 = S2DT(t2)
return t1 < t2
def DiffDay(s1, s2):
t1 = datetime.datetime.strptime(s1.split(" ")[0], "%Y-%m-%d")
t2 = datetime.datetime.strptime(s2.split(" ")[0], "%Y-%m-%d")
return (t2 - t1).days
'''Time difference: the number of seconds by which t1 is later than t2'''
def DiffSecond(t1, t2 = None):
if type(t1) == type(9999999999):
t1 = datetime.datetime.fromtimestamp(t1 / 1000.0)
else:
t1 = datetime.datetime.strptime(t1, "%Y-%m-%d %H:%M:%S")
if t2:
t2 = datetime.datetime.strptime(t2, "%Y-%m-%d %H:%M:%S")
else:
t2 = datetime.datetime.now()
# return LONG((t2 - t1).seconds) * (1 if t2 < t1 else -1)
return LONG((t1 - t2).seconds + (t1 - t2).days * 86400)
def NextDay(dt = None):
if not dt: dt = DT()
dt = datetime.datetime.strptime(dt, "%Y-%m-%d")
return str(dt + datetime.timedelta(days=1)).split(" ")[0]
def LastDay(dt):
dt = datetime.datetime.strptime(dt, "%Y-%m-%d")
return str(dt + datetime.timedelta(days=-1)).split(" ")[0]
def pre30s(s):
dt = datetime.datetime.strptime(s, "%Y-%m-%d %H:%M:%S")
return str(dt + datetime.timedelta(seconds = 30))
def after30s(s):
dt = datetime.datetime.strptime(s, "%Y-%m-%d %H:%M:%S")
return str(dt + datetime.timedelta(seconds = -30))
def dayTime(s):
return str(datetime.datetime.strptime(s[:8], "%Y%m%d"))
def time2second(s):
if s == '': return 0
ps = s.split(":")
return int(ps[0]) * 3600 + int(ps[1]) * 60
def second2time(s):
if s >= 86400: s -= 86400 # 43200
return F0(s / 3600) + ':' + F0(s % 3600 / 60) + ':' + F0(s % 3600 % 60)
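# Illustrative sketch (not part of the original file): time2second only reads
# hours and minutes, so it round-trips with second2time for whole-minute values,
# and DateOffset shifts a date string by whole days.
def _demo_time_helpers():
    assert time2second('01:30:00') == 5400
    assert second2time(5400) == '01:30:00'
    assert DateOffset(1, '2020-01-31') == '2020-02-01'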
'''Convert a string to a timestamp in milliseconds'''
def S2TS(s):
timeArray = time.strptime(s, "%Y-%m-%d %H:%M:%S")
return int(time.mktime(timeArray)) * 1000
'''Convert a string to a timestamp in milliseconds (as an integer)'''
def S2TSL(s):
timeArray = time.strptime(s, "%Y-%m-%d %H:%M:%S")
return LONG(time.mktime(timeArray) * 1000)
def PreNow(tm):
tn = datetime.datetime.now()
if type(tm) != type(tn): tm = S2DT(tm)
return tm < tn
def LaterNow(tm):
tn = datetime.datetime.now()
if type(tm) != type(tn): tm = S2DT(tm)
return tm > tn
def RandCode(length = 10):
txt = str(random.random())[2:]
return txt[:length] if len(txt) > length else txt + str(random.random())[2:length - len(txt) + 2]
def KV2Dict(keys, values):
return dict((keys[i], values[i]) for i in range(len(keys)))
return
def P2C(p):
return float(p) * 20 + 1800
def GetMd5(st):
return hashlib.md5(st.encode('utf8')).hexdigest()
if __name__ == '__main__':
pass
# print(S2DT("2020-02-05 11:23:09.23"))
| 2.609375 | 3 |
csdl/examples/valid/ex_implicit_multiple_residuals_define_model_inline.py | LSDOlab/csdl | 0 | 12787512 | <gh_stars>0
def example(Simulator):
from csdl import Model, ScipyKrylov, NewtonSolver, NonlinearBlockGS
import numpy as np
from csdl.examples.models.quadratic_function import QuadraticFunction
from csdl.examples.models.fixed_point import FixedPoint1, FixedPoint2, FixedPoint3
from csdl.examples.models.fixed_point import FixedPoint2
from csdl.examples.models.quadratic_wih_extra_term import QuadraticWithExtraTerm
from csdl.examples.models.simple_add import SimpleAdd
from csdl.examples.models.circle_parabola import CircleParabola
from csdl.examples.models.quadratic_function import QuadraticFunction
class ExampleMultipleResidualsDefineModelInline(Model):
def define(self):
m = Model()
r = m.declare_variable('r')
a = m.declare_variable('a')
b = m.declare_variable('b')
c = m.declare_variable('c')
x = m.declare_variable('x', val=1.5)
y = m.declare_variable('y', val=0.9)
m.register_output('rx', x**2 + (y - r)**2 - r**2)
m.register_output('ry', a * y**2 + b * y + c)
r = self.declare_variable('r', val=2)
a = self.declare_variable('a', val=1)
b = self.declare_variable('b', val=-3)
c = self.declare_variable('c', val=2)
solve_multiple_implicit = self.create_implicit_operation(m)
solve_multiple_implicit.declare_state('x', residual='rx')
solve_multiple_implicit.declare_state('y', residual='ry')
solve_multiple_implicit.linear_solver = ScipyKrylov()
solve_multiple_implicit.nonlinear_solver = NewtonSolver(
solve_subsystems=False)
x, y = solve_multiple_implicit(r, a, b, c)
sim = Simulator(ExampleMultipleResidualsDefineModelInline())
sim.run()
print('x', sim['x'].shape)
print(sim['x'])
print('y', sim['y'].shape)
print(sim['y'])
return sim | 2.84375 | 3 |
match_extensions/number.py | jdillenberger/jmatch | 1 | 12787513 | <reponame>jdillenberger/jmatch
'''
# General pattern functions
Node and subtree match functions, to be executed from
a Patterns node_matches and subtree_matches function
if they are requested by the given pattern.
Functions with a _node suffix, are executed by a Patterns
"node_matches" method and functions with a _subtree suffix
are executed by a Patterns "subtree_matches" method.
The functions defined here make the pattern true
for matching nummeric values.
'''
def range_node(pattern_handler, data):
#pylint: disable=unused-argument
assert '..' in data['pattern'], 'The range defined in your pattern has no valid range format'
num1, num2 = data['pattern'].split('..')
if num1.isnumeric() and num2.isnumeric():
low, high = sorted((float(num1), float(num2)))
if not str(data['node']).isnumeric():
return False
return low < float(data['node']) < high
raise ValueError
def bigger_then_node(pattern_handler, data):
#pylint: disable=unused-argument
if not str(data['pattern']).isnumeric() or not str(data['node']).isnumeric():
raise ValueError('The "bigger_then" function can only be applied to numeric values.')
return data['node'] > data['pattern']
def bigger_then_equal_node(pattern_handler, data):
#pylint: disable=unused-argument
if not str(data['pattern']).isnumeric() or not str(data['node']).isnumeric():
error = 'The "bigger_then_equal" function can only be applied to numeric values.'
raise ValueError(error)
return data['node'] >= data['pattern']
def smaller_then_node(pattern_handler, data):
#pylint: disable=unused-argument
if not str(data['pattern']).isnumeric() or not str(data['node']).isnumeric():
raise ValueError('The "smaller_then" function can only be applied to numeric values.')
return data['node'] < data['pattern']
def smaller_then_equal_node(pattern_handler, data):
#pylint: disable=unused-argument
if not str(data['pattern']).isnumeric() or not str(data['node']).isnumeric():
error = 'The "smaller_then_equal" function can only be applied to numeric values.'
raise ValueError(error)
return data['node'] <= data['pattern']
| 3.46875 | 3 |
docs/_downloads/bb12ac5c5d4a848045c14031f77a9918/plot__color_and_fill.py | IKupriyanov-HORIS/lets-plot-docs | 0 | 12787514 | """
Color and Fill Scales
=====================
Scales control how a plot maps data values to the visual values of an
aesthetic.
"""
# sphinx_gallery_thumbnail_path = "gallery_py\_scales\_color_and_fill.png"
from datetime import datetime
import pandas as pd
from lets_plot import *
LetsPlot.setup_html()
# %%
mpg_df = pd.read_csv('https://raw.githubusercontent.com/JetBrains/lets-plot-docs/master/data/mpg.csv')
ec_df = pd.read_csv('https://raw.githubusercontent.com/JetBrains/lets-plot-docs/master/data/economics.csv', \
parse_dates=['date'])
ec_df = ec_df[ec_df.date > datetime(2000, 1, 1)]
# %%
# %% [markdown]
#
# Discrete
# ~~~~~~~~
# %%
p = ggplot(mpg_df, aes(x='fl')) + geom_bar(aes(color='fl', fill='fl'), alpha=.5)
p
# %%
p + scale_color_brewer(type='seq', palette='Blues') + \
scale_fill_brewer(type='seq', palette='Blues')
# %%
p + scale_color_grey(start=0, end=.7) + \
scale_fill_grey(start=0, end=.7)
# %%
# %% [markdown]
#
# Continuous
# ~~~~~~~~~~
# %%
p = ggplot(ec_df, aes(x='psavert')) + geom_histogram(aes(fill='psavert'))
p
# %%
p + scale_fill_gradient(low='#2c7fb8', high='#edf8b1')
# %%
p + scale_fill_gradient2(low='#1a9641', mid='#ffffbf', high='#d7191c')
# %%
p + scale_fill_hue(l=80, c=150) | 2.609375 | 3 |
SRC/engine/IO/meshmenu.py | usnistgov/OOF3D | 31 | 12787515 | # -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# <EMAIL>.
from ooflib.SWIG.common import config
from ooflib.SWIG.common import ooferror
from ooflib.SWIG.common import switchboard
from ooflib.SWIG.engine import elementshape
from ooflib.SWIG.engine import masterelement
from ooflib.common import debug
from ooflib.common import enum
from ooflib.common import labeltree
from ooflib.common import microstructure
from ooflib.common import parallel_enable
from ooflib.common import utils
from ooflib.common.IO import automatic
from ooflib.common.IO import datafile
from ooflib.common.IO import filenameparam
from ooflib.common.IO import mainmenu
from ooflib.common.IO import microstructureIO
from ooflib.common.IO import oofmenu
from ooflib.common.IO import parameter
from ooflib.common.IO import reporter
from ooflib.common.IO import whoville
from ooflib.common.IO import xmlmenudump
from ooflib.engine import bdycondition
from ooflib.engine import evolve
from ooflib.engine import fieldinit
from ooflib.engine import meshcrosssection
from ooflib.engine import meshmod
from ooflib.engine import meshstatus
from ooflib.engine import outputschedule
from ooflib.engine import skeletoncontext
from ooflib.engine import subproblemcontext
from ooflib.engine.IO import meshparameters
from ooflib.engine.IO import skeletonIO
from ooflib.engine.IO import subproblemmenu
if parallel_enable.enabled():
from ooflib.engine.IO import meshIPC
import ooflib.engine.mesh
import types
import string
SyncMeshParameter = ooflib.engine.mesh.SyncMeshParameter
OOF = mainmenu.OOF
meshmenu = mainmenu.OOF.addItem(oofmenu.OOFMenuItem(
'Mesh',
cli_only=1,
help='Tools for creating and manipulating Meshes.',
discussion="""<para>
The <command>Mesh</command> menu contains tools for creating and
manipulating finite element &meshes;, including methods for
defining &fields; and determining which &equations; to <link
linkend='MenuItem-OOF.Mesh.Solve'>solve</link>.
</para>"""))
settingsmenu = mainmenu.OOF.Settings.addItem(oofmenu.OOFMenuItem(
'Mesh_Defaults',
help='Default values for Mesh parameters'))
####################
# Look for an enclosing mesh parameter -- if not found, use the
# enclosing skeleton parameter. Mesh copying needs the first case,
# new mesh construction needs the second.
def meshNameResolver(param, startname):
if param.automatic():
basename = 'mesh'
else:
basename = startname
try:
meshname = param.group['mesh'].value
except IndexError:
skelname = param.group['skeleton'].value
skelpath = labeltree.makePath(skelname)
else:
skelpath = labeltree.makePath(meshname)[:-1]
return ooflib.engine.mesh.meshes.uniqueName(skelpath + [basename])
###################################
def newMesh(menuitem, name, skeleton, element_types):
# if parallel_enable.enabled():
# # skeleton is a string!
# # The following ASSUMES there are exactly three element_types:
# #(D_typename, T_typename and Q_typename, for edgement, Tri and Quad)
# meshIPC.parallel_newMesh(name,skeleton,
# element_types[0].name,
# element_types[1].name,
# element_types[2].name)
# else:
edict = {}
for eltype in element_types:
el = masterelement.getMasterElementFromEnum(eltype)
edict[el.shape().name()] = el
skelpath = labeltree.makePath(skeleton)
skelctxt = skeletoncontext.skeletonContexts[skelpath]
skelctxt.begin_reading()
try:
skel = skelctxt.getObject()
femesh = skel.femesh(edict)
if femesh is not None:
meshctxt = ooflib.engine.mesh.meshes.add(
skelpath+[name], femesh,
parent=skelctxt,
skeleton=skel,
elementdict=edict,
materialfactory=None)
meshctxt.createDefaultSubProblem()
meshctxt.setStatus(meshstatus.Unsolved("New mesh."))
finally:
skelctxt.end_reading()
switchboard.notify("redraw")
class MasterElementTypesParameter(enum.ListOfEnumsParameter):
def __init__(self, name, value=None, default=None, tip=None):
enum.ListOfEnumsParameter.__init__(
self, name,
elementshape.enumClasses.values(),
#masterelement.getMasterElementEnumClasses(),
value, default, tip)
def clone(self):
return self.__class__(self.name, self.value, self.default, self.tip)
def valueDesc(self):
return "A list of element types."
newmeshcmd = meshmenu.addItem(oofmenu.OOFMenuItem(
'New',
callback=newMesh,
params=parameter.ParameterGroup(
whoville.AutoWhoNameParameter('name', value=automatic.automatic,
resolver=meshNameResolver,
tip="Name of the new Mesh"),
whoville.WhoParameter('skeleton', skeletoncontext.skeletonContexts,
tip=parameter.emptyTipString),
MasterElementTypesParameter('element_types',
tip='A list of finite element types'),
## parameter.BooleanParameter('split_interface', value=0,
## tip='Split the mesh along interfaces?')
),
help='Create a new Mesh from a Skeleton.',
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/menu/newmesh.xml')
))
# The element_types parameter in the New Mesh menu item needs to be
# recreated whenever new MasterElement types are defined.
def buildNewMeshCmd():
params = parameter.ParameterGroup(
newmeshcmd.get_arg('name'),
newmeshcmd.get_arg('skeleton'),
MasterElementTypesParameter('element_types'))
newmeshcmd.replace_args(params)
switchboard.requestCallback("new master element", buildNewMeshCmd)
#####################################
def renameMesh(menuitem, mesh, name):
if parallel_enable.enabled():
meshIPC.ipcmeshmenu.Rename(mesh=mesh,name=name)
return
oldmeshpath = labeltree.makePath(mesh)
themesh = ooflib.engine.mesh.meshes[oldmeshpath]
themesh.reserve()
themesh.begin_writing()
try:
themesh.rename(name, exclude=oldmeshpath[-1])
finally:
themesh.end_writing()
themesh.cancel_reservation()
meshmenu.addItem(oofmenu.OOFMenuItem(
'Rename',
callback=renameMesh,
params=[whoville.WhoParameter('mesh', ooflib.engine.mesh.meshes,
tip=parameter.emptyTipString),
whoville.WhoNameParameter('name', value='',
tip='New name for the mesh.')
],
help="Rename a Mesh.",
discussion="<para> Assign a new name to a &mesh;. </para>"))
#######################################
def deleteMesh(menuitem, mesh):
if parallel_enable.enabled():
meshIPC.ipcmeshmenu.Delete(mesh=mesh)
return
meshctxt = ooflib.engine.mesh.meshes[mesh]
subproblems = meshctxt.subproblems()
for subproblem in subproblems:
subproblem.begin_writing()
try:
subproblem.destroy()
finally:
subproblem.end_writing()
meshctxt.reserve()
meshctxt.begin_writing()
try:
meshctxt.destroy() # removes mesh from ooflib.engine.mesh.meshes
finally:
meshctxt.end_writing()
meshctxt.cancel_reservation()
meshmenu.addItem(oofmenu.OOFMenuItem(
'Delete',
callback=deleteMesh,
params=[whoville.WhoParameter('mesh', ooflib.engine.mesh.meshes,
tip=parameter.emptyTipString)
],
help="Delete a Mesh.",
discussion="""<para>
Delete a &mesh;. Its &skel; and &micro; are
<emphasis>not</emphasis> deleted.
</para>"""
))
#######################################
def copyMesh(menuitem, mesh, name, copy_field, copy_equation, copy_bc):
if parallel_enable.enabled():
meshIPC.ipcmeshmenu.Copy(mesh=mesh,name=name,
copy_field=copy_field,
copy_equation=copy_equation, copy_bc=copy_bc)
return
notifications = set()
basemesh = ooflib.engine.mesh.meshes[mesh]
basemesh.begin_reading()
try:
edict = basemesh.elementdict
copiedmeshname = name
skel = basemesh.getSkeleton()
skelpath = labeltree.makePath(basemesh.path())[:-1]
#Interface branch, pass skeleton path to femesh
copiedfemesh = skel.femesh(edict, basemesh.materialfactory)
newmesh = ooflib.engine.mesh.meshes.add(
skelpath+[copiedmeshname],
copiedfemesh,
parent=skeletoncontext.skeletonContexts[skelpath],
skeleton=skel, elementdict=edict,
materialfactory=basemesh.materialfactory)
newmesh.reserve()
newmesh.begin_writing()
try:
copiedmesh = skelpath+[copiedmeshname]
copiedmeshfullname = string.join(copiedmesh,":")
for subpctxt in basemesh.subproblems():
newsubpctxt = subpctxt.clone(newmesh, copy_field, copy_equation,
notifications)
if copy_field:
for field in subpctxt.all_compound_fields():
newsubpctxt.getObject().acquire_field_data(
field, subpctxt.getObject())
# end loop over subproblems
newmesh.getObject().setCurrentTime(
basemesh.getObject().getCurrentTime())
if copy_field:
for field in newmesh.all_subproblem_fields():
if (config.dimension() == 2 and
basemesh.femesh().in_plane(field)):
newmesh.set_in_plane_field(field, 1)
notifications.add(("field inplane",
copiedmeshfullname, field.name(), 1))
try:
initializer = basemesh.initializers[field]
except KeyError:
pass
else:
newmesh.set_field_initializer(field, initializer)
notifications.add(("field initialized"))
if copy_bc:
for (bcname, bc) in basemesh.allBoundaryConds():
#Interface branch
#Don't copy the invisible Float BCs associated with
#interfaces. (see femesh.spy)
if bcname.find('_cntnty_')==0:
continue
copied = bc.copy(bc.boundary)
copied.add_to_mesh(bcname, copiedmesh)
if copy_field and copy_bc and copy_equation:
newmesh.setStatus(basemesh.status)
else:
newmesh.setStatus(meshstatus.Unsolved("New copy"))
finally:
newmesh.end_writing()
newmesh.cancel_reservation()
finally:
basemesh.end_reading()
for n in notifications:
## TODO OPT: remove duplicate notifications
switchboard.notify(*n)
meshmenu.addItem(oofmenu.OOFMenuItem(
'Copy', callback=copyMesh,
params= parameter.ParameterGroup(
whoville.WhoParameter('mesh', ooflib.engine.mesh.meshes,
tip=parameter.emptyTipString),
whoville.AutoWhoNameParameter('name', value=automatic.automatic,
resolver=meshNameResolver,
tip="Name of the copied Mesh. Use automatic selection, or type in a name."),
parameter.BooleanParameter('copy_field', value=1, tip='Copy fields?'),
parameter.BooleanParameter('copy_equation', value=1, tip='Copy equation?'),
parameter.BooleanParameter('copy_bc', value=1,
tip='Copy boundary conditions?') ),
help="Copy a Mesh.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/menu/copymesh.xml')
))
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
# Copy the field state (definitions, active-ness, planarity) of one
# mesh into another. This is the backwards-compatible deprecated
# version that uses the Mesh's default subproblem. The preferred
# version is in subproblemmenu.py.
def _copyFieldState(menuitem, source, target):
if source == target:
raise ooferror.ErrUserError('Source and target must differ!')
if parallel_enable.enabled():
meshIPC.ipcmeshmenu.Copy_Field_State(source=source,target=target)
return
notifications = []
source_mesh = ooflib.engine.mesh.meshes[source]
target_mesh = ooflib.engine.mesh.meshes[target]
source_subp = source_mesh.get_default_subproblem()
target_subp = target_mesh.get_default_subproblem()
source_subp.begin_reading()
target_subp.reserve()
target_subp.begin_writing()
try:
source_obj = source_subp.getObject()
target_obj = target_subp.getObject()
source_fields = source_subp.all_compound_fields()
target_fields = target_subp.all_compound_fields()
# Undefine all the fields in the target that are not in the source.
for f in target_fields:
if not source_obj.is_defined_field(f):
target_obj.undefine_field(f)
notifications.append(
("field defined", target_subp.path(), f.name(), 0))
for f in source_fields:
# Definition.
if not target_obj.is_defined_field(f):
target_obj.define_field(f)
notifications.append(
("field defined", target_subp.path(), f.name(), 1))
# Activation.
if source_obj.is_active_field(f):
if not target_obj.is_active_field(f):
target_obj.activate_field(f)
notifications.append(
("field activated", target_subp.path(), f.name(), 1))
else:
if target_obj.is_active_field(f):
target_obj.deactivate_field(f)
notifications.append(
("field activated", target_subp.path(), f.name(), 0))
# Planarity.
if config.dimension() == 2:
inplane = source_mesh.femesh().in_plane(f)
if target_mesh.femesh().in_plane(f) != inplane:
target_mesh.set_in_plane_field(f, inplane)
notifications.append(("field inplane", target, f.name(),
inplane))
try:
initializer = source_mesh.initializers[f]
except KeyError:
pass
else:
target_mesh.set_field_initializer(f, initializer)
                notifications.append(("field initialized",))
finally:
source_subp.end_reading()
target_subp.end_writing()
target_subp.cancel_reservation()
# Make all the switchboard notifications outside the locked region.
for n in notifications:
switchboard.notify(*n)
# Update BCs
target_subp.autoenableBCs()
target_subp.changed("Field state changed.")
switchboard.notify("redraw")
target_mesh.setStatus(meshstatus.Unsolved("Copied fields"))
meshmenu.addItem(oofmenu.OOFMenuItem(
'Copy_Field_State',
callback=_copyFieldState,
params=[whoville.WhoParameter('source',ooflib.engine.mesh.meshes,
tip=parameter.emptyTipString),
whoville.WhoParameter('target',ooflib.engine.mesh.meshes,
tip=parameter.emptyTipString)],
help="Copy the Field state (defined, active, etc) from one Mesh to another.",
discussion="""<para>
This command copies the &field; state from the default
&subproblem; in one &mesh; to another, meaning that the same
&fields; will be defined, active, and in-plane in the
<varname>target</varname> &mesh; as in the
<varname>source</varname> &mesh;. If &fields; were explicitly
<link linkend='MenuItem-OOF.Mesh.Set_Field_Initializer'>initialized</link>
in the source &mesh;, the initializers will be copied, but the
command does <emphasis>not</emphasis> copy the &field; values.
(This is because the source and target meshes might have quite
different geometries.)</para>
<para>DEPRECATED. Use <xref
linkend='MenuItem-OOF.Subproblem.Copy_Field_State'/> instead.
</para>"""
) )
# Likewise for equation state. This is also deprecated. See
# subproblemmenu.py for the preferred version.
def _copyEquationState(menuitem, source, target):
if source == target:
raise ooferror.ErrUserError('Source and target must differ!')
if parallel_enable.enabled():
meshIPC.ipcmeshmenu.Copy_Equation_State(source=source,target=target)
return
notifications = []
source_subp = ooflib.engine.mesh.meshes[source].get_default_subproblem()
target_subp = ooflib.engine.mesh.meshes[target].get_default_subproblem()
source_subp.begin_reading()
target_subp.reserve()
target_subp.begin_writing()
try:
source_obj = source_subp.getObject()
target_obj = target_subp.getObject()
source_eqns = source_obj.all_equations()
target_eqns = target_obj.all_equations()
for e in target_eqns:
if not source_obj.is_active_equation(e):
target_obj.deactivate_equation(e)
notifications.append(
("equation activated", target_subp.path(), e.name(), 0))
if config.devel()>=1:
if not source_obj.is_kinetically_active_equation(e):
target_obj.kinetic_deactivate_equation(e)
notifications.append(
('kinetics activated', target_subp.path(), e.name(), 0))
if not source_obj.is_dynamically_active_equation(e):
target_obj.deactivate_dynamics(e)
notifications.append(
('dynamics activated', target_subp.path(), e.name(), 0))
for e in source_eqns:
if not target_obj.is_active_equation(e):
target_obj.activate_equation(e)
notifications.append(
("equation activated", target_subp.path(), e.name(), 1))
if config.devel()>=1:
if not target_obj.is_kinetically_active_equation(e):
target_obj.kinetic_activate_equation(e)
notifications.append(
('kinetics activated', target_subp.path(), e.name(), 1))
if not target_obj.is_dynamically_active_equation(e):
target_obj.activate_dynamics(e)
notifications.append(
('dynamics activated', target_subp.path(), e.name(), 1))
finally:
source_subp.end_reading()
target_subp.end_writing()
target_subp.cancel_reservation()
for n in notifications:
switchboard.notify(*n)
target_subp.autoenableBCs()
target_subp.changed("Equations changed.")
meshmenu.addItem(oofmenu.OOFMenuItem(
'Copy_Equation_State',
callback=_copyEquationState,
params=[whoville.WhoParameter('source',ooflib.engine.mesh.meshes,
tip=parameter.emptyTipString),
whoville.WhoParameter('target',ooflib.engine.mesh.meshes,
tip=parameter.emptyTipString)],
help="Copy the set of active Equations from one Mesh to another.",
discussion="""<para>
This command copies the &equation; state from the default
&subproblem; in one &mesh; to the default &subproblem; in another,
meaning that the same &equations; will be active in the
<varname>target</varname> &subproblem; as in the
<varname>source</varname> &subproblem;.
</para>"""
) )
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
# Field definition and activation
fieldmenu = meshmenu.addItem(oofmenu.OOFMenuItem(
'Field',
help='Define and activate Fields.',
discussion="""<para>
The <command>Field</command> menu contains the commands that
define and set the properties of &fields; on &meshes;.
</para>"""))
def _defineField(menuitem, mesh, field):
## This has been rewritten to use the default subproblem, for
## backwards compatibility. The menuitem is deprecated -- use
## Subproblem.Field.Define instead.
if parallel_enable.enabled():
meshIPC.ipcfieldmenu.Define(mesh=mesh,field=field)
else:
meshcontext = ooflib.engine.mesh.meshes[mesh]
subpcontext = meshcontext.get_default_subproblem()
subpcontext.reserve()
subpcontext.begin_writing()
try:
subpcontext.getObject().define_field(field)
finally:
subpcontext.end_writing()
subpcontext.cancel_reservation()
switchboard.notify("field defined", subpcontext.path(), field.name(), 1)
subpcontext.autoenableBCs()
subpcontext.changed("Field defined.")
meshcontext.setStatus(meshstatus.Unsolved("New fields defined"))
def _undefineField(menuitem, mesh, field):
## Also deprecated. Use Subproblem.Field.Undefine instead.
if parallel_enable.enabled():
meshIPC.ipcfieldmenu.Undefine(mesh=mesh,field=field)
else:
meshcontext = ooflib.engine.mesh.meshes[mesh]
subpcontext = meshcontext.get_default_subproblem()
subpcontext.reserve()
subpcontext.begin_writing()
try:
subpcontext.getObject().undefine_field(field)
# After undefining a Field, the data cache in the mesh has
# the wrong number of dofs in it. We could in principle
# delete the correct dofs from each cache entry, but it
# might be slow (especially for a disk cache). The
# simpler thing to do is to just delete the whole cache.
subpcontext.getParent().clearDataCache()
finally:
subpcontext.end_writing()
subpcontext.cancel_reservation()
subpcontext.autoenableBCs()
subpcontext.changed("Field undefined.")
switchboard.notify("field defined", subpcontext.path(), field.name(), 0)
meshcontext.setStatus(meshstatus.Unsolved("New fields defined"))
fieldmenu.addItem(oofmenu.OOFMenuItem(
'Define',
callback=_defineField,
params=[whoville.WhoParameter('mesh', ooflib.engine.mesh.meshes,
tip=parameter.emptyTipString),
meshparameters.FieldParameter('field', tip=parameter.emptyTipString)
],
help="Define a Field on a Mesh. Only defined Fields may be given values.",
## TODO: Fix discussion
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/menu/definefield.xml')
))
fieldmenu.addItem(oofmenu.OOFMenuItem(
'Undefine',
callback=_undefineField,
params=[whoville.WhoParameter('mesh', ooflib.engine.mesh.meshes,
tip=parameter.emptyTipString),
meshparameters.FieldParameter('field', tip=parameter.emptyTipString)
],
help="Undefine a Field on a Mesh. Only defined Fields may be given values.",
discussion="""<para>
Undefine a &field; on a &mesh;'s default &subproblem;. This frees
the memory used to store the &field; components and destroys their
values, unless other &subproblems; are using the &field;. See <xref
linkend='MenuItem-OOF.Mesh.Field.Define'/>. DEPRECATED.
</para>"""
))
def _activateField(menuitem, mesh, field):
activation = False
if parallel_enable.enabled():
meshIPC.ipcfieldmenu.Activate(mesh=mesh,field=field)
else:
meshcontext = ooflib.engine.mesh.meshes[mesh]
subpcontext = meshcontext.get_default_subproblem()
subpcontext.reserve()
subpcontext.begin_writing()
try:
subp = subpcontext.getObject()
if subp.is_defined_field(field):
subp.activate_field(field)
activation = True
else:
reporter.report(
"You must define a Field before you can activate it.")
finally:
subpcontext.end_writing()
subpcontext.cancel_reservation()
if activation:
subpcontext.autoenableBCs()
switchboard.notify("field activated", subpcontext.path(),
field.name(), 1)
subpcontext.changed("Field activated.")
meshcontext.setStatus(meshstatus.Unsolved("Field activated"))
def _deactivateField(menuitem, mesh, field):
deactivation = False
if parallel_enable.enabled():
meshIPC.ipcfieldmenu.Deactivate(mesh=mesh,field=field)
else:
meshcontext = ooflib.engine.mesh.meshes[mesh]
subpcontext = meshcontext.get_default_subproblem()
subpcontext.reserve()
subpcontext.begin_writing()
try:
subp = subpcontext.getObject()
if subp.is_active_field(field):
subp.deactivate_field(field)
deactivation = True
else:
reporter.report(
"You must define and activate a Field before you can deactivate it.")
finally:
subpcontext.end_writing()
subpcontext.cancel_reservation()
if deactivation:
subpcontext.autoenableBCs()
switchboard.notify("field activated", subpcontext.path(),
field.name(), 0)
subpcontext.changed("Field deactivated.")
meshcontext.setStatus(meshstatus.Unsolved("Field deactivated"))
fieldmenu.addItem(oofmenu.OOFMenuItem(
"Activate",
callback=_activateField,
params=[whoville.WhoParameter('mesh', ooflib.engine.mesh.meshes,
tip=parameter.emptyTipString),
meshparameters.FieldParameter('field', tip=parameter.emptyTipString)
],
help="Activate a Field. The solver finds the values of active Fields.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/menu/activatefield.xml')
))
fieldmenu.addItem(oofmenu.OOFMenuItem(
'Deactivate',
callback=_deactivateField,
params=[whoville.WhoParameter('mesh', ooflib.engine.mesh.meshes,
tip=parameter.emptyTipString),
meshparameters.FieldParameter('field', tip=parameter.emptyTipString)
],
help="Deactivate a Field. The solver finds the values of active Fields.",
discussion="""<para>
Deactivating a &field; means that its values will not be found
when the &mesh; is <link
linkend="MenuItem-OOF.Mesh.Solve">solved</link>. See <xref
linkend='MenuItem-OOF.Mesh.Field.Activate'/>.
</para>"""
))
def _inPlaneField(menuitem, mesh, field):
if parallel_enable.enabled():
meshIPC.ipcfieldmenu.In_Plane(mesh=mesh,field=field)
else:
meshcontext = ooflib.engine.mesh.meshes[mesh]
meshcontext.reserve()
meshcontext.begin_writing()
try:
meshcontext.set_in_plane_field(field, 1)
finally:
meshcontext.end_writing()
meshcontext.cancel_reservation()
switchboard.notify("field inplane", meshcontext.path(), field.name(), 1)
meshcontext.changed("Field planarity changed.")
# meshcontext.setStatus(meshstatus.Unsolved("Field planarity changed"))
def _outOfPlaneField(menuitem, mesh, field):
if parallel_enable.enabled():
meshIPC.ipcfieldmenu.Out_of_Plane(mesh=mesh,field=field)
else:
meshcontext = ooflib.engine.mesh.meshes[mesh]
meshcontext.reserve()
meshcontext.begin_writing()
try:
meshcontext.set_in_plane_field(field, 0)
finally:
meshcontext.end_writing()
meshcontext.cancel_reservation()
switchboard.notify("field inplane", meshcontext.path(),
field.name(), 0)
meshcontext.changed("Field planarity changed.")
# meshcontext.setStatus(meshstatus.Unsolved("Field planarity changed"))
if config.dimension() == 2:
fieldmenu.addItem(oofmenu.OOFMenuItem(
'In_Plane',
callback=_inPlaneField,
params=[
whoville.WhoParameter(
'mesh', ooflib.engine.mesh.meshes,
tip=parameter.emptyTipString),
meshparameters.FieldParameter(
'field', tip=parameter.emptyTipString)
],
help="In-plane Fields are constrained to have no z-components.",
discussion="""<para>
This command invokes <link
linkend='Section-Concepts-Mesh-3D'>generalized plane-strain</link>
for the given &field; on all &subproblems; on the given &mesh;.
The out-of-plane derivatives of the &field; are taken to be zero.
        See <xref linkend='MenuItem-OOF.Mesh.Field.Out_of_Plane'/>.
</para>"""
))
fieldmenu.addItem(oofmenu.OOFMenuItem(
'Out_of_Plane',
callback=_outOfPlaneField,
params=[
whoville.WhoParameter(
'mesh', ooflib.engine.mesh.meshes,
tip=parameter.emptyTipString),
meshparameters.FieldParameter(
'field', tip=parameter.emptyTipString)
],
help="Out-of-plane Fields are allowed to have z-components.",
discussion="""<para>
This command disables <link
linkend='Section-Concepts-Mesh-3D'>generalized plane-strain</link>
for the given &field; on all &subproblems; on the given &mesh;.
The out-of-plane derivatives of the &field; will be computed.
Generally, it's necessary to <link
linkend='MenuItem-OOF.Mesh.Equation.Activate'>activate</link> a
<link
linkend='Section-Concepts-Mesh-Equation-PlaneFlux'>plane-flux
equation</link> in order to solve for the out-of-plane derivatives
of a &field;.
</para>"""
))
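# Illustrative scripting sketch (not part of the original sources): the usual
# order for the Field commands registered above is define, then activate, and
# (in 2-D builds) optionally force the field in-plane.  The mesh path and the
# Temperature field are hypothetical placeholders for names available in an
# OOF2 scripting session; these menu items act on the default subproblem and
# are deprecated in favor of the OOF.Subproblem versions.
def _example_field_setup():
    OOF.Mesh.Field.Define(mesh='micro:skeleton:mesh', field=Temperature)
    OOF.Mesh.Field.Activate(mesh='micro:skeleton:mesh', field=Temperature)
    OOF.Mesh.Field.In_Plane(mesh='micro:skeleton:mesh', field=Temperature)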
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
# Field initialization
# Field initialization often involves FloatBC initialization, which is
# really just another type of Field initialization.
## Assign an initializer to a field. This doesn't actually *apply*
## the initializer, so field values at nodes aren't changed.
def initField(menuitem, mesh, field, initializer):
# This routine is repeated almost verbatim in meshIO.py, where
# it's used to initialize meshes loaded from files.
meshcontext = ooflib.engine.mesh.meshes[mesh]
meshcontext.reserve()
meshcontext.begin_writing()
try:
meshcontext.set_field_initializer(field, initializer)
finally:
meshcontext.end_writing()
meshcontext.cancel_reservation()
switchboard.notify("field initializer set")
# for subproblem in meshcontext.subproblems():
# if field in subproblem.all_fields():
# subproblem.changed()
# switchboard.notify("redraw")
meshmenu.addItem(oofmenu.OOFMenuItem(
'Set_Field_Initializer',
callback = initField,
params=[whoville.WhoParameter('mesh', ooflib.engine.mesh.meshes,
tip=parameter.emptyTipString),
meshparameters.FieldParameter('field',
tip=parameter.emptyTipString,
outofplane=True),
fieldinit.FieldInitParameter('initializer',
tip=parameter.emptyTipString)
],
help="Determine how to assign values to a Field on a Mesh.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/menu/initfield.xml')
))
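# Illustrative scripting sketch (not part of the original sources): as noted in
# the comments above, Set_Field_Initializer only records the initializer; the
# nodal values change when Apply_Field_Initializers (registered further below)
# is invoked.  The mesh path, the Temperature field and the ConstScalarFieldInit
# initializer are assumed scripting-session names, used here only as placeholders.
def _example_field_initialization():
    OOF.Mesh.Set_Field_Initializer(
        mesh='micro:skeleton:mesh', field=Temperature,
        initializer=ConstScalarFieldInit(value=0.0))
    OOF.Mesh.Apply_Field_Initializers(mesh='micro:skeleton:mesh')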
# When using subproblems, the field initializers have to be copied
# separately from the Field state, because the initializers live
# in the mesh and the state flags live in the subproblems.
def _copyFieldInits(menuitem, source, target):
if source == target:
return
if parallel_enable.enabled():
        meshIPC.ipcmeshmenu.Copy_Field_Initializers(source=source,target=target)
return
notifications=[]
source_mesh = ooflib.engine.mesh.meshes[source]
target_mesh = ooflib.engine.mesh.meshes[target]
source_mesh.begin_reading()
target_mesh.reserve()
target_mesh.begin_writing()
try:
# Copy Field initializers
source_fields = source_mesh.all_subproblem_fields()
target_fields = target_mesh.all_subproblem_fields()
for f in source_fields:
if f in target_fields:
try:
initializer=source_mesh.initializers[f]
except KeyError:
pass
else:
target_mesh.set_field_initializer(f, initializer)
                    notifications.append(("field initialized",))
        # Copy FloatBC initializers
for bcname in source_mesh.allBndyCondNames():
initializer = source_mesh.get_bc_initializer(bcname)
if initializer:
# Check that the target mesh has a FloatBC with this name
try:
targetbc = target_mesh.getBdyCondition(bcname)
except KeyError:
pass
else:
if isinstance(targetbc, bdycondition.FloatBC):
target_mesh.set_bc_initializer(bcname, initializer)
finally:
source_mesh.end_reading()
target_mesh.end_writing()
target_mesh.cancel_reservation()
for n in notifications:
switchboard.notify(*n)
meshmenu.addItem(oofmenu.OOFMenuItem(
'Copy_Field_Initializers',
callback=_copyFieldInits,
params=[whoville.WhoParameter('source', ooflib.engine.mesh.meshes,
tip=parameter.emptyTipString),
whoville.WhoParameter('target', ooflib.engine.mesh.meshes,
tip=parameter.emptyTipString)],
help="Copy all of the relevant Field initializers from one Mesh to another.",
discussion="""<para>
Copy all of the &field; initialization functions from the source
&mesh; to the target &mesh;. This does <emphasis>not</emphasis> actually
initialize the &fields; in the target &mesh;. If a &field; is not
defined in the target &mesh;, its initializer will not be copied.
</para>"""
))
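# Illustrative scripting sketch (not part of the original sources): because the
# initializers live in the Mesh while the defined/active flags live in the
# subproblems, a complete copy takes the two commands below.  The mesh paths
# are hypothetical placeholders.
def _example_copy_state_and_initializers():
    OOF.Mesh.Copy_Field_State(source='micro:skel:meshA', target='micro:skel:meshB')
    OOF.Mesh.Copy_Field_Initializers(source='micro:skel:meshA',
                                     target='micro:skel:meshB')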
def _clearFieldInit(menuitem, mesh, field):
themesh = ooflib.engine.mesh.meshes[mesh]
themesh.reserve()
themesh.begin_writing()
try:
themesh.remove_initializer(field)
finally:
themesh.end_writing()
themesh.cancel_reservation()
switchboard.notify("field initializer set")
meshmenu.addItem(oofmenu.OOFMenuItem(
'Clear_Field_Initializer',
callback=_clearFieldInit,
params=[whoville.WhoParameter('mesh', ooflib.engine.mesh.meshes,
tip=parameter.emptyTipString),
meshparameters.FieldParameter('field',
outofplane=True,
tip=parameter.emptyTipString)],
help="Remove the initializer for the given Field.",
discussion="""<para>
Remove the initializer for the given &field; from the given
&mesh;. This does not change the values of the &field; itself,
but prevents it from being reinitialized later.
</para>"""
))
def _clearFieldInits(menuitem, mesh):
themesh = ooflib.engine.mesh.meshes[mesh]
themesh.reserve()
themesh.begin_writing()
try:
for fld in themesh.all_subproblem_fields():
themesh.remove_initializer(fld)
themesh.remove_all_bc_initializers()
finally:
themesh.end_writing()
themesh.cancel_reservation()
switchboard.notify("field initializer set")
meshmenu.addItem(oofmenu.OOFMenuItem(
'Clear_Field_Initializers',
callback=_clearFieldInits,
params=[whoville.WhoParameter('mesh', ooflib.engine.mesh.meshes,
tip=parameter.emptyTipString)],
help="Remove all Field initializers from the current Mesh.",
discussion="""<para>
Remove all the &field; and boundary condition initializers from
the given &mesh;. This does not change the values of the &fields;
themselves, but prevents them from being reinitialized later.
</para>"""
))
def applyFieldInits(menuitem, mesh):
themesh = ooflib.engine.mesh.meshes[mesh]
themesh.reserve()
themesh.begin_writing()
try:
themesh.initialize_fields(themesh.getObject().getCurrentTime())
themesh.initialize_bcs(themesh.getObject().getCurrentTime())
finally:
themesh.end_writing()
themesh.cancel_reservation()
switchboard.notify("mesh data changed", themesh)
themesh.setStatus(meshstatus.Unsolved("Fields initialized."))
switchboard.notify("redraw")
meshmenu.addItem(oofmenu.OOFMenuItem(
'Apply_Field_Initializers',
callback=applyFieldInits,
params=[whoville.WhoParameter('mesh', ooflib.engine.mesh.meshes,
tip=parameter.emptyTipString)],
help="Initialize all Fields.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/menu/applyinit.xml')
))
def _applyFieldInitsAtTime(menuitem, mesh, time):
themesh = ooflib.engine.mesh.meshes[mesh]
themesh.reserve()
themesh.begin_writing()
try:
themesh.initialize_fields(time)
themesh.initialize_bcs(time)
finally:
themesh.end_writing()
themesh.cancel_reservation()
switchboard.notify("mesh data changed", themesh)
themesh.setStatus(meshstatus.Unsolved("Fields initialized."))
switchboard.notify("draw at time", time)
meshmenu.addItem(oofmenu.OOFMenuItem(
'Apply_Field_Initializers_at_Time',
callback=_applyFieldInitsAtTime,
params=[whoville.WhoParameter('mesh', ooflib.engine.mesh.meshes,
tip=parameter.emptyTipString),
parameter.FloatParameter('time', 0.0,
tip=parameter.emptyTipString)],
help="Initialize all Fields and reset the Mesh's time.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/menu/applyinittime.xml')
))
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
# Equations
eqnmenu = meshmenu.addItem(oofmenu.OOFMenuItem('Equation',
help='Activate equations.'))
def _activateEquation(menuitem, mesh, equation):
if parallel_enable.enabled():
meshIPC.ipceqnmenu.Activate(mesh=mesh,equation=equation)
else:
meshcontext = ooflib.engine.mesh.meshes[mesh]
subpcontext = meshcontext.get_default_subproblem()
subpcontext.reserve()
subpcontext.begin_writing()
try:
subpcontext.getObject().activate_equation(equation)
finally:
subpcontext.end_writing()
subpcontext.cancel_reservation()
subpcontext.autoenableBCs()
switchboard.notify('equation activated', subpcontext.path(),
equation.name(), 1)
subpcontext.changed("Equation activated.")
def _deactivateEquation(menuitem, mesh, equation):
if parallel_enable.enabled():
meshIPC.ipceqnmenu.Deactivate(mesh=mesh,equation=equation)
else:
meshcontext = ooflib.engine.mesh.meshes[mesh]
subpcontext = meshcontext.get_default_subproblem()
subpcontext.reserve()
subpcontext.begin_writing()
try:
subpcontext.getObject().deactivate_equation(equation)
finally:
subpcontext.end_writing()
subpcontext.cancel_reservation()
switchboard.notify('equation activated', subpcontext.path(),
equation.name(), 0)
subpcontext.autoenableBCs()
subpcontext.changed("Equation deactivated.")
eqnmenu.addItem(oofmenu.OOFMenuItem(
'Activate',
callback=_activateEquation,
params=[whoville.WhoParameter('mesh', ooflib.engine.mesh.meshes,
tip=parameter.emptyTipString),
meshparameters.EquationParameter('equation',
tip=parameter.emptyTipString)
],
help="Activate an Equation. The Solver solves the active Equations.",
discussion="""<para>
Activate the given &equation; on the default &subproblem; on the
given &mesh;. Activated &equations; are the ones that will be
<link linkend='MenuItem-OOF.Mesh.Solve'>solved</link>. For a
solution to be possible, the active &equations; must involve
&fluxes; that are produced by &properties; in the &mesh;, and
those &properties; must couple to <link
linkend='MenuItem-OOF.Mesh.Field.Define'>defined</link> &fields;.
There must be as many active &equations; as there are <link
    linkend='MenuItem-OOF.Mesh.Field.Activate'>active</link> &fields;.</para>
<para> DEPRECATED. Use <xref
linkend="MenuItem-OOF.Subproblem.Equation.Activate"/> instead.
</para>"""
))
eqnmenu.addItem(oofmenu.OOFMenuItem(
'Deactivate',
callback=_deactivateEquation,
params=[whoville.WhoParameter('mesh', ooflib.engine.mesh.meshes,
tip=parameter.emptyTipString),
meshparameters.EquationParameter('equation',
tip=parameter.emptyTipString)
],
help="Deactivate an Equation. The Solver solves the active Equations.",
discussion="""<para>
Deactivate the given &equation; on the default &subproblem; on the
    given &mesh;.  See <xref
    linkend='MenuItem-OOF.Mesh.Equation.Activate'/>.</para>
    <para> DEPRECATED. Use <xref
linkend="MenuItem-OOF.Subproblem.Equation.Deactivate"/> instead.
</para>"""
))
###########################################
# Cross sections
csmenu = meshmenu.addItem(oofmenu.OOFMenuItem(
'Cross_Section',
help="Create and manipulate Mesh cross sections for plotting.",
discussion=xmlmenudump.loadFile("DISCUSSIONS/engine/menu/cross_section.xml")
))
def csnameresolver(param, name):
if param.automatic():
basename = 'cs'
else:
basename = name
meshname = param.group['mesh'].value
if meshname is not None:
meshpath = labeltree.makePath(meshname)
meshctxt = ooflib.engine.mesh.meshes[meshpath]
return meshctxt.uniqueCSName(basename)
csnameparam = parameter.AutomaticNameParameter(
'name', value=automatic.automatic, tip="Name of the cross section.",
resolver=csnameresolver)
csmeshparam = whoville.WhoParameter('mesh', ooflib.engine.mesh.meshes,
tip=parameter.emptyTipString)
csparamgroup = parameter.ParameterGroup(csnameparam, csmeshparam)
def _newCS(menuitem, mesh, name, cross_section):
meshctxt = ooflib.engine.mesh.meshes[mesh]
meshctxt.reserve()
meshctxt.begin_writing()
try:
meshctxt.addCrossSection(name, cross_section)
finally:
meshctxt.end_writing()
meshctxt.cancel_reservation()
switchboard.notify("cross sections changed")
switchboard.notify("redraw")
csmenu.addItem(oofmenu.OOFMenuItem(
'New',
callback=_newCS,
params=csparamgroup + [
parameter.RegisteredParameter('cross_section',
meshcrosssection.MeshCrossSection,
tip="New cross section object.") ],
help="Create a new cross section on a Mesh.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/menu/new_cross_section.xml')
))
def _delCS(menuitem, mesh, name):
meshctxt = ooflib.engine.mesh.meshes[mesh]
meshctxt.reserve()
meshctxt.begin_writing()
try:
meshctxt.removeCrossSection(name)
finally:
meshctxt.end_writing()
meshctxt.cancel_reservation()
switchboard.notify("cross sections changed")
switchboard.notify("redraw")
csmenu.addItem(oofmenu.OOFMenuItem(
'Remove',
callback=_delCS,
params=[whoville.WhoParameter('mesh', ooflib.engine.mesh.meshes,
tip=parameter.emptyTipString),
parameter.StringParameter('name', tip='Cross section to remove.')],
help='Delete a cross section from a mesh.',
discussion="""<para>
Delete the cross section named <varname>name</varname> from the &mesh;
named <varname>mesh</varname>.
</para>"""))
def _selectCS(menuitem, mesh, cross_section):
meshctxt = ooflib.engine.mesh.meshes[mesh]
meshctxt.reserve()
meshctxt.begin_writing()
try:
meshctxt.selectCrossSection(cross_section)
finally:
meshctxt.end_writing()
meshctxt.cancel_reservation()
switchboard.notify("cross sections changed")
switchboard.notify("redraw")
csmenu.addItem(oofmenu.OOFMenuItem(
'Select',
callback=_selectCS,
params=[whoville.WhoParameter('mesh', ooflib.engine.mesh.meshes,
tip=parameter.emptyTipString),
parameter.StringParameter('cross_section', tip='Cross section to select.')],
help="Select a cross section on a mesh.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/menu/select_cs.xml')
))
def _deselectCS(menuitem, mesh):
meshctxt = ooflib.engine.mesh.meshes[mesh]
meshctxt.reserve()
meshctxt.begin_writing()
try:
meshctxt.deselectCrossSection()
finally:
meshctxt.end_writing()
meshctxt.cancel_reservation()
switchboard.notify("cross sections changed")
switchboard.notify("redraw")
csmenu.addItem(oofmenu.OOFMenuItem(
'Deselect',
callback=_deselectCS,
params=[whoville.WhoParameter('mesh', ooflib.engine.mesh.meshes,
tip=parameter.emptyTipString)],
help="Deselect all cross sections on a mesh.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/menu/deselect_cs.xml')
))
def _renameCS(menuitem, mesh, cross_section, name):
meshctxt = ooflib.engine.mesh.meshes[mesh]
meshctxt.reserve()
meshctxt.begin_writing()
try:
meshctxt.renameCrossSection(cross_section, name)
finally:
meshctxt.end_writing()
meshctxt.cancel_reservation()
switchboard.notify("cross sections changed")
csmenu.addItem(oofmenu.OOFMenuItem(
'Rename',
callback=_renameCS,
params=[whoville.WhoParameter('mesh', ooflib.engine.mesh.meshes,
tip=parameter.emptyTipString),
parameter.StringParameter('cross_section',
tip='Cross section to rename.'),
parameter.StringParameter('name',
tip='New name for the cross section.')
],
help="Rename a cross section on a mesh.",
discussion="<para>Assign a new name to a cross section.</para>"))
def _editCS(menuitem, mesh, name, cross_section):
meshctxt = ooflib.engine.mesh.meshes[mesh]
meshctxt.reserve()
meshctxt.begin_writing()
try:
meshctxt.replaceCrossSection(name, cross_section)
finally:
meshctxt.end_writing()
meshctxt.cancel_reservation()
switchboard.notify("cross sections changed")
switchboard.notify("redraw")
csmenu.addItem(oofmenu.OOFMenuItem(
'Edit',
callback=_editCS,
params=[whoville.WhoParameter('mesh', ooflib.engine.mesh.meshes,
tip=parameter.emptyTipString),
parameter.StringParameter('name', tip='Cross section to edit.'),
parameter.RegisteredParameter('cross_section',
meshcrosssection.MeshCrossSection,
tip='New value for the cross section.')
],
help="Reparametrize a cross section on a mesh.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/menu/edit_cs.xml')
))
def _copyCS(menuitem, current, cross_section, mesh, name):
sourcemesh = ooflib.engine.mesh.meshes[current]
sourcemesh.begin_reading()
try:
cs = sourcemesh.getCrossSection(cross_section).clone()
finally:
sourcemesh.end_reading()
targetmesh = ooflib.engine.mesh.meshes[mesh]
targetmesh.reserve()
targetmesh.begin_writing()
try:
targetmesh.addCrossSection(name,cs)
finally:
targetmesh.end_writing()
targetmesh.cancel_reservation()
switchboard.notify("cross sections changed")
csmenu.addItem(oofmenu.OOFMenuItem(
'Copy',
callback=_copyCS,
params=[whoville.WhoParameter('current', ooflib.engine.mesh.meshes,
tip='Mesh to copy the cross section from.'),
parameter.StringParameter('cross_section',
tip='Cross section to copy.')
]
+ parameter.ParameterGroup(
whoville.WhoParameter('mesh', ooflib.engine.mesh.meshes,
tip='Mesh to copy the cross section to.'),
parameter.AutomaticNameParameter('name',
value=automatic.automatic,
resolver=csnameresolver,
tip='Name of the copied cross section.')),
help="Copy a cross section, possibly to a different Mesh.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/menu/copy_cs.xml')
))
#######################################
def saveMesh(menuitem, filename, mode, format, mesh):
from ooflib.engine.IO import meshIO # avoids import loop
meshcontext = ooflib.engine.mesh.meshes[mesh]
meshcontext.begin_reading()
try:
if meshcontext.outOfSync():
raise ooferror.ErrUserError(
"The Mesh must be rebuilt before it can be saved.")
meshpath = labeltree.makePath(mesh)
skelpath = meshpath[:2]
skelcontext = skeletoncontext.skeletonContexts[skelpath]
if format==datafile.ABAQUS:
meshIO.writeABAQUSfromMesh(filename, mode.string(), meshcontext)
else:
dfile = datafile.writeDataFile(filename, mode.string(), format)
microstructureIO.writeMicrostructure(dfile,
skelcontext.getParent())
skeletonIO.writeSkeleton(dfile, skelcontext)
meshIO.writeMesh(dfile, meshcontext)
dfile.close()
finally:
meshcontext.end_reading()
OOF.File.Save.addItem(oofmenu.OOFMenuItem(
'Mesh',
callback = saveMesh,
ordering=80,
params = [
filenameparam.WriteFileNameParameter('filename', tip="Name of the file."),
filenameparam.WriteModeParameter(
'mode', tip="'w' to (over)write and 'a' to append."),
enum.EnumParameter('format', datafile.DataFileFormatExt, datafile.ASCII,
tip="Format of the file."),
SyncMeshParameter('mesh', tip='Name of the Mesh.')],
help="Save a Mesh to a file.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/menu/savemesh.xml')
))
def _fixmenu(*args):
if ooflib.engine.mesh.meshes.nActual() == 0:
OOF.File.Save.Mesh.disable()
else:
OOF.File.Save.Mesh.enable()
_fixmenu()
switchboard.requestCallback(('new who', 'Mesh'), _fixmenu)
switchboard.requestCallback(('remove who', 'Mesh'), _fixmenu)
##########################
def modifyMesh(menuitem, mesh, modifier):
    # The structure is the same as "skeletonmenu._modify()".
meshcontext = ooflib.engine.mesh.meshes[mesh]
meshcontext.reserve()
meshcontext.begin_writing()
try:
modifier.apply(meshcontext)
finally:
meshcontext.end_writing()
meshcontext.cancel_reservation()
modifier.signal(meshcontext)
modifier.setStatus(meshcontext)
switchboard.notify('Mesh modified', mesh, modifier) # caught by Mesh page
OOF.Mesh.addItem(oofmenu.OOFMenuItem(
'Modify',
callback=modifyMesh,
params=[
whoville.WhoParameter('mesh', ooflib.engine.mesh.meshes, tip=parameter.emptyTipString),
parameter.RegisteredParameter('modifier', meshmod.MeshModification,
tip="Mesh modifier.")
],
help="Make changes to a Mesh.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/menu/modify_mesh.xml')
))
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
# SC Patch Recovery
if config.devel()>=1:
def recoverFluxes(menuitem, mesh):
meshcontext = ooflib.engine.mesh.meshes[mesh]
skel = meshcontext.getSkeleton()
femesh = meshcontext.femesh()
femesh.create_scpatch(skel)
femesh.flux_recovery()
OOF.Mesh.addItem(oofmenu.OOFMenuItem(
'SCPRecovery',
callback=recoverFluxes,
params=[whoville.WhoParameter('mesh', ooflib.engine.mesh.meshes,
tip=parameter.emptyTipString)],
help="Superconvergent Patch Recovery."))
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
# Putting this item in meshdatacache.spy causes a nasty import loop.
from ooflib.SWIG.engine import meshdatacache
def _dummy(*args, **kwargs): pass
settingsmenu.addItem(oofmenu.OOFMenuItem(
'Data_Cache_Type',
callback=_dummy, # Just setting the parameter is enough.
params = [meshdatacache.cacheTypeParam],
help="Set the storage method for time step data in new Meshes.",
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/menu/datacachetype.xml')
))
def _consistencyTolerance(menuitem, tolerance, max_iterations):
subproblemcontext.consistencyTolerance = tolerance
evolve.maxconsistencysteps = max_iterations
settingsmenu.addItem(oofmenu.OOFMenuItem(
"SelfConsistency",
callback=_consistencyTolerance,
params=[
parameter.FloatParameter(
"tolerance",
subproblemcontext.consistencyTolerance,
tip="Relative tolerance for consistency."),
parameter.IntParameter(
"max_iterations",
evolve.maxconsistencysteps,
tip="Maximum number of iterations to perform.")
],
help="Set the tolerance and iteration limit used when self-consistently solving multiple subproblems simultaneously.",
discussion=xmlmenudump.loadFile(
'DISCUSSIONS/engine/menu/selfconsistency.xml')))
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
from ooflib.SWIG.engine import properties
def _numericalDiff(menuitem, epsilon):
properties.cvar.deriv_eps = epsilon
settingsmenu.addItem(oofmenu.OOFMenuItem(
"Numerical_Differentiation",
callback=_numericalDiff,
params=[
parameter.FloatParameter(
"epsilon",
properties.cvar.deriv_eps,
tip="Increment for numerical differentiation")],
help="Set the increment used for approximate derivatives when exact derivatives are not available.",
discussion=xmlmenudump.loadFile(
'DISCUSSIONS/engine/menu/numericaldiff.xml')
))
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
def _removeAllSolvers(menuitem, mesh):
meshctxt = ooflib.engine.mesh.meshes[mesh]
for subprob in meshctxt.subproblems():
subprob.begin_writing()
try:
subprob.solver_mode = None
finally:
subprob.end_writing()
switchboard.notify("subproblem solvers changed")
OOF.Mesh.addItem(oofmenu.OOFMenuItem(
'Remove_All_Solvers',
callback=_removeAllSolvers,
params=[whoville.WhoParameter('mesh', ooflib.engine.mesh.meshes,
tip=parameter.emptyTipString)],
help='Remove the Solvers from all Subproblems.',
discussion=xmlmenudump.loadFile('DISCUSSIONS/engine/menu/removesolvers.xml')
))
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
def _copyAllSolvers(menuitem, source, target):
sourceMesh = ooflib.engine.mesh.meshes[source]
targetMesh = ooflib.engine.mesh.meshes[target]
sourceMesh.begin_reading()
solvers = {}
try:
for subp in sourceMesh.subproblems():
if subp.solver_mode is not None:
solvers[subp.name()] = subp.solver_mode.clone()
finally:
sourceMesh.end_reading()
meshpath = targetMesh.path()
for name, solver in solvers.items():
subppath = meshpath + ":" + name
try:
targetsubp = ooflib.engine.subproblemcontext.subproblems[subppath]
except KeyError:
pass
else:
subproblemmenu.setSolver(menuitem, subppath, solver)
OOF.Mesh.addItem(oofmenu.OOFMenuItem(
'Copy_All_Solvers',
callback=_copyAllSolvers,
params=[
whoville.WhoParameter('source',
ooflib.engine.mesh.meshes,
tip="Mesh to copy the solvers from."),
whoville.WhoParameter('target',
ooflib.engine.mesh.meshes,
tip="Mesh to which to copy the solvers.")
],
help="Copy all solvers from one mesh to another.",
discussion=xmlmenudump.loadFile(
'DISCUSSIONS/engine/menu/copyallsolvers.xml')
))
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
def _setSubproblemOrder(menuitem, mesh, subproblems):
meshctxt = ooflib.engine.mesh.meshes[mesh]
for order,subprobname in enumerate(subproblems):
subprob = meshctxt.get_subproblem(subprobname)
subprob.solveOrder = order
switchboard.notify("subproblems reordered", meshctxt)
OOF.Mesh.addItem(oofmenu.OOFMenuItem(
'ReorderSubproblems',
callback=_setSubproblemOrder,
params=[whoville.WhoParameter(
'mesh', ooflib.engine.mesh.meshes,
tip=parameter.emptyTipString),
parameter.ListOfStringsParameter(
'subproblems',
tip='A list of Subproblem names in the order in which they should be solved.')
],
help="Set the order in which subproblems will be solved.",
discussion=xmlmenudump.loadFile(
'DISCUSSIONS/engine/menu/reordersubp.xml')
))
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
import time
def _solve(menuitem, mesh, endtime):
meshctxt = ooflib.engine.mesh.meshes[mesh]
meshctxt.reserve()
meshctxt.begin_writing()
try:
if not meshctxt.status.solvable:
raise ooferror.ErrUserError('Mesh is not solvable! '
+ meshctxt.status.getDetails())
t = time.clock()
evolve.evolve(meshctxt, endtime)
reporter.report("Elapsed time:", time.clock()-t, "seconds")
finally:
meshctxt.end_writing()
meshctxt.cancel_reservation()
switchboard.notify("mesh solved", meshctxt)
switchboard.notify("draw at time", meshctxt.getCurrentTime())
OOF.Mesh.addItem(oofmenu.OOFMenuItem(
'Solve',
callback=_solve,
params=[whoville.WhoParameter('mesh', ooflib.engine.mesh.meshes,
tip=parameter.emptyTipString),
parameter.FloatParameter('endtime', tip='Ending time.')
],
help='Solve or evolve the mesh.',
discussion=xmlmenudump.loadFile("DISCUSSIONS/engine/menu/solve.xml")
))
| 1.25 | 1 |
base/include/wx-3.0/wx/wxPython/i_files/_core_ex.py | jorgediazjr/dials-dev20191018 | 0 | 12787516 | <reponame>jorgediazjr/dials-dev20191018
#----------------------------------------------------------------------------
# Use Python's bool constants if available, make some if not
try:
True
except NameError:
__builtins__.True = 1==1
__builtins__.False = 1==0
def bool(value): return not not value
__builtins__.bool = bool
# workarounds for bad wxRTTI names
__wxPyPtrTypeMap['wxGauge95'] = 'wxGauge'
__wxPyPtrTypeMap['wxSlider95'] = 'wxSlider'
__wxPyPtrTypeMap['wxStatusBar95'] = 'wxStatusBar'
#----------------------------------------------------------------------------
# Load version numbers from __version__... Ensure that major and minor
# versions are the same for both wxPython and wxWidgets.
from __version__ import *
__version__ = VERSION_STRING
assert MAJOR_VERSION == _core_.MAJOR_VERSION, "wxPython/wxWidgets version mismatch"
assert MINOR_VERSION == _core_.MINOR_VERSION, "wxPython/wxWidgets version mismatch"
if RELEASE_VERSION != _core_.RELEASE_VERSION:
import warnings
warnings.warn("wxPython/wxWidgets release number mismatch")
def version():
"""Returns a string containing version and port info"""
if wx.Platform == '__WXMSW__':
port = 'msw'
elif wx.Platform == '__WXMAC__':
if 'wxOSX-carbon' in wx.PlatformInfo:
port = 'osx-carbon'
else:
port = 'osx-cocoa'
elif wx.Platform == '__WXGTK__':
port = 'gtk'
if 'gtk2' in wx.PlatformInfo:
port = 'gtk2'
elif 'gtk3' in wx.PlatformInfo:
port = 'gtk3'
else:
port = '?'
return "%s %s (classic)" % (wx.VERSION_STRING, port)
#----------------------------------------------------------------------------
# Set wxPython's default string<-->unicode conversion encoding from
# the locale, but only if Python's default hasn't been changed. (We
# assume that if the user has customized it already then that is the
# encoding we need to use as well.)
#
# The encoding selected here is used when string or unicode objects
# need to be converted in order to pass them to wxWidgets. Please be
# aware that the default encoding within the same locale may be
# slightly different on different platforms. For example, please see
# http://www.alanwood.net/demos/charsetdiffs.html for differences
# between the common latin/roman encodings.
default = _sys.getdefaultencoding()
if default == 'ascii':
import locale
import codecs
try:
if hasattr(locale, 'getpreferredencoding'):
default = locale.getpreferredencoding()
else:
default = locale.getdefaultlocale()[1]
codecs.lookup(default)
except (ValueError, LookupError, TypeError):
default = _sys.getdefaultencoding()
del locale
del codecs
if default:
wx.SetDefaultPyEncoding(default)
del default
#----------------------------------------------------------------------------
class PyDeadObjectError(AttributeError):
pass
class _wxPyDeadObject(object):
"""
Instances of wx objects that are OOR capable will have their __class__
changed to this class when the C++ object is deleted. This should help
prevent crashes due to referencing a bogus C++ pointer.
"""
reprStr = "wxPython wrapper for DELETED %s object! (The C++ object no longer exists.)"
attrStr = "The C++ part of the %s object has been deleted, attribute access no longer allowed."
def __repr__(self):
if not hasattr(self, "_name"):
self._name = "[unknown]"
return self.reprStr % self._name
def __getattr__(self, *args):
if not hasattr(self, "_name"):
self._name = "[unknown]"
raise PyDeadObjectError(self.attrStr % self._name)
def __nonzero__(self):
return 0
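# Illustrative sketch (not part of the original wxPython sources): once a
# window's C++ half has been destroyed its Python proxy is re-classed to
# _wxPyDeadObject, so attribute access raises PyDeadObjectError and the proxy
# evaluates as false.  The `window` argument is a hypothetical placeholder.
def _is_window_alive(window):
    """Return True while the C++ part of `window` still exists."""
    return bool(window)     # dead proxies return 0 from __nonzero__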
class PyUnbornObjectError(AttributeError):
pass
class _wxPyUnbornObject(object):
"""
Some stock objects are created when the wx._core module is
imported, but their C++ instance is not created until the wx.App
object is created and initialized. These object instances will
temporarily have their __class__ changed to this class so an
exception will be raised if they are used before the C++ instance
is ready.
"""
reprStr = "wxPython wrapper for UNBORN object! (The C++ object is not initialized yet.)"
attrStr = "The C++ part of this object has not been initialized, attribute access not allowed."
def __repr__(self):
#if not hasattr(self, "_name"):
# self._name = "[unknown]"
return self.reprStr #% self._name
def __getattr__(self, *args):
#if not hasattr(self, "_name"):
# self._name = "[unknown]"
raise PyUnbornObjectError(self.attrStr) # % self._name )
def __nonzero__(self):
return 0
#----------------------------------------------------------------------------
def CallAfter(callableObj, *args, **kw):
"""
Call the specified function after the current and pending event
handlers have been completed. This is also good for making GUI
method calls from non-GUI threads. Any extra positional or
keyword args are passed on to the callable when it is called.
:see: `wx.CallLater`
"""
assert callable(callableObj), "callableObj is not callable"
app = wx.GetApp()
assert app is not None, 'No wx.App created yet'
if not hasattr(app, "_CallAfterId"):
app._CallAfterId = wx.NewEventType()
app.Connect(-1, -1, app._CallAfterId,
lambda event: event.callable(*event.args, **event.kw) )
evt = wx.PyEvent()
evt.SetEventType(app._CallAfterId)
evt.callable = callableObj
evt.args = args
evt.kw = kw
wx.PostEvent(app, evt)
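# Illustrative usage sketch (not part of the original wxPython sources): the
# typical pattern is a worker thread handing its result back to the GUI thread
# through wx.CallAfter.  The `compute` callable and `status_label` widget are
# hypothetical placeholders.
def _callafter_example(status_label, compute):
    """Run `compute` off the GUI thread, then update `status_label` safely."""
    import threading
    def worker():
        result = compute()                      # heavy work off the GUI thread
        wx.CallAfter(status_label.SetLabel, "done: %s" % (result,))
    threading.Thread(target=worker).start()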
#----------------------------------------------------------------------------
class CallLater:
"""
A convenience class for `wx.Timer`, that calls the given callable
object once after the given amount of milliseconds, passing any
positional or keyword args. The return value of the callable is
    available after it has been run with the `GetResult` method.
If you don't need to get the return value or restart the timer
then there is no need to hold a reference to this object.
:see: `wx.CallAfter`
"""
__RUNNING = set()
def __init__(self, millis, callableObj, *args, **kwargs):
assert callable(callableObj), "callableObj is not callable"
self.millis = millis
self.callable = callableObj
self.SetArgs(*args, **kwargs)
self.runCount = 0
self.running = False
self.hasRun = False
self.result = None
self.timer = None
self.Start()
def Start(self, millis=None, *args, **kwargs):
"""
(Re)start the timer
"""
self.hasRun = False
if millis is not None:
self.millis = millis
if args or kwargs:
self.SetArgs(*args, **kwargs)
self.Stop()
self.timer = wx.PyTimer(self.Notify)
self.timer.Start(self.millis, wx.TIMER_ONE_SHOT)
self.running = True
self.__RUNNING.add(self)
Restart = Start
def Stop(self):
"""
Stop and destroy the timer.
"""
if self.timer is not None:
self.timer.Stop()
self.timer = None
self.__RUNNING.discard(self)
def GetInterval(self):
if self.timer is not None:
return self.timer.GetInterval()
else:
return 0
def IsRunning(self):
return self.timer is not None and self.timer.IsRunning()
def SetArgs(self, *args, **kwargs):
"""
(Re)set the args passed to the callable object. This is
useful in conjunction with Restart if you want to schedule a
new call to the same callable object but with different
parameters.
"""
self.args = args
self.kwargs = kwargs
def HasRun(self):
return self.hasRun
def GetResult(self):
return self.result
def Notify(self):
"""
The timer has expired so call the callable.
"""
if self.callable and getattr(self.callable, 'im_self', True):
self.runCount += 1
self.running = False
self.result = self.callable(*self.args, **self.kwargs)
self.hasRun = True
if not self.running:
# if it wasn't restarted, then cleanup
wx.CallAfter(self.Stop)
Interval = property(GetInterval)
Result = property(GetResult)
class FutureCall(CallLater):
"""A compatibility alias for `CallLater`."""
#----------------------------------------------------------------------------
# Control which items in this module should be documented by epydoc.
# We allow only classes and functions, which will help reduce the size
# of the docs by filtering out the zillions of constants, EVT objects,
# and etc that don't make much sense by themselves, but are instead
# documented (or will be) as part of the classes/functions/methods
# where they should be used.
class __DocFilter:
"""
A filter for epydoc that only allows non-Ptr classes and
functions, in order to reduce the clutter in the API docs.
"""
def __init__(self, globals):
self._globals = globals
def __call__(self, name):
import types
obj = self._globals.get(name, None)
        # only document classes and functions
if type(obj) not in [type, types.ClassType, types.FunctionType, types.BuiltinFunctionType]:
return False
        # skip other things that are private or will be documented as part of something else
if name.startswith('_') or name.startswith('EVT') or name.endswith('_swigregister') or name.endswith('Ptr') :
return False
# skip functions that are duplicates of static functions in a class
if name.find('_') != -1:
cls = self._globals.get(name.split('_')[0], None)
methname = name.split('_')[1]
if hasattr(cls, methname) and type(getattr(cls, methname)) is types.FunctionType:
return False
return True
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# Import other modules in this package that should show up in the
# "core" wx namespace
from _gdi import *
from _windows import *
from _controls import *
from _misc import *
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
| 2.09375 | 2 |
ferup_fileinfo.py | JeffpanUK/NuPyTools | 2 | 12787517 | <gh_stars>1-10
#-*-coding:utf-8-*-
#!\usr\bin\env py3
import os
import sys
import re
import codecs
class FileInfo(object):
'''
statistics of corpus
'''
def __init__(self, options, logger):
self.logger = logger
self.options = options
self.task = options['task']
self.punc = self.loadPuncs(options['punc'])
def loadPuncs(self, puncf):
try:
with codecs.open(puncf, 'r', 'utf-8') as punch:
punc = set([])
for line in punch:
punc.add(line.strip().split()[0])
punc = list(punc)
#rp = list(map(lambda x: '\\'+x, punc))
#rp = '[' + ''.join(rp) + ']'
rp = "[%s]"%(re.escape("".join(punc)))
return re.compile(rp)
except Exception as e:
self.logger.error(e)
def wordseg(self):
baseDir = self.options['InDir']
fs = os.listdir(baseDir)
wordRec = set([])
wordNo = 0
puncNo = 0
lineNo = 0
for f in fs:
tpuncNo = 0
twordNo = 0
tlineNo = 0
tlexc = set([])
with codecs.open(os.path.join(baseDir, f), 'r', 'utf-8') as fn:
self.logger.info("Processing: %s"%f)
for line in fn:
tlineNo += 1
line = line.strip()
tpuncNo += len(re.findall(self.punc, line))
words = re.sub(self.punc, ' ',line)
words = words.split()
twordNo += len(words)
tlexc = tlexc.union(set(words))
puncNo += tpuncNo
wordNo += twordNo
lineNo += tlineNo
wordRec = wordRec.union(tlexc)
self.logger.info("Line - %d, Words - %d, Lexical - %d, Punctuation - %d"\
%(tlineNo, twordNo, len(set(tlexc)), tpuncNo))
self.logger.info("Total Counts")
self.logger.info("Line - %d, Words - %d, Lexical - %d, Punctuation - %d"\
%(lineNo, wordNo, len(wordRec), puncNo))
self.logger.info("Average words per line: %.2f"%(wordNo/lineNo))
def process(self):
if self.task == 'ws':
self.wordseg()
else:
self.logger.warn("Not Supported Task Type %s." % (self.task))
if __name__ == '__main__':
import time
import logging
from argparse import ArgumentParser
parser = ArgumentParser(description='ferup-fileinfo')
parser.add_argument("--version", action="version", version="ferup-fileinfo 1.0")
parser.add_argument(type=str, action="store", dest="InDir", default="", help='input raw data directory')
parser.add_argument("-t", "--task", action="store", dest="task", default="ws", help='pw - prosody word; ws - word segmentation')
parser.add_argument("-p", "--punc", action="store", dest="punc", type=str, default=r"puncs.list", help='punctuation list file')
args = parser.parse_args()
options = vars(args)
logger = logging.getLogger()
formatter = logging.Formatter('[%(asctime)s][*%(levelname)s*][%(filename)s:%(lineno)d|%(funcName)s] - %(message)s', '%Y%m%d-%H:%M:%S')
file_handler = logging.FileHandler('LOG-ferup-fileinfo.txt', 'w')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
logger.setLevel(logging.INFO)
allStartTP = time.time()
appInst = FileInfo(options, logger)
appInst.process()
allEndTP = time.time()
logger.info("Operation Finished [Time Cost:%0.3f Seconds]" % float(allEndTP - allStartTP)) | 2.6875 | 3 |
ask.py | christoga/python | 5 | 12787518 | <reponame>christoga/python<filename>ask.py
print "What's your name?",
name = raw_input()
print "Hello %r!" % (name)
| 2.6875 | 3 |
tests/helpers.py | Degget1986/neo-mamba | 12 | 12787519 | from neo3.core import serialization
from neo3.core.serialization import BinaryReader, BinaryWriter
class SerializableObject(serialization.ISerializable):
def serialize(self, writer: BinaryWriter) -> None:
pass
def deserialize(self, reader: BinaryReader) -> None:
pass
def __len__(self):
return 0 | 2.53125 | 3 |
pythonaulas/Aula 13/Desafio 052.py | jrwarg/Estudos-Phyton | 0 | 12787520 | """
DESAFIO 052: Prime Numbers
Write a program that reads an integer and says whether or not it is a prime number.
"""
n = int(input('Digite um número: '))
v = True
if n < 2:
v = False
else:
for c in range(2, n - 1):
if n % c == 0:
v = False
if v:
print('{} é um número primo.'.format(n))
else:
print('{} não é um número primo.'.format(n))
| 3.9375 | 4 |
happytransformer/mwp/trainer.py | Mizar77/happy-transformer | 0 | 12787521 | from happytransformer.happy_trainer import HappyTrainer
from happytransformer.mwp.default_args import ARGS_MWP_TRAIN
class WPTrainer(HappyTrainer):
def train(self, input_filepath, args=ARGS_MWP_TRAIN):
raise NotImplementedError()
def eval(self, input_filepath):
raise NotImplementedError()
def test(self, input_filepath, pipeline):
raise NotImplementedError() | 2.09375 | 2 |
test/util/web/test_webapi.py | TomBlock/cate | 0 | 12787522 | <reponame>TomBlock/cate<gh_stars>0
import re
import unittest
from cate.util.web import webapi
class UrlPatternTest(unittest.TestCase):
def test_url_pattern_works(self):
re_pattern = webapi.url_pattern('/open/{{id1}}ws/{{id2}}wf')
matcher = re.fullmatch(re_pattern, '/open/34ws/a66wf')
self.assertIsNotNone(matcher)
self.assertEqual(matcher.groupdict(), {'id1': '34', 'id2': 'a66'})
re_pattern = webapi.url_pattern('/open/ws{{id1}}/wf{{id2}}')
matcher = re.fullmatch(re_pattern, '/open/ws34/wfa66')
self.assertIsNotNone(matcher)
self.assertEqual(matcher.groupdict(), {'id1': '34', 'id2': 'a66'})
x = 'C%3A%5CUsers%5CNorman%5CIdeaProjects%5Cccitools%5Cect-core%5Ctest%5Cui%5CTEST_WS_3'
re_pattern = webapi.url_pattern('/ws/{{base_dir}}/res/{{res_name}}/add')
matcher = re.fullmatch(re_pattern, '/ws/%s/res/SST/add' % x)
self.assertIsNotNone(matcher)
self.assertEqual(matcher.groupdict(), {'base_dir': x, 'res_name': 'SST'})
def test_url_pattern_ok(self):
self.assertEqual(webapi.url_pattern('/version'),
'/version')
self.assertEqual(webapi.url_pattern('{{num}}/get'),
'(?P<num>[^\;\/\?\:\@\&\=\+\$\,]+)/get')
self.assertEqual(webapi.url_pattern('/open/{{ws_name}}'),
'/open/(?P<ws_name>[^\;\/\?\:\@\&\=\+\$\,]+)')
self.assertEqual(webapi.url_pattern('/open/ws{{id1}}/wf{{id2}}'),
'/open/ws(?P<id1>[^\;\/\?\:\@\&\=\+\$\,]+)/wf(?P<id2>[^\;\/\?\:\@\&\=\+\$\,]+)')
def test_url_pattern_fail(self):
with self.assertRaises(ValueError) as cm:
webapi.url_pattern('/open/{{ws/name}}')
self.assertEqual(str(cm.exception), 'name in {{name}} must be a valid identifier, but got "ws/name"')
with self.assertRaises(ValueError) as cm:
webapi.url_pattern('/info/{{id}')
self.assertEqual(str(cm.exception), 'no matching "}}" after "{{" in "/info/{{id}"')
class WebAPIErrorTest(unittest.TestCase):
def test_plain(self):
self._plain(webapi.WebAPIServiceError)
self._plain(webapi.WebAPIRequestError)
def test_with_cause(self):
self._with_cause(webapi.WebAPIServiceError)
self._with_cause(webapi.WebAPIRequestError)
def _plain(self, cls):
try:
raise cls("haha")
except cls as e:
self.assertEqual(str(e), "haha")
self.assertEqual(e.cause, None)
def _with_cause(self, cls):
e1 = ValueError("a > 5")
try:
raise cls("hoho") from e1
except cls as e2:
self.assertEqual(str(e2), "hoho")
self.assertEqual(e2.cause, e1)
class WebAPIRequestHandlerTest(unittest.TestCase):
def test_to_status_error_empty(self):
status = webapi.WebAPIRequestHandler._to_status_error()
self.assertEqual(status, {'status': 'error'})
def test_to_status_error_plain(self):
try:
raise ValueError("test message")
except ValueError as error:
status = webapi.WebAPIRequestHandler._to_status_error(error)
self.assertEqual(status['error']['message'], 'test message')
self.assertEqual(status['error']['type'], 'ValueError')
self.assertIsNotNone(status['error']['traceback'])
self.assertEqual(status['status'], 'error')
status = webapi.WebAPIRequestHandler._to_status_error(error, type_name="MyErrorType")
self.assertEqual(status['error']['message'], 'test message')
self.assertEqual(status['error']['type'], 'MyErrorType')
self.assertIsNotNone(status['error']['traceback'])
self.assertEqual(status['status'], 'error')
status = webapi.WebAPIRequestHandler._to_status_error(error, message="another message")
self.assertEqual(status['error']['message'], 'another message')
self.assertEqual(status['error']['type'], 'ValueError')
self.assertIsNotNone(status['error']['traceback'])
self.assertEqual(status['status'], 'error')
def test_to_status_error_chained(self):
error1 = ValueError("error 1")
try:
raise ValueError("error 2") from error1
except ValueError as error:
status = webapi.WebAPIRequestHandler._to_status_error(error)
self.assertEqual(status['error']['message'], 'error 2')
self.assertEqual(status['error']['type'], 'ValueError')
self.assertIsNotNone(status['error']['traceback'])
self.assertEqual(status['status'], 'error')
| 2.515625 | 3 |
eosUtils/db.py | hysds/eosUtils | 0 | 12787523 | <filename>eosUtils/db.py
import os, sys, sqlite3, json
from datetime import datetime
from misc import getDatetimeFromDateString
#configured datasets
CONFIGURED_DATASETS = ['AIRS', 'MODIS-Terra', 'MODIS-Aqua', 'ALOS']
#select by time
SELECT_BY_TIME = '''select * from %s where
(starttime between :startTime and :endTime) or
(endtime between :startTime and :endTime) or
(starttime <= :startTime and endtime >= :endTime);'''
#select by time and space
SELECT_BY_TIME_AND_SPACE = '''select * from %s where
(
(starttime between :startTime and :endTime) or
(endtime between :startTime and :endTime) or
(starttime <= :startTime and endtime >= :endTime)
) and
(
(min_lat between :minLat and :maxLat) or
(max_lat between :minLat and :maxLat) or
(min_lat <= :minLat and max_lat >= :maxLat)
) and
(
(min_lon between :minLon and :maxLon) or
(max_lon between :minLon and :maxLon) or
(min_lon <= :minLon and max_lon >= :maxLon) or
(min_lon > max_lon and
(
min_lon between :minLon and :maxLon or
(max_lon + 360.) between :minLon and :maxLon or
(min_lon <= :minLon and (max_lon + 360.) >= :maxLon)
)
)
);'''
def queryDataset(dataset, starttime, endtime, latMin, latMax,
lonMin, lonMax, returnTimeSpaceInfo=False):
"""Return time/space info of granules that match query."""
#get sqlite database file and table name
if dataset.startswith('MYD'):
tableId = 'AQUA'
dbFile = os.path.join(sys.prefix, 'sqlite_data', 'MODIS-Aqua.db')
elif dataset.startswith('MOD'):
tableId = 'TERRA'
dbFile = os.path.join(sys.prefix, 'sqlite_data', 'MODIS-Terra.db')
elif dataset.startswith('AIRS'):
tableId = 'AIRS'
dbFile = os.path.join(sys.prefix, 'sqlite_data', 'AIRS.db')
elif dataset.startswith('CloudSat'):
tableId = 'CloudSat'
dbFile = os.path.join(sys.prefix, 'sqlite_data', 'CloudSat.db')
elif dataset == 'ALOS':
tableId = 'ALOS'
dbFile = os.path.join(sys.prefix, 'sqlite_data', 'ALOS.db')
else:
raise RuntimeError("Unrecognized dataset: %s" % dataset)
#get datetimes
starttime = getDatetimeFromDateString(starttime)
endtime = getDatetimeFromDateString(endtime)
#get sqlite connection and cursor
conn = sqlite3.connect(dbFile)
c = conn.cursor()
#if spatial query is global, just use time
if (latMin <= -85. and latMax >= 85. and lonMin <= -175. and lonMax >= 175.) or \
tableId == 'CloudSat':
select = SELECT_BY_TIME % tableId
else:
select = SELECT_BY_TIME_AND_SPACE % tableId
#query
c.execute(select, {'startTime': starttime, 'endTime': endtime,
'minLon': lonMin, 'maxLon': lonMax,
'minLat': latMin, 'maxLat': latMax})
#build dict of information
infoDict = {}
for fields in c:
if dataset == 'ALOS':
(objectid, st_t, end_t, bbox, min_lat, max_lat, min_lon, max_lon,
frame_number, orbit_number, orbit_repeat, track_number) = fields
else:
(objectid, st_t, end_t, min_lat, max_lat, min_lon, max_lon) = fields
#get MODIS-* objectids
if tableId == 'AQUA':
objectid = objectid.replace('MYD03', 'MYD*')[0:18]
elif tableId == 'TERRA':
objectid = objectid.replace('MOD03', 'MOD*')[0:18]
#if CloudSat, just send nulls
if dataset == 'CloudSat':
infoDict[objectid] = {
'starttime': getDatetimeFromDateString(st_t).isoformat(),
'endtime': end_t,
'lonMin': min_lon,
'lonMax': max_lon,
'latMin': min_lat,
'latMax': max_lat
}
elif dataset == 'ALOS':
infoDict[objectid] = {
'starttime': getDatetimeFromDateString(st_t).isoformat(),
'endtime': getDatetimeFromDateString(end_t).isoformat(),
'bbox': json.loads(bbox),
'lonMin': min_lon,
'lonMax': max_lon,
'latMin': min_lat,
'latMax': max_lat,
'frameNumber': frame_number,
'orbitNumber': orbit_number,
'orbitRepeat': orbit_repeat,
'trackNumber': track_number
}
else:
infoDict[objectid] = {
'starttime': getDatetimeFromDateString(st_t).isoformat(),
'endtime': getDatetimeFromDateString(end_t).isoformat(),
'lonMin': min_lon,
'lonMax': max_lon,
'latMin': min_lat,
'latMax': max_lat
}
#return dict or just list of granules
if returnTimeSpaceInfo: return infoDict
else:
        objectids = sorted(infoDict.keys())
        return objectids
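# Usage sketch (hypothetical time range and bounding box; requires the sqlite_data/*.db
# files installed under sys.prefix, as queryDataset expects):
#
#   info = queryDataset('AIRS', '2010-01-01 00:00:00', '2010-01-02 00:00:00',
#                       -10., 10., 100., 120., returnTimeSpaceInfo=True)
#   for objectid, meta in sorted(info.items()):
#       print(objectid, meta['starttime'], meta['endtime'])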
| 2.859375 | 3 |
DiffDriveControl.py | Starfunx/Robot_Simulation | 0 | 12787524 | # coding: utf-8
import numpy as np
import matplotlib.pyplot as plt
import Transform as Transform
import DiffDriveRobot
class Wheel(object):
"""docstring for Wheel."""
def __init__(self):
super(Wheel, self).__init__()
self.speed = 0
def setSpeed(self, speed):
self.speed = speed
def getSpeed(self):
return self.speed
def getDist(self, dT):
return self.speed * dT
class Robot(object):
"""docstring for Robot."""
def __init__(self, x, y, theta, robot):
super(Robot, self).__init__()
self.polygon = np.array([[-150, -150], [-150, 150], [150, 150], [150, -150], [-150, -150]],dtype =float)
self.x = x
self.y = y
self.theta = theta
self.robot = robot
self.MEntreAxes = 200
self.OEntreAxes = 250
self.xC = x
self.yC = y
self.thetaC = theta
self.XErr = 0
self.YErr = 0
self.ThetaErr = 0
self.DistErr = 0
self.CapErr = 0
self.alpha = []
self.thetaa = []
self.DistErra = []
    # mutators (setters)
def setX(self, x):
self.x = x
def setY(self, y):
self.y = y
def setTheta(self, theta):
self.theta = theta
def setXC(self, xC):
self.xC = xC
def setYC(self, yC):
self.yC = yC
def setThetaC(self, thetaC):
self.thetaC = thetaC
    # accessors (getters)
def getX(self):
return self.x
def getY(self):
return self.y
def getTheta(self):
return self.theta
    # other methods
    # functions describing how the robot behaves (the model)
def updateOdometry(self, dT):
dOL = self.robot.getLeftEncoderDist(dT)
dOR = self.robot.getRightEncoderDist(dT)
dXrobot = (dOR + dOL)/2
dTheta = (dOR - dOL)/self.OEntreAxes
self.theta = self.theta + dTheta
if(self.theta <= -np.pi): self.theta = self.theta + 2*np.pi
if(self.theta > np.pi): self.theta = self.theta - 2*np.pi
self.x = self.x + dXrobot*np.cos(self.theta)
self.y = self.y + dXrobot*np.sin(self.theta)
def computeError(self): # Equations 11 & 12
self.XErr = self.xC - self.x
self.YErr = self.yC - self.y
self.ThetaErr = self.thetaC - self.theta #unused
Kp = 1
Kalpha = 5
alpha = np.arctan2(self.YErr, self.XErr)-self.theta
if alpha <= -np.pi: alpha+= 2*np.pi
if alpha > +np.pi: alpha-= 2*np.pi
self.thetaa.append(self.theta)
self.alpha.append(alpha)
self.DistErr = Kp*np.sqrt(self.XErr**2 + self.YErr**2)*np.cos(alpha)
# self.CapErr = Kp*np.sin(alpha)*np.cos(alpha) + Kalpha*alpha
self.CapErr = Kalpha*np.sin(alpha)*np.cos(alpha)
self.DistErra.append(self.DistErr)
def setConsign(self):
V = self.DistErr
Omega = self.CapErr
VMG = (V - Omega * self.MEntreAxes/2)/1 #1 = wheelRadius
VMD = (V + Omega * self.MEntreAxes/2)/1
self.robot.setLeftMotorSpeed(VMG)
self.robot.setRightMotorSpeed(VMD)
def draw(self):
shape2 = np.transpose(Transform.rotate(self.polygon, self.theta))
shape2 = np.transpose(Transform.translate(np.transpose(shape2), self.x, self.y))
plt.plot( shape2[0], shape2[1])
plt.plot( self.xC, self.yC , 'bx')
def update(self, dT):
self.updateOdometry(dT)
self.computeError()
self.setConsign()
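# Usage sketch (hypothetical: assumes the DiffDriveRobot module provides a robot object
# exposing getLeftEncoderDist/getRightEncoderDist and setLeftMotorSpeed/setRightMotorSpeed,
# as called above; the target pose and time step are illustrative):
#
#   plant = DiffDriveRobot.Robot()
#   controller = Robot(0., 0., 0., plant)
#   controller.setXC(1000.)
#   controller.setYC(500.)
#   for _ in range(2000):
#       controller.update(dT=0.01)
#   controller.draw()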
if __name__== "__main__":
import main
| 3.28125 | 3 |
shims/CounterFitConnection/tests/test_counterfit_connection.py | CallumTarttelin/CounterFit | 86 | 12787525 | <reponame>CallumTarttelin/CounterFit<gh_stars>10-100
'''
Tests the CounterFit app connection
To run this test, ensure you have the CounterFit Virtual IoT Device app running
'''
# pylint: disable=redefined-outer-name,unused-argument,duplicate-code
import pytest
import time
from counterfit_connection import CounterFitConnection
def test_init_counterfit_device():
'''
Tests the connection. You should see the CounterFit app status change to connected
'''
CounterFitConnection.init('127.0.0.1', 5000)
# def test_get_sensor_bool_value():
# '''
# Tests reading a True boolean value from a boolean sensor on port 0
# '''
# CounterFitConnection.init('127.0.0.1', 5000)
# assert CounterFitConnection.get_sensor_boolean_value(0)
# def test_read_serial_char():
# '''
# Tests reading a character from a serial sensor on port /dev/ttyAMA0 containing the text 'hello'
# '''
# CounterFitConnection.init('127.0.0.1', 5000)
# assert CounterFitConnection.read_serial_sensor_char('/dev/ttyAMA0') == 'h'
# assert CounterFitConnection.read_serial_sensor_char('/dev/ttyAMA0') == 'e'
# assert CounterFitConnection.read_serial_sensor_char('/dev/ttyAMA0') == 'l'
# assert CounterFitConnection.read_serial_sensor_char('/dev/ttyAMA0') == 'l'
# assert CounterFitConnection.read_serial_sensor_char('/dev/ttyAMA0') == 'o'
# def test_read_serial_line():
# '''
# Tests reading a line from a serial sensor on port /dev/ttyAMA0 containing the text 'hello\nworld'
# '''
# CounterFitConnection.init('127.0.0.1', 5000)
# assert CounterFitConnection.read_serial_sensor_line('/dev/ttyAMA0') == 'hello'
# assert CounterFitConnection.read_serial_sensor_line('/dev/ttyAMA0') == 'world'
def test_camera_image():
'''
Tests reading an image from a camera sensor. The image is saved locally
'''
CounterFitConnection.init('127.0.0.1', 5000)
image_data = CounterFitConnection.read_binary_sensor('Picamera')
with open('test_image.png', 'wb') as image_file:
image_file.write(image_data.read())
def test_is_connected():
'''
Tests is connected. Make sure counterfit is running
'''
CounterFitConnection.init('127.0.0.1', 5000)
assert CounterFitConnection.is_connected()
def test_is_connected_is_false():
'''
Tests is connected. Make sure counterfit is running until you see a message telling you to close it
'''
CounterFitConnection.init('127.0.0.1', 5000)
print("Please close counterfit")
time.sleep(10)
assert not CounterFitConnection.is_connected()
| 2.5625 | 3 |
scripts/wk/hw/obj.py | 2Shirt/WizardK | 0 | 12787526 | """WizardKit: Hardware objects (mostly)"""
# vim: sts=2 sw=2 ts=2
import logging
import pathlib
import plistlib
import re
from collections import OrderedDict
from wk.cfg.hw import (
ATTRIBUTE_COLORS,
KEY_NVME,
KEY_SMART,
KNOWN_DISK_ATTRIBUTES,
KNOWN_DISK_MODELS,
KNOWN_RAM_VENDOR_IDS,
REGEX_POWER_ON_TIME,
)
from wk.cfg.main import KIT_NAME_SHORT
from wk.exe import get_json_from_command, run_program
from wk.std import (
PLATFORM,
bytes_to_string,
color_string,
sleep,
string_to_bytes,
)
# STATIC VARIABLES
LOG = logging.getLogger(__name__)
NVME_WARNING_KEYS = (
'spare_below_threshold',
'reliability_degraded',
'volatile_memory_backup_failed',
)
SMART_SELF_TEST_START_TIMEOUT_IN_SECONDS = 120
WK_LABEL_REGEX = re.compile(
fr'{KIT_NAME_SHORT}_(LINUX|UFD)',
re.IGNORECASE,
)
# Exception Classes
class CriticalHardwareError(RuntimeError):
"""Exception used for critical hardware failures."""
class SMARTNotSupportedError(TypeError):
"""Exception used for disks lacking SMART support."""
class SMARTSelfTestInProgressError(RuntimeError):
"""Exception used when a SMART self-test is in progress."""
# Classes
class BaseObj():
"""Base object for tracking device data."""
def __init__(self):
self.tests = OrderedDict()
def all_tests_passed(self):
"""Check if all tests passed, returns bool."""
return all(results.passed for results in self.tests.values())
def any_test_failed(self):
"""Check if any test failed, returns bool."""
return any(results.failed for results in self.tests.values())
class CpuRam(BaseObj):
"""Object for tracking CPU & RAM specific data."""
def __init__(self):
super().__init__()
self.description = 'Unknown'
self.details = {}
self.ram_total = 'Unknown'
self.ram_dimms = []
self.tests = OrderedDict()
# Update details
self.get_cpu_details()
self.get_ram_details()
def generate_report(self):
"""Generate CPU & RAM report, returns list."""
report = []
report.append(color_string('Device', 'BLUE'))
report.append(f' {self.description}')
# Include RAM details
report.append(color_string('RAM', 'BLUE'))
report.append(f' {self.ram_total} ({", ".join(self.ram_dimms)})')
# Tests
for test in self.tests.values():
report.extend(test.report)
return report
def get_cpu_details(self):
"""Get CPU details using OS specific methods."""
if PLATFORM == 'Darwin':
cmd = 'sysctl -n machdep.cpu.brand_string'.split()
proc = run_program(cmd, check=False)
self.description = re.sub(r'\s+', ' ', proc.stdout.strip())
elif PLATFORM == 'Linux':
cmd = ['lscpu', '--json']
json_data = get_json_from_command(cmd)
for line in json_data.get('lscpu', [{}]):
_field = line.get('field', '').replace(':', '')
_data = line.get('data', '')
if not (_field or _data):
# Skip
continue
self.details[_field] = _data
self.description = self.details.get('Model name', '')
# Replace empty description
if not self.description:
self.description = 'Unknown CPU'
def get_ram_details(self):
"""Get RAM details using OS specific methods."""
if PLATFORM == 'Darwin':
dimm_list = get_ram_list_macos()
elif PLATFORM == 'Linux':
dimm_list = get_ram_list_linux()
details = {'Total': 0}
for dimm_details in dimm_list:
size, manufacturer = dimm_details
if size <= 0:
# Skip empty DIMMs
continue
description = f'{bytes_to_string(size)} {manufacturer}'
details['Total'] += size
if description in details:
details[description] += 1
else:
details[description] = 1
# Save details
self.ram_total = bytes_to_string(details.pop('Total', 0))
self.ram_dimms = [
f'{count}x {desc}' for desc, count in sorted(details.items())
]
class Disk(BaseObj):
"""Object for tracking disk specific data."""
def __init__(self, path):
super().__init__()
self.attributes = {}
self.description = 'Unknown'
self.details = {}
self.notes = []
self.path = pathlib.Path(path).resolve()
self.smartctl = {}
self.tests = OrderedDict()
# Update details
self.get_details()
self.enable_smart()
self.update_smart_details()
if self.details['bus'] == 'USB' and not self.attributes:
# Try using SAT
LOG.warning('Using SAT for smartctl for %s', self.path)
self.enable_smart(use_sat=True)
self.update_smart_details(use_sat=True)
if not self.is_4k_aligned():
self.add_note('One or more partitions are not 4K aligned', 'YELLOW')
def abort_self_test(self):
"""Abort currently running non-captive self-test."""
cmd = ['sudo', 'smartctl', '--abort', self.path]
run_program(cmd, check=False)
def add_note(self, note, color=None):
"""Add note that will be included in the disk report."""
if color:
note = color_string(note, color)
if note not in self.notes:
self.notes.append(note)
self.notes.sort()
def check_attributes(self, only_blocking=False):
"""Check if any known attributes are failing, returns bool."""
attributes_ok = True
known_attributes = get_known_disk_attributes(self.details['model'])
for attr, value in self.attributes.items():
# Skip unknown attributes
if attr not in known_attributes:
continue
# Get thresholds
blocking_attribute = known_attributes[attr].get('Blocking', False)
err_thresh = known_attributes[attr].get('Error', None)
max_thresh = known_attributes[attr].get('Maximum', None)
if not max_thresh:
max_thresh = float('inf')
# Skip non-blocking attributes if necessary
if only_blocking and not blocking_attribute:
continue
# Skip informational attributes
if not err_thresh:
continue
# Check attribute
if known_attributes[attr].get('PercentageLife', False):
if 0 <= value['raw'] <= err_thresh:
attributes_ok = False
elif err_thresh <= value['raw'] < max_thresh:
attributes_ok = False
# Done
return attributes_ok
def disable_disk_tests(self):
"""Disable all tests."""
LOG.warning('Disabling all tests for: %s', self.path)
for test in self.tests.values():
if test.status in ('Pending', 'Working'):
test.set_status('Denied')
test.disabled = True
def enable_smart(self, use_sat=False):
"""Try enabling SMART for this disk."""
cmd = [
'sudo',
'smartctl',
f'--device={"sat,auto" if use_sat else "auto"}',
'--tolerance=permissive',
'--smart=on',
self.path,
]
run_program(cmd, check=False)
def generate_attribute_report(self):
"""Generate attribute report, returns list."""
known_attributes = get_known_disk_attributes(self.details['model'])
report = []
for attr, value in sorted(self.attributes.items()):
note = ''
value_color = 'GREEN'
# Skip attributes not in our list
if attr not in known_attributes:
continue
# Check for attribute note
note = known_attributes[attr].get('Note', '')
# ID / Name
label = f'{attr:>3}'
if isinstance(attr, int):
# Assuming SMART, include hex ID and name
label += f' / {str(hex(attr))[2:].upper():0>2}: {value["name"]}'
label = f' {label.replace("_", " "):38}'
# Value color
if known_attributes[attr].get('PercentageLife', False):
# PercentageLife values
if 0 <= value['raw'] <= known_attributes[attr]['Error']:
value_color = 'RED'
note = '(failed, % life remaining)'
elif value['raw'] < 0 or value['raw'] > 100:
value_color = 'PURPLE'
note = '(invalid?)'
else:
for threshold, color in ATTRIBUTE_COLORS:
threshold_val = known_attributes[attr].get(threshold, None)
if threshold_val and value['raw'] >= threshold_val:
value_color = color
if threshold == 'Error':
note = '(failed)'
elif threshold == 'Maximum':
note = '(invalid?)'
# 199/C7 warning
if str(attr) == '199' and value['raw'] > 0:
note = '(bad cable?)'
# Build colored string and append to report
line = color_string(
[label, value['raw_str'], note],
[None, value_color, 'YELLOW'],
)
report.append(line)
# Done
return report
def generate_report(self, header=True):
"""Generate Disk report, returns list."""
report = []
if header:
report.append(color_string(f'Device ({self.path.name})', 'BLUE'))
report.append(f' {self.description}')
# Attributes
if self.attributes:
if header:
report.append(color_string('Attributes', 'BLUE'))
report.extend(self.generate_attribute_report())
# Notes
if self.notes:
report.append(color_string('Notes', 'BLUE'))
for note in self.notes:
report.append(f' {note}')
# Tests
for test in self.tests.values():
report.extend(test.report)
return report
def get_details(self):
"""Get disk details using OS specific methods.
Required details default to generic descriptions
and are converted to the correct type.
"""
if PLATFORM == 'Darwin':
self.details = get_disk_details_macos(self.path)
elif PLATFORM == 'Linux':
self.details = get_disk_details_linux(self.path)
# Set necessary details
self.details['bus'] = str(self.details.get('bus', '???')).upper()
self.details['bus'] = self.details['bus'].replace('IMAGE', 'Image')
self.details['bus'] = self.details['bus'].replace('NVME', 'NVMe')
self.details['fstype'] = self.details.get('fstype', 'Unknown')
self.details['log-sec'] = self.details.get('log-sec', 512)
self.details['model'] = self.details.get('model', 'Unknown Model')
self.details['name'] = self.details.get('name', self.path)
self.details['phy-sec'] = self.details.get('phy-sec', 512)
self.details['serial'] = self.details.get('serial', 'Unknown Serial')
self.details['size'] = self.details.get('size', -1)
self.details['ssd'] = self.details.get('ssd', False)
# Ensure certain attributes types
for attr in ['bus', 'model', 'name', 'serial']:
if not isinstance(self.details[attr], str):
self.details[attr] = str(self.details[attr])
for attr in ['phy-sec', 'size']:
if not isinstance(self.details[attr], int):
try:
self.details[attr] = int(self.details[attr])
except (TypeError, ValueError):
LOG.error('Invalid disk %s: %s', attr, self.details[attr])
self.details[attr] = -1
# Set description
self.description = (
f'{bytes_to_string(self.details["size"], use_binary=False)}'
f' ({self.details["bus"]})'
f' {self.details["model"]}'
f' {self.details["serial"]}'
)
def get_labels(self):
"""Build list of labels for this disk, returns list."""
labels = []
# Add all labels from lsblk
for disk in [self.details, *self.details.get('children', [])]:
labels.append(disk.get('label', ''))
labels.append(disk.get('partlabel', ''))
# Remove empty labels
labels = [str(label) for label in labels if label]
# Done
return labels
def get_smart_self_test_details(self):
"""Shorthand to get deeply nested self-test details, returns dict."""
details = {}
try:
details = self.smartctl['ata_smart_data']['self_test']
except (KeyError, TypeError):
# Assuming disk lacks SMART support, ignore and return empty dict.
pass
# Done
return details
def is_4k_aligned(self):
"""Check that all disk partitions are aligned, returns bool."""
aligned = True
if PLATFORM == 'Darwin':
aligned = is_4k_aligned_macos(self.details)
elif PLATFORM == 'Linux':
aligned = is_4k_aligned_linux(self.path, self.details['phy-sec'])
return aligned
def safety_checks(self):
"""Run safety checks and raise an exception if necessary."""
blocking_event_encountered = False
self.update_smart_details()
# Attributes
if not self.check_attributes(only_blocking=True):
blocking_event_encountered = True
LOG.error('%s: Blocked for failing attribute(s)', self.path)
# NVMe status
nvme_status = self.smartctl.get('smart_status', {}).get('nvme', {})
if nvme_status.get('media_read_only', False):
blocking_event_encountered = True
msg = 'Media has been placed in read-only mode'
self.add_note(msg, 'RED')
LOG.error('%s %s', self.path, msg)
for key in NVME_WARNING_KEYS:
if nvme_status.get(key, False):
msg = key.replace('_', ' ')
self.add_note(msg, 'YELLOW')
LOG.warning('%s %s', self.path, msg)
# SMART overall assessment
smart_passed = True
try:
smart_passed = self.smartctl['smart_status']['passed']
except (KeyError, TypeError):
# Assuming disk doesn't support SMART overall assessment
pass
if not smart_passed:
blocking_event_encountered = True
msg = 'SMART overall self-assessment: Failed'
self.add_note(msg, 'RED')
LOG.error('%s %s', self.path, msg)
# Raise blocking exception if necessary
if blocking_event_encountered:
raise CriticalHardwareError(f'Critical error(s) for: {self.path}')
# SMART self-test status
test_details = self.get_smart_self_test_details()
if 'remaining_percent' in test_details.get('status', ''):
msg = f'SMART self-test in progress for: {self.path}'
LOG.error(msg)
raise SMARTSelfTestInProgressError(msg)
def run_self_test(self, log_path):
"""Run disk self-test and check if it passed, returns bool.
NOTE: This function is here to reserve a place for future
NVMe self-tests announced in NVMe spec v1.3.
"""
result = self.run_smart_self_test(log_path)
return result
def run_smart_self_test(self, log_path):
"""Run SMART self-test and check if it passed, returns bool.
NOTE: An exception will be raised if the disk lacks SMART support.
"""
finished = False
result = None
started = False
status_str = 'Starting self-test...'
test_details = self.get_smart_self_test_details()
test_minutes = 15
size_str = bytes_to_string(self.details["size"], use_binary=False)
header_str = color_string(
['[', self.path.name, ' ', size_str, ']'],
[None, 'BLUE', None, 'CYAN', None],
sep='',
)
# Check if disk supports self-tests
if not test_details:
raise SMARTNotSupportedError(
f'SMART self-test not supported for {self.path}')
# Get real test length
test_minutes = test_details.get('polling_minutes', {}).get('short', 5)
test_minutes = int(test_minutes) + 10
# Start test
with open(log_path, 'w', encoding='utf-8') as _f:
_f.write(f'{header_str}\nInitializing...')
cmd = [
'sudo',
'smartctl',
'--tolerance=normal',
'--test=short',
self.path,
]
run_program(cmd, check=False)
# Monitor progress (in five second intervals)
for _i in range(int(test_minutes*60/5)):
sleep(5)
# Update status
self.update_smart_details()
test_details = self.get_smart_self_test_details()
# Check test progress
if started:
status_str = test_details.get('status', {}).get('string', 'Unknown')
status_str = status_str.capitalize()
# Update log
with open(log_path, 'w', encoding='utf-8') as _f:
_f.write(f'{header_str}\nSMART self-test status:\n {status_str}')
# Check if finished
if 'remaining_percent' not in test_details.get('status', {}):
finished = True
break
elif 'remaining_percent' in test_details.get('status', {}):
started = True
elif _i * 5 >= SMART_SELF_TEST_START_TIMEOUT_IN_SECONDS:
# Test didn't start within limit, stop waiting
break
# Check result
if finished:
result = test_details.get('status', {}).get('passed', False)
elif started:
raise TimeoutError(f'SMART self-test timed out for {self.path}')
# Done
return result
def update_smart_details(self, use_sat=False):
"""Update SMART details via smartctl."""
self.attributes = {}
# Check if SAT is needed
if not use_sat:
# use_sat not set, check previous run (if possible)
for arg in self.smartctl.get('smartctl', {}).get('argv', []):
if arg == '--device=sat,auto':
use_sat = True
break
# Get SMART data
cmd = [
'sudo',
'smartctl',
f'--device={"sat,auto" if use_sat else "auto"}',
'--tolerance=verypermissive',
'--all',
'--json',
self.path,
]
self.smartctl = get_json_from_command(cmd, check=False)
# Check for attributes
if KEY_NVME in self.smartctl:
for name, value in self.smartctl[KEY_NVME].items():
try:
self.attributes[name] = {
'name': name,
'raw': int(value),
'raw_str': str(value),
}
except (TypeError, ValueError):
# Ignoring invalid attribute
LOG.error('Invalid NVMe attribute: %s %s', name, value)
elif KEY_SMART in self.smartctl:
for attribute in self.smartctl[KEY_SMART].get('table', {}):
try:
_id = int(attribute['id'])
except (KeyError, ValueError):
# Ignoring invalid attribute
LOG.error('Invalid SMART attribute: %s', attribute)
continue
name = str(attribute.get('name', 'Unknown')).replace('_', ' ').title()
raw = int(attribute.get('raw', {}).get('value', -1))
raw_str = attribute.get('raw', {}).get('string', 'Unknown')
# Fix power-on time
match = REGEX_POWER_ON_TIME.match(raw_str)
if _id == 9 and match:
raw = int(match.group(1))
# Add to dict
self.attributes[_id] = {
'name': name, 'raw': raw, 'raw_str': raw_str}
# Add note if necessary
if not self.attributes:
self.add_note('No NVMe or SMART data available', 'YELLOW')
class Test():
# pylint: disable=too-few-public-methods
"""Object for tracking test specific data."""
def __init__(self, dev, label):
self.dev = dev
self.disabled = False
self.failed = False
self.label = label
self.passed = False
self.report = []
self.status = 'Pending'
def set_status(self, status):
"""Update status string."""
if self.disabled:
# Don't change status if disabled
return
self.status = status
# Functions
def get_disk_details_linux(path):
"""Get disk details using lsblk, returns dict."""
cmd = ['lsblk', '--bytes', '--json', '--output-all', '--paths', path]
json_data = get_json_from_command(cmd, check=False)
details = json_data.get('blockdevices', [{}])[0]
# Fix details
for dev in [details, *details.get('children', [])]:
dev['bus'] = dev.pop('tran', '???')
dev['parent'] = dev.pop('pkname', None)
dev['ssd'] = not dev.pop('rota', True)
if 'loop' in str(path) and dev['bus'] is None:
dev['bus'] = 'Image'
dev['model'] = ''
dev['serial'] = ''
# Done
return details
def get_disk_details_macos(path):
"""Get disk details using diskutil, returns dict."""
details = {}
# Get "list" details
cmd = ['diskutil', 'list', '-plist', path]
proc = run_program(cmd, check=False, encoding=None, errors=None)
try:
plist_data = plistlib.loads(proc.stdout)
except (TypeError, ValueError):
# Invalid / corrupt plist data? return empty dict to avoid crash
LOG.error('Failed to get diskutil list for %s', path)
return details
# Parse "list" details
details = plist_data.get('AllDisksAndPartitions', [{}])[0]
details['children'] = details.pop('Partitions', [])
details['path'] = path
for child in details['children']:
child['path'] = path.with_name(child.get('DeviceIdentifier', 'null'))
# Get "info" details
for dev in [details, *details['children']]:
cmd = ['diskutil', 'info', '-plist', dev['path']]
proc = run_program(cmd, check=False, encoding=None, errors=None)
try:
plist_data = plistlib.loads(proc.stdout)
except (TypeError, ValueError):
LOG.error('Failed to get diskutil info for %s', path)
continue #Skip
# Parse "info" details
dev.update(plist_data)
dev['bus'] = dev.pop('BusProtocol', '???')
dev['fstype'] = dev.pop('FilesystemType', '')
dev['label'] = dev.pop('VolumeName', '')
dev['model'] = dev.pop('MediaName', 'Unknown')
dev['mountpoint'] = dev.pop('MountPoint', '')
dev['name'] = dev.get('name', str(dev['path']))
dev['phy-sec'] = dev.pop('DeviceBlockSize', 512)
dev['serial'] = get_disk_serial_macos(dev['path'])
dev['size'] = dev.pop('Size', -1)
dev['ssd'] = dev.pop('SolidState', False)
dev['vendor'] = ''
if dev.get('WholeDisk', True):
dev['parent'] = None
else:
dev['parent'] = dev.pop('ParentWholeDisk', None)
# Fix details if main dev is a child
for child in details['children']:
if path == child['path']:
for key in ('fstype', 'label', 'name', 'size'):
details[key] = child[key]
break
# Done
return details
def get_disk_serial_macos(path):
"""Get disk serial using system_profiler, returns str."""
cmd = ['sudo', 'smartctl', '--info', '--json', path]
smart_info = get_json_from_command(cmd)
return smart_info.get('serial_number', 'Unknown Serial')
def get_disks(skip_kits=False):
"""Get disks using OS-specific methods, returns list."""
disks = []
if PLATFORM == 'Darwin':
disks = get_disks_macos()
elif PLATFORM == 'Linux':
disks = get_disks_linux()
# Skip WK disks
if skip_kits:
disks = [
disk_obj for disk_obj in disks
if not any(
WK_LABEL_REGEX.search(label) for label in disk_obj.get_labels()
)
]
# Done
return disks
def get_disks_linux():
"""Get disks via lsblk, returns list."""
cmd = ['lsblk', '--json', '--nodeps', '--paths']
disks = []
# Add valid disks
json_data = get_json_from_command(cmd)
for disk in json_data.get('blockdevices', []):
disk_obj = Disk(disk['name'])
# Skip loopback devices, optical devices, etc
if disk_obj.details['type'] != 'disk':
continue
# Add disk
disks.append(disk_obj)
# Done
return disks
def get_disks_macos():
"""Get disks via diskutil, returns list."""
cmd = ['diskutil', 'list', '-plist', 'physical']
disks = []
# Get info from diskutil
proc = run_program(cmd, encoding=None, errors=None, check=False)
if proc.returncode != 0:
# Assuming we're running on an older macOS version
cmd.pop(-1)
proc = run_program(cmd, encoding=None, errors=None, check=False)
# Parse plist data
try:
plist_data = plistlib.loads(proc.stdout)
except (TypeError, ValueError):
# Invalid / corrupt plist data? return empty list to avoid crash
LOG.error('Failed to get diskutil list')
return disks
# Add valid disks
for disk in plist_data['WholeDisks']:
disks.append(Disk(f'/dev/{disk}'))
# Remove virtual disks
# TODO: Test more to figure out why some drives are being marked 'Unknown'
disks = [
d for d in disks if d.details.get('VirtualOrPhysical') != 'Virtual'
]
# Done
return disks
def get_known_disk_attributes(model):
"""Get known NVMe/SMART attributes (model specific), returns str."""
known_attributes = KNOWN_DISK_ATTRIBUTES.copy()
# Apply model-specific data
for regex, data in KNOWN_DISK_MODELS.items():
if re.search(regex, model):
for attr, thresholds in data.items():
if attr in known_attributes:
known_attributes[attr].update(thresholds)
else:
known_attributes[attr] = thresholds
# Done
return known_attributes
def get_ram_list_linux():
"""Get RAM list using dmidecode."""
cmd = ['sudo', 'dmidecode', '--type', 'memory']
dimm_list = []
manufacturer = 'Unknown'
size = 0
# Get DMI data
proc = run_program(cmd)
dmi_data = proc.stdout.splitlines()
# Parse data
for line in dmi_data:
line = line.strip()
if line == 'Memory Device':
# Reset vars
manufacturer = 'Unknown'
size = 0
elif line.startswith('Size:'):
size = line.replace('Size: ', '')
try:
size = string_to_bytes(size, assume_binary=True)
except ValueError:
# Assuming empty module
size = 0
elif line.startswith('Manufacturer:'):
manufacturer = line.replace('Manufacturer: ', '')
dimm_list.append([size, manufacturer])
# Save details
return dimm_list
def get_ram_list_macos():
"""Get RAM list using system_profiler."""
dimm_list = []
# Get and parse plist data
cmd = [
'system_profiler',
'-xml',
'SPMemoryDataType',
]
proc = run_program(cmd, check=False, encoding=None, errors=None)
try:
plist_data = plistlib.loads(proc.stdout)
except (TypeError, ValueError):
# Ignore and return an empty list
return dimm_list
# Check DIMM data
dimm_details = plist_data[0].get('_items', [{}])[0].get('_items', [])
for dimm in dimm_details:
manufacturer = dimm.get('dimm_manufacturer', None)
manufacturer = KNOWN_RAM_VENDOR_IDS.get(
manufacturer,
f'Unknown ({manufacturer})')
size = dimm.get('dimm_size', '0 GB')
try:
size = string_to_bytes(size, assume_binary=True)
except ValueError:
# Empty DIMM?
LOG.error('Invalid DIMM size: %s', size)
continue
dimm_list.append([size, manufacturer])
# Save details
return dimm_list
def is_4k_aligned_macos(disk_details):
"""Check partition alignment using diskutil info, returns bool."""
aligned = True
# Check partitions
for part in disk_details.get('children', []):
offset = part.get('PartitionMapPartitionOffset', 0)
if not offset:
# Assuming offset couldn't be found and it defaulted to 0
# NOTE: Just logging the error, not bailing
LOG.error('Failed to get partition offset for %s', part['path'])
aligned = aligned and offset >= 0 and offset % 4096 == 0
# Done
return aligned
def is_4k_aligned_linux(dev_path, physical_sector_size):
"""Check partition alignment using lsblk, returns bool."""
aligned = True
cmd = [
'sudo',
'sfdisk',
'--json',
dev_path,
]
# Get partition details
json_data = get_json_from_command(cmd)
# Check partitions
for part in json_data.get('partitiontable', {}).get('partitions', []):
offset = physical_sector_size * part.get('start', -1)
aligned = aligned and offset >= 0 and offset % 4096 == 0
# Done
return aligned
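# Usage sketch (requires elevated privileges for the smartctl/dmidecode/sfdisk calls above):
#
#   cpu_ram = CpuRam()
#   print('\n'.join(cpu_ram.generate_report()))
#   for disk in get_disks(skip_kits=True):
#     print('\n'.join(disk.generate_report()))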
if __name__ == '__main__':
print("This file is not meant to be called directly.")
| 2.375 | 2 |
Python Fundamentals/Regular Expressions/Exercise/Task03_02.py | IvanTodorovBG/SoftUni | 1 | 12787527 | <filename>Python Fundamentals/Regular Expressions/Exercise/Task03_02.py
import re
data = input()
searched = input()
pattern = f"\\b{searched}\\b"
result = re.findall(pattern, data, re.IGNORECASE)
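# e.g. with data = "Python is fun because python is simple" and searched = "python"
# (illustrative input, not from the task text), the case-insensitive whole-word match finds 2 occurrences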
print(len(result))
| 4.34375 | 4
visualSHARK/util/helper.py | benjaminLedel/visualSHARK_topicShark | 0 | 12787528 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import math
import networkx as nx
import logging
import timeit
from collections import deque
from visualSHARK.models import Commit
def tag_filter(tags, discard_qualifiers=True, discard_patch=False):
versions = []
# qualifiers are expected at the end of the tag and they may have a number attached
# it is very important for the b to be at the end otherwise beta would already be matched!
qualifiers = ['rc', 'alpha', 'beta', 'b']
# separators are expected to divide 2 or more numbers
separators = ['.', '_', '-']
for t in tags:
tag = t.name
c = Commit.objects.get(id=t.commit_id)
qualifier = ''
remove_qualifier = ''
for q in qualifiers:
if q in tag.lower():
tmp = tag.lower().split(q)
if tmp[-1].isnumeric():
qualifier = [q, tmp[-1]]
remove_qualifier = ''.join(qualifier)
break
else:
qualifier = [q]
remove_qualifier = q
break
# if we have a qualifier we remove it before we check for best number seperator
tmp = tag.lower()
if qualifier:
tmp = tmp.split(remove_qualifier)[0]
# we only want numbers and separators
version = re.sub('[a-z]', '', tmp)
# the best separator is the one separating the most numbers
best = -1
best_sep = None
for sep in separators:
current = 0
for v in version.split(sep):
v = ''.join(c for c in v if c.isdigit())
if v.isnumeric():
current += 1
if current > best:
best = current
best_sep = sep
version = version.split(best_sep)
final_version = []
for v in version:
v = ''.join(c for c in v if c.isdigit())
if v.isnumeric():
final_version.append(int(v))
# if we have a version we append it to our list
if final_version:
# force semver because we are sorting
if len(final_version) == 1:
final_version.append(0)
if len(final_version) == 2:
final_version.append(0)
fversion = {'version': final_version, 'original': tag, 'revision': c.revision_hash}
if qualifier:
fversion['qualifier'] = qualifier
versions.append(fversion)
# discard fliers
p_version = [int(v['version'][0]) for v in versions]
sort = sorted(p_version)
a = 0.25 * len(sort)
b = 0.75 * len(sort)
if a.is_integer():
a = int(a) # otherwise could be 6.0
x_025 = ((sort[a] + sort[a + 1]) / 2)
else:
x_025 = sort[math.floor(a) + 1]
if b.is_integer():
b = int(b)
x_075 = ((sort[b] + sort[b + 1]) / 2)
else:
x_075 = sort[math.floor(b) + 1]
iqr = x_075 - x_025
flyer_lim = 1.5 * iqr
ret = []
for version in versions:
major = int(version['version'][0])
# no fliers in final list
if major > (x_075 + flyer_lim) or major < (x_025 - flyer_lim):
print('exclude: {} because {} is not between {} and {}'.format(version['version'], major, (x_025 - flyer_lim), (x_075 + flyer_lim)))
continue
if discard_qualifiers and 'qualifier' in version.keys():
continue
ret.append(version)
# sort remaining
s = sorted(ret, key=lambda x: (x['version'][0], x['version'][1], x['version'][2]))
ret = []
for v in s:
# only minor, we discard patch releases (3rd in semver, everything after 2nd in other schemas)
if discard_patch:
if len(v['version']) > 2:
del v['version'][2:]
if v['version'] not in [v2['version'] for v2 in ret]:
ret.append(v)
return ret
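# Worked example for the parsing above (illustrative tag name, DB lookup not shown):
# a tag named 'release-1.2.3-rc2' yields qualifier ['rc', '2'], '.' is chosen as the best
# separator, and the version becomes [1, 2, 3]; with discard_qualifiers=True it is then dropped.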
class OntdekBaan3(object):
"""Discover all paths in a commitgraph represented as an NetworkX DAG.
The Problem:
High number of paths without repeated nodes (simple paths) in normal Git Workflow.
The Solution:
We reset the start node if we have no path to travel to the end node (happens for SVN -> Git Tags).
We prune the graph to the subgraph containing only paths from our start to end.
We compute the longest path (which is possible in polynomial time as we work on a DAG).
    We then find all nodes not already contained in the longest path.
For each of those nodes we find a connection to a node in the longest path which is a merge or split (because then it is cached in Volg).
"""
def __init__(self, g):
self._graph = g.copy()
self._nodes = set()
self._log = logging.getLogger(self.__class__.__name__)
def _prune_graph(self, start, end):
non_pruned = self._graph.copy()
for n in non_pruned:
if not nx.has_path(non_pruned, n, end):
if n in self._graph:
self._graph.remove_node(n)
if not nx.has_path(non_pruned, start, n):
if n in self._graph:
self._graph.remove_node(n)
def _find_parent_in_paths(self, node):
succ = deque(list(self._graph.pred[node]))
while succ:
# pop out at the right
n = succ.pop()
if n in self._nodes and (len(self._graph.pred[n]) > 1 or len(self._graph.succ[n]) > 1):
return n
# append new parents to the left
for p in self._graph.pred[n]:
succ.appendleft(p)
def _find_child_in_paths(self, node):
succ = deque(list(self._graph.succ[node]))
while succ:
# pop out at the right
n = succ.pop()
if n in self._nodes and (len(self._graph.pred[n]) > 1 or len(self._graph.succ[n]) > 1):
return n
# append new childs to the left
for s in self._graph.succ[n]:
succ.appendleft(s)
def _reset_start_node(self, start, end):
self._new_start_node = start
while not nx.has_path(self._graph, self._new_start_node, end):
self._log.info('no path from {} to {} traveling backwards'.format(self._new_start_node, end))
parents = list(self._graph.pred[self._new_start_node])
if len(parents) == 0:
raise Exception('can not travel backwards from start {}, no parents on {}: ({})!'.format(start, self._new_start_node, parents))
elif len(parents) > 1:
# if we have multiple parents, chose the one which has the shortest path to target in undirected graph
length = len(self._graph)
chosen_parent = None
un = self._graph.to_undirected()
for p in parents:
path = nx.shortest_path(un, p, end)
if len(path) < length:
length = len(path)
chosen_parent = p
else:
chosen_parent = parents[0]
self._new_start_node = chosen_parent
# do we need to reattach our real start node?
# it could lead to errors if the direction is reversed because Volg does not support the reversed direction
if self._new_start_node != start:
self._log.info('real start was {} but we travelled backwards to {}'.format(start, self._new_start_node))
return self._new_start_node
def get_all_paths(self, start, end):
# travel backwards / forwards for unreachable nodes
new_start = self._reset_start_node(start, end)
# prune graph to our required sub-graph
self._prune_graph(new_start, end)
# start / end can be pre- / appended the same as other nodes not in the longest path
lp = nx.dag_longest_path(self._graph)
# we need to ensure that start and end node are at the appropriate
# if this is raised we could find shortest path from start to lp and end to lp and pre- or append them
if lp[0] != self._new_start_node or lp[-1] != end:
raise Exception('start: {} or end {} not in first path!'.format(self._new_start_node, end))
self._nodes = set(lp)
yield lp
for n in self._graph:
if n not in self._nodes:
# find parent in lp
# find child in lp
p = self._find_parent_in_paths(n)
c = self._find_child_in_paths(n)
p1 = nx.shortest_path(self._graph, p, n)
p2 = nx.shortest_path(self._graph, n, c)
self._nodes.update(set(p1 + p2))
                yield(p1[:-1] + p2) # n is in both paths so we cut one off
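# Usage sketch (toy commit graph with made-up node names instead of revision hashes):
#
#   g = nx.DiGraph()
#   g.add_edges_from([('a', 'b'), ('b', 'c'), ('b', 'd'), ('c', 'e'), ('d', 'e')])
#   for path in OntdekBaan3(g).get_all_paths('a', 'e'):
#       print(path)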
class OntdekBaan2(object):
"""Discover all paths in a commitgraph represented as an NetworkX DAG.
The Problem:
High number of paths without repeated nodes in normal Git Workflow.
The Solution:
Split paths at articulation points to reduce number of paths.
Problem still remaining:
- no common suffixes are cleared, without Volg caching there may be a problem
- long running branches that are merged back into master later
- release branches (because we are taking a lot of information from master)
"""
def __init__(self, graph):
self._log = logging.getLogger(self.__class__.__name__)
self._graph = graph.copy()
def _preprocess(self, start_node, end_node):
self._start_node = start_node
self._end_node = end_node
self._log.info('finding all paths between {} and {}'.format(start_node, end_node))
# we need to prune the graph beforehand, this is expensive but otherwise we would have even more paths
# we also prune common prefix in the implementation, common suffix can only be done later
st = timeit.default_timer()
self._log.info('pruning graph')
non_pruned = self._graph.copy()
for node in non_pruned:
for child in iter(non_pruned.succ[node]):
try:
nx.shortest_path(self._graph, child, self._end_node)
except nx.NetworkXNoPath:
self._graph.remove_edge(node, child)
t = timeit.default_timer() - st
self._log.info('pruning finished in {:.3f}'.format(t))
# if our start node contains no path to the end node travel backwards until it does,
# except if it has more than one parent, then its over and we bail
self._new_start_node = start_node
while not nx.has_path(non_pruned, self._new_start_node, end_node):
self._log.info('no path from {} to {} traveling backwards'.format(self._new_start_node, end_node))
parents = list(non_pruned.pred[self._new_start_node])
if len(parents) == 0:
raise Exception('can not travel backwards from start {}, no parents on {}: ({})!'.format(self._start_node, self._new_start_node, parents))
elif len(parents) > 1:
# if we have multiple parents, chose the one which has the shortest path to target in undirected graph
length = len(non_pruned)
chosen_parent = None
un = non_pruned.to_undirected()
for p in parents:
path = nx.shortest_path(un, p, end_node)
if len(path) < length:
length = len(path)
chosen_parent = p
else:
chosen_parent = parents[0]
self._new_start_node = chosen_parent
# get list of APs
self._aps = list(nx.articulation_points(self._graph.to_undirected()))
def get_all_paths(self, start_node, end_node):
"""Return every traversable path between start and end commit."""
self._preprocess(start_node, end_node)
ap = self._new_start_node
full_paths = []
while ap:
ap, paths = self._get_paths(ap, self._end_node)
# we can do this here because it does not matter in which order we traverse the graph
# we do not need full paths everywhere because of the caching in Volg
if not full_paths:
full_paths = paths
self._log.debug('non ap, assigning full_paths')
else:
                self._log.debug('encountered AP {} splitting path'.format(ap)) # this is potentially happening quite often
# first one gets the complete path, as we do not prune common suffixes
# we know that the AP (our new starting node) has to be the last element of every path
# therefore, we chose the first
full_paths[0] += paths[0][1:]
full_paths += paths[1:]
# do we need to reattach our real start node?
# it could lead to errors if the direction is reversed because Volg does not support the reversed direction
if self._new_start_node != self._start_node:
self._log.info('real start was {} but we travelled backwards to {}'.format(self._start_node, self._new_start_node))
return full_paths
def _get_paths(self, start_node, end_node):
"""Get all paths where the end_node is reachable or up to an AP."""
nodes = [start_node]
paths = [[start_node]]
# print('{}, {}'.format(nodes, paths))
new_start = None
while nodes:
node = nodes.pop()
childs = list(self._graph.succ[node])
# we bail on AP or end_node reached
if node in self._aps and node != start_node:
childs = []
new_start = node
elif node == end_node:
childs = []
# print('node {} childs {}'.format(node, childs))
if childs:
nodes += childs
npath = None
for path in paths:
if path[-1] == node:
# by creating a new list instead of copying we eleminate common prefixes in the resulting paths
npath = [node]
path.append(childs[0])
# print('[1] append {} to path {}'.format(childs[0], path))
# first one we have already
for child in childs[1:]:
# do we already have a path?
if npath:
path = npath.copy() # we need this copy here in case of childs > 2
path.append(child)
paths.append(path)
# print('[2] append {} to new path {}'.format(child, npath))
# this is just for the end node
if not childs:
for path in paths:
if path[-1] in self._graph.pred[node]:
# print('[n] append {} to {} because {}'.format(node, path, path[-1]))
path.append(node)
return new_start, paths
| 2.375 | 2 |
firecam/data_xform/fire_weather.py | open-climate-tech/firecam | 9 | 12787529 | # Copyright 2020 Open Climate Tech Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Reads data from csv export of one of 3 types of data:
1) votes and polygons
2) CameraID and direction
3) Filename and x/y coordinates of fire region
For each of these, it finds the approximate location and finds the historical weather,
which is cached/saved in DB.
Weather data is merged with fire data to generate the output CSV file.
"""
import os, sys
from firecam.lib import settings
from firecam.lib import collect_args
from firecam.lib import goog_helper
from firecam.lib import db_manager
from firecam.lib import img_archive
from firecam.lib import weather
import random
import time, datetime, dateutil.parser
import logging
import csv
import json
import math
from shapely.geometry import Polygon, Point
from PIL import Image
def getCentroid(polygonStr):
polygonCoords = json.loads(polygonStr)
poly = Polygon(polygonCoords)
centerLatLong = list(zip(*poly.centroid.xy))[0]
return (round(centerLatLong[0],3), round(centerLatLong[1],3))
def getRandInterpolatedVal(percentiles):
randVal = random.random()
rand10 = randVal*10
rand10Int = int(rand10)
minVal = percentiles[rand10Int]
maxVal = percentiles[rand10Int + 1]
return minVal + (rand10 - rand10Int) * (maxVal - minVal)
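# Worked example (illustrative decile table): with percentiles = [0, 1, 2, ..., 10] and a
# random draw of 0.55, rand10 is 5.5, so the result interpolates halfway between
# percentiles[5] and percentiles[6] and returns 5.5.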
def keepData(score, centroid, numPolys, isRealFire):
northMexico = Polygon([(32.533, -117.157), (32.696, -115.173), (32.174, -114.692), (32.073, -117.232)])
return not northMexico.intersects(Point(centroid))
def outputWithWeather(outFile, score, timestamp, centroid, numPolys, weatherCentroid, weatherCamera, isRealFire):
dataArr = weather.normalizeWeather(score, numPolys, weatherCentroid, weatherCamera, timestamp, centroid, isRealFire)
dataArrStr = list(map(str, dataArr))
# logging.warning('Data arrayStr: %s', dataArrStr)
dataStr = ', '.join(dataArrStr)
# logging.warning('Data str: %s', dataStr)
outFile.write(dataStr + '\n')
def patchCameraId(cameraID):
if cameraID.startswith('lo-'):
cameraID = 'm' + cameraID
elif cameraID.startswith('so-'):
cameraID = 'sojr-' + cameraID[3:]
return cameraID
def main():
reqArgs = [
["o", "outputFile", "output file name"],
["i", "inputCsv", "csvfile with fire/detection data"],
['m', "mode", "mode: votepoly or camdir or pruned"],
]
optArgs = [
["s", "startRow", "starting row"],
["e", "endRow", "ending row"],
]
args = collect_args.collectArgs(reqArgs, optionalArgs=optArgs, parentParsers=[goog_helper.getParentParser()])
startRow = int(args.startRow) if args.startRow else 0
endRow = int(args.endRow) if args.endRow else 1e9
mode = args.mode
assert mode == 'votepoly' or mode == 'camdir' or mode == 'pruned'
outFile = open(args.outputFile, 'w')
dbManager = db_manager.DbManager(sqliteFile=settings.db_file,
psqlHost=settings.psqlHost, psqlDb=settings.psqlDb,
psqlUser=settings.psqlUser, psqlPasswd=settings.psqlPasswd)
lastCam = None
lastTime = None
random.seed(0)
with open(args.inputCsv) as csvFile:
csvreader = csv.reader(csvFile)
for (rowIndex, csvRow) in enumerate(csvreader):
if rowIndex < startRow:
continue
if rowIndex > endRow:
print('Reached end row', rowIndex, endRow)
break
if mode == 'votepoly':
[cameraID, timestamp, score, polygon, sourcePolygons, isRealFire] = csvRow[:6]
timestamp = int(timestamp)
logging.warning('Processing row: %d, cam: %s, ts: %s', rowIndex, cameraID, timestamp)
if cameraID == lastCam and timestamp == lastTime:
logging.warning('Duplicate row: %d, cam: %s, ts: %s', rowIndex, cameraID, timestamp)
lastCam = cameraID
lastTime = timestamp
centroid = getCentroid(polygon)
if timestamp < 1607786165: #sourcePolygons didn't exist before this
if isRealFire:
numPolys = round(getRandInterpolatedVal(settings.percentilesNumPolyFire))
else:
numPolys = round(getRandInterpolatedVal(settings.percentilesNumPolyOther))
else:
numPolys = 1
if sourcePolygons:
sourcePolygonsArr = json.loads(sourcePolygons)
numPolys = len(sourcePolygonsArr)
cameraID = patchCameraId(cameraID)
(mapImgGCS, camLatitude, camLongitude) = dbManager.getCameraMapLocation(cameraID)
else:
if mode == 'camdir':
[cameraID, isoTime, direction] = csvRow[:3]
logging.warning('Processing row: %d, cam: %s, ts: %s', rowIndex, cameraID, isoTime)
timestamp = time.mktime(dateutil.parser.parse(isoTime).timetuple())
if 'center left' in direction:
offset = -20
elif 'center right' in direction:
offset = 20
elif 'center' in direction:
offset = 0
elif 'left' in direction:
offset = -40
elif 'right' in direction:
offset = 40
else:
logging.error('Unexpected dir row: %d, dir: %s', rowIndex, direction)
continue
elif mode == 'pruned':
[_cropName, minX, _minY, maxX, _maxY, fileName] = csvRow[:6]
minX = int(minX)
maxX = int(maxX)
nameParsed = img_archive.parseFilename(fileName)
cameraID = nameParsed['cameraID']
cameraID = patchCameraId(cameraID)
timestamp = nameParsed['unixTime']
dateStr = nameParsed['isoStr'][:nameParsed['isoStr'].index('T')]
if dateStr == lastTime and cameraID == lastCam:
# logging.warning('Skip same fire. row %s', rowIndex)
continue
lastCam = cameraID
lastTime = dateStr
localFilePath = os.path.join(settings.downloadDir, fileName)
if not os.path.isfile(localFilePath):
logging.warning('Skip missing file %s, row %s', fileName, rowIndex)
continue
img = Image.open(localFilePath)
degreesInView = 110
centerX = (minX + maxX) / 2
offset = centerX / img.size[0] * degreesInView - degreesInView/2
img.close()
(mapImgGCS, camLatitude, camLongitude) = dbManager.getCameraMapLocation(cameraID)
camHeading = img_archive.getHeading(cameraID)
heading = (camHeading + offset) % 360
angle = 90 - heading
distanceDegrees = 0.2 # approx 14 miles
fireLat = camLatitude + math.sin(angle*math.pi/180)*distanceDegrees
fireLong = camLongitude + math.cos(angle*math.pi/180)*distanceDegrees
centroid = (fireLat, fireLong)
score = getRandInterpolatedVal(settings.percentilesScoreFire)
numPolys = round(getRandInterpolatedVal(settings.percentilesNumPolyFire))
isRealFire = 1
logging.warning('Processing row: %d, heading: %s, centroid: %s, score: %s, numpoly: %s', rowIndex, heading, centroid, score, numPolys)
if not keepData(score, centroid, numPolys, isRealFire):
logging.warning('Skipping Mexico fire row %d, camera %s', rowIndex, cameraID)
continue
(weatherCentroid, weatherCamera) = weather.getWeatherData(dbManager, cameraID, timestamp, centroid, (camLatitude, camLongitude))
if not weatherCentroid:
logging.warning('Skipping row %d', rowIndex)
continue
# logging.warning('Weather %s', weatherCentroid)
outputWithWeather(outFile, score, timestamp, centroid, numPolys, weatherCentroid, weatherCamera, isRealFire)
logging.warning('Processed row: %d, cam: %s, ts: %s', rowIndex, cameraID, timestamp)
outFile.close()
if __name__=="__main__":
main()
| 2.28125 | 2 |
helper_ai.py | jpypi/pandemic-game-ai | 2 | 12787530 | <reponame>jpypi/pandemic-game-ai
from game import Game
from cards import PlayerCardDeck, PlayerCard
def calculate_uniform_probability_of_card_draw(num_of_cards):
return 1/num_of_cards
def calculate_chance_of_epidemic(game):
#Do we cast this as int or round to ceiling?
split_size = float(game.starting_num_of_player_cards) / game.num_of_epidemics
cards_left = game.player_cards.remaining_cards()
cards_burnt = game.starting_num_of_player_cards - cards_left
#now calculate how many epidemic splits have we gone through
rem = split_size - cards_burnt % split_size
expected_epidemics = float(cards_burnt / split_size) + 1
if game.occurred_epidemics >= expected_epidemics:
return 0
else:
#only is ever 2 cards from player side
a = calculate_uniform_probability_of_card_draw(rem)
b = calculate_uniform_probability_of_card_draw(rem-1)
return a + b
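# Worked example (hypothetical game state: 48 starting player cards, 4 epidemics,
# 5 cards already drawn, no epidemic seen yet): split_size = 12, rem = 12 - 5 = 7,
# so the returned probability is 1/7 + 1/6, roughly 0.31.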
def calculate_drawing_infection_city_card(game, city):
p_epidemic = calculate_chance_of_epidemic(game)
#print("probability of drawing epidemic is " + str(p_epidemic))
if check_if_in_discard(game,city.name):
p_card = 0
for r in range(game.infection_rate):
p_card += calculate_uniform_probability_of_card_draw(len(game.infection_discard) + 1 - r)
return p_card * p_epidemic
else:
p__not_epidemic = 1 - p_epidemic
if check_if_has_been_drawn(game,city.name):
p_card = 0
for r in range(game.infection_rate):
p_card += calculate_uniform_probability_of_card_draw(len(game.infection_cards_restack) - r)
return p__not_epidemic * p_card
else:
if len(game.infection_cards_restack) > 0:
return 0
else:
p_card = 0
for r in range(game.infection_rate):
p_card += calculate_uniform_probability_of_card_draw(len(game.infection_cards) - r)
return p_card * p__not_epidemic + calculate_drawing_epidemic_infection_city_card(game,city)
def check_if_in_discard(game, city_name):
in_discard = False
for discard in game.infection_discard:
if discard.name == city_name:
in_discard = True
break
return in_discard
def check_if_has_been_drawn(game, city_name):
#only checks the main draw deck
for card in game.infection_cards:
if card.name == city_name:
return card.has_been_drawn
return False
def calculate_drawing_epidemic_infection_city_card(game, city):
p_epidemic = calculate_chance_of_epidemic(game)
#check if its in discard
if check_if_in_discard(game,city.name) or check_if_has_been_drawn(game,city.name):
#it can't be drawn from bottom of deck
return 0
else:
#assume uniform distribution for now
p_card = calculate_uniform_probability_of_card_draw(len(game.infection_cards))
return p_epidemic * p_card
def calculate_outbreak_in_city(game, city):
#for now assume only 1 turn and no mixed color outbreaks;
if city.diseases[city.color] == 3:
p_draw = calculate_drawing_infection_city_card(game, city)
return p_draw
elif city.diseases[city.color] > 1:
p_draw = calculate_drawing_epidemic_infection_city_card(game,city)
return p_draw
else:
#calculate the probability of drawing epidemic and then redrawing that card
p_epidemic = calculate_drawing_epidemic_infection_city_card(game, city)
p_draw = 0
for x in range(game.infection_rate):
p_draw += calculate_uniform_probability_of_card_draw(len(game.infection_discard)+1+x)
return p_epidemic * p_draw
def calculate_probability_of_outbreak(game):
#calculate the likelihood for each city
# TODO! add in the other non-color happenstances
p_x = []
total = 0
for city in game.cities:
p = calculate_outbreak_in_city(game,city)
total += p
p_x.append(p)
print(city.name + " having an outbreak is " + str(p))
    return total
| 3.265625 | 3
SampleBufferSonification/model_class.py | jcstucki/SampleBufferSonfication | 0 | 12787531 | <gh_stars>0
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from tensorflow.keras.models import load_model
import numpy as np
import time
class LSTM_Network:
def __init__(self, file):
self.file = file
self.model = object()
self.fit_model = object()
self.y_predicted = int()
def define_model(self):
self.model = Sequential()
self.model.add(LSTM(32, return_sequences = True, batch_input_shape = (1,1,1), stateful = True))
self.model.add(LSTM(32, return_sequences = True, stateful = True))
self.model.add(Dense(1))
self.model.compile(loss = 'mean_squared_error', optimizer = 'rmsprop')
return self.model
def load_model(self):
self.model = load_model(self.file)
        return self.model
def save_model(self):
self.model.save(self.file)
def train(self, x, y):
#Reshape Data
x = np.reshape(x, [-1,1,1])
y = np.reshape(y, [-1,1,1])
#Fit Data to model
try:
self.fit_model = self.model.fit(x, y, epochs = 1, batch_size = 1, verbose = 2)
self.save_model()
return self.fit_model
except Exception as e:
print(e)
print("No Training Data Yet")
def predict(self, x_predict):
try:
x_predict = np.reshape(x_predict, [-1,1,1])
self.y_predict = self.model.predict(x_predict)
self.y_predict = self.y_predict[-1]
except:
self.y_predict = 0
return self.y_predict
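# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Shows one way this class might be driven end to end. The model file name and the
# toy sine-wave data below are assumptions, not values taken from the original project.
if __name__ == "__main__":
    net = LSTM_Network("lstm_model.h5")   # hypothetical model path
    net.define_model()
    x = np.sin(np.linspace(0, 10, 100))   # toy input sequence
    y = np.roll(x, -1)                    # target: the next sample
    net.train(x, y)
    print("next-sample prediction:", net.predict(x[-1:]))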
| 3.0625 | 3 |
src/features/compress.py | DianaDI/compressedCV | 0 | 12787532 | from PIL import Image
from pathlib import Path
from glob import glob
from os.path import basename
from tqdm import tqdm
import os
class Compression:
def __init__(self, compress_level, optimize=True, log=True, resize=False, resize_params=(0, 0)):
"""
Init compression params
:param compress_level: compression % (0, 100)
:param optimize: boolean, true flag will do an extra pass on the image to find a way
to reduce its size as much as possible.
:param log: boolean, prints log information
:param resize: boolean, true if resize needed
:param resize_params: specify if resize is true
"""
self.compress_level = compress_level
self.optimize = optimize
self.resize = resize
self.resize_params = resize_params
self.log = log
self.img_format = ".bmp"
def compress(self, path, save_path):
img = Image.open(path)
if self.resize:
# downsize the image with an ANTIALIAS filter (gives the highest quality)
img = img.resize(self.resize_params, Image.ANTIALIAS)
img.save(save_path[:-4] + '.jpg', optimize=self.optimize, quality=self.compress_level)
img.close()
def compress_bulk(self, data_dir, save_dir):
try:
os.makedirs(save_dir)
except OSError as e:
print(e)
pass
if self.log:
print(f'COMPRESSION OF {data_dir} HAS STARTED')
print(f'INPUT DIR SIZE: {self.get_dir_size(data_dir)}')
for file in tqdm(glob(data_dir + "/*" + self.img_format)):
self.compress(file, save_dir + "/" + basename(file))
if self.log: print("COMPRESSED DIR SIZE: " + self.get_dir_size(save_dir))
def get_dir_size(self, path):
root_directory = Path(path)
size = sum(f.stat().st_size for f in root_directory.glob('**/*'))
return str(size) + " bytes"
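# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Batch-compresses a folder of .bmp frames into JPEGs; the directory names and the
# quality/resize settings below are assumptions chosen only for the example.
if __name__ == "__main__":
    compressor = Compression(compress_level=60, resize=True, resize_params=(640, 480))
    compressor.compress_bulk("data/raw_frames", "data/compressed_frames")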
| 3 | 3 |
src/encoded/tests/test_schema_library.py | aperritano/encoded | 0 | 12787533 | import pytest
@pytest.fixture
def library(lab, award):
return {
'lab': lab['uuid'],
'award': award['uuid'],
'nucleic_acid_term_name': 'DNA'
}
@pytest.fixture
def library_starting_quantity(library):
item = library.copy()
item.update({
'nucleic_acid_starting_quantity': '10',
'nucleic_acid_starting_quantity_units': 'ng',
})
return item
@pytest.fixture
def library_starting_quantity_no_units(library):
item = library.copy()
item.update({
'nucleic_acid_starting_quantity': '10',
})
return item
@pytest.fixture
def library_with_invalid_fragmentation_methods_string(library):
item = library.copy()
item.update(
{
'fragmentation_methods': 'chemical (DpnII restriction)',
}
)
return item
@pytest.fixture
def library_with_valid_fragmentation_method_list(library):
item = library.copy()
item.update(
{
'fragmentation_methods': ['chemical (DpnII restriction)', 'chemical (HindIII restriction)'],
}
)
return item
def test_library_starting_quantity_post(testapp, library_starting_quantity):
testapp.post_json('/library', library_starting_quantity)
def test_library_fragmentation_method_string(testapp, library_with_invalid_fragmentation_methods_string):
res = testapp.post_json('/library', library_with_invalid_fragmentation_methods_string, status=422)
def test_library_fragmentation_method_list(testapp, library_with_valid_fragmentation_method_list):
testapp.post_json('/library', library_with_valid_fragmentation_method_list, status=201) | 2.0625 | 2 |
rddeconv/__init__.py | agriff86/rd-deconvolve | 0 | 12787534 | <reponame>agriff86/rd-deconvolve
# -*- coding: utf-8 -*-
"""Correct (deconvolve) the output from two-filter radon detectors"""
from rddeconv import metadata
__version__ = metadata.version
__author__ = metadata.authors[0]
__license__ = metadata.license
__copyright__ = metadata.copyright
from .deconvolve import deconvolve_dataframe
from .deconvolve import deconvolve_dataframe_in_chunks
import pymc3
import logging
import logzero
import theano
# log_format = '%(color)s[%(levelname)1.1s %(process)d %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s'
log_format = (
"%(color)s[%(levelname)1.1s PID=%(process)d %(asctime)s]%(end_color)s %(message)s"
)
formatter = logzero.LogFormatter(fmt=log_format, datefmt='%H:%M:%S')
logzero.setup_default_logger(formatter=formatter)
# setup the pymc3 logger to use the same settings as the main one
pymc3_logger = logzero.setup_logger(
"pymc3", disableStderrLogger=True, formatter=formatter
)
# setup the theano logger to silence INFO messages (and use otherwise similar settings)
#theano_logger = logzero.setup_logger(
# "theano", disableStderrLogger=True, formatter=formatter
#)
#theano_logger.setLevel(logging.WARNING)
# silence compilelock messages
_logger = logging.getLogger("theano.gof.compilelock")
_logger.setLevel(logging.WARNING)
# this is a helpful constant
from .theoretical_model import lamrn
from .util import standard_parameters_700L
from .util import standard_parameters_1500L
from .util import load_standard_csv
| 2.0625 | 2 |
nfldata/spiders/schools.py | pkukkapalli/nfldata | 0 | 12787535 | """Defines spiders related to schools that NFL players have attended."""
import scrapy
from nfldata.common.pfr import pfr_request, PRO_FOOTBALL_REFERENCE_DOMAIN
from nfldata.items.schools import School
class SchoolsSpider(scrapy.Spider):
"""The spider that crawls and stores information about schools that players
have attended."""
name = 'schools'
allowed_domains = [PRO_FOOTBALL_REFERENCE_DOMAIN]
@classmethod
def create_table(cls, database):
"""Create the table needed for this spider."""
School.sql_create(database)
def start_requests(self):
return [pfr_request('schools')]
def parse(self, response): # pylint: disable=arguments-differ
for row in response.css(
'table#college_stats_table tbody tr:not(.thead)'):
school = row.css('td[data-stat="college_name"] a::attr(href)').get()
if school:
name = row.css('td[data-stat="college_name"] a::text').get()
yield School(school=school, name=name)
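# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# The spider is normally launched with `scrapy crawl schools`; the snippet below is an
# alternative in-process run. The settings dict is an assumption, not real project config.
if __name__ == "__main__":
    from scrapy.crawler import CrawlerProcess
    process = CrawlerProcess(settings={"LOG_LEVEL": "INFO"})
    process.crawl(SchoolsSpider)
    process.start()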
| 3.34375 | 3 |
moby_distribution/registry/auth.py | shabbywu/distribution | 7 | 12787536 | <reponame>shabbywu/distribution
# -*- coding: utf-8 -*-
import base64
import logging
from typing import Any, Dict, Optional
import requests
from www_authenticate import parse
from moby_distribution.registry.exceptions import AuthFailed
from moby_distribution.spec.auth import TokenResponse
logger = logging.getLogger(__name__)
class AuthorizationProvider:
def provide(self) -> str:
"""Provide the 'Authorization' used in HTTP Headers
Usage: AuthorizationProvider().provider()
"""
raise NotImplementedError
class BaseAuthentication:
"""Base Authentication Protocol"""
def __init__(self, www_authenticate: str):
self._raw_www_authenticate = www_authenticate
self._www_authenticate = None
@property
def www_authenticate(self):
if self._www_authenticate is None:
self._www_authenticate = parse(self._raw_www_authenticate)
return self._www_authenticate
def authenticate(self, username: Optional[str] = None, password: Optional[str] = None) -> AuthorizationProvider:
raise NotImplementedError
@property
def raw_www_authenticate(self) -> str:
return self._raw_www_authenticate
class BasicAuthAuthorizationProvider(AuthorizationProvider):
"""BasicAuthAuthorizationProvider provide the `HTTP Basic Authentication`"""
def __init__(self, username: str, password: str):
self.username = username
self.password = password
def provide(self) -> str:
auth = base64.b64encode(f"{self.username}:{self.password}".encode()).decode()
return f"Basic {auth}"
class TokenAuthorizationProvider(AuthorizationProvider):
"""TokenAuthorizationProvider provide the `Bearer Token` Authentication"""
def __init__(self, token_response: TokenResponse, token_type: str = "Bearer"):
self.token_response = token_response
self.token_type = token_type
def provide(self) -> str:
if self.token_response.token:
return f"{self.token_type} {self.token_response.token}"
elif self.token_response.access_token:
return f"{self.token_type} {self.token_response.access_token}"
raise ValueError("Missing Token")
class HTTPBasicAuthentication(BaseAuthentication):
"""`HTTP Basic Authentication` Authenticator"""
def authenticate(self, username: Optional[str] = None, password: Optional[str] = None) -> AuthorizationProvider:
if username is None or password is None:
raise AuthFailed(
message="请提供用户名和密码",
status_code=400,
response=None,
)
return BasicAuthAuthorizationProvider(username, password)
class DockerRegistryTokenAuthentication(BaseAuthentication):
"""Docker Registry v2 authentication via central service
spec: https://github.com/distribution/distribution/blob/main/docs/spec/auth/token.md
"""
REQUIRE_KEYS = ["realm", "service"]
backend: str
service: str
scope: str
def __init__(self, www_authenticate: str, offline_token: bool = True):
super().__init__(www_authenticate)
self.offline_token = offline_token
assert "bearer" in self.www_authenticate
self.bearer = self.www_authenticate["bearer"]
for key in self.REQUIRE_KEYS:
assert key in self.bearer
self.backend = self.bearer["realm"]
self.service = self.bearer["service"]
self.scope = self.bearer.get("scope", None)
def authenticate(self, username: Optional[str] = None, password: Optional[str] = None) -> AuthorizationProvider:
"""Authenticate to the registry.
If no username and password provided, will authenticate as the anonymous user.
:param username: User name to authenticate as.
        :param password: Password to authenticate with.
:return:
"""
params: Dict[str, Any] = {
"service": self.service,
"scope": self.scope,
"client_id": username or "anonymous",
"offline_token": self.offline_token,
}
headers = {}
if username and password:
auth = base64.b64encode(f"{username}:{password}".encode()).decode()
headers["Authorization"] = f"Basic {auth}"
elif any([username, password]) and not all([username, password]):
logger.warning("请同时提供 username 和 password!")
resp = requests.get(self.backend, headers=headers, params=params)
if resp.status_code != 200:
raise AuthFailed(
message="用户凭证校验失败, 请检查用户信息和操作权限",
status_code=resp.status_code,
response=resp,
)
return TokenAuthorizationProvider(TokenResponse(**resp.json()))
class UniversalAuthentication(BaseAuthentication):
"""An Auto auth backend, which will auto auth by `scheme` provided by www_authenticate"""
def authenticate(self, username: Optional[str] = None, password: Optional[str] = None) -> AuthorizationProvider:
if "basic" in self.www_authenticate:
return HTTPBasicAuthentication(self.raw_www_authenticate).authenticate(username, password)
elif "bearer" in self.www_authenticate:
return DockerRegistryTokenAuthentication(self.raw_www_authenticate).authenticate(username, password)
raise NotImplementedError("未支持的认证方式")
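# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Probes a registry, feeds the Www-Authenticate challenge to UniversalAuthentication and
# obtains an Authorization header for anonymous access. The registry URL is an assumption.
if __name__ == "__main__":
    ping = requests.get("https://registry-1.docker.io/v2/")
    challenge = ping.headers.get("Www-Authenticate", "")
    provider = UniversalAuthentication(challenge).authenticate()
    print(provider.provide()[:30], "...")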
| 2.375 | 2 |
dVRL_simulator/environments/pick.py | Cladett/rlman | 0 | 12787537 | <reponame>Cladett/rlman
from gym import utils
from dVRL_simulator.PsmEnv_Position_pick_reach import PSMEnv_Position_pick_reach
import numpy as np
class PSMPickEnv(PSMEnv_Position_pick_reach): # , utils.EzPickle):
def __init__(self, psm_num=1, reward_type='sparse',
randomize_obj=False, randomize_ee=False,
action_type='continuous'):
initial_pos = np.array([0, 0, -0.10])
super(PSMPickEnv, self).__init__(psm_num = psm_num,
n_substeps=1,
block_gripper=False,
has_object=True,
target_in_the_air=True,
height_offset=0.0001,
target_offset=[0, 0, 0.005],
obj_range=0.025,
target_range=0.025,
distance_threshold=0.003,
initial_pos=initial_pos,
reward_type=reward_type,
dynamics_enabled=False,
two_dimension_only=False,
randomize_initial_pos_obj=randomize_obj,
randomize_initial_pos_ee=randomize_ee,
action_type=action_type,
docker_container = self.__class__.__name__.lower())
utils.EzPickle.__init__(self)
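# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# A standard gym-style interaction loop; it assumes the simulator docker container managed
# by the base environment is available, so treat this purely as an outline.
if __name__ == "__main__":
    env = PSMPickEnv(psm_num=1, reward_type='sparse')
    obs = env.reset()
    for _ in range(10):
        obs, reward, done, info = env.step(env.action_space.sample())
    env.close()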
| 2.1875 | 2 |
mpi_process.py | pycroscopy/distUSID | 1 | 12787538 | """
Created on 7/17/16 10:08 AM
@author: <NAME>, <NAME>
"""
from __future__ import division, print_function, absolute_import
import numpy as np
import psutil
import joblib
import time as tm
import h5py
import itertools
from numbers import Number
from multiprocessing import cpu_count
try:
from mpi4py import MPI
if MPI.COMM_WORLD.Get_size() == 1:
# mpi4py available but NOT called via mpirun or mpiexec => single node
MPI = None
except ImportError:
# mpi4py not even present! Single node by default:
MPI = None
mpi_serial_warning = False
from pyUSID.io.hdf_utils import check_if_main, check_for_old, get_attributes
from pyUSID.io.usi_data import USIDataset
from pyUSID.io.io_utils import recommend_cpu_cores, get_available_memory, format_time, format_size
"""
For hyperthreaded applications: need to tack on the additional flag as shown below
No need to specify -n 4 or whatever if you want to use all available processors
$ mpirun -use-hwthread-cpus python hello_world.py
Check the number of ranks per socket. If only 1 rank per socket - that rank is allowed to call joblib
Thus this paradigm will span the pure-mpi and mpi+joblib paradigm. Note that this does not prevent some sockets to run
in pure MPI mode while others run in MPI+joblib mode. Eventually, this should allow each rank to use joblib when the
number of ranks in a given socket is noticeably less than the number of logical cores....
The naive approach will be to simply allow all ranks to write data directly to file
Forcing only a single rank within a socket may negate performance benefits
Writing out to separate files and then merging them later on is the most performant option
Look into sub-communication worlds that can create mini worlds instead of the general COMM WORLD
https://stackoverflow.com/questions/50900655/mpi4py-create-multiple-groups-and-scatter-from-each-group
https://www.bu.edu/pasi/files/2011/01/Lisandro-Dalcin-mpi4py.pdf
No work will be necessary to figure out the new ranking within the new communicator / group - automatically assigned
from lowest value
When it is time to write the results chunks back to file.
a. If not master -> send data to master
b. If master -> gather from this smaller world and then write to file once. IF this is too much memory to handle,
then loop over each rank <-- how is this different from just looping over each rank within the new communicator and
asking it to write?:
i. receive
ii. write
iii. repeat.
A copy of the data will be made on Rank 0. ie - Rank 0 will have to hold N ranks worth of data. Meaning that each
rank can hold only around M/(2N) of data where M is the memory per node and N is the number of ranks per socket
http://mpitutorial.com/tutorials/introduction-to-groups-and-communicators/
https://info.gwdg.de/~ceulig/docs-dev/doku.php?id=en:services:application_services:high_performance_computing:mpi4py
https://rabernat.github.io/research_computing/parallel-programming-with-mpi-for-python.html
Create a giant low precision dataset. Instead of storing indices, let each rank set the completed indices to
True. The problem is that the smallest precision is 1 byte and NOT 1 bit. Even boolean = 1 byte!
See - http://docs.h5py.org/en/latest/faq.html#faq
https://support.hdfgroup.org/HDF5/hdf5-quest.html#bool
https://groups.google.com/a/continuum.io/forum/#!topic/anaconda/qFOGRTOxFTM
"""
def group_ranks_by_socket(verbose=False):
"""
Groups MPI ranks in COMM_WORLD by socket. Another way to think about this is that it assigns a master rank for each
rank such that there is a single master rank per socket (CPU). The results from this function can be used to split
MPI communicators based on the socket for intra-node communication.
This is necessary when wanting to carve up the memory for all ranks within a socket.
This is also relevant when trying to bring down the number of ranks that are writing to the HDF5 file.
This is all based on the premise that data analysis involves a fair amount of file writing and writing with
3 ranks is a lot better than writing with 100 ranks. An assumption is made that the communication between the
ranks within each socket would be faster than communicating across nodes / scokets. No assumption is made about the
names of each socket
Parameters
----------
verbose : bool, optional
Whether or not to print debugging statements
Returns
-------
master_ranks : 1D unsigned integer numpy array
Array with values that signify which rank a given rank should consider its master.
"""
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
# Step 1: Gather all the socket names:
sendbuf = MPI.Get_processor_name()
if verbose:
print('Rank: ', rank, ', sendbuf: ', sendbuf)
recvbuf = comm.allgather(sendbuf)
if verbose and rank == 0:
print('Rank: ', rank, ', recvbuf received: ', recvbuf)
# Step 2: Find all unique socket names:
recvbuf = np.array(recvbuf)
unique_sockets = np.unique(recvbuf)
if verbose and rank == 0:
print('Unique sockets: {}'.format(unique_sockets))
master_ranks = np.zeros(size, dtype=np.uint16)
for item in unique_sockets:
temp = np.where(recvbuf == item)[0]
master_ranks[temp] = temp[0]
if verbose and rank == 0:
print('Parent rank for all ranks: {}'.format(master_ranks))
return master_ranks
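def _example_split_comm_by_socket(verbose=False):
    """
    Hedged sketch (added for illustration; not part of the original module): shows how the
    master_ranks array from group_ranks_by_socket() is typically used to split COMM_WORLD
    into one sub-communicator per socket. The function name itself is illustrative.
    """
    if MPI is None:
        return None
    world = MPI.COMM_WORLD
    master_ranks = group_ranks_by_socket(verbose=verbose)
    # Ranks that share a socket get the same color and therefore land in the same communicator
    return world.Split(color=int(master_ranks[world.Get_rank()]), key=world.Get_rank())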
def to_ranges(iterable):
"""
Converts a sequence of iterables to range tuples
From https://stackoverflow.com/questions/4628333/converting-a-list-of-integers-into-range-in-python
Credits: @juanchopanza and @luca
Parameters
----------
iterable : collections.Iterable object
iterable object like a list
Returns
-------
iterable : generator object
Cast to list or similar to use
"""
iterable = sorted(set(iterable))
for key, group in itertools.groupby(enumerate(iterable),
lambda t: t[1] - t[0]):
group = list(group)
yield group[0][1], group[-1][1]
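# Hedged usage note (added for illustration; not part of the original module):
#     list(to_ranges([0, 1, 2, 7, 8, 11]))  ->  [(0, 2), (7, 8), (11, 11)]
# which is how Process collapses completed position indices into contiguous slices below.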
class Process(object):
"""
Encapsulates the typical steps performed when applying a processing function to a dataset.
"""
def __init__(self, h5_main, cores=None, max_mem_mb=4*1024, verbose=False):
"""
Parameters
----------
h5_main : h5py.Dataset instance
The dataset over which the analysis will be performed. This dataset should be linked to the spectroscopic
indices and values, and position indices and values datasets.
cores : uint, optional
Default - all available cores - 2
How many cores to use for the computation
max_mem_mb : uint, optional
How much memory to use for the computation. Default 1024 Mb
verbose : Boolean, (Optional, default = False)
Whether or not to print debugging statements
"""
if h5_main.file.mode != 'r+':
raise TypeError('Need to ensure that the file is in r+ mode to write results back to the file')
if MPI is not None:
# If we came here then, the user has intentionally asked for multi-node computation
comm = MPI.COMM_WORLD
self.mpi_comm = comm
self.mpi_rank = comm.Get_rank()
self.mpi_size = comm.Get_size()
if verbose:
print("Rank {} of {} on {} sees {} logical cores on the socket".format(comm.Get_rank(), comm.Get_size(),
MPI.Get_processor_name(),
cpu_count()))
# First, ensure that cores=logical cores in node. No point being economical / considerate
cores = psutil.cpu_count()
# It is sufficient if just one rank checks all this.
if self.mpi_rank == 0:
print('Working on {} ranks via MPI'.format(self.mpi_size))
# Ensure that the file is opened in the correct comm or something
if h5_main.file.driver != 'mpio':
raise TypeError('The HDF5 file should have been opened with driver="mpio". Current driver = "{}"'
''.format(h5_main.file.driver))
"""
# Not sure how to check for this correctly
messg = None
try:
if h5_main.file.comm != comm:
messg = 'The HDF5 file should have been opened with comm=MPI.COMM_WORLD. Currently comm={}'
''.format(h5_main.file.comm)
except AttributeError:
messg = 'The HDF5 file should have been opened with comm=MPI.COMM_WORLD'
if messg is not None:
raise TypeError(messg)
"""
else:
            print('No mpi4py found or script was not called via mpiexec / mpirun. Assuming single node computation')
self.mpi_comm = None
self.mpi_size = 1
self.mpi_rank = 0
# Checking if dataset is "Main"
if not check_if_main(h5_main, verbose=verbose and self.mpi_rank == 0):
raise ValueError('Provided dataset is not a "Main" dataset with necessary ancillary datasets')
if MPI is not None:
MPI.COMM_WORLD.barrier()
# Not sure if we need a barrier here.
# Saving these as properties of the object:
self.h5_main = USIDataset(h5_main)
self.verbose = verbose
self._cores = None
self.__ranks_on_socket = 1
self.__socket_master_rank = 0
self._max_pos_per_read = None
self._max_mem_mb = None
# Now have to be careful here since the below properties are a function of the MPI rank
self.__start_pos = None
self.__rank_end_pos = None
self.__end_pos = None
self.__pixels_in_batch = None
# Determining the max size of the data that can be put into memory
# all ranks go through this and they need to have this value any
self._set_memory_and_cores(cores=cores, mem=max_mem_mb)
self.duplicate_h5_groups = []
self.partial_h5_groups = []
self.process_name = None # Reset this in the extended classes
self.parms_dict = None
# The name of the HDF5 dataset that should be present to signify which positions have already been computed
self.__status_dset_name = 'completed_positions'
self._results = None
self.h5_results_grp = None
# Check to see if the resuming feature has been implemented:
self.__resume_implemented = False
try:
self._get_existing_datasets()
except NotImplementedError:
if verbose and self.mpi_rank == 0:
print('It appears that this class may not be able to resume computations')
except:
# NameError for variables that don't exist
# AttributeError for self.var_name that don't exist
# TypeError (NoneType) etc.
self.__resume_implemented = True
if self.mpi_rank == 0:
print('Consider calling test() to check results before calling compute() which computes on the entire'
' dataset and writes back to the HDF5 file')
# DON'T check for duplicates since parms_dict has not yet been initialized.
# Sub classes will check by themselves if they are interested.
def __assign_job_indices(self):
"""
Sets the start and end indices for each MPI rank
"""
# First figure out what positions need to be computed
self._compute_jobs = np.where(self._h5_status_dset[()] == 0)[0]
if self.verbose and self.mpi_rank == 0:
print('Among the {} positions in this dataset, the following positions need to be computed: {}'
'.'.format(self.h5_main.shape[0], self._compute_jobs))
pos_per_rank = self._compute_jobs.size // self.mpi_size # integer division
if self.verbose and self.mpi_rank == 0:
print('Each rank is required to work on {} of the {} (remaining) positions in this dataset'
'.'.format(pos_per_rank, self._compute_jobs.size))
# The start and end indices now correspond to the indices in the incomplete jobs rather than the h5 dataset
self.__start_pos = self.mpi_rank * pos_per_rank
self.__rank_end_pos = (self.mpi_rank + 1) * pos_per_rank
self.__end_pos = int(min(self.__rank_end_pos, self.__start_pos + self._max_pos_per_read))
if self.mpi_rank == self.mpi_size - 1:
# Force the last rank to go to the end of the dataset
self.__rank_end_pos = self._compute_jobs.size
if self.verbose:
print('Rank {} will read positions {} to {} of {}'.format(self.mpi_rank, self.__start_pos,
self.__rank_end_pos, self.h5_main.shape[0]))
def _estimate_compute_time_per_pixel(self, *args, **kwargs):
"""
Estimates how long it takes to compute an average pixel's worth of data. This information should be used by the
user to limit the number of pixels that will be processed per batch to make best use of checkpointing. This
function is exposed to the developer of the child classes. An approximate can be derived if it is simpler
Returns
-------
"""
chosen_pos = np.random.randint(0, high=self.h5_main.shape[0]-1, size=5)
t0 = tm.time()
_ = parallel_compute(self.h5_main[chosen_pos, :], self._map_function, cores=1,
lengthy_computation=False, func_args=args, func_kwargs=kwargs, verbose=False)
return (tm.time() - t0) / len(chosen_pos)
def _get_pixels_in_current_batch(self):
"""
Returns the indices of the pixels that will be processed in this batch.
Returns
-------
pixels_in_batch : numpy.ndarray
1D array of unsigned integers denoting the pixels that will be read, processed, and written back to
"""
return self.__pixels_in_batch
def test(self, **kwargs):
"""
Tests the process on a subset (for example a pixel) of the whole data. The class can be reinstantiated with
improved parameters and tested repeatedly until the user is content, at which point the user can call
compute() on the whole dataset. This is not a function that is expected to be called in mpi
Parameters
----------
kwargs - dict, optional
keyword arguments to test the process
Returns
-------
"""
# All children classes should call super() OR ensure that they only work for self.mpi_rank == 0
raise NotImplementedError('test_on_subset has not yet been implemented')
def _check_for_duplicates(self):
"""
Checks for instances where the process was applied to the same dataset with the same parameters
Returns
-------
duplicate_h5_groups : list of h5py.Group objects
List of groups satisfying the above conditions
"""
if self.verbose and self.mpi_rank == 0:
print('Checking for duplicates:')
# This list will contain completed runs only
duplicate_h5_groups = check_for_old(self.h5_main, self.process_name, new_parms=self.parms_dict)
partial_h5_groups = []
# First figure out which ones are partially completed:
if len(duplicate_h5_groups) > 0:
for index, curr_group in enumerate(duplicate_h5_groups):
"""
Earlier, we only checked the 'last_pixel' but to be rigorous we should check self.__status_dset_name
The last_pixel attribute check may be deprecated in the future.
Note that legacy computations did not have this dataset. We can add to partially computed datasets
"""
if self.__status_dset_name in curr_group.keys():
# Case 1: Modern Process results:
status_dset = curr_group[self.__status_dset_name]
if not isinstance(status_dset, h5py.Dataset):
# We should not come here if things were implemented correctly
if self.mpi_rank == 0:
print('Results group: {} contained an object named: {} that should have been a dataset'
'.'.format(curr_group, self.__status_dset_name))
if self.h5_main.shape[0] != status_dset.shape[0] or len(status_dset.shape) > 1 or \
status_dset.dtype != np.uint8:
if self.mpi_rank == 0:
print('Status dataset: {} was not of the expected shape or datatype'.format(status_dset))
                    # Finally, check how far the computation was completed.
                    if len(np.where(status_dset[()] == 0)[0]) == 0:
                        # Fully computed - this really is a completed duplicate
                        # Let's write the legacy attribute for safety
                        curr_group.attrs['last_pixel'] = self.h5_main.shape[0]
                        # No further checks necessary
                        continue
                    else:
                        # Some positions remain - remove from duplicates and move to partial
                        partial_h5_groups.append(duplicate_h5_groups.pop(index))
                        # Optionally report how much was completed:
                        if self.mpi_rank == 0:
                            percent_complete = int(100 * len(np.where(status_dset[()] != 0)[0]) / status_dset.shape[0])
                            print('Group: {}: computation was {}% completed'.format(curr_group, percent_complete))
                        # No need to run the legacy 'last_pixel' checks below
                        continue
# Case 2: Legacy results group:
if 'last_pixel' not in curr_group.attrs.keys():
if self.mpi_rank == 0:
# Should not be coming here at all
print('Group: {} had neither the status HDF5 dataset or the legacy attribute: "last_pixel"'
'.'.format(curr_group))
# Not sure what to do with such groups. Don't consider them in the future
duplicate_h5_groups.pop(index)
continue
# Finally, do the legacy test:
if curr_group.attrs['last_pixel'] < self.h5_main.shape[0]:
# Should we create the dataset here, to make the group future-proof?
# remove from duplicates and move to partial
partial_h5_groups.append(duplicate_h5_groups.pop(index))
if len(duplicate_h5_groups) > 0 and self.mpi_rank == 0:
print('Note: ' + self.process_name + ' has already been performed with the same parameters before. '
'These results will be returned by compute() by default. '
'Set override to True to force fresh computation')
print(duplicate_h5_groups)
if len(partial_h5_groups) > 0 and self.mpi_rank == 0:
print('Note: ' + self.process_name + ' has already been performed PARTIALLY with the same parameters. '
                  'compute() will resume computation in the last group below. '
                  'To choose a different group call use_partial_computation(). '
'Set override to True to force fresh computation or resume from a '
'data group besides the last in the list.')
print(partial_h5_groups)
return duplicate_h5_groups, partial_h5_groups
def use_partial_computation(self, h5_partial_group=None):
"""
Extracts the necessary parameters from the provided h5 group to resume computation
Parameters
----------
h5_partial_group : h5py.Group object
Group containing partially computed results
"""
# Attempt to automatically take partial results
if h5_partial_group is None:
if len(self.partial_h5_groups) < 1:
raise ValueError('No group was found with partial results and no such group was provided')
h5_partial_group = self.partial_h5_groups[-1]
else:
# Make sure that this group is among the legal ones already discovered:
if h5_partial_group not in self.partial_h5_groups:
raise ValueError('Provided group does not appear to be in the list of discovered groups')
self.parms_dict = get_attributes(h5_partial_group)
self.h5_results_grp = h5_partial_group
def _set_memory_and_cores(self, cores=None, mem=None):
"""
Checks hardware limitations such as memory, # cpus and sets the recommended datachunk sizes and the
number of cores to be used by analysis methods. This function can work with clusters with heterogeneous
memory sizes (e.g. CADES SHPC Condo).
Parameters
----------
cores : uint, optional
Default - 1
How many cores to use for the computation
mem : uint, optional
Default - 1024
The amount a memory in Mb to use in the computation
"""
if MPI is None:
min_free_cores = 1 + int(psutil.cpu_count() > 4)
if cores is None:
self._cores = max(1, psutil.cpu_count() - min_free_cores)
else:
if not isinstance(cores, int):
raise TypeError('cores should be an integer but got: {}'.format(cores))
cores = int(abs(cores))
self._cores = max(1, min(psutil.cpu_count(), cores))
self.__socket_master_rank = 0
self.__ranks_on_socket = 1
else:
# user-provided input cores will simply be ignored in an effort to use the entire CPU
ranks_by_socket = group_ranks_by_socket(verbose=self.verbose)
self.__socket_master_rank = ranks_by_socket[self.mpi_rank]
# which ranks in this socket?
ranks_on_this_socket = np.where(ranks_by_socket == self.__socket_master_rank)[0]
# how many in this socket?
self.__ranks_on_socket = ranks_on_this_socket.size
# Force usage of all available memory
mem = None
self._cores = 1
# Disabling the following line since mpi4py and joblib didn't play well for Bayesian Inference
# self._cores = self.__cores_per_rank = psutil.cpu_count() // self.__ranks_on_socket
# TODO: Convert all to bytes!
_max_mem_mb = get_available_memory() / 1024 ** 2 # in MB
if mem is None:
mem = _max_mem_mb
else:
if not isinstance(mem, int):
raise TypeError('mem must be a whole number')
mem = abs(mem)
self._max_mem_mb = min(_max_mem_mb, mem)
# Remember that multiple processes (either via MPI or joblib) will share this socket
max_data_chunk = self._max_mem_mb / (self._cores * self.__ranks_on_socket)
# Now calculate the number of positions OF RAW DATA ONLY that can be stored in memory in one go PER RANK
mb_per_position = self.h5_main.dtype.itemsize * self.h5_main.shape[1] / 1024 ** 2
self._max_pos_per_read = int(np.floor(max_data_chunk / mb_per_position))
if self.verbose and self.mpi_rank == self.__socket_master_rank:
# expected to be the same for all ranks so just use this.
print('Rank {} - on socket with {} logical cores and {} avail. RAM shared by {} ranks each given {} cores'
'.'.format(self.__socket_master_rank, psutil.cpu_count(), format_size(_max_mem_mb * 1024**2, 2),
self.__ranks_on_socket, self._cores))
print('Allowed to read {} pixels per chunk'.format(self._max_pos_per_read))
@staticmethod
def _map_function(*args, **kwargs):
"""
The function that manipulates the data on a single instance (position). This will be used by _unit_computation()
to process a chunk of data in parallel
Parameters
----------
args : list
arguments to the function in the correct order
kwargs : dictionary
keyword arguments to the function
Returns
-------
object
"""
raise NotImplementedError('Please override the _unit_function specific to your process')
def _read_data_chunk(self):
"""
Reads a chunk of data for the intended computation into memory
"""
if self.__start_pos < self.__rank_end_pos:
self.__end_pos = int(min(self.__rank_end_pos, self.__start_pos + self._max_pos_per_read))
# DON'T DIRECTLY apply the start and end indices anymore to the h5 dataset. Find out what it means first
self.__pixels_in_batch = self._compute_jobs[self.__start_pos: self.__end_pos]
self.data = self.h5_main[self.__pixels_in_batch, :]
if self.verbose:
                print('Rank {} - Read positions: {} (rank end position: {})'.format(self.mpi_rank, self.__pixels_in_batch, self.__rank_end_pos))
# DON'T update the start position
else:
if self.verbose:
print('Rank {} - Finished reading all data!'.format(self.mpi_rank))
self.data = None
def _write_results_chunk(self):
"""
Writes the computed results into appropriate datasets.
This needs to be rewritten since the processed data is expected to be at least as large as the dataset
"""
# Now update the start position
self.__start_pos = self.__end_pos
# This line can remain as is
raise NotImplementedError('Please override the _set_results specific to your process')
def _create_results_datasets(self):
"""
Process specific call that will write the h5 group, guess dataset, corresponding spectroscopic datasets and also
link the guess dataset to the spectroscopic datasets. It is recommended that the ancillary datasets be populated
within this function.
"""
raise NotImplementedError('Please override the _create_results_datasets specific to your process')
def __create_compute_status_dataset(self):
"""
Creates a dataset that keeps track of what pixels / rows have already been computed. Users are not expected to
extend / modify this function.
"""
# TODO: This will fail for Fitter and Image Processing class which will need to run Process twice . Need to allow room for customization
# Check to make sure that such a group doesn't already exist
if self.__status_dset_name in self.h5_results_grp.keys():
self._h5_status_dset = self.h5_results_grp[self.__status_dset_name]
if not isinstance(self._h5_status_dset, h5py.Dataset):
raise ValueError('Provided results group: {} contains an expected object ({}) that is not a dataset'
'.'.format(self.h5_results_grp, self._h5_status_dset))
if self.h5_main.shape[0] != self._h5_status_dset.shape[0] or len(self._h5_status_dset.shape) > 1 or \
self._h5_status_dset.dtype != np.uint8:
if self.mpi_rank == 0:
raise ValueError('Status dataset: {} was not of the expected shape or datatype'
'.'.format(self._h5_status_dset))
else:
self._h5_status_dset = self.h5_results_grp.create_dataset(self.__status_dset_name, dtype=np.uint8,
shape=(self.h5_main.shape[0],))
# Could be fresh computation or resuming from a legacy computation
if 'last_pixel' in self.h5_results_grp.attrs.keys():
completed_pixels = self.h5_results_grp.attrs['last_pixel']
if completed_pixels > 0:
self._h5_status_dset[:completed_pixels] = 1
def _get_existing_datasets(self):
"""
The purpose of this function is to allow processes to resume from partly computed results
Start with self.h5_results_grp
"""
raise NotImplementedError('Please override the _get_existing_datasets specific to your process')
def _unit_computation(self, *args, **kwargs):
"""
The unit computation that is performed per data chunk. This allows room for any data pre / post-processing
as well as multiple calls to parallel_compute if necessary
"""
# TODO: Try to use the functools.partials to preconfigure the map function
# cores = number of processes / rank here
self._results = parallel_compute(self.data, self._map_function, cores=self._cores,
lengthy_computation=False,
func_args=args, func_kwargs=kwargs,
verbose=self.verbose)
def compute(self, override=False, *args, **kwargs):
"""
Creates placeholders for the results, applies the unit computation to chunks of the dataset
Parameters
----------
override : bool, optional. default = False
By default, compute will simply return duplicate results to avoid recomputing or resume computation on a
group with partial results. Set to True to force fresh computation.
args : list
arguments to the mapped function in the correct order
kwargs : dictionary
keyword arguments to the mapped function
Returns
-------
h5_results_grp : h5py.Group object
Group containing all the results
"""
class SimpleFIFO(object):
"""
Simple class that maintains a moving average of some numbers.
"""
def __init__(self, length=5):
"""
Create a SimpleFIFO object
Parameters
----------
length : unsigned integer
Number of values that need to be maintained for the moving average
"""
self.__queue = list()
if not isinstance(length, int):
raise TypeError('length must be a positive integer')
if length <= 0:
raise ValueError('length must be a positive integer')
self.__max_length = length
self.__count = 0
def put(self, item):
"""
Adds the item to the internal queue. If the size of the queue exceeds its capacity, the oldest
item is removed.
Parameters
----------
item : float or int
Any real valued number
"""
if (not isinstance(item, Number)) or isinstance(item, complex):
raise TypeError('Provided item: {} is not a Number'.format(item))
self.__queue.append(item)
self.__count += 1
if len(self.__queue) > self.__max_length:
_ = self.__queue.pop(0)
def get_mean(self):
"""
Returns the average of the elements within the queue
Returns
-------
avg : number.Number
Mean of all elements within the queue
"""
return np.mean(self.__queue)
def get_cycles(self):
"""
Returns the number of items that have been added to the queue in total
Returns
-------
count : int
number of items that have been added to the queue in total
"""
return self.__count
if not override:
if len(self.duplicate_h5_groups) > 0:
if self.mpi_rank == 0:
print('Returned previously computed results at ' + self.duplicate_h5_groups[-1].name)
return self.duplicate_h5_groups[-1]
elif len(self.partial_h5_groups) > 0:
if self.mpi_rank == 0:
print('Resuming computation in group: ' + self.partial_h5_groups[-1].name)
self.use_partial_computation()
resuming = False
if self.h5_results_grp is None:
# starting fresh
if self.verbose and self.mpi_rank == 0:
print('Creating HDF5 group and datasets to hold results')
self._create_results_datasets()
else:
# resuming from previous checkpoint
resuming = True
self._get_existing_datasets()
self.__create_compute_status_dataset()
if resuming and self.mpi_rank == 0:
            percent_complete = int(100 * len(np.where(self._h5_status_dset[()] != 0)[0]) /
                                   self._h5_status_dset.shape[0])
print('Resuming computation. {}% completed already'.format(percent_complete))
self.__assign_job_indices()
# Not sure if this is necessary but I don't think it would hurt either
if self.mpi_comm is not None:
self.mpi_comm.barrier()
compute_times = SimpleFIFO(5)
write_times = SimpleFIFO(5)
orig_rank_start = self.__start_pos
if self.mpi_rank == 0 and self.mpi_size == 1:
if self.__resume_implemented:
print('\tThis class (likely) supports interruption and resuming of computations!\n'
'\tIf you are operating in a python console, press Ctrl+C or Cmd+C to abort\n'
'\tIf you are in a Jupyter notebook, click on "Kernel">>"Interrupt"\n'
'\tIf you are operating on a cluster and your job gets killed, re-run the job to resume\n')
else:
print('\tThis class does NOT support interruption and resuming of computations.\n'
'\tIn order to enable this feature, simply implement the _get_existing_datasets() function')
if self.verbose and self.mpi_rank == self.__socket_master_rank:
print('Rank: {} - with nothing loaded has {} free memory'
''.format(self.mpi_rank, format_size(get_available_memory())))
self._read_data_chunk()
if self.mpi_comm is not None:
self.mpi_comm.barrier()
if self.verbose and self.mpi_rank == self.__socket_master_rank:
print('Rank: {} - with only raw data loaded has {} free memory'
''.format(self.mpi_rank, format_size(get_available_memory())))
while self.data is not None:
num_jobs_in_batch = self.__end_pos - self.__start_pos
t_start_1 = tm.time()
self._unit_computation(*args, **kwargs)
comp_time = np.round(tm.time() - t_start_1, decimals=2) # in seconds
time_per_pix = comp_time / num_jobs_in_batch
compute_times.put(time_per_pix)
if self.verbose:
print('Rank {} - computed chunk in {} or {} per pixel. Average: {} per pixel'
'.'.format(self.mpi_rank, format_time(comp_time), format_time(time_per_pix),
format_time(compute_times.get_mean())))
# Ranks can become memory starved. Check memory usage - raw data + results in memory at this point
if self.verbose and self.mpi_rank == self.__socket_master_rank:
print('Rank: {} - now holding onto raw data + results has {} free memory'
''.format(self.mpi_rank, format_size(get_available_memory())))
t_start_2 = tm.time()
self._write_results_chunk()
# NOW, update the positions. Users are NOT allowed to touch start and end pos
self.__start_pos = self.__end_pos
# Leaving in this provision that will allow restarting of processes
if self.mpi_size == 1:
self.h5_results_grp.attrs['last_pixel'] = self.__end_pos
# Child classes don't even have to worry about flushing. Process will do it.
self.h5_main.file.flush()
dump_time = np.round(tm.time() - t_start_2, decimals=2)
write_times.put(dump_time / num_jobs_in_batch)
if self.verbose:
print('Rank {} - wrote its {} pixel chunk in {}'.format(self.mpi_rank,
num_jobs_in_batch,
format_time(dump_time)))
time_remaining = (self.__rank_end_pos - self.__end_pos) * \
(compute_times.get_mean() + write_times.get_mean())
if self.verbose or self.mpi_rank == 0:
percent_complete = int(100 * (self.__end_pos - orig_rank_start) /
(self.__rank_end_pos - orig_rank_start))
print('Rank {} - {}% complete. Time remaining: {}'.format(self.mpi_rank, percent_complete,
format_time(time_remaining)))
# All ranks should mark the pixels for this batch as completed. 'last_pixel' attribute will be updated later
# Setting each section to 1 independently
for section in to_ranges(self.__pixels_in_batch):
self._h5_status_dset[section[0]: section[1]+1] = 1
self._read_data_chunk()
if self.verbose:
print('Rank {} - Finished computing all jobs!'.format(self.mpi_rank))
if self.mpi_comm is not None:
self.mpi_comm.barrier()
if self.mpi_rank == 0:
print('Finished processing the entire dataset!')
# Update the legacy 'last_pixel' attribute here:
if self.mpi_rank == 0:
self.h5_results_grp.attrs['last_pixel'] = self.h5_main.shape[0]
return self.h5_results_grp
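# --- Hedged sketch (added for illustration; not part of the original module) ---
# A minimal outline of a concrete Process subclass, showing which hooks must be provided.
# The class name, group/dataset names and the plain h5py calls are assumptions; a real
# subclass would typically build proper USID results datasets and ancillary links instead.
class _ExampleMeanProcess(Process):
    def __init__(self, h5_main, **kwargs):
        super(_ExampleMeanProcess, self).__init__(h5_main, **kwargs)
        self.process_name = 'Example_Mean'
        self.parms_dict = {'algorithm': 'mean'}
    @staticmethod
    def _map_function(spectrum, *args, **kwargs):
        # Reduce each position's spectrum to its mean value
        return np.mean(spectrum)
    def _create_results_datasets(self):
        self.h5_results_grp = self.h5_main.parent.create_group('Example_Mean_Results')
        self._h5_mean = self.h5_results_grp.create_dataset('Mean', shape=(self.h5_main.shape[0],),
                                                           dtype=np.float32)
    def _get_existing_datasets(self):
        self._h5_mean = self.h5_results_grp['Mean']
    def _write_results_chunk(self):
        # Write this batch's results to the rows that were just computed
        self._h5_mean[self._get_pixels_in_current_batch()] = np.array(self._results, dtype=np.float32)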
def parallel_compute(data, func, cores=1, lengthy_computation=False, func_args=None, func_kwargs=None, verbose=False):
"""
Computes the provided function using multiple cores using the joblib library
Parameters
----------
data : numpy.ndarray
Data to map function to. Function will be mapped to the first axis of data
func : callable
Function to map to data
cores : uint, optional
Number of logical cores to use to compute
Default - 1 (serial computation)
lengthy_computation : bool, optional
Whether or not each computation is expected to take substantial time.
Sometimes the time for adding more cores can outweigh the time per core
Default - False
func_args : list, optional
arguments to be passed to the function
func_kwargs : dict, optional
keyword arguments to be passed onto function
verbose : bool, optional. default = False
Whether or not to print statements that aid in debugging
Returns
-------
results : list
List of computational results
"""
if not callable(func):
raise TypeError('Function argument is not callable')
if not isinstance(data, np.ndarray):
raise TypeError('data must be a numpy array')
if func_args is None:
func_args = list()
else:
if isinstance(func_args, tuple):
func_args = list(func_args)
if not isinstance(func_args, list):
raise TypeError('Arguments to the mapped function should be specified as a list')
if func_kwargs is None:
func_kwargs = dict()
else:
if not isinstance(func_kwargs, dict):
raise TypeError('Keyword arguments to the mapped function should be specified via a dictionary')
req_cores = cores
if MPI is not None:
rank = MPI.COMM_WORLD.Get_rank()
# Was unable to get the MPI + joblib framework to work. Did not compute anything at all. Just froze
cores = 1
else:
rank = 0
cores = recommend_cpu_cores(data.shape[0],
requested_cores=cores,
lengthy_computation=lengthy_computation,
verbose=verbose)
if verbose:
print('Rank {} starting computing on {} cores (requested {} cores)'.format(rank, cores, req_cores))
if cores > 1:
values = [joblib.delayed(func)(x, *func_args, **func_kwargs) for x in data]
results = joblib.Parallel(n_jobs=cores)(values)
        # Finished the parallel computation for this chunk
print('Rank {} finished parallel computation'.format(rank))
else:
if verbose:
print("Rank {} computing serially ...".format(rank))
# List comprehension vs map vs for loop?
# https://stackoverflow.com/questions/1247486/python-list-comprehension-vs-map
results = [func(vector, *func_args, **func_kwargs) for vector in data]
return results
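# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Maps a toy reduction over a small random array; the data, function and core count below
# are illustrative only.
if __name__ == '__main__':
    _demo_data = np.random.rand(16, 128)
    _demo_results = parallel_compute(_demo_data, np.mean, cores=2, verbose=True)
    print('{} results computed'.format(len(_demo_results)))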
| 2.015625 | 2 |
tests/test.py | MAfarrag/Oasis | 2 | 12787539 | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 19 16:34:44 2020
@author: mofarrag
"""
import Oasis
#try:
# import Hapi
#except ImportError:
# try:
# import HAPI
# except ImportError:
# import sys
# sys.path.append(".")
# import Hapi
| 1.71875 | 2 |
TicTacToeGame.py | jdipietro/SophiaSci1 | 0 | 12787540 | import numpy as np
class TicTacToeGame:
def __init__(self, size):
        self.m_SizeSize = size
self.m_Grid = np.zeros((size, size), np.int8)
self.m_Grid.fill(-1)
self.m_CurentPlayer = 0
def Move(self, player, row, col):
if self.IsMoveAllowed(player, row, col) == True:
self.m_Grid[row][col] = player
    def WillMoveWin(self, player, row, col):
        if not self.IsMoveAllowed(player, row, col):
            return False
        # Temporarily place the move so the line checks below can see it
        backup = self.m_Grid[row][col]
        self.m_Grid[row][col] = player
        size = self.m_SizeSize
        # Check the row and the column of the move
        hasWon = all(self.m_Grid[row][c] == player for c in range(size)) or \
                 all(self.m_Grid[r][col] == player for r in range(size))
        # Check the diagonal from upper left to lower right
        if not hasWon and row == col:
            hasWon = all(self.m_Grid[i][i] == player for i in range(size))
        # Check the diagonal from lower left to upper right
        if not hasWon and row + col == size - 1:
            hasWon = all(self.m_Grid[size - 1 - i][i] == player for i in range(size))
        # Undo the temporary move
        self.m_Grid[row][col] = backup
        return hasWon
    def RankMove(self, player, row, col):
        reward = 0
        if not self.IsMoveAllowed(player, row, col):
            reward = reward + -100
        # WillMoveWin places the candidate move temporarily itself, so the grid is not modified here
        if self.WillMoveWin(player, row, col):
            reward = reward + 1000
        return reward
def IsMoveAllowed(self, player, row, col):
if int(row) in range(self.m_SizeSize) and int(col) in range(self.m_SizeSize):
return self.m_Grid[row][col] == -1
else:
return False
def NoEmptySpaces(self):
for i in range(self.m_SizeSize):
for j in range(self.m_SizeSize):
if self.m_Grid[i][j] == -1:
return False
return True
def Render(self):
# print (self.m_Grid)
print("")
for row in range(self.m_SizeSize):
lineTxt = ""
for col in range(self.m_SizeSize):
if (self.m_Grid[row][col] == 0):
lineTxt += " O"
                elif (self.m_Grid[row][col] == 1):
lineTxt += " X"
else:
lineTxt += " _"
print(lineTxt)
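# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Quick smoke test of the class above; the board size and moves are arbitrary choices.
if __name__ == "__main__":
    game = TicTacToeGame(3)
    game.Move(0, 1, 1)
    game.Move(1, 0, 0)
    print("Rank of (1, 0) for player 0:", game.RankMove(0, 1, 0))
    game.Render()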
| 2.953125 | 3 |
main.py | dev-everaldo-cyrino/exercicios-de-raciocinio-6 | 0 | 12787541 | soma =0
multiplicacao = 1
total = []
n=1
for x in range(5):
num = int(input('digite o {}° numero: '.format(n)))
total.append(num)
multiplicacao *= num
soma += num
n +=1
print('''
a soma é {}
a multiplicação é {}
e os numeros são {}'''.format(soma,multiplicacao,total)) | 4.03125 | 4 |
DesignAddon/DesignBeh/BlockEntityScripts/ClientSystem.py | linyuzhe210/BlockEntityChestDemo | 3 | 12787542 | <filename>DesignAddon/DesignBeh/BlockEntityScripts/ClientSystem.py
# -*- coding: UTF-8 -*-
from mod.client.system.clientSystem import ClientSystem
import mod.client.extraClientApi as clientApi
import time
class Main(ClientSystem):
def __init__(self, namespace, system_name):
ClientSystem.__init__(self, namespace, system_name)
namespace = clientApi.GetEngineNamespace()
system_name = clientApi.GetEngineSystemName()
self.ListenForEvent(namespace, system_name, 'ClientBlockUseEvent', self, self.block_used)
self.ListenForEvent(namespace, system_name, 'ChunkLoadedClientEvent', self, self.chunk_first_loaded)
self.ListenForEvent(namespace, system_name, 'UiInitFinished', self, self.chunk_first_loaded)
self.ListenForEvent('Design', 'BlockEntityServer', 'OpenChestFinished', self, self.chest_opened)
self.ListenForEvent('Design', 'BlockEntityServer', 'InitChestRotation', self, self.chest_rotation)
self.block_interact_cooldown = {}
self.rotation_queue = []
def block_used(self, event):
player_id = event['playerId']
block_name = event['blockName']
x = event['x']
y = event['y']
z = event['z']
if block_name == 'design:tileentity_chest':
if player_id not in self.block_interact_cooldown:
self.block_interact_cooldown[player_id] = time.time()
elif time.time() - self.block_interact_cooldown[player_id] < 0.15:
return
else:
self.block_interact_cooldown[player_id] = time.time()
game_comp = clientApi.GetEngineCompFactory().CreateGame(clientApi.GetLevelId())
dimension_id = game_comp.GetCurrentDimension()
self.NotifyToServer('TryOpenChest', {'dimensionId': dimension_id, 'pos': [x, y, z]})
def chest_opened(self, event):
data = event['data']
block_comp = clientApi.GetEngineCompFactory().CreateBlockInfo(clientApi.GetLevelId())
for block_data in data:
block_pos = tuple(block_data['pos'])
block_comp.SetBlockEntityMolangValue(block_pos, "variable.mod_states", float(block_data['states']))
'''
        [Some pitfalls encountered]
        1. A custom block entity's rendering Molang values can only be set while the block is inside a chunk that is already loaded on the player's client.
        2. This creates two situations: (a) on first login, if block entities exist in the chunks already loaded on the client, the client must request their data from the server and set the Molang values once the reply arrives;
        (b) if custom block entity models that need Molang values lie outside the chunks loaded at login, they can only be set later in the session, once the chunk containing the block has been loaded.
        3. If the server responds slowly, things go badly wrong (e.g. the chunk is loaded on the client but the block entity data has not arrived, so the block entity animation cannot be rendered properly).
        4. It also makes the amount of data the server has to transfer very large.
        5. Optimizations are left for later.
'''
def chunk_first_loaded(self, event):
self.NotifyToServer('GetChestInit', {'playerId': clientApi.GetLocalPlayerId()})
'''
        [Disaster-level cost]
        Writing it the way the commented-out code below does ends badly - it is extremely laggy.
'''
# def chunk_loaded(self, event):
# chunk_pos_x = event['chunkPosX']
# chunk_pos_z = event['chunkPosZ']
# block_info_comp = clientApi.GetEngineCompFactory().CreateBlockInfo(clientApi.GetLevelId())
# data = []
# for x in xrange(chunk_pos_x, chunk_pos_x + 16):
# for z in xrange(chunk_pos_z, chunk_pos_z + 16):
# for y in xrange(0, 256):
# block_data = block_info_comp.GetBlock((x, y, z))
# if block_data[0] == 'design:tileentity_chest':
# data.append('{},{},{}'.format(x, y, z))
# if data:
# self.NotifyToServer('GetChestInit', {'playerId': clientApi.GetLocalPlayerId(), 'data': data})
def chest_rotation(self, event):
print event
new_event = {tuple(map(int, k.split(','))): v for k, v in event.items()}
block_comp = clientApi.GetEngineCompFactory().CreateBlockInfo(clientApi.GetLevelId())
def rotate_chest():
index = 0
count = len(new_event.items())
for pos, data in new_event.items():
block_data = block_comp.GetBlock(pos)
if block_data[0] == 'design:tileentity_chest':
block_comp.SetBlockEntityMolangValue(pos, "variable.mod_rotation", data['rotation'])
block_comp.SetBlockEntityMolangValue(pos, "variable.mod_invert", float(data['invert']) if data['invert'] != 0 else 0.0)
index += 1
if index == count:
return True
else:
return False
if new_event:
            # When this event fires, the block may already be rendered on the client while its Molang values still cannot be set.
            # So we count how many times each queued closure has run and only pop it from the queue once the rotation Molang has been applied at least twice.
self.rotation_queue.append([rotate_chest, 0])
# self.rotation_queue.append(rotate_chest)
def Update(self):
        # Record the list indices of the closures that have just finished setting the rotation
_die = []
for index, value in enumerate(self.rotation_queue):
if value[0]():
value[1] += 1
if value[1] == 2:
_die.append(index)
# for index, value in enumerate(self.rotation_queue):
# if value():
# _die.append(index)
        # Replace the finished closures' entries with None placeholders
for i in _die:
self.rotation_queue[i] = None
        # Filter out the None placeholders from the queue
if self.rotation_queue:
self.rotation_queue = filter(None, self.rotation_queue) | 2.203125 | 2 |
audiochall.py | CodeAchieveDream/SpeechRecognition | 3 | 12787543 | # -*- coding: utf-8 -*-
"""
import numpy as np
import time
import tensorflow as tf
from tensorflow.python.ops import ctc_ops
from collections import Counter
yuyinutils = __import__("audioutils")
sparse_tuple_to_texts_ch = yuyinutils.sparse_tuple_to_texts_ch
ndarray_to_text_ch = yuyinutils.ndarray_to_text_ch
get_audio_and_transcriptch = yuyinutils.get_audio_and_transcriptch
pad_sequences = yuyinutils.pad_sequences
sparse_tuple_from = yuyinutils.sparse_tuple_from
get_wavs_lables = yuyinutils.get_wavs_lables
tf.reset_default_graph()
b_stddev = 0.046875
h_stddev = 0.046875
n_hidden = 1024
n_hidden_1 = 1024
n_hidden_2 =1024
n_hidden_5 = 1024
n_cell_dim = 1024
n_hidden_3 = 2 * 1024
keep_dropout_rate=0.95
relu_clip = 20
def BiRNN_model(batch_x, seq_length, n_input, n_context, n_character, keep_dropout):
# batch_x_shape: [batch_size, n_steps, n_input + 2*n_input*n_context]
batch_x_shape = tf.shape(batch_x)
    # Reorder the input to be time-major
batch_x = tf.transpose(batch_x, [1, 0, 2])
    # Flatten to 2-D before feeding the first fully-connected layer
batch_x = tf.reshape(batch_x,
[-1, n_input + 2 * n_input * n_context]) # (n_steps*batch_size, n_input + 2*n_input*n_context)
    # Use clipped ReLU activations and dropout.
# 1st layer
with tf.name_scope('fc1'):
b1 = variable_on_cpu('b1', [n_hidden_1], tf.random_normal_initializer(stddev=b_stddev))
h1 = variable_on_cpu('h1', [n_input + 2 * n_input * n_context, n_hidden_1],
tf.random_normal_initializer(stddev=h_stddev))
layer_1 = tf.minimum(tf.nn.relu(tf.add(tf.matmul(batch_x, h1), b1)), relu_clip)
layer_1 = tf.nn.dropout(layer_1, keep_dropout)
# 2nd layer
with tf.name_scope('fc2'):
b2 = variable_on_cpu('b2', [n_hidden_2], tf.random_normal_initializer(stddev=b_stddev))
h2 = variable_on_cpu('h2', [n_hidden_1, n_hidden_2], tf.random_normal_initializer(stddev=h_stddev))
layer_2 = tf.minimum(tf.nn.relu(tf.add(tf.matmul(layer_1, h2), b2)), relu_clip)
layer_2 = tf.nn.dropout(layer_2, keep_dropout)
# 3rd layer
with tf.name_scope('fc3'):
b3 = variable_on_cpu('b3', [n_hidden_3], tf.random_normal_initializer(stddev=b_stddev))
h3 = variable_on_cpu('h3', [n_hidden_2, n_hidden_3], tf.random_normal_initializer(stddev=h_stddev))
layer_3 = tf.minimum(tf.nn.relu(tf.add(tf.matmul(layer_2, h3), b3)), relu_clip)
layer_3 = tf.nn.dropout(layer_3, keep_dropout)
    # Bidirectional RNN
with tf.name_scope('lstm'):
# Forward direction cell:
lstm_fw_cell = tf.contrib.rnn.BasicLSTMCell(n_cell_dim, forget_bias=1.0, state_is_tuple=True)
lstm_fw_cell = tf.contrib.rnn.DropoutWrapper(lstm_fw_cell,
input_keep_prob=keep_dropout)
# Backward direction cell:
lstm_bw_cell = tf.contrib.rnn.BasicLSTMCell(n_cell_dim, forget_bias=1.0, state_is_tuple=True)
lstm_bw_cell = tf.contrib.rnn.DropoutWrapper(lstm_bw_cell,
input_keep_prob=keep_dropout)
# `layer_3` `[n_steps, batch_size, 2*n_cell_dim]`
layer_3 = tf.reshape(layer_3, [-1, batch_x_shape[0], n_hidden_3])
outputs, output_states = tf.nn.bidirectional_dynamic_rnn(cell_fw=lstm_fw_cell,
cell_bw=lstm_bw_cell,
inputs=layer_3,
dtype=tf.float32,
time_major=True,
sequence_length=seq_length)
        # Concatenate the forward and backward outputs: [n_steps, batch_size, 2*n_cell_dim]
outputs = tf.concat(outputs, 2)
# to a single tensor of shape [n_steps*batch_size, 2*n_cell_dim]
outputs = tf.reshape(outputs, [-1, 2 * n_cell_dim])
with tf.name_scope('fc5'):
b5 = variable_on_cpu('b5', [n_hidden_5], tf.random_normal_initializer(stddev=b_stddev))
h5 = variable_on_cpu('h5', [(2 * n_cell_dim), n_hidden_5], tf.random_normal_initializer(stddev=h_stddev))
layer_5 = tf.minimum(tf.nn.relu(tf.add(tf.matmul(outputs, h5), b5)), relu_clip)
layer_5 = tf.nn.dropout(layer_5, keep_dropout)
with tf.name_scope('fc6'):
        # Fully-connected layer feeding the softmax classification
b6 = variable_on_cpu('b6', [n_character], tf.random_normal_initializer(stddev=b_stddev))
h6 = variable_on_cpu('h6', [n_hidden_5, n_character], tf.random_normal_initializer(stddev=h_stddev))
layer_6 = tf.add(tf.matmul(layer_5, h6), b6)
        # Reshape the 2-D [n_steps*batch_size, n_character] tensor back to 3-D time-major [n_steps, batch_size, n_character].
layer_6 = tf.reshape(layer_6, [-1, batch_x_shape[0], n_character])
# Output shape: [n_steps, batch_size, n_character]
return layer_6
"""
used to create a variable in CPU memory.
"""
def variable_on_cpu(name, shape, initializer):
# Use the /cpu:0 device for scoped operations
with tf.device('/cpu:0'):
# Create or get apropos variable
var = tf.get_variable(name=name, shape=shape, initializer=initializer)
return var
# alternative local paths (superseded by the assignments below):
# wav_path = 'D:/data_thchs30/data_thchs30/train'
# label_file = 'D:/data_thchs30/doc/trans/train.word.txt'
wav_path = 'N:\WAV\wav\wav\\train'
label_file = 'N:\WAV\doc\doc\\trans\\train.word.txt'
wav_files, labels = get_wavs_lables(wav_path,label_file)
print(wav_files[0], labels[0])
# wav/train/A11/A11_0.WAV -> 绿 是 阳春 烟 景 大块 文章 的 底色 四月 的 林 峦 更是 绿 得 鲜活 秀媚 诗意 盎然
print("wav:", len(wav_files), "label", len(labels))
# Build the vocabulary
all_words = []
for label in labels:
#print(label)
all_words += [word for word in label]
counter = Counter(all_words)
words = sorted(counter)
words_size= len(words)
word_num_map = dict(zip(words, range(words_size)))
print('字表大小:', words_size)
n_input = 26  # number of MFCC (mel-cepstral) coefficients per frame
n_context = 9  # number of context frames included on each side of every time step
batch_size =8
def next_batch(labels, start_idx = 0,batch_size=1,wav_files = wav_files):
filesize = len(labels)
end_idx = min(filesize, start_idx + batch_size)
idx_list = range(start_idx, end_idx)
txt_labels = [labels[i] for i in idx_list]
wav_files = [wav_files[i] for i in idx_list]
(source, audio_len, target, transcript_len) = get_audio_and_transcriptch(None,
wav_files,
n_input,
n_context,word_num_map,txt_labels)
start_idx += batch_size
# Verify that the start_idx is not larger than total available sample size
if start_idx >= filesize:
start_idx = -1
# Pad input to max_time_step of this batch
    source, source_lengths = pad_sequences(source)  # unify lengths within the batch: zero-pad (or truncate) to the longest sequence
sparse_labels = sparse_tuple_from(target)
return start_idx,source, source_lengths, sparse_labels
next_idx, source, source_len, sparse_lab = next_batch(labels, 0, batch_size)
print(len(sparse_lab))
print(np.shape(source))
#print(sparse_lab)
t = sparse_tuple_to_texts_ch(sparse_lab, words)
print(t[0])
# Each entry of `source` is now the 9 previous frames (zero-padded when missing) + the frame itself
# + the 9 following frames, 26 features each, so the first entry is centred on the 10th frame.
# shape = [batch_size, max_stepsize, n_input + (2 * n_input * n_context)]
# batch_size and max_stepsize vary from batch to batch.
input_tensor = tf.placeholder(tf.float32, [None, None, n_input + (2 * n_input * n_context)], name='input')  # speech log filter bank or MFCC features
# Use sparse_placeholder; will generate a SparseTensor, required by ctc_loss op.
targets = tf.sparse_placeholder(tf.int32, name='targets')  # transcript text
# 1d array of size [batch_size]
seq_length = tf.placeholder(tf.int32, [None], name='seq_length')  # sequence lengths
keep_dropout = tf.placeholder(tf.float32)
# logits is the non-normalized output/activations from the last layer.
# logits will be input for the loss function.
# BiRNN_model is the bidirectional RNN model defined above
logits = BiRNN_model(input_tensor, tf.to_int64(seq_length), n_input, n_context, words_size + 1, keep_dropout)
# compute the CTC loss
avg_loss = tf.reduce_mean(ctc_ops.ctc_loss(targets, logits, seq_length))
# optimizer
learning_rate = 0.001
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(avg_loss)
with tf.name_scope("decode"):
decoded, log_prob = ctc_ops.ctc_beam_search_decoder( logits, seq_length, merge_repeated=False)
with tf.name_scope("accuracy"):
distance = tf.edit_distance( tf.cast(decoded[0], tf.int32), targets)
    # compute the label error rate (accuracy)
ler = tf.reduce_mean(distance, name='label_error_rate')
epochs = 100
savedir = "log/yuyinchalltest/"
saver = tf.train.Saver(max_to_keep=1)  # create the saver
# create the session
sess = tf.Session()
# initialize all variables; they are overwritten below if a saved checkpoint is found
sess.run(tf.global_variables_initializer())
kpt = tf.train.latest_checkpoint(savedir)
print("kpt:",kpt)
startepo = 0
if kpt is not None:
saver.restore(sess, kpt)
ind = kpt.find("-")
startepo = int(kpt[ind+1:])
print(startepo)
# prepare to run the training steps
section = '\n{0:=^40}\n'
print(section.format('Run training epoch'))
train_start = time.time()
for epoch in range(epochs):  # number of passes over the training set
    epoch_start = time.time()
    if epoch < startepo:
        continue
    print("epoch start:", epoch, "total epochs =", epochs)
#######################run batch####
n_batches_per_epoch = int(np.ceil(len(labels) / batch_size))
print("total loop ",n_batches_per_epoch,"in one epoch,",batch_size,"items in one loop")
train_cost = 0
train_ler = 0
next_idx =0
    for batch in range(n_batches_per_epoch):  # number of batch_size batches drawn per epoch
        # fetch the next batch of data
        next_idx, source, source_lengths, sparse_labels = \
            next_batch(labels, next_idx, batch_size)
        feed = {input_tensor: source, targets: sparse_labels, seq_length: source_lengths, keep_dropout: keep_dropout_rate}
        # run one optimizer step and fetch avg_loss
batch_cost, _ = sess.run([avg_loss, optimizer], feed_dict=feed )
train_cost += batch_cost
        if (batch + 1) % 20 == 0:
            print('loop:', batch, 'Train cost: ', train_cost / (batch + 1))
            feed2 = {input_tensor: source, targets: sparse_labels, seq_length: source_lengths, keep_dropout: 1.0}
            d, train_ler = sess.run([decoded[0], ler], feed_dict=feed2)
            dense_decoded = tf.sparse_tensor_to_dense(d, default_value=-1).eval(session=sess)
            dense_labels = sparse_tuple_to_texts_ch(sparse_labels, words)
            counter = 0
            print('Label err rate: ', train_ler)
            for orig, decoded_arr in zip(dense_labels, dense_decoded):
                # convert to strings
                decoded_str = ndarray_to_text_ch(decoded_arr, words)
                print(' file {}'.format(counter))
                print('Original: {}'.format(orig))
                print('Decoded: {}'.format(decoded_str))
                counter = counter + 1
                break
epoch_duration = time.time() - epoch_start
log = 'Epoch {}/{}, train_cost: {:.3f}, train_ler: {:.3f}, time: {:.2f} sec'
    print(log.format(epoch, epochs, train_cost, train_ler, epoch_duration))
saver.save(sess, savedir+"yuyinch.cpkt", global_step=epoch)
train_duration = time.time() - train_start
print('Training complete, total duration: {:.2f} min'.format(train_duration / 60))
sess.close()
| 2.25 | 2 |
Solutions/0045.jump.py | lyhshang/LeetCode-Solutions | 0 | 12787544 | <reponame>lyhshang/LeetCode-Solutions
# -*- coding: utf-8 -*-
# author:lyh
# datetime:2020/5/18 21:34
"""
45. 跳跃游戏 II
给定一个非负整数数组,你最初位于数组的第一个位置。
数组中的每个元素代表你在该位置可以跳跃的最大长度。
你的目标是使用最少的跳跃次数到达数组的最后一个位置。
示例:
输入: [2,3,1,1,4]
输出: 2
解释: 跳到最后一个位置的最小跳跃数是 2。
从下标为 0 跳到下标为 1 的位置,跳 1 步,然后跳 3 步到达数组的最后一个位置。
说明:
假设你总是可以到达数组的最后一个位置。
"""
from typing import List
class Solution:
    def jump(self, nums: List[int]) -> int:
        # Greedy "BFS by levels": [l, r] is the window of indices reachable
        # with the current number of jumps.
        r = 0
        l = 0
        step = 0
        while r + 1 < len(nums):
            # farthest index reachable with one more jump from the current window
            nr = 0
            for i in range(l, r + 1):
                nr = max(i + nums[i], nr)
            step += 1
            l = r + 1
            r = nr
        return step
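# Worked trace (illustrative) on the example [2,3,1,1,4]:
#   window [0,0] -> farthest reach 0+2=2, step=1, window becomes [1,2]
#   window [1,2] -> farthest reach max(1+3, 2+1)=4, step=2, window becomes [3,4]
#   r+1 (=5) is no longer < len(nums) (=5), so the answer is 2.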
if __name__ == '__main__':
print(
Solution().jump([2,3,1,1,4]), 2,
)
| 3.4375 | 3 |
test.py | yuchen02/Gesture-recognition | 0 | 12787545 | <gh_stars>0
import numpy as np
import paddle
from PIL import Image
import paddle.fluid as fluid
from model import MyDNN
import cv2
# Load the image to predict and preprocess it
def load_image(path):
    img = Image.open(path)
    img = img.resize((100, 100), Image.ANTIALIAS)
    img = np.array(img).astype('float32')
    img = img.transpose((2, 0, 1))  # HWC -> CHW
    img = img / 255.0  # scale pixel values to [0, 1]
    print(img.shape)
    return img
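# Example (illustrative): a 100x100 RGB JPEG comes back as a float32 array of
# shape (3, 100, 100) with values in [0, 1], which is what the print above reports.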
# Run prediction inside a dygraph guard
with fluid.dygraph.guard():
    infer_path = './data/ges-Dataset/6/IMG_1124.JPG'
    model_dict, _ = fluid.load_dygraph('MyDNN.pdparams')  # fluid.load_dygraph(model_path)
    model = MyDNN()
    model.load_dict(model_dict)  # load the trained parameters
    model.eval()  # evaluation mode
    infer_img = load_image(infer_path)
    infer_img = np.array(infer_img).astype('float32')
    infer_img = infer_img[np.newaxis, :, :, :]
    infer_img = fluid.dygraph.to_variable(infer_img)
    result = model(infer_img)
    print(result)
    img = cv2.imread(infer_path)
    cv2.imshow('11', img)
    print(np.argmax(result.numpy()))
    cv2.waitKey(0)  # keep the image window open until a key is pressed
| 2.71875 | 3 |
ergo/platforms/metaculus/question/lineardate.py | bmillwood/ergo | 2 | 12787546 | <gh_stars>1-10
from datetime import date, datetime, timedelta
import pandas as pd
from plotnine import (
aes,
element_text,
facet_wrap,
geom_histogram,
ggplot,
guides,
scale_fill_brewer,
scale_x_datetime,
theme,
)
from ergo.theme import ergo_theme
from .linear import LinearQuestion
class LinearDateQuestion(LinearQuestion):
# TODO: add log functionality (if some psychopath makes a log scaled date question)
def _scale_x(self, xmin: float = None, xmax: float = None):
return scale_x_datetime(limits=(xmin, xmax))
@property
def question_range(self):
"""
Question range from the Metaculus data plus the question's data range
"""
qr = {
"min": 0,
"max": 1,
"date_min": datetime.strptime(
self.possibilities["scale"]["min"], "%Y-%m-%d"
).date(),
"date_max": datetime.strptime(
self.possibilities["scale"]["max"], "%Y-%m-%d"
).date(),
}
qr["date_range"] = (qr["date_max"] - qr["date_min"]).days
return qr
# TODO Make less fancy. Would be better to only accept datetimes
def normalize_samples(self, samples):
"""
Normalize samples from dates to the normalized scale used by the Metaculus API
:param samples: dates from the predicted distribution answering the question
:return: normalized samples
"""
if isinstance(samples[0], date):
if type(samples) != pd.Series:
try:
samples = pd.Series(samples)
except ValueError:
raise ValueError("Could not process samples vector")
return self.normalize_dates(samples)
else:
return super().normalize_samples(samples)
def normalize_dates(self, dates: pd.Series):
"""
Map dates to the normalized scale used by the Metaculus API
:param dates: a pandas series of dates
:return: normalized samples
"""
return (dates - self.question_range["date_min"]).dt.days / self.question_range[
"date_range"
]
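    # Illustrative example (hypothetical dates, not from the Metaculus API): with
    # date_min 2020-01-01 and date_max 2020-12-31, date_range is 365 days, so a
    # sample of 2020-07-01 maps to 182 / 365 ≈ 0.499 on the normalized scale.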
def denormalize_samples(self, samples):
"""
Map normalized samples to dates using the date range from the question
:param samples: normalized samples
:return: dates
"""
def denorm(sample):
return self.question_range["date_min"] + timedelta(
days=round(self.question_range["date_range"] * sample)
)
if type(samples) == float:
return denorm(samples)
else:
samples = pd.Series(samples)
return samples.apply(denorm)
# TODO enforce return type date/datetime
def sample_community(self):
"""
Sample an approximation of the entire current community prediction,
on the true scale of the question.
:return: One sample on the true scale
"""
normalized_sample = self.sample_normalized_community()
return self.denormalize_samples(normalized_sample)
def comparison_plot( # type: ignore
self, df: pd.DataFrame, xmin=None, xmax=None, bins: int = 50, **kwargs
):
return (
ggplot(df, aes(df.columns[1], fill=df.columns[0]))
+ scale_fill_brewer(type="qual", palette="Pastel1")
+ geom_histogram(position="identity", alpha=0.9, bins=bins)
+ self._scale_x(xmin, xmax)
+ facet_wrap(df.columns[0], ncol=1)
+ guides(fill=False)
+ ergo_theme
+ theme(axis_text_x=element_text(rotation=45, hjust=1))
)
def density_plot( # type: ignore
self,
df: pd.DataFrame,
xmin=None,
xmax=None,
fill: str = "#fbb4ae",
bins: int = 50,
**kwargs,
):
return (
ggplot(df, aes(df.columns[0]))
+ geom_histogram(fill=fill, bins=bins)
+ self._scale_x(xmin, xmax)
+ ergo_theme
+ theme(axis_text_x=element_text(rotation=45, hjust=1))
)
| 2.5 | 2 |
vyperlogix/gds/otp.py | raychorn/chrome_gui | 1 | 12787547 | '''
This module provides an Otp object that can be used to get one time
pad strings. It should be adequate for situations that don't require
high security.
You can call the module directly as a script; it will want the
number of otp strings to print out and an optional seed number.
For example, 'python otp.py 20' will print out 20 otp strings.
You may pass in a function to the constructor. This function takes
an integer parameter (defaults to 0) and must return a string.
This string is hashed with the MD5 algorithm and the hex
representation of the hash is returned. If you do not pass in a
string generating function, an internal function is used that is
based on the whrandom module.
Once you have constructed an Otp object, call the Get() method to
return an OTP string. The Get() method can have an integer
parameter that is passed to the str_function() function. For the
default function (GenerateString()), if the seed is nonzero, the
Wichmann-Hill generator is started over and initialized from the
seed.
Each call to Get generates a new random string, sends it to
md5.update(), (which appends it to its own internal copy of all the
strings it's been sent), and then a new md5 hash is gotten, which is
converted to the hex representation with 32 hex characters.
This module could be made to provide cryptographically secure
one time pads by substituting a cryptographic quality random
number generator for the whrandom object. You can go out on
the web and search for "random number" and find some hardware
devices to do this.
Copyright (C) 2002 GDS Software
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of
the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
MA 02111-1307 USA
See http://www.gnu.org/licenses/licenses.html for more details.
'''
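# Usage sketch (illustrative only; `my_source` below is a hypothetical callable,
# not part of this module). Any function that takes an integer seed and returns
# a string can be passed to the constructor:
#
#     def my_source(seed=0):
#         return open('/dev/urandom', 'rb').read(4)
#
#     pad = Otp(str_function=my_source)
#     print pad.Get()    # prints a 32-character hex string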
import md5, whrandom
__version__ = "$Id: otp.py,v 1.3 2002/08/21 12:41:49 donp Exp $"
whG = whrandom.whrandom()
def GenerateString(seed = 0):
'''Generate a string from a four byte integer. The string is the
4 bytes of the integer, each converted to a character.
'''
global whG
if seed: # seed != 0 means to restart; whrandom seeds from time.
whG.seed(seed & 0xff,
(seed & 0xff00) >> 8,
(seed & 0xff0000) >> 16)
n = whG.randint(0, 2**30-1)
str = ""
str = str + chr((n & 0xFF000000) >> 24)
str = str + chr((n & 0x00FF0000) >> 16)
str = str + chr((n & 0x0000FF00) >> 8)
str = str + chr((n & 0x000000FF) >> 0)
return str
class Otp:
    def __init__(self, str_function = GenerateString, seed = 0):
        self.str_function = str_function  # remember the string source so Get() uses it
        self.str_function(seed)  # Initialize random number generator
        self.m = md5.new()
    def Get(self, seed=0):
        '''Return an OTP.
        '''
        self.m.update(self.str_function(seed))
string = self.m.digest()
str = ""
for ix in xrange(len(string)):
str = str + "%02X" % ord(string[ix])
return str
if __name__ == "__main__":
import sys
num = 1
seed = 0
if len(sys.argv) < 2:
print "Usage: otp num_times [seed]"
sys.exit(1)
num = int(sys.argv[1])
if len(sys.argv) == 3:
seed = int(sys.argv[2])
o = Otp(seed=seed)
for ix in xrange(num):
print o.Get()
| 3.265625 | 3 |
setup.py | flug/gonzo | 8 | 12787548 | <gh_stars>1-10
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
make_abs = lambda fn: os.path.join(here, fn)
def parse_requirments(fn, dependency_links):
requirements = []
if not os.path.exists(fn):
return requirements, dependency_links
with open(fn, 'r') as f:
for dep in f:
dep = dep.strip()
            # Make GitHub requirements work with setuptools the way they do
            # with `pip -r`: setuptools does not accept raw VCS URLs, so each
            # one is split into a dependency_link plus a normal requirement.
if dep.startswith('git+'):
dependency_links.append(dep)
_, dep = dep.rsplit('#egg=', 1)
dep = dep.replace('-', '==', 1)
requirements.append(dep)
return requirements, dependency_links
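# Illustrative example (hypothetical project names, not from this repo): a line such as
#   git+https://github.com/example/foo#egg=foo-1.2.3
# is kept verbatim as a dependency link and also yields the requirement string "foo==1.2.3".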
requirements, dependency_links = parse_requirments(
make_abs('requirements.txt'), [])
test_requirements, dependency_links = parse_requirments(
make_abs('test_requirements.txt'), dependency_links)
setup(
name='gonzo',
packages=find_packages(exclude=['tests', 'tests.*']),
version='0.4.2',
author='onefinestay',
author_email='<EMAIL>',
url='https://github.com/onefinestay/gonzo',
install_requires=requirements,
tests_require=test_requirements,
classifiers=[
"Development Status :: 3 - Alpha",
"Programming Language :: Python",
"Intended Audience :: Developers",
"Natural Language :: English",
"Topic :: Software Development",
"Topic :: Utilities",
"Environment :: Console",
],
description='Instance and release management made easy',
long_description=open(make_abs('README.rst')).read(),
include_package_data=True,
entry_points={
'console_scripts': [
'gonzo = gonzo.scripts.base:main'
]
},
zip_safe=False,
)
| 2.171875 | 2 |
value/measures/migrations/0001_initial.py | M3SOulu/value | 2 | 12787549 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Measure',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=255)),
('is_active', models.BooleanField(default=True)),
],
options={
'db_table': 'measures',
},
),
migrations.CreateModel(
name='MeasureValue',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('description', models.CharField(max_length=255)),
('order', models.IntegerField(default=0)),
('color', models.CharField(default=b'#337BB7', max_length=7, choices=[(b'#5CB85C', b'#5CB85C'), (b'#BAE8BA', b'#BAE8BA'), (b'#8AD38A', b'#8AD38A'), (b'#369836', b'#369836'), (b'#1B7C1B', b'#1B7C1B'), (b'#F0AD4E', b'#F0AD4E'), (b'#FFD8A0', b'#FFD8A0'), (b'#FFC675', b'#FFC675'), (b'#DE9226', b'#DE9226'), (b'#AD6D11', b'#AD6D11'), (b'#D9534F', b'#D9534F'), (b'#FFADAB', b'#FFADAB'), (b'#FC827F', b'#FC827F'), (b'#BE2F2B', b'#BE2F2B'), (b'#961512', b'#961512'), (b'#5BC1DE', b'#5BC1DE'), (b'#BAEAF8', b'#BAEAF8'), (b'#85D5EC', b'#85D5EC'), (b'#39ACCD', b'#39ACCD'), (b'#1993B6', b'#1993B6'), (b'#337BB7', b'#337BB7'), (b'#7EB1DC', b'#7EB1DC'), (b'#5393C8', b'#5393C8'), (b'#1265AB', b'#1265AB'), (b'#094B83', b'#094B83'), (b'#222222', b'#222222'), (b'#929191', b'#929191'), (b'#5E5E5E', b'#5E5E5E'), (b'#000000', b'#000000'), (b'#030202', b'#030202')])),
('measure', models.ForeignKey(to='measures.Measure')),
],
options={
'ordering': ('order',),
'db_table': 'measure_values',
},
),
]
| 1.75 | 2 |
psi/migrations/versions/05_7b2d863b105_.py | lusi1990/betterlifepsi | 33 | 12787550 | """Add role seed data for flask-security
Revision ID: 7b2d863b105
Revises: <PASSWORD>
Create Date: 2015-07-02 10:48:35.805882
"""
# revision identifiers, used by Alembic.
revision = '7b2d863b105'
down_revision = '<PASSWORD>'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
role_table = sa.table('role',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('name', sa.String(length=80), nullable=True),
sa.Column('description', sa.String(length=255), nullable=True),
)
op.bulk_insert(role_table, [
{'id': 2, 'name': 'product_category_view', 'description': 'View product categories'},
{'id': 3, 'name': 'product_category_create', 'description': 'Create product category'},
{'id': 4, 'name': 'product_category_edit', 'description': 'Edit product category'},
{'id': 5, 'name': 'product_category_delete', 'description': 'Delete product category'},
{'id': 6, 'name': 'sales_order_view', 'description': 'View sales orders'},
{'id': 7, 'name': 'sales_order_create', 'description': 'Create sales order'},
{'id': 8, 'name': 'sales_order_edit', 'description': 'Edit sales order'},
{'id': 9, 'name': 'sales_order_delete', 'description': 'Delete sales order'},
{'id': 10, 'name': 'purchase_order_view', 'description': 'View purchase orders'},
{'id': 11, 'name': 'purchase_order_create', 'description': 'Create purchase order'},
{'id': 12, 'name': 'purchase_order_edit', 'description': 'Edit purchase order'},
{'id': 13, 'name': 'purchase_order_delete', 'description': 'Delete purchase order'},
{'id': 14, 'name': 'expense_view', 'description': 'View expenses'},
{'id': 15, 'name': 'expense_create', 'description': 'Create expense'},
{'id': 16, 'name': 'expense_edit', 'description': 'Edit expense'},
{'id': 17, 'name': 'expense_delete', 'description': 'Delete expense'},
{'id': 18, 'name': 'incoming_view', 'description': 'View incoming'},
{'id': 19, 'name': 'incoming_create', 'description': 'Create incoming'},
{'id': 20, 'name': 'incoming_edit', 'description': 'Edit incoming'},
{'id': 21, 'name': 'incoming_delete', 'description': 'Delete incoming'},
{'id': 22, 'name': 'supplier_view', 'description': 'View suppliers'},
{'id': 23, 'name': 'supplier_create', 'description': 'Create supplier'},
{'id': 24, 'name': 'supplier_edit', 'description': 'Edit supplier'},
{'id': 25, 'name': 'supplier_delete', 'description': 'Delete supplier'},
{'id': 26, 'name': 'product_view', 'description': 'View products'},
{'id': 27, 'name': 'product_create', 'description': 'Create product'},
{'id': 28, 'name': 'product_edit', 'description': 'Edit product'},
{'id': 29, 'name': 'product_delete', 'description': 'Delete product'},
{'id': 30, 'name': 'enum_values_view', 'description': 'View enum values'},
{'id': 31, 'name': 'enum_values_create', 'description': 'Create enum value'},
{'id': 32, 'name': 'enum_values_edit', 'description': 'Edit enum value'},
{'id': 33, 'name': 'enum_values_delete', 'description': 'Delete enum value'},
{'id': 34, 'name': 'preference_view', 'description': 'View system preference'},
{'id': 35, 'name': 'preference_edit', 'description': 'Update system preference'},
{'id': 36, 'name': 'user_view', 'description': 'View user'},
{'id': 37, 'name': 'user_create', 'description': 'Create user'},
{'id': 38, 'name': 'user_edit', 'description': 'Edit user'},
{'id': 39, 'name': 'user_delete', 'description': 'Delete user'},
{'id': 40, 'name': 'role_view', 'description': 'View roles'},
{'id': 41, 'name': 'role_create', 'description': 'Create role'},
{'id': 42, 'name': 'role_edit', 'description': 'Edit role'},
{'id': 43, 'name': 'role_delete', 'description': 'Delete role'},
],multiinsert=False)
from sqlalchemy.sql import text
op.get_bind().execute(text("ALTER SEQUENCE role_id_seq RESTART WITH 44;"))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
| 2 | 2 |