repo_name
stringlengths
5
92
path
stringlengths
4
221
copies
stringclasses
19 values
size
stringlengths
4
6
content
stringlengths
766
896k
license
stringclasses
15 values
hash
int64
-9,223,277,421,539,062,000
9,223,102,107B
line_mean
float64
6.51
99.9
line_max
int64
32
997
alpha_frac
float64
0.25
0.96
autogenerated
bool
1 class
ratio
float64
1.5
13.6
config_test
bool
2 classes
has_no_keywords
bool
2 classes
few_assignments
bool
1 class
Oli76/rwslib
rwslib/builders.py
1
77124
# -*- coding: utf-8 -*- __author__ = 'isparks' import uuid from xml.etree import cElementTree as ET from datetime import datetime from string import ascii_letters from rwslib.builder_constants import * """ builders.py provides convenience classes for building ODM documents for clinical data and metadata post messages. """ # ----------------------------------------------------------------------------------------------------------------------- # Constants VALID_ID_CHARS = ascii_letters + '_' # ----------------------------------------------------------------------------------------------------------------------- # Utilities def now_to_iso8601(): """Returns NOW date/time as a UTC date/time formated as iso8601 string""" utc_date = datetime.utcnow() return dt_to_iso8601(utc_date) def dt_to_iso8601(dt): """Turn a datetime into an ISO8601 formatted string""" return dt.strftime("%Y-%m-%dT%H:%M:%S") def bool_to_yes_no(val): """Convert True/False to Yes/No""" return 'Yes' if val else 'No' def bool_to_true_false(val): """Convert True/False to TRUE / FALSE""" return 'TRUE' if val else 'FALSE' def indent(elem, level=0): """Indent a elementree structure""" i = "\n" + level * " " if len(elem) > 0: if not elem.text or not elem.text.strip(): elem.text = i + " " if not elem.tail or not elem.tail.strip(): elem.tail = i for elem in elem: indent(elem, level + 1) if not elem.tail or not elem.tail.strip(): elem.tail = i else: if level and (not elem.tail or not elem.tail.strip()): elem.tail = i def make_element(builder, tag, content): """Make an element with this tag and text content""" builder.start(tag, {}) builder.data(content) # Must be UTF-8 encoded builder.end(tag) # ----------------------------------------------------------------------------------------------------------------------- # Classes class ODMElement(object): """Base class for ODM XML element classes""" def __call__(self, *args): """Collect all children passed in call""" for child in args: self << child return self def __lshift__(self, other): """__lshift__ should be overridden in descendant classes to accept child elements and incorporate them. By default takes no child elements """ raise ValueError("%s takes no child elements" % self.__class__.__name__) def add(self, *args): """Like call but adds a set of args""" for child in args: self << child return self def set_single_attribute(self, other, trigger_klass, property_name): """Used to set guard the setting of an attribute which is singular and can't be set twice""" if isinstance(other, trigger_klass): # Check property exists if not hasattr(self, property_name): raise AttributeError("%s has no property %s" % (self.__class__.__name__, property_name)) if getattr(self, property_name) is None: setattr(self, property_name, other) else: raise ValueError( '%s already has a %s element set.' 
% (self.__class__.__name__, other.__class__.__name__,)) def set_list_attribute(self, other, trigger_klass, property_name): """Used to set guard the setting of a list attribute, ensuring the same element is not added twice.""" # Check property exists if isinstance(other, trigger_klass): if not hasattr(self, property_name): raise AttributeError("%s has no property %s" % (self.__class__.__name__, property_name)) val = getattr(self, property_name, []) if other in val: raise ValueError("%s already exists in %s" % (other.__class__.__name__, self.__class__.__name__)) else: val.append(other) setattr(self, property_name, val) class UserRef(ODMElement): def __init__(self, oid): self.oid = oid def build(self, builder): builder.start("UserRef", dict(UserOID=self.oid)) builder.end("UserRef") class LocationRef(ODMElement): def __init__(self, oid): self.oid = oid def build(self, builder): builder.start("LocationRef", dict(LocationOID=self.oid)) builder.end("LocationRef") class ReasonForChange(ODMElement): def __init__(self, reason): self.reason = reason def build(self, builder): builder.start("ReasonForChange", {}) builder.data(self.reason) builder.end("ReasonForChange") class DateTimeStamp(ODMElement): def __init__(self, date_time): self.date_time = date_time def build(self, builder): builder.start("DateTimeStamp", {}) if isinstance(self.date_time, datetime): builder.data(dt_to_iso8601(self.date_time)) else: builder.data(self.date_time) builder.end("DateTimeStamp") class AuditRecord(ODMElement): """AuditRecord is supported only by ItemData in Rave""" EDIT_MONITORING = 'Monitoring' EDIT_DATA_MANAGEMENT = 'DataManagement' EDIT_DB_AUDIT = 'DBAudit' EDIT_POINTS = [EDIT_MONITORING, EDIT_DATA_MANAGEMENT, EDIT_DB_AUDIT] def __init__(self, edit_point=None, used_imputation_method=None, identifier=None, include_file_oid=None): self._edit_point = None self.edit_point = edit_point self.used_imputation_method = used_imputation_method self._id = None self.id = identifier self.include_file_oid = include_file_oid self.user_ref = None self.location_ref = None self.reason_for_change = None self.date_time_stamp = None @property def id(self): return self._id @id.setter def id(self, value): if value not in [None, ''] and str(value).strip() != '': val = str(value).strip()[0] if val not in VALID_ID_CHARS: raise AttributeError('%s id cannot start with "%s" character' % (self.__class__.__name__, val,)) self._id = value @property def edit_point(self): return self._edit_point @edit_point.setter def edit_point(self, value): if value is not None: if value not in self.EDIT_POINTS: raise AttributeError('%s edit_point must be one of %s not %s' % ( self.__class__.__name__, ','.join(self.EDIT_POINTS), value,)) self._edit_point = value def build(self, builder): params = {} if self.edit_point is not None: params["EditPoint"] = self.edit_point if self.used_imputation_method is not None: params['UsedImputationMethod'] = bool_to_yes_no(self.used_imputation_method) if self.id is not None: params['ID'] = str(self.id) if self.include_file_oid is not None: params['mdsol:IncludeFileOID'] = bool_to_yes_no(self.include_file_oid) builder.start("AuditRecord", params) if self.user_ref is None: raise ValueError("User Reference not set.") self.user_ref.build(builder) if self.location_ref is None: raise ValueError("Location Reference not set.") self.location_ref.build(builder) if self.date_time_stamp is None: raise ValueError("DateTime not set.") self.date_time_stamp.build(builder) # Optional if self.reason_for_change is not None: 
self.reason_for_change.build(builder) builder.end("AuditRecord") def __lshift__(self, other): if not isinstance(other, (UserRef, LocationRef, DateTimeStamp, ReasonForChange,)): raise ValueError("AuditRecord cannot accept a child element of type %s" % other.__class__.__name__) # Order is important, apparently self.set_single_attribute(other, UserRef, 'user_ref') self.set_single_attribute(other, LocationRef, 'location_ref') self.set_single_attribute(other, DateTimeStamp, 'date_time_stamp') self.set_single_attribute(other, ReasonForChange, 'reason_for_change') return other class TransactionalElement(ODMElement): """Models an ODM Element that is allowed a transaction type. Different elements have different allowed transaction types""" ALLOWED_TRANSACTION_TYPES = [] def __init__(self, transaction_type): self._transaction_type = None self.transaction_type = transaction_type @property def transaction_type(self): return self._transaction_type @transaction_type.setter def transaction_type(self, value): if value is not None: if value not in self.ALLOWED_TRANSACTION_TYPES: raise AttributeError('%s transaction_type element must be one of %s not %s' % ( self.__class__.__name__, ','.join(self.ALLOWED_TRANSACTION_TYPES), value,)) self._transaction_type = value class MdsolQuery(ODMElement): """MdsolQuery extension element for Queries at item level only""" def __init__(self, value=None, query_repeat_key=None, recipient=None, status=None, requires_response=None, response=None): self.value = value self.query_repeat_key = query_repeat_key self.recipient = recipient self._status = None self.status = status self.requires_response = requires_response self.response = response @property def status(self): return self._status @status.setter def status(self, value): if value is not None: if not isinstance(value, QueryStatusType): raise AttributeError("%s action type is invalid in mdsol:Query." 
% (value,)) self._status = value def build(self, builder): params = {} if self.value is not None: params['Value'] = str(self.value) if self.query_repeat_key is not None: params['QueryRepeatKey'] = str(self.query_repeat_key) if self.recipient is not None: params['Recipient'] = str(self.recipient) if self.status is not None: params['Status'] = self.status.value if self.requires_response is not None: params['RequiresResponse'] = bool_to_yes_no(self.requires_response) # When closing a query if self.response is not None: params['Response'] = str(self.response) builder.start("mdsol:Query", params) builder.end("mdsol:Query") class ItemData(TransactionalElement): """Models the ODM ItemData object""" ALLOWED_TRANSACTION_TYPES = ['Insert', 'Update', 'Upsert', 'Context', 'Remove'] def __init__(self, itemoid, value, specify_value=None, transaction_type=None, lock=None, freeze=None, verify=None): super(self.__class__, self).__init__(transaction_type) self.itemoid = itemoid self.value = value self.specify_value = specify_value self.lock = lock self.freeze = freeze self.verify = verify self.audit_record = None self.queries = [] self.measurement_unit_ref = None def build(self, builder): """Build XML by appending to builder <ItemData ItemOID="MH_DT" Value="06 Jan 2009" TransactionType="Insert"> """ params = dict(ItemOID=self.itemoid) if self.transaction_type is not None: params["TransactionType"] = self.transaction_type if self.value in [None, '']: params['IsNull'] = 'Yes' else: params['Value'] = str(self.value) if self.specify_value is not None: params['mdsol:SpecifyValue'] = self.specify_value if self.lock is not None: params['mdsol:Lock'] = bool_to_yes_no(self.lock) if self.freeze is not None: params['mdsol:Freeze'] = bool_to_yes_no(self.freeze) if self.verify is not None: params['mdsol:Verify'] = bool_to_yes_no(self.verify) builder.start("ItemData", params) if self.audit_record is not None: self.audit_record.build(builder) # Measurement unit ref must be after audit record or RWS complains if self.measurement_unit_ref is not None: self.measurement_unit_ref.build(builder) for query in self.queries: query.build(builder) builder.end("ItemData") def __lshift__(self, other): if not isinstance(other, (MeasurementUnitRef, AuditRecord, MdsolQuery,)): raise ValueError("ItemData object can only receive MeasurementUnitRef, AuditRecord or MdsolQuery objects") self.set_single_attribute(other, MeasurementUnitRef, 'measurement_unit_ref') self.set_single_attribute(other, AuditRecord, 'audit_record') self.set_list_attribute(other, MdsolQuery, 'queries') return other class ItemGroupData(TransactionalElement): """Models the ODM ItemGroupData object. Note no name for the ItemGroupData element is required. This is built automatically by the form. 
""" ALLOWED_TRANSACTION_TYPES = ['Insert', 'Update', 'Upsert', 'Context'] def __init__(self, transaction_type=None, item_group_repeat_key=None, whole_item_group=False): super(self.__class__, self).__init__(transaction_type) self.item_group_repeat_key = item_group_repeat_key self.whole_item_group = whole_item_group self.items = {} def __lshift__(self, other): """Override << operator""" if not isinstance(other, ItemData): raise ValueError("ItemGroupData object can only receive ItemData object") if other.itemoid in self.items: raise ValueError("ItemGroupData object with that itemoid is already in the ItemGroupData object") self.items[other.itemoid] = other return other def build(self, builder, formname): """Build XML by appending to builder """ params = dict(ItemGroupOID=formname) if self.transaction_type is not None: params["TransactionType"] = self.transaction_type if self.item_group_repeat_key is not None: params["ItemGroupRepeatKey"] = str( self.item_group_repeat_key) # may be @context for transaction type upsert or context params["mdsol:Submission"] = "WholeItemGroup" if self.whole_item_group else "SpecifiedItemsOnly" builder.start("ItemGroupData", params) # Ask children for item in self.items.values(): item.build(builder) builder.end("ItemGroupData") class FormData(TransactionalElement): """Models the ODM FormData object""" ALLOWED_TRANSACTION_TYPES = ['Insert', 'Update'] def __init__(self, formoid, transaction_type=None, form_repeat_key=None): super(self.__class__, self).__init__(transaction_type) self.formoid = formoid self.form_repeat_key = form_repeat_key self.itemgroups = [] def __lshift__(self, other): """Override << operator""" if not isinstance(other, ItemGroupData): raise ValueError("FormData object can only receive ItemGroupData object") self.set_list_attribute(other, ItemGroupData, 'itemgroups') return other def build(self, builder): """Build XML by appending to builder <FormData FormOID="MH" TransactionType="Update"> """ params = dict(FormOID=self.formoid) if self.transaction_type is not None: params["TransactionType"] = self.transaction_type if self.form_repeat_key is not None: params["FormRepeatKey"] = str(self.form_repeat_key) builder.start("FormData", params) # Ask children for itemgroup in self.itemgroups: itemgroup.build(builder, self.formoid) builder.end("FormData") class StudyEventData(TransactionalElement): """Models the ODM StudyEventData object""" ALLOWED_TRANSACTION_TYPES = ['Insert', 'Update', 'Remove', 'Context'] def __init__(self, study_event_oid, transaction_type="Update", study_event_repeat_key=None): super(self.__class__, self).__init__(transaction_type) self.study_event_oid = study_event_oid self.study_event_repeat_key = study_event_repeat_key self.forms = [] def __lshift__(self, other): """Override << operator""" if not isinstance(other, FormData): raise ValueError("StudyEventData object can only receive FormData object") self.set_list_attribute(other, FormData, 'forms') return other def build(self, builder): """Build XML by appending to builder <StudyEventData StudyEventOID="SCREENING" StudyEventRepeatKey="1" TransactionType="Update"> """ params = dict(StudyEventOID=self.study_event_oid) if self.transaction_type is not None: params["TransactionType"] = self.transaction_type if self.study_event_repeat_key is not None: params["StudyEventRepeatKey"] = self.study_event_repeat_key builder.start("StudyEventData", params) # Ask children for form in self.forms: form.build(builder) builder.end("StudyEventData") class SubjectData(TransactionalElement): """Models 
the ODM SubjectData and ODM SiteRef objects""" ALLOWED_TRANSACTION_TYPES = ['Insert', 'Update', 'Upsert'] def __init__(self, sitelocationoid, subject_key, subject_key_type="SubjectName", transaction_type="Update"): super(self.__class__, self).__init__(transaction_type) self.sitelocationoid = sitelocationoid self.subject_key = subject_key self.subject_key_type = subject_key_type self.study_events = [] # Can have collection self.audit_record = None def __lshift__(self, other): """Override << operator""" if not isinstance(other, (StudyEventData, AuditRecord,)): raise ValueError("SubjectData object can only receive StudyEventData or AuditRecord object") self.set_list_attribute(other, StudyEventData, 'study_events') self.set_single_attribute(other, AuditRecord, 'audit_record') return other def build(self, builder): """Build XML by appending to builder""" params = dict(SubjectKey=self.subject_key) params['mdsol:SubjectKeyType'] = self.subject_key_type if self.transaction_type is not None: params["TransactionType"] = self.transaction_type builder.start("SubjectData", params) # Ask children if self.audit_record is not None: self.audit_record.build(builder) builder.start("SiteRef", {'LocationOID': self.sitelocationoid}) builder.end("SiteRef") for event in self.study_events: event.build(builder) builder.end("SubjectData") class ClinicalData(ODMElement): """Models the ODM ClinicalData object""" def __init__(self, projectname, environment, metadata_version_oid="1"): self.projectname = projectname self.environment = environment self.metadata_version_oid = metadata_version_oid self.subject_data = None def __lshift__(self, other): """Override << operator""" if not isinstance(other, SubjectData): raise ValueError("ClinicalData object can only receive SubjectData object") self.set_single_attribute(other, SubjectData, 'subject_data') return other def build(self, builder): """Build XML by appending to builder""" params = dict(MetaDataVersionOID=self.metadata_version_oid, StudyOID="%s (%s)" % (self.projectname, self.environment,), ) builder.start("ClinicalData", params) # Ask children if self.subject_data is not None: self.subject_data.build(builder) builder.end("ClinicalData") class ODM(ODMElement): """Models the ODM object""" FILETYPE_TRANSACTIONAL = 'Transactional' FILETYPE_SNAPSHOT = 'Snapshot' def __init__(self, originator, description="", creationdatetime=now_to_iso8601(), fileoid=None, filetype=None): self.originator = originator # Required self.description = description self.creationdatetime = creationdatetime # filetype will always be "Transactional" # ODM version will always be 1.3 # Granularity="SingleSubject" # AsOfDateTime always OMITTED (it's optional) self.clinical_data = None self.study = None self.filetype = ODM.FILETYPE_TRANSACTIONAL if filetype is None else ODM.FILETYPE_SNAPSHOT # Create unique fileoid if none given self.fileoid = str(uuid.uuid4()) if fileoid is None else fileoid def __lshift__(self, other): """Override << operator""" if not isinstance(other, (ClinicalData, Study,)): raise ValueError("ODM object can only receive ClinicalData or Study object") self.set_single_attribute(other, ClinicalData, 'clinical_data') self.set_single_attribute(other, Study, 'study') return other def getroot(self): """Build XML object, return the root""" builder = ET.TreeBuilder() params = dict(ODMVersion="1.3", FileType=self.filetype, CreationDateTime=self.creationdatetime, Originator=self.originator, FileOID=self.fileoid, xmlns="http://www.cdisc.org/ns/odm/v1.3", ) params['xmlns:mdsol'] = 
"http://www.mdsol.com/ns/odm/metadata" if self.description: params['Description'] = self.description builder.start("ODM", params) # Ask the children if self.study is not None: self.study.build(builder) if self.clinical_data is not None: self.clinical_data.build(builder) builder.end("ODM") return builder.close() def __str__(self): doc = self.getroot() indent(doc) header = '<?xml version="1.0" encoding="utf-8" ?>\n' return header + ET.tostring(doc, encoding='utf-8').decode('utf-8') # ----------------------------------------------------------------------------------------------------------------------- # Metadata Objects class GlobalVariables(ODMElement): """GlobalVariables Metadata element""" def __init__(self, protocol_name, name=None, description=''): """Name and description are not important. protocol_name maps to the Rave project name""" self.protocol_name = protocol_name self.name = name if name is not None else protocol_name self.description = description def build(self, builder): """Build XML by appending to builder""" builder.start("GlobalVariables", {}) make_element(builder, 'StudyName', self.name) make_element(builder, 'StudyDescription', self.description) make_element(builder, 'ProtocolName', self.protocol_name) builder.end("GlobalVariables") class TranslatedText(ODMElement): """Represents a language and a translated text for that language""" def __init__(self, text, lang=None): self.text = text self.lang = lang def build(self, builder): """Build XML by appending to builder""" params = {} if self.lang is not None: params['xml:lang'] = self.lang builder.start("TranslatedText", params) builder.data(self.text) builder.end("TranslatedText") class Symbol(ODMElement): def __init__(self): self.translations = [] def __lshift__(self, other): """Override << operator""" if not isinstance(other, TranslatedText): raise ValueError("Symbol can only accept TranslatedText objects as children") self.set_list_attribute(other, TranslatedText, 'translations') return other def build(self, builder): """Build XML by appending to builder""" builder.start("Symbol", {}) for child in self.translations: child.build(builder) builder.end("Symbol") class MeasurementUnit(ODMElement): """A measurement unit""" def __init__(self, oid, name, unit_dictionary_name=None, constant_a=1, constant_b=1, constant_c=0, constant_k=0, standard_unit=False): self.symbols = [] self.oid = oid self.name = name self.unit_dictionary_name = unit_dictionary_name self.constant_a = constant_a self.constant_b = constant_b self.constant_c = constant_c self.constant_k = constant_k self.standard_unit = standard_unit def build(self, builder): """Build XML by appending to builder""" params = dict(OID=self.oid, Name=self.name) if self.unit_dictionary_name: params['mdsol:UnitDictionaryName'] = self.unit_dictionary_name for suffix in ['A', 'B', 'C', 'K']: val = getattr(self, 'constant_{0}'.format(suffix.lower())) params['mdsol:Constant{0}'.format(suffix)] = str(val) if self.standard_unit: params['mdsol:StandardUnit'] = 'Yes' builder.start("MeasurementUnit", params) for child in self.symbols: child.build(builder) builder.end("MeasurementUnit") def __lshift__(self, other): """Override << operator""" if not isinstance(other, Symbol): raise ValueError("MeasurementUnits object can only receive Symbol object") self.set_list_attribute(other, Symbol, 'symbols') return other class BasicDefinitions(ODMElement): """Container for Measurement units""" def __init__(self): self.measurement_units = [] def build(self, builder): """Build XML by appending to 
builder""" builder.start("BasicDefinitions", {}) for child in self.measurement_units: child.build(builder) builder.end("BasicDefinitions") def __lshift__(self, other): """Override << operator""" if not isinstance(other, MeasurementUnit): raise ValueError("BasicDefinitions object can only receive MeasurementUnit object") self.measurement_units.append(other) return other class StudyEventRef(ODMElement): def __init__(self, oid, order_number, mandatory): self.oid = oid self.order_number = order_number self.mandatory = mandatory def build(self, builder): """Build XML by appending to builder""" params = dict(StudyEventOID=self.oid, OrderNumber=str(self.order_number), Mandatory=bool_to_yes_no(self.mandatory)) builder.start("StudyEventRef", params) builder.end("StudyEventRef") class Protocol(ODMElement): """Protocol child of MetaDataVersion, holder of StudyEventRefs""" def __init__(self): self.study_event_refs = [] def build(self, builder): """Build XML by appending to builder""" builder.start("Protocol", {}) for child in self.study_event_refs: child.build(builder) builder.end("Protocol") def __lshift__(self, other): """Override << operator""" if not isinstance(other, (StudyEventRef,)): raise ValueError('Protocol cannot accept a {0} as a child element'.format(other.__class__.__name__)) self.set_list_attribute(other, StudyEventRef, 'study_event_refs') return other class FormRef(ODMElement): def __init__(self, oid, order_number, mandatory): self.oid = oid self.order_number = order_number self.mandatory = mandatory def build(self, builder): params = dict(FormOID=self.oid, OrderNumber=str(self.order_number), Mandatory=bool_to_yes_no(self.mandatory) ) builder.start('FormRef', params) builder.end('FormRef') class StudyEventDef(ODMElement): # Event types SCHEDULED = 'Scheduled' UNSCHEDULED = 'Unscheduled' COMMON = 'Common' def __init__(self, oid, name, repeating, event_type, category=None, access_days=None, start_win_days=None, target_days=None, end_win_days=None, overdue_days=None, close_days=None ): self.oid = oid self.name = name self.repeating = repeating self.event_type = event_type self.category = category self.access_days = access_days self.start_win_days = start_win_days self.target_days = target_days self.end_win_days = end_win_days self.overdue_days = overdue_days self.close_days = close_days self.formrefs = [] def build(self, builder): """Build XML by appending to builder""" params = dict(OID=self.oid, Name=self.name, Repeating=bool_to_yes_no(self.repeating), Type=self.event_type) if self.category is not None: params['Category'] = self.category if self.access_days is not None: params['mdsol:AccessDays'] = str(self.access_days) if self.start_win_days is not None: params['mdsol:StartWinDays'] = str(self.start_win_days) if self.target_days is not None: params['mdsol:TargetDays'] = str(self.target_days) if self.end_win_days is not None: params['mdsol:EndWinDays'] = str(self.end_win_days) if self.overdue_days is not None: params['mdsol:OverDueDays'] = str(self.overdue_days) if self.close_days is not None: params['mdsol:CloseDays'] = str(self.close_days) builder.start("StudyEventDef", params) for formref in self.formrefs: formref.build(builder) builder.end("StudyEventDef") def __lshift__(self, other): """Override << operator""" if not isinstance(other, (FormRef,)): raise ValueError('StudyEventDef cannot accept a {0} as a child element'.format(other.__class__.__name__)) self.set_list_attribute(other, FormRef, 'formrefs') return other class ItemGroupRef(ODMElement): def __init__(self, oid, 
order_number, mandatory=True): self.oid = oid self.order_number = order_number self.mandatory = mandatory def build(self, builder): params = dict(ItemGroupOID=self.oid, OrderNumber=str(self.order_number), Mandatory=bool_to_yes_no(self.mandatory), ) builder.start("ItemGroupRef", params) builder.end("ItemGroupRef") class MdsolHelpText(ODMElement): """Help element for FormDefs and ItemDefs""" def __init__(self, lang, content): self.lang = lang self.content = content def build(self, builder): builder.start('mdsol:HelpText', {'xml:lang': self.lang}) builder.data(self.content) builder.end('mdsol:HelpText') class MdsolViewRestriction(ODMElement): """ViewRestriction for FormDefs and ItemDefs""" def __init__(self, rolename): self.rolename = rolename def build(self, builder): builder.start('mdsol:ViewRestriction', {}) builder.data(self.rolename) builder.end('mdsol:ViewRestriction') class MdsolEntryRestriction(ODMElement): """EntryRestriction for FormDefs and ItemDefs""" def __init__(self, rolename): self.rolename = rolename def build(self, builder): builder.start('mdsol:EntryRestriction', {}) builder.data(self.rolename) builder.end('mdsol:EntryRestriction') class FormDef(ODMElement): LOG_PORTRAIT = 'Portrait' LOG_LANDSCAPE = 'Landscape' DDE_MUSTNOT = 'MustNotDDE' DDE_MAY = 'MayDDE' DDE_MUST = 'MustDDE' NOLINK = 'NoLink' LINK_NEXT = 'LinkNext' LINK_CUSTOM = 'LinkCustom' def __init__(self, oid, name, repeating=False, order_number=None, active=True, template=False, signature_required=False, log_direction=LOG_PORTRAIT, double_data_entry=DDE_MUSTNOT, confirmation_style=NOLINK, link_study_event_oid=None, link_form_oid=None ): self.oid = oid self.name = name self.order_number = order_number self.repeating = repeating # Not actually used by Rave. self.active = active self.template = template self.signature_required = signature_required self.log_direction = log_direction self.double_data_entry = double_data_entry self.confirmation_style = confirmation_style self.link_study_event_oid = link_study_event_oid self.link_form_oid = link_form_oid self.itemgroup_refs = [] self.helptexts = [] # Not clear that Rave can accept multiple from docs self.view_restrictions = [] self.entry_restrictions = [] def build(self, builder): params = dict(OID=self.oid, Name=self.name, Repeating=bool_to_yes_no(self.repeating) ) if self.order_number is not None: params['mdsol:OrderNumber'] = str(self.order_number) if self.active is not None: params['mdsol:Active'] = bool_to_yes_no(self.active) params['mdsol:Template'] = bool_to_yes_no(self.template) params['mdsol:SignatureRequired'] = bool_to_yes_no(self.signature_required) params['mdsol:LogDirection'] = self.log_direction params['mdsol:DoubleDataEntry'] = self.double_data_entry params['mdsol:ConfirmationStyle'] = self.confirmation_style if self.link_study_event_oid: params['mdsol:LinkStudyEventOID'] = self.link_study_event_oid if self.link_form_oid: params['mdsol:LinkFormOID'] = self.link_form_oid builder.start("FormDef", params) for itemgroup_ref in self.itemgroup_refs: itemgroup_ref.build(builder) for helptext in self.helptexts: helptext.build(builder) for view_restriction in self.view_restrictions: view_restriction.build(builder) for entry_restriction in self.entry_restrictions: entry_restriction.build(builder) builder.end("FormDef") def __lshift__(self, other): """Override << operator""" if not isinstance(other, (ItemGroupRef, MdsolHelpText, MdsolViewRestriction, MdsolEntryRestriction,)): raise ValueError('StudyEventDef cannot accept a {0} as a child 
element'.format(other.__class__.__name__)) self.set_list_attribute(other, ItemGroupRef, 'itemgroup_refs') self.set_list_attribute(other, MdsolHelpText, 'helptexts') self.set_list_attribute(other, MdsolViewRestriction, 'view_restrictions') self.set_list_attribute(other, MdsolEntryRestriction, 'entry_restrictions') return other class MdsolLabelRef(ODMElement): """A reference to a label on a form""" def __init__(self, oid, order_number): self.oid = oid self.order_number = order_number def build(self, builder): params = dict(LabelOID=self.oid, OrderNumber=str(self.order_number), ) builder.start('mdsol:LabelRef', params) builder.end('mdsol:LabelRef') class MdsolAttribute(ODMElement): def __init__(self, namespace, name, value, transaction_type='Insert'): self.namespace = namespace self.name = name self.value = value self.transaction_type = transaction_type def build(self, builder): params = dict(Namespace=self.namespace, Name=self.name, Value=self.value, TransactionType=self.transaction_type, ) builder.start('mdsol:Attribute', params) builder.end('mdsol:Attribute') class ItemRef(ODMElement): def __init__(self, oid, order_number, mandatory=False, key_sequence=None, imputation_method_oid=None, role=None, role_codelist_oid=None): self.oid = oid self.order_number = order_number self.mandatory = mandatory self.key_sequence = key_sequence self.imputation_method_oid = imputation_method_oid self.role = role self.role_codelist_oid = role_codelist_oid self.attributes = [] def build(self, builder): params = dict(ItemOID=self.oid, OrderNumber=str(self.order_number), Mandatory=bool_to_yes_no(self.mandatory) ) if self.key_sequence is not None: params['KeySequence'] = str(self.key_sequence) if self.imputation_method_oid is not None: params['ImputationMethodOID'] = self.imputation_method_oid if self.role is not None: params['Role'] = self.role if self.role_codelist_oid is not None: params['RoleCodeListOID'] = self.role_codelist_oid builder.start('ItemRef', params) for attribute in self.attributes: attribute.build(builder) builder.end('ItemRef') def __lshift__(self, other): """ItemRef can accept MdsolAttribute(s)""" if not isinstance(other, (MdsolAttribute)): raise ValueError('ItemRef cannot accept a {0} as a child element'.format(other.__class__.__name__)) self.set_list_attribute(other, MdsolAttribute, 'attributes') return other class ItemGroupDef(ODMElement): def __init__(self, oid, name, repeating=False, is_reference_data=False, sas_dataset_name=None, domain=None, origin=None, role=None, purpose=None, comment=None): self.oid = oid self.name = name self.repeating = repeating self.is_reference_data = is_reference_data self.sas_dataset_name = sas_dataset_name self.domain = domain self.origin = origin self.role = role self.purpose = purpose self.comment = comment self.item_refs = [] self.label_refs = [] def build(self, builder): params = dict(OID=self.oid, Name=self.name, Repeating=bool_to_yes_no(self.repeating), IsReferenceData=bool_to_yes_no(self.is_reference_data) ) if self.sas_dataset_name is not None: params['SASDatasetName'] = self.sas_dataset_name if self.domain is not None: params['Domain'] = self.domain if self.origin is not None: params['Origin'] = self.origin if self.role is not None: params['Role'] = self.role if self.purpose is not None: params['Purpose'] = self.purpose if self.comment is not None: params['Comment'] = self.comment builder.start('ItemGroupDef', params) for itemref in self.item_refs: itemref.build(builder) # Extensions always listed AFTER core elements for labelref in self.label_refs: 
labelref.build(builder) builder.end('ItemGroupDef') def __lshift__(self, other): """ItemGroupDef can accept ItemRef and LabelRef""" if not isinstance(other, (ItemRef, MdsolLabelRef)): raise ValueError('ItemGroupDef cannot accept a {0} as a child element'.format(other.__class__.__name__)) self.set_list_attribute(other, ItemRef, 'item_refs') self.set_list_attribute(other, MdsolLabelRef, 'label_refs') return other class Question(ODMElement): def __init__(self): self.translations = [] def __lshift__(self, other): """Override << operator""" if not isinstance(other, (TranslatedText)): raise ValueError('Question cannot accept a {0} as a child element'.format(other.__class__.__name__)) self.set_list_attribute(other, TranslatedText, 'translations') return other def build(self, builder): """Questions can contain translations""" builder.start('Question', {}) for translation in self.translations: translation.build(builder) builder.end('Question') class MeasurementUnitRef(ODMElement): def __init__(self, oid, order_number=None): self.oid = oid self.order_number = order_number def build(self, builder): params = dict(MeasurementUnitOID=self.oid) if self.order_number is not None: params['mdsol:OrderNumber'] = str(self.order_number) builder.start('MeasurementUnitRef', params) builder.end('MeasurementUnitRef') class MdsolHeaderText(ODMElement): """Header text for ItemDef when showed in grid""" def __init__(self, content, lang=None): self.content = content self.lang = lang def build(self, builder): params = {} if self.lang is not None: params['xml:lang'] = self.lang builder.start('mdsol:HeaderText', params) builder.data(self.content) builder.end('mdsol:HeaderText') class CodeListRef(ODMElement): """CodeListRef: a reference a codelist within an ItemDef""" def __init__(self, oid): self.oid = oid def build(self, builder): builder.start('CodeListRef', {'CodeListOID': self.oid}) builder.end('CodeListRef') class MdsolLabelDef(ODMElement): """Label definition""" def __init__(self, oid, name, field_number=None): self.oid = oid self.name = name self.field_number = field_number self.help_texts = [] self.translations = [] self.view_restrictions = [] def build(self, builder): params = dict(OID=self.oid, Name=self.name) if self.field_number is not None: params['FieldNumber'] = str(self.field_number) builder.start("mdsol:LabelDef", params) for translation in self.translations: translation.build(builder) for view_restriction in self.view_restrictions: view_restriction.build(builder) builder.end("mdsol:LabelDef") def __lshift__(self, other): """Override << operator""" if not isinstance(other, (MdsolViewRestriction, TranslatedText)): raise ValueError('MdsolLabelDef cannot accept a {0} as a child element'.format(other.__class__.__name__)) self.set_list_attribute(other, TranslatedText, 'translations') self.set_list_attribute(other, MdsolViewRestriction, 'view_restrictions') return other class MdsolReviewGroup(ODMElement): """Maps to Rave review groups for an Item""" def __init__(self, name): self.name = name def build(self, builder): builder.start('mdsol:ReviewGroup', {}) builder.data(self.name) builder.end('mdsol:ReviewGroup') class CheckValue(ODMElement): """A value in a RangeCheck""" def __init__(self, value): self.value = value def build(self, builder): builder.start('CheckValue', {}) builder.data(str(self.value)) builder.end('CheckValue') class RangeCheck(ODMElement): """ Rangecheck in Rave relates to QueryHigh QueryLow and NonConformandHigh and NonComformanLow for other types of RangeCheck, need to use an EditCheck (part 
of Rave's extensions to ODM) """ def __init__(self, comparator, soft_hard): self._comparator = None self.comparator = comparator self._soft_hard = None self.soft_hard = soft_hard self.check_value = None self.measurement_unit_ref = None @property def comparator(self): return self._comparator @comparator.setter def comparator(self, value): if not isinstance(value, RangeCheckComparatorType): raise AttributeError("%s comparator is invalid in RangeCheck." % (value,)) self._comparator = value @property def soft_hard(self): return self._soft_hard @soft_hard.setter def soft_hard(self, value): if not isinstance(value, RangeCheckType): raise AttributeError("%s soft_hard invalid in RangeCheck." % (value,)) self._soft_hard = value def build(self, builder): params = dict(SoftHard=self.soft_hard.value, Comparator=self.comparator.value) builder.start("RangeCheck", params) if self.check_value is not None: self.check_value.build(builder) if self.measurement_unit_ref is not None: self.measurement_unit_ref.build(builder) builder.end("RangeCheck") def __lshift__(self, other): """Override << operator""" if not isinstance(other, (CheckValue, MeasurementUnitRef,)): raise ValueError('RangeCheck cannot accept a {0} as a child element'.format(other.__class__.__name__)) self.set_single_attribute(other, CheckValue, 'check_value') self.set_single_attribute(other, MeasurementUnitRef, 'measurement_unit_ref') class ItemDef(ODMElement): VALID_DATATYPES = [DataType.Text, DataType.Integer, DataType.Float, DataType.Date, DataType.DateTime, DataType.Time] def __init__(self, oid, name, datatype, length, significant_digits=None, sas_field_name=None, sds_var_name=None, origin=None, # Not mapped in Rave comment=None, active=True, control_type=None, acceptable_file_extensions=None, indent_level=0, source_document_verify=False, default_value=None, sas_format=None, sas_label=None, query_future_date=False, visible=True, translation_required=False, query_non_conformance=False, other_visits=False, can_set_item_group_date=False, can_set_form_date=False, can_set_study_event_date=False, can_set_subject_date=False, visual_verify=False, does_not_break_signature=False, date_time_format=None, field_number=None, variable_oid=None ): self.oid = oid self.name = name if datatype not in ItemDef.VALID_DATATYPES: raise AttributeError('{0} is not a valid datatype!'.format(datatype)) if control_type is not None: if not isinstance(control_type, ControlType): raise AttributeError("{0} is not a valid Control Type".format(control_type)) self.datatype = datatype self.length = length self.significant_digits = significant_digits self.sas_field_name = sas_field_name self.sds_var_name = sds_var_name self.origin = origin self.comment = comment self.active = active self.control_type = control_type self.acceptable_file_extensions = acceptable_file_extensions self.indent_level = indent_level self.source_document_verify = source_document_verify self.default_value = default_value self.sas_format = sas_format self.sas_label = sas_label self.query_future_date = query_future_date self.visible = visible self.translation_required = translation_required self.query_non_conformance = query_non_conformance self.other_visits = other_visits self.can_set_item_group_date = can_set_item_group_date self.can_set_form_date = can_set_form_date self.can_set_study_event_date = can_set_study_event_date self.can_set_subject_date = can_set_subject_date self.visual_verify = visual_verify self.does_not_break_signature = does_not_break_signature self.date_time_format = date_time_format 
self.field_number = field_number self.variable_oid = variable_oid self.question = None self.codelistref = None self.measurement_unit_refs = [] self.help_texts = [] self.view_restrictions = [] self.entry_restrictions = [] self.header_text = None self.review_groups = [] self.range_checks = [] def build(self, builder): """Build XML by appending to builder""" params = dict(OID=self.oid, Name=self.name, DataType=self.datatype.value, Length=str(self.length), ) if self.date_time_format is not None: params['mdsol:DateTimeFormat'] = self.date_time_format params['mdsol:Active'] = bool_to_yes_no(self.active) if self.significant_digits is not None: params['SignificantDigits'] = str(self.significant_digits) if self.sas_field_name is not None: params['SASFieldName'] = self.sas_field_name if self.sds_var_name is not None: params['SDSVarName'] = self.sds_var_name if self.origin is not None: params['Origin'] = self.origin if self.comment is not None: params['Comment'] = self.comment if self.control_type is not None: params['mdsol:ControlType'] = self.control_type.value if self.acceptable_file_extensions is not None: params['mdsol:AcceptableFileExtensions'] = self.acceptable_file_extensions if self.default_value is not None: params['mdsol:DefaultValue'] = str(self.default_value) params['mdsol:SourceDocument'] = bool_to_yes_no(self.source_document_verify) params['mdsol:IndentLevel'] = str(self.indent_level) if self.sas_format is not None: params['mdsol:SASFormat'] = self.sas_format if self.sas_label is not None: params['mdsol:SASLabel'] = self.sas_label params['mdsol:QueryFutureDate'] = bool_to_yes_no(self.query_future_date) params['mdsol:Visible'] = bool_to_yes_no(self.visible) params['mdsol:TranslationRequired'] = bool_to_yes_no(self.translation_required) params['mdsol:QueryNonConformance'] = bool_to_yes_no(self.query_non_conformance) params['mdsol:OtherVisits'] = bool_to_yes_no(self.other_visits) params['mdsol:CanSetItemGroupDate'] = bool_to_yes_no(self.can_set_item_group_date) params['mdsol:CanSetFormDate'] = bool_to_yes_no(self.can_set_form_date) params['mdsol:CanSetStudyEventDate'] = bool_to_yes_no(self.can_set_study_event_date) params['mdsol:CanSetSubjectDate'] = bool_to_yes_no(self.can_set_subject_date) params['mdsol:VisualVerify'] = bool_to_yes_no(self.visual_verify) params['mdsol:DoesNotBreakSignature'] = bool_to_yes_no(self.does_not_break_signature) if self.field_number is not None: params['mdsol:FieldNumber'] = self.field_number if self.variable_oid is not None: params['mdsol:VariableOID'] = self.variable_oid builder.start("ItemDef", params) if self.question is not None: self.question.build(builder) if self.codelistref is not None: self.codelistref.build(builder) for mur in self.measurement_unit_refs: mur.build(builder) for range_check in self.range_checks: range_check.build(builder) if self.header_text is not None: self.header_text.build(builder) for view_restriction in self.view_restrictions: view_restriction.build(builder) for entry_restriction in self.entry_restrictions: entry_restriction.build(builder) for help_text in self.help_texts: help_text.build(builder) for review_group in self.review_groups: review_group.build(builder) builder.end("ItemDef") def __lshift__(self, other): """Override << operator""" # ExternalQuestion?,, # Role*, Alias*, # mdsol:HelpText?, mdsol:ViewRestriction* or mdsolEntryRestrictions*), (or mdsol:ReviewGroups*), mdsol:Label?) 
if not isinstance(other, (MdsolHelpText, MdsolEntryRestriction, MdsolViewRestriction, Question, MeasurementUnitRef, CodeListRef, MdsolHeaderText, MdsolReviewGroup, RangeCheck)): raise ValueError('ItemDef cannot accept a {0} as a child element'.format(other.__class__.__name__)) self.set_single_attribute(other, Question, 'question') self.set_single_attribute(other, CodeListRef, 'codelistref') self.set_single_attribute(other, MdsolHeaderText, 'header_text') self.set_list_attribute(other, RangeCheck, 'range_checks') self.set_list_attribute(other, MeasurementUnitRef, 'measurement_unit_refs') self.set_list_attribute(other, MdsolHelpText, 'help_texts') self.set_list_attribute(other, MdsolViewRestriction, 'view_restrictions') self.set_list_attribute(other, MdsolEntryRestriction, 'entry_restrictions') self.set_list_attribute(other, MdsolReviewGroup, 'review_groups') return other class Decode(ODMElement): def __init__(self): self.translations = [] def build(self, builder): builder.start("Decode", {}) for translation in self.translations: translation.build(builder) builder.end("Decode") def __lshift__(self, other): """Override << operator""" if not isinstance(other, TranslatedText): raise ValueError('Decode cannot accept child of type {0}'.format(other.__class__.__name__)) self.translations.append(other) return other class CodeListItem(ODMElement): def __init__(self, coded_value, order_number=None, specify=False): self.coded_value = coded_value self.order_number = order_number self.specify = specify self.decode = None def build(self, builder): params = dict(CodedValue=self.coded_value) if self.order_number is not None: params['mdsol:OrderNumber'] = str(self.order_number) if self.specify: params['mdsol:Specify'] = "Yes" builder.start("CodeListItem", params) if self.decode is not None: self.decode.build(builder) builder.end("CodeListItem") def __lshift__(self, other): """Override << operator""" if not isinstance(other, Decode): raise ValueError('CodelistItem cannot accept child of type {0}'.format(other.__class__.__name__)) self.set_single_attribute(other, Decode, 'decode') return other class CodeList(ODMElement): """A container for CodeListItems equivalent of Rave Dictionary""" VALID_DATATYPES = [DataType.Integer, DataType.Text, DataType.Float, DataType.String] def __init__(self, oid, name, datatype, sas_format_name=None): self.oid = oid self.name = name if datatype not in CodeList.VALID_DATATYPES: raise ValueError("{0} is not a valid CodeList datatype".format(datatype)) self.datatype = datatype self.sas_format_name = sas_format_name self.codelist_items = [] def build(self, builder): params = dict(OID=self.oid, Name=self.name, DataType=self.datatype.value) if self.sas_format_name is not None: params['SASFormatName'] = self.sas_format_name builder.start("CodeList", params) for item in self.codelist_items: item.build(builder) builder.end("CodeList") def __lshift__(self, other): """Override << operator""" if not isinstance(other, CodeListItem): raise ValueError('Codelist cannot accept child of type {0}'.format(other.__class__.__name__)) self.set_list_attribute(other, CodeListItem, 'codelist_items') return other class MdsolConfirmationMessage(ODMElement): """Form is saved confirmation message""" def __init__(self, message, lang=None): self.message = message self.lang = lang def build(self, builder): params = {} if self.lang: params['xml:lang'] = self.lang builder.start('mdsol:ConfirmationMessage', params) builder.data(self.message) builder.end('mdsol:ConfirmationMessage') class 
MdsolDerivationStep(ODMElement): """A derivation step modeled after the Architect Loader definition. Do not use directly, use appropriate subclasses. """ VALID_STEPS = VALID_DERIVATION_STEPS def __init__(self, variable_oid=None, data_format=None, form_oid=None, folder_oid=None, field_oid=None, value=None, function=None, custom_function=None, record_position=None, form_repeat_number=None, folder_repeat_number=None, logical_record_position=None ): self.variable_oid = variable_oid self.data_format = data_format self.form_oid = form_oid self.folder_oid = folder_oid self.field_oid = field_oid self.value = value self._function = None self.function = function self.custom_function = custom_function self.record_position = record_position self.form_repeat_number = form_repeat_number self.folder_repeat_number = folder_repeat_number self.logical_record_position = logical_record_position @property def function(self): return self._function @function.setter def function(self, value): if value is not None: if value not in MdsolDerivationStep.VALID_STEPS: raise AttributeError("Invalid derivation function %s" % value) self._function = value def build(self, builder): params = dict() if self.variable_oid is not None: params['VariableOID'] = self.variable_oid if self.data_format is not None: params['DataFormat'] = self.data_format if self.folder_oid is not None: params['FolderOID'] = self.folder_oid if self.field_oid is not None: params['FieldOID'] = self.field_oid if self.form_oid is not None: params['FormOID'] = self.form_oid if self.value is not None: params['Value'] = self.value if self.function is not None: params['Function'] = self.function.value if self.custom_function is not None: params['CustomFunction'] = self.custom_function if self.record_position is not None: params['RecordPosition'] = str(self.record_position) if self.form_repeat_number is not None: params['FormRepeatNumber'] = str(self.form_repeat_number) if self.folder_repeat_number is not None: params['FolderRepeatNumber'] = str(self.folder_repeat_number) if self.logical_record_position is not None: params['LogicalRecordPosition'] = self.logical_record_position builder.start("mdsol:DerivationStep", params) builder.end("mdsol:DerivationStep") class MdsolCheckStep(ODMElement): """A check step modeled after the Architect Loader definition. Do not use directly, use appropriate subclasses. 
""" VALID_STEPS = ALL_STEPS def __init__(self, variable_oid=None, data_format=None, form_oid=None, folder_oid=None, field_oid=None, static_value=None, function=None, custom_function=None, record_position=None, form_repeat_number=None, folder_repeat_number=None, logical_record_position=None ): self.variable_oid = variable_oid self.data_format = data_format self.form_oid = form_oid self.folder_oid = folder_oid self.field_oid = field_oid self.static_value = static_value self._function = None self.function = function self.custom_function = custom_function self.record_position = record_position self.form_repeat_number = form_repeat_number self.folder_repeat_number = folder_repeat_number self.logical_record_position = logical_record_position @property def function(self): return self._function @function.setter def function(self, value): if value is not None: if value not in MdsolCheckStep.VALID_STEPS: raise AttributeError("Invalid function %s" % value) self._function = value def build(self, builder): params = dict() if self.variable_oid is not None: params['VariableOID'] = self.variable_oid if self.data_format is not None: params['DataFormat'] = self.data_format if self.folder_oid is not None: params['FolderOID'] = self.folder_oid if self.field_oid is not None: params['FieldOID'] = self.field_oid if self.form_oid is not None: params['FormOID'] = self.form_oid if self.static_value is not None: params['StaticValue'] = self.static_value if self.function is not None: params['Function'] = self.function.value if self.custom_function is not None: params['CustomFunction'] = self.custom_function if self.record_position is not None: params['RecordPosition'] = str(self.record_position) if self.form_repeat_number is not None: params['FormRepeatNumber'] = str(self.form_repeat_number) if self.folder_repeat_number is not None: params['FolderRepeatNumber'] = str(self.folder_repeat_number) if self.logical_record_position is not None: params['LogicalRecordPosition'] = self.logical_record_position builder.start("mdsol:CheckStep", params) builder.end("mdsol:CheckStep") class MdsolCheckAction(ODMElement): """ Check Action modeled after check action in Architect Loader spreadsheet. Do not use directly, use appropriate sub-class. 
""" def __init__(self, variable_oid=None, field_oid=None, form_oid=None, folder_oid=None, record_position=None, form_repeat_number=None, folder_repeat_number=None, check_action_type=None, check_string=None, check_options=None, check_script=None ): self.variable_oid = variable_oid self.folder_oid = folder_oid self.field_oid = field_oid self.form_oid = form_oid self.record_position = record_position self.form_repeat_number = form_repeat_number self.folder_repeat_number = folder_repeat_number self._check_action_type = None self.check_action_type = check_action_type self.check_string = check_string self.check_options = check_options self.check_script = check_script @property def check_action_type(self): return self._check_action_type @check_action_type.setter def check_action_type(self, value): if value is not None: if not isinstance(value, ActionType): raise AttributeError("Invalid check action %s" % value) self._check_action_type = value def build(self, builder): params = dict() if self.variable_oid is not None: params['VariableOID'] = self.variable_oid if self.field_oid is not None: params['FieldOID'] = self.field_oid if self.form_oid is not None: params['FormOID'] = self.form_oid if self.folder_oid is not None: params['FolderOID'] = self.folder_oid if self.record_position is not None: params['RecordPosition'] = str(self.record_position) if self.form_repeat_number is not None: params['FormRepeatNumber'] = str(self.form_repeat_number) if self.folder_repeat_number is not None: params['FolderRepeatNumber'] = str(self.folder_repeat_number) if self.check_action_type is not None: params['Type'] = self.check_action_type.value if self.check_string is not None: params['String'] = self.check_string if self.check_options is not None: params['Options'] = self.check_options if self.check_script is not None: params['Script'] = self.check_script builder.start("mdsol:CheckAction", params) builder.end("mdsol:CheckAction") class MdsolEditCheckDef(ODMElement): """Extension for Rave edit checks""" def __init__(self, oid, active=True, bypass_during_migration=False, needs_retesting=False): self.oid = oid self.active = active self.bypass_during_migration = bypass_during_migration self.needs_retesting = needs_retesting self.check_steps = [] self.check_actions = [] def build(self, builder): params = dict(OID=self.oid, Active=bool_to_true_false(self.active), BypassDuringMigration=bool_to_true_false(self.bypass_during_migration), NeedsRetesting=bool_to_true_false(self.needs_retesting) ) builder.start('mdsol:EditCheckDef', params) for step in self.check_steps: step.build(builder) for action in self.check_actions: action.build(builder) builder.end('mdsol:EditCheckDef') def __lshift__(self, other): """Override << operator""" if not isinstance(other, (MdsolCheckStep, MdsolCheckAction,)): raise ValueError('EditCheck cannot accept a {0} as a child element'.format(other.__class__.__name__)) self.set_list_attribute(other, MdsolCheckStep, 'check_steps') self.set_list_attribute(other, MdsolCheckAction, 'check_actions') class MdsolDerivationDef(ODMElement): """Extension for Rave derivations""" def __init__(self, oid, active=True, bypass_during_migration=False, needs_retesting=False, variable_oid=None, field_oid=None, form_oid=None, folder_oid=None, record_position=None, form_repeat_number=None, folder_repeat_number=None, logical_record_position=None, all_variables_in_folders=None, all_variables_in_fields=None ): self.oid = oid self.active = active self.bypass_during_migration = bypass_during_migration self.needs_retesting = 
needs_retesting self.variable_oid = variable_oid self.field_oid = field_oid self.form_oid = form_oid self.folder_oid = folder_oid self.record_position = record_position self.form_repeat_number = form_repeat_number self.folder_repeat_number = folder_repeat_number self.logical_record_position = logical_record_position self.all_variables_in_folders = all_variables_in_folders self.all_variables_in_fields = all_variables_in_fields self.derivation_steps = [] def build(self, builder): params = dict( OID=self.oid, Active=bool_to_true_false(self.active), BypassDuringMigration=bool_to_true_false(self.bypass_during_migration), NeedsRetesting=bool_to_true_false(self.needs_retesting) ) if self.variable_oid is not None: params['VariableOID'] = self.variable_oid if self.field_oid is not None: params['FieldOID'] = self.field_oid if self.form_oid is not None: params['FormOID'] = self.form_oid if self.folder_oid is not None: params['FolderOID'] = self.folder_oid if self.record_position is not None: params['RecordPosition'] = str(self.record_position) if self.form_repeat_number is not None: params['FormRepeatNumber'] = str(self.form_repeat_number) if self.folder_repeat_number is not None: params['FolderRepeatNumber'] = str(self.folder_repeat_number) if self.all_variables_in_folders is not None: params['AllVariablesInFolders'] = bool_to_true_false(self.all_variables_in_folders) if self.all_variables_in_fields is not None: params['AllVariablesInFields'] = bool_to_true_false(self.all_variables_in_fields) if self.logical_record_position is not None: params['LogicalRecordPosition'] = self.logical_record_position builder.start('mdsol:DerivationDef', params) for step in self.derivation_steps: step.build(builder) builder.end('mdsol:DerivationDef') def __lshift__(self, other): """Override << operator""" if not isinstance(other, MdsolDerivationStep): raise ValueError('Derivation cannot accept a {0} as a child element'.format(other.__class__.__name__)) self.set_list_attribute(other, MdsolDerivationStep, 'derivation_steps') class MdsolCustomFunctionDef(ODMElement): """Extension for Rave Custom functions""" VB = "VB" # VB was deprecated in later Rave versions. 
C_SHARP = "C#" SQL = "SQ" VALID_LANGUAGES = [C_SHARP, SQL, VB] def __init__(self, oid, code, language="C#"): self.oid = oid self.code = code self.language = language def build(self, builder): params = dict(OID=self.oid, Language=self.language) builder.start('mdsol:CustomFunctionDef', params) builder.data(self.code) builder.end('mdsol:CustomFunctionDef') class MetaDataVersion(ODMElement): """MetaDataVersion, child of study""" def __init__(self, oid, name, description=None, primary_formoid=None, default_matrix_oid=None, delete_existing=False, signature_prompt=None): self.oid = oid self.name = name self.description = description self.primary_formoid = primary_formoid self.default_matrix_oid = default_matrix_oid self.delete_existing = delete_existing self.signature_prompt = signature_prompt self.confirmation_message = None self.protocol = None self.codelists = [] self.item_defs = [] self.label_defs = [] self.item_group_defs = [] self.form_defs = [] self.study_event_defs = [] self.edit_checks = [] self.derivations = [] self.custom_functions = [] def build(self, builder): """Build XML by appending to builder""" params = dict(OID=self.oid, Name=self.name) if self.description is not None: params['Description'] = self.description if self.signature_prompt is not None: params['mdsol:SignaturePrompt'] = self.signature_prompt if self.primary_formoid is not None: params['mdsol:PrimaryFormOID'] = self.primary_formoid if self.default_matrix_oid is not None: params['mdsol:DefaultMatrixOID'] = self.default_matrix_oid params['mdsol:DeleteExisting'] = bool_to_yes_no(self.delete_existing) builder.start("MetaDataVersion", params) if self.protocol: self.protocol.build(builder) for event in self.study_event_defs: event.build(builder) for formdef in self.form_defs: formdef.build(builder) for itemgroupdef in self.item_group_defs: itemgroupdef.build(builder) for itemdef in self.item_defs: itemdef.build(builder) for codelist in self.codelists: codelist.build(builder) # Extensions must always come after core elements if self.confirmation_message: self.confirmation_message.build(builder) for labeldef in self.label_defs: labeldef.build(builder) for edit_check in self.edit_checks: edit_check.build(builder) for derivation in self.derivations: derivation.build(builder) for custom_function in self.custom_functions: custom_function.build(builder) builder.end("MetaDataVersion") def __lshift__(self, other): """Override << operator""" if not isinstance(other, (Protocol, StudyEventDef, FormDef, ItemGroupDef, ItemDef, MdsolLabelDef, CodeList, MdsolConfirmationMessage, MdsolEditCheckDef, MdsolDerivationDef, MdsolCustomFunctionDef)): raise ValueError('MetaDataVersion cannot accept a {0} as a child element'.format(other.__class__.__name__)) self.set_single_attribute(other, Protocol, 'protocol') self.set_single_attribute(other, MdsolConfirmationMessage, 'confirmation_message') self.set_list_attribute(other, StudyEventDef, 'study_event_defs') self.set_list_attribute(other, FormDef, 'form_defs') self.set_list_attribute(other, ItemGroupDef, 'item_group_defs') self.set_list_attribute(other, MdsolLabelDef, 'label_defs') self.set_list_attribute(other, ItemDef, 'item_defs') self.set_list_attribute(other, CodeList, 'codelists') self.set_list_attribute(other, MdsolEditCheckDef, 'edit_checks') self.set_list_attribute(other, MdsolDerivationDef, 'derivations') self.set_list_attribute(other, MdsolCustomFunctionDef, 'custom_functions') # NB. 
Current schema limits to 1 return other class Study(ODMElement): """ODM Study Metadata element""" PROJECT = 'Project' GLOBAL_LIBRARY = 'GlobalLibrary Volume' PROJECT_TYPES = [PROJECT, GLOBAL_LIBRARY] def __init__(self, oid, project_type=None): self.oid = oid self.global_variables = None self.basic_definitions = None self.metadata_version = None self.studyevent_defs = [] if project_type is None: self.project_type = "Project" else: if project_type in Study.PROJECT_TYPES: self.project_type = project_type else: raise ValueError('Project type "{0}" not valid. Expected one of {1}'.format(project_type, ','.join( Study.PROJECT_TYPES))) def __lshift__(self, other): """Override << operator""" if not isinstance(other, (GlobalVariables, BasicDefinitions, MetaDataVersion)): raise ValueError('Study cannot accept a {0} as a child element'.format(other.__class__.__name__)) self.set_single_attribute(other, GlobalVariables, 'global_variables') self.set_single_attribute(other, BasicDefinitions, 'basic_definitions') self.set_single_attribute(other, MetaDataVersion, 'metadata_version') return other def build(self, builder): """Build XML by appending to builder""" params = dict(OID=self.oid) params['mdsol:ProjectType'] = self.project_type builder.start("Study", params) # Ask children if self.global_variables is not None: self.global_variables.build(builder) if self.basic_definitions is not None: self.basic_definitions.build(builder) if self.metadata_version is not None: self.metadata_version.build(builder) builder.end("Study")
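# --- Illustrative sketch (not part of the original module): how the builder
# --- classes above are meant to be composed with << and serialized. The OIDs,
# --- names and the TreeBuilder round-trip are assumptions for the example.
if __name__ == "__main__":
    study = Study("DEMO_STUDY", project_type=Study.PROJECT)
    mdv = MetaDataVersion("MDV_1", "Draft 1")
    study << mdv
    mdv << MdsolEditCheckDef("CHECK_AE_TERM")(
        MdsolCheckAction(field_oid="AETERM", form_oid="AE")
    )
    tree = ET.TreeBuilder()
    study.build(tree)
    print(ET.tostring(tree.close()))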
mit
-2,991,630,257,282,767,400
33.787551
121
0.603444
false
4.042562
false
false
false
toddheitmann/PetroPy
setup.py
1
1590
"""Setup script for PetroPy""" from setuptools import setup from os import path from petropy import __version__ with open(path.join(path.dirname(__file__), "requirements.txt"), "r") as f: requirements = f.read().splitlines() with open(path.join(path.dirname(__file__), "README.rst"), "r") as f: long_description = f.read() setup( name = 'petropy', packages=["petropy", ], version = __version__, description = 'A package to calculate petrophysical properties for formation evaluation.', long_description = long_description, author = 'Todd Heitmann', author_email = '[email protected]', url = 'https://github.com/toddheitmann/petropy', keywords = ['petrophysics', 'formation evaluation', 'reservoir characterization', 'Oil and Gas'], classifiers=[ "Intended Audience :: Customer Service", "Intended Audience :: Developers", "Intended Audience :: Education", "Intended Audience :: End Users/Desktop", "Intended Audience :: Other Audience", "Intended Audience :: Science/Research", "License :: OSI Approved :: MIT License", "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: Python :: 2", "Programming Language :: Python :: 3", "Topic :: Scientific/Engineering", "Topic :: System :: Filesystems", "Topic :: Scientific/Engineering :: Information Analysis", ], install_requires = requirements, package_data = {'petropy': ['data/*.csv', 'data/*.xml', 'data/*.las']} )
mit
8,575,491,469,597,915,000
37.780488
101
0.637107
false
4.025316
false
false
false
saisankargochhayat/algo_quest
leetcode/115. Distinct Subsequences/soln.py
1
1479
from functools import lru_cache

class Solution:
    def numDistinct(self, s: str, t: str) -> int:
        @lru_cache(maxsize=None)
        def helper(i, j):
            M, N = len(s), len(t)
            if i == M or j == N or M-i < N-j:
                return int(j == N)
            # Whether or not s[i] matches t[j], we may always skip s[i]
            ans = helper(i+1, j)
            # If the characters match, we may also consume both of them
            if s[i] == t[j]:
                ans += helper(i+1, j+1)
            return ans
        res = helper(0,0)
        return res

# Alternative implementation (shadows the class above) that uses an explicit
# memoization dictionary instead of functools.lru_cache.
class Solution:
    def numDistinct(self, s: str, t: str) -> int:
        # Dictionary for memoization
        mem = {}
        def helper(i, j):
            M, N = len(s), len(t)
            # Base case: s exhausted, t exhausted, or too few characters left in s
            if i == M or j == N or M - i < N - j:
                return int(j == len(t))
            # Check if the result is already cached
            if (i, j) in mem:
                return mem[i,j]
            # Always make this recursive call
            ans = helper(i + 1, j)
            # If the characters match, make the other
            # one and add the result to "ans"
            if s[i] == t[j]:
                ans += helper(i + 1, j + 1)
            # Cache the answer and return
            mem[i, j] = ans
            return ans
        return helper(0, 0)
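# --- Illustrative check (not part of the original file), using the classic
# --- LeetCode examples for this problem:
if __name__ == "__main__":
    assert Solution().numDistinct("rabbbit", "rabbit") == 3
    assert Solution().numDistinct("babgbag", "bag") == 5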
apache-2.0
3,884,121,225,222,017,000
27.461538
74
0.408384
false
4.019022
false
false
false
benob/chainer
chainer/functions/evaluation/accuracy.py
1
2436
import numpy
import six

from chainer import cuda
from chainer import function
from chainer.utils import type_check


class Accuracy(function.Function):

    def __init__(self, ignore_label=None):
        self.ignore_label = ignore_label

    def check_type_forward(self, in_types):
        type_check.expect(in_types.size() == 2)
        x_type, t_type = in_types

        type_check.expect(
            x_type.dtype.kind == 'f',
            t_type.dtype == numpy.int32
        )

        t_ndim = t_type.ndim.eval()
        type_check.expect(
            x_type.ndim >= t_type.ndim,
            x_type.shape[0] == t_type.shape[0],
            x_type.shape[2: t_ndim + 1] == t_type.shape[1:]
        )
        for i in six.moves.range(t_ndim + 1, x_type.ndim.eval()):
            type_check.expect(x_type.shape[i] == 1)

    def forward(self, inputs):
        xp = cuda.get_array_module(*inputs)
        y, t = inputs

        if self.ignore_label is not None:
            mask = (t == self.ignore_label)
            ignore_cnt = mask.sum()

            # will always be true when the true label is ignore_label
            # TODO(henry0312)
            # If cupy.where returns indexes, we could make the code better.
            # Also, we would need Advanced Indexing.
            pred = xp.where(mask, self.ignore_label,
                            y.argmax(axis=1).reshape(t.shape))
            count = (pred == t).sum() - ignore_cnt
            total = t.size - ignore_cnt

            if total == 0:
                return xp.asarray(0.0, dtype=y.dtype),
            else:
                return xp.asarray(float(count) / total, dtype=y.dtype),
        else:
            pred = y.argmax(axis=1).reshape(t.shape)
            return xp.asarray((pred == t).mean(dtype=y.dtype)),


def accuracy(y, t, ignore_label=None):
    """Computes multiclass classification accuracy of the minibatch.

    Args:
        y (Variable): Variable holding a matrix whose (i, j)-th element
            indicates the score of the class j at the i-th example.
        t (Variable): Variable holding an int32 vector of ground truth labels.
        ignore_label (int or None): Skip calculating accuracy
            if the true label is ``ignore_label``.

    Returns:
        Variable: A variable holding a scalar array of the accuracy.

    .. note:: This function is non-differentiable.

    """
    return Accuracy(ignore_label=ignore_label)(y, t)
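# --- Usage sketch (not part of the original module): scores for two examples
# --- over three classes; both argmax predictions match the labels, so the
# --- reported accuracy is 1.0. chainer.Variable is part of chainer's public API.
if __name__ == "__main__":
    from chainer import Variable
    y = Variable(numpy.array([[0.1, 0.7, 0.2],
                              [0.8, 0.1, 0.1]], dtype=numpy.float32))
    t = Variable(numpy.array([1, 0], dtype=numpy.int32))
    print(accuracy(y, t).data)  # -> 1.0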
mit
-7,014,396,633,179,930,000
31.918919
78
0.573481
false
3.747692
false
false
false
zakirovandrey/cfmaxwell
src/genConeFold.py
1
21709
#!/usr/bin/python # -*- coding: utf-8 -*- import sys from types import * from operator import * def makeCombinations(lst, res=['']): if len(lst)<=0: return res else: return makeCombinations(lst[:-1], [c+n for n in res for c in lst[-1]]) class CFact: def __init__(self, Gen, actN): self.Gen=Gen; self.actN=actN self.podDatas,self.podDatasShift = ['',],[0,] for s in xrange(Gen.dim): #datas = self.pars4actN[actN[s]] datas = Gen.Rules2act['pars'][actN[s]] for k in xrange(1<<s): self.podDatas.append(self.podDatas[k] + datas[1]) self.podDatas[k] += datas[0] self.podDatasShift.append(self.podDatasShift[k] + (0,1<<s)[datas[1] is 'p']) pass pass pass def PodActList(self, tC='A'): tier0=makeCombinations([self.Gen.Rules2act['subacts'][a][:2] for a in self.actN]) tier1=makeCombinations([self.Gen.Rules2act['subacts'][a][2:] for a in self.actN]) #tier0=makeCombinations([self.decomp4actN[a][:2] for a in self.actN]) #tier1=makeCombinations([self.decomp4actN[a][2:] for a in self.actN]) tier2=reduce(add,zip(tier0,tier1)) tier0=filter(lambda (a,n,t):'-' not in a, zip(tier0,xrange(len(tier0)),['B']*len(tier0))); tier0.reverse() tier1=filter(lambda (a,n,t):'-' not in a, zip(tier1,xrange(len(tier1)),['T']*len(tier1))); tier1.reverse() tier2=filter(lambda (a,n,t):'-' not in a, zip(tier2,xrange(len(tier2)),['B','T']*len(tier0))); tier2.reverse() if tC is 'B': return tier0 if tC is 'T': return tier1 if tC is 'BT': return tier2 return tier0+tier1 def PodActList_mp(self): tier=makeCombinations([self.Gen.Rules2act['subacts_mp'][a] for a in self.actN]) tier=filter(lambda (a,n):'-' not in a, zip(tier,xrange(len(tier)))); tier.reverse() return tier def getIsh(self, datI, shI): return filter(len, [('',self.Gen.rulesShift[self.podDatas[datI]][s])[(shI&(1<<s))>0] for s in xrange(self.Gen.dim)]) def getPodActPar(self, par, parI, cntI, tC): '''Возвращает параметр parI (0..2^d) для под-act-а с базовым datas-ом cntI (0..2^d), исходя из шаблона par''' if tC is 'B': datI,poddatI = cntI&parI,cntI^parI if tC is 'T': datI,poddatI = cntI|parI,((1<<self.Gen.dim)-1)&(~(cntI^parI)) if tC is 'X': parIl,cntIl=map(lambda v:(0,1,-1)[v],self.Gen.num2list(parI,3)),self.Gen.num2list(cntI) datI = self.Gen.list2num([(p+n)>=1 for (p,n) in zip(parIl,cntIl)]) poddatI = self.Gen.list2num([1&~((p&1)^n) for (p,n) in zip(parIl,cntIl)]) #datI,poddatI --- номера датаса и его поддатаса для параметра parI poddatIx = 0 for s in xrange(self.Gen.dim-1,-1,-1): i2s = 1<<s if self.Gen.Rules2dim[self.podDatas[datI][s]]: poddatIx = 2*poddatIx+((poddatI&i2s)!=0) #poddatIx --- смещение параметра в массиве поддатасов для датаса datI #print par, parI, cntI, tC,self.podDatas[datI],"->",datI,poddatI,poddatIx if par.find('dat')>=0: dat_shift = self.podDatasShift[datI] return (self.Gen.datTmpl%datI+'->',self.Gen.datTmpl%(datI-dat_shift)+'[%s].'%('+'.join(self.getIsh(datI-dat_shift,dat_shift))))[dat_shift>0]+'datas'+('','+%d'%poddatIx)[poddatIx>0] if par.find('const int _I')>=0: sh = par[par.find('const int _I')+len('const int _I'):] sI = sh.find('p') if sI >= 0: if poddatI&(1<<sI): return '%d'%(1<<self.Gen.get_dim(sh[:sI])) return 'I'+sh sI = sh.find('m') if sI >= 0: if poddatI&(1<<sI): return '-I'+sh.replace('m','p') return '-%d'%(1<<self.Gen.get_dim(sh[:sI])) return '?'+sh return '======== not implemented yet,',par, n, t class CFact_mp(CFact): def __init__(self, Gen, actN): CFact.__init__(self, Gen, actN) self.podDatas_mp,self.podDatasShift_mp = ['',],[0,] for s in xrange(Gen.dim): datas = Gen.Rules2act_mp['pars'][actN[s]] for k in xrange(3**s): 
self.podDatas_mp.append(self.podDatas_mp[k] + datas[2]) for k in xrange(3**s): self.podDatas_mp.append(self.podDatas_mp[k] + datas[0]) for k in xrange(3**s): self.podDatas_mp[k] += datas[1] for k in xrange(3**s): self.podDatasShift_mp.append(self.podDatasShift_mp[k] + (0,3**s)[datas[2] is 'p']) for k in xrange(3**s): self.podDatasShift_mp.append(self.podDatasShift_mp[k] + 2*(0,3**s)[datas[0] is 'm']) pass pass def PodActList_mp(self): #tier=makeCombinations([self.decomp4actNmp[a] for a in self.actN]) tier=makeCombinations([self.Gen.Rules2act_mp['subacts'][a] for a in self.actN]) tier=filter(lambda (a,n):'-' not in a, zip(tier,xrange(len(tier)))); tier.reverse() return tier def PodActList_mpPIC(self): #tier=makeCombinations([self.decomp4actNmpPIC[a] for a in self.actN]) tier=makeCombinations([self.Gen.Rules2act_mp['subactsPIC'][a] for a in self.actN]) tier=filter(lambda (a,n):'-' not in a, zip(tier,xrange(len(tier)))); tier.reverse() return tier def getIsh_mp(self, datI, shI): '''вычисляет сдвиг datas-а''' if '_' in self.podDatas_mp[datI]: return ('<no-data>',) #print datI, self.podDatas_mp[datI], self.Gen.rulesShift[self.podDatas_mp[datI]] shIl = self.Gen.num2list(shI,3) Ish=[] for s,sh in zip(xrange(self.Gen.dim), self.Gen.num2list(shI,3)): rul = self.Gen.rulesShift[self.podDatas_mp[datI]][s] Ish.append(('',rul, rul.replace('p','m'))[shIl[s]]) return filter(len, Ish) def getPodActPar(self, par, parI, cntIl, tC): '''Возвращает параметр parI (0..3^d) для под-act-а с базовым datas-ом cntI (0..2^d), исходя из шаблона par''' if tC is 'X': parIl=map(lambda v:(0,1,-1)[v],self.Gen.num2list(parI,3)) datIl = [(-1,-1,0,0,1,1)[p+n+2] for (p,n) in zip(parIl,cntIl)] # можно (p+n)/2 # номер (смещение) datas-а относительно базового cntI poddatIl = [(0,1,0,1,0,1)[p+n+2] for (p,n) in zip(parIl,cntIl)] # можно (p+n)%2 # номер poddatas-а в datas-е datI = self.Gen.list2num([((p+n)/2)%3 for (p,n) in zip(parIl,cntIl)], 3) poddatI = self.Gen.list2num([((p+n)%2)&3 for (p,n) in zip(parIl,cntIl)]) poddatIx = 0 for s in xrange(self.Gen.dim-1,-1,-1): i2s = 1<<s if self.Gen.Rules2dim[self.podDatas_mp[datI][s]]: poddatIx = 2*poddatIx+((poddatI&i2s)!=0) if par.find('dat')>=0: dat_shift = self.podDatasShift_mp[datI] return (self.Gen.datTmpl%datI+'->',self.Gen.datTmpl%(datI-dat_shift)+'[%s].'%('+'.join(self.getIsh_mp(datI-dat_shift,dat_shift))))[dat_shift!=0]+'datas'+('','+%d'%poddatIx)[poddatIx>0] if par.find('const int _I')>=0: sh = par[par.find('const int _I')+len('const int _I'):] sI = sh.find('p') if sI >= 0: if poddatIl[sI]==1: return '%d'%(1<<self.Gen.get_dim(sh[:sI])) if datIl[sI]<=0: return '-I'+sh.replace('p','m') return 'I'+sh sI = sh.find('m') if sI >= 0: if poddatIl[sI]==0: return '-%d'%(1<<self.Gen.get_dim(sh[:sI])) if datIl[sI]>=0: return '-I'+sh.replace('m','p') return 'I'+sh return '?'+sh return '======== not implemented yet,',par, n, t class CFpodact(CFact): def __init__(self, act, actN): CFact.__init__(self, act.Gen, actN) self.nadact = act pass def getParsList(self, n, tC): pars_list, full_pars_list = self.Gen.get_pars(self.podDatas) fakt_pars = [self.nadact.getPodActPar(par, full_pars_list.index(par), n,tC) for par in pars_list] return fakt_pars class CFpodact_mp(CFact_mp): def __init__(self, act, actN): CFact_mp.__init__(self, act.Gen, actN) self.nadact = act def getParsList(self, n, tC): pars_list, full_pars_list = self.Gen.get_pars_mp(self.podDatas_mp) fakt_pars = [self.nadact.getPodActPar(par, full_pars_list.index(par), n, tC) for par in pars_list] return fakt_pars class Generator: def 
__init__(self, dim, types): self.Rules2act = { 'pars': {'D':'dp', 'S':'dp', 'I':'_d', 'J':'dp', 'X':'dx', 'Y':'d_', 'P':'dd', 'Q':'dd', 'L':'ld', 'M':'dp', 'R':'dr'}, 'subacts':{'D':'DDDD', 'S':'SSSS', 'I':'-IID', 'X':'DXX-', 'Y':'DYY-', 'P':'DPPD', 'L':'-LLD', 'R':'DRR-'}, 'subacts_mp': {'D':'DD', 'I':'-J', 'J':'JD', 'X':'X-', 'Y':'Y-', 'P':'PQ', 'L':'-M', 'R':'R-'}, 'LR':{'L':'IL-', 'R':'-RY'}, } self.Rules2act_mp = { #'pars': {'D':'mdp', 'S':'msp', 'J':'_dp', 'X':'mdp', 'P':'mdd', 'Q':'ddp', 'M':'ldp', 'R':'mdr'}, 'pars': {'D':'mdp', 'S':'msp', 'J':'_dp', 'X':'mdx', 'P':'mdd', 'Q':'ddp', 'M':'ldp', 'R':'mdr'}, 'subacts': {'D':'DD', 'I':'JD', 'J':'JD', 'X':'DX', 'P':'DP', 'Q': 'QD', 'L':'MD', 'M':'MD', 'R':'DR'}, 'subactsPIC': {'D':'DDDD', 'J':'-JDD', 'X':'DDX-', 'P':'DDPQ', 'Q': 'PQDD', 'M':'-MDD', 'R':'DDR-'}, } self.Rules2rank = {} self.Rules2dim = {} for s in 'dmps': self.Rules2dim[s] = 1 for s in 'xlr': self.Rules2dim[s] = 0 self.dim=dim self.par_name_start=-(dim+2)-(dim==1) self.datTmpl='datas_____'[:2+(dim+1)/2:]+'%0'+'%dd'%((dim+1)/2) self.types=types print '// acts: %d'%len(makeCombinations([types]*dim)) pass def num2list(self, num, bas=2): numLst = [] for s in xrange(self.dim): numLst.append(num%bas) num /= bas return numLst def list2num(self, numLst, bas=2): numLstR = numLst[:] numLstR.reverse() return reduce(lambda r,v: r*bas+v, numLstR, 0) def get_dim(self, pd): return reduce(lambda r,s: r+self.Rules2dim[s], pd, 0) def add2rules(self, pd): self.rules[pd] = 'cubeLR<%d,T%%(Npd)d,%s>'%(self.get_dim(pd),self.Rules2rank.get(pd,self.rank)) + '* const '+self.datTmpl.replace('%','%(Npd)') self.add2rulesShift(pd) pass def add2rulesShift(self, pd): self.rulesShift[pd] = [] #self.rulesShiftM[pd] = [] for s in xrange(len(pd)): sh=pd[:s]+'p'+pd[s+1:] self.rules[sh] = 'const int _I'+sh self.rulesShift[pd].append('_I'+sh) shM=pd[:s]+'m'+pd[s+1:] self.rules[shM] = 'const int _I'+shM #self.rulesShiftM[pd].append('_I'+shM) pass pass def get_pars(self, datas, shift=0): full_pars_list = [self.rules.get(pd,'')%{'Npd':i+shift} for (i,pd) in zip(xrange(1<<self.dim),datas)] pars_list = filter(len, full_pars_list) pars_list = map(lambda i: pars_list[i], filter(lambda i: pars_list.index(pars_list[i])==i, xrange(len(pars_list)))) return pars_list, full_pars_list def get_pars_mp(self, datas): full_pars_list = [self.rules.get(pd,'')%{'Npd':i} for (i,pd) in zip(xrange(3**self.dim),datas)] pars_list = filter(len, full_pars_list) pars_list = map(lambda i: pars_list[i], filter(lambda i: pars_list.index(pars_list[i])==i, xrange(len(pars_list)))) return pars_list, full_pars_list def getTmplPars(self, formal_pars): template_pars = ','.join(map(lambda s: 'class '+s, filter(lambda s: s[0] is 'T', ''.join(formal_pars).split(',')))) rank_par = ('','int %s'%self.rank)[self.rank is 'rank'] return 'template <%s>'%(', '.join(filter(len,(rank_par,template_pars)))) def makeAct(self, actN): act = CFact(self, actN) formal_pars = self.get_pars(act.podDatas)[0] shift_pars = map(lambda fp: fp[10:], filter(lambda fp: fp[:12] == 'const int _I', formal_pars)) shift_line = ', '.join(map(lambda p: '%s=(%s<<%d)-%d'%(p[1:],p,self.get_dim(p[2:]),1<<self.get_dim(p[2:][:p[2:].index('p')])), shift_pars)) #вычисление сдвигов поддатасов из имени сдвига датаса (например, из _Ixpd получаем Ixpd=(_Ixpd<<2)-1, где 2=dim(xpd), а 1=1<<dim(x). 
print self.getTmplPars(formal_pars)+' inline void %s(%s) {'%(self.actTmpl%actN,', '.join(formal_pars)) if len(shift_line)>0: if ''.join(self.subactTmpl.keys()) in 'SF': print '//', print ' const int %s;'%shift_line if 'B' in self.subactTmpl.keys(): for (a,n,tC) in act.PodActList('B'): print ' %s(%s);'%(self.subactTmpl['B']%a, ', '.join(CFpodact(act,a).getParsList(n,tC))) if 'F' in self.subactTmpl.keys(): print ' %s(%s);'%(self.subactTmpl['F']%actN,', '.join(map(lambda p: p[self.par_name_start:], formal_pars))) if 'S' in self.subactTmpl.keys(): tier=filter(lambda a:'-' not in a, makeCombinations([self.Rules2act['LR'].get(a, '-%c-'%a) for a in actN])); tier.reverse() for tactN in tier: tact = CFact(self, tactN) tformal_pars = self.get_pars(tact.podDatas, shift=self.list2num([{'I':-1,'Y':1}.get(c,0) for c in tactN]))[0] print ' %s(%s);'%(self.subactTmpl['S']%tactN,', '.join(map(lambda p: p[self.par_name_start:], tformal_pars))) if 'X' in self.subactTmpl.keys(): for (a,n) in act.PodActList_mp(): print ' %s(%s);'%(self.subactTmpl['X']%a, ', '.join(CFpodact_mp(act,a).getParsList(n,'X'))) if 'T' in self.subactTmpl.keys(): for (a,n,tC) in act.PodActList('T'): print ' %s(%s);'%(self.subactTmpl['T']%a, ', '.join(CFpodact(act,a).getParsList(n,tC))) print '}' pass def makeAct_mp(self, actN): actT = CFact_mp(self, 'D'*self.dim) act = CFact_mp(self, actN) formal_pars = self.get_pars_mp(act.podDatas_mp)[0] shift_pars = map(lambda fp: fp[10:], filter(lambda fp: fp[:12] == 'const int _I', formal_pars)) shift_line = ', '.join(map(lambda p: '%s=(%s<<%d)%c%d'%(p[1:],p,self.get_dim(p[2:]),"-+-+"[p[2:].count('m')],1<<self.get_dim(p[2:][:p[2:].replace('m','p').index('p')])), shift_pars)) #вычисление сдвигов поддатасов из имени сдвига датаса (например, из _Ixmd получаем Ixmd=(_Ixmd<<2)+1, где 2=dim(xmd), а 1=1<<dim(x). 
print self.getTmplPars(formal_pars)+' inline void %s(%s) {'%(self.actTmpl%actN,', '.join(formal_pars)) if len(shift_line)>0: print ' const int %s;'%shift_line if 'B' in self.subactTmpl.keys(): for (a,n,tC) in act.PodActList('B'): print ' %s(%s);'%(self.subactTmpl['B']%a, ', '.join(CFpodact(act,a).getParsList(n,tC))) #if 'F' in self.subactTmpl.keys(): # print ' %s(%s);'%(self.subactTmpl['F']%actN,', '.join(map(lambda p: p[self.par_name_start:], formal_pars))) # #for (a,n,tC) in act.PodActList('F'): print ' %s(%s);'%(self.subactTmpl['F']%a, ', '.join(CFpodact(act,a).getParsList(n,tC))) if 'J' in self.subactTmpl.keys(): caseNshift = self.list2num([1]*self.dim, 4) print ' for(int ic=0; ic<4; ic++) {' print 'if(dat0->datas[ic].Npts>NptsMax) {\n T0& datT=dat0->datas[ic];\n NptsMax = datT.Npts;\n printf("'+'===%s:'%actN+' inc NptsMax to %d in xyt: %.3g %.3g %d\\n", NptsMax, datT.x, datT.y, datT.it);\n}' print ' int ip=dat0->datas[ic].Nexch;\n while(ip < dat0->datas[ic].Npts) {\n pts& pt=dat0->datas[ic].ptslist[ip];\n double dstep=1.0;\n do {\n switch(pt.ix+4*pt.iy) {' for (a,n) in act.PodActList_mpPIC(): nL = map(lambda nt: nt-1, self.num2list(n,4)) parList = CFpodact_mp(act,a).getParsList(nL,'X') print ' case %d: dstep=pt.%s(dstep, %s); break;'%(n-caseNshift,self.subactTmpl['J']%a, ', '.join(parList)) print ' }\n } while(dstep<1.0);\n if((pt.ix&2)|(pt.iy&2)) {\n int swk=pt.ix+4*pt.iy;\n if(pt.ix<0) pt.ix += 2; else if(pt.ix>1) pt.ix -= 2;\n if(pt.iy<0) pt.iy += 2; else if(pt.iy>1) pt.iy -= 2;\n switch(swk) {' for (a,n) in act.PodActList_mpPIC(): nL = map(lambda nt: nt-1, self.num2list(n,4)) if len(filter(lambda _n: _n in (-1,2), nL))==0: continue; datPtr = '(%s)'%CFpodact_mp(act,a).getParsList(nL,'X')[0] oldPtr = 'dat0->datas[ic]' Npts = datPtr+'->Npts' Nxch = datPtr+'->Nexch' case_dict = {'ptN': datPtr, 'ptO': oldPtr,'Np': Npts,'Nx':Nxch} print ' case %(n)d:'%{'n':n-caseNshift}, #print 'if(%(ptO)s.it > %(ptN)s->it) printf("Illegal Exch!\\n"); else'%case_dict, print 'if(%(ptO)s.it > %(ptN)s->it) printf("Illegal Exch!\\n"); else if(%(ptO)s.it < %(ptN)s->it) %(ptN)s->ptslist[%(Np)s].copyfrom(pt); else { %(ptN)s->ptslist[%(Np)s].copyfrom(%(ptN)s->ptslist[%(Nx)s]); %(ptN)s->ptslist[%(Nx)s].copyfrom(pt); %(Nx)s++; } %(Np)s++; break;'%case_dict #print 'if(%(ptO)s.it < %(ptN)s->it) %(ptN)s->ptslist[%(Np)s].copyfrom(pt); else { %(ptN)s->ptslist[%(Np)s].copyfrom(%(ptN)s->ptslist[%(Nx)s]); %(ptN)s->ptslist[%(Nx)s].copyfrom(pt); %(Nx)s++; } %(Np)s++; break;'%case_dict print ' }\n dat0->datas[ic].Npts--;\n if(ip<dat0->datas[ic].Npts) pt.copyfrom(dat0->datas[ic].ptslist[dat0->datas[ic].Npts]);\n } else ip++;\n }\n dat0->datas[ic].Nexch=0; dat0->datas[ic].it++;\n }' if 'X' in self.subactTmpl.keys(): for (a,n) in act.PodActList_mp(): print ' for(int ip=0; ip<Nz; ip++) dat0->datas[%d].ptslist[ip].%s(1.0, %s);'%(n,self.subactTmpl['X']%a, ', '.join(CFpodact_mp(act,a).getParsList(n,'X'))) if 'T' in self.subactTmpl.keys(): for (a,n,tC) in act.PodActList('T'): print ' %s(%s);'%(self.subactTmpl['T']%a, ', '.join(CFpodact(act,a).getParsList(n,tC))) print '}' pass def genConeFold(self, rank='rank', actTmpl=r'%sactCF', subactTmpl=None, knot='p', exclude=[], acts4gen=[]): '''Печатает ConeFold заданного ранга, имени и типа: rank --- имя ранга (либо диапазона), строка; actTmpl --- шаблон имени, строка, на которую накатывается имя act-а; subactTmpl --- правила разбиения и имена подConeFold-ов (мЕньшего ранга), на которые разбивается ConeFold, словарь, ключи которого --- слои по времени B/T/F/X --- bottom/top/flat/flat с , 
уровни подConeFold-ов''' if len(exclude): print '// exclude up to: %d'%len(exclude) self.rank=rank self.actTmpl=actTmpl if subactTmpl is None: subactTmpl = { 'BT' : actTmpl } if type(subactTmpl) is str: subactTmpl = { subactTmpl : actTmpl } self.subactTmpl={}; map(lambda k: self.subactTmpl.update(dict(zip(k,(subactTmpl[k],)*len(k)))), subactTmpl.keys()) self.rules,self.rulesShift={},{} datasTypes = reduce(lambda r,t: r+filter(lambda c: c not in r+'_mp', self.Rules2act['pars'][t]), self.types, '') for pd in makeCombinations([datasTypes]*self.dim): self.add2rules(pd) if len(acts4gen) == 0: acts4gen = makeCombinations([self.types]*self.dim) for a in acts4gen: if a in exclude: continue if knot == 'p': self.makeAct(a) elif knot == 'mp': self.makeAct_mp(a) pass pass #stdout, sys.stdout = sys.stdout,open('Test.inc.hpp', 'w') #gPJ = Generator(dim=2, types='JDX') #gPJ.genConeFold(rank="FFRank-1", actTmpl=r'PIC2update%s', subactTmpl={'J': r'PIC2update%s'}, knot='mp') dim=3 incpath = sys.argv[0][:sys.argv[0].find(sys.argv[0].split('/')[-1])] stdout, sys.stdout = sys.stdout,open(incpath+'CF2Dpic.inc.hpp', 'w') g = Generator(dim=dim, types='IDX') g.genConeFold(actTmpl=r'update%s') g.genConeFold(actTmpl=r'FLDupdate%s') #g.genConeFold(actTmpl=r'FLDupdate%s', subactTmpl={'B': r'FLDupdate%s'}) sys.stdout.close(); sys.stdout = stdout stdout, sys.stdout = sys.stdout,open(incpath+'CF2Dpic.inc.hpp', 'w') print 'int NptsMax=0;' gP = Generator(dim=dim, types='IDX') gPJ = Generator(dim=dim, types='JDX') g.genConeFold(actTmpl=r'picNfld%s', subactTmpl={'B': r'picNfld%s', 'T':r'FLDupdate%s'}) gP.genConeFold(rank="PicRank+1", actTmpl=r'update%s', subactTmpl={'B': r'picNfld%s', 'T':r'FLDupdate%s'}) gP.genConeFold(rank="FFRank-1", actTmpl=r'PIC1update%s', subactTmpl={'X': r'PIC1update%s'}) gPJ.genConeFold(rank="FFRank-1", actTmpl=r'PIC2update%s', subactTmpl={'J': r'PIC2update%s'}, knot='mp') gP.genConeFold(rank="FFRank-1", actTmpl=r'PIC3update%s', subactTmpl={'X': r'PIC3update%s'}) gP.genConeFold(rank="FFRank", actTmpl=r'pic%s', subactTmpl={'B': r'PIC1update%s', 'X':r'PIC2update%s', 'T':r'PIC3update%s'}) gP.genConeFold(rank="FFRank+1", actTmpl=r'picNfld%s', subactTmpl={'B': r'pic%s', 'T':r'FLDupdate%s'}) sys.stdout.close(); sys.stdout = stdout #---------------PML------------------------------------------- stdout, sys.stdout = sys.stdout,open(incpath+'CF2Dpic.inc.hpp', 'w') #============= Non-PML ConeFold g = Generator(dim=dim, types='DX') #acts = ['DD','DX'] acts = ['D'*dim] #============= BC ConeFold for rank>PMLrank gBC = Generator(dim=dim, types='LDRX') gBC.Rules2rank['d'*dim] = 'rank+PMLrank' #gBC.Rules2rank['dx'] = 'rank+PMLrank' actsBC = makeCombinations(['LDR','LDR','LDX']) #============= PML ConeFold for rank<=PMLrank gPML = Generator(dim=dim, types='ILDSRYX') gPML.Rules2act['subacts'].update({'I':'-IIS', 'Y':'SYY-', 'S':'SSSS', 'L':'SLLD', 'R':'DRRS'}) gPML.Rules2act['pars'].update({'S':'sp', 'L':'sd', 'R':'ds', 'Y':'s_', 'I':'_s', 'J':'sp'}) for s in 'lr': gPML.Rules2dim[s] = 1 actsPML = makeCombinations(['ILDSRY','ILDSRY','ILDSX']) print '//===========any rank==============DX' g.genConeFold(actTmpl=r'%sact', acts4gen=acts) print '//=========rank>PMLrank============LDR/X' gBC.genConeFold(actTmpl=r'%sact', acts4gen=actsBC, exclude=acts) print '//=========rank<=PMLrank============DX' g.genConeFold(actTmpl=r'%sactPML', subactTmpl={'F':r'%sact'}, acts4gen=acts) print '//=========rank<PMLrank============ILDSRY' gPML.genConeFold(actTmpl=r'%sactPML', acts4gen=actsPML, exclude=acts) print 
'//=========rank=PMLrank============LDR/X' gPML.genConeFold(rank='PMLrank', actTmpl=r'%sact', subactTmpl={'S':r'%sactPML'}, acts4gen=actsBC, exclude=acts) sys.stdout.close(); sys.stdout = stdout
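# --- Small illustration (not part of the original script): makeCombinations
# --- builds the cartesian product of per-dimension character sets, one string
# --- per combination, which is how the act names above are enumerated.
assert makeCombinations(['ab', 'cd']) == ['ac', 'bc', 'ad', 'bd']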
gpl-2.0
-776,956,531,473,899,800
54.430446
291
0.592831
false
2.347338
false
false
false
atom-bomb/drill_from_image
drill_from_image.py
1
11733
#!/usr/bin/python # # Hey, here's a thing: # # You can use this bit of python script to generate GCode to drill a PCB based on an image file that you used # to etch the board. # # This script makes GCode to drill the center of sections of an image that are a given color or brightness. # # All you need to do is load the image file that you used to etch and color the things you want drilled. # This should be easy since all of your drills are probably surrounded by traces and all of your traces are # probably colored black. Just use your favorite graphic editor (such as gimp) to flood fill parts of the board # that aren't traces or drills, leaving the drills as the only thing that are white. # # Run this script on your edited image and you'll get some GCode. # # Before you run the GCode, jog the spindle over where you want the topmost, leftmost hole to be drilled and # zero your machine. # The GCode will begin my moving over where the bottommost, rightmost hole would be drilled. # Move your workpiece, return to zero rewind and restart the GCode until your machine lines up with both drills, # then you can allow the machine to continue to drill your board. # from __future__ import print_function import sys import math from PIL import Image import subprocess import re import argparse class BoundingBox: def __init__(self): self.coord = [[0, 0], [0, 0]] self.empty = 1 def intersects(self, box): return (((1 ^ self.empty) and (1 ^ box.empty)) and ((self.coord[0][0] < box.coord[1][0]) and (self.coord[0][1] < box.coord[1][1]) and (self.coord[1][0] > box.coord[0][0]) and (self.coord[1][1] > box.coord[0][1]))) def center(self): return [self.coord[0][0] + ((self.coord[1][0] - self.coord[0][0]) / 2), self.coord[0][1] + ((self.coord[1][1] - self.coord[0][1]) / 2)] def boundCoord(self, coord): if (self.empty): self.coord[0][0] = coord[0] self.coord[0][1] = coord[1] self.coord[1][0] = coord[0] self.coord[1][1] = coord[1] self.empty = 0 else: if (coord[0] < self.coord[0][0]): self.coord[0][0] = coord[0] if (coord[1] < self.coord[0][1]): self.coord[0][1] = coord[1] if (coord[0] > self.coord[1][0]): self.coord[1][0] = coord[0] if (coord[1] > self.coord[1][1]): self.coord[1][1] = coord[1] class BoundingBoxList: def __init__(self): self.boxes = [] def addBox(self, box): for oldBox in self.boxes: if (oldBox.intersects(box)): return self.boxes.append(box) # use ImageMagick to figure out how many pixels per inch or cm in the image file def getDensity(filename, units = "PixelsPerInch"): pipe = subprocess.Popen(["identify", "-format", "%x,%y", "-units", units, filename], stdout=subprocess.PIPE) res = re.sub('[\t\r\n"]', '', pipe.communicate()[0]).split(',') xres = float(res[0].split(' ')[0]) yres = float(res[1].split(' ')[0]) return [xres, yres] # make a list of drill points from an image map class DrillMap: def __init__(self, filename, units = 'Inches', density = [], rgbThresh = 127 * 3): self.image = Image.open(filename) self.pixmap = self.image.load() if (len(density) == 0): if (units == 'Inches'): self.density = getDensity(filename) else: cmDensity = getDensity(filename, units = 'PixelsPerCentimeter') self.density = [float(cmDensity[0]) / 10, float(cmDensity[1]) / 10] else: self.density = density ; self.rgbThresh = rgbThresh ; self.boxlist = BoundingBoxList() self.drillList = [] self.findBoxes() self.makeDrillList() def coordOffset(self, coord): return [float(coord[0]) / float(self.density[0]), float(coord[1]) / float(self.density[1])] def isCoordOn(self, coord): pixel = self.pixmap[coord[0], coord[1]] if 
(self.image.mode == "RGB"): sum = pixel[0] + pixel[1] + pixel[2] return (sum > self.rgbThresh) if (self.image.mode == "1"): return pixel def scanLeftToBox(self, coord, box): y = coord[1] x = coord[0] while ((x >= 0) and self.isCoordOn([x, y])): box.boundCoord([x, y]) x = x - 1 return (x != coord[0]) def scanRightToBox(self, coord, box): y = coord[1] x = coord[0] while ((x <= self.image.size[1] - 1) and self.isCoordOn([x, y])): box.boundCoord([x, y]) x = x + 1 return (x != coord[0]) def scanLineToBox(self, coord, box): return (self.scanLeftToBox(coord, box) or self.scanRightToBox(coord, box)) def scanUpperLineToBox(self, coord, box): if (coord[1] > 0): upperCoord = [int(box.center()[0]), coord[1] - 1] if (self.scanLineToBox(upperCoord, box)): self.scanUpperLineToBox(upperCoord, box) def scanLowerLineToBox(self, coord, box): if (coord[1] < self.image.size[1] - 1): lowerCoord = [box.center()[0], coord[1] + 1] if (self.scanLineToBox(lowerCoord, box)): self.scanLowerLineToBox(lowerCoord, box) def scanToBox(self, coord): box = BoundingBox() if (self.scanRightToBox(coord, box)): self.scanUpperLineToBox(coord, box) self.scanLowerLineToBox(coord, box) return box def findBoxes(self): y = 0 while (y < self.image.size[1] - 1): x = 0 while (x < self.image.size[0] - 1): if (self.isCoordOn([x, y])): newBox = self.scanToBox([x, y]) if (not newBox.empty): self.boxlist.addBox(newBox) x = newBox.coord[1][0] + 1 else: x += 1 else: x += 1 y += 1 def makeDrillList(self): for eachBox in self.boxlist.boxes: self.drillList.append(self.coordOffset(eachBox.center())) class GCode: GCodeCommands = {'Mach3': { 'Message': '(', 'Stop': 'M0', 'Sleep': 'M01', 'SpindleCW': 'M03', 'SpindleCCW': 'M04', 'SpindleStop': 'M05', 'ToolChange': 'M06', 'Pause': 'M60', 'FastMove': 'G0', 'SlowMove': 'G1', 'Dwell': 'G4', 'InchesMode': 'G20', 'MillimetersMode': 'G21', 'MoveToOrigin': 'G28', 'ClearToolOffet': 'G49', 'Drill': 'G81', 'DrillWithDwell': 'G82', 'AbsoluteMode': 'G90', 'RelativeMode': 'G91', 'SetPosition': 'G92', }, 'EMC': { 'Message': '(MSG,', 'Stop': 'M0', 'Sleep': 'M01', 'SpindleCW': 'M03', 'SpindleCCW': 'M04', 'SpindleStop': 'M05', 'ToolChange': 'M06', 'Pause': 'M60', 'FastMove': 'G0', 'SlowMove': 'G1', 'Dwell': 'G4', 'InchesMode': 'G20', 'MillimetersMode': 'G21', 'MoveToOrigin': 'G28', 'ClearToolOffet': 'G49', 'Drill': 'G81', 'DrillWithDwell': 'G82', 'AbsoluteMode': 'G90', 'RelativeMode': 'G91', 'SetPosition': 'G92', }} def __init__(self, theGCodeType): self.variant = theGCodeType def Comment(self, string): return " ; " + string def Message(self, string): return self.GCodeCommands[self.variant]['Message'] + string + " )" def Pause(self): return self.GCodeCommands[self.variant]['Pause'] def Spindle(self, Mode): SpindleModes = {'Stop': 'SpindleStop', 'CW': 'SpindleCW', 'CCW': 'SpindleCCW'} return self.GCodeCommands[self.variant][SpindleModes[Mode]] def Units(self, theUnits): if (theUnits == 'Inches'): return self.GCodeCommands[self.variant]['InchesMode'] else: return self.GCodeCommands[self.variant]['MillimetersMode'] def Absolute(self, isAbsolute = True): if (isAbsolute): return self.GCodeCommands[self.variant]['AbsoluteMode'] else: return self.GCodeCommands[self.variant]['RelativeMode'] def _CommonArgs(self, X = None, Y = None, Z = None, rate = None): OutStr = '' if (X != None): OutStr += ' X' + format(X, ".4f") if (Y != None): OutStr += ' Y' + format(Y, ".4f") if (Z != None): OutStr += ' Z' + format(Z, ".4f") if (rate != None): OutStr += ' F' + format(rate, ".4f") return OutStr def Move(self, X = None, Y = None, Z = None, 
rate = None, speed='Fast'): OutStr = self.GCodeCommands[self.variant][speed + 'Move'] OutStr += self._CommonArgs(X = X, Y = Y, Z = Z, rate = rate) return OutStr def Dwell(self, seconds = 1): OutStr = self.GCodeCommands[self.variant]['Dwell'] + ' P' + `seconds` return OutStr def Drill(self, X = None, Y = None, Z = None, retract = None, seconds = None, rate = None): if (seconds != None): OutStr = self.GCodeCommands[self.variant]['DrillWithDwell'] OutStr += ' P' + `seconds` else: OutStr = self.GCodeCommands[self.variant]['Drill'] OutStr += self._CommonArgs(X = X, Y = Y, Z = Z, rate = rate) if (retract != None): OutStr += ' R' + `retract` return OutStr # -------- execution starts here # parse parameters # TODO: add density parameter & drill color parameter & check for ImageMagick parser = argparse.ArgumentParser() parser.add_argument('-v', '--verbose', action='store_true', help='spew possibly useless output') parser.add_argument('-m', '--millimeters', action='store_const', dest='units', const='Millimeters', help='set units to millimeters') parser.add_argument('-i', '--inches', action='store_const', dest='units', const='Inches', help='set units to inches') parser.add_argument('-a', '--mach3', action='store_const', dest='gcode', const='Mach3', help='set gcode type to mach3') parser.add_argument('-e', '--emc', action='store_const', dest='gcode', const='EMC', help='set gcode type to emc') parser.add_argument('-s', '--safe', nargs=1, default='0.25', type=float, help='safe height') parser.add_argument('-d', '--drill', nargs=1, default='-0.2', type=float, help='drill depth') parser.add_argument('-p', '--dwell', nargs=1, default='0.5', type=float, help='drill dwell') parser.add_argument('-f', '--feed', nargs=1, default='100', type=float, help='feed rate') parser.add_argument('input') args = parser.parse_args() if (args.gcode == None): args.gcode = 'Mach3' if (args.units == None): args.units = 'Inches' theMap = DrillMap(args.input, args.units) # make drill coordinates relative to first drill if (theMap.drillList): firstCoord = theMap.drillList[0] relativeDrillList = [] for drill in theMap.drillList: newCoord = [drill[0] - firstCoord[0], drill[1] - firstCoord[1]] relativeDrillList.append(newCoord) # output gcode for the list of drills # init machine, set units, zero axes gc = GCode(args.gcode) print(gc.Spindle('Stop')) print(gc.Units(args.units)) print(gc.Absolute()) print(gc.Pause(), gc.Comment('Check that tool is aligned with first drill')) print(gc.Move(Z = args.safe)) # move to last drill position and pause lastDrill = len(relativeDrillList) - 1 print(gc.Move(X = relativeDrillList[lastDrill][0], Y = relativeDrillList[lastDrill][1])) print(gc.Pause()) print(gc.Pause(), gc.Comment('Check that tool is aligned with last drill')) print(gc.Spindle('CW')) print(gc.Dwell(3)) print(gc.Message('Drilling')) # move to each drill position and drill for eachDrill in relativeDrillList: print(gc.Drill(X = eachDrill[0], Y = eachDrill[1], Z = args.drill, retract = args.safe, seconds = args.dwell)) # end of GCode program print(gc.Spindle('Stop')) print(gc.Pause())
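# --- Usage sketch (not part of the original script); the file names below are
# --- examples only. A typical invocation from a shell would look like:
# ---     python drill_from_image.py --inches --mach3 board_etch.png > holes.gcode
# --- The emitted GCode first moves over the last drill position and pauses so
# --- the operator can confirm alignment before the spindle starts.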
unlicense
695,087,722,139,142,000
32.618911
112
0.594903
false
3.143048
false
false
false
735tesla/SkypeDump
skypedump.py
1
6469
#!/usr/bin/env python # -*- coding: utf-8 -*- import os import re import unicodedata import webbrowser import hashlib import sqlite3 as sqlite import xml.etree.ElementTree as ET import platform import sys class ChatMessage(object): def __init__(self): super(ChatMessage, self).__init__() self.from_username = '[data not available]' self.to_username = '[data not available]' self.message_body = '[data not available]' @property def from_username(self): return self._from_username @from_username.setter def from_username(self, value): self._from_username = value @property def to_username(self): return self._to_username @to_username.setter def to_username(self, value): self._to_username = value @property def message_body(self): return self._message_body @message_body.setter def message_body(self, value): self._message_body = value def to_html(self): html = """ <tr> <td>__from_username__</td> <td>__to_username__</td> <td>__msg_body__</td> </tr> """ html = html.replace('__from_username__', self.from_username) html = html.replace('__to_username__', self.to_username) html = html.replace('__msg_body__', self.message_body) return html class SkypeUser(object): def __init__(self): super(SkypeUser, self).__init__() self.actual_name = '[data not available]' self.username = '[data not available]' self.birthday = '[data not available]' self.phone_home = '[data not available]' self.phone_mobile = '[data not available]' self.email = '[data not available]' @property def actual_name(self): return self._actual_name @actual_name.setter def actual_name(self, value): self._actual_name = value @property def username(self): return self._username @username.setter def username(self, value): self._username = value @property def birthday(self): return self._birthday @birthday.setter def birthday(self, value): self._birthday = str(value) @property def phone_home(self): return self._phone_home @phone_home.setter def phone_home(self, value): self._phone_home = value @property def phone_mobile(self): return self._phone_mobile @phone_mobile.setter def phone_mobile(self, value): self._phone_mobile = value @property def email(self): return self._email @email.setter def email(self, value): self._email = value def to_html(self): html = """ <tr> <td>__username__</td> <td>__fullname__</td> <td>__birthday__</td> <td>__homphone__</td> <td>__mobphone__</td> <td>__theemail__</td> </tr> """ html = html.replace('__username__', self.username) html = html.replace('__fullname__', self.actual_name) html = html.replace('__birthday__', self.birthday) html = html.replace('__homphone__', self.phone_home) html = html.replace('__mobphone__', self.phone_mobile) html = html.replace('__theemail__', self.email) return html def process_skype_database(db_file): messages = [] user = None database_connection = sqlite.connect(db_file) database_cursor = database_connection.cursor() database_cursor.execute('SELECT author,dialog_partner,body_xml FROM Messages') for from_username,to_username,body_xml in database_cursor.fetchall(): chatmessage = ChatMessage() if from_username: chatmessage.from_username = from_username if to_username: chatmessage.to_username = to_username if body_xml: chatmessage.message_body = body_xml messages.append(chatmessage) database_cursor.execute('SELECT skypename,fullname,birthday,phone_home,phone_mobile,emails from Accounts') xml_root = ET.parse('/'.join(db_file.split('/')[:-1])+'/config.xml').getroot() auth_data = xml_root[0][0][0].text # TODO: find out how to decrypt this user = SkypeUser() user_data = database_cursor.fetchone() if 
user_data[0]: user.username = user_data[0] if user_data[1]: user.actual_name = user_data[1] if user_data[2]: user.birthday = user_data[2] if user_data[3]: user.phone_home = user_data[3] if user_data[4]: user.phone_mobile = user_data[4] if user_data[5]: user.email = user_data[5] return (user, messages) def verify_os_type(): if platform.system() != 'Darwin': sys.stderr.write('[!] Incompatible operating system\n') exit(-1) def get_db_list(): db_files = [] home_dir = os.path.expanduser("~") db_dir = home_dir+'/Library/Application Support/Skype' for the_dir in os.listdir(db_dir): if os.path.isdir(db_dir+'/'+the_dir) and the_dir not in ('DataRv', 'EmoticonCache.bundle', 'shared_dynco', 'shared_httpfe'): db_files.append(db_dir+'/'+the_dir+'/main.db') return db_files def main(args): html = """ <!DOCTYPE html> <html> <head> <meta charset='utf-8'> <title>SkypeDump Output Table</title> <link rel="stylesheet" href="http://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap.min.css"> <link rel="stylesheet" href="http://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap-theme.min.css"> <script src="http://ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script> <script src="http://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/js/bootstrap.min.js"></script> <style type="text/css"> .sd-table{ margin: 20px; } </style> </head> <body> <div class="sd-table"> <table class="table"> <thead> <tr> <th>Skype Username:</th> <th>Real Name:</th> <th>Birthday:</th> <th>Home Phone #:</th> <th>Cell Phone #:</th> <th>Email:</th> </tr> </thead> <tbody> __USER_DATA__ </tbody> </table> <table class="table"> <thead> <tr> <th>From:</th> <th>To:</th> <th>Message:</th> </tr> </thead> <tbody> __MESSAGE_DATA__ </tbody> </table> </div> </body> </html> """ user_html = '' message_html = '' for db_file in get_db_list(): print "[*] Processing database: %s\n" % (db_file) user_info, messages_info = process_skype_database(db_file) user_html += user_info.to_html() for message in messages_info: message_html += message.to_html() html = html.replace('__USER_DATA__', user_html) html = html.replace('__MESSAGE_DATA__', message_html) html = unicodedata.normalize('NFKD', html).encode('ascii', 'ignore') html = re.sub(r'[^\x00-\x7F]+', '', html) with open('/tmp/skype_db.html', 'w') as f: f.write(html) webbrowser.open_new_tab('/tmp/skype_db.html') if __name__ == '__main__': main(sys.argv)
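# --- Small rendering sketch (not part of the original script): ChatMessage and
# --- SkypeUser each emit one HTML table row; the sample values are made up.
def _example_row():
    msg = ChatMessage()
    msg.from_username = 'alice'
    msg.to_username = 'bob'
    msg.message_body = 'hello there'
    return msg.to_html()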
gpl-2.0
962,441,054,399,818,200
27.004329
126
0.64554
false
2.893113
false
false
false
jtauber/czerny
prototypes/process_hanon_21.py
1
2417
#!/usr/bin/env python from align import nw_align def load_score(filename): score = [] for line in open(filename): note, duration_64 = line.strip().split() note = int(note) duration_64 = int(duration_64) score.append((note, duration_64)) return score def load_performance(filename): performance = [] # dictionary mapping pitch to offset and velocity of event when that pitch # was started note_started = {} for line in open(filename): offset, note, velocity = line.strip().split() offset = int(float(offset) * 1000000) note = int(note) velocity = int(velocity) if velocity > 0: if note in note_started: # new note at that pitch started before previous finished # not sure it should happen but let's handle it anyway (start_offset, start_velocity) = note_started.pop(note) duration = offset - start_offset performance.append( (start_offset, note, start_velocity, duration)) note_started[note] = (offset, velocity) else: # note end if note not in note_started: # note was never started so ignore pass else: (start_offset, start_velocity) = note_started.pop(note) duration = offset - start_offset performance.append( (start_offset, note, start_velocity, duration)) return performance # similarity measure used by Needleman-Wunsch algorithm def note_similarity(score_note, performance_note): # at the moment we just give a 1 if the pitch matches, 0.5 if it's # within a tone and 0 if more # over time this can be tweaked to include velocity, duration, etc if score_note[0] == performance_note[1]: return 1 elif abs(score_note[0] - performance_note[1]) < 3: return 0.5 else: return 0 if __name__ == "__main__": score = load_score("../examples/scores/hanon_21_rh.txt") performance = load_performance("../examples/recordings/hanon_21_rh.txt") # align score and performance using above similarity function and a penalty # of -1 for insertions and deletions @@@ might need a lot of tweaking for i in nw_align(score, performance, note_similarity, -1, -1): print i
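# --- Tiny check (not part of the original script) of the similarity measure
# --- used by the aligner: an exact pitch match scores 1, a near miss (within a
# --- tone) scores 0.5, anything farther scores 0.
assert note_similarity((60, 16), (0, 60, 64, 500)) == 1
assert note_similarity((60, 16), (0, 62, 64, 500)) == 0.5
assert note_similarity((60, 16), (0, 65, 64, 500)) == 0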
mit
829,367,193,271,788,400
29.2125
79
0.599917
false
4.196181
false
false
false
Farthen/OTFBot
otfbot/plugins/ircClient/seen.py
1
2384
# This file is part of OtfBot. # # OtfBot is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # OtfBot is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with OtfBot; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # # (c) 2009 by Thomas Wiegart # import pickle,time,os from otfbot.lib import chatMod class Plugin(chatMod.chatMod): def __init__(self, bot): self.bot = bot try: os.mkdir(datadir) except OSError: pass try: f = file(datadir + "/users", "rb") self.userdata = pickle.load(f) f.close() except IOError: self.userdata = [{}] self.bot.root.getServiceNamed('scheduler').callLater(60, self.save_data) #TODO: call this only on exit def joined(self,channel): try: self.userdata[0][channel] except KeyError: self.userdata[0][channel] = {} def msg(self, user, channel, msg): if channel[0] == "#": self.userdata[0][channel][user.split("!")[0].lower()] = {'msg':msg, 'time':time.time()} def command(self, user, channel, command, options): if command == "seen": try: zeit = self.userdata[0][channel][options.lower()]['time'] msg = self.userdata[0][channel][options.lower()]['msg'] self.bot.sendmsg(channel,"user " + options + " was last seen on " + str(time.strftime("%a, %d %b %Y %H:%M:%S",time.localtime(zeit))) + " saying '" + msg + "'.") except: self.bot.sendmsg(channel,"user " + options + " is unknown") def stop(self): self.save_data() def save_data(self): f = file(datadir + "/users", "wb") pickle.dump(self.userdata, f) f.close() self.bot.root.getServiceNamed('scheduler').callLater(60, self.save_data)
gpl-2.0
-1,188,863,002,668,787,700
36.25
176
0.599832
false
3.701863
false
false
false
QuantiModo/QuantiModo-SDK-Python
SwaggerPetstore/models/connector.py
1
3170
#!/usr/bin/env python # coding: utf-8 """ Copyright 2015 SmartBear Software Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ class Connector(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ def __init__(self): """ Swagger model :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition. """ self.swagger_types = { 'id': 'int', 'name': 'str', 'display_name': 'str', 'image': 'str', 'get_it_url': 'str', 'connected': 'str', 'connect_instructions': 'str', 'last_update': 'int', 'latest_data': 'int', 'no_data_yet': 'bool' } self.attribute_map = { 'id': 'id', 'name': 'name', 'display_name': 'displayName', 'image': 'image', 'get_it_url': 'getItUrl', 'connected': 'connected', 'connect_instructions': 'connectInstructions', 'last_update': 'lastUpdate', 'latest_data': 'latestData', 'no_data_yet': 'noDataYet' } # Connector ID number self.id = None # int # Connector lowercase system name self.name = None # str # Connector pretty display name self.display_name = None # str # URL to the image of the connector logo self.image = None # str # URL to a site where one can get this device or application self.get_it_url = None # str # True if the authenticated user has this connector enabled self.connected = None # str # URL and parameters used when connecting to a service self.connect_instructions = None # str # Epoch timestamp of last sync self.last_update = None # int # Number of measurements obtained during latest update self.latest_data = None # int # True if user has no measurements for this connector self.no_data_yet = None # bool def __repr__(self): properties = [] for p in self.__dict__: if p != 'swaggerTypes' and p != 'attributeMap': properties.append('{prop}={val!r}'.format(prop=p, val=self.__dict__[p])) return '<{name} {props}>'.format(name=__name__, props=' '.join(properties))
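# --- Illustrative sketch (not part of the generated model): populate a couple
# --- of attributes and rely on __repr__ for a quick dump; the values are made up.
if __name__ == '__main__':
    connector = Connector()
    connector.id = 12
    connector.name = 'fitbit'
    connector.connected = 'true'
    print(connector)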
gpl-2.0
-8,626,761,504,861,406,000
31.346939
100
0.561199
false
4.408901
false
false
false
mvsaha/blahb
blahb/label.py
1
15233
import numba from .utils import exponential_search from .strgen import * def _split_init_into_coords_init_str(dim): return "coords_{} = loc[:, {}]".format(dim, dim) def split_init_into_coords_init_str(ndim): return '\n'.join([_split_init_into_coords_init_str(dim) for dim in range(ndim)]) update_cursor_dim_0_base_string = """ if shift_0: left_edge_0 = c0 - r0 if shift_0 >= {P_shape_0}: # Re-initialize first cursor when all spans are invalidated cursors_0[0] = exponential_search( coords_0, left_edge_0, start=ends_0[-1]) ends_0[0] = exponential_search( coords_0, left_edge_0 + 1, start=cursors_0[0]) else: # Shift the spans that are still valid, but cursors must be reset for sh in range({P_shape_0} - shift_0): # cursors_0[sh] = ends_0[sh + shift_0 - 1] ends_0[sh] = ends_0[sh + shift_0] # Initialize cursors/ends for positions that are not shifted shift_0 = min(shift_0, {P_shape_0}) for sh in range({P_shape_0} - shift_0, {P_shape_0}): cursors_0[sh] = exponential_search( coords_0, left_edge_0 + sh, ends_0[sh - 1]) ends_0[sh] = exponential_search( coords_0, left_edge_0 + sh + 1, start=cursors_0[sh]) if shift_0: shift_1 = np.int64({P_shape_1}) shift_0 = np.int64(coords_0[i_coord + 1] - c0) c0 = coords_0[i_coord + 1] """ def update_cursor_section_dim_0(neigh_shape): """Propagate shift should be true if there is more than one dimension""" if not len(neigh_shape) > 1: raise ValueError( "Use specialized 1d labeling function for 1d pixelsets.") return update_cursor_dim_0_base_string.format( P_shape_0=neigh_shape[0], P_shape_1=neigh_shape[1], ) init_loop_base_string = """ start = cursors_{dim_minus_1}[{lower_dim_index}] stop = ends_{dim_minus_1}[{lower_dim_index}] cursors_{dim}[{lower_dim_index}, 0] = exponential_search( coords_{dim}, left_edge_{dim}, start=start, stop=stop) ends_{dim}[{lower_dim_index}, 0] = exponential_search( coords_{dim}, left_edge_{dim} + 1, start=cursors_{dim}[{lower_dim_index}, 0], stop=stop) """ def param_init_loop(shp, dim): assert dim <= len(shp) lower_dim_index = ', '.join( [i_(low_dim) for low_dim in range(dim)]) # 'i0, i1, ...' body = init_loop_base_string.format( dim=dim, dim_minus_1=dim - 1, lower_dim_index=lower_dim_index, ) return loop_over_shape(shp[:dim], body) shift_loop_base_string = """ for sh in range({dim_shape} - shift_{dim}): # cursors_{dim}[{lower_dim_index}, sh] = ends_{dim}[{lower_dim_index}, sh + shift_{dim} - 1] ends_{dim}[{lower_dim_index}, sh] = ends_{dim}[{lower_dim_index}, sh + shift_{dim}] """ def param_shift_loop(shp, dim): assert len(shp) > dim lower_dim_index = ', '.join( [i_(low_dim) for low_dim in range(dim)]) # 'i0, i1, ...' 
body = shift_loop_base_string.format( dim=dim, dim_shape=shp[dim], lower_dim_index=lower_dim_index, ) return loop_over_shape(shp[:dim], body) set_higher_shift_string = """shift_{dim_plus_1} = {dim_plus_1_shape}""" def param_set_higher_shift(shp, dim): if len(shp) - dim < 2: return '' else: return set_higher_shift_string.format( dim_plus_1=dim + 1, dim_plus_1_shape=shp[dim + 1]) set_new_cursor_loop_base_exponential_search_string = """ start = cursors_{dim_minus_1}[{lower_dim_index}] stop = ends_{dim_minus_1}[{lower_dim_index}] for sh in range({dim_shape} - shift_{dim}, {dim_shape}): start = max(start, ends_{dim}[{lower_dim_index}, sh - 1]) cursors_{dim}[{lower_dim_index}, sh] = exponential_search( coords_{dim}, left_edge_{dim} + sh, start=start, stop=stop) ends_{dim}[{lower_dim_index}, sh] = exponential_search( coords_{dim}, left_edge_{dim} + sh + 1, start=cursors_{dim}[{lower_dim_index}, sh], stop=stop) """ set_new_cursor_loop_base_linear_search_string = """ start = cursors_{dim_minus_1}[{lower_dim_index}] stop = ends_{dim_minus_1}[{lower_dim_index}] for sh in range({dim_shape} - shift_{dim}, {dim_shape}): start = max(start, ends_{dim}[{lower_dim_index}, sh - 1]) for i in range(start, stop + 1): if coords_{dim}[i] >= left_edge_{dim} + sh or i == stop: cursors_{dim}[{lower_dim_index}, sh] = i break start = cursors_{dim}[{lower_dim_index}, sh] for i in range(start, stop + 1): if coords_{dim}[i] > left_edge_{dim} + sh or i == stop: ends_{dim}[{lower_dim_index}, sh] = i break """ def param_set_new_cursor_loop(shp, dim): assert len(shp) > dim lower_dim_index = ', '.join( [i_(low_dim) for low_dim in range(dim)]) # 'i0, i1, ...' if dim < 2: base_str = set_new_cursor_loop_base_exponential_search_string else: base_str = set_new_cursor_loop_base_linear_search_string body = base_str.format( dim=dim, dim_shape=shp[dim], dim_minus_1=dim - 1, lower_dim_index=lower_dim_index ) return loop_over_shape(shp[:dim], body) minimize_shift_string = """shift_{dim} = min(shift_{dim}, {dim_shape})""" def minimize_shift(dim, dim_shape): return minimize_shift_string.format(dim=dim, dim_shape=dim_shape) cursor_loops_string = """ if shift_{dim}: left_edge_{dim} = c{dim} - r{dim} right_edge_{dim} = c{dim} + r{dim} if shift_{dim} >= {dim_shape}: {init_loop} else: {shift_loop} {minimize_shift} {set_new_cursor_loop} {set_higher_shift} shift_{dim} = np.int64(coords_{dim}[i_coord + 1] - c{dim}) c{dim} = coords_{dim}[i_coord + 1] """ def param_cursor_loops(shp, dim): return cursor_loops_string.format( dim=dim, dim_shape=shp[dim], init_loop=indent_block(param_init_loop(shp, dim), 2, first_line=0), shift_loop=indent_block(param_shift_loop(shp, dim), 2, first_line=0), minimize_shift=minimize_shift(dim, shp[dim]), set_new_cursor_loop=indent_block(param_set_new_cursor_loop(shp, dim), 1, first_line=0), set_higher_shift=param_set_higher_shift(shp, dim) ) last_dim_loop_string = """ c{dim} = coords_{dim}[i_coord] left_edge_{dim} = c{dim} - r{dim} right_edge_{dim} = c{dim} + r{dim} {do_something_with_central_pixel} {low_dim_loop}""" last_dim_loop_body_string_hyperrect = """ cursor = cursors_{dim_minus_1}[{lower_dim_index}] while cursor < ends_{dim_minus_1}[{lower_dim_index}] and coords_{dim}[cursor] < left_edge_{dim}: cursor += 1 cursors_{dim_minus_1}[{lower_dim_index}] = cursor # Save the position we reached along the shard while cursor < ends_{dim_minus_1}[{lower_dim_index}] and coords_{dim}[cursor] <= right_edge_{dim}: {do_something_with_neighbors} cursor += 1""" last_dim_loop_body_string_struct_el = """ cursor = 
cursors_{dim_minus_1}[{lower_dim_index}] while cursor < ends_{dim_minus_1}[{lower_dim_index}] and coords_{dim}[cursor] < left_edge_{dim}: cursor += 1 cursors_{dim_minus_1}[{lower_dim_index}] = cursor # Save the position we reached along the shard _end = ends_{dim_minus_1}[{lower_dim_index}] for i_final in range({last_dim_shape}): while cursor < _end and coords_{dim}[cursor] < left_edge_{dim} + i_final: cursor += 1 if cursor == _end: break elif coords_{dim}[cursor] == left_edge_{dim} + i_final and struct_el[{lower_dim_index}, i_final]: {do_something_with_neighbors}""" def param_last_dim_loop(shp, struct_el): """ shp : Shape of the hyperrect around the central pixel to search for neighbors struct_el: True/False on whether a structuring element of shape shp will be used.""" assert len(shp) last_dim = len(shp) - 1 lower_dim_index = ', '.join( [i_(low_dim) for low_dim in range(last_dim)]) # 'i0, i1, ...' if struct_el: loop_body = last_dim_loop_body_string_struct_el.format( dim=last_dim, dim_minus_1=last_dim - 1, last_dim_shape=shp[-1], lower_dim_index=lower_dim_index, do_something_with_neighbors="{do_something_with_neighbors}" ) else: loop_body = last_dim_loop_body_string_hyperrect.format( dim=last_dim, dim_minus_1=last_dim - 1, lower_dim_index=lower_dim_index, do_something_with_neighbors="{do_something_with_neighbors}" ) loop = loop_over_shape(shp[:-1], loop_body) return last_dim_loop_string.format( dim=last_dim, low_dim_loop=loop, do_something_with_central_pixel="{do_something_with_central_pixel}", ) # Find the ancestors of neighbor index find_central_ancestor_string = """ central_ancestor = labels[i_coord] while labels[central_ancestor] != central_ancestor: prev_central_ancestor = central_ancestor central_ancestor = labels[central_ancestor] labels[prev_central_ancestor] = central_ancestor""" find_neighbor_ancestor_string = """ #central_ancestor = labels[i_coord] neighbor_ancestor = labels[cursor] if neighbor_ancestor == central_ancestor: break #while labels[central_ancestor] != central_ancestor: # prev_central_ancestor = central_ancestor # central_ancestor = labels[central_ancestor] # labels[prev_central_ancestor] = central_ancestor while labels[neighbor_ancestor] != neighbor_ancestor: prev_neighbor_ancestor = neighbor_ancestor neighbor_ancestor = labels[neighbor_ancestor] labels[prev_neighbor_ancestor] = neighbor_ancestor if neighbor_ancestor == central_ancestor: labels[cursor] = central_ancestor labels[i_coord] = central_ancestor if neighbor_ancestor < central_ancestor: labels[cursor] = neighbor_ancestor labels[i_coord] = neighbor_ancestor labels[central_ancestor] = neighbor_ancestor central_ancestor = neighbor_ancestor else: # neighbor_ancestor > central_ancestor: labels[cursor] = central_ancestor labels[i_coord] = central_ancestor labels[neighbor_ancestor] = central_ancestor""" finalize_labels_str = """ for i in range(labels.size-1, -1, -1): i = numba.int_(i) anc = i while anc != labels[anc]: anc = numba.int_(labels[anc]) while labels[i] != anc: i_prev = i labels[i_prev] = anc i = numba.int_(labels[i]) """ label_func_string = """ def label(loc, labels, {struct_el}): {split_loc_to_coords} # Number of coordinates n = coords_0.size {shift_init_strings} {cursors_init_strings} {ends_init_strings} {coord_init_strings} {range_init_strings} for i_coord in range(n): {coord_loop_body} {finish_up} return labels""" def find_neighbors_func(neigh_shape, use_struct_el): """ Build a nopython function to label locations. 
Arguments --------- neigh_shape : ndim-tuple of ints Should all be odd numbers so that the central pixel remains well defined use_struct_el : bool Flag indicating that the structuring element is not a perfect hyperect neighborhood (i.e. np.all(struct_el) == False) Returns ------- Numba nopython function that labels IndexSet locations that are neighbors. """ ndim = len(neigh_shape) fn = label_func_string.format( struct_el='struct_el' if use_struct_el else '', split_loc_to_coords = indent_block( split_init_into_coords_init_str(ndim), 1, first_line=0), coord_dim_names=coord_dim_names(ndim), coord_init_strings=indent_block(coord_init_strings(ndim), first_line=0), shift_init_strings=indent_block(shift_init_strings(neigh_shape), 1, first_line=0), cursors_init_strings=indent_block( cursors_init_strings(neigh_shape, np.int64), first_line=0), ends_init_strings=indent_block( ends_init_strings(neigh_shape, np.int64), first_line=0), range_init_strings=indent_block(range_init_strings(neigh_shape), first_line=0), coord_loop_body=''.join( [indent_block(update_cursor_section_dim_0(neigh_shape), 2)] + [indent_block(param_cursor_loops(neigh_shape, i), 2) for i in range(1, ndim - 1)] + [indent_block(param_last_dim_loop(neigh_shape, use_struct_el), 2)] ), finish_up=indent_block(finalize_labels_str, 1, first_line=0), ) indent_amount = ndim + 3 if use_struct_el else ndim + 2 fn = fn.format( do_something_with_central_pixel=indent_block( find_central_ancestor_string, 2, first_line=0), do_something_with_neighbors=indent_block(find_neighbor_ancestor_string, indent_amount, first_line=0), ) return fn __saved_neighbor_funcs = dict() def build_label_func(shape, use_struct_el): if (shape, use_struct_el) in __saved_neighbor_funcs: return __saved_neighbor_funcs[(shape, use_struct_el)] fn_string = find_neighbors_func(shape, use_struct_el) _loc = dict() exec(fn_string, globals(), _loc) fn = numba.jit(_loc['label'], nopython=True, nogil=True) __saved_neighbor_funcs[(shape, use_struct_el)] = fn return fn @numba.njit def merge_chunked_labels(master_labels, chunk_labels, overlap_start, overlap_stop): n_overlapping = overlap_stop - overlap_start for i_chunk, i_master in enumerate(range(overlap_start, overlap_stop)): # print(i_chunk, i_master) anc_master = master_labels[i_master] while master_labels[anc_master] != anc_master: anc_master_prev = anc_master anc_master = master_labels[anc_master] master_labels[anc_master_prev] = anc_master anc_chunk = chunk_labels[i_chunk] + overlap_start while master_labels[anc_chunk] != anc_chunk: anc_chunk_prev = anc_chunk anc_chunk = master_labels[anc_chunk] master_labels[anc_chunk_prev] = anc_chunk if anc_chunk < anc_master: master_labels[anc_master] = anc_chunk elif anc_master < anc_chunk: master_labels[anc_chunk] = anc_master fin = overlap_stop + chunk_labels.size - n_overlapping master_labels[overlap_stop:fin] = ( chunk_labels[n_overlapping:] + overlap_start) @numba.njit([numba.void(numba.uint8[:]), numba.void(numba.uint16[:]), numba.void(numba.uint32[:]), numba.void(numba.uint64[:])], nogil=True) def finalize_labels(labels): """Ensure that labels are root or point to a root.""" for i in range(labels.size - 1, -1, -1): i = numba.int_(i) anc = i while anc != labels[anc]: anc = numba.int_(labels[anc]) while labels[i] != anc: i_prev = i labels[i_prev] = anc i = numba.int_(labels[i])
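The file above assembles its labeling kernel from generated source text and compiles it with numba. A minimal driving sketch follows, assuming it runs in the same module as build_label_func and that coordinates arrive lexicographically sorted with every point initialised as its own label root; those two assumptions are not stated in the file, only build_label_func and its (shape, use_struct_el) arguments come from it.

import numpy as np

# Hypothetical sparse 2-D point set; sortedness and label initialisation are assumptions.
loc = np.array([[0, 0], [0, 1], [0, 5], [1, 0], [5, 5]], dtype=np.int64)
labels = np.arange(len(loc), dtype=np.uint32)      # every point starts as its own root

label_fn = build_label_func(shape=(3, 3), use_struct_el=False)
label_fn(loc, labels)      # union points that fall inside each other's 3x3 window
print(labels)              # points 0, 1 and 3 should end up sharing one root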
mit
230,391,288,093,049,820
31.551282
101
0.601589
false
3.208298
false
false
false
istio/tools
perf/docker/rabbitmq/client.py
1
2647
# Copyright Istio Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import time import logging import prom_client import pika import sys password = os.environ["PASSWORD"] username = os.environ["USERNAME"] address = os.environ["ADDRESS"] queue = 'queue' def with_metrics(f, valid=None): return prom_client.attempt_request( f, source='rabbitmq-client', destination='rabbitmq', valid=valid ) def with_metrics_or_fail(f, valid=None): r, success = with_metrics(f, valid) if not success: raise Exception("Function failed") return r, success def setup_client(): credentials = pika.PlainCredentials(username, password) connection = pika.BlockingConnection( pika.ConnectionParameters(address, credentials=credentials)) channel = connection.channel() channel.queue_declare(queue=queue) return channel def send(channel, message): with_metrics_or_fail( lambda: channel.basic_publish( exchange='', routing_key=queue, body=message ), valid=None ) def attempt_decode(s): if s is None: return "" return s.decode('utf-8') def receive(channel, expected): with_metrics_or_fail( lambda: attempt_decode( next(channel.consume(queue, inactivity_timeout=1))[2]), valid=lambda resp: resp == expected ) def run_test(): pub, succeeded = with_metrics(setup_client) if not succeeded: logging.error("Failed to setup client") sys.exit(1) sub, succeeded = with_metrics(setup_client) if not succeeded: logging.error("Failed to setup client") sys.exit(1) while True: message = "a message" send(pub, message) receive(sub, message) time.sleep(.5) if __name__ == "__main__": prom_client.report_metrics() prom_client.report_running('rabbitmq') time.sleep(10) # Wait for server while True: try: run_test() except Exception: logging.warning("Rerunning test due to exception") time.sleep(.5)
apache-2.0
3,253,971,791,620,770,300
23.738318
74
0.649037
false
3.944858
false
false
false
HybridF5/jacket
jacket/api/compute/openstack/compute/legacy_v2/contrib/floating_ip_pools.py
1
2154
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from jacket.api.compute.openstack import extensions from jacket.compute import network authorize = extensions.extension_authorizer('compute', 'floating_ip_pools') def _translate_floating_ip_view(pool_name): return { 'name': pool_name, } def _translate_floating_ip_pools_view(pools): return { 'floating_ip_pools': [_translate_floating_ip_view(pool_name) for pool_name in pools] } class FloatingIPPoolsController(object): """The Floating IP Pool API controller for the OpenStack API.""" def __init__(self): self.network_api = network.API() super(FloatingIPPoolsController, self).__init__() def index(self, req): """Return a list of pools.""" context = req.environ['compute.context'] authorize(context) pools = self.network_api.get_floating_ip_pools(context) return _translate_floating_ip_pools_view(pools) class Floating_ip_pools(extensions.ExtensionDescriptor): """Floating IPs support.""" name = "FloatingIpPools" alias = "os-floating-ip-pools" namespace = ("http://docs.openstack.org/compute/ext/" "floating_ip_pools/api/v1.1") updated = "2012-01-04T00:00:00Z" def get_resources(self): resources = [] res = extensions.ResourceExtension('os-floating-ip-pools', FloatingIPPoolsController(), member_actions={}) resources.append(res) return resources
apache-2.0
1,853,800,950,264,657,400
31.149254
78
0.65506
false
4.018657
false
false
false
pekkosk/hotbit
hotbit/containers/chiralwedge.py
1
5906
from __future__ import division import numpy as np from box.mix import phival from math import sin,cos from weakref import proxy import warnings class ChiralWedge: def __init__(self,atoms,type): ''' Class for chiral+wedge boundary conditions. @param: atoms hotbit.Atoms instance @param: type Should equal to "ChiralWedge" More documentation for the methods can be found from hotbit.Atoms -class. ''' self.type='ChiralWedge' assert type==self.type self.atoms = proxy(atoms) self.par = {'height':(1,0),'twist':(0,1),'angle':(2,0),'physical':(1,1)} self.atoms.set_pbc((True,False,True)) #self._set_table() def get_type(self): return self.type def __repr__(self): twist, angle, height, physical = self.get('twist'), self.get('angle'), self.get('height'), self.get('physical') x='ChiralWedge: angle=%.4f (2*pi/%.2f, ' %(angle,2*np.pi/angle) if physical: x+='physical), ' else: x+='not physical), ' x+='height=%.4f Ang ' %height x+='twist angle %.4f' %twist return x def get_table(self): M = int( round(2*np.pi/self.get('angle')) ) return [{'M':M},{'M':1},{'M':np.Inf}] def get(self,key): """ Get container parameters key: 'angle','height','twist','physical' """ x = self.atoms.get_cell()[self.par[key]] if key in ['angle','height','twist']: return x else: return bool(np.round(x)) def _set(self,**kwargs): assert len(kwargs)==1 for key in kwargs: cell = self.atoms.get_cell() cell[self.par[key]] = kwargs[key] self.atoms.set_cell(cell) def set(self, angle=None, height=None, M=None, physical=True, twist=None, scale_atoms=False, container=None): """ parameters: =========== angle angle (in radians) of the wedge (and M=None) height Height of the primitive cell in z-direction M set angle to 2*pi/M (and angle=None) physical (only if M=None) if angle is small, it does not be exactly 2*pi/integer, i.e. situation has no physical meaning (use for calculating stuff continuously) twist The twist angle for z-translation scale_atoms Scale atoms according to changes in parameters """ if container!=None: assert angle==None and height==None and M==None and twist==None self.set(angle=container.get('angle'),height=container.get('height'),\ physical=container.get('physical'), twist=container.get('twist')) if angle!=None or M!=None: #assert not scale_atoms assert not (angle!=None and M!=None) old_angle = self.get('angle') if M != None: assert isinstance(M,int) self._set(angle=2*np.pi/M) elif angle != None: M = np.abs(int( round(2*np.pi/angle) )) self._set(angle=angle) # check parameters self._set( physical=float(physical) ) if np.abs(self.get('angle'))<1E-6: raise Warning('Too small angle (%f) may bring numerical problems.' %self.get('angle')) if self.get('angle')>np.pi: raise AssertionError('angle>pi') if np.abs(M-2*np.pi/np.abs(self.get('angle')))>1E-12 and self.get('physical'): raise AssertionError('angle not physical: angle != 2*pi/M') if not self.get('physical') and M<20: warnings.warn('Quite large, non-physical angle 2*pi/%.4f.' 
%(2*np.pi/self.get('angle')) ) if scale_atoms: if abs(old_angle)<1E-10: raise ValueError('Atoms cannot be scaled; old wedge angle too small.') newr = [] for r in self.atoms.get_positions(): x,y = r[0],r[1] rad = np.sqrt( x**2+y**2 ) newphi = phival(x,y)*(self.get('angle')/old_angle) newr.append( [rad*np.cos(newphi),rad*np.sin(newphi),r[2]] ) self.atoms.set_positions(newr) if height!=None: if scale_atoms: r = self.atoms.get_positions() r[:,2] = r[:,2] * height/self.get('height') self.atoms.set_positions(r) self._set(height=height) if twist!=None: if scale_atoms: raise NotImplementedError('Atom rescale with twist not implemented.') self._set(twist=twist) #self._set_table() def __eq__(self,other): return self.atoms == other.atoms def get_symmetry_operation_ranges(self): """ Return ranges for symmetry operations. """ M = int( round(2*np.pi/np.abs(self.get('angle'))) ) i = M//2 zi = 0 if np.mod(M,2)==1: ranges = np.array([[-i,i],[0,0],[-np.Inf,np.Inf]]) else: ranges = np.array([[-i+1,i],[0,0],[-np.Inf,np.Inf]]) return ranges def transform(self,r,n): """ Rotate around z r by (n2*angle+n0*twist) and translate by n0*height. """ R = self.rotation(n) trans = np.zeros((3)) trans = n[2]*np.array([0,0,self.get('height')]) return np.dot(R,r) + np.array(trans) def rotation(self,n,angles=False): """ Active rotation matrix of given angle wrt. z-axis.""" angle = n[0]*self.get('angle') + n[2]*self.get('twist') R = np.array([[cos(angle),-sin(angle),0],[sin(angle),cos(angle),0],[0,0,1]]) if angles: raise NotImplementedError('angles not implemented for ChiralWedge') else: return R
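The symmetry operation implemented by ChiralWedge.transform and ChiralWedge.rotation above is a screw operation about the z axis. A standalone numpy sketch of the same arithmetic, with made-up container parameters (it needs neither hotbit nor an Atoms object):

import numpy as np
from math import sin, cos

# Hypothetical container parameters: 6-fold wedge, small twist, 2.5 Ang repeat.
angle, twist, height = 2 * np.pi / 6, 0.1, 2.5
n = (1, 0, 2)      # wedge rotations, unused axis, z translations

phi = n[0] * angle + n[2] * twist
R = np.array([[cos(phi), -sin(phi), 0.0],
              [sin(phi),  cos(phi), 0.0],
              [0.0,       0.0,      1.0]])
r = np.array([1.0, 0.0, 0.0])
print(R.dot(r) + np.array([0.0, 0.0, n[2] * height]))      # image of r under the operation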
gpl-2.0
-158,940,071,351,519,900
35.45679
119
0.532509
false
3.612232
false
false
false
asidev/aybu-core
aybu/core/models/user.py
1
7972
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Copyright 2010-2012 Asidev s.r.l. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import ast from aybu.core.models.base import Base import collections import crypt import re import requests import urllib import json from logging import getLogger from sqlalchemy import Column from sqlalchemy import ForeignKey from sqlalchemy import Unicode from sqlalchemy import Table from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy.orm import (relationship, object_session, joinedload) from sqlalchemy.orm.exc import NoResultFound __all__ = [] log = getLogger(__name__) RemoteGroup = collections.namedtuple('Group', ['name']) class RemoteUser(object): """ This class is used in place of the User class when remote API login management is used in place of local database """ def __init__(self, url, username, crypted_password, cleartext_password, remote, groups, verify_ssl): self.url = url self.username = username self.crypted_password = crypted_password self.cleartext_password = cleartext_password self._groups = groups self.remote = remote self.verify_ssl = verify_ssl @property def groups(self): return [RemoteGroup(name=g) for g in self._groups] @property def password(self): return self.crypted_password @password.setter def password(self, password): url = "{}/{}".format(self.remote, self.username) try: response = requests.put( url, auth=(self.username, self.cleartext_password), data=dict(password=password), verify=self.verify_ssl ) response.raise_for_status() content = json.loads(response.content) except requests.exceptions.RequestException as e: log.critical("Error connection to API: {} - {}"\ .format(type(e).__name__, e)) raise ValueError('Cannot connect to API') except Exception: log.exception('Invalid login: %s', response.status_code) raise ValueError('Invalid login, upstream returned {}'\ .format(response.status_code)) else: log.info("Updated password for %s", self.username) self.crypted_password = content['crypted_password'] self.cleartext_password = password @classmethod def check(cls, request, username, password): remote = request.registry.settings.get('remote_login_url') log.info("Using API server at %s", remote) try: verify_ssl = ast.literal_eval( request.registry.settings.get('remote_login_verify_ssl')) except: log.exception('Error in ast.literal_eval') verify_ssl = False url = "{}/{}".format(remote, username) params = dict( domain=request.host, action="login" ) try: query = "?{}".format(urllib.urlencode(params)) query = "{}{}".format(url, query) log.debug("GET %s", query) response = requests.get(query, auth=(username, password), verify=verify_ssl) response.raise_for_status() log.debug("Response: %s", response) content = json.loads(response.content) except requests.exceptions.RequestException as e: log.critical("Error connection to API: {} - {}"\ .format(type(e).__name__, e)) raise ValueError('Cannot connect to API') except ValueError: log.exception("Cannot decode JSON") raise except Exception: log.error('Invalid login: %s', response.status_code) raise ValueError('Invalid 
login, upstream return %s', response.status_code) else: return RemoteUser(url=url, username=username, crypted_password=content['crypted_password'], cleartext_password=password, groups=content['groups'], remote=remote, verify_ssl=verify_ssl) def has_permission(self, perm): return bool(set((perm, 'admin')) & set(self._groups)) def check_password(self, password): if not self.cleartext_password == password: raise ValueError('Invalid username or password') def __repr__(self): return "<RemoteUser {}>".format(self.username) users_groups = Table('users_groups', Base.metadata, Column('users_username', Unicode(255), ForeignKey('users.username', onupdate="cascade", ondelete="cascade")), Column('groups_name', Unicode(32), ForeignKey('groups.name', onupdate="cascade", ondelete="cascade")), mysql_engine='InnoDB') class User(Base): __tablename__ = 'users' __table_args__ = ({'mysql_engine': 'InnoDB'}) hash_re = re.compile(r'(\$[1,5-6]\$|\$2a\$)') salt = "$6$" username = Column(Unicode(255), primary_key=True) crypted_password = Column("password", Unicode(128), nullable=False) groups = relationship('Group', secondary=users_groups, backref='users') @classmethod def get(cls, session, pkey): # FIXME this should raise NoResultFound if query returns None! user = session.query(cls).options(joinedload('groups')).get(pkey) if user is None: raise NoResultFound("No obj with key {} in class {}"\ .format(pkey, cls.__name__)) return user @classmethod def check(cls, session, username, password): try: user = cls.get(session, username) salt = cls.hash_re.match(user.password) length = len(salt.group()) if salt else 2 enc_password = crypt.crypt(password, user.password[0:length]) assert user.password == enc_password except (AssertionError, NoResultFound): log.warn('Invalid login for %s', username) raise ValueError('invalid username or password') else: return user @hybrid_property def password(self): return self.crypted_password @password.setter def password(self, value): self.crypted_password = crypt.crypt(value, self.salt) def check_password(self, password): return self.__class__.check(object_session(self), self.username, password) def has_permission(self, perm): return bool(set((perm, 'admin')) & set(g.name for g in self.groups)) def __repr__(self): return "<User {}>".format(self.username) class Group(Base): __tablename__ = 'groups' __table_args__ = ({'mysql_engine': 'InnoDB'}) name = Column(Unicode(32), primary_key=True) def __repr__(self): return "<Group {}>".format(self.name)
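A standalone sketch of the crypt handling used by User.password and User.check above, outside of SQLAlchemy; the password is made up, and the $6$ (SHA-512) scheme needs a libc that supports it (Linux glibc does, some other platforms do not):

import crypt
import re

hash_re = re.compile(r'(\$[1,5-6]\$|\$2a\$)')

stored = crypt.crypt('s3cret', '$6$')                      # what the password setter stores
salt = hash_re.match(stored)
length = len(salt.group()) if salt else 2                  # '$6$' -> 3 salt characters
assert stored == crypt.crypt('s3cret', stored[0:length])   # valid login re-hashes to the same value
assert stored != crypt.crypt('wrong', stored[0:length])    # wrong password does not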
apache-2.0
5,875,499,489,000,414,000
33.214592
81
0.568239
false
4.608092
false
false
false
uw-it-aca/sqlshare-rest
sqlshare_rest/views/download.py
1
1410
from oauth2_provider.decorators import protected_resource from django.views.decorators.csrf import csrf_exempt from django.core.urlresolvers import reverse from django.http import HttpResponse from sqlshare_rest.views import get_oauth_user, get403, get404, get400, get405 from sqlshare_rest.util.db import get_backend from sqlshare_rest.views.sql import response_for_query from sqlshare_rest.models import DownloadToken from sqlshare_rest.dao.user import get_user import json @csrf_exempt def run(request, token): if request.META['REQUEST_METHOD'] != "GET": return get405() get_oauth_user(request) try: dt = DownloadToken().validate_token(token) except DownloadToken.DoesNotExist: return get404() sql = dt.sql backend = get_backend() user = dt.original_user return response_for_query(sql, user, download_name="query_results.csv") @csrf_exempt @protected_resource() def init(request): if request.META['REQUEST_METHOD'] != "POST": return get405() get_oauth_user(request) values = json.loads(request.body.decode("utf-8")) sql = values["sql"] user = get_user(request) dt = DownloadToken() dt.store_token_for_sql(sql, user) url = reverse("sqlshare_view_run_download", kwargs={"token": dt.token}) response = HttpResponse(json.dumps({'token': dt.token})) response["Location"] = url return response
apache-2.0
3,505,049,341,933,977,000
27.77551
78
0.70922
false
3.615385
false
false
false
yakky/djangocms-text-ckeditor
tests/test_field.py
1
2818
# -*- coding: utf-8 -*- from django.template import Context, Template from django.utils.safestring import SafeData from djangocms_helper.base_test import BaseTestCase from tests.test_app.forms import SimpleTextForm from tests.test_app.models import SimpleText from djangocms_text_ckeditor.fields import HTMLFormField class HtmlFieldTestCase(BaseTestCase): def test_html_form_field(self): html_field = HTMLFormField() self.assertTrue(isinstance(html_field.clean('some text'), SafeData)) class FieldTestCase(BaseTestCase): text_normal = '<p>some non malicious text</p>' text_with_iframe = ('<p>some non malicious text</p>' '<iframe src="http://www.w3schools.com"></iframe>') text_with_iframe_escaped = ('<p>some non malicious text</p>&lt;iframe ' 'src="http://www.w3schools.com"&gt;&lt;/iframe&gt;') text_with_script = ('<p>some non malicious text</p> ' '<script>alert("Hello! I am an alert box!");</script>') text_with_script_escaped = (u'<p>some non malicious text</p> &lt;script&gt;' u'alert("Hello! I am an alert box!");&lt;/script&gt;') def test_model_field_text_is_safe(self): original = 'Hello <h2>There</h2>' template = Template('{{ obj.text }}') text = SimpleText.objects.create(text='Hello <h2>There</h2>') # Fetching a new instance should now have the string marked # as safe. text = SimpleText.objects.get(pk=text.pk) rendered = template.render(Context({'obj': text})) self.assertEqual(original, rendered) def test_model_field_sanitized(self): obj = SimpleText(text=self.text_normal) obj.full_clean() obj.save() obj = SimpleText.objects.get(pk=obj.pk) self.assertEqual(obj.text, self.text_normal) obj = SimpleText(text=self.text_with_iframe) obj.full_clean() obj.save() self.assertEqual(obj.text, self.text_with_iframe_escaped) obj = SimpleText(text=self.text_with_script) obj.full_clean() obj.save() self.assertEqual(obj.text, self.text_with_script_escaped) def test_form_field_sanitized(self): form = SimpleTextForm(data={'text': self.text_normal}) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data['text'], self.text_normal) form = SimpleTextForm(data={'text': self.text_with_iframe}) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data['text'], self.text_with_iframe_escaped) form = SimpleTextForm(data={'text': self.text_with_script}) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data['text'], self.text_with_script_escaped)
bsd-3-clause
-3,695,870,091,653,139,500
36.078947
86
0.635912
false
3.580686
true
false
false
evansde77/cirrus
src/cirrus/delegate.py
1
2385
#!/usr/bin/env python """ _delegate_ Main cirrus command that delegates the call to the sub command verb enabling git cirrus do_a_thing to be routed to the appropriate command call for do_a_thing """ import os import os.path import pkg_resources import sys import signal import subprocess import cirrus.environment as env def install_signal_handlers(): """ Need to catch SIGINT to allow the command to be CTRL-C'ed """ def signal_handler(signal, frame): sys.exit(0) signal.signal(signal.SIGINT, signal_handler) def run_command(cmd): """ run the delegated command with the CTRL-C signal handler in place """ install_signal_handlers() return subprocess.call(cmd, shell=False) HELP = \ """ Cirrus commands available are: {0} Do git cirrus <command> -h for more information on a particular command """ def format_help(command_list): subs = '\n'.join( [c for c in command_list if c != 'cirrus'] ) return HELP.format(subs) def main(): """ _main_ response to the cirrus <verb> command Extracts the available verbs that are installed as entry points by setup.py as cirrus_commands """ home = env.virtualenv_home() commands = [] for script in pkg_resources.iter_entry_points(group="cirrus_commands"): comm = str(script).split(" = ", 1)[0] commands.append(comm) commands.sort() # switch to the current GIT_PREFIX working dir old_dir = os.getcwd() os.chdir(os.path.abspath(os.environ.get('GIT_PREFIX', '.'))) try: args = sys.argv[1:] if len(args) == 0 or args[0] == '-h': # missing command or help print(format_help(commands)) exit_code = 0 else: command_path = "{0}/bin/{1}".format(home, args[0]) if not os.path.exists(command_path): msg = "Unknown command: {}".format(args[0]) print(msg) print(format_help(commands)) exit_code = 127 else: exit_code = run_command([command_path, ] + args[1:]) except Exception as ex: msg = "Exception Details:\n{}".format(ex) print(msg) raise finally: # always return to previous dir os.chdir(old_dir) return exit_code if __name__ == "__main__": sys.exit(main())
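A hedged sketch of the packaging side implied by the docstring above: sub-commands are advertised through the cirrus_commands entry-point group, and for the delegate to find an executable under the virtualenv bin directory each command plausibly also needs a console script of the same name. The plugin name, command name and module path below are invented:

# setup.py fragment; names are illustrative only
from setuptools import setup

setup(
    name='my-cirrus-plugin',
    version='0.1',
    py_modules=['my_cirrus_plugin'],
    entry_points={
        'cirrus_commands': [
            'do_a_thing = my_cirrus_plugin:main',   # what delegate.main() enumerates
        ],
        'console_scripts': [
            'do_a_thing = my_cirrus_plugin:main',   # provides {virtualenv}/bin/do_a_thing
        ],
    },
)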
apache-2.0
-2,823,482,492,811,789,000
21.932692
75
0.600419
false
3.61912
false
false
false
moreati/revelation
epiphany/test/test_execute_bitwise.py
1
7492
from pydgin.utils import trim_32 from epiphany.instruction import Instruction from epiphany.isa import decode from epiphany.machine import RESET_ADDR from epiphany.test.machine import StateChecker, new_state import opcode_factory import pytest @pytest.mark.parametrize('rn,rm,is16bit', [(-1, 28, True), (-1, 28, False), ( 1, 28, True), ( 1, 28, False)]) def test_execute_logical_shift_right(rn, rm, is16bit): rd = 2 state = new_state(rf0=trim_32(rn), rf1=trim_32(rm)) instr = (opcode_factory.lsr16(rd=rd, rn=0, rm=1) if is16bit else opcode_factory.lsr32(rd=rd, rn=0, rm=1)) name, executefn = decode(instr) executefn(state, Instruction(instr, None)) expected_state = StateChecker(AZ=(False if rn < 0 else True), # 1 >> 5 == 0 AV=0, AC=0, pc=((2 if is16bit else 4) + RESET_ADDR), rf2=(0b1111 if rn < 0 else 0)) expected_state.check(state) @pytest.mark.parametrize('rn,imm,is16bit', [(-1, 28, True), (-1, 28, False), ( 1, 28, True), ( 1, 28, False)]) def test_execute_logical_shift_right_imm(rn, imm, is16bit): rd = 2 state = new_state(rf0=trim_32(rn)) instr = (opcode_factory.lsr16_immediate(rd=rd, rn=0, imm=imm) if is16bit else opcode_factory.lsr32_immediate(rd=rd, rn=0, imm=imm)) name, executefn = decode(instr) executefn(state, Instruction(instr, None)) expected_state = StateChecker(AZ=(False if rn < 0 else True), # 1 >> 5 == 0 AV=0, AC=0, pc=((2 if is16bit else 4) + RESET_ADDR), rf2=(0b1111 if rn < 0 else 0)) expected_state.check(state) @pytest.mark.parametrize('rn,rm,is16bit', [(-1, 5, True), (-1, 5, False), ( 1, 5, True), ( 1, 5, False)]) def test_execute_arith_shift_right(rn, rm, is16bit): rd = 2 state = new_state(rf0=trim_32(rn), rf1=trim_32(rm)) instr = (opcode_factory.asr16(rd=rd, rn=0, rm=1) if is16bit else opcode_factory.asr32(rd=rd, rn=0, rm=1)) name, executefn = decode(instr) executefn(state, Instruction(instr, None)) expected_state = StateChecker(AZ=(False if rn < 0 else True), # 1 >> 5 == 0 AV=0, AC=0, pc=((2 if is16bit else 4) + RESET_ADDR), rf2=(trim_32(-1) if rn < 0 else 0)) expected_state.check(state) @pytest.mark.parametrize('rn,imm,is16bit', [(-1, 5, True), (-1, 5, False), ( 1, 5, True), ( 1, 5, False)]) def test_execute_arith_shift_right_imm(rn, imm, is16bit): rd = 2 state = new_state(rf0=trim_32(rn)) instr = (opcode_factory.asr16_immediate(rd=rd, rn=0, imm=imm) if is16bit else opcode_factory.asr32_immediate(rd=rd, rn=0, imm=imm)) name, executefn = decode(instr) executefn(state, Instruction(instr, None)) expected_state = StateChecker(AZ=(False if rn < 0 else True), # 1 >> 5 == 0 AV=0, AC=0, pc=((2 if is16bit else 4) + RESET_ADDR), rf2=(trim_32(-1) if rn < 0 else 0)) expected_state.check(state) @pytest.mark.parametrize('factory,is16bit', [(opcode_factory.lsl16, True), (opcode_factory.lsl32, False) ]) def test_execute_shift_left(factory, is16bit): state = new_state(rf0=5, rf1=7) instr = factory(rd=2, rn=1, rm=0) name, executefn = decode(instr) executefn(state, Instruction(instr, None)) expected_state = StateChecker(AZ=0, AN=0, AC=0, AV=0, pc=((2 if is16bit else 4) + RESET_ADDR), rf2=7 << 5) expected_state.check(state) @pytest.mark.parametrize('factory,is16bit', [(opcode_factory.lsl16_immediate, True), (opcode_factory.lsl32_immediate, False) ]) def test_execute_shift_left_immediate(factory, is16bit): state = new_state(rf1=7) instr = factory(rd=2, rn=1, imm=5) name, executefn = decode(instr) executefn(state, Instruction(instr, None)) expected_state = StateChecker(AZ=0, AN=0, AC=0, AV=0, pc=((2 if is16bit else 4) + RESET_ADDR), rf2=7 << 5) expected_state.check(state) 
@pytest.mark.parametrize('bits,expected,is16bit', [(0b10101010101010101010101010101010, 0b01010101010101010101010101010101, True), (0b01010101010101010101010101010101, 0b10101010101010101010101010101010, True), (0b10101010101010101010101010101010, 0b01010101010101010101010101010101, False), (0b01010101010101010101010101010101, 0b10101010101010101010101010101010, False), ]) def test_execute_bitr(bits, expected, is16bit): state = new_state(rf0=0, rf1=bits) instr = (opcode_factory.bitr16_immediate(rd=2, rn=1, imm=0) if is16bit else opcode_factory.bitr32_immediate(rd=2, rn=1, imm=0)) name, executefn = decode(instr) executefn(state, Instruction(instr, None)) expected_state = StateChecker(AZ=0, AC=0, AV=0, pc=((2 if is16bit else 4) + RESET_ADDR), rf2=expected) expected_state.check(state) @pytest.mark.parametrize('factory,expected', [(opcode_factory.and32, 5 & 7), (opcode_factory.orr32, 5 | 7), (opcode_factory.eor32, 5 ^ 7), ]) def test_execute_bitwise32(factory, expected): state = new_state(rf0=5, rf1=7) instr = factory(rd=2, rn=1, rm=0) name, executefn = decode(instr) executefn(state, Instruction(instr, None)) expected_state = StateChecker(AZ=0, AV=0, AC=0, pc=(4 + RESET_ADDR), rf2=expected) expected_state.check(state) @pytest.mark.parametrize('factory,expected', [(opcode_factory.and16, 5 & 7), (opcode_factory.orr16, 5 | 7), (opcode_factory.eor16, 5 ^ 7), ]) def test_execute_bitwise16(factory, expected): state = new_state(rf0=5, rf1=7) instr = factory(rd=2, rn=1, rm=0) name, executefn = decode(instr) executefn(state, Instruction(instr, None)) expected_state = StateChecker(AZ=0, AV=0, AC=0, pc=(2 + RESET_ADDR), rf2=expected) expected_state.check(state)
bsd-3-clause
-798,977,002,079,429,200
44.406061
79
0.49693
false
3.701581
true
false
false
nbari/my-sandbox
python/email/server.py
1
1444
import smtplib import smtpd import asyncore import email.utils from email.mime.text import MIMEText import threading class SMTPReceiver(smtpd.SMTPServer): def process_message(self, peer, mailfrom, rcpttos, data): print 'Receiving message from:', peer print 'Message addressed from:', mailfrom print 'Message addressed to :', rcpttos print 'Message length :', len(data) print data def send_response(): msg = MIMEText('Hello world!') msg['To'] = email.utils.formataddr(('Recipient', mailfrom)) msg['From'] = email.utils.formataddr( ('Author', '[email protected]')) msg['Subject'] = '' print 'Connecting to mail server' server = smtplib.SMTP() server.set_debuglevel(1) server.connect() print 'Attempting to send message' try: server.sendmail( '[email protected]', [mailfrom], msg.as_string()) except Exception as ex: print 'Could not send mail', ex finally: server.quit() print 'Finished sending message' threading.Thread(target=send_response).start() return def main(): server = SMTPReceiver(('', 2025), None) asyncore.loop() if __name__ == '__main__': main()
bsd-3-clause
-5,075,651,557,787,795,000
28.469388
71
0.549169
false
4.415902
false
false
false
laurentb/weboob
modules/onlinenet/module.py
1
2721
# -*- coding: utf-8 -*- # Copyright(C) 2016 Edouard Lambert # # This file is part of a weboob module. # # This weboob module is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This weboob module is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this weboob module. If not, see <http://www.gnu.org/licenses/>. from __future__ import unicode_literals from weboob.capabilities.bill import DocumentTypes, CapDocument, Subscription, Document, SubscriptionNotFound, DocumentNotFound from weboob.capabilities.base import find_object, NotAvailable from weboob.tools.backend import Module, BackendConfig from weboob.tools.value import ValueBackendPassword, Value from .browser import OnlinenetBrowser __all__ = ['OnlinenetModule'] class OnlinenetModule(Module, CapDocument): NAME = 'onlinenet' DESCRIPTION = 'Online.net' MAINTAINER = 'Edouard Lambert' EMAIL = '[email protected]' LICENSE = 'LGPLv3+' VERSION = '2.1' CONFIG = BackendConfig( Value('login', label='Identifiant'), ValueBackendPassword('password', label='Mot de passe'), ) BROWSER = OnlinenetBrowser accepted_document_types = (DocumentTypes.BILL, DocumentTypes.OTHER,) def create_default_browser(self): return self.create_browser(self.config['login'].get(), self.config['password'].get()) def iter_subscription(self): return self.browser.get_subscription_list() def get_subscription(self, _id): return find_object(self.iter_subscription(), id=_id, error=SubscriptionNotFound) def get_document(self, _id): subid = _id.rsplit('_', 1)[0] subscription = self.get_subscription(subid) return find_object(self.iter_documents(subscription), id=_id, error=DocumentNotFound) def iter_documents(self, subscription): if not isinstance(subscription, Subscription): subscription = self.get_subscription(subscription) return self.browser.iter_documents(subscription) def download_document(self, document): if not isinstance(document, Document): document = self.get_document(document) if document._url is NotAvailable: return return self.browser.open(document._url).content
lgpl-3.0
-5,601,386,455,273,830,000
35.77027
127
0.714076
false
4.091729
false
false
false
kanairen/RegularIcosahedronDict
src/map/factory/base_shape_map_factory.py
1
4162
#!/usr/bin/env python
# coding: utf-8

import numpy as np

from src.obj.obj3d import Obj3d
from src.obj.grid.base_grid import BaseGrid


class BaseShapeMapFactory(object):
    DIST_UNDEFINED = -1

    def __init__(self, model_id, obj3d, grid, n_div, cls, grid_scale):
        """
        :type model_id: int or long:
        :param model_id: ID of the target 3D model

        :type obj3d: Obj3d
        :param obj3d: 3D object for which the shape map is generated

        :type grid: TriangleGrid
        :param grid: grid of equilateral triangles used to generate the shape map

        :type n_div: int or long
        :param n_div: number of grid divisions

        :type cls: int or long
        :param cls: class label

        :type grid_scale: float
        :param grid_scale: scale factor of the grid
        """
        assert isinstance(model_id, (int, long))
        assert isinstance(obj3d, Obj3d)
        assert isinstance(grid, BaseGrid)
        assert isinstance(cls, (int, long))
        assert isinstance(grid_scale, float)

        self.model_id = model_id

        # 3D model: center it in the coordinate system and normalize it
        self.obj3d = obj3d.center().normal()

        # Regular icosahedron grid: scale it so that the 3D model fits completely inside
        self.grid = grid.center().scale(grid_scale).divide_face(n_div)

        # Unsupported if the farthest point of the 3D model is farther from the
        # center than the closest grid vertex
        # (as a rule, grid_scale should be set to 1 or more)
        if np.linalg.norm(self.grid.vertices, axis=1).min() < np.linalg.norm(
                self.obj3d.vertices, axis=1).max():
            raise NotImplementedError()

        # class label
        self.cls = cls

    @staticmethod
    def tomas_moller(origin, end, v0, v1, v2):
        """
        Tomas-Moller algorithm.
        Returns the intersection point of a line segment and a triangle,
        or None when they do not intersect.

        The determinant is replaced by cross/dot products.

        :type origin: np.ndarray
        :param origin: start point of the line segment

        :type end: np.ndarray
        :param end: end point of the line segment

        :type v0 : np.ndarray
        :param v0: first vertex of the triangle

        :type v1: np.ndarray
        :param v1: second vertex of the triangle

        :type v2: np.ndarray
        :param v2: third vertex of the triangle

        :rtype: np.ndarray
        :return: intersection point vector
        """
        edge1 = v1 - v0
        edge2 = v2 - v0

        ray = end - origin

        P = np.cross(ray, edge2)

        # denominator
        denominator = np.dot(P, edge1)

        if denominator > np.finfo(float).eps:
            T = origin - v0
            u = np.dot(P, T)

            if 0 <= u <= denominator:
                Q = np.cross(T, edge1)
                v = np.dot(Q, ray)

                if 0 <= v <= denominator and (u + v) <= denominator:
                    t = np.dot(Q, edge2) / denominator
                    return origin + ray * t

        return None

    def create(self):
        raise NotImplementedError

    def _distances(self):
        """
        Build the map of distances associated with the grid vertices.
        """
        grid_center = np.zeros(shape=(3,))

        # Distance map; indices correspond to the grid vertices.
        # Positions whose distance is undefined (cavities, etc.) keep DIST_UNDEFINED.
        distance_map = np.full(shape=(len(self.grid.vertices)),
                               fill_value=BaseShapeMapFactory.DIST_UNDEFINED,
                               dtype=np.float64)

        for i, g_vertex in enumerate(self.grid.vertices):
            for f0, f1, f2 in self.obj3d.vertices[self.obj3d.face_vertices]:
                p_cross = self.tomas_moller(grid_center, g_vertex, f0, f1, f2)
                if p_cross is not None:
                    distance_map[i] = np.linalg.norm(p_cross - grid_center)
                    break

        return distance_map
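A small numeric check of tomas_moller above; the segment and triangle are made up, and the import path simply mirrors this row's file path:

import numpy as np

from src.map.factory.base_shape_map_factory import BaseShapeMapFactory

origin = np.array([0.0, 0.0, 0.0])
end = np.array([0.0, 0.0, 2.0])
# Triangle in the z = 1 plane containing the segment, wound so the denominator is positive.
v0 = np.array([-1.0, -1.0, 1.0])
v1 = np.array([-1.0, 3.0, 1.0])
v2 = np.array([3.0, -1.0, 1.0])

print(BaseShapeMapFactory.tomas_moller(origin, end, v0, v1, v2))   # [0. 0. 1.]
print(BaseShapeMapFactory.tomas_moller(origin, end, v0, v2, v1))   # None: opposite winding is culled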
mit
-6,878,317,469,387,684,000
24.382353
78
0.548378
false
2.353102
false
false
false
ikben/troposphere
examples/WAF_Common_Attacks_Sample.py
1
5554
# Converted from AWS WAF Sample located at: # https://s3.amazonaws.com/cloudformation-examples/community/common-attacks.json from troposphere import ( Template, Parameter, Join, Ref ) from troposphere.waf import ( Rule, SqlInjectionMatchSet, WebACL, SizeConstraintSet, IPSet, XssMatchSet, Predicates, SqlInjectionMatchTuples, FieldToMatch, Action, Rules, SizeConstraint, XssMatchTuple ) t = Template() t.add_version("2010-09-09") t.set_description( "Creates an AWS WAF configuration that protects against common attacks" ) WebACLName = t.add_parameter(Parameter( "WebACLName", Default="CommonAttackProtection", Type="String", Description="Enter the name you want to use for the WebACL. " "This value is also added as a prefix for the names of the rules, " "conditions, and CloudWatch metrics created by this template.", )) SqliMatchSet = t.add_resource(SqlInjectionMatchSet( "SqliMatchSet", Name=Join("", [Ref(WebACLName), "SqliMatch"]), SqlInjectionMatchTuples=[ SqlInjectionMatchTuples( FieldToMatch=FieldToMatch( Type="QUERY_STRING" ), TextTransformation="URL_DECODE" ), SqlInjectionMatchTuples( FieldToMatch=FieldToMatch( Type="QUERY_STRING" ), TextTransformation="HTML_ENTITY_DECODE" ), SqlInjectionMatchTuples( FieldToMatch=FieldToMatch( Type="BODY" ), TextTransformation="URL_DECODE" ), SqlInjectionMatchTuples( FieldToMatch=FieldToMatch( Type="BODY" ), TextTransformation="HTML_ENTITY_DECODE" ), SqlInjectionMatchTuples( FieldToMatch=FieldToMatch( Type="URI" ), TextTransformation="URL_DECODE" ) ] )) SqliRule = t.add_resource(Rule( "SqliRule", Predicates=[ Predicates( DataId=Ref(SqliMatchSet), Type="SqlInjectionMatch", Negated=False ) ], Name=Join("", [Ref(WebACLName), "SqliRule"]), MetricName=Join("", [Ref(WebACLName), "SqliRule"]), )) XssMatchSet = t.add_resource(XssMatchSet( "XssMatchSet", Name=Join("", [Ref(WebACLName), "XssMatch"]), XssMatchTuples=[ XssMatchTuple( FieldToMatch=FieldToMatch( Type="QUERY_STRING", ), TextTransformation="URL_DECODE" ), XssMatchTuple( FieldToMatch=FieldToMatch( Type="QUERY_STRING", ), TextTransformation="HTML_ENTITY_DECODE" ), XssMatchTuple( FieldToMatch=FieldToMatch( Type="BODY", ), TextTransformation="URL_DECODE" ), XssMatchTuple( FieldToMatch=FieldToMatch( Type="BODY", ), TextTransformation="HTML_ENTITY_DECODE" ), XssMatchTuple( FieldToMatch=FieldToMatch( Type="URI", ), TextTransformation="URL_DECODE" ) ] )) XssRule = t.add_resource(Rule( "XssRule", Name=Join("", [Ref(WebACLName), "XssRule"]), Predicates=[ Predicates( DataId=Ref(XssMatchSet), Type="XssMatch", Negated=False ) ], MetricName=Join("", [Ref(WebACLName), "XssRule"]), )) WAFManualIPBlockSet = t.add_resource(IPSet( "WAFManualIPBlockSet", Name="Manual IP Block Set", )) ManualIPBlockRule = t.add_resource(Rule( "ManualIPBlockRule", Name=Join("", [Ref(WebACLName), "ManualIPBlockRule"]), MetricName=Join("", [Ref(WebACLName), "ManualIPBlockRule"]), Predicates=[ Predicates( DataId=Ref(WAFManualIPBlockSet), Type="IPMatch", Negated=False ) ] )) SizeMatchSet = t.add_resource(SizeConstraintSet( "SizeMatchSet", Name=Join("", [Ref(WebACLName), "LargeBodyMatch"]), SizeConstraints=[ SizeConstraint( ComparisonOperator="GT", TextTransformation="NONE", FieldToMatch=FieldToMatch( Type="BODY" ), Size="8192" ) ] )) SizeMatchRule = t.add_resource(Rule( "SizeMatchRule", Name=Join("", [Ref(WebACLName), "LargeBodyMatchRule"]), MetricName=Join("", [Ref(WebACLName), "DetectLargeBody"]), Predicates=[ Predicates( DataId=Ref(SizeMatchSet), Type="SizeConstraint", Negated=False ) ] )) MyWebACL = t.add_resource(WebACL( "MyWebACL", 
Name=Ref(WebACLName), DefaultAction=Action( Type="ALLOW" ), Rules=[ Rules( Action=Action( Type="BLOCK" ), Priority=1, RuleId=Ref(ManualIPBlockRule) ), Rules( Action=Action( Type="COUNT" ), Priority=2, RuleId=Ref(SizeMatchRule) ), Rules( Action=Action( Type="BLOCK" ), Priority=3, RuleId=Ref(SqliRule) ), Rules( Action=Action( Type="BLOCK" ), Priority=4, RuleId=Ref(XssRule) ) ], MetricName=Ref(WebACLName), )) print(t.to_json())
bsd-2-clause
-8,663,083,156,962,772,000
23.359649
80
0.541232
false
3.878492
false
false
false
jonlatorre/VideoCargador
video/models.py
1
1791
# encoding: utf-8 from django.db import models import os from mencoder import * class Video(models.Model): """This is a small demo using just two fields. The slug field is really not necessary, but makes the code simpler. ImageField depends on PIL or pillow (where Pillow is easily installable in a virtualenv. If you have problems installing pillow, use a more generic FileField instead. """ file = models.FileField(upload_to="uploaded_videos") slug = models.SlugField(max_length=50, blank=True) mp4_encoded = models.BooleanField(default=False) mp4_file = models.FileField(upload_to="converted_videos", blank=True) mp4_url = models.BooleanField(default=False) flv_encoded = models.BooleanField(default=False) flv_file = models.FileField(upload_to="converted_videos", blank=True) flv_url = models.BooleanField(default=False) def __unicode__(self): return self.file.name @models.permalink def get_absolute_url(self): return ('video-new', ) def save(self, *args, **kwargs): self.slug = self.file.name super(Video, self).save(*args, **kwargs) def delete(self, *args, **kwargs): """delete -- Remove to leave file.""" self.file.delete(False) super(Video, self).delete(*args, **kwargs) def encode_mp4(self): print "Vamos a convertir a mp4" destino = self.mp4_file.storage.base_location destino = os.path.join(destino,"converted_videos") ret,salida = call_mencoder_mp4(self.file.path,destino) if ret == 0: print "Codificacion OK" self.mp4_file.name = "converted_videos/"+salida self.mp4_encoded = True self.save() def upload_mp4(self): print "Subimos el MP4"
mit
5,213,071,513,827,455,000
35.55102
79
0.654941
false
3.589178
false
false
false
mrshu/scikit-learn
examples/plot_permutation_test_for_classification.py
1
2236
""" ================================================================= Test with permutations the significance of a classification score ================================================================= In order to test if a classification score is significative a technique in repeating the classification procedure after randomizing, permuting, the labels. The p-value is then given by the percentage of runs for which the score obtained is greater than the classification score obtained in the first place. """ # Author: Alexandre Gramfort <[email protected]> # License: BSD print __doc__ import numpy as np import pylab as pl from sklearn.svm import SVC from sklearn.cross_validation import StratifiedKFold, permutation_test_score from sklearn import datasets from sklearn.metrics import zero_one_score ############################################################################## # Loading a dataset iris = datasets.load_iris() X = iris.data y = iris.target n_classes = np.unique(y).size # Some noisy data not correlated random = np.random.RandomState(seed=0) E = random.normal(size=(len(X), 2200)) # Add noisy data to the informative features for make the task harder X = np.c_[X, E] svm = SVC(kernel='linear') cv = StratifiedKFold(y, 2) score, permutation_scores, pvalue = permutation_test_score( svm, X, y, zero_one_score, cv=cv, n_permutations=100, n_jobs=1) print "Classification score %s (pvalue : %s)" % (score, pvalue) ############################################################################### # View histogram of permutation scores pl.hist(permutation_scores, 20, label='Permutation scores') ylim = pl.ylim() # BUG: vlines(..., linestyle='--') fails on older versions of matplotlib #pl.vlines(score, ylim[0], ylim[1], linestyle='--', # color='g', linewidth=3, label='Classification Score' # ' (pvalue %s)' % pvalue) #pl.vlines(1.0 / n_classes, ylim[0], ylim[1], linestyle='--', # color='k', linewidth=3, label='Luck') pl.plot(2 * [score], ylim, '--g', linewidth=3, label='Classification Score' ' (pvalue %s)' % pvalue) pl.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Luck') pl.ylim(ylim) pl.legend() pl.xlabel('Score') pl.show()
bsd-3-clause
4,989,806,373,800,379,000
31.882353
79
0.61449
false
3.594855
false
false
false
codebikeclimb/NASARobotComp
Robot2017_Master/Robot2016/motorTest.py
1
2963
#!/usr/bin/python from Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor import serial import time import atexit #initialize i2c communication with motor shield roboMotor = Adafruit_MotorHAT(addr=0x60) #initialize serial communications with XBee RF reciever xBee = serial.Serial('/dev/ttyACM1',57600) compass = serial.Serial('/dev/ttyACM0', 9600) def turnOffMotors(): roboMotor.getMotor(3).run(Adafruit_MotorHAT.RELEASE) roboMotor.getMotor(4).run(Adafruit_MotorHAT.RELEASE) atexit.register(turnOffMotors) #create motor objects leftFrontRear = roboMotor.getMotor(3) rightFrontRear = roboMotor.getMotor(4) #set speed to start ---- 0(off) - 255(Max) #beacon navigation def beaconNavigation(): bHeadings = [] botHeadings = [] for x in range(0,2): botHeading = compass.readline() botHeading = float(botHeading) botHeadings.append(botHeading) print(botHeading) beaconHeading = xBee.readline() beaconHeading = float(beaconHeading) bHeadings.append(beaconHeading) print(beaconHeading) botTotal = sum(botHeadings) botLength = len(botHeadings) avgBotHeading = botTotal / botLength print "avg bot heading: ", avgBotHeading total = sum(bHeadings) l = len(bHeadings) avgHeading = total / l print "avg b heading: ", avgHeading #calculate opposite heading x = avgHeading + 180 oppositeHeading = x % 360 oppositeHeading = float(oppositeHeading) print "opposite beacon heading: ", oppositeHeading # while(botHeading <= oppositeHeading or botHeading >= oppositeHeading): while(botHeading < oppositeHeading or botHeading > oppositeHeading + 1.0): botHeading = compass.readline() botHeading = float(botHeading) print botHeading # rightRotate() forward() # toTheBeacon() #for x in range(0,20): # heading = xBee.readline() # botBearing = compass.readline() # print(heading) # print(botBearing) #drive forwards def forward(): # beaconNavigation() while(True): leftFrontRear.setSpeed(80) rightFrontRear.setSpeed(80) leftFrontRear.run(Adafruit_MotorHAT.FORWARD) rightFrontRear.run(Adafruit_MotorHAT.FORWARD) #drive backwards def reverse(): rightFrontRear.setSpeed(150) leftFrontRear.setSpeed(150) rightFrontRear.run(Adafruit_MotorHAT.BACKWARD) leftFrontRear.run(Adafruit_MotorHAT.BACKWARD) #rotate left, rotate right def leftRotate(): rightFrontRear.setSpeed(70) rightFrontRear.run(Adafruit_MotorHAT.FORWARD) def rightRotate(): leftFrontRear.setSpeed(90) rightFrontRear.setSpeed(90) leftFrontRear.run(Adafruit_MotorHAT.FORWARD) rightFrontRear.run(Adafruit_MotorHAT.BACKWARD) #turn left, turn right def leftTurn(): rightFrontRear.setSpeed(200) leftFrontRear.setSpeed(125) rightFrontRear.run(Adafruit_MotorHAT.FORWARD) leftFrontRear.run(Adafruit_MotorHAT.FORWARD) def rightTurn(): rightFrontRear.setSpeed(150) leftFrontRear.setSpeed(200) leftFrontRear.run(Adafruit_MotorHAT.FORWARD) rightFrontRear.run(Adafruit_MotorHAT.FORWARD) beaconNavigation() forward()
gpl-3.0
-6,703,005,234,935,376,000
21.792308
75
0.76949
false
2.746061
false
false
false
rzinkstok/skymap
skymap/labeling/runner.py
1
2495
import time import random from PIL import Image, ImageDraw from skymap.labeling.common import Point, BoundingBox, evaluate, POSITION_WEIGHT from skymap.labeling.greedy import GreedyLabeler, AdvancedGreedyLabeler from skymap.labeling.grasp import GraspLabeler from skymap.labeling.genetic import GeneticLabeler, CachedGeneticLabeler from deap import creator, base def draw(points, width, height): SCALE = 4 im = Image.new("RGB", (SCALE * width, SCALE * height), (255, 255, 255)) d = ImageDraw.Draw(im) for p in points: x = p.x * SCALE y = (height - p.y) * SCALE r = p.radius * SCALE if p.label is None: color = (200, 200, 200) else: color = "black" d.ellipse([x - r, y - r, x + r, y + r], fill=color) if p.label: x1 = p.label.minx * SCALE x2 = p.label.maxx * SCALE y1 = (height - p.label.miny) * SCALE y2 = (height - p.label.maxy) * SCALE if p.label.penalty > POSITION_WEIGHT * p.label.position: color = (256, 0, 0) else: color = (200, 200, 200) d.rectangle((x1, y1, x2, y2), outline=color) im.show() if __name__ == "__main__": print("Starting") random.seed(1) creator.create("FitnessMax", base.Fitness, weights=(1.0,)) creator.create("Individual", list, fitness=creator.FitnessMax) npoints = 1000 nlabels = 200 mapwidth = 500 mapheight = 500 bounding_box = BoundingBox(0, 0, mapwidth, mapheight) points = [] for i in range(npoints): x = mapwidth * random.random() y = mapheight * random.random() if random.random() < float(nlabels) / npoints: text = f"Label for point {i}" p = Point(x, y, 1, text, 0) else: p = Point(x, y, 1) points.append(p) method = 5 if method == 1: g = GreedyLabeler(points, bounding_box) elif method == 2: g = AdvancedGreedyLabeler(points, bounding_box) elif method == 3: g = GraspLabeler(points, bounding_box) elif method == 4: g = GeneticLabeler(points, bounding_box) elif method == 5: g = CachedGeneticLabeler(creator, points, bounding_box) t1 = time.clock() g.run() t2 = time.clock() print(f"Run time: {t2 - t1}") penalty = evaluate(g.points, g.bounding_box) print(f"Penalty: {penalty}") # draw(points, mapwidth, mapheight)
gpl-3.0
-6,816,814,384,139,079,000
27.678161
80
0.578357
false
3.198718
false
false
false
google/mannequinchallenge
loaders/aligned_data_loader.py
1
1933
# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch.utils.data from loaders import image_folder class DAVISDataLoader(): def __init__(self, list_path, _batch_size): dataset = image_folder.DAVISImageFolder(list_path=list_path) self.data_loader = torch.utils.data.DataLoader(dataset, batch_size=_batch_size, shuffle=False, num_workers=int(1)) self.dataset = dataset def load_data(self): return self.data_loader def name(self): return 'TestDataLoader' def __len__(self): return len(self.dataset) class TUMDataLoader(): def __init__(self, opt, list_path, is_train, _batch_size, num_threads): dataset = image_folder.TUMImageFolder(opt=opt, list_path=list_path) self.data_loader = torch.utils.data.DataLoader(dataset, batch_size=_batch_size, shuffle=False, num_workers=int(num_threads)) self.dataset = dataset def load_data(self): return self.data_loader def name(self): return 'TUMDataLoader' def __len__(self): return len(self.dataset)
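A minimal sketch of driving one of the loaders above; the list-file path and batch size are placeholders, and what each batch contains depends on image_folder.DAVISImageFolder, which is not shown here:

from loaders.aligned_data_loader import DAVISDataLoader

loader = DAVISDataLoader('test_davis_video_list.txt', 4)   # placeholder list file, batch size 4
print(loader.name(), len(loader))                          # 'TestDataLoader' and the sample count
for batch in loader.load_data():                           # plain torch DataLoader iteration
    pass                                                   # run the model on `batch` here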
apache-2.0
-2,236,632,270,222,474,800
34.796296
84
0.578893
false
4.423341
false
false
false
xemul/p.haul
phaul/connection.py
1
1171
# # p.haul connection module contain logic needed to establish connection # between p.haul and p.haul-service. # import logging import socket import util class connection(object): """p.haul connection Class encapsulate connections reqired for p.haul work, including rpc socket (socket for RPC calls), memory socket (socket for c/r images migration) and module specific definition of fs channel needed for disk migration. """ def __init__(self, rpc_sk, mem_sk, fdfs): self.rpc_sk = rpc_sk self.mem_sk = mem_sk self.fdfs = fdfs def close(self): self.rpc_sk.close() self.mem_sk.close() def establish(fdrpc, fdmem, fdfs): """Construct required socket objects from file descriptors Expect that each file descriptor represent socket opened in blocking mode with domain AF_INET and type SOCK_STREAM. """ logging.info( "Use existing connections, fdrpc=%d fdmem=%d fdfs=%s", fdrpc, fdmem, fdfs) # Create rpc socket rpc_sk = socket.fromfd(fdrpc, socket.AF_INET, socket.SOCK_STREAM) util.set_cloexec(rpc_sk) # Create memory socket mem_sk = socket.fromfd(fdmem, socket.AF_INET, socket.SOCK_STREAM) return connection(rpc_sk, mem_sk, fdfs)
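A short sketch of the calling convention for establish() above: the caller already holds two connected, blocking AF_INET/SOCK_STREAM sockets and hands over their descriptors, while the fs descriptor is module specific and passed through untouched. The addresses below are placeholders:

import socket

from phaul import connection

# Placeholders: in p.haul these sockets come out of the migration handshake.
rpc_sock = socket.create_connection(('192.0.2.1', 12345))
mem_sock = socket.create_connection(('192.0.2.1', 12346))

conn = connection.establish(rpc_sock.fileno(), mem_sock.fileno(), None)
# ... use conn.rpc_sk / conn.mem_sk, and conn.fdfs for the disk channel ...
conn.close()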
lgpl-2.1
-7,130,818,278,763,077,000
23.914894
76
0.733561
false
3.147849
false
false
false
AQORN/thunder-engine
thunder_web/api/views.py
1
1791
from django.shortcuts import render
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from task.models import Task
from api.serializers import TaskSerializer


@api_view(['GET', 'POST'])
def task_list(request):
    """
    List all tasks, or create a new task.
    """
    if request.method == 'GET':
        tasks = Task.objects.all()
        print tasks.query
        serializer = TaskSerializer(tasks, many=True)
        print tasks
        return Response(serializer.data)

    elif request.method == 'POST':
        serializer = TaskSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        else:
            return Response(
                serializer.errors, status=status.HTTP_400_BAD_REQUEST)


@api_view(['GET', 'PUT', 'DELETE'])
#@permission_classes((IsAuthenticated, ))
def task_detail(request, pk):
    """
    Get, update, or delete a specific task
    """
    try:
        task = Task.objects.get(pk=pk)
    except Task.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)

    if request.method == 'GET':
        serializer = TaskSerializer(task)
        return Response(serializer.data)

    elif request.method == 'PUT':
        serializer = TaskSerializer(task, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        else:
            return Response(
                serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    elif request.method == 'DELETE':
        task.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)

# Create your views here.
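A sketch of the URL wiring these views would need; the route patterns are guesses, only the view names come from the file above:

# urls.py sketch; paths are invented, the views are the ones defined above
from django.conf.urls import url

from api import views

urlpatterns = [
    url(r'^tasks/$', views.task_list),
    url(r'^tasks/(?P<pk>[0-9]+)/$', views.task_detail),
]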
gpl-3.0
-5,743,528,586,565,458,000
27.428571
76
0.638749
false
4.174825
false
false
false
PyBossa/pybossa
test/test_sched_depth_first_all.py
1
48364
# -*- coding: utf8 -*- # This file is part of PYBOSSA. # # Copyright (C) 2015 Scifabric LTD. # # PYBOSSA is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # PYBOSSA is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with PYBOSSA. If not, see <http://www.gnu.org/licenses/>. import json import random from mock import patch from helper import sched from default import Test, db, with_context from pybossa.model.task import Task from pybossa.model.project import Project from pybossa.model.user import User from pybossa.model.task_run import TaskRun from pybossa.model.category import Category from pybossa.sched import get_depth_first_all_task from pybossa.core import task_repo, project_repo from factories import TaskFactory, ProjectFactory, TaskRunFactory, UserFactory from factories import AnonymousTaskRunFactory, ExternalUidTaskRunFactory from factories import reset_all_pk_sequences import pybossa class TestSched(sched.Helper): endpoints = ['project', 'task', 'taskrun'] def get_headers_jwt(self, project): """Return headesr JWT token.""" # Get JWT token url = 'api/auth/project/%s/token' % project.short_name res = self.app.get(url, headers={'Authorization': project.secret_key}) authorization_token = 'Bearer %s' % res.data return {'Authorization': authorization_token} # Tests @with_context def test_anonymous_01_newtask(self): """ Test SCHED newtask returns a Task for the Anonymous User""" project = ProjectFactory.create(info=dict(sched='depth_first_all')) TaskFactory.create_batch(2, project=project, info='hola') res = self.app.get('api/project/%s/newtask' %project.id) data = json.loads(res.data) task_id = data['id'] assert data['info'] == 'hola', data taskrun = dict(project_id=data['project_id'], task_id=data['id'], info="hola") res = self.app.post('api/taskrun', data=json.dumps(taskrun)) res = self.app.get('api/project/%s/newtask' %project.id) data = json.loads(res.data) assert data['info'] == 'hola', data assert data['id'] != task_id, data @with_context def test_anonymous_01_newtask_limits(self): """ Test SCHED newtask returns a list of Tasks for the Anonymous User""" project = ProjectFactory.create(info=dict(sched='depth_first_all')) TaskFactory.create_batch(100, project=project, info='hola') url = 'api/project/%s/newtask?limit=100' % project.id res = self.app.get(url) data = json.loads(res.data) assert len(data) == 100 for t in data: assert t['info'] == 'hola', t task_ids = [task['id'] for task in data] task_ids = set(task_ids) assert len(task_ids) == 100, task_ids url = 'api/project/%s/newtask?limit=200' % project.id res = self.app.get(url) data = json.loads(res.data) assert len(data) == 100 for t in data: assert t['info'] == 'hola', t task_ids = [task['id'] for task in data] task_ids = set(task_ids) assert len(task_ids) == 100, task_ids @with_context def test_anonymous_02_gets_different_tasks(self): """ Test SCHED newtask returns N different Tasks for the Anonymous User""" assigned_tasks = [] # Get a Task until scheduler returns None project = ProjectFactory.create(info=dict(sched='depth_first_all')) tasks = TaskFactory.create_batch(3, 
project=project, info={}) res = self.app.get('api/project/%s/newtask' % project.id) data = json.loads(res.data) while data.get('info') is not None: # Save the assigned task assigned_tasks.append(data) task = db.session.query(Task).get(data['id']) # Submit an Answer for the assigned task tr = AnonymousTaskRunFactory.create(project=project, task=task) res = self.app.get('api/project/%s/newtask' %project.id) data = json.loads(res.data) # Check if we received the same number of tasks that the available ones assert len(assigned_tasks) == len(tasks), len(assigned_tasks) # Check if all the assigned Task.id are equal to the available ones err_msg = "Assigned Task not found in DB Tasks" for at in assigned_tasks: assert self.is_task(at['id'], tasks), err_msg # Check that there are no duplicated tasks err_msg = "One Assigned Task is duplicated" for at in assigned_tasks: assert self.is_unique(at['id'], assigned_tasks), err_msg @with_context def test_anonymous_02_gets_different_tasks_limits(self): """ Test SCHED newtask returns N different list of Tasks for the Anonymous User""" assigned_tasks = [] # Get a Task until scheduler returns None project = ProjectFactory.create(info=dict(sched='depth_first_all')) tasks = TaskFactory.create_batch(10, project=project, info={}) res = self.app.get('api/project/%s/newtask?limit=5' % project.id) data = json.loads(res.data) while len(data) > 0: # Save the assigned task for t in data: assigned_tasks.append(t) task = db.session.query(Task).get(t['id']) # Submit an Answer for the assigned task tr = AnonymousTaskRunFactory.create(project=project, task=task) res = self.app.get('api/project/%s/newtask?limit=5' % project.id) data = json.loads(res.data) # Check if we received the same number of tasks that the available ones assert len(assigned_tasks) == len(tasks), len(assigned_tasks) # Check if all the assigned Task.id are equal to the available ones err_msg = "Assigned Task not found in DB Tasks" for at in assigned_tasks: assert self.is_task(at['id'], tasks), err_msg # Check that there are no duplicated tasks err_msg = "One Assigned Task is duplicated" for at in assigned_tasks: assert self.is_unique(at['id'], assigned_tasks), err_msg @with_context def test_external_uid_02_gets_different_tasks(self): """ Test SCHED newtask returns N different Tasks for a external User ID.""" assigned_tasks = [] # Get a Task until scheduler returns None project = ProjectFactory.create(info=dict(sched='depth_first_all')) tasks = TaskFactory.create_batch(3, project=project, info={}) headers = self.get_headers_jwt(project) url = 'api/project/%s/newtask?external_uid=%s' % (project.id, '1xa') res = self.app.get(url, headers=headers) data = json.loads(res.data) while data.get('info') is not None: # Save the assigned task assigned_tasks.append(data) task = db.session.query(Task).get(data['id']) # Submit an Answer for the assigned task tr = ExternalUidTaskRunFactory.create(project=project, task=task) res = self.app.get(url, headers=headers) data = json.loads(res.data) # Check if we received the same number of tasks that the available ones assert len(assigned_tasks) == len(tasks), len(assigned_tasks) # Check if all the assigned Task.id are equal to the available ones err_msg = "Assigned Task not found in DB Tasks" for at in assigned_tasks: assert self.is_task(at['id'], tasks), err_msg # Check that there are no duplicated tasks err_msg = "One Assigned Task is duplicated" for at in assigned_tasks: assert self.is_unique(at['id'], assigned_tasks), err_msg # Check that there are task runs saved with the 
external UID answers = task_repo.filter_task_runs_by(external_uid='1xa') print answers err_msg = "There should be the same amount of task_runs than tasks" assert len(answers) == len(assigned_tasks), err_msg assigned_tasks_ids = sorted([at['id'] for at in assigned_tasks]) task_run_ids = sorted([a.task_id for a in answers]) err_msg = "There should be an answer for each assigned task" assert assigned_tasks_ids == task_run_ids, err_msg @with_context def test_external_uid_02_gets_different_tasks_limits(self): """ Test SCHED newtask returns N different list of Tasks for a external User ID.""" assigned_tasks = [] # Get a Task until scheduler returns None project = ProjectFactory.create(info=dict(sched='depth_first_all')) tasks = TaskFactory.create_batch(10, project=project, info={}) headers = self.get_headers_jwt(project) url = 'api/project/%s/newtask?limit=5&external_uid=%s' % (project.id, '1xa') res = self.app.get(url, headers=headers) data = json.loads(res.data) while len(data) > 0 : # Save the assigned task for t in data: assigned_tasks.append(t) task = db.session.query(Task).get(t['id']) # Submit an Answer for the assigned task tr = ExternalUidTaskRunFactory.create(project=project, task=task) res = self.app.get(url, headers=headers) data = json.loads(res.data) # Check if we received the same number of tasks that the available ones assert len(assigned_tasks) == len(tasks), len(assigned_tasks) # Check if all the assigned Task.id are equal to the available ones err_msg = "Assigned Task not found in DB Tasks" for at in assigned_tasks: assert self.is_task(at['id'], tasks), err_msg # Check that there are no duplicated tasks err_msg = "One Assigned Task is duplicated" for at in assigned_tasks: assert self.is_unique(at['id'], assigned_tasks), err_msg # Check that there are task runs saved with the external UID answers = task_repo.filter_task_runs_by(external_uid='1xa') print answers err_msg = "There should be the same amount of task_runs than tasks" assert len(answers) == len(assigned_tasks), err_msg assigned_tasks_ids = sorted([at['id'] for at in assigned_tasks]) task_run_ids = sorted([a.task_id for a in answers]) err_msg = "There should be an answer for each assigned task" assert assigned_tasks_ids == task_run_ids, err_msg @with_context def test_anonymous_03_respects_limit_tasks(self): """ Test SCHED newtask respects the limit of 10 TaskRuns per Task""" assigned_tasks = [] project = ProjectFactory.create(owner=UserFactory.create(id=500), info=dict(sched='depth_first_all')) user = UserFactory.create() task = TaskFactory.create(project=project, n_answers=10) tasks = get_depth_first_all_task(project.id, user.id) assert len(tasks) == 1, len(tasks) assert tasks[0].id == task.id, tasks assert tasks[0].state == 'ongoing', tasks for i in range(10): tr = TaskRun(project_id=project.id, task_id=task.id, user_ip='127.0.0.%s' % i) db.session.add(tr) db.session.commit() tasks = get_depth_first_all_task(project.id, user.id) assert len(tasks) == 1, len(tasks) assert tasks[0].id == task.id, tasks assert tasks[0].state == 'completed', tasks for i in range(10): tasks = get_depth_first_all_task(project.id, user_id=None, user_ip='127.0.0.%s' % i) assert len(tasks) == 0, tasks tr = TaskRun(project_id=project.id, task_id=task.id, user_id=user.id) db.session.add(tr) db.session.commit() tasks = get_depth_first_all_task(project.id, user.id) assert len(tasks) == 0, tasks @with_context def test_anonymous_03_respects_limit_tasks_limits(self): """ Test SCHED newtask limit respects the limit of 30 TaskRuns per Task using 
limits""" assigned_tasks = [] user = UserFactory.create() project = ProjectFactory.create(info=dict(sched='depth_first_all')) orig_tasks = TaskFactory.create_batch(2, project=project, n_answers=5) tasks = get_depth_first_all_task(project.id, user.id, limit=2) assert len(tasks) == 2, len(tasks) assert tasks[0].id == orig_tasks[0].id assert tasks[1].id == orig_tasks[1].id for i in range(5): tr = TaskRun(project_id=project.id, task_id=tasks[0].id, user_ip='127.0.0.%s' % i) db.session.add(tr) db.session.commit() # Task should be marked as completed, but as user has no # participated it should get the completed one as well. tasks = get_depth_first_all_task(project.id, user.id, limit=2, orderby='id', desc=False) assert len(tasks) == 2, len(tasks) assert tasks[0].id == orig_tasks[0].id, tasks[0] assert tasks[0].state == 'completed', tasks[0].state assert len(tasks[0].task_runs) == 5 assert tasks[1].id == orig_tasks[1].id assert tasks[1].state == 'ongoing', tasks[1].state assert len(tasks[1].task_runs) == 0 # User contributes, so only one task should be returned tr = TaskRun(project_id=project.id, task_id=tasks[0].id, user_id=user.id) db.session.add(tr) db.session.commit() tasks = get_depth_first_all_task(project.id, user.id, limit=2, orderby='id', desc=False) assert len(tasks) == 1, len(tasks) assert tasks[0].id == orig_tasks[1].id, tasks[0] assert tasks[0].state == 'ongoing', tasks[0].state assert len(tasks[0].task_runs) == 0 @with_context def test_external_uid_03_respects_limit_tasks(self): """ Test SCHED newtask external uid respects the limit of 30 TaskRuns per Task for external user id""" assigned_tasks = [] project = ProjectFactory.create(info=dict(sched='depth_first_all'), owner=UserFactory.create(id=500)) user = UserFactory.create() task = TaskFactory.create(project=project, n_answers=10) uid = '1xa' tasks = get_depth_first_all_task(project.id, external_uid=uid) assert len(tasks) == 1, len(tasks) assert tasks[0].id == task.id, tasks assert tasks[0].state == 'ongoing', tasks # Add taskruns for i in range(10): tr = TaskRun(project_id=project.id, task_id=task.id, user_ip='127.0.0.%s' % i) db.session.add(tr) db.session.commit() tasks = get_depth_first_all_task(project.id, external_uid=uid) assert len(tasks) == 1, len(tasks) assert tasks[0].id == task.id, tasks assert tasks[0].state == 'completed', tasks assert len(tasks[0].task_runs) == 10, tasks url = 'api/project/%s/newtask?external_uid=%s' % (project.id, uid) headers = self.get_headers_jwt(project) res = self.app.get(url, headers=headers) data = json.loads(res.data) assert data['id'] == task.id assert data['state'] == 'completed' tr = TaskRun(project_id=project.id, task_id=task.id, external_uid=uid) db.session.add(tr) db.session.commit() tasks = get_depth_first_all_task(project.id, external_uid=uid) assert len(tasks) == 0, len(tasks) res = self.app.get(url, headers=headers) data = json.loads(res.data) assert len(data) == 0, data @with_context def test_external_uid_03_respects_limit_tasks_limits(self): """ Test SCHED newtask external uid limits respects the limit of 30 TaskRuns per list of Tasks for external user id""" # Get Task until scheduler returns None project = ProjectFactory.create(info=dict(sched='depth_first_all')) orig_tasks = TaskFactory.create_batch(2, project=project, n_answers=5) headers = self.get_headers_jwt(project) uid = '1xa' url = 'api/project/%s/newtask?external_uid=%s&limit=2' % (project.id, uid) tasks = get_depth_first_all_task(project.id, external_uid=uid, limit=2) assert len(tasks) == 2, len(tasks) assert 
tasks[0].id == orig_tasks[0].id, tasks assert tasks[0].state == 'ongoing', tasks assert tasks[1].id == orig_tasks[1].id, tasks assert tasks[1].state == 'ongoing', tasks # Add taskruns for i in range(5): tr = TaskRun(project_id=project.id, task_id=tasks[0].id, user_ip='127.0.0.%s' % i) db.session.add(tr) db.session.commit() tasks = get_depth_first_all_task(project.id, external_uid=uid, limit=2, orderby='id', desc=False) assert len(tasks) == 2, len(tasks) assert tasks[0].id == orig_tasks[0].id, tasks assert tasks[0].state == 'completed', tasks assert len(tasks[0].task_runs) == 5, tasks assert tasks[1].id == orig_tasks[1].id, tasks assert tasks[1].state == 'ongoing', tasks assert len(tasks[1].task_runs) == 0, tasks url = 'api/project/%s/newtask?external_uid=%s&limit=2&orderby=id&desc=False' % (project.id,uid) headers = self.get_headers_jwt(project) res = self.app.get(url, headers=headers) data = json.loads(res.data) assert data[0]['id'] == orig_tasks[0].id assert data[0]['state'] == 'completed' assert data[1]['id'] == orig_tasks[1].id assert data[1]['state'] == 'ongoing' tr = TaskRun(project_id=project.id, task_id=tasks[0].id, external_uid=uid) db.session.add(tr) db.session.commit() tasks = get_depth_first_all_task(project.id, external_uid=uid, limit=2, orderby='id', desc=False) assert len(tasks) == 1, len(tasks) assert tasks[0].id == orig_tasks[1].id assert tasks[0].state == 'ongoing' res = self.app.get(url, headers=headers) data = json.loads(res.data) assert data['id'] == orig_tasks[1].id assert data['state'] == 'ongoing' @with_context def test_newtask_default_orderby(self): """Test SCHED depth first works with orderby.""" project = ProjectFactory.create(info=dict(sched="depth_first_all")) task1 = TaskFactory.create(project=project, fav_user_ids=None) task2 = TaskFactory.create(project=project, fav_user_ids=[1,2,3]) url = "/api/project/%s/newtask?orderby=%s&desc=%s" % (project.id, 'id', False) res = self.app.get(url) data = json.loads(res.data) assert data['id'] == task1.id, data url = "/api/project/%s/newtask?orderby=%s&desc=%s" % (project.id, 'id', True) res = self.app.get(url) data = json.loads(res.data) assert data['id'] == task2.id, data url = "/api/project/%s/newtask?orderby=%s&desc=%s" % (project.id, 'created', False) res = self.app.get(url) data = json.loads(res.data) assert data['id'] == task1.id, data url = "/api/project/%s/newtask?orderby=%s&desc=%s" % (project.id, 'created', True) res = self.app.get(url) data = json.loads(res.data) assert data['id'] == task2.id, data url = "/api/project/%s/newtask?orderby=%s&desc=%s" % (project.id, 'fav_user_ids', False) res = self.app.get(url) data = json.loads(res.data) assert data['id'] == task1.id, data url = "/api/project/%s/newtask?orderby=%s&desc=%s" % (project.id, 'fav_user_ids', True) res = self.app.get(url) data = json.loads(res.data) assert data['id'] == task2.id, data assert data['fav_user_ids'] == task2.fav_user_ids, data @with_context def test_user_01_newtask(self): """ Test SCHED newtask returns a Task for John Doe User""" project = ProjectFactory.create(info=dict(sched='depth_first_all'), owner=UserFactory.create(id=500)) TaskFactory.create_batch(2, project=project, n_answers=2) # Register self.register() self.signin() url = 'api/project/%s/newtask' % project.id res = self.app.get(url) data = json.loads(res.data) task_id = data['id'] assert data['id'], data taskrun = dict(project_id=data['project_id'], task_id=data['id'], info="hola") res = self.app.post('api/taskrun', data=json.dumps(taskrun)) res = self.app.get(url) data = 
json.loads(res.data) assert data['id'], data assert data['id'] != task_id, data self.signout() @with_context def test_user_01_newtask_limits(self): """ Test SCHED newtask returns a Task for John Doe User with limits""" self.register() self.signin() project = ProjectFactory.create(info=dict(sched='depth_first_all'), owner=UserFactory.create(id=500)) tasks = TaskFactory.create_batch(10, project=project, info=dict(foo=1)) # Register url = 'api/project/%s/newtask?limit=2' % project.id res = self.app.get(url) data = json.loads(res.data) assert len(data) == 2, data for t in data: assert t['info']['foo'] == 1, t self.signout() @with_context def test_user_02_gets_different_tasks(self): """ Test SCHED newtask returns N different Tasks for John Doe User""" project = ProjectFactory.create(info=dict(sched='depth_first_all'), owner=UserFactory.create(id=500)) TaskFactory.create_batch(10, project=project) # Register self.register() self.signin() assigned_tasks = [] # Get Task until scheduler returns None url = 'api/project/%s/newtask' % project.id res = self.app.get(url) data = json.loads(res.data) while data.get('id') is not None: # Check that we received a Task assert data.get('id'), data # Save the assigned task assigned_tasks.append(data) # Submit an Answer for the assigned task tr = dict(project_id=data['project_id'], task_id=data['id'], info={'answer': 'No'}) tr = json.dumps(tr) self.app.post('/api/taskrun', data=tr) res = self.app.get(url) data = json.loads(res.data) # Check if we received the same number of tasks that the available ones tasks = db.session.query(Task).filter_by(project_id=1).all() assert len(assigned_tasks) == len(tasks), assigned_tasks # Check if all the assigned Task.id are equal to the available ones tasks = db.session.query(Task).filter_by(project_id=1).all() err_msg = "Assigned Task not found in DB Tasks" for at in assigned_tasks: assert self.is_task(at['id'], tasks), err_msg # Check that there are no duplicated tasks err_msg = "One Assigned Task is duplicated" for at in assigned_tasks: assert self.is_unique(at['id'], assigned_tasks), err_msg @with_context def test_user_02_gets_different_tasks_limit(self): """ Test SCHED newtask returns N different list of Tasks for John Doe User""" # Register self.register() self.signin() project = ProjectFactory.create(info=dict(sched='depth_first_all'), owner=UserFactory.create(id=500)) TaskFactory.create_batch(10, project=project) assigned_tasks = [] # Get Task until scheduler returns None url = 'api/project/%s/newtask?limit=5' % project.id res = self.app.get(url) data = json.loads(res.data) while len(data) > 0: # Check that we received a Task for t in data: assert t.get('id'), t # Save the assigned task assigned_tasks.append(t) # Submit an Answer for the assigned task tr = dict(project_id=t['project_id'], task_id=t['id'], info={'answer': 'No'}) tr = json.dumps(tr) self.app.post('/api/taskrun', data=tr) res = self.app.get(url) data = json.loads(res.data) # Check if we received the same number of tasks that the available ones tasks = db.session.query(Task).filter_by(project_id=1).all() assert len(assigned_tasks) == len(tasks), assigned_tasks # Check if all the assigned Task.id are equal to the available ones tasks = db.session.query(Task).filter_by(project_id=1).all() err_msg = "Assigned Task not found in DB Tasks" for at in assigned_tasks: assert self.is_task(at['id'], tasks), err_msg # Check that there are no duplicated tasks err_msg = "One Assigned Task is duplicated" for at in assigned_tasks: assert self.is_unique(at['id'], 
assigned_tasks), err_msg @with_context def test_user_03_respects_limit_tasks(self): """ Test SCHED newtask respects the limit of 30 TaskRuns per Task""" project = ProjectFactory.create(info=dict(sched='depth_first_all'), owner=UserFactory.create(id=500)) orig_tasks = TaskFactory.create_batch(1, project=project, n_answers=10) user = UserFactory.create() tasks = get_depth_first_all_task(project.id, user.id) assert len(tasks) == 1, len(tasks) assert tasks[0].id == orig_tasks[0].id, tasks assert tasks[0].state == 'ongoing', tasks for i in range(10): tr = TaskRun(project_id=project.id, task_id=orig_tasks[0].id, user_ip='127.0.0.%s' % i) db.session.add(tr) db.session.commit() tasks = get_depth_first_all_task(project.id, user.id) assert len(tasks) == 1, len(tasks) assert tasks[0].id == orig_tasks[0].id, tasks assert tasks[0].state == 'completed', tasks assert len(tasks[0].task_runs) == 10, tasks tr = TaskRun(project_id=project.id, task_id=orig_tasks[0].id, user_id=user.id) db.session.add(tr) db.session.commit() tasks = get_depth_first_all_task(project.id, user.id) assert len(tasks) == 0, tasks @with_context def test_user_03_respects_limit_tasks_limit(self): """ Test SCHED limit arg newtask respects the limit of 30 TaskRuns per list of Tasks""" # Del previous TaskRuns assigned_tasks = [] project = ProjectFactory.create(info=dict(sched='depth_first_all'), owner=UserFactory.create(id=500)) user = UserFactory.create() orig_tasks = TaskFactory.create_batch(2, project=project, n_answers=10) tasks = get_depth_first_all_task(project.id, user.id, limit=2, orderby='id', desc=False) assert len(tasks) == 2, len(tasks) assert tasks[0].id == orig_tasks[0].id, tasks assert tasks[0].state == 'ongoing', tasks assert tasks[1].id == orig_tasks[1].id, tasks assert tasks[1].state == 'ongoing', tasks for i in range(10): tr = TaskRun(project_id=project.id, task_id=tasks[0].id, user_ip='127.0.0.%s' % i) db.session.add(tr) db.session.commit() tasks = get_depth_first_all_task(project.id, user.id, limit=2, orderby='id', desc=False) assert len(tasks) == 2, len(tasks) assert tasks[0].id == orig_tasks[0].id, tasks assert tasks[0].state == 'completed', tasks assert len(tasks[0].task_runs) == 10, tasks assert tasks[1].id == orig_tasks[1].id, tasks assert tasks[1].state == 'ongoing', tasks assert len(tasks[1].task_runs) == 0, tasks tr = TaskRun(project_id=project.id, task_id=tasks[0].id, user_id=user.id) db.session.add(tr) db.session.commit() tasks = get_depth_first_all_task(project.id, user.id, limit=2, orderby='id', desc=False) assert len(tasks) == 1, tasks assert tasks[0].id == orig_tasks[1].id assert tasks[0].state == 'ongoing' @with_context def test_task_preloading(self): """Test TASK Pre-loading works""" # Del previous TaskRuns project = ProjectFactory.create(info=dict(sched='depth_first_all'), owner=UserFactory.create(id=500)) TaskFactory.create_batch(10, project=project) # Register self.register() self.signin() assigned_tasks = [] # Get Task until scheduler returns None url = 'api/project/%s/newtask' % project.id res = self.app.get(url) task1 = json.loads(res.data) # Check that we received a Task assert task1.get('id'), task1 # Pre-load the next task for the user res = self.app.get(url + '?offset=1') task2 = json.loads(res.data) # Check that we received a Task assert task2.get('id'), task2 # Check that both tasks are different assert task1.get('id') != task2.get('id'), "Tasks should be different" ## Save the assigned task assigned_tasks.append(task1) assigned_tasks.append(task2) # Submit an Answer for the assigned and 
pre-loaded task for t in assigned_tasks: tr = dict(project_id=t['project_id'], task_id=t['id'], info={'answer': 'No'}) tr = json.dumps(tr) self.app.post('/api/taskrun', data=tr) # Get two tasks again res = self.app.get(url) task3 = json.loads(res.data) # Check that we received a Task assert task3.get('id'), task1 # Pre-load the next task for the user res = self.app.get(url + '?offset=1') task4 = json.loads(res.data) # Check that we received a Task assert task4.get('id'), task2 # Check that both tasks are different assert task3.get('id') != task4.get('id'), "Tasks should be different" assert task1.get('id') != task3.get('id'), "Tasks should be different" assert task2.get('id') != task4.get('id'), "Tasks should be different" # Check that a big offset returns None res = self.app.get(url + '?offset=11') assert json.loads(res.data) == {}, res.data @with_context def test_task_preloading_limit(self): """Test TASK Pre-loading with limit works""" # Register project = ProjectFactory.create(info=dict(sched='depth_first_all'), owner=UserFactory.create(id=500)) TaskFactory.create_batch(10, project=project) self.register() self.signin() assigned_tasks = [] url = 'api/project/%s/newtask?limit=2' % project.id res = self.app.get(url) tasks1 = json.loads(res.data) # Check that we received a Task for t in tasks1: assert t.get('id'), t # Pre-load the next tasks for the user res = self.app.get(url + '&offset=2') tasks2 = json.loads(res.data) # Check that we received a Task for t in tasks2: assert t.get('id'), t # Check that both tasks are different tasks1_ids = set([t['id'] for t in tasks1]) tasks2_ids = set([t['id'] for t in tasks2]) assert len(tasks1_ids.union(tasks2_ids)) == 4, "Tasks should be different" ## Save the assigned task for t in tasks1: assigned_tasks.append(t) for t in tasks2: assigned_tasks.append(t) # Submit an Answer for the assigned and pre-loaded task for t in assigned_tasks: tr = dict(project_id=t['project_id'], task_id=t['id'], info={'answer': 'No'}) tr = json.dumps(tr) self.app.post('/api/taskrun', data=tr) # Get two tasks again res = self.app.get(url) tasks3 = json.loads(res.data) # Check that we received a Task for t in tasks3: assert t.get('id'), t # Pre-load the next task for the user res = self.app.get(url + '&offset=2') tasks4 = json.loads(res.data) # Check that we received a Task for t in tasks4: assert t.get('id'), t # Check that both tasks are different tasks3_ids = set([t['id'] for t in tasks3]) tasks4_ids = set([t['id'] for t in tasks4]) assert len(tasks3_ids.union(tasks4_ids)) == 4, "Tasks should be different" # Check that a big offset returns None res = self.app.get(url + '&offset=11') assert json.loads(res.data) == {}, res.data @with_context def test_task_preloading_external_uid(self): """Test TASK Pre-loading for external user IDs works""" project = ProjectFactory.create(info=dict(sched='depth_first_all'), owner=UserFactory.create(id=500)) TaskFactory.create_batch(10, project=project) assigned_tasks = [] # Get Task until scheduler returns None project = project_repo.get(1) headers = self.get_headers_jwt(project) url = 'api/project/%s/newtask?external_uid=2xb' % project.id res = self.app.get(url, headers=headers) task1 = json.loads(res.data) # Check that we received a Task assert task1.get('id'), task1 # Pre-load the next task for the user res = self.app.get(url + '&offset=1', headers=headers) task2 = json.loads(res.data) # Check that we received a Task assert task2.get('id'), task2 # Check that both tasks are different assert task1.get('id') != task2.get('id'), "Tasks 
should be different" ## Save the assigned task assigned_tasks.append(task1) assigned_tasks.append(task2) # Submit an Answer for the assigned and pre-loaded task for t in assigned_tasks: tr = dict(project_id=t['project_id'], task_id=t['id'], info={'answer': 'No'}, external_uid='2xb') tr = json.dumps(tr) res = self.app.post('/api/taskrun?external_uid=2xb', data=tr, headers=headers) # Get two tasks again res = self.app.get(url, headers=headers) task3 = json.loads(res.data) # Check that we received a Task assert task3.get('id'), task1 # Pre-load the next task for the user res = self.app.get(url + '&offset=1', headers=headers) task4 = json.loads(res.data) # Check that we received a Task assert task4.get('id'), task2 # Check that both tasks are different assert task3.get('id') != task4.get('id'), "Tasks should be different" assert task1.get('id') != task3.get('id'), "Tasks should be different" assert task2.get('id') != task4.get('id'), "Tasks should be different" # Check that a big offset returns None res = self.app.get(url + '&offset=11', headers=headers) assert json.loads(res.data) == {}, res.data @with_context def test_task_preloading_external_uid_limit(self): """Test TASK Pre-loading for external user IDs works with limit""" # Del previous TaskRuns project = ProjectFactory.create(info=dict(sched='depth_first_all'), owner=UserFactory.create(id=500)) TaskFactory.create_batch(10, project=project) assigned_tasks = [] # Get Task until scheduler returns None headers = self.get_headers_jwt(project) url = 'api/project/%s/newtask?external_uid=2xb&limit=2' % project.id res = self.app.get(url, headers=headers) tasks1 = json.loads(res.data) # Check that we received a Task for t in tasks1: assert t.get('id'), task1 # Pre-load the next task for the user res = self.app.get(url + '&offset=2', headers=headers) tasks2 = json.loads(res.data) # Check that we received a Task for t in tasks2: assert t.get('id'), t # Check that both tasks are different tasks1_ids = set([task['id'] for task in tasks1]) tasks2_ids = set([task['id'] for task in tasks2]) assert len(tasks1_ids.union(tasks2_ids)) == 4, "Tasks should be different" ## Save the assigned task for t in tasks1: assigned_tasks.append(t) for t in tasks2: assigned_tasks.append(t) # Submit an Answer for the assigned and pre-loaded task for t in assigned_tasks: tr = dict(project_id=t['project_id'], task_id=t['id'], info={'answer': 'No'}, external_uid='2xb') tr = json.dumps(tr) res = self.app.post('/api/taskrun?external_uid=2xb', data=tr, headers=headers) # Get two tasks again res = self.app.get(url, headers=headers) tasks3 = json.loads(res.data) # Check that we received a Task for t in tasks3: assert t.get('id'), t # Pre-load the next task for the user res = self.app.get(url + '&offset=2', headers=headers) tasks4 = json.loads(res.data) # Check that we received a Task for t in tasks4: assert t.get('id'), t # Check that both tasks are different tasks3_ids = set([task['id'] for task in tasks3]) tasks4_ids = set([task['id'] for task in tasks4]) assert len(tasks3_ids.union(tasks4_ids)) == 4, "Tasks should be different" # Check that a big offset returns None res = self.app.get(url + '&offset=11', headers=headers) assert json.loads(res.data) == {}, res.data @with_context def test_task_priority(self): """Test SCHED respects priority_0 field""" project = ProjectFactory.create(info=dict(sched='depth_first_all'), owner=UserFactory.create(id=500)) TaskFactory.create_batch(10, project=project) # Register self.register() self.signin() # By default, tasks without priority should 
be ordered by task.id (FIFO) tasks = db.session.query(Task).filter_by(project_id=1).order_by('id').all() url = 'api/project/%s/newtask' % project.id res = self.app.get(url) task1 = json.loads(res.data) # Check that we received a Task err_msg = "Task.id should be the same" assert task1.get('id') == tasks[0].id, err_msg # Now let's change the priority to a random task import random t = random.choice(tasks) # Increase priority to maximum t.priority_0 = 1 db.session.add(t) db.session.commit() # Request again a new task res = self.app.get(url + '?orderby=priority_0&desc=true') task1 = json.loads(res.data) # Check that we received a Task err_msg = "Task.id should be the same" assert task1.get('id') == t.id, err_msg err_msg = "Task.priority_0 should be the 1" assert task1.get('priority_0') == 1, err_msg @with_context def test_task_priority_limit(self): """Test SCHED respects priority_0 field with limit""" project = ProjectFactory.create(info=dict(sched='depth_first_all'), owner=UserFactory.create(id=500)) TaskFactory.create_batch(10, project=project) # Register self.register() self.signin() # By default, tasks without priority should be ordered by task.id (FIFO) tasks = db.session.query(Task).filter_by(project_id=project.id).order_by('id').all() url = 'api/project/%s/newtask?limit=2' % project.id res = self.app.get(url) tasks1 = json.loads(res.data) # Check that we received a Task err_msg = "Task.id should be the same" assert tasks1[0].get('id') == tasks[0].id, err_msg # Now let's change the priority to a random task import random t = random.choice(tasks) # Increase priority to maximum t.priority_0 = 1 db.session.add(t) db.session.commit() # Request again a new task res = self.app.get(url + '&orderby=priority_0&desc=true') tasks1 = json.loads(res.data) # Check that we received a Task err_msg = "Task.id should be the same" assert tasks1[0].get('id') == t.id, (err_msg, tasks1[0]) err_msg = "Task.priority_0 should be the 1" assert tasks1[0].get('priority_0') == 1, err_msg @with_context def test_task_priority_external_uid(self): """Test SCHED respects priority_0 field for externa uid""" project = ProjectFactory.create(info=dict(sched='depth_first_all'), owner=UserFactory.create(id=500)) TaskFactory.create_batch(10, project=project) # By default, tasks without priority should be ordered by task.id (FIFO) tasks = db.session.query(Task).filter_by(project_id=1).order_by('id').all() project = project_repo.get(1) headers = self.get_headers_jwt(project) url = 'api/project/%s/newtask?external_uid=342' % project.id res = self.app.get(url, headers=headers) task1 = json.loads(res.data) # Check that we received a Task err_msg = "Task.id should be the same" assert task1.get('id') == tasks[0].id, err_msg # Now let's change the priority to a random task import random t = random.choice(tasks) # Increase priority to maximum t.priority_0 = 1 db.session.add(t) db.session.commit() # Request again a new task res = self.app.get(url + '&orderby=priority_0&desc=true', headers=headers) task1 = json.loads(res.data) # Check that we received a Task err_msg = "Task.id should be the same" assert task1.get('id') == t.id, (err_msg, task1, t) err_msg = "Task.priority_0 should be the 1" assert task1.get('priority_0') == 1, err_msg @with_context def test_task_priority_external_uid_limit(self): """Test SCHED respects priority_0 field for externa uid with limit""" project = ProjectFactory.create(info=dict(sched='depth_first_all'), owner=UserFactory.create(id=500)) TaskFactory.create_batch(10, project=project) # By default, tasks without 
priority should be ordered by task.id (FIFO) tasks = db.session.query(Task).filter_by(project_id=project.id).order_by('id').all() headers = self.get_headers_jwt(project) url = 'api/project/%s/newtask?external_uid=342&limit=2' % project.id res = self.app.get(url, headers=headers) tasks1 = json.loads(res.data) # Check that we received a Task err_msg = "Task.id should be the same" assert tasks1[0].get('id') == tasks[0].id, err_msg # Now let's change the priority to a random task import random t = random.choice(tasks) # Increase priority to maximum t.priority_0 = 1 db.session.add(t) db.session.commit() # Request again a new task res = self.app.get(url + '&orderby=priority_0&desc=true', headers=headers) tasks1 = json.loads(res.data) # Check that we received a Task err_msg = "Task.id should be the same" assert tasks1[0].get('id') == t.id, err_msg err_msg = "Task.priority_0 should be the 1" assert tasks1[0].get('priority_0') == 1, err_msg def _add_task_run(self, app, task, user=None): tr = AnonymousTaskRunFactory.create(project=app, task=task) @with_context def test_no_more_tasks(self): """Test that a users gets always tasks""" owner = UserFactory.create() project = ProjectFactory.create(info=dict(sched='depth_first_all'), owner=owner, short_name='egil', name='egil', description='egil') project_id = project.id all_tasks = TaskFactory.create_batch(20, project=project, n_answers=10) for t in all_tasks[0:10]: TaskRunFactory.create_batch(10, task=t, project=project) tasks = db.session.query(Task).filter_by(project_id=project.id, state='ongoing').all() assert tasks[0].n_answers == 10 url = 'api/project/%s/newtask' % project.id res = self.app.get(url) data = json.loads(res.data) err_msg = "User should get a task" assert 'project_id' in data.keys(), err_msg assert data['project_id'] == project_id, err_msg assert data['id'] == all_tasks[0].id, err_msg assert data['state'] == 'completed', err_msg @with_context def test_no_more_tasks_limit(self): """Test that a users gets always tasks with limit""" owner = UserFactory.create() project = ProjectFactory.create(info=dict(sched='depth_first_all'), owner=owner, short_name='egil', name='egil', description='egil') project_id = project.id all_tasks = TaskFactory.create_batch(20, project=project, n_answers=10) for t in all_tasks[0:10]: TaskRunFactory.create_batch(10, task=t, project=project) tasks = db.session.query(Task).filter_by(project_id=project.id, state='ongoing').all() assert tasks[0].n_answers == 10 url = 'api/project/%s/newtask?limit=2&orderby=id' % project_id res = self.app.get(url) data = json.loads(res.data) err_msg = "User should get a task" i = 0 for t in data: print t['id'] assert 'project_id' in t.keys(), err_msg assert t['project_id'] == project_id, err_msg assert t['id'] == all_tasks[i].id, (err_msg, t, all_tasks[i].id) assert t['state'] == 'completed', err_msg i += 1
agpl-3.0
3,603,119,875,285,052,000
39.744735
106
0.57549
false
3.753512
true
false
false
alxnov/ansible-modules-core
cloud/amazon/ec2_vol.py
1
22132
#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- module: ec2_vol short_description: create and attach a volume, return volume id and device map description: - creates an EBS volume and optionally attaches it to an instance. If both an instance ID and a device name is given and the instance has a device at the device name, then no volume is created and no attachment is made. This module has a dependency on python-boto. version_added: "1.1" options: instance: description: - instance ID if you wish to attach the volume. Since 1.9 you can set to None to detach. required: false default: null name: description: - volume Name tag if you wish to attach an existing volume (requires instance) required: false default: null version_added: "1.6" id: description: - volume id if you wish to attach an existing volume (requires instance) or remove an existing volume required: false default: null version_added: "1.6" volume_size: description: - size of volume (in GB) to create. required: false default: null volume_type: description: - Type of EBS volume; standard (magnetic), gp2 (SSD), io1 (Provisioned IOPS). "Standard" is the old EBS default and continues to remain the Ansible default for backwards compatibility. required: false default: standard version_added: "1.9" iops: description: - the provisioned IOPs you want to associate with this volume (integer). required: false default: 100 version_added: "1.3" encrypted: description: - Enable encryption at rest for this volume. default: false version_added: "1.8" device_name: description: - device id to override device mapping. Assumes /dev/sdf for Linux/UNIX and /dev/xvdf for Windows. Can figure out a free device_name if device_name is a string with {}, {X}, {N} templates. Template syntax: {},{X} is a character in the [f-p] range, {N} is the character in the [1-6] range, according to EBS attachment notation docs here: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html . required: false default: null delete_on_termination: description: - When set to "yes", the volume will be deleted upon instance termination. required: false default: "no" choices: ["yes", "no"] version_added: "2.1" zone: description: - zone in which to create the volume, if unset uses the zone the instance is in (if set) required: false default: null aliases: ['aws_zone', 'ec2_zone'] snapshot: description: - snapshot ID on which to base the volume required: false default: null version_added: "1.5" validate_certs: description: - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0. required: false default: "yes" choices: ["yes", "no"] version_added: "1.5" state: description: - whether to ensure the volume is present or absent, or to list existing volumes (The C(list) option was added in version 1.8). 
required: false default: present choices: ['absent', 'present', 'list'] version_added: "1.6" author: "Lester Wade (@lwade)" extends_documentation_fragment: - aws - ec2 ''' EXAMPLES = ''' # Simple attachment action - ec2_vol: instance: XXXXXX volume_size: 5 device_name: sdd # Example using custom iops params - ec2_vol: instance: XXXXXX volume_size: 5 iops: 100 device_name: sdd # Example using snapshot id - ec2_vol: instance: XXXXXX snapshot: "{{ snapshot }}" # Playbook example combined with instance launch - ec2: keypair: "{{ keypair }}" image: "{{ image }}" wait: yes count: 3 register: ec2 - ec2_vol: instance: "{{ item.id }} " volume_size: 5 with_items: ec2.instances register: ec2_vol # Example: Launch an instance and then add a volume if not already attached # * Volume will be created with the given name if not already created. # * Nothing will happen if the volume is already attached. # * Requires Ansible 2.0 - ec2: keypair: "{{ keypair }}" image: "{{ image }}" zone: YYYYYY id: my_instance wait: yes count: 1 register: ec2 - ec2_vol: instance: "{{ item.id }}" name: my_existing_volume_Name_tag device_name: /dev/xvdf with_items: ec2.instances register: ec2_vol # Example: Launch an instance and then add a volume if not already attached # * Volume will be created with the given name if not already created. # * Volume will pick the first free /dev/xvd* slot according to template. # * Nothing will happen if the volume is already attached. # * Requires Ansible 2.0 - ec2_vol: instance: "{{ item.id }}" name: my_existing_volume_Name_tag device_name: /dev/xvd{} with_items: ec2.instances register: ec2_vol # Remove a volume - ec2_vol: id: vol-XXXXXXXX state: absen # Detach a volume (since 1.9) - ec2_vol: id: vol-XXXXXXXX instance: None # List volumes for an instance - ec2_vol: instance: i-XXXXXX state: list # Create new volume using SSD storage - ec2_vol: instance: XXXXXX volume_size: 50 volume_type: gp2 device_name: /dev/xvdf # Attach an existing volume to instance. The volume will be deleted upon instance termination. 
- ec2_vol: instance: XXXXXX id: XXXXXX device_name: /dev/sdf delete_on_termination: yes ''' RETURN = ''' device: description: device name of attached volume returned: when success type: string sample: "/def/sdf" volume_id: description: the id of volume returned: when success type: string sample: "vol-35b333d9" volume_type: description: the volume type returned: when success type: string sample: "standard" volume: description: a dictionary containing detailed attributes of the volume returned: when success type: string sample: { "attachment_set": { "attach_time": "2015-10-23T00:22:29.000Z", "deleteOnTermination": "false", "device": "/dev/sdf", "instance_id": "i-8356263c", "status": "attached" }, "create_time": "2015-10-21T14:36:08.870Z", "encrypted": false, "id": "vol-35b333d9", "iops": null, "size": 1, "snapshot_id": "", "status": "in-use", "tags": { "env": "dev" }, "type": "standard", "zone": "us-east-1b" } ''' import time from distutils.version import LooseVersion try: import boto.ec2 from boto.exception import BotoServerError from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping HAS_BOTO = True except ImportError: HAS_BOTO = False def get_volume(module, ec2): name = module.params.get('name') id = module.params.get('id') zone = module.params.get('zone') filters = {} volume_ids = None # If no name or id supplied, just try volume creation based on module parameters if id is None and name is None: return None if zone: filters['availability_zone'] = zone if name: filters = {'tag:Name': name} if id: volume_ids = [id] try: vols = ec2.get_all_volumes(volume_ids=volume_ids, filters=filters) except boto.exception.BotoServerError, e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) if not vols: if id: msg = "Could not find the volume with id: %s" % id if name: msg += (" and name: %s" % name) module.fail_json(msg=msg) else: return None if len(vols) > 1: module.fail_json(msg="Found more than one volume in zone (if specified) with name: %s" % name) return vols[0] def get_volumes(module, ec2): instance = module.params.get('instance') try: if not instance: vols = ec2.get_all_volumes() else: vols = ec2.get_all_volumes(filters={'attachment.instance-id': instance}) except boto.exception.BotoServerError, e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) return vols def delete_volume(module, ec2): volume_id = module.params['id'] try: ec2.delete_volume(volume_id) module.exit_json(changed=True) except boto.exception.EC2ResponseError as ec2_error: if ec2_error.code == 'InvalidVolume.NotFound': module.exit_json(changed=False) module.fail_json(msg=ec2_error.message) def boto_supports_volume_encryption(): """ Check if Boto library supports encryption of EBS volumes (added in 2.29.0) Returns: True if boto library has the named param as an argument on the request_spot_instances method, else False """ return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0') def create_volume(module, ec2, zone): changed = False name = module.params.get('name') iops = module.params.get('iops') encrypted = module.params.get('encrypted') volume_size = module.params.get('volume_size') volume_type = module.params.get('volume_type') snapshot = module.params.get('snapshot') # If custom iops is defined we use volume_type "io1" rather than the default of "standard" if iops: volume_type = 'io1' volume = get_volume(module, ec2) if volume is None: try: if boto_supports_volume_encryption(): volume = ec2.create_volume(volume_size, zone, snapshot, 
volume_type, iops, encrypted) changed = True else: volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops) changed = True while volume.status != 'available': time.sleep(3) volume.update() if name: ec2.create_tags([volume.id], {"Name": name}) except boto.exception.BotoServerError, e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) return volume, changed # See: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html # http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/device_naming.html # DEVICE_LETTER_RANGE = tuple('fghijklmnop') DEVICE_NUMBER_RANGE = tuple('123456') ALL_DEVICE_NAME_TEMPLATES = ('{}', '{X}', '{N}') def get_default_device_name_template(instance, ec2): """ Try to infer whether to use Windows or linux device name pattern. Use instance.platform, password_data as indicators that instance is a Windows machine. """ instance.update() if (instance.platform or '').lower() == 'windows': device_name = '/dev/xvd{}' elif ec2.get_password_data(instance.id): device_name = '/dev/xvd{}' else: device_name = '/dev/sd{}' return device_name def is_device_name_templated(device_name): return any(t in device_name for t in ALL_DEVICE_NAME_TEMPLATES) def get_next_device_name_from_template(device_name, module, ec2): """ Look at already attached volumes and device_name template, and return the next free device name in alphabetical order """ volumes = get_volumes(module, ec2) # python 2.6 str.format does not like unnamed items in templates device_name = device_name.replace('{}', '{X}') dev_choice_set = set( device_name.format(X=c, N=n) for c in DEVICE_LETTER_RANGE for n in DEVICE_NUMBER_RANGE ) dev_busy_set = set(v.attach_data.device for v in volumes) dev_choices_left = sorted(dev_choice_set.difference(dev_busy_set)) if 0 == len(dev_choices_left): module.fail_json(msg="Cant attach %s to %s: all /dev/ EBS devices busy" % (volume.id, instance), changed=True) device_name = dev_choices_left[0] return device_name def attach_volume(module, ec2, volume, instance): device_name = module.params.get('device_name') delete_on_termination = module.params.get('delete_on_termination') changed = False # If device_name isn't set, make a choice based on best practices here: # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html # In future this needs to be more dynamic but combining block device mapping best practices # (bounds for devices, as above) with instance.block_device_mapping data would be tricky. 
For me ;) # Use password data attribute to tell whether the instance is Windows or Linux if device_name is None: try: device_name = get_default_device_name_template(instance, ec2) except boto.exception.BotoServerError, e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) if volume.attachment_state() is not None: adata = volume.attach_data if adata.instance_id != instance.id: module.fail_json(msg = "Volume %s is already attached to another instance: %s" % (volume.id, adata.instance_id)) else: # If device_name is a template to grab an available spot, # bring it into consistency with actual attachment data device_name = adata.device # Volume is already attached to right instance changed = modify_dot_attribute(module, ec2, instance, device_name) else: if is_device_name_templated(device_name): t = device_name device_name = get_next_device_name_from_template(t, module, ec2) try: volume.attach(instance.id, device_name) while volume.attachment_state() != 'attached': time.sleep(3) volume.update() changed = True except boto.exception.BotoServerError, e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) modify_dot_attribute(module, ec2, instance, device_name) return volume, changed def modify_dot_attribute(module, ec2, instance, device_name): """ Modify delete_on_termination attribute """ delete_on_termination = module.params.get('delete_on_termination') changed = False try: instance.update() dot = instance.block_device_mapping[device_name].delete_on_termination except boto.exception.BotoServerError, e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) if delete_on_termination != dot: try: bdt = BlockDeviceType(delete_on_termination=delete_on_termination) bdm = BlockDeviceMapping() bdm[device_name] = bdt ec2.modify_instance_attribute(instance_id=instance.id, attribute='blockDeviceMapping', value=bdm) while instance.block_device_mapping[device_name].delete_on_termination != delete_on_termination: time.sleep(3) instance.update() changed = True except boto.exception.BotoServerError, e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) return changed def detach_volume(module, ec2, volume): changed = False if volume.attachment_state() is not None: adata = volume.attach_data volume.detach() while volume.attachment_state() is not None: time.sleep(3) volume.update() changed = True return volume, changed def get_volume_info(volume, state): # If we're just listing volumes then do nothing, else get the latest update for the volume if state != 'list': volume.update() volume_info = {} attachment = volume.attach_data volume_info = { 'create_time': volume.create_time, 'encrypted': volume.encrypted, 'id': volume.id, 'iops': volume.iops, 'size': volume.size, 'snapshot_id': volume.snapshot_id, 'status': volume.status, 'type': volume.type, 'zone': volume.zone, 'attachment_set': { 'attach_time': attachment.attach_time, 'device': attachment.device, 'instance_id': attachment.instance_id, 'status': attachment.status }, 'tags': volume.tags } if hasattr(attachment, 'deleteOnTermination'): volume_info['attachment_set']['deleteOnTermination'] = attachment.deleteOnTermination return volume_info def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( instance = dict(), id = dict(), name = dict(), volume_size = dict(), volume_type = dict(choices=['standard', 'gp2', 'io1'], default='standard'), iops = dict(), encrypted = dict(type='bool', default=False), device_name = dict(), delete_on_termination = dict(type='bool', default=False), zone = 
dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']), snapshot = dict(), state = dict(choices=['absent', 'present', 'list'], default='present') ) ) module = AnsibleModule(argument_spec=argument_spec) if not HAS_BOTO: module.fail_json(msg='boto required for this module') id = module.params.get('id') name = module.params.get('name') instance = module.params.get('instance') volume_size = module.params.get('volume_size') encrypted = module.params.get('encrypted') device_name = module.params.get('device_name') zone = module.params.get('zone') snapshot = module.params.get('snapshot') state = module.params.get('state') # Ensure we have the zone or can get the zone if instance is None and zone is None and state == 'present': module.fail_json(msg="You must specify either instance or zone") # Set volume detach flag if instance == 'None' or instance == '': instance = None detach_vol_flag = True else: detach_vol_flag = False # Set changed flag changed = False region, ec2_url, aws_connect_params = get_aws_connection_info(module) if region: try: ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params) except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: module.fail_json(msg=str(e)) else: module.fail_json(msg="region must be specified") if state == 'list': returned_volumes = [] vols = get_volumes(module, ec2) for v in vols: attachment = v.attach_data returned_volumes.append(get_volume_info(v, state)) module.exit_json(changed=False, volumes=returned_volumes) if encrypted and not boto_supports_volume_encryption(): module.fail_json(msg="You must use boto >= v2.29.0 to use encrypted volumes") # Here we need to get the zone info for the instance. This covers situation where # instance is specified but zone isn't. # Useful for playbooks chaining instance launch with volume create + attach and where the # zone doesn't matter to the user. inst = None if instance: try: reservation = ec2.get_all_instances(instance_ids=instance) except BotoServerError as e: module.fail_json(msg=e.message) inst = reservation[0].instances[0] zone = inst.placement # Check if there is a volume already mounted there. 
if device_name: if device_name in inst.block_device_mapping: module.exit_json(msg="Volume mapping for %s already exists on instance %s" % (device_name, instance), volume_id=inst.block_device_mapping[device_name].volume_id, device=device_name, changed=False) # Delaying the checks until after the instance check allows us to get volume ids for existing volumes # without needing to pass an unused volume_size if not volume_size and not (id or name or snapshot): module.fail_json(msg="You must specify volume_size or identify an existing volume by id, name, or snapshot") # Cannot resize existing volumes, but can make a new volume of larger size # from snapshot if volume_size and id: module.fail_json(msg="Cannot specify volume_size together with id") if state == 'present': volume, changed = create_volume(module, ec2, zone) if detach_vol_flag: volume, changed = detach_volume(module, ec2, volume) elif inst is not None: volume, changed = attach_volume(module, ec2, volume, inst) # Add device, volume_id and volume_type parameters separately to maintain backward compatability volume_info = get_volume_info(volume, state) module.exit_json(changed=changed, volume=volume_info, device=volume_info['attachment_set']['device'], volume_id=volume_info['id'], volume_type=volume_info['type']) elif state == 'absent': delete_volume(module, ec2) # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * if __name__ == '__main__': main()
gpl-3.0
-8,963,196,909,828,346,000
32.482602
414
0.628682
false
3.838363
false
false
false
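A rough standalone sketch of the create-and-attach flow this ec2_vol module wraps, assuming boto 2.x is installed and AWS credentials are configured; the region, volume size, zone, instance id and device name below are placeholders, not values taken from the module.

import boto.ec2

# connect to a region (returns an EC2Connection, or None for an unknown region name)
conn = boto.ec2.connect_to_region('us-east-1')

# create a 10 GiB volume in one availability zone, then attach it to an instance
vol = conn.create_volume(10, 'us-east-1a')
conn.attach_volume(vol.id, 'i-0123456789abcdef0', '/dev/sdf')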
tjcsl/cslbot
cslbot/commands/metar.py
1
2400
# -*- coding: utf-8 -*- # Copyright (C) 2013-2018 Samuel Damashek, Peter Foley, James Forcier, Srijay Kasturi, Reed Koser, Christopher Reffett, and Tris Wilson # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. from xml.etree import ElementTree from requests import get from ..helpers import arguments from ..helpers.command import Command @Command(['metar'], ['nick', 'config', 'db', 'name', 'source', 'handler']) def cmd(send, msg, args): """Gets the weather. Syntax: {command} <station> [station2...] """ parser = arguments.ArgParser(args['config']) parser.add_argument('stations', nargs='*') try: cmdargs = parser.parse_args(msg) except arguments.ArgumentException as e: send(str(e)) return if not cmdargs.stations: send("What station?") return if isinstance(cmdargs.stations, list): cmdargs.stations = ','.join(cmdargs.stations) req = get('http://aviationweather.gov/adds/dataserver_current/httpparam', params={ 'datasource': 'metars', 'requestType': 'retrieve', 'format': 'xml', 'mostRecentForEachStation': 'constraint', 'hoursBeforeNow': '1.25', 'stationString': cmdargs.stations }) xml = ElementTree.fromstring(req.text) errors = xml.find('./errors') if len(errors): errstring = ','.join([error.text for error in errors]) send('Error: %s' % errstring) return data = xml.find('./data') if data is None or data.attrib['num_results'] == '0': send('No results found.') else: for station in data: send(station.find('raw_text').text)
gpl-2.0
6,708,503,875,459,143,000
35.923077
135
0.64125
false
3.896104
false
false
false
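A standalone sketch of the same ADDS data-server query the metar command above issues, with a single hard-coded station (KJFK) in place of the parsed arguments.

from xml.etree import ElementTree

from requests import get

req = get('http://aviationweather.gov/adds/dataserver_current/httpparam',
          params={'datasource': 'metars', 'requestType': 'retrieve', 'format': 'xml',
                  'mostRecentForEachStation': 'constraint', 'hoursBeforeNow': '1.25',
                  'stationString': 'KJFK'})
data = ElementTree.fromstring(req.text).find('./data')
if data is not None and data.attrib['num_results'] != '0':
    for station in data:
        print(station.find('raw_text').text)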
googleapis/google-api-java-client-services
generator/src/googleapis/codegen/schema.py
1
18349
#!/usr/bin/python2.7 # Copyright 2010 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """API data models - schemas and their properties. This module handles the objects created for the "schema" section of an API. """ __author__ = '[email protected] (Tony Aiuto)' import collections import logging from googleapis.codegen import data_types from googleapis.codegen import template_objects from googleapis.codegen.api_exception import ApiException _ADDITIONAL_PROPERTIES = 'additionalProperties' _LOGGER = logging.getLogger('codegen') class Schema(data_types.ComplexDataType): """The definition of a schema.""" def __init__(self, api, default_name, def_dict, parent=None): """Construct a Schema object from a discovery dictionary. Schemas represent data models in the API. Args: api: (Api) the Api instance owning the Schema default_name: (str) the default name of the Schema. If there is an 'id' member in the definition, that is used for the name instead. def_dict: (dict) a discovery dictionary parent: (Schema) The containing schema. To be used to establish unique names for anonymous sub-schemas. """ super(Schema, self).__init__(default_name, def_dict, api, parent=parent) name = def_dict.get('id', default_name) _LOGGER.debug('Schema(%s)', name) # Protect against malicious discovery template_objects.CodeObject.ValidateName(name) self.SetTemplateValue('wireName', name) class_name = api.ToClassName(name, self, element_type='schema') self.SetTemplateValue('className', class_name) self.SetTemplateValue('isSchema', True) self.SetTemplateValue('properties', []) self._module = (template_objects.Module.ModuleFromDictionary(self.values) or api.model_module) @classmethod def Create(cls, api, default_name, def_dict, wire_name, parent=None): """Construct a Schema or DataType from a discovery dictionary. Schemas contain either object declarations, simple type declarations, or references to other Schemas. Object declarations conceptually map to real classes. Simple types will map to a target language built-in type. References should effectively be replaced by the referenced Schema. Args: api: (Api) the Api instance owning the Schema default_name: (str) the default name of the Schema. If there is an 'id' member in the definition, that is used for the name instead. def_dict: (dict) a discovery dictionary wire_name: The name which will identify objects of this type in data on the wire. The path of wire_names can trace an item back through discovery. parent: (Schema) The containing schema. To be used to establish nesting for anonymous sub-schemas. Returns: A Schema or DataType. Raises: ApiException: If the definition dict is not correct. """ schema_id = def_dict.get('id') if schema_id: name = schema_id else: name = default_name class_name = api.ToClassName(name, None, element_type='schema') _LOGGER.debug('Create: %s, parent=%s', name, parent.values.get('wireName', '<anon>') if parent else 'None') # Schema objects come in several patterns. # # 1. 
Simple objects # { type: object, properties: { "foo": {schema} ... }} # # 2. Maps of objects # { type: object, additionalProperties: { "foo": {inner_schema} ... }} # # What we want is a data type which is Map<string, {inner_schema}> # The schema we create here is essentially a built in type which we # don't want to generate a class for. # # 3. Arrays of objects # { type: array, items: { inner_schema }} # # Same kind of issue as the map, but with List<{inner_schema}> # # 4. Primitive data types, described by type and format. # { type: string, format: int32 } # { type: string, enum: ["value", ...], enumDescriptions: ["desc", ...]} # # 5. Refs to another schema. # { $ref: name } # # 6. Variant schemas # { type: object, variant: { discriminant: "prop", map: # [ { 'type_value': value, '$ref': wireName }, ... ] } } # # What we do is map the variant schema to a schema with a single # property for the discriminant. To that property, we attach # the variant map which specifies which discriminator values map # to which schema references. We also collect variant information # in the api so we can later associate discriminator value and # base type with the generated variant subtypes. if 'type' in def_dict: # The 'type' field of the schema can either be 'array', 'object', or a # base json type. json_type = def_dict['type'] if json_type == 'object': # Look for variants variant = def_dict.get('variant') if variant: return cls._CreateVariantType(variant, api, name, def_dict, wire_name, parent) # Look for full object definition. You can have properties or # additionalProperties, but it does not do anything useful to have # both. # Replace properties dict with Property's props = def_dict.get('properties') if props: # This case 1 from above return cls._CreateObjectWithProperties(props, api, name, def_dict, wire_name, parent) # Look for case 2 additional_props = def_dict.get(_ADDITIONAL_PROPERTIES) if additional_props: return cls._CreateMapType(additional_props, api, name, wire_name, class_name, parent) # no properties return cls._CreateSchemaWithoutProperties(api, name, def_dict, wire_name, parent) elif json_type == 'array': # Case 3: Look for array definition return cls._CreateArrayType(api, def_dict, wire_name, class_name, schema_id, parent) else: # Case 4: This must be a basic type. Create a DataType for it. return data_types.CreatePrimitiveDataType(def_dict, api, wire_name, parent=parent) referenced_schema = def_dict.get('$ref') if referenced_schema: # Case 5: Reference to another Schema. # # There are 4 ways you can see '$ref' in discovery. # 1. In a property of a schema or a method request/response, pointing # back to a previously defined schema # 2. As above, pointing to something not defined yet. # 3. In a method request or response or property of a schema pointing to # something undefined. # # For case 1, the schema will be in the API name to schema map. # # For case 2, just creating this placeholder here is fine. When the # actual schema is hit in the loop in _BuildSchemaDefinitions, we will # replace the entry and DataTypeFromJson will resolve the to the new def. # # For case 3, we will end up with a dangling reference and fail later. schema = api.SchemaByName(referenced_schema) # The stored "schema" may not be an instance of Schema, but rather a # data_types.PrimitiveDataType, which has no 'wireName' value. 
if schema: _LOGGER.debug('Schema.Create: %s => %s', default_name, schema.values.get('wireName', '<unknown>')) return schema return data_types.SchemaReference(referenced_schema, api) raise ApiException('Cannot decode JSON Schema for: %s' % def_dict) @classmethod def _CreateObjectWithProperties(cls, props, api, name, def_dict, wire_name, parent): properties = [] schema = cls(api, name, def_dict, parent=parent) if wire_name: schema.SetTemplateValue('wireName', wire_name) for prop_name in sorted(props): prop_dict = props[prop_name] _LOGGER.debug(' adding prop: %s to %s', prop_name, name) properties.append(Property(api, schema, prop_name, prop_dict)) # Some APIs express etag directly in the response, others don't. # Knowing that we have it explicitly makes special case code generation # easier if prop_name == 'etag': schema.SetTemplateValue('hasEtagProperty', True) schema.SetTemplateValue('properties', properties) # check for @ clashing. E.g. No 'foo' and '@foo' in the same object. names = set() for p in properties: wire_name = p.GetTemplateValue('wireName') no_at_sign = wire_name.replace('@', '') if no_at_sign in names: raise ApiException( 'Property name clash in schema %s:' ' %s conflicts with another property' % (name, wire_name)) names.add(no_at_sign) return schema @classmethod def _CreateVariantType(cls, variant, api, name, def_dict, wire_name, parent): """Creates a variant type.""" variants = collections.OrderedDict() schema = cls(api, name, def_dict, parent=parent) if wire_name: schema.SetTemplateValue('wireName', wire_name) discriminant = variant['discriminant'] # Walk over variants building the variant map and register # variant info on the api. for variant_entry in variant['map']: discriminant_value = variant_entry['type_value'] variant_schema = api.DataTypeFromJson(variant_entry, name, parent=parent) variants[discriminant_value] = variant_schema # Set variant info. We get the original wire name from the JSON properties # via '$ref' it is not currently accessible via variant_schema. api.SetVariantInfo(variant_entry.get('$ref'), discriminant, discriminant_value, schema) prop = Property(api, schema, discriminant, {'type': 'string'}, key_for_variants=variants) schema.SetTemplateValue('is_variant_base', True) schema.SetTemplateValue('discriminant', prop) schema.SetTemplateValue('properties', [prop]) return schema @classmethod def _CreateMapType(cls, additional_props, api, name, wire_name, class_name, parent): _LOGGER.debug('Have only additionalProps for %s, dict=%s', name, additional_props) # TODO(user): Remove this hack at the next large breaking change # The "Items" added to the end is unneeded and ugly. This is for # temporary backwards compatibility. Same for _CreateArrayType(). if additional_props.get('type') == 'array': name = '%sItem' % name subtype_name = additional_props.get('id', name + 'Element') # Note, since this is an interim, non class just to hold the map # make the parent schema the parent passed in, not myself. _LOGGER.debug('name:%s, wire_name:%s, subtype name %s', name, wire_name, subtype_name) # When there is a parent, we synthesize a wirename when none exists. # Purpose is to avoid generating an extremely long class name, since we # don't do so for other nested classes. 
if parent and wire_name: base_wire_name = wire_name + 'Element' else: base_wire_name = None base_type = api.DataTypeFromJson( additional_props, subtype_name, parent=parent, wire_name=base_wire_name) map_type = data_types.MapDataType(name, base_type, parent=parent, wire_name=wire_name) map_type.SetTemplateValue('className', class_name) _LOGGER.debug(' %s is MapOf<string, %s>', class_name, base_type.class_name) return map_type @classmethod def _CreateSchemaWithoutProperties(cls, api, name, def_dict, wire_name, parent): if parent: # code objects have __getitem__(), but not .get() try: pname = parent['id'] except KeyError: pname = '<unknown>' name_to_log = '%s.%s' % (pname, name) else: name_to_log = name logging.warning('object without properties %s: %s', name_to_log, def_dict) schema = cls(api, name, def_dict, parent=parent) if wire_name: schema.SetTemplateValue('wireName', wire_name) return schema @classmethod def _CreateArrayType(cls, api, def_dict, wire_name, class_name, schema_id, parent): items = def_dict.get('items') if not items: raise ApiException('array without items in: %s' % def_dict) tentative_class_name = class_name # TODO(user): We should not rename things items. # if we have an anonymous type within a map or array, it should be # called 'Item', and let the namespacing sort it out. if schema_id: _LOGGER.debug('Top level schema %s is an array', class_name) tentative_class_name += 'Items' base_type = api.DataTypeFromJson(items, tentative_class_name, parent=parent, wire_name=wire_name) _LOGGER.debug(' %s is ArrayOf<%s>', class_name, base_type.class_name) array_type = data_types.ArrayDataType(tentative_class_name, base_type, wire_name=wire_name, parent=parent) if schema_id: array_type.SetTemplateValue('className', schema_id) return array_type @property def class_name(self): return self.values['className'] @property def anonymous(self): return 'id' not in self.raw @property def properties(self): return self.values['properties'] @property def isContainerWrapper(self): """Is this schema just a simple wrapper around another container. A schema is just a wrapper for another datatype if it is an object that contains just a single container datatype and (optionally) a kind and etag field. This may be used by language generators to create iterators directly on the schema. E.g. You could have SeriesList ret = api.GetSomeSeriesMethod(args).Execute(); for (series in ret) { ... } rather than for (series in ret->items) { ... } Returns: None or ContainerDataType """ return self._GetPropertyWhichWeWrap() is not None @property def containerProperty(self): """If isContainerWrapper, returns the propery which holds the container.""" return self._GetPropertyWhichWeWrap() def _GetPropertyWhichWeWrap(self): """Returns the property which is the type we are wrapping.""" container_property = None for p in self.values['properties']: if p.values['wireName'] == 'kind' or p.values['wireName'] == 'etag': continue if p.data_type.GetTemplateValue('isContainer'): if container_property: return None container_property = p else: return None return container_property def __str__(self): return '<%s Schema {%s}>' % (self.values['wireName'], self.values) class Property(template_objects.CodeObject): """The definition of a schema property. Example property in the discovery schema: "id": {"type": "string"} """ def __init__(self, api, schema, name, def_dict, key_for_variants=None): """Construct a Property. 
A Property requires several elements in its template value dictionary which are set here: wireName: the string which labels this Property in the JSON serialization. dataType: the DataType of this property. Args: api: (Api) The Api which owns this Property schema: (Schema) the schema this Property is part of name: (string) the name for this Property def_dict: (dict) the JSON schema dictionary key_for_variants: (dict) if given, maps discriminator values to variant schemas. Raises: ApiException: If we have an array type without object definitions. """ super(Property, self).__init__(def_dict, api, wire_name=name) self.ValidateName(name) self.schema = schema self._key_for_variants = key_for_variants # TODO(user): find a better way to mark a schema as an array type # so we can display schemas like BlogList in method responses try: if self.values['wireName'] == 'items' and self.values['type'] == 'array': self.schema.values['isList'] = True except KeyError: pass # If the schema value for this property defines a new object directly, # rather than refering to another schema, we will have to create a class # name for it. We create a unique name by prepending the schema we are # in to the object name. tentative_class_name = api.NestedClassNameForProperty(name, schema) self._data_type = api.DataTypeFromJson(def_dict, tentative_class_name, parent=schema, wire_name=name) @property def code_type(self): if self._language_model: self._data_type.SetLanguageModel(self._language_model) return self._data_type.code_type @property def safe_code_type(self): if self._language_model: self._data_type.SetLanguageModel(self._language_model) return self._data_type.safe_code_type @property def primitive_data_type(self): if self._language_model: self._data_type.SetLanguageModel(self._language_model) return self._data_type.primitive_data_type @property def data_type(self): return self._data_type @property def member_name_is_json_name(self): return self.memberName == self.values['wireName'] @property def is_variant_key(self): return self._key_for_variants @property def variant_map(self): return self._key_for_variants
apache-2.0
-3,918,453,363,792,271,400
37.548319
80
0.642378
false
4.031861
false
false
false
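Illustrative discovery fragments for the patterns Schema.Create distinguishes above; the dicts mirror the numbered cases in the comment block and are made-up examples, not real discovery output.

# case 1: plain object with properties -> a generated class
object_schema = {'type': 'object', 'properties': {'foo': {'type': 'string'}}}
# case 2: additionalProperties only -> Map<string, inner_schema>
map_schema = {'type': 'object', 'additionalProperties': {'type': 'string'}}
# case 3: array of items -> List<inner_schema>
array_schema = {'type': 'array', 'items': {'$ref': 'Foo'}}
# case 4: primitive described by type/format
primitive_schema = {'type': 'string', 'format': 'int64'}
# case 5: reference to another schema
ref_schema = {'$ref': 'Foo'}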
dthgeek/QuickOSM
core/actions.py
1
4118
# -*- coding: utf-8 -*- """ /*************************************************************************** QuickOSM A QGIS plugin OSM Overpass API frontend ------------------- begin : 2014-06-11 copyright : (C) 2014 by 3Liz email : info at 3liz dot com contributor : Etienne Trimaille ***************************************************************************/ /*************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ """ from PyQt4.QtWebKit import QWebView from PyQt4.QtGui import QDesktopServices from PyQt4.QtCore import QUrl from qgis.utils import iface from qgis.gui import QgsMessageBar from QuickOSM.core.utilities.tools import tr class Actions(object): """ Manage actions available on layers """ @staticmethod def run(field, value): """ Run an action with only one value as parameter @param field:Type of the action @type field:str @param value:Value of the field for one entity @type value:str """ if value == '': iface.messageBar().pushMessage( tr("QuickOSM", u"Sorry man, this field is empty for this entity."), level=QgsMessageBar.WARNING, duration=7) else: field = unicode(field, "UTF-8") value = unicode(value, "UTF-8") if field in ["url", "website", "wikipedia"]: var = QDesktopServices() url = None if field == "url" or field == "website": url = value if field == "ref_UAI": url = "http://www.education.gouv.fr/pid24302/annuaire-" \ "resultat-recherche.html?lycee_name=" + value if field == "wikipedia": url = "http://en.wikipedia.org/wiki/" + value var.openUrl(QUrl(url)) elif field == "josm": import urllib2 try: url = "http://localhost:8111/load_object?objects=" + value urllib2.urlopen(url).read() except urllib2.URLError: iface.messageBar().pushMessage( tr("QuickOSM", u"The JOSM remote seems to be disabled."), level=QgsMessageBar.CRITICAL, duration=7) # NOT USED elif field == "rawedit": url = QUrl("http://rawedit.openstreetmap.fr/edit/" + value) web_browser = QWebView(None) web_browser.load(url) web_browser.show() @staticmethod def run_sketch_line(network, ref): """ Run an action with two values for sketchline @param network:network of the bus @type network:str @param ref:ref of the bus @type ref:str """ network = unicode(network, "UTF-8") ref = unicode(ref, "UTF-8") if network == '' or ref == '': iface.messageBar().pushMessage( tr("QuickOSM", u"Sorry man, this field is empty for this entity."), level=QgsMessageBar.WARNING, duration=7) else: var = QDesktopServices() url = "http://www.overpass-api.de/api/sketch-line?" \ "network=" + network + "&ref=" + ref var.openUrl(QUrl(url))
gpl-2.0
1,764,082,413,191,741,200
34.196581
78
0.448033
false
4.805134
false
false
false
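A hedged usage sketch for the Actions helpers above; inside QGIS these run as layer actions and both arguments arrive as UTF-8 byte strings, so the values below are only illustrative.

from QuickOSM.core.actions import Actions

Actions.run('wikipedia', 'Eiffel_Tower')   # opens http://en.wikipedia.org/wiki/Eiffel_Tower
Actions.run('josm', 'way123456')           # asks the local JOSM remote control to load that object
Actions.run_sketch_line('RATP', '38')      # sketch-line lookup for a (network, ref) bus pair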
Amber-MD/ambertools-conda-build
conda_tools/test/test_fix_conda_gfortran_linking_osx.py
1
1041
# pytest -vs . import os import sys from mock import patch import shutil sys.path.insert(0, '..') from fix_conda_gfortran_linking_osx import repack_conda_package, main this_dir = os.path.dirname(__file__) PACK_SCRIPT = os.path.join(this_dir, '..', 'pack_binary_without_conda_install.py') FAKE_TAR = os.path.join(this_dir, 'fake_data', 'fake_osx.tar.bz2') has_gfortran_local = os.path.exists('/usr/local/gfortran/') def test_repack_conda_package(): class Opt(): pass opt = Opt() opt.tarfile = FAKE_TAR opt.output_dir = '.' opt.date = False opt.dry_run = False with patch('update_gfortran_libs_osx.main') as mock_g_main: repack_conda_package(opt) mock_g_main.assert_called_with(['.']) os.remove(os.path.basename(FAKE_TAR)) def test_main(): junk = './tmp_fdasfda' output_dir = '{}/heyhey'.format(junk) main([FAKE_TAR, '-o', output_dir]) assert os.path.exists(os.path.join(output_dir, os.path.basename(FAKE_TAR))) shutil.rmtree(junk)
mit
669,227,652,938,590,500
26.394737
79
0.635927
false
2.949008
false
false
false
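The tests above drive the script's command-line entry point; a direct invocation mirroring test_main would look roughly like this, assuming the script's directory is on sys.path and the paths (both placeholders) exist.

from fix_conda_gfortran_linking_osx import main

# repack a conda tarball into ./repacked, as exercised by test_main above
main(['./fake_osx.tar.bz2', '-o', './repacked'])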
brightiup/brightiup
brightiup/compiler/bt_lexer.py
1
1391
import ply.lex as lex class BTLexerException(Exception): def __init__(self, message): self.message = message def __str__(self): return self.message class BTLexer(object): """BT lexer""" keywords = [ # "import", "state", ] tokens = [keyword.upper() for keyword in keywords] + [ 'ID', 'VARIABLE', ] t_ignore = " \t" t_VARIABLE = r'''\$[A-Za-z][A-Za-z0-9_]*''' literals = ".{};=" _keyword_map = {} for keyword in keywords: _keyword_map[keyword] = keyword.upper() @staticmethod def t_NEWLINE(t): r'''\n+''' t.lexer.lineno += t.value.count('\n') @staticmethod def t_error(t): raise BTLexerException('Illegal character %s at line %s'%(t.value[0], t.lineno)) @staticmethod def t_ID(t): r'''[A-Za-z][A-Za-z0-9_]*''' t.type = BTLexer._keyword_map.get(t.value, 'ID') return t def __init__(self, **kwargs): self.lexer = lex.lex(module=self, **kwargs) def test(self, data): self.lexer.input(data) while True: tok = self.lexer.token() if not tok: break print tok if __name__ == '__main__': lexer = BTLexer() lexer.test(open('../script/http.bt').read())
gpl-2.0
-243,517,580,238,202,750
21.803279
88
0.4867
false
3.557545
false
false
false
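A minimal tokenizing sketch for BTLexer; the import path is inferred from the repository layout and the input string is invented, but it only uses constructs the lexer defines (the state keyword, identifiers, $variables and the declared literals).

from brightiup.compiler.bt_lexer import BTLexer

lexer = BTLexer()
lexer.test('state http { $code = status; }')   # prints one LexToken per token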
holzman/glideinwms-old
lib/condorMonitor.py
1
24800
# # Project: # glideinWMS # # File Version: # # Description: # This module implements classes to query the condor daemons # and manipulate the results # Please notice that it also converts \" into " # # Author: # Igor Sfiligoi (Aug 30th 2006) # import condorExe import condorSecurity import os import string import copy import socket import xml.parsers.expat # # Configuration # # Set path to condor binaries def set_path(new_condor_bin_path): global condor_bin_path condor_bin_path = new_condor_bin_path # # Caching classes # # dummy caching class, when you don't want caching # used as base class below, too class NoneScheddCache: #returns (cms arg schedd string,LOCAL_DIR) def getScheddId(self,schedd_name,pool_name): return (self.iGetCmdScheddStr(schedd_name),{}) # INTERNAL and for inheritance def iGetCmdScheddStr(self,schedd_name): if schedd_name is None: schedd_str="" else: schedd_str = "-name %s " % schedd_name return schedd_str # The schedd can be found either through -name attr # or through the local disk lookup # Remember which one to use class LocalScheddCache(NoneScheddCache): def __init__(self): self.enabled=True # dictionary of # (schedd_name,pool_name)=>(cms arg schedd string,env) self.cache={} self.my_ips=socket.gethostbyname_ex(socket.gethostname())[2] try: self.my_ips+=socket.gethostbyname_ex('localhost')[2] except socket.gaierror,e: pass # localhost not defined, ignore def enable(self): self.enabled=True def disable(self): self.enabled=False #returns (cms arg schedd string,env) def getScheddId(self,schedd_name,pool_name): if schedd_name is None: # special case, do not cache return ("",{}) if self.enabled: k=(schedd_name,pool_name) if not self.cache.has_key(k): # not in cache, discover it env=self.iGetEnv(schedd_name, pool_name) if env is None: # self.cache[k]=(self.iGetCmdScheddStr(schedd_name),{}) else: self.cache[k]=("",env) return self.cache[k] else: # not enabled, just return the str return (self.iGetCmdScheddStr(schedd_name),{}) # # PRIVATE # # return None if not found # Can raise exceptions def iGetEnv(self,schedd_name, pool_name): cs=CondorStatus('schedd',pool_name) data=cs.fetch(constraint='Name=?="%s"'%schedd_name,format_list=[('ScheddIpAddr','s'),('SPOOL_DIR_STRING','s'),('LOCAL_DIR_STRING','s')]) if not data.has_key(schedd_name): raise RuntimeError, "Schedd '%s' not found"%schedd_name el=data[schedd_name] if 'SPOOL_DIR_STRING' not in el and 'LOCAL_DIR_STRING' not in el: # not advertising, cannot use disk optimization return None if not el.has_key('ScheddIpAddr'): # This should never happen raise RuntimeError, "Schedd '%s' is not advertising ScheddIpAddr"%schedd_name schedd_ip=el['ScheddIpAddr'][1:].split(':')[0] if schedd_ip in self.my_ips: #seems local, go for the dir l=el.get('SPOOL_DIR_STRING', el.get('LOCAL_DIR_STRING')) if os.path.isdir(l): # making sure the directory exists if 'SPOOL_DIR_STRING' in el: return {'_CONDOR_SPOOL': '%s' %l } else: # LOCAL_DIR_STRING return {'_CONDOR_SPOOL': '%s/spool' %l } else: #dir does not exist, likely not relevant, revert to standard behaviour return None else: # not local return None # default global object local_schedd_cache=LocalScheddCache() def condorq_attrs(q_constraint, attribute_list): """ Retrieves a list of a single item from the all the factory queues. 
""" attr_str = "" for attr in attribute_list: attr_str += " -attr %s" % attr xml_data = condorExe.exe_cmd("condor_q","-g -l %s -xml -constraint '%s'" % (attr_str, q_constraint)) classads_xml = [] tmp_list = [] for line in xml_data: # look for the xml header if line[:5] == "<?xml": if len(tmp_list) > 0: classads_xml.append(tmp_list) tmp_list = [] tmp_list.append(line) q_proxy_list = [] for ad_xml in classads_xml: cred_list = xml2list(ad_xml) q_proxy_list.extend(cred_list) return q_proxy_list # # Condor monitoring classes # # Generic, you most probably don't want to use these class AbstractQuery: # pure virtual, just to have a minimum set of methods defined # returns the data, will not modify self def fetch(self,constraint=None,format_list=None): raise NotImplementedError,"Fetch not implemented" # will fetch in self.stored_data def load(self,constraint=None,format_list=None): raise NotImplementedError,"Load not implemented" # constraint_func is a boolean function, with only one argument (data el) # same output as fetch, but limited to constraint_func(el)==True # # if constraint_func==None, return all the data def fetchStored(self,constraint_func=None): raise NotImplementedError,"fetchStored not implemented" class StoredQuery(AbstractQuery): # still virtual, only fetchStored defined stored_data = {} def fetchStored(self,constraint_func=None): return applyConstraint(self.stored_data,constraint_func) # # format_list is a list of # (attr_name, attr_type) # where attr_type is one of # "s" - string # "i" - integer # "r" - real (float) # "b" - bool # # # security_obj, if defined, should be a child of condorSecurity.ProtoRequest class QueryExe(StoredQuery): # first fully implemented one, execute commands def __init__(self,exe_name,resource_str,group_attribute,pool_name=None,security_obj=None,env={}): self.exe_name=exe_name self.env=env self.resource_str=resource_str self.group_attribute=group_attribute self.pool_name=pool_name if pool_name is None: self.pool_str="" else: self.pool_str = "-pool %s" % pool_name if security_obj is not None: if security_obj.has_saved_state(): raise RuntimeError, "Cannot use a security object which has saved state." 
self.security_obj=copy.deepcopy(security_obj) else: self.security_obj=condorSecurity.ProtoRequest() def require_integrity(self,requested_integrity): # if none, dont change, else forse that one if requested_integrity is None: condor_val=None elif requested_integrity: condor_val="REQUIRED" else: # if not required, still should not fail if the other side requires it condor_val='OPTIONAL' self.security_obj.set('CLIENT','INTEGRITY',condor_val) def get_requested_integrity(self): condor_val = self.security_obj.get('CLIENT','INTEGRITY') if condor_val is None: return None return (condor_val=='REQUIRED') def require_encryption(self,requested_encryption): # if none, dont change, else forse that one if requested_encryption is None: condor_val=None elif requested_encryption: condor_val="REQUIRED" else: # if not required, still should not fail if the other side requires it condor_val='OPTIONAL' self.security_obj.set('CLIENT','ENCRYPTION',condor_val) def get_requested_encryption(self): condor_val = self.security_obj.get('CLIENT','ENCRYPTION') if condor_val is None: return None return (condor_val=='REQUIRED') def fetch(self,constraint=None,format_list=None): if constraint is None: constraint_str="" else: constraint_str="-constraint '%s'"%constraint full_xml=(format_list is None) if format_list is not None: format_arr=[] for format_el in format_list: attr_name,attr_type=format_el attr_format={'s':'%s','i':'%i','r':'%f','b':'%i'}[attr_type] format_arr.append('-format "%s" "%s"'%(attr_format,attr_name)) format_str=string.join(format_arr," ") # set environment for security settings self.security_obj.save_state() self.security_obj.enforce_requests() if full_xml: xml_data = condorExe.exe_cmd(self.exe_name,"%s -xml %s %s"%(self.resource_str,self.pool_str,constraint_str),env=self.env); else: xml_data = condorExe.exe_cmd(self.exe_name,"%s %s -xml %s %s"%(self.resource_str,format_str,self.pool_str,constraint_str),env=self.env); # restore old values self.security_obj.restore_state() list_data = xml2list(xml_data) del xml_data dict_data = list2dict(list_data, self.group_attribute) return dict_data def load(self, constraint=None, format_list=None): self.stored_data = self.fetch(constraint, format_list) # # Fully usable query functions # # condor_q class CondorQ(QueryExe): def __init__(self,schedd_name=None,pool_name=None,security_obj=None,schedd_lookup_cache=local_schedd_cache): self.schedd_name=schedd_name if schedd_lookup_cache is None: schedd_lookup_cache=NoneScheddCache() schedd_str,env=schedd_lookup_cache.getScheddId(schedd_name, pool_name) QueryExe.__init__(self,"condor_q",schedd_str,["ClusterId","ProcId"],pool_name,security_obj,env) def fetch(self, constraint=None, format_list=None): if format_list is not None: # check that ClusterId and ProcId are present, and if not add them format_list = complete_format_list(format_list, [("ClusterId", 'i'), ("ProcId", 'i')]) return QueryExe.fetch(self, constraint=constraint, format_list=format_list) # condor_q, where we have only one ProcId x ClusterId class CondorQLite(QueryExe): def __init__(self,schedd_name=None,pool_name=None,security_obj=None,schedd_lookup_cache=local_schedd_cache): self.schedd_name=schedd_name if schedd_lookup_cache is None: schedd_lookup_cache=NoneScheddCache() schedd_str,env=schedd_lookup_cache.getScheddId(schedd_name, pool_name) QueryExe.__init__(self,"condor_q",schedd_str,"ClusterId",pool_name,security_obj,env) def fetch(self, constraint=None, format_list=None): if format_list is not None: # check that ClusterId is present, and if not add it 
format_list = complete_format_list(format_list, [("ClusterId", 'i')]) return QueryExe.fetch(self, constraint=constraint, format_list=format_list) # condor_status class CondorStatus(QueryExe): def __init__(self,subsystem_name=None,pool_name=None,security_obj=None): if subsystem_name is None: subsystem_str="" else: subsystem_str = "-%s" % subsystem_name QueryExe.__init__(self,"condor_status",subsystem_str,"Name",pool_name,security_obj,{}) def fetch(self, constraint=None, format_list=None): if format_list is not None: # check that Name present and if not, add it format_list = complete_format_list(format_list, [("Name",'s')]) return QueryExe.fetch(self, constraint=constraint, format_list=format_list) # # Subquery classes # # Generic, you most probably don't want to use this class BaseSubQuery(StoredQuery): def __init__(self, query, subquery_func): self.query = query self.subquery_func = subquery_func def fetch(self, constraint=None): indata = self.query.fetch(constraint) return self.subquery_func(self, indata) # # NOTE: You need to call load on the SubQuery object to use fetchStored # and had query.load issued before # def load(self, constraint=None): indata = self.query.fetchStored(constraint) self.stored_data = self.subquery_func(indata) # # Fully usable subquery functions # class SubQuery(BaseSubQuery): def __init__(self, query, constraint_func=None): BaseSubQuery.__init__(self, query, lambda d:applyConstraint(d, constraint_func)) class Group(BaseSubQuery): # group_key_func - Key extraction function # One argument: classad dictionary # Returns: value of the group key # group_data_func - Key extraction function # One argument: list of classad dictionaries # Returns: a summary classad dictionary def __init__(self, query, group_key_func, group_data_func): BaseSubQuery.__init__(self, query, lambda d:doGroup(d, group_key_func, group_data_func)) # # Summarizing classes # class Summarize: # hash_func - Hashing function # One argument: classad dictionary # Returns: hash value # if None, will not be counted # if a list, all elements will be used def __init__(self, query, hash_func=lambda x:1): self.query = query self.hash_func = hash_func # Parameters: # constraint - string to be passed to query.fetch() # hash_func - if !=None, use this instead of the main one # Returns a dictionary of hash values # Elements are counts (or more dictionaries if hash returns lists) def count(self, constraint=None, hash_func=None): data = self.query.fetch(constraint) return fetch2count(data, self.getHash(hash_func)) # Use data pre-stored in query # Same output as count def countStored(self, constraint_func=None, hash_func=None): data = self.query.fetchStored(constraint_func) return fetch2count(data, self.getHash(hash_func)) # Parameters, same as count # Returns a dictionary of hash values # Elements are lists of keys (or more dictionaries if hash returns lists) def list(self, constraint=None, hash_func=None): data = self.query.fetch(constraint) return fetch2list(data, self.getHash(hash_func)) # Use data pre-stored in query # Same output as list def listStored(self,constraint_func=None,hash_func=None): data=self.query.fetchStored(constraint_func) return fetch2list(data,self.getHash(hash_func)) ### Internal def getHash(self, hash_func): if hash_func is None: return self.hash_func else: return hash_func class SummarizeMulti: def __init__(self, queries, hash_func=lambda x:1): self.counts = [] for query in queries: self.counts.append(self.count(query,hash_func)) self.hash_func=hash_func # see Count for description def 
count(self, constraint=None, hash_func=None): out = {} for c in self.counts: data = c.count(constraint, hash_func) addDict(out, data) return out # see Count for description def countStored(self, constraint_func=None, hash_func=None): out = {} for c in self.counts: data = c.countStored(constraint_func, hash_func) addDict(out, data) return out ############################################################ # # P R I V A T E, do not use # ############################################################ # check that req_format_els are present in in_format_list, and if not add them # return a new format_list def complete_format_list(in_format_list, req_format_els): out_format_list = in_format_list[0:] for req_format_el in req_format_els: found = False for format_el in in_format_list: if format_el[0] == req_format_el[0]: found = True break if not found: out_format_list.append(req_format_el) return out_format_list # # Convert Condor XML to list # # For Example: # #<?xml version="1.0"?> #<!DOCTYPE classads SYSTEM "classads.dtd"> #<classads> #<c> # <a n="MyType"><s>Job</s></a> # <a n="TargetType"><s>Machine</s></a> # <a n="AutoClusterId"><i>0</i></a> # <a n="ExitBySignal"><b v="f"/></a> # <a n="TransferOutputRemaps"><un/></a> # <a n="WhenToTransferOutput"><s>ON_EXIT</s></a> #</c> #<c> # <a n="MyType"><s>Job</s></a> # <a n="TargetType"><s>Machine</s></a> # <a n="AutoClusterId"><i>0</i></a> # <a n="OnExitRemove"><b v="t"/></a> # <a n="x509userproxysubject"><s>/DC=gov/DC=fnal/O=Fermilab/OU=People/CN=Igor Sfiligoi/UID=sfiligoi</s></a> #</c> #</classads> # # 3 xml2list XML handler functions def xml2list_start_element(name, attrs): global xml2list_data, xml2list_inclassad, xml2list_inattr, xml2list_intype if name == "c": xml2list_inclassad = {} elif name == "a": xml2list_inattr = {"name": attrs["n"], "val": ""} xml2list_intype = "s" elif name == "i": xml2list_intype = "i" elif name == "r": xml2list_intype = "r" elif name == "b": xml2list_intype = "b" if attrs.has_key('v'): xml2list_inattr["val"] = (attrs["v"] in ('T', 't', '1')) else: # extended syntax... 
value in text area xml2list_inattr["val"] = None elif name == "un": xml2list_intype = "un" xml2list_inattr["val"] = None elif name in ("s", "e"): pass # nothing to do elif name == "classads": pass # top element, nothing to do else: raise TypeError, "Unsupported type: %s" % name def xml2list_end_element(name): global xml2list_data, xml2list_inclassad, xml2list_inattr, xml2list_intype if name == "c": xml2list_data.append(xml2list_inclassad) xml2list_inclassad = None elif name == "a": xml2list_inclassad[xml2list_inattr["name"]] = xml2list_inattr["val"] xml2list_inattr = None elif name in ("i", "b", "un", "r"): xml2list_intype = "s" elif name in ("s", "e"): pass # nothing to do elif name == "classads": pass # top element, nothing to do else: raise TypeError, "Unexpected type: %s" % name def xml2list_char_data(data): global xml2list_data, xml2list_inclassad, xml2list_inattr, xml2list_intype if xml2list_inattr is None: # only process when in attribute return if xml2list_intype == "i": xml2list_inattr["val"] = int(data) elif xml2list_intype == "r": xml2list_inattr["val"] = float(data) elif xml2list_intype == "b": if xml2list_inattr["val"] is not None: #nothing to do, value was in attribute pass else: xml2list_inattr["val"] = (data[0] in ('T', 't', '1')) elif xml2list_intype == "un": #nothing to do, value was in attribute pass else: unescaped_data = string.replace(data, '\\"', '"') xml2list_inattr["val"] += unescaped_data def xml2list(xml_data): global xml2list_data, xml2list_inclassad, xml2list_inattr, xml2list_intype xml2list_data = [] xml2list_inclassad = None xml2list_inattr = None xml2list_intype = None p = xml.parsers.expat.ParserCreate() p.StartElementHandler = xml2list_start_element p.EndElementHandler = xml2list_end_element p.CharacterDataHandler = xml2list_char_data found_xml = -1 for line in range(len(xml_data)): # look for the xml header if xml_data[line][:5] == "<?xml": found_xml = line break if found_xml >= 0: try: p.Parse(string.join(xml_data[found_xml:]), 1) except TypeError, e: raise RuntimeError, "Failed to parse XML data, TypeError: %s" % e except: raise RuntimeError, "Failed to parse XML data, generic error" # else no xml, so return an empty list return xml2list_data # # Convert a list to a dictionary # def list2dict(list_data, attr_name): if type(attr_name) in (type([]), type((1, 2))): attr_list = attr_name else: attr_list = [attr_name] dict_data = {} for list_el in list_data: if type(attr_name) in (type([]), type((1, 2))): dict_name = [] list_keys=list_el.keys() for an in attr_name: if an in list_keys: dict_name.append(list_el[an]) else: # Try lower cases for k in list_keys: if an.lower()==k.lower(): dict_name.append(list_el[k]) break dict_name=tuple(dict_name) else: dict_name = list_el[attr_name] # dict_el will have all the elements but those in attr_list dict_el = {} for a in list_el: if not (a in attr_list): dict_el[a] = list_el[a] dict_data[dict_name] = dict_el return dict_data def applyConstraint(data, constraint_func): if constraint_func is None: return data else: outdata = {} for key in data.keys(): if constraint_func(data[key]): outdata[key] = data[key] return outdata def doGroup(indata, group_key_func, group_data_func): gdata = {} for k in indata.keys(): inel = indata[k] gkey = group_key_func(inel) if gdata.has_key(gkey): gdata[gkey].append(inel) else: gdata[gkey] = [inel] outdata = {} for k in gdata.keys(): outdata[k] = group_data_func(gdata[k]) return outdata # # Inputs # data - data from a fetch() # hash_func - Hashing function # One argument: classad dictionary # 
Returns: hash value # if None, will not be counted # if a list, all elements will be used # # Returns a dictionary of hash values # Elements are counts (or more dictionaries if hash returns lists) # def fetch2count(data, hash_func): count = {} for k in data.keys(): el = data[k] hid = hash_func(el) if hid is None: # hash tells us it does not want to count this continue # cel will point to the real counter cel = count # check if it is a list if (type(hid) == type([])): # have to create structure inside count for h in hid[:-1]: if not cel.has_key(h): cel[h] = {} cel = cel[h] hid = hid[-1] if cel.has_key(hid): count_el = cel[hid] + 1 else: count_el = 1 cel[hid] = count_el return count # # Inputs # data - data from a fetch() # hash_func - Hashing function # One argument: classad dictionary # Returns: hash value # if None, will not be counted # if a list, all elements will be used # # Returns a dictionary of hash values # Elements are lists of keys (or more dictionaries if hash returns lists) # def fetch2list(data, hash_func): return_list = {} for k in data.keys(): el = data[k] hid = hash_func(el) if hid is None: # hash tells us it does not want to list this continue # lel will point to the real list lel = return_list # check if it is a list if (type(hid) == type([])): # have to create structure inside list for h in hid[:-1]: if not lel.has_key(h): lel[h] = {} lel = lel[h] hid = hid[-1] if lel.has_key(hid): list_el = lel[hid].append[k] else: list_el = [k] lel[hid] = list_el return return_list # # Recursivelly add two dictionaries # Do it in place, using the first one # def addDict(base_dict, new_dict): for k in new_dict.keys(): new_el = new_dict[k] if not base_dict.has_key(k): # nothing there?, just copy base_dict[k] = new_el else: if type(new_el) == type({}): #another dictionary, recourse addDict(base_dict[k], new_el) else: base_dict[k] += new_el
bsd-3-clause
6,990,342,089,642,197,000
31.124352
148
0.586048
false
3.561172
false
false
false
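A hedged sketch of querying a schedd with the classes above; it requires the HTCondor command-line tools on PATH, and the schedd and pool names are placeholders.

import condorMonitor

cq = condorMonitor.CondorQ(schedd_name='schedd.example.com', pool_name='pool.example.com')

# running jobs, keyed by (ClusterId, ProcId); ClusterId/ProcId are added to the format list automatically
jobs = cq.fetch(constraint='JobStatus==2', format_list=[('Owner', 's'), ('JobStatus', 'i')])

# count jobs per owner with the Summarize helper (full classads are fetched when no format list is given)
per_owner = condorMonitor.Summarize(cq, hash_func=lambda ad: ad['Owner']).count(constraint='JobStatus==2')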
apuigsech/emv-framework
iso7816.py
1
5968
#!/usr/bin/python # # Python ISO7816 (as part of EMV Framework) # Copyrigh 2012 Albert Puigsech Galicia <[email protected]> # # This code is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # from smartcard.CardType import AnyCardType from smartcard.CardRequest import CardRequest from smartcard.CardConnection import CardConnection from smartcard.CardConnectionObserver import ConsoleCardConnectionObserver from smartcard.Exceptions import CardRequestTimeoutException from tlv import * INS_DB = ( { 'name':'READ_BINARY', 'code':0xb0 }, { 'name':'WRITE_BINARY', 'code':0xd0 }, { 'name':'UPDATE_BINARY', 'code':0xd6 }, { 'name':'ERASE_BINARY', 'code':0x0e }, { 'name':'READ_RECORD', 'code':0xb2 }, { 'name':'WRITE_RECORD', 'code':0xd2 }, { 'name':'APPEND_RECORD', 'code':0xe2 }, { 'name':'UPDATE RECORD', 'code':0xdc }, { 'name':'GET_DATA', 'code':0xca }, { 'name':'PUT_DATA', 'code':0xda }, { 'name':'SELECT_FILE', 'code':0xa4 }, { 'name':'VERIFY', 'code':0x20 }, { 'name':'INTERNAL_AUTHENTICATE', 'code':0x88 }, { 'name':'EXTERNAL AUTHENTICATE', 'code':0xb2 }, { 'name':'GET_CHALLENGE', 'code':0xb4 }, { 'name':'MANAGE_CHANNEL', 'code':0x70 }, { 'name':'GET_RESPONSE', 'code':0xc0 } ) class APDU_Command: def __init__(self, cla=0x00, ins=0x00, p1=0x00, p2=0x00, lc=None, data=None, le=None): self.cla = cla self.ins = ins self.p1 = p1 self.p2 = p2 if data != None and lc == None: lc = len(data) self.lc = lc self.data = data self.le = le def raw(self): apdu_cmd_raw = [self.cla, self.ins, self.p1, self.p2] if self.data != None: apdu_cmd_raw += [self.lc] + self.data if self.le != None: apdu_cmd_raw += [self.le] return apdu_cmd_raw def str(self): apdu_cmd_str = '{0:02x} {1:02x} {2:02x} {3:02x}'.format(self.cla, self.ins, self.p1, self.p2) if self.data != None: apdu_cmd_str += ' {0:02x}'.format(self.lc) for d in self.data: apdu_cmd_str += ' {0:02x}'.format(d) if self.le != None: apdu_cmd_str += ' {0:02x}'.format(self.le) return apdu_cmd_str class APDU_Response: def __init__(self, sw1=0x00, sw2=0x00, data=None): self.sw1 = sw1 self.sw2 = sw2 self.data = data def raw(self): apdu_res_raw = [] if self.data != None: apdu_res_raw += self.data apdu_res_raw += [self.sw1, self.sw2] return apdu_res_raw def str(self): apdu_res_str = '' if self.data != None: for d in self.data: apdu_res_str += '{0:02x} '.format(d) apdu_res_str += '{0:02x} {1:02x}'.format(self.sw1, self.sw2) return apdu_res_str class ISO7816: def __init__(self): cardtype = AnyCardType() cardrequest = CardRequest(timeout=10, cardType=cardtype) self.card = cardrequest.waitforcard() self.card.connection.connect() self.ins_db = [] self.ins_db_update(INS_DB) self.log = [] self.auto_get_response = True def ins_db_update(self, new): self.ins_db += new def ins_db_resolv(self, name=None, code=None): for e in self.ins_db: if name != None and e['name'] == name: return e['code'] if code != None and e['code'] == code: return e['name'] return None def send_command(self, cmd, p1=0, p2=0, tlvparse=False, cla=0x00, data=None, le=None): ins = self.ins_db_resolv(name=cmd) return self.send_apdu(APDU_Command(ins=ins, p1=p1, p2=p2, cla=cla, data=data, le=le)) def send_apdu(self, apdu_cmd): #print '>>> ' + apdu_cmd.str() data,sw1,sw2 = self.send_apdu_raw(apdu_cmd.raw()) apdu_res = APDU_Response(sw1=sw1, sw2=sw2, data=data) #print '<<< ' + apdu_res.str() if self.auto_get_response == True: if sw1 == 0x6c: 
apdu_cmd.le = sw2 apdu_res = self.send_apdu(apdu_cmd) if sw1 == 0x61: apdu_res = self.GET_RESPONSE(sw2) return apdu_res def send_apdu_raw(self, apdu): return self.card.connection.transmit(apdu) def log_add(self, log_item): self.log.append(log_item) def log_print(self): return def READ_BINARY(self, p1=0x00, p2=0x00, len=0x00): return self.send_command('READ_BINARY', p1=p1, p2=p2, le=len) def WRITE_BINARY(self, p1=0x00, p2=0x00, data=[]): return self.send_command('WRITE_BINARY', p1=p1, p2=p2, data=data) def UPDATE_BINRY(self, p1=0x00, p2=0x00, data=[]): return self.send_command('UPDATE_BINRY', p1=p1, p2=p2, data=data) def ERASE_BINARY(self, p1=0x00, p2=0x00, data=None): return self.send_command('ERASE_BINARY', p1=p1, p2=p2, data=data) def READ_RECORD(self, sfi, record=0x00, variation=0b100): return self.send_command('READ_RECORD', p1=record, p2=(sfi<<3)+variation, le=0) def WRITE_RECORD(self, sfi, data, record=0x00, variation=0b100): return self.send_command('WRITE_RECORD', p1=record, p2=(sfi<<3)+variation, data=data) def APPEND_RECORD(self, sfi, variation=0b100): return self.send_command('APPEND_RECORD', p1=0x00, p2=(sfi<<3)+variation, data=data) def UPDATE_RECORD(self, sfi, data, record=0x00, variation=0b100): return self.send_command('UPDATE_RECORD', p1=record, p2=(sfi<<3)+variation, data=data) def GET_DATA(self, data_id): return self.send_command('GET_DATA', p1=data_id[0], p2=data_id[1]) def PUT_DATA(self, data_id, data): return self.send_command('PUT_DATA', p1=data_id[0], p2=data_id[1], data=data) def SELECT_FILE(self, data, p1=0x00, p2=0x00): return self.send_command('SELECT_FILE', p1=p1, p2=p2, data=data) def VERIFY(self): return def INTERNAL_AUTHENTICATE(self): return def EXTERNAL_AUTHENTICATE(self): return def GET_CHALLENGE(self): return def MANAGE_CHANNEL(self): return def GET_RESPONSE(self, le): return self.send_command('GET_RESPONSE', le=le) def ENVELOPPE(self): return def SEARCH_RECORD(self): return def DISABLE_CHV(self): return def UNBLOCK_CHV(self): return
gpl-3.0
7,305,658,734,531,248,000
22.131783
95
0.649464
false
2.385292
false
false
false
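A hedged usage sketch for the ISO7816 class above: it waits for any inserted card, selects an application by name (P1=0x04) and reads the first record of SFI 1; the AID bytes are placeholders.

from iso7816 import ISO7816

card = ISO7816()                           # waits for and connects to the first card presented
aid = [0xA0, 0x00, 0x00, 0x00, 0x04]       # placeholder application identifier
res = card.SELECT_FILE(aid, p1=0x04)       # select by DF name
print(res.str())                           # e.g. '90 00' on success
rec = card.READ_RECORD(1, record=1)        # record 1 of short file identifier 1
print(rec.str())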
guegue/forocacao
forocacao/users/views.py
1
3882
# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals import base64 from PIL import Image, ImageDraw, ImageFont from django.core.urlresolvers import reverse from django.http import HttpResponse from django.views.generic import DetailView, ListView, RedirectView, UpdateView from braces.views import LoginRequiredMixin from easy_thumbnails.files import get_thumbnailer from .models import User class UserBadgeJPEG(LoginRequiredMixin, DetailView): model = User slug_field = "username" slug_url_kwarg = "username" def get(self, request, username): participant = self.get_object() event = participant.event img = Image.new('RGBA', (event.badge_size_x, event.badge_size_y), event.badge_color) draw = ImageDraw.Draw(img) match = { 'event': event.name, 'name': "%s %s" % (participant.first_name.partition(' ')[0], participant.last_name.partition(' ')[0] ), 'first_name': participant.first_name, 'last_name': participant.last_name, 'profession': participant.profession, 'country': participant.country.name, 'type': participant.type, 'email': participant.email, } for field in event.eventbadge_set.all(): x = field.x y = field.y size = field.size if field.field == 'logo': if participant.event.logo: logo = Image.open(participant.event.logo.file.file) logo.thumbnail((size,size)) img.paste(logo, (x,y)) elif field.field == 'photo': if participant.photo: photo = Image.open(participant.photo) photo.thumbnail((size,size)) img.paste(photo, (x,y)) else: if field.field == 'text': content = field.format else: content = match[field.field] fnt = ImageFont.truetype(field.font.filename, size) color = field.color draw.text((x,y), ("%s") % (content), font=fnt, fill=color) response = HttpResponse(content_type="image/png") img.save(response, "PNG") return HttpResponse(response, content_type="image/png") class UserBadgeView(LoginRequiredMixin, DetailView): model = User # These next two lines tell the view to index lookups by username slug_field = "username" slug_url_kwarg = "username" template_name = 'users/user_badge.html' class UserDetailView(LoginRequiredMixin, DetailView): model = User # These next two lines tell the view to index lookups by username slug_field = "username" slug_url_kwarg = "username" class UserRedirectView(LoginRequiredMixin, RedirectView): permanent = False def get_redirect_url(self): return reverse("users:detail", kwargs={"username": self.request.user.username}) class UserUpdateView(LoginRequiredMixin, UpdateView): fields = ['first_name', 'last_name', 'phone', 'activities' ] #FIXME : add all needed fields # we already imported User in the view code above, remember? model = User # send the user back to their own page after a successful update def get_success_url(self): return reverse("users:detail", kwargs={"username": self.request.user.username}) def get_object(self): # Only get the User record for the user making the request return User.objects.get(username=self.request.user.username) class UserListView(LoginRequiredMixin, ListView): model = User # These next two lines tell the view to index lookups by username slug_field = "username" slug_url_kwarg = "username"
bsd-3-clause
-8,969,089,653,582,079,000
34.290909
119
0.61154
false
4.16971
false
false
false
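A hypothetical URLconf wiring the views above into the users app; 'users:detail' is the name the views reverse() to, while the other routes and patterns are illustrative only.

from django.conf.urls import url

from . import views

urlpatterns = [
    url(r'^$', views.UserListView.as_view(), name='list'),
    url(r'^(?P<username>[\w.@+-]+)/$', views.UserDetailView.as_view(), name='detail'),
    url(r'^(?P<username>[\w.@+-]+)/badge/$', views.UserBadgeView.as_view(), name='badge'),
    url(r'^(?P<username>[\w.@+-]+)/badge\.png$', views.UserBadgeJPEG.as_view(), name='badge_image'),
]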
icgood/pymap
pymap/parsing/specials/options.py
1
5476
from __future__ import annotations import re from collections.abc import Iterable, Mapping from typing import Optional from . import AString, SequenceSet from .. import Params, Parseable from ..exceptions import NotParseable from ..primitives import Number, List from ...bytes import BytesFormat, rev __all__ = ['ExtensionOption', 'ExtensionOptions'] class ExtensionOption(Parseable[bytes]): """Represents a single command option, which may or may not have an associated value. See Also: `RFC 4466 2.1. <https://tools.ietf.org/html/rfc4466#section-2.1>`_ Args: option: The name of the option. arg: The option argument, if any. """ _opt_pattern = rev.compile(br'[a-zA-Z_.-][a-zA-Z0-9_.:-]*') def __init__(self, option: bytes, arg: List) -> None: super().__init__() self.option = option self.arg = arg self._raw_arg: Optional[bytes] = None @property def value(self) -> bytes: return self.option def __bytes__(self) -> bytes: if self.arg.value: return BytesFormat(b'%b %b') % (self.option, self.raw_arg) else: return self.option @property def raw_arg(self) -> bytes: if self._raw_arg is None: if not self.arg: self._raw_arg = b'' elif len(self.arg) == 1: arg_0 = self.arg.value[0] if isinstance(arg_0, (Number, SequenceSet)): self._raw_arg = bytes(arg_0) else: self._raw_arg = bytes(self.arg) else: self._raw_arg = bytes(self.arg) return self._raw_arg @classmethod def _parse_arg(cls, buf: memoryview, params: Params) \ -> tuple[List, memoryview]: try: num, buf = Number.parse(buf, params) except NotParseable: pass else: arg = List([num]) return arg, buf try: seq_set, buf = SequenceSet.parse(buf, params) except NotParseable: pass else: arg = List([seq_set]) return arg, buf try: params_copy = params.copy(list_expected=[AString, List]) return List.parse(buf, params_copy) except NotParseable: pass return List([]), buf @classmethod def parse(cls, buf: memoryview, params: Params) \ -> tuple[ExtensionOption, memoryview]: start = cls._whitespace_length(buf) match = cls._opt_pattern.match(buf, start) if not match: raise NotParseable(buf[start:]) option = match.group(0).upper() buf = buf[match.end(0):] arg, buf = cls._parse_arg(buf, params) return cls(option, arg), buf class ExtensionOptions(Parseable[Mapping[bytes, List]]): """Represents a set of command options, which may or may not have an associated argument. Command options are always optional, so the parsing will not fail, it will just return an empty object. See Also: `RFC 4466 2.1. <https://tools.ietf.org/html/rfc4466#section-2.1>`_ Args: options: The mapping of options to argument. 
""" _opt_pattern = re.compile(br'[a-zA-Z_.-][a-zA-Z0-9_.:-]*') _empty: Optional[ExtensionOptions] = None def __init__(self, options: Iterable[ExtensionOption]) -> None: super().__init__() self.options: Mapping[bytes, List] = \ {opt.option: opt.arg for opt in options} self._raw: Optional[bytes] = None @classmethod def empty(cls) -> ExtensionOptions: """Return an empty set of command options.""" if cls._empty is None: cls._empty = ExtensionOptions({}) return cls._empty @property def value(self) -> Mapping[bytes, List]: return self.options def has(self, option: bytes) -> bool: return option in self.options def get(self, option: bytes) -> Optional[List]: return self.options.get(option, None) def __bool__(self) -> bool: return bool(self.options) def __len__(self) -> int: return len(self.options) def __bytes__(self) -> bytes: if self._raw is None: parts = [ExtensionOption(option, arg) for option, arg in sorted(self.options.items())] self._raw = b'(' + BytesFormat(b' ').join(parts) + b')' return self._raw @classmethod def _parse_paren(cls, buf: memoryview, paren: bytes) -> memoryview: start = cls._whitespace_length(buf) if buf[start:start + 1] != paren: raise NotParseable(buf) return buf[start + 1:] @classmethod def _parse(cls, buf: memoryview, params: Params) \ -> tuple[ExtensionOptions, memoryview]: buf = cls._parse_paren(buf, b'(') result: list[ExtensionOption] = [] while True: try: option, buf = ExtensionOption.parse(buf, params) except NotParseable: break else: result.append(option) buf = cls._parse_paren(buf, b')') return cls(result), buf @classmethod def parse(cls, buf: memoryview, params: Params) \ -> tuple[ExtensionOptions, memoryview]: try: return cls._parse(buf, params) except NotParseable: return cls.empty(), buf
mit
3,286,872,938,212,346,000
29.422222
76
0.56355
false
3.976761
false
false
false
squirrelo/qiita
qiita_ware/dispatchable.py
1
8731
# ----------------------------------------------------------------------------- # Copyright (c) 2014--, The Qiita Development Team. # # Distributed under the terms of the BSD 3-clause License. # # The full license is in the file LICENSE, distributed with this software. # ----------------------------------------------------------------------------- from .analysis_pipeline import RunAnalysis from qiita_ware.commands import submit_EBI, submit_VAMPS from qiita_db.analysis import Analysis def submit_to_ebi(preprocessed_data_id, submission_type): """Submit a study to EBI""" submit_EBI(preprocessed_data_id, submission_type, True) def submit_to_VAMPS(preprocessed_data_id): """Submit a study to VAMPS""" return submit_VAMPS(preprocessed_data_id) def run_analysis(analysis_id, commands, comm_opts=None, rarefaction_depth=None, merge_duplicated_sample_ids=False, **kwargs): """Run an analysis""" analysis = Analysis(analysis_id) ar = RunAnalysis(**kwargs) return ar(analysis, commands, comm_opts, rarefaction_depth, merge_duplicated_sample_ids) def create_raw_data(artifact_type, prep_template, filepaths, name=None): """Creates a new raw data Needs to be dispachable because it moves large files Parameters ---------- artifact_type: str The artifact type prep_template : qiita_db.metadata_template.prep_template.PrepTemplate The template to attach the artifact filepaths : list of (str, str) The list with filepaths and their filepath types name : str, optional The name of the new artifact Returns ------- dict of {str: str} A dict of the form {'status': str, 'message': str} """ from qiita_db.artifact import Artifact status = 'success' msg = '' try: Artifact.create(filepaths, artifact_type, name=name, prep_template=prep_template) except Exception as e: # We should hit this exception rarely (that's why it is an # exception) since at this point we have done multiple checks. # However, it can occur in weird cases, so better let the GUI know # that this failed return {'status': 'danger', 'message': "Error creating artifact: %s" % str(e)} return {'status': status, 'message': msg} def copy_raw_data(prep_template, artifact_id): """Creates a new raw data by copying from artifact_id Parameters ---------- prep_template : qiita_db.metadata_template.prep_template.PrepTemplate The template to attach the artifact artifact_id : int The id of the artifact to duplicate Returns ------- dict of {str: str} A dict of the form {'status': str, 'message': str} """ from qiita_db.artifact import Artifact status = 'success' msg = '' try: Artifact.copy(Artifact(artifact_id), prep_template) except Exception as e: # We should hit this exception rarely (that's why it is an # exception) since at this point we have done multiple checks. 
# However, it can occur in weird cases, so better let the GUI know # that this failed return {'status': 'danger', 'message': "Error creating artifact: %s" % str(e)} return {'status': status, 'message': msg} def delete_artifact(artifact_id): """Deletes an artifact from the system Parameters ---------- artifact_id : int The artifact to delete Returns ------- dict of {str: str} A dict of the form {'status': str, 'message': str} """ from qiita_db.artifact import Artifact status = 'success' msg = '' try: Artifact.delete(artifact_id) except Exception as e: status = 'danger' msg = str(e) return {'status': status, 'message': msg} def create_sample_template(fp, study, is_mapping_file, data_type=None): """Creates a sample template Parameters ---------- fp : str The file path to the template file study : qiita_db.study.Study The study to add the sample template to is_mapping_file : bool Whether `fp` contains a mapping file or a sample template data_type : str, optional If `is_mapping_file` is True, the data type of the prep template to be created Returns ------- dict of {str: str} A dict of the form {'status': str, 'message': str} """ # The imports need to be in here because this code is executed in # the ipython workers import warnings from os import remove from qiita_db.metadata_template.sample_template import SampleTemplate from qiita_db.metadata_template.util import load_template_to_dataframe from qiita_ware.metadata_pipeline import ( create_templates_from_qiime_mapping_file) status = 'success' msg = '' try: with warnings.catch_warnings(record=True) as warns: if is_mapping_file: create_templates_from_qiime_mapping_file(fp, study, data_type) else: SampleTemplate.create(load_template_to_dataframe(fp), study) remove(fp) # join all the warning messages into one. Note that this # info will be ignored if an exception is raised if warns: msg = '\n'.join(set(str(w.message) for w in warns)) status = 'warning' except Exception as e: # Some error occurred while processing the sample template # Show the error to the user so they can fix the template status = 'danger' msg = str(e) return {'status': status, 'message': msg} def update_sample_template(study_id, fp): """Updates a sample template Parameters ---------- study_id : int Study id whose template is going to be updated fp : str The file path to the template file Returns ------- dict of {str: str} A dict of the form {'status': str, 'message': str} """ import warnings from os import remove from qiita_db.metadata_template.util import load_template_to_dataframe from qiita_db.metadata_template.sample_template import SampleTemplate msg = '' status = 'success' try: with warnings.catch_warnings(record=True) as warns: # deleting previous uploads and inserting new one st = SampleTemplate(study_id) df = load_template_to_dataframe(fp) st.extend(df) st.update(df) remove(fp) # join all the warning messages into one. 
Note that this info # will be ignored if an exception is raised if warns: msg = '\n'.join(set(str(w.message) for w in warns)) status = 'warning' except Exception as e: status = 'danger' msg = str(e) return {'status': status, 'message': msg} def delete_sample_template(study_id): """Delete a sample template Parameters ---------- study_id : int Study id whose template is going to be deleted Returns ------- dict of {str: str} A dict of the form {'status': str, 'message': str} """ from qiita_db.metadata_template.sample_template import SampleTemplate msg = '' status = 'success' try: SampleTemplate.delete(study_id) except Exception as e: status = 'danger' msg = str(e) return {'status': status, 'message': msg} def update_prep_template(prep_id, fp): """Updates a prep template Parameters ---------- prep_id : int Prep template id to be updated fp : str The file path to the template file Returns ------- dict of {str: str} A dict of the form {'status': str, 'message': str} """ import warnings from os import remove from qiita_db.metadata_template.util import load_template_to_dataframe from qiita_db.metadata_template.prep_template import PrepTemplate msg = '' status = 'success' prep = PrepTemplate(prep_id) try: with warnings.catch_warnings(record=True) as warns: df = load_template_to_dataframe(fp) prep.extend(df) prep.update(df) remove(fp) if warns: msg = '\n'.join(set(str(w.message) for w in warns)) status = 'warning' except Exception as e: status = 'danger' msg = str(e) return {'status': status, 'message': msg}
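The functions in this module share one reporting convention: run the work, collect any warnings, and hand the GUI a {'status': ..., 'message': ...} dict. A generic, self-contained sketch of that convention follows; run_with_status is a hypothetical name and not part of qiita.

import warnings

def run_with_status(operation, *args, **kwargs):
    """Run `operation` and report the outcome as {'status': ..., 'message': ...}."""
    status, msg = 'success', ''
    try:
        with warnings.catch_warnings(record=True) as warns:
            warnings.simplefilter('always')
            operation(*args, **kwargs)
        if warns:
            # mirror the functions above: warnings demote the status to 'warning'
            msg = '\n'.join(set(str(w.message) for w in warns))
            status = 'warning'
    except Exception as e:
        status, msg = 'danger', str(e)
    return {'status': status, 'message': msg}

print(run_with_status(lambda: warnings.warn('column "x" was renamed')))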
bsd-3-clause
4,269,830,663,802,809,300
28.59661
79
0.590654
false
4.171524
false
false
false
bepatient-fr/itools
itools/pkg/build_gulp.py
1
4272
# -*- coding: UTF-8 -*- # Copyright (C) 2016 Sylvain Taverne <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # Import from standard library import sys from subprocess import Popen # Import from itools from itools.fs.lfs import LocalFolder from itools.uri import get_uri_name, Path class GulpBuilder(object): """ Run "gulp build" in project's repository & add generated files $ ui/{SKINS}/* into the project MANIFEST file. That allow to avoid commit compiled JS/CSS files into GIT. """ def __init__(self, package_root, worktree, manifest): self.package_root = package_root if self.package_root != '.': self.ui_path = '{0}/ui/'.format(self.package_root) else: self.ui_path = 'ui/' self.worktree = worktree self.manifest = manifest self.fs = LocalFolder('.') if self.fs.is_folder(self.ui_path): self.dist_folders = tuple(['{0}{1}'.format(self.ui_path, x) for x in LocalFolder(self.ui_path).get_names()]) def run(self): npm_done = self.launch_npm_install() gulp_done = self.launch_gulp_build() webpack_done = self.launch_webpack() # Add DIST files into manifest if (npm_done or gulp_done or webpack_done) and self.fs.exists(self.ui_path): for path in self.fs.traverse(self.ui_path): relative_path = self.fs.get_relative_path(path) if (relative_path and relative_path.startswith(self.dist_folders) and self.fs.is_file(path)): self.manifest.add(relative_path) def launch_npm_install(self): done = False for path in self.manifest: filename = get_uri_name(path) if filename == 'package.json': print '***'*25 print '*** Run $ npm install on ', path print '***'*25 path = str(Path(path)[:-1]) + '/' p = Popen(['npm', 'install'], cwd=path) p.wait() if p.returncode == 1: print '***'*25 print '*** Error running npm install ', path print '***'*25 sys.exit(1) done = True return done def launch_gulp_build(self): done = False for path in self.manifest: filename = get_uri_name(path) if filename == 'gulpfile.js': print '***'*25 print '*** Run $ gulp build on ', path print '***'*25 path = str(Path(path)[:-1]) + '/' p = Popen(['gulp', 'build'], cwd=path) p.wait() if p.returncode == 1: print '***'*25 print '*** Error running gulp ', path print '***'*25 sys.exit(1) done = True return done def launch_webpack(self): done = False for path in self.manifest: filename = get_uri_name(path) if filename == 'webpack.config.js': print '***'*25 print '*** Run $ webpack ', path print '***'*25 path = str(Path(path)[:-1]) + '/' p = Popen(['webpack', '--mode=production'], cwd=path) p.wait() if p.returncode == 1: print '***'*25 print '*** Error running webpack ', path print '***'*25 sys.exit(1) done = True return done
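Each launch_* method above follows the same shape: find a marker file in the manifest, run the corresponding tool with Popen in that directory, and abort the whole build on failure. The standalone sketch below captures that step; run_tool_in is a hypothetical helper, and unlike the original it treats any non-zero exit status as a failure.

import sys
from subprocess import Popen

def run_tool_in(command, cwd):
    """Run a build command in `cwd`, exiting the build if it fails."""
    p = Popen(command, cwd=cwd)
    p.wait()
    if p.returncode != 0:
        print('*** Error running %s in %s' % (' '.join(command), cwd))
        sys.exit(1)

# e.g. run_tool_in(['npm', 'install'], 'ui/myskin/')
#      run_tool_in(['gulp', 'build'], 'ui/myskin/')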
gpl-3.0
1,900,275,981,483,111,000
34.6
91
0.526919
false
4.111646
false
false
false
lnls-fac/sirius
pymodels/TS_V03_03/lattice.py
1
10721
"""Lattice module. In this module the lattice of the corresponding accelerator is defined. """ import math as _math import numpy as _np from pyaccel import lattice as _pyacc_lat, elements as _pyacc_ele, \ accelerator as _pyacc_acc, optics as _pyacc_opt energy = 0.15e9 # [eV] default_optics_mode = 'M1' class LatticeError(Exception): """LatticeError class.""" def create_lattice(optics_mode=default_optics_mode): """Create lattice function.""" strengths, twiss_at_start = get_optics_mode(optics_mode) # -- shortcut symbols -- marker = _pyacc_ele.marker drift = _pyacc_ele.drift quadrupole = _pyacc_ele.quadrupole rbend_sirius = _pyacc_ele.rbend hcorrector = _pyacc_ele.hcorrector vcorrector = _pyacc_ele.vcorrector # --- drift spaces --- ldif = 0.1442 l015 = drift('l015', 0.1500) l020 = drift('l020', 0.2000) l025 = drift('l025', 0.2500) l040 = drift('l040', 0.4000) l060 = drift('l060', 0.6000) l080 = drift('l080', 0.8000) l090 = drift('l090', 0.9000) l130 = drift('l130', 1.3000) l220 = drift('l220', 2.2000) l280 = drift('l280', 2.8000) la2p = drift('la2p', 0.08323) lb2p = drift('lb2p', 0.1330) ld2p = drift('ld2p', 0.1920) ld3p = drift('ld3p', 0.1430) la3p = drift('la3p', 0.2320 - ldif) lb1p = drift('lb1p', 0.2200 - ldif) lb3p = drift('lb3p', 0.19897 - ldif) lc1p = drift('lc1p', 0.18704 - ldif) lc2p = drift('lc2p', 0.2260 - ldif) ld1p = drift('ld1p', 0.21409 - ldif) # --- markers --- inicio = marker('start') fim = marker('end') # --- beam screens --- scrn = marker('Scrn') # --- beam current monitors --- ict = marker('ICT') fct = marker('FCT') # --- beam position monitors --- bpm = marker('BPM') # --- correctors --- ch = hcorrector('CH', 0.0) cv = vcorrector('CV', 0.0) # --- quadrupoles --- qf1a = quadrupole('QF1A', 0.14, strengths['qf1a']) qf1b = quadrupole('QF1B', 0.14, strengths['qf1b']) qd2 = quadrupole('QD2', 0.14, strengths['qd2']) qf2 = quadrupole('QF2', 0.20, strengths['qf2']) qf3 = quadrupole('QF3', 0.20, strengths['qf3']) qd4a = quadrupole('QD4A', 0.14, strengths['qd4a']) qf4 = quadrupole('QF4', 0.20, strengths['qf4']) qd4b = quadrupole('QD4B', 0.14, strengths['qd4b']) # --- bending magnets --- d2r = (_math.pi/180) # -- b -- f = 5.011542/5.333333 h1 = rbend_sirius( 'B', 0.196, d2r*0.8597*f, 0, 0, 0, 0, 0, [0, 0, 0], _np.array([0, -0.163, -1.443, 0])*f) h2 = rbend_sirius( 'B', 0.192, d2r*0.8467*f, 0, 0, 0, 0, 0, [0, 0, 0], _np.array([0, -0.154, -1.418, 0])*f) h3 = rbend_sirius( 'B', 0.182, d2r*0.8099*f, 0, 0, 0, 0, 0, [0, 0, 0], _np.array([0, -0.140, -1.403, 0])*f) h4 = rbend_sirius( 'B', 0.010, d2r*0.0379*f, 0, 0, 0, 0, 0, [0, 0, 0], _np.array([0, -0.175, -1.245, 0])*f) h5 = rbend_sirius( 'B', 0.010, d2r*0.0274*f, 0, 0, 0, 0, 0, [0, 0, 0], _np.array([0, -0.115, -0.902, 0])*f) h6 = rbend_sirius( 'B', 0.013, d2r*0.0244*f, 0, 0, 0, 0, 0, [0, 0, 0], _np.array([0, -0.042, -1.194, 0])*f) h7 = rbend_sirius( 'B', 0.017, d2r*0.0216*f, 0, 0, 0, 0, 0, [0, 0, 0], _np.array([0, -0.008, -1.408, 0])*f) h8 = rbend_sirius( 'B', 0.020, d2r*0.0166*f, 0, 0, 0, 0, 0, [0, 0, 0], _np.array([0, 0.004, -1.276, 0])*f) h9 = rbend_sirius( 'B', 0.030, d2r*0.0136*f, 0, 0, 0, 0, 0, [0, 0, 0], _np.array([0, 0.006, -0.858, 0])*f) h10 = rbend_sirius( 'B', 0.05, d2r*0.0089*f, 0, 0, 0, 0, 0, [0, 0, 0], _np.array([0, 0.000, -0.050, 0])*f) mbend = marker('mB') bend = [h10, h9, h8, h7, h6, h5, h4, h3, h2, h1, mbend, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10] # -- Thin Septum -- dip_nam = 'EjeSeptF' dip_len = 0.5773 dip_ang = -3.6 * d2r dip_K = 0.0 dip_S = 0.00 h1 = rbend_sirius( dip_nam, dip_len/2, dip_ang/2, 1*dip_ang/2, 
0*dip_ang, 0, 0, 0, [0, 0, 0], [0, dip_K, dip_S]) h2 = rbend_sirius( dip_nam, dip_len/2, dip_ang/2, 0*dip_ang/2, 1*dip_ang/2, 0, 0, 0, [0, 0, 0], [0, dip_K, dip_S]) bejesf = marker('bEjeSeptF') # marker at the beginning of thin septum mejesf = marker('mEjeSeptF') # marker at the center of thin septum eejesf = marker('eEjeSeptF') # marker at the end of thin septum ejesf = [bejesf, h1, mejesf, h2, eejesf] # -- bo thick ejection septum -- dip_nam = 'EjeSeptG' dip_len = 0.5773 dip_ang = -3.6 * d2r dip_K = 0.0 dip_S = 0.00 h1 = rbend_sirius( dip_nam, dip_len/2, dip_ang/2, 1*dip_ang/2, 0*dip_ang, 0, 0, 0, [0, 0, 0], [0, dip_K, dip_S]) h2 = rbend_sirius( dip_nam, dip_len/2, dip_ang/2, 0*dip_ang, 1*dip_ang/2, 0, 0, 0, [0, 0, 0], [0, dip_K, dip_S]) bejesg = marker('bEjeSeptG') # marker at the beginning of thick septum mejesg = marker('mEjeSeptG') # marker at the center of thick septum eejesg = marker('eEjeSeptG') # marker at the end of thick septum ejesg = [bejesg, h1, mejesg, h2, eejesg] # -- si thick injection septum (2 of these are used) -- dip_nam = 'InjSeptG' dip_len = 0.5773 dip_ang = +3.6 * d2r dip_K = 0.0 dip_S = 0.00 h1 = rbend_sirius( dip_nam, dip_len/2, dip_ang/2, 1*dip_ang/2, 0*dip_ang, 0, 0, 0, [0, 0, 0], [0, dip_K, dip_S]) h2 = rbend_sirius( dip_nam, dip_len/2, dip_ang/2, 0*dip_ang, 1*dip_ang/2, 0, 0, 0, [0, 0, 0], [0, dip_K, dip_S]) binjsg = marker('bInjSeptG') # marker at the beginning of thick septum minjsg = marker('mInjSeptG') # marker at the center of thick septum einjsg = marker('eInjSeptG') # marker at the end of thick septum injsg = [binjsg, h1, minjsg, h2, einjsg] # -- si thin injection septum -- dip_nam = 'InjSeptF' dip_len = 0.5773 dip_ang = +3.118 * d2r dip_K = 0.0 dip_S = 0.00 h1 = rbend_sirius( dip_nam, dip_len/2, dip_ang/2, 1*dip_ang/2, 0*dip_ang, 0, 0, 0, [0, 0, 0], [0, dip_K, dip_S]) h2 = rbend_sirius( dip_nam, dip_len/2, dip_ang/2, 0*dip_ang, 1*dip_ang/2, 0, 0, 0, [0, 0, 0], [0, dip_K, dip_S]) binjsf = marker('bInjSeptF') # marker at the beginning of thin septum minjsf = marker('mInjSeptF') # marker at the center of thin septum einjsf = marker('eInjSeptF') # marker at the end of thin septum injsf = [binjsf, h1, minjsf, h2, einjsf] # --- lines --- sec01 = [ ejesf, l025, ejesg, l060, cv, l090, qf1a, la2p, ict, l280, scrn, bpm, l020, ch, l020, qf1b, l020, cv, l020, la3p, bend] sec02 = [ l080, lb1p, qd2, lb2p, l080, scrn, bpm, l020, qf2, l020, ch, l025, cv, l015, lb3p, bend] sec03 = [lc1p, l220, qf3, l025, scrn, bpm, l020, ch, l025, cv, lc2p, bend] sec04 = [ ld1p, l130, qd4a, ld2p, l060, scrn, bpm, l020, cv, l025, ch, l020, qf4, ld3p, l020, qd4b, l060, fct, l040, ict, l040, scrn, bpm, cv, l020, injsg, l025, injsg, l025, injsf, scrn] elist = [inicio, sec01, sec02, sec03, sec04, fim] the_line = _pyacc_lat.build(elist) # shifts model to marker 'start' idx = _pyacc_lat.find_indices(the_line, 'fam_name', 'start') the_line = _pyacc_lat.shift(the_line, idx[0]) lengths = _pyacc_lat.get_attribute(the_line, 'length') for length in lengths: if length < 0: raise LatticeError('Model with negative drift!') # sets number of integration steps set_num_integ_steps(the_line) # -- define vacuum chamber for all elements the_line = set_vacuum_chamber(the_line) return the_line, twiss_at_start def get_optics_mode(optics_mode): """Return magnet strengths of a given opics mode.""" twiss_at_start = _pyacc_opt.Twiss.make_new( beta=[9.321, 12.881], alpha=[-2.647, 2.000], etax=[0.231, 0.069]) # -- selection of optics mode -- if optics_mode == 'M1': strengths = { 'qf1a': 1.70521151606, 'qf1b': 
1.734817173998, 'qd2': -2.8243902951, 'qf2': 2.76086143922, 'qf3': 2.632182549934, 'qd4a': -3.048732667316, 'qf4': 3.613066375692, 'qd4b': -1.46213606815, } elif optics_mode == 'M2': strengths = { 'qf1a': 1.670801801437, 'qf1b': 2.098494339697, 'qd2': -2.906779151209, 'qf2': 2.807031512313, 'qf3': 2.533815202102, 'qd4a': -2.962460334623, 'qf4': 3.537403658428, 'qd4b': -1.421177262593, } else: _pyacc_acc.AcceleratorException( 'Invalid TS optics mode: ' + optics_mode) return strengths, twiss_at_start def set_num_integ_steps(the_line): """Set number of integration steps in each lattice element.""" for i, _ in enumerate(the_line): if the_line[i].angle: length = the_line[i].length the_line[i].nr_steps = max(10, int(_math.ceil(length/0.035))) elif the_line[i].polynom_b[1]: the_line[i].nr_steps = 10 elif the_line[i].polynom_b[2]: the_line[i].nr_steps = 5 else: the_line[i].nr_steps = 1 def set_vacuum_chamber(the_line): """Set vacuum chamber for all elements.""" # -- default physical apertures -- for i, _ in enumerate(the_line): the_line[i].hmin = -0.012 the_line[i].hmax = +0.012 the_line[i].vmin = -0.012 the_line[i].vmax = +0.012 # -- bo ejection septa -- beg = _pyacc_lat.find_indices(the_line, 'fam_name', 'bEjeSeptF')[0] end = _pyacc_lat.find_indices(the_line, 'fam_name', 'eEjeSeptG')[0] for i in range(beg, end+1): the_line[i].hmin = -0.0150 the_line[i].hmax = +0.0150 the_line[i].vmin = -0.0040 the_line[i].vmax = +0.0040 # -- si thick injection septum -- beg = _pyacc_lat.find_indices(the_line, 'fam_name', 'bInjSeptG')[0] end = _pyacc_lat.find_indices(the_line, 'fam_name', 'eInjSeptG')[0] for i in range(beg, end+1): the_line[i].hmin = -0.0045 the_line[i].hmax = +0.0045 the_line[i].vmin = -0.0035 the_line[i].vmax = +0.0035 # -- si thin injection septum -- beg = _pyacc_lat.find_indices(the_line, 'fam_name', 'bInjSeptF')[0] end = _pyacc_lat.find_indices(the_line, 'fam_name', 'eInjSeptF')[0] for i in range(beg, end+1): the_line[i].hmin = -0.0150 the_line[i].hmax = +0.0150 the_line[i].vmin = -0.0035 the_line[i].vmax = +0.0035 return the_line
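The integration-step rule in set_num_integ_steps is easy to check by hand: elements with a bending angle get at least 10 steps, and roughly one step per 3.5 cm of magnetic length. A small standalone check, with nr_steps_for_bend as an illustrative name only:

import math

def nr_steps_for_bend(length):
    """Same rule as set_num_integ_steps above for elements with a bending angle."""
    return max(10, int(math.ceil(length / 0.035)))

# The 0.5773 m septa are modelled as two 0.28865 m halves; each is still below
# the 10-step floor (ceil(0.28865 / 0.035) = 9), so both get 10 steps.
print(nr_steps_for_bend(0.28865))   # 10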
mit
-4,336,585,749,294,970,400
33.583871
78
0.544819
false
2.336239
false
false
false
toshka/torrt
torrt/notifiers/telegram.py
1
1820
import logging import requests from requests import RequestException from torrt.base_notifier import BaseNotifier from torrt.utils import NotifierClassesRegistry LOGGER = logging.getLogger(__name__) class TelegramNotifier(BaseNotifier): """Telegram bot notifier. See instructions how to create bot at https://core.telegram.org/bots/api""" alias = 'telegram' url = 'https://api.telegram.org/bot' def __init__(self, token, chat_id): """ :param token: str - Telegram's bot token :param chat_id: str - Telegram's chat ID """ self.token = token self.chat_id = chat_id def make_message(self, torrent_data): return '''The following torrents were updated:\n%s''' \ % '\n'.join(map(lambda t: t['name'], torrent_data.values())) def test_configuration(self): url = '%s%s/getMe' % (self.url, self.token) r = requests.get(url) return r.json().get('ok', False) def send_message(self, msg): url = '%s%s/sendMessage' % (self.url, self.token) try: response = requests.post(url, data={'chat_id': self.chat_id, 'text': msg}) except RequestException as e: LOGGER.error('Failed to send Telegram message: %s', e) else: if response.ok: json_data = response.json() if json_data['ok']: LOGGER.debug('Telegram message was sent to user %s', self.chat_id) else: LOGGER.error('Telegram notification not send: %s', json_data['description']) else: LOGGER.error('Telegram notification not send. Response code: %s (%s)', response.status_code, response.reason) NotifierClassesRegistry.add(TelegramNotifier)
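For reference, the sendMessage call above can be exercised on its own. This is a hedged, standalone reduction of TelegramNotifier.send_message that returns a boolean instead of logging; send_telegram_message is an illustrative name and not part of torrt.

import requests

def send_telegram_message(token, chat_id, text):
    """Post `text` to a chat via the Bot API endpoint used by the notifier above."""
    url = 'https://api.telegram.org/bot%s/sendMessage' % token
    response = requests.post(url, data={'chat_id': chat_id, 'text': text})
    return response.ok and response.json().get('ok', False)

# send_telegram_message('123456789:replace-with-your-bot-token', '42', 'torrents updated')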
bsd-3-clause
-1,036,498,508,307,981,600
34
105
0.593407
false
3.947939
false
false
false
oaubert/advene
setup.py
1
6968
#! /usr/bin/env python3 import logging logger = logging.getLogger(__name__) import os from setuptools import setup, find_packages import sys # We define the main script name here (file in bin), since we have to change it for MacOS X SCRIPTNAME='advene' def check_changelog(maindir, version): """Check that the changelog for maindir matches the given version.""" with open(os.path.join( maindir, "CHANGES.txt" ), 'r') as f: l=f.readline() if not l.startswith('advene (' + version + ')'): logger.error("The CHANGES.txt does not seem to match version %s\n%s\nUpdate either the CHANGES.txt or the lib/advene/core/version.py file", version, l) sys.exit(1) return True def get_plugin_list(*package): """Return a plugin list from the given package. package is in fact a list of path/module path elements. No recursion is done. """ package= [ 'advene' ] + list(package) path=os.path.sep.join(package) prefix='.'.join(package) plugins=[] d=os.path.join('lib', path) if not os.path.exists(d): raise Exception("%s does not match a directory (%s does not exist)" % (prefix, d)) for n in os.listdir(d): name, ext = os.path.splitext(n) if ext != '.py': continue # Poor man's grep. if [ l for l in open(os.path.join(d, n)).readlines() if 'def register' in l ]: # It may be a plugin. Include it. plugins.append('.'.join((prefix, name))) return plugins def get_version(): """Get the version number of the package.""" maindir = os.path.dirname(os.path.abspath(sys.argv[0])) if os.path.exists(os.path.join(maindir, "setup.py")): # Chances are that we were in a development tree... libpath=os.path.join(maindir, "lib") sys.path.insert (0, libpath) import advene.core.version version=advene.core.version.version else: raise Exception("Unable to determine advene version number.") check_changelog(maindir, version) return version _version=get_version() platform_options={} def get_packages_list(): """Recursively find packages in lib. Return a list of packages (dot notation) suitable as packages parameter for distutils. """ if 'linux' in sys.platform: return find_packages('lib', exclude=["cherrypy.*"]) else: return find_packages('lib') def generate_data_dir(dir_, prefix="", postfix=""): """Return a structure suitable for datafiles from a directory. It will return a sequence of (directory, files) corresponding to the data in the given directory. prefix and postfix are dumbly added to dirname, so do not forget the trailing / for prefix, and leading / for postfix if necessary. """ l = [] installdir=prefix+dir_+postfix for dirname, dnames, fnames in os.walk(dir_): if fnames: if dirname.startswith(dir_): installdirname=dirname.replace(dir_, installdir, 1) l.append((installdirname, [ absf for absf in [ os.path.sep.join((dirname,f)) for f in fnames ] if not os.path.isdir(absf) ])) return l def generate_data_files(): # On Win32, we will install data files in # \Program Files\Advene\share\... # On MacOS X, it will be in Advene.app/Contents/Resources # On Unix, it will be # /usr/share/advene/... 
if sys.platform == 'win32' or sys.platform == 'darwin': prefix='' postfix='' else: prefix="share"+os.path.sep postfix=os.path.sep+"advene" r=generate_data_dir("share", postfix=postfix) r.extend(generate_data_dir("doc", prefix=prefix, postfix=postfix)) if not os.path.isdir("locale"): logger.warning("""**WARNING** Generating the locales with "cd po; make mo".""") os.system("pwd; cd po; make mo") if os.path.isdir("locale"): r.extend(generate_data_dir("locale", prefix=prefix)) else: logger.warning("""**WARNING** Cannot find locale directory.""") if sys.platform.startswith('linux'): # Install specific data files r.append( ( 'share/applications', [ 'share/advene.desktop' ] ) ) return r myname = "Olivier Aubert" myemail = "[email protected]" setup (name = "advene", version = _version, description = "Annotate DVds, Exchange on the NEt", keywords = "dvd,video,annotation", author = "Advene project team", author_email = myemail, maintainer = myname, maintainer_email = myemail, url = "https://www.advene.org/", license = "GPL", long_description = """Annotate DVds, Exchange on the NEt The Advene (Annotate DVd, Exchange on the NEt) project is aimed towards communities exchanging discourses (analysis, studies) about audiovisual documents (e.g. movies) in DVD format. This requires that audiovisual content and hypertext facilities be integrated, thanks to annotations providing explicit structures on audiovisual streams, upon which hypervideo documents can be engineered. . The cross-platform Advene application allows users to easily create comments and analyses of video comments, through the definition of time-aligned annotations and their mobilisation into automatically-generated or user-written comment views (HTML documents). Annotations can also be used to modify the rendition of the audiovisual document, thus providing virtual montage, captioning, navigation... capabilities. Users can exchange their comments/analyses in the form of Advene packages, independently from the video itself. . The Advene framework provides models and tools allowing to design and reuse annotations schemas; annotate video streams according to these schemas; generate and create Stream-Time Based (mainly video-centred) or User-Time Based (mainly text-centred) visualisations of the annotations. Schemas (annotation- and relation-types), annotations and relations, queries and views can be clustered and shared in units called packages. Hypervideo documents are generated when needed, both from packages (for annotation and view description) and DVDs (audiovisual streams). """, package_dir = {'': 'lib'}, packages = get_packages_list(), scripts = [ 'bin/%s' % SCRIPTNAME, 'bin/advene_import', 'bin/advene_export' ], data_files = generate_data_files(), classifiers = [ 'Environment :: X11 Applications :: GTK', 'Environment :: Win32 (MS Windows)', 'Development Status :: 5 - Production/Stable', 'License :: OSI Approved :: GNU General Public License (GPL)', 'Programming Language :: Python', 'Intended Audience :: End Users/Desktop', 'Operating System :: OS Independent', 'Topic :: Multimedia :: Video :: Non-Linear Editor' ], **platform_options )
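generate_data_dir above boils down to walking a tree and emitting (install_dir, [source files]) pairs in the shape setuptools expects for data_files. A simplified standalone variant follows; data_files_for is an illustrative name and it skips the prefix/postfix handling of the original.

import os

def data_files_for(tree, install_prefix=''):
    """Walk `tree` and return (install_dir, [files]) pairs for data_files."""
    pairs = []
    for dirname, _subdirs, filenames in os.walk(tree):
        if not filenames:
            continue
        install_dir = dirname.replace(tree, install_prefix + tree, 1)
        pairs.append((install_dir,
                      [os.path.join(dirname, f) for f in filenames]))
    return pairs

# e.g. data_files_for('share', 'usr/') could yield entries such as
# ('usr/share/pixmaps', ['share/pixmaps/advene.png']) on a Linux install.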
gpl-2.0
829,313,294,184,711,600
37.076503
159
0.65729
false
3.862528
false
false
false
willcassella/SinGE
Tools/SinGED/types.py
1
12310
# types.py import bpy from bpy.types import PropertyGroup from bpy.props import BoolProperty, IntProperty, FloatProperty, StringProperty, PointerProperty, EnumProperty, FloatVectorProperty from functools import partial def get_unused_component_types(scene=None, context=None): # Unused arguments del scene, context node_id = bpy.context.active_object.sge_node_id sge_scene = SinGEDProps.sge_scene node = sge_scene.get_node(node_id) used_component = sge_scene.get_node_components(node) result = [] for component_type in SinGEDProps.sge_typedb.component_types: if component_type not in (c.type.type_name for c in used_component): result.append((component_type, component_type, '')) return result def construct_property_display_name(prop_name): return prop_name.replace("_", " ") def construct_property_path(property_path_str, prop_name): if len(property_path_str) == 0: return [prop_name] return property_path_str.split('.') + [prop_name] def property_getter(component_type_name, property_path, default): try: # Get the active node and component instance sge_scene = SinGEDProps.sge_scene node_id = bpy.context.active_object.sge_node_id node = sge_scene.get_node(node_id) component_type = sge_scene.get_component_type(component_type_name) component_instance = component_type.get_instance(node) # Get the property value return component_instance.get_sub_property_immediate(property_path, default) except Exception: path = [component_type_name] path.extend(property_path) print("ERROR RETREIVING PROPERTY: {}".format(path)) return default def property_setter(component_type_name, property_path, value): # Get the active node and component instance sge_scene = SinGEDProps.sge_scene node_id = bpy.context.active_object.sge_node_id node = sge_scene.get_node(node_id) component_type = sge_scene.get_component_type(component_type_name) component_instance = component_type.get_instance(node) # Set the property value component_instance.set_sub_property_immediate(property_path, value) class SGETypes(PropertyGroup): sge_component_types = EnumProperty(items=get_unused_component_types) class SinGEDProps(PropertyGroup): sge_host = StringProperty(name='Host', default='localhost') sge_port = IntProperty(name='Port', default=1995) sge_types = PointerProperty(type=SGETypes) sge_realtime_update_delay = FloatProperty(default=0.033, precision=3, unit='TIME') sge_scene_path = StringProperty(name='Path', default='') sge_lightmap_light_dir = FloatVectorProperty(name="Light direction", subtype='XYZ', size=3, default=[0.0, -0.5, -0.5]) sge_lightmap_light_color = FloatVectorProperty(name="Light color", subtype='COLOR', size=3, default=[0.5, 0.5, 0.5]) sge_lightmap_light_intensity = FloatProperty(name="Light intensity", default=8.0) sge_lightmap_ambient_color = FloatVectorProperty(name="Ambient light color", subtype='COLOR', size=3, default=[0.5, 0.5, 0.5]) sge_lightmap_ambient_intensity = FloatProperty(name="Ambient light intensity", default=0.0) sge_lightmap_num_indirect_sample_sets = IntProperty(name="Indirect sample sets", subtype='UNSIGNED', default=16) sge_lightmap_num_accumulation_steps = IntProperty(name="Accumulation steps", subtype='UNSIGNED', default=1) sge_lightmap_num_post_steps = IntProperty(name="Post processing steps", subtype='UNSIGNED', default=2) sge_lightmap_path = StringProperty(name="Lightmap path") sge_session = None sge_typedb = None sge_scene = None sge_resource_manager = None class SGETypeBase(PropertyGroup): @classmethod def sge_unregister(cls): bpy.utils.unregister_class(cls) @classmethod def sge_create_property(cls, 
name): return PointerProperty(name=name, type=cls) @classmethod def sge_draw(cls, layout, parent_obj, parent_attr_name): # Draw each property recursively self = getattr(parent_obj, parent_attr_name) for attr_name, prop_name, prop_type in cls.sge_property_list: # If the property is a primitive type, don't give it a label if not issubclass(prop_type, SGEPrimitiveBase): layout.label(construct_property_display_name(prop_name)) prop_type.sge_draw(layout.column(), self, attr_name) class SGEPrimitiveBase(object): @staticmethod def sge_unregister(): return @staticmethod def sge_draw(layout, parent_obj, parent_attr_name): # Draw the property layout.prop(parent_obj, parent_attr_name) class SGEBool(SGEPrimitiveBase): @staticmethod def sge_create_property(name): return BoolProperty( name=construct_property_display_name(name), get=lambda outer: property_getter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), False), set=lambda outer, value: property_setter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), value)) class SGEInt(SGEPrimitiveBase): @staticmethod def sge_create_property(name): return IntProperty( name=construct_property_display_name(name), get=lambda outer: property_getter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), 0), set=lambda outer, value: property_setter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), value)) class SGEUInt(SGEPrimitiveBase): @staticmethod def sge_create_property(name): return IntProperty( name=construct_property_display_name(name), subtype='UNSIGNED', get=lambda outer: property_getter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), 0), set=lambda outer, value: property_setter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), value)) class SGEFloat(SGEPrimitiveBase): @staticmethod def sge_create_property(name): return FloatProperty( name=construct_property_display_name(name), get=lambda outer: property_getter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), 0.0), set=lambda outer, value: property_setter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), value)) class SGEString(SGEPrimitiveBase): @staticmethod def sge_create_property(name): return StringProperty( name=construct_property_display_name(name), get=lambda outer: property_getter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), ""), set=lambda outer, value: property_setter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), value)) class SGEAngle(SGEPrimitiveBase): @staticmethod def sge_create_property(name): return FloatProperty( name=construct_property_display_name(name), subtype='ANGLE', get=lambda outer: property_getter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), 0), set=lambda outer, value: property_setter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), value)) class SGEColorRGBA8(SGEPrimitiveBase): @staticmethod def sge_get(outer, prop_name): value = property_getter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, prop_name), "ffffffff") red = int(value[: 2], 16) green = int(value[2: 4], 16) blue = int(value[4: 6], 16) alpha = int(value[6: 8], 16) return [float(red)/255, float(green)/255, float(blue)/255, 
float(alpha)/255] @staticmethod def sge_set(outer, prop_name, value): red = int(value[0] * 255) green = int(value[1] * 255) blue = int(value[2] * 255) alpha = int(value[3] * 255) property_setter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, prop_name), "%0.2x%0.2x%0.2x%0.2x" % (red, green, blue, alpha)) @staticmethod def sge_create_property(name): return FloatVectorProperty( name=name, subtype='COLOR', size=4, min=0.0, max=1.0, get=lambda outer: SGEColorRGBA8.sge_get(outer, name), set=lambda outer, value: SGEColorRGBA8.sge_set(outer, name, value)) class SGEColorRGBF32(SGEPrimitiveBase): @staticmethod def sge_create_property(name): return FloatVectorProperty( name=construct_property_display_name(name), subtype='COLOR', size=3, get=lambda outer: property_getter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), [0.0, 0.0, 0.0]), set=lambda outer, value: property_setter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, name), value)) class SGEVec2(SGEPrimitiveBase): @staticmethod def sge_get(outer, prop_name): value = property_getter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, prop_name), None) if value is None: return [0.0, 0.0] else: return [value['x'], value['y']] @staticmethod def sge_set(outer, prop_name, value): property_setter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, prop_name), {'x': value[0], 'y': value[1]}) @staticmethod def sge_create_property(name): return FloatVectorProperty( name=construct_property_display_name(name), subtype='XYZ', size=2, get=lambda outer: SGEVec2.sge_get(outer, name), set=lambda outer, value: SGEVec2.sge_set(outer, name, value)) class SGEVec3(SGEPrimitiveBase): @staticmethod def sge_get(outer, prop_name): value = property_getter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, prop_name), None) if value is None: return [0.0, 0.0, 0.0] else: return [value['x'], value['y'], value['z']] @staticmethod def sge_set(outer, prop_name, value): property_setter(outer.sge_component_type_name, construct_property_path(outer.sge_property_path, prop_name), {'x': value[0], 'y': value[1], 'z': value[2]}) @staticmethod def sge_create_property(name): return FloatVectorProperty( name=construct_property_display_name(name), subtype='XYZ', size=3, get=lambda outer: SGEVec3.sge_get(outer, name), set=lambda outer, value: SGEVec3.sge_set(outer, name, value)) def create_blender_type(typedb, type_name, type_info): # Create dictionaries for the class and the properties property_list = list() class_dict = { 'sge_type_name': type_name, 'sge_property_list': property_list, 'sge_component_type_name': StringProperty(), 'sge_property_path': StringProperty(), } # Define each property if 'properties' in type_info: properties = list(type_info['properties'].items()) properties.sort(key=lambda prop: prop[1]['index']) for prop_name, prop_info in properties: # Get the property's type prop_type = typedb.get_type(prop_info['type']) # Create an attribute name for the property attr_name = "sge_prop_{}".format(prop_name) # Create the class dictionary entry class_dict[attr_name] = prop_type.sge_create_property(prop_name) # Create the property list entry property_list.append((attr_name, prop_name, prop_type)) # Generate a sanitary name for the type class_name = type_name.replace("::", "_") # Create the type blender_type = type(class_name, (SGETypeBase,), class_dict) # Register it with Blender 
bpy.utils.register_class(blender_type) return blender_type
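create_blender_type relies on type(name, bases, dict) to assemble a class at runtime from the type database. The same mechanism can be shown without Blender; everything below (Base, make_type, Vec2) is hypothetical and only illustrates the dynamic-class pattern.

class Base(object):
    def describe(self):
        return {name: getattr(self, attr) for attr, name in self.fields}

def make_type(type_name, properties):
    """Build a class whose attributes come from a (name, default) list."""
    class_dict = {'fields': []}
    for prop_name, default in properties:
        attr = 'prop_%s' % prop_name
        class_dict[attr] = default
        class_dict['fields'].append((attr, prop_name))
    # same sanitisation idea as create_blender_type: '::' is not a valid class name
    return type(type_name.replace('::', '_'), (Base,), class_dict)

Vec2 = make_type('sge::Vec2', [('x', 0.0), ('y', 0.0)])
print(Vec2.__name__, Vec2().describe())   # sge_Vec2 {'x': 0.0, 'y': 0.0}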
mit
-3,868,031,757,075,581,000
39.89701
167
0.670106
false
3.507123
false
false
false
Seattle-Meal-Maps/seattle-meal-maps-api
meal_api/meal_api/urls.py
1
1205
"""meal_api URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.9/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import url, include from django.contrib import admin from rest_framework import routers from api.views import DataViewSet, HoursViewSet router = routers.DefaultRouter() router.register(r'services', DataViewSet) router.register(r'hours', HoursViewSet) hours_list = HoursViewSet.as_view({ 'get': 'list' }) data_list = DataViewSet.as_view({ 'get': 'list' }) urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'^', include(router.urls)), url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')) ]
mit
-6,016,341,527,609,289,000
32.472222
82
0.703734
false
3.37535
false
false
false
ZeitOnline/z3c.celery
src/z3c/celery/session.py
1
1860
import threading import transaction import zope.interface import transaction.interfaces class CelerySession(threading.local): """Thread local session of data to be sent to Celery.""" def __init__(self): self.tasks = [] self._needs_to_join = True def add_call(self, method, *args, **kw): self._join_transaction() self.tasks.append((method, args, kw)) def reset(self): self.tasks = [] self._needs_to_join = True def _join_transaction(self): if not self._needs_to_join: return dm = CeleryDataManager(self) transaction.get().join(dm) self._needs_to_join = False def _flush(self): for method, args, kw in self.tasks: method(*args, **kw) self.reset() def __len__(self): """Number of tasks in the session.""" return len(self.tasks) celery_session = CelerySession() @zope.interface.implementer(transaction.interfaces.IDataManager) class CeleryDataManager(object): """DataManager embedding the access to celery into the transaction.""" transaction_manager = None def __init__(self, session): self.session = session def abort(self, transaction): self.session.reset() def tpc_begin(self, transaction): pass def commit(self, transaction): pass tpc_abort = abort def tpc_vote(self, transaction): self.session._flush() def tpc_finish(self, transaction): pass def sortKey(self): # Sort last, so that sending to celery is done after all other # DataManagers signalled an okay. return "~z3c.celery" def __repr__(self): """Custom repr.""" return '<{0.__module__}.{0.__name__} for {1}, {2}>'.format( self.__class__, transaction.get(), self.session)
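A hedged usage sketch for the session above: calls queued through celery_session only run when the enclosing transaction commits (the data manager flushes them in tpc_vote), and an abort simply resets the queue. Here notify stands in for any callable, for example a Celery task's delay method, and is not part of z3c.celery.

from z3c.celery.session import celery_session
import transaction

def notify(message):
    print('sent:', message)

transaction.begin()
celery_session.add_call(notify, 'object 1 changed')
celery_session.add_call(notify, 'object 2 changed')
transaction.commit()      # tpc_vote flushes both queued calls here

transaction.begin()
celery_session.add_call(notify, 'never sent')
transaction.abort()       # abort() resets the session without calling notify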
bsd-3-clause
-8,065,323,295,136,975,000
23.473684
74
0.601075
false
3.982869
false
false
false
bnoi/scikit-tracker
sktracker/tracker/cost_function/tests/test_abstract_cost_functions.py
1
1500
# -*- coding: utf-8 -*- from __future__ import unicode_literals from __future__ import division from __future__ import absolute_import from __future__ import print_function from nose.tools import assert_raises import sys import pandas as pd import numpy as np from sktracker.tracker.cost_function import AbstractCostFunction def test_abstract_cost_function(): cost_func = AbstractCostFunction(context={}, parameters={}) assert cost_func.get_block() == None def test_abstract_cost_function_check_context(): cost_func = AbstractCostFunction(context={'cost': 1}, parameters={}) assert_raises(ValueError, cost_func.check_context, 'test_string', str) cost_func.context['test_string'] = 5 assert_raises(TypeError, cost_func.check_context, 'test_string', str) cost_func.context['test_string'] = "i am a string" ### This fails in py2.7 if sys.version_info[0] > 2: cost_func.check_context('test_string', str) assert True def test_abstract_cost_function_check_columns(): cost_func = AbstractCostFunction(context={}, parameters={}) df = pd.DataFrame([np.arange(0, 5), np.arange(20, 25)], columns=['x', 'y', 'z', 'w', 't']) cost_func.check_columns(df, ['t', 'z', 'y']) cost_func.check_columns([df], ['t', 'z', 'y']) df = pd.DataFrame([np.arange(0, 4), np.arange(20, 24)], columns=['x', 'y', 'w', 't']) assert_raises(ValueError, cost_func.check_columns, df, ['t', 'z', 'y'])
bsd-3-clause
8,376,291,811,204,249,000
26.777778
75
0.64
false
3.440367
true
false
false
chubbymaggie/idalink
idalink/memory.py
1
10682
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (C) 2013- Yan Shoshitaishvili aka. zardus # Ruoyu Wang aka. fish # Andrew Dutcher aka. rhelmot # Kevin Borgolte aka. cao # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA __all__ = ["get_memory", "IDAMemory", "CachedIDAMemory", "IDAPermissions", "CachedIDAPermissions"] import collections import itertools import logging import operator LOG = logging.getLogger("idalink.ida_mem") # Helper functions. def _dict_values_sorted_by_key(dictionary): # This should be a yield from instead. """Internal helper to return the values of a dictionary, sorted by key. """ for _, value in sorted(dictionary.iteritems(), key=operator.itemgetter(0)): yield value def _ondemand(f): """Decorator to only request information if not in cache already. """ name = f.__name__ def func(self, *args, **kwargs): if not args and not kwargs: if hasattr(self, "_" + name): return getattr(self, "_" + name) a = f(self, *args, **kwargs) setattr(self, "_" + name, a) return a else: return f(self, *args, **kwargs) func.__name__ = name return func # Functions others are allowed to call. def get_memory(idaapi, start, size, default_byte=None): # TODO: Documentation if idaapi is None: idaapi = __import__("idaapi") if size == 0: return {} # We are optimistic and assume it's a continous memory area at_address = idaapi.get_many_bytes(start, size) d = {} if at_address is None: # It was not, resort to binary research if size == 1: if default_byte is not None: LOG.debug("Using default byte for %d", start) d[start] = default_byte return d mid = start + size / 2 first_size = mid - start second_size = size - first_size left = get_memory(idaapi, start, first_size, default_byte=default_byte) right = get_memory(idaapi, mid, second_size, default_byte=default_byte) if default_byte is None: # will be nonsequential d.update(left) d.update(right) else: # it will be sequential, so let's combine it chained = itertools.chain(_dict_values_sorted_by_key(left), _dict_values_sorted_by_key(right)) d[start] = "".join(chained) else: d[start] = at_address return d class IDAKeys(collections.MutableMapping): # pylint: disable=W0223 # TODO: delitem, setitem, getitem are abstract, should be fixed, # disabled warning should be removed def __init__(self, ida): self.ida = ida # Gets the "heads" (instructions and data items) and head sizes from IDA @_ondemand def heads(self, exclude=()): # TODO: Documentation LOG.debug("Getting heads from IDA for file %s", self.ida.filename) keys = [-1] + list(exclude) + [self.ida.idc.MAXADDR + 1] ranges = [] for i in range(len(keys) - 1): a, b = keys[i], keys[i+1] if a - b > 1: ranges.append((a+1, b-1)) heads = {} for start, end in ranges: for head in self.ida.idautils.Heads(start, end, 1): heads[head] = self.ida.idc.ItemSize(head) return heads @_ondemand def segments(self): # TODO: Documentation LOG.debug("Getting segments from IDA for file 
%s", self.ida.filename) segments_size = {} for s in self.ida.idautils.Segments(): segments_size[s] = self.ida.idc.SegEnd(s) - self.ida.idc.SegStart(s) return segments_size @_ondemand def idakeys(self): # TODO: Documentation keys = set() for h, s in self.segments().iteritems(): for i in range(s): keys.add(h + i) for h, s in self.heads(exclude=keys).iteritems(): for i in range(s): keys.add(h + i) LOG.debug("Done getting keys.") return keys def __iter__(self): # TODO: Refactor to be more pythonic for key in self.idakeys(): yield key def __len__(self): # This is significantly faster than list(self.__iter__) because # we do not need to keep the whole list in memory, just the accumulator. return sum(1 for _ in self) def __contains__(self, key): return key in self.keys() def reset(self): # TODO: Documentation if hasattr(self, "_heads"): delattr(self, "_heads") if hasattr(self, "_segments"): delattr(self, "_segments") if hasattr(self, "_idakeys"): delattr(self, "_idakeys") class IDAPermissions(IDAKeys): def __init__(self, ida, default_perm=7): super(IDAPermissions, self).__init__(ida) self.default_perm = default_perm def __getitem__(self, address): # Only do things that we actually have in IDA if address not in self: raise KeyError(address) seg_start = self.ida.idc.SegStart(address) if seg_start == self.ida.idc.BADADDR: # We can really only return the default here return self.default_perm return self.ida.idc.GetSegmentAttr(seg_start, self.ida.idc.SEGATTR_PERM) def __setitem__(self, address, value): # Nothing we can do here pass def __delitem__(self, address, value): # Nothing we can do here pass class CachedIDAPermissions(IDAPermissions): def __init__(self, ida, default_perm=7): super(CachedIDAPermissions, self).__init__(ida) self.permissions = {} self.default_perm = default_perm def __getitem__(self, address): if address in self.permissions: return self.permissions[address] p = super(CachedIDAPermissions, self).__getitem__(address) # cache the segment seg_start = self.ida.idc.SegStart(address) seg_end = self.ida.idc.SegEnd(address) if seg_start == self.ida.idc.BADADDR: self.permissions[address] = p else: for i in range(seg_start, seg_end): self.permissions[i] = p return p def __setitem__(self, address, value): self.permissions[address] = value def __delitem__(self, address): self.permissions.pop(address, None) def reset(self): # TODO: Documentation self.permissions.clear() super(CachedIDAPermissions, self).reset() class IDAMemory(IDAKeys): def __init__(self, ida, default_byte=chr(0xff)): super(IDAMemory, self).__init__(ida) self.default_byte = default_byte def __getitem__(self, address): # only do things that we actually have in IDA if address not in self: raise KeyError(address) value = self.ida.idaapi.get_many_bytes(address, 1) if value is None: value = self.default_byte return value def __setitem__(self, address, value): self.ida.idaapi.patch_byte(address, value) def __delitem__(self, address): # nothing we can really do here pass class CachedIDAMemory(IDAMemory): def __init__(self, ida, default_byte=chr(0xff)): super(CachedIDAMemory, self).__init__(ida, default_byte) self.local = {} self._pulled = False @property def pulled(self): """Check if memory has been pulled from the remote link. 
""" return self._pulled def __getitem__(self, address): if address in self.local: return self.local[address] LOG.debug("Uncached byte: 0x%x", address) one = super(CachedIDAMemory, self).__getitem__(address) # cache the byte if it's not in a segment seg_start = self.ida.idc.SegStart(address) if seg_start == self.ida.idc.BADADDR: self.local[address] = one else: # otherwise, cache the segment seg_end = self.ida.idc.SegEnd(address) seg_size = seg_end - seg_start self._load_memory(seg_start, seg_size) return one def __iter__(self): if self.pulled: return self.local.__iter__() else: return super(CachedIDAMemory, self).__iter__() def __setitem__(self, address, value): self.local[address] = value def __delitem__(self, address): self.local.pop(address, None) def get_memory(self, start, size): """Retrieve an area of memory from IDA. Returns a sparse dictionary of address -> value. """ LOG.debug("get_memory: %d bytes from %x", size, start) return get_memory(self.ida.idaapi, start, size, default_byte=self.default_byte) def pull_defined(self): if self.pulled: return start = self.ida.idc.MinEA() size = self.ida.idc.MaxEA() - start LOG.debug("Loading memory of %s (%d bytes)...", self.ida.filename, size) chunks = self.ida.remote_idalink_module.get_memory(None, start, size) LOG.debug("Storing loaded memory of %s...", self.ida.filename) self._store_loaded_chunks(chunks) self._pulled = True def reset(self): self.local.clear() self._pulled = False super(CachedIDAMemory, self).reset() # Helpers def _load_memory(self, start, size): chunks = self.get_memory(start, size) self.store_loaded_chunks(chunks) def _store_loaded_chunks(self, chunks): LOG.debug("Updating cache with %d chunks", len(chunks)) for start, buff in chunks.iteritems(): for n, i in enumerate(buff): if start + n not in self.local: self.local[start + n] = i
gpl-3.0
-4,595,868,428,126,769,700
30.791667
80
0.586407
false
3.812277
false
false
false
sdpp/python-keystoneclient
keystoneclient/tests/unit/v2_0/test_service_catalog.py
1
9165
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystoneclient import access from keystoneclient import exceptions from keystoneclient import fixture from keystoneclient.tests.unit.v2_0 import client_fixtures from keystoneclient.tests.unit.v2_0 import utils class ServiceCatalogTest(utils.TestCase): def setUp(self): super(ServiceCatalogTest, self).setUp() self.AUTH_RESPONSE_BODY = client_fixtures.auth_response_body() def test_building_a_service_catalog(self): auth_ref = access.AccessInfo.factory(None, self.AUTH_RESPONSE_BODY) sc = auth_ref.service_catalog self.assertEqual(sc.url_for(service_type='compute'), "https://compute.north.host/v1/1234") self.assertEqual(sc.url_for('tenantId', '1', service_type='compute'), "https://compute.north.host/v1/1234") self.assertEqual(sc.url_for('tenantId', '2', service_type='compute'), "https://compute.north.host/v1.1/3456") self.assertRaises(exceptions.EndpointNotFound, sc.url_for, "region", "South", service_type='compute') def test_service_catalog_endpoints(self): auth_ref = access.AccessInfo.factory(None, self.AUTH_RESPONSE_BODY) sc = auth_ref.service_catalog public_ep = sc.get_endpoints(service_type='compute', endpoint_type='publicURL') self.assertEqual(public_ep['compute'][1]['tenantId'], '2') self.assertEqual(public_ep['compute'][1]['versionId'], '1.1') self.assertEqual(public_ep['compute'][1]['internalURL'], "https://compute.north.host/v1.1/3456") def test_service_catalog_regions(self): self.AUTH_RESPONSE_BODY['access']['region_name'] = "North" # Setting region_name on the catalog is deprecated. 
with self.deprecations.expect_deprecations_here(): auth_ref = access.AccessInfo.factory(None, self.AUTH_RESPONSE_BODY) sc = auth_ref.service_catalog url = sc.url_for(service_type='image', endpoint_type='publicURL') self.assertEqual(url, "https://image.north.host/v1/") self.AUTH_RESPONSE_BODY['access']['region_name'] = "South" auth_ref = access.AccessInfo.factory(None, self.AUTH_RESPONSE_BODY) sc = auth_ref.service_catalog url = sc.url_for(service_type='image', endpoint_type='internalURL') self.assertEqual(url, "https://image-internal.south.host/v1/") def test_service_catalog_empty(self): self.AUTH_RESPONSE_BODY['access']['serviceCatalog'] = [] auth_ref = access.AccessInfo.factory(None, self.AUTH_RESPONSE_BODY) self.assertRaises(exceptions.EmptyCatalog, auth_ref.service_catalog.url_for, service_type='image', endpoint_type='internalURL') def test_service_catalog_get_endpoints_region_names(self): auth_ref = access.AccessInfo.factory(None, self.AUTH_RESPONSE_BODY) sc = auth_ref.service_catalog endpoints = sc.get_endpoints(service_type='image', region_name='North') self.assertEqual(len(endpoints), 1) self.assertEqual(endpoints['image'][0]['publicURL'], 'https://image.north.host/v1/') endpoints = sc.get_endpoints(service_type='image', region_name='South') self.assertEqual(len(endpoints), 1) self.assertEqual(endpoints['image'][0]['publicURL'], 'https://image.south.host/v1/') endpoints = sc.get_endpoints(service_type='compute') self.assertEqual(len(endpoints['compute']), 2) endpoints = sc.get_endpoints(service_type='compute', region_name='North') self.assertEqual(len(endpoints['compute']), 2) endpoints = sc.get_endpoints(service_type='compute', region_name='West') self.assertEqual(len(endpoints['compute']), 0) def test_service_catalog_url_for_region_names(self): auth_ref = access.AccessInfo.factory(None, self.AUTH_RESPONSE_BODY) sc = auth_ref.service_catalog url = sc.url_for(service_type='image', region_name='North') self.assertEqual(url, 'https://image.north.host/v1/') url = sc.url_for(service_type='image', region_name='South') self.assertEqual(url, 'https://image.south.host/v1/') url = sc.url_for(service_type='compute', region_name='North', attr='versionId', filter_value='1.1') self.assertEqual(url, 'https://compute.north.host/v1.1/3456') self.assertRaises(exceptions.EndpointNotFound, sc.url_for, service_type='image', region_name='West') def test_servcie_catalog_get_url_region_names(self): auth_ref = access.AccessInfo.factory(None, self.AUTH_RESPONSE_BODY) sc = auth_ref.service_catalog urls = sc.get_urls(service_type='image') self.assertEqual(len(urls), 2) urls = sc.get_urls(service_type='image', region_name='North') self.assertEqual(len(urls), 1) self.assertEqual(urls[0], 'https://image.north.host/v1/') urls = sc.get_urls(service_type='image', region_name='South') self.assertEqual(len(urls), 1) self.assertEqual(urls[0], 'https://image.south.host/v1/') urls = sc.get_urls(service_type='image', region_name='West') self.assertIsNone(urls) def test_service_catalog_param_overrides_body_region(self): self.AUTH_RESPONSE_BODY['access']['region_name'] = "North" # Setting region_name on the catalog is deprecated. 
with self.deprecations.expect_deprecations_here(): auth_ref = access.AccessInfo.factory(None, self.AUTH_RESPONSE_BODY) sc = auth_ref.service_catalog url = sc.url_for(service_type='image') self.assertEqual(url, 'https://image.north.host/v1/') url = sc.url_for(service_type='image', region_name='South') self.assertEqual(url, 'https://image.south.host/v1/') endpoints = sc.get_endpoints(service_type='image') self.assertEqual(len(endpoints['image']), 1) self.assertEqual(endpoints['image'][0]['publicURL'], 'https://image.north.host/v1/') endpoints = sc.get_endpoints(service_type='image', region_name='South') self.assertEqual(len(endpoints['image']), 1) self.assertEqual(endpoints['image'][0]['publicURL'], 'https://image.south.host/v1/') def test_service_catalog_service_name(self): auth_ref = access.AccessInfo.factory(resp=None, body=self.AUTH_RESPONSE_BODY) sc = auth_ref.service_catalog url = sc.url_for(service_name='Image Servers', endpoint_type='public', service_type='image', region_name='North') self.assertEqual('https://image.north.host/v1/', url) self.assertRaises(exceptions.EndpointNotFound, sc.url_for, service_name='Image Servers', service_type='compute') urls = sc.get_urls(service_type='image', service_name='Image Servers', endpoint_type='public') self.assertIn('https://image.north.host/v1/', urls) self.assertIn('https://image.south.host/v1/', urls) urls = sc.get_urls(service_type='image', service_name='Servers', endpoint_type='public') self.assertIsNone(urls) def test_service_catalog_multiple_service_types(self): token = fixture.V2Token() token.set_scope() for i in range(3): s = token.add_service('compute') s.add_endpoint(public='public-%d' % i, admin='admin-%d' % i, internal='internal-%d' % i, region='region-%d' % i) auth_ref = access.AccessInfo.factory(resp=None, body=token) urls = auth_ref.service_catalog.get_urls(service_type='compute', endpoint_type='publicURL') self.assertEqual(set(['public-0', 'public-1', 'public-2']), set(urls)) urls = auth_ref.service_catalog.get_urls(service_type='compute', endpoint_type='publicURL', region_name='region-1') self.assertEqual(('public-1', ), urls)
apache-2.0
-4,503,097,674,112,767,500
43.926471
79
0.602728
false
3.950431
true
false
false
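The service-catalog tests above exercise url_for, get_endpoints and get_urls against a canned V2 auth response. A minimal sketch of the same lookup flow, built from a fixture.V2Token instead of the client_fixtures body (the endpoint URLs and region below are illustrative values, not ones from the fixture):

from keystoneclient import access, fixture

# Build a scoped V2 token with a single compute service and endpoint.
token = fixture.V2Token()
token.set_scope()
service = token.add_service('compute')
service.add_endpoint(public='https://compute.example/v1/1234',
                     admin='https://compute-admin.example/v1/1234',
                     internal='https://compute-int.example/v1/1234',
                     region='RegionOne')

# AccessInfo wraps the token body; its service_catalog resolves endpoints.
auth_ref = access.AccessInfo.factory(resp=None, body=token)
sc = auth_ref.service_catalog

print(sc.url_for(service_type='compute', endpoint_type='publicURL'))
print(sc.get_urls(service_type='compute', region_name='RegionOne'))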
jantman/biweeklybudget
biweeklybudget/interest.py
1
37651
""" The latest version of this package is available at: <http://github.com/jantman/biweeklybudget> ################################################################################ Copyright 2016 Jason Antman <[email protected]> <http://www.jasonantman.com> This file is part of biweeklybudget, also known as biweeklybudget. biweeklybudget is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. biweeklybudget is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with biweeklybudget. If not, see <http://www.gnu.org/licenses/>. The Copyright and Authors attributions contained herein may not be removed or otherwise altered, except to add the Author attribution of a contributor to this work. (Additional Terms pursuant to Section 7b of the AGPL v3) ################################################################################ While not legally required, I sincerely request that anyone who finds bugs please submit them at <https://github.com/jantman/biweeklybudget> or to me via email, and that you send any contributions or improvements either as a pull request on GitHub, or to me via email. ################################################################################ AUTHORS: Jason Antman <[email protected]> <http://www.jasonantman.com> ################################################################################ """ import logging from datetime import timedelta from decimal import Decimal from dateutil.relativedelta import relativedelta from calendar import monthrange from biweeklybudget.models.account import Account, AcctType logger = logging.getLogger(__name__) class InterestHelper(object): def __init__(self, db_sess, increases={}, onetimes={}): """ Initialize interest calculation helper. :param db_sess: Database Session :type db_sess: sqlalchemy.orm.session.Session :param increases: dict of :py:class:`datetime.date` to :py:class:`decimal.Decimal` for new max payment amount to take effect on the specified date. :type increases: dict :param onetimes: dict of :py:class:`datetime.date` to :py:class:`decimal.Decimal` for additional amounts to add to the first maximum payment on or after the given date :type onetimes: dict """ self._sess = db_sess self._accounts = self._get_credit_accounts() self._statements = self._make_statements(self._accounts) self._increases = increases self._onetimes = onetimes @property def accounts(self): """ Return a dict of `account_id` to :py:class:`~.Account` for all Credit type accounts with OFX data present. :return: dict of account_id to Account instance :rtype: dict """ return self._accounts def _get_credit_accounts(self): """ Return a dict of `account_id` to :py:class:`~.Account` for all Credit type accounts with OFX data present. :return: dict of account_id to Account instance :rtype: dict """ accts = self._sess.query(Account).filter( Account.acct_type.__eq__(AcctType.Credit), Account.is_active.__eq__(True) ).all() res = {a.id: a for a in accts} return res def _make_statements(self, accounts): """ Make :py:class:`~.CCStatement` instances for each account; return a dict of `account_id` to CCStatement instance. 
:param accounts: dict of (int) account_id to Account instance :type accounts: dict :return: dict of (int) account_id to CCStatement instance :rtype: dict """ res = {} for a_id, acct in accounts.items(): icls = INTEREST_CALCULATION_NAMES[acct.interest_class_name]['cls']( acct.effective_apr ) bill_period = _BillingPeriod(acct.balance.ledger_date.date()) min_pay_cls = MIN_PAYMENT_FORMULA_NAMES[ acct.min_payment_class_name]['cls']() res[a_id] = CCStatement( icls, abs(acct.balance.ledger), min_pay_cls, bill_period, end_balance=abs(acct.balance.ledger), interest_amt=acct.last_interest_charge ) logger.debug('Statements: %s', res) return res @property def min_payments(self): """ Return a dict of `account_id` to minimum payment for the latest statement, for each account. :return: dict of `account_id` to minimum payment (Decimal) :rtype: dict """ res = {} for a_id, stmt in self._statements.items(): res[a_id] = stmt.minimum_payment logger.debug('Minimum payments by account_id: %s', res) return res def calculate_payoffs(self): """ Calculate payoffs for each account/statement. :return: dict of payoff information. Keys are payoff method names. Values are dicts, with keys "description" (str description of the payoff method), "doc" (the docstring of the class), and "results". The "results" dict has integer `account_id` as the key, and values are dicts with keys "payoff_months" (int), "total_payments" (Decimal), "total_interest" (Decimal) and ``next_payment`` (Decimal). :rtype: dict """ res = {} max_total = sum(list(self.min_payments.values())) for name in sorted(PAYOFF_METHOD_NAMES.keys()): cls = PAYOFF_METHOD_NAMES[name]['cls'] klass = cls( max_total, increases=self._increases, onetimes=self._onetimes ) if not cls.show_in_ui: continue res[name] = { 'description': PAYOFF_METHOD_NAMES[name]['description'], 'doc': PAYOFF_METHOD_NAMES[name]['doc'] } try: res[name]['results'] = self._calc_payoff_method(klass) except Exception as ex: res[name]['error'] = str(ex) logger.error('Minimum payment method %s failed: %s', name, ex) return res def _calc_payoff_method(self, cls): """ Calculate payoffs using one method. :param cls: payoff method class :type cls: biweeklybudget.interest._PayoffMethod :return: Dict with integer `account_id` as the key, and values are dicts with keys "payoff_months" (int), "total_payments" (Decimal), "total_interest" (Decimal), "next_payment" (Decimal). :rtype: dict """ balances = { x: self._statements[x].principal for x in self._statements.keys() } res = {} calc = calculate_payoffs(cls, list(self._statements.values())) for idx, result in enumerate(calc): a_id = list(self._statements.keys())[idx] res[a_id] = { 'payoff_months': result[0], 'total_payments': result[1], 'total_interest': result[1] - balances[a_id], 'next_payment': result[2] } return res class _InterestCalculation(object): #: Human-readable string name of the interest calculation type. description = None def __init__(self, apr): """ :param apr: Annual Percentage Rate as a decimal :type apr: decimal.Decimal """ self._apr = apr def __repr__(self): return '<%s(decimal.Decimal(\'%s\'))>' % ( self.__class__.__name__, self.apr ) @property def apr(self): return self._apr def calculate(self, principal, first_d, last_d, transactions={}): """ Calculate compound interest for the specified principal. 
:param principal: balance at beginning of statement period :type principal: decimal.Decimal :param first_d: date of beginning of statement period :type first_d: datetime.date :param last_d: last date of statement period :type last_d: datetime.date :param transactions: dict of datetime.date to float amount adjust the balance by on the specified dates. :type transactions: dict :return: dict describing the result: end_balance (float), interest_paid (float) :rtype: dict """ raise NotImplementedError("Must implement in subclass") class AdbCompoundedDaily(_InterestCalculation): """ Average Daily Balance method, compounded daily (like American Express). """ #: Human-readable string name of the interest calculation type. description = 'Average Daily Balance Compounded Daily (AmEx)' def calculate(self, principal, first_d, last_d, transactions={}): """ Calculate compound interest for the specified principal. :param principal: balance at beginning of statement period :type principal: decimal.Decimal :param first_d: date of beginning of statement period :type first_d: datetime.date :param last_d: last date of statement period :type last_d: datetime.date :param transactions: dict of datetime.date to float amount adjust the balance by on the specified dates. :type transactions: dict :return: dict describing the result: end_balance (float), interest_paid (float) :rtype: dict """ dpr = self._apr / Decimal(365.0) interest = Decimal(0.0) num_days = 0 bal_total = Decimal(0.0) bal = principal d = first_d while d <= last_d: num_days += 1 if d in transactions: bal += transactions[d] int_amt = bal * dpr interest += int_amt bal += int_amt bal_total += bal d += timedelta(days=1) adb = bal_total / Decimal(num_days) final = adb * self._apr * num_days / Decimal(365.0) bal += final * dpr return { 'interest_paid': final, 'end_balance': bal } class SimpleInterest(_InterestCalculation): """ Simple interest, charged on balance at the end of the billing period. """ #: Human-readable string name of the interest calculation type. description = 'Interest charged once on the balance at end of period.' def calculate(self, principal, first_d, last_d, transactions={}): """ Calculate compound interest for the specified principal. :param principal: balance at beginning of statement period :type principal: decimal.Decimal :param first_d: date of beginning of statement period :type first_d: datetime.date :param last_d: last date of statement period :type last_d: datetime.date :param transactions: dict of datetime.date to float amount adjust the balance by on the specified dates. :type transactions: dict :return: dict describing the result: end_balance (float), interest_paid (float) :rtype: dict """ num_days = 0 bal = principal d = first_d while d <= last_d: num_days += 1 if d in transactions: bal += transactions[d] d += timedelta(days=1) final = bal * self._apr * num_days / Decimal(365.0) return { 'interest_paid': final, 'end_balance': bal + final } class _BillingPeriod(object): #: human-readable string description of the billing period type description = None def __init__(self, end_date, start_date=None): """ Construct a billing period that is defined by a number of days. 
:param end_date: end date of the billing period :type end_date: datetime.date :param start_date: start date for billing period; if specified, will override calculation of start date :type start_date: datetime.date """ self._period_for_date = end_date if start_date is None: if end_date.day < 15: # if end date is < 15, period is month before end_date self._end_date = (end_date.replace(day=1) - timedelta(days=1)) self._start_date = self._end_date.replace(day=1) else: # if end date >= 15, period is month containing end_date self._start_date = end_date.replace(day=1) self._end_date = end_date.replace( day=(monthrange( end_date.year, end_date.month )[1]) ) else: self._start_date = start_date self._end_date = self._start_date.replace( day=(monthrange( self._start_date.year, self._start_date.month )[1]) ) def __repr__(self): return '<BillingPeriod(%s, start_date=%s)>' % ( self._end_date, self._start_date ) @property def start_date(self): return self._start_date @property def end_date(self): return self._end_date @property def payment_date(self): period_length = (self._end_date - self._start_date).days return self._start_date + timedelta(days=int(period_length / 2)) @property def next_period(self): """ Return the next billing period after this one. :return: next billing period :rtype: _BillingPeriod """ return _BillingPeriod( self._end_date + relativedelta(months=1), start_date=(self._end_date + timedelta(days=1)) ) @property def prev_period(self): """ Return the previous billing period before this one. :return: previous billing period :rtype: _BillingPeriod """ e = self._start_date - timedelta(days=1) return _BillingPeriod(e, start_date=e.replace(day=1)) class _MinPaymentFormula(object): #: human-readable string description of the formula description = None def __init__(self): pass def calculate(self, balance, interest): """ Calculate the minimum payment for a statement with the given balance and interest amount. :param balance: balance amount for the statement :type balance: decimal.Decimal :param interest: interest charged for the statement period :type interest: decimal.Decimal :return: minimum payment for the statement :rtype: decimal.Decimal """ raise NotImplementedError() class MinPaymentAmEx(_MinPaymentFormula): """ Interest on last statement plus 1% of balance, or $35 if balance is less than $35. """ #: human-readable string description of the formula description = 'AmEx - Greatest of Interest Plus 1% of Principal, or $35' def __init__(self): super(MinPaymentAmEx, self).__init__() def calculate(self, balance, interest): """ Calculate the minimum payment for a statement with the given balance and interest amount. :param balance: balance amount for the statement :type balance: decimal.Decimal :param interest: interest charged for the statement period :type interest: decimal.Decimal :return: minimum payment for the statement :rtype: decimal.Decimal """ amt = interest + (balance * Decimal('.01')) if amt < 35: amt = 35 return amt class MinPaymentDiscover(_MinPaymentFormula): """ Greater of: - $35; or - 2% of the New Balance shown on your billing statement; or - $20, plus any of the following charges as shown on your billing statement: fees for any debt protection product that you enrolled in on or after 2/1/2015; Interest Charges; and Late Fees. 
""" #: human-readable string description of the formula description = 'Discover - Greatest of 2% of Principal, or $20 plus ' \ 'Interest, or $35' def __init__(self): super(MinPaymentDiscover, self).__init__() def calculate(self, balance, interest): """ Calculate the minimum payment for a statement with the given balance and interest amount. :param balance: balance amount for the statement :type balance: decimal.Decimal :param interest: interest charged for the statement period :type interest: decimal.Decimal :return: minimum payment for the statement :rtype: decimal.Decimal """ options = [ Decimal(35), balance * Decimal('0.02'), Decimal(20) + interest ] return max(options) class MinPaymentCiti(_MinPaymentFormula): """ Greater of: - $25; - The new balance, if it's less than $25; - 1 percent of the new balance, plus the current statement's interest charges or minimum interest charges, plus late fees; - 1.5% of the new balance, rounded to the nearest dollar amount. In all cases, add past fees and finance charges due, plus any amount in excess of credit line. """ #: human-readable string description of the formula description = 'Citi - Greatest of 1.5% of Principal, or 1% of Principal ' \ 'plus interest and fees, or $25, or Principal' def __init__(self): super(MinPaymentCiti, self).__init__() def calculate(self, balance, interest): """ Calculate the minimum payment for a statement with the given balance and interest amount. :param balance: balance amount for the statement :type balance: decimal.Decimal :param interest: interest charged for the statement period :type interest: decimal.Decimal :return: minimum payment for the statement :rtype: decimal.Decimal """ options = [ 25, (balance * Decimal('0.01')) + interest, round(balance * Decimal('0.015')) ] if balance < Decimal('25'): options.append(balance) return max(options) class _PayoffMethod(object): """ A payoff method for multiple cards; a method of figuring out how much to pay on each card, each month. """ #: human-readable string name of the payoff method description = None def __init__(self, max_total_payment=None, increases={}, onetimes={}): """ Initialize a payment method. :param max_total_payment: maximum total payment for all statements :type max_total_payment: decimal.Decimal :param increases: dict of :py:class:`datetime.date` to :py:class:`decimal.Decimal` for new max payment amount to take effect on the specified date. :type increases: dict :param onetimes: dict of :py:class:`datetime.date` to :py:class:`decimal.Decimal` for additional amounts to add to the first maximum payment on or after the given date :type onetimes: dict """ self._max_total = max_total_payment self._increases = increases self._onetimes = onetimes def __repr__(self): return '<%s(%s, increases=%s, onetimes=%s)>' % ( self.__class__.__name__, self._max_total, self._increases, self._onetimes ) def max_total_for_period(self, period): """ Given a :py:class:`~._BillingPeriod`, calculate the maximum total payment for that period, including both `self._max_total` and the increases and onetimes specified on the class constructor. 
:param period: billing period to get maximum total payment for :type period: _BillingPeriod :return: maximum total payment for the period :rtype: decimal.Decimal """ res = self._max_total for inc_d in sorted(self._increases.keys(), reverse=True): if inc_d > period.payment_date: continue inc_amt = self._increases[inc_d] logger.debug('Found increase of %s starting on %s, applied to ' 'period %s', inc_amt, inc_d, period) res = inc_amt break for ot_d, ot_amt in self._onetimes.items(): if period.prev_period.payment_date < ot_d <= period.payment_date: logger.debug('Found onetime of %s on %s in period %s', ot_amt, ot_d, period) res += ot_amt logger.debug('Period %s _max_total=%s max_total_for_period=%s', period, self._max_total, res) return res def find_payments(self, statements): """ Given a list of statements, return a list of payment amounts to make on each of the statements. :param statements: statements to pay, list of :py:class:`~.CCStatement` :type statements: list :return: list of payment amounts to make, same order as ``statements`` :rtype: list """ raise NotImplementedError() class MinPaymentMethod(_PayoffMethod): """ Pay only the minimum on each statement. """ description = 'Minimum Payment Only' show_in_ui = True def find_payments(self, statements): """ Given a list of statements, return a list of payment amounts to make on each of the statements. :param statements: statements to pay, list of :py:class:`~.CCStatement` :type statements: list :return: list of payment amounts to make, same order as ``statements`` :rtype: list """ return [s.minimum_payment for s in statements] class FixedPaymentMethod(_PayoffMethod): """ TESTING ONLY - pay the same amount on every statement. """ description = 'TESTING ONLY - Fixed Payment for All Statements' show_in_ui = False def find_payments(self, statements): """ Given a list of statements, return a list of payment amounts to make on each of the statements. :param statements: statements to pay, list of :py:class:`~.CCStatement` :type statements: list :return: list of payment amounts to make, same order as ``statements`` :rtype: list """ return [self._max_total for _ in statements] class HighestBalanceFirstMethod(_PayoffMethod): """ Pay statements off from highest to lowest balance. """ description = 'Highest to Lowest Balance' show_in_ui = True def find_payments(self, statements): """ Given a list of statements, return a list of payment amounts to make on each of the statements. :param statements: statements to pay, list of :py:class:`~.CCStatement` :type statements: list :return: list of payment amounts to make, same order as ``statements`` :rtype: list """ max_total = self.max_total_for_period(statements[0].billing_period) min_sum = sum([s.minimum_payment for s in statements]) if min_sum > max_total: raise TypeError( 'ERROR: Max total payment of %s is less than sum of minimum ' 'payments (%s)' % (max_total, min_sum) ) max_bal = Decimal('0.00') max_idx = None for idx, stmt in enumerate(statements): if stmt.principal > max_bal: max_bal = stmt.principal max_idx = idx res = [None for _ in statements] max_pay = max_total - ( min_sum - statements[max_idx].minimum_payment ) for idx, stmt in enumerate(statements): if idx == max_idx: res[idx] = max_pay else: res[idx] = statements[idx].minimum_payment return res class HighestInterestRateFirstMethod(_PayoffMethod): """ Pay statements off from highest to lowest interest rate. 
""" description = 'Highest to Lowest Interest Rate' show_in_ui = True def find_payments(self, statements): """ Given a list of statements, return a list of payment amounts to make on each of the statements. :param statements: statements to pay, list of :py:class:`~.CCStatement` :type statements: list :return: list of payment amounts to make, same order as ``statements`` :rtype: list """ max_total = self.max_total_for_period(statements[0].billing_period) min_sum = sum([s.minimum_payment for s in statements]) if min_sum > max_total: raise TypeError( 'ERROR: Max total payment of %s is less than sum of minimum ' 'payments (%s)' % (max_total, min_sum) ) max_apr = Decimal('0.00') max_idx = None for idx, stmt in enumerate(statements): if stmt.apr > max_apr: max_apr = stmt.apr max_idx = idx res = [None for _ in statements] max_pay = max_total - ( min_sum - statements[max_idx].minimum_payment ) for idx, stmt in enumerate(statements): if idx == max_idx: res[idx] = max_pay else: res[idx] = statements[idx].minimum_payment return res class LowestBalanceFirstMethod(_PayoffMethod): """ Pay statements off from lowest to highest balance, a.k.a. the "snowball" method. """ description = 'Lowest to Highest Balance (a.k.a. Snowball Method)' show_in_ui = True def find_payments(self, statements): """ Given a list of statements, return a list of payment amounts to make on each of the statements. :param statements: statements to pay, list of :py:class:`~.CCStatement` :type statements: list :return: list of payment amounts to make, same order as ``statements`` :rtype: list """ max_total = self.max_total_for_period(statements[0].billing_period) min_sum = sum([s.minimum_payment for s in statements]) if min_sum > max_total: raise TypeError( 'ERROR: Max total payment of %s is less than sum of minimum ' 'payments (%s)' % (max_total, min_sum) ) min_bal = Decimal('+Infinity') min_idx = None for idx, stmt in enumerate(statements): if stmt.principal < min_bal: min_bal = stmt.principal min_idx = idx res = [None for _ in statements] min_pay = max_total - ( min_sum - statements[min_idx].minimum_payment ) for idx, stmt in enumerate(statements): if idx == min_idx: res[idx] = min_pay else: res[idx] = statements[idx].minimum_payment return res class LowestInterestRateFirstMethod(_PayoffMethod): """ Pay statements off from lowest to highest interest rate. """ description = 'Lowest to Highest Interest Rate' show_in_ui = True def find_payments(self, statements): """ Given a list of statements, return a list of payment amounts to make on each of the statements. 
:param statements: statements to pay, list of :py:class:`~.CCStatement` :type statements: list :return: list of payment amounts to make, same order as ``statements`` :rtype: list """ max_total = self.max_total_for_period(statements[0].billing_period) min_sum = sum([s.minimum_payment for s in statements]) if min_sum > max_total: raise TypeError( 'ERROR: Max total payment of %s is less than sum of minimum ' 'payments (%s)' % (max_total, min_sum) ) min_apr = Decimal('+Infinity') min_idx = None for idx, stmt in enumerate(statements): if stmt.apr < min_apr: min_apr = stmt.apr min_idx = idx res = [None for _ in statements] min_pay = max_total - ( min_sum - statements[min_idx].minimum_payment ) for idx, stmt in enumerate(statements): if idx == min_idx: res[idx] = min_pay else: res[idx] = statements[idx].minimum_payment return res def calculate_payoffs(payment_method, statements): """ Calculate the amount of time (in years) and total amount of money required to pay off the cards associated with the given list of statements. Return a list of (`float` number of years, `decimal.Decimal` amount paid, `decimal.Decimal` first payment amount) tuples for each item in `statements`. :param payment_method: method used for calculating payment amount to make on each statement; subclass of _PayoffMethod :type payment_method: _PayoffMethod :param statements: list of :py:class:`~.CCStatement` objects to pay off. :type statements: list :return: list of (`float` number of billing periods, `decimal.Decimal` amount paid, `decimal.Decimal` first payment amount) tuples for each item in `statements` :rtype: list """ def unpaid(s): return [x for x in s.keys() if s[x]['done'] is False] payoffs = {} logger.debug( 'calculating payoff via %s for: %s', payment_method, statements ) for idx, stmt in enumerate(statements): payoffs[stmt] = { 'months': 0, 'amt': Decimal('0.0'), 'idx': idx, 'done': False, 'next_pymt_amt': None } while len(unpaid(payoffs)) > 0: u = unpaid(payoffs) to_pay = payment_method.find_payments(u) for stmt, p_amt in dict(zip(u, to_pay)).items(): if stmt.principal <= Decimal('0'): payoffs[stmt]['done'] = True continue if stmt.principal <= p_amt: payoffs[stmt]['done'] = True payoffs[stmt]['months'] += 1 # increment months payoffs[stmt]['amt'] += stmt.principal if payoffs[stmt]['next_pymt_amt'] is None: payoffs[stmt]['next_pymt_amt'] = stmt.principal continue payoffs[stmt]['months'] += 1 # increment months payoffs[stmt]['amt'] += p_amt if payoffs[stmt]['next_pymt_amt'] is None: payoffs[stmt]['next_pymt_amt'] = p_amt new_s = stmt.pay(Decimal('-1') * p_amt) payoffs[new_s] = payoffs[stmt] del payoffs[stmt] res = [] for s in sorted(payoffs, key=lambda x: payoffs[x]['idx']): tmp = ( payoffs[s]['months'], payoffs[s]['amt'], payoffs[s]['next_pymt_amt'] ) if payoffs[s]['next_pymt_amt'] is None: tmp = ( payoffs[s]['months'], payoffs[s]['amt'], Decimal('0.0') ) res.append(tmp) return res class CCStatement(object): """ Represent a credit card statement (one billing period). """ def __init__(self, interest_cls, principal, min_payment_cls, billing_period, transactions={}, end_balance=None, interest_amt=None): """ Initialize a CCStatement. At least one of `start_date` and `end_date` must be specified. 
:param interest_cls: Interest calculation method :type interest_cls: _InterestCalculation :param principal: starting principal for this billing period :type principal: decimal.Decimal :param min_payment_cls: Minimum payment calculation method :type min_payment_cls: _MinPaymentFormula :param billing_period: Billing period :type billing_period: _BillingPeriod :param transactions: transactions applied during this statement. Dict of :py:class:`datetime.date` to :py:class:`decimal.Decimal`. :type transactions: dict :param end_balance: the ending balance of the statement, if known. If not specified, this value will be calculated. :type end_balance: decimal.Decimal :param interest_amt: The amount of interest charged this statement. If not specified, this value will be calculated. :type interest_amt: decimal.Decimal """ if not isinstance(billing_period, _BillingPeriod): raise TypeError( 'billing_period must be an instance of _BillingPeriod' ) self._billing_period = billing_period if not isinstance(interest_cls, _InterestCalculation): raise TypeError( 'interest_cls must be an instance of _InterestCalculation' ) self._interest_cls = interest_cls if not isinstance(min_payment_cls, _MinPaymentFormula): raise TypeError( 'min_payment_cls must be an instance of _MinPaymentFormula' ) self._min_pay_cls = min_payment_cls self._orig_principal = principal self._min_pay = None self._transactions = transactions self._principal = end_balance self._interest_amt = interest_amt if end_balance is None or interest_amt is None: res = self._interest_cls.calculate( principal, self._billing_period.start_date, self._billing_period.end_date, self._transactions ) if end_balance is None: self._principal = res['end_balance'] if interest_amt is None: self._interest_amt = res['interest_paid'] def __repr__(self): return '<CCStatement(interest_cls=%s principal=%s min_payment_cls=%s ' \ 'transactions=%s end_balance=%s ' \ 'interest_amt=%s start_date=%s end_date=%s)>' % ( self._interest_cls, self._principal, self._min_pay_cls, self._transactions, self._principal, self._interest_amt, self.start_date, self.end_date ) @property def principal(self): return self._principal @property def billing_period(self): """ Return the Billing Period for this statement. :return: billing period for this statement :rtype: _BillingPeriod """ return self._billing_period @property def interest(self): return self._interest_amt @property def start_date(self): return self._billing_period.start_date @property def end_date(self): return self._billing_period.end_date @property def apr(self): return self._interest_cls.apr @property def minimum_payment(self): """ Return the minimum payment for the next billing cycle. :return: minimum payment for the next billing cycle :rtype: decimal.Decimal """ return self._min_pay_cls.calculate( self._principal, self._interest_amt ) def next_with_transactions(self, transactions={}): """ Return a new CCStatement reflecting the next billing period, with a payment of `amount` applied to it. :param transactions: dict of transactions, `datetime.date` to `Decimal` :type transactions: dict :return: next period statement, with transactions applied :rtype: CCStatement """ return CCStatement( self._interest_cls, self._principal, self._min_pay_cls, self._billing_period.next_period, transactions=transactions ) def pay(self, amount): """ Return a new CCStatement reflecting the next billing period, with a payment of `amount` applied to it at the middle of the period. 
:param amount: amount to pay during the next statement period :type amount: decimal.Decimal :return: next period statement, with payment applied :rtype: CCStatement """ return self.next_with_transactions({ self._billing_period.next_period.payment_date: amount }) def subclass_dict(klass): d = {} for cls in klass.__subclasses__(): d[cls.__name__] = { 'description': cls.description, 'doc': cls.__doc__.strip(), 'cls': cls } return d #: Dict mapping interest calculation class names to their description and #: docstring. INTEREST_CALCULATION_NAMES = subclass_dict(_InterestCalculation) #: Dict mapping Minimum Payment Formula class names to their description and #: docstring. MIN_PAYMENT_FORMULA_NAMES = subclass_dict(_MinPaymentFormula) #: Dict mapping Payoff Method class names to their description and docstring. PAYOFF_METHOD_NAMES = subclass_dict(_PayoffMethod)
agpl-3.0
-4,902,928,390,450,669,000
34.319887
80
0.587235
false
4.281929
false
false
false
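interest.py above wires an interest formula, a minimum-payment formula, a billing period and a payoff method together through CCStatement and calculate_payoffs. A minimal sketch of that flow using only names defined in the module; the APR, balance, interest amount and statement date are made-up figures, the import assumes the package and its model dependencies are installed, and _BillingPeriod is module-private but is what CCStatement expects:

from datetime import date
from decimal import Decimal

from biweeklybudget.interest import (
    AdbCompoundedDaily, MinPaymentAmEx, MinPaymentMethod,
    _BillingPeriod, CCStatement, calculate_payoffs
)

# One statement: $2,500 balance at 19.99% APR, AmEx-style minimum payment.
period = _BillingPeriod(date(2017, 6, 30))
stmt = CCStatement(
    AdbCompoundedDaily(Decimal('0.1999')),  # interest calculation method
    Decimal('2500.00'),                     # starting principal
    MinPaymentAmEx(),                       # minimum payment formula
    period,
    end_balance=Decimal('2500.00'),
    interest_amt=Decimal('40.00')
)

# Pay only the minimum every period and see how long payoff takes.
method = MinPaymentMethod(stmt.minimum_payment)
months, total_paid, first_payment = calculate_payoffs(method, [stmt])[0]
print(months, total_paid, first_payment)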
rouge8/20questions
admin.py
1
2796
#!/usr/bin/env python # -*- coding: utf-8 -*- # ''' admin.py Andy Freeland and Dan Levy 5 June 2010 Provides administrative functions, such as retraining characters and deleting objects and characters. Accessed at the /admin url. Laughably insecure. ''' import web import config, model import twentyquestions as game urls = ( '', 'admin', '/', 'admin', '/dq', 'delete_question', '/do', 'delete_object', '/data', 'data', '/retrain/(\d+)', 'retrain' ) render = web.template.render('templates', base='base') app = web.application(urls, locals()) class admin: def GET(self): '''Renders the admin page, presenting a menu of administrative functions.''' return render.admin() class delete_question: def GET(self): '''Lists all of the questions so that selected questions can be deleted.''' questions = model.get_questions() return render.delete_question(questions) def POST(self): '''Deletes selected questions and returns to the admin page.''' question_ids = web.input() for id in question_ids: model.delete_question(id) raise web.seeother('/') class delete_object: def GET(self): '''Lists all of the objects so that selected objects can be deleted.''' objects = model.get_objects() return render.delete_object(objects) def POST(self): '''Deletes selected objects. and returns to the admin page.''' object_ids = web.input() for id in object_ids: model.delete_object(id) raise web.seeother('/') class data: def GET(self): '''Renders a page listing all of the objects so that they can be retrained.''' objects = model.get_objects() return render.data(list(objects)) class retrain: def GET(self, object_id): '''Renders a page with all of the questions and values for a specified object_id so that it can be retrained manually.''' object = model.get_object_by_id(object_id) questions = model.get_questions() data = model.get_data_dictionary() if object: return render.retrain(object, list(questions), data) else: raise web.seeother('/') # returns to admin page def POST(self, object_id): '''Updates object_id with the newly selected answers to questions.''' inputs = web.input() for question_id in inputs: answer = inputs[question_id] if answer in ['yes','no']: value = eval('game.' + answer) * game.RETRAIN_SCALE # STRONGLY weights values learned this way model.update_data(object_id, question_id, value) raise web.seeother('/data')
mit
3,521,186,683,873,155,000
31.511628
110
0.609084
false
4.111765
false
false
false
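admin.py above is a web.py sub-application: a urls tuple maps URL regexes to handler classes whose GET/POST methods render templates or redirect. A stripped-down sketch of the same pattern (the paths and handler names here are illustrative, not taken from the app):

import web

urls = (
    '/', 'index',
    r'/item/(\d+)', 'item',
)

class index:
    def GET(self):
        return 'admin menu'

class item:
    def GET(self, item_id):
        # Captured regex groups arrive as positional string arguments.
        return 'item %s' % item_id

    def POST(self, item_id):
        data = web.input()       # submitted form fields as a Storage object
        raise web.seeother('/')  # redirect back to the menu, as admin.py does

app = web.application(urls, locals())

if __name__ == '__main__':
    app.run()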
gunny26/webstorage
bin/filename_to_checksum_dict.py
1
6212
#!/usr/bin/python3 # pylint: disable=line-too-long # disable=locally-disabled, multiple-statements, fixme, line-too-long """ command line program to create/restore/test WebStorageArchives """ import os import hashlib import datetime import dateutil.parser import time import sys import socket import argparse import stat import re import sqlite3 import logging logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format='%(message)s') logging.getLogger("requests").setLevel(logging.WARNING) logging.getLogger("urllib3").setLevel(logging.WARNING) import json import dbm # own modules from webstorage import WebStorageArchive as WebStorageArchive from webstorage import FileStorageClient as FileStorageClient class NtoM(object): """ build n : m dependent key value stores """ def __init__(self, keyname1, keyname2): self.__keyname1 = keyname1 self.__keyname2 = keyname2 self.__filename = filename self.__data = { self.__keyname1 : {}, self.__keyname2 : {} } self.__dirty = False # indicate if data is modified in memory def add(self, **kwds): key1 = kwds[self.__keyname1] key2 = kwds[self.__keyname2] if key1 in self.__data[self.__keyname1]: if key2 not in self.__data[self.__keyname1][key1]: self.__data[self.__keyname1][key1].add(key2) # ignore if value is already in list else: self.__data[self.__keyname1][key1] = set([key2, ]) if key2 in self.__data[self.__keyname2]: if key1 not in self.__data[self.__keyname2][key2]: self.__data[self.__keyname2][key2].add(key1) # ignore if value is already in list else: self.__data[self.__keyname2][key2] = set([key1, ]) self.__dirty = True def save(self, filename): """ dump internal data to sqlite database """ starttime = time.time() conn = sqlite3.connect(filename) cur = conn.cursor() # key 1 tablename1 = "%s_to_%s" % (self.__keyname1, self.__keyname2) logging.debug("saving to %s", tablename1) cur.execute("drop table if exists %s" % tablename1) conn.commit() cur.execute("create table if not exists %s ('%s', '%s')" % (tablename1, self.__keyname1, self.__keyname2)) for key, value in self.__data[self.__keyname1].items(): cur.execute("insert into %s values (?, ?)" % tablename1, (key, json.dumps(list(value)))) conn.commit() # key 2 tablename2 = "%s_to_%s" % (self.__keyname2, self.__keyname1) logging.debug("saving to %s", tablename2) cur.execute("drop table if exists %s" % tablename2) conn.commit() cur.execute("create table if not exists %s ('%s', '%s')" % (tablename2, self.__keyname2, self.__keyname1)) for key, value in self.__data[self.__keyname2].items(): cur.execute("insert into %s values (?, ?)" % tablename2, (key, json.dumps(list(value)))) conn.commit() logging.debug("save done in %0.2f s", time.time()-starttime) logging.debug("saved %d in %s", len(self.__data[self.__keyname1]), self.__keyname1) logging.debug("saved %d in %s", len(self.__data[self.__keyname2]), self.__keyname2) self.__dirty = False def load(self, filename): """ dump internal data to sqlite database """ starttime = time.time() conn = sqlite3.connect(filename) cur = conn.cursor() try: # key 1 tablename1 = "%s_to_%s" % (self.__keyname1, self.__keyname2) for row in cur.execute("select * from %s" % tablename1).fetchall(): self.__data[self.__keyname1][row[0]] = set(json.loads(row[1])) # key 2 tablename2 = "%s_to_%s" % (self.__keyname2, self.__keyname1) for row in cur.execute("select * from %s" % tablename2).fetchall(): self.__data[self.__keyname2][row[0]] = set(json.loads(row[1])) logging.debug("load done in %0.2f s", time.time()-starttime) logging.debug("loaded %d in %s", 
len(self.__data[self.__keyname1]), self.__keyname1) logging.debug("loaded %d in %s", len(self.__data[self.__keyname2]), self.__keyname2) except sqlite3.OperationalError as exc: logging.info("ignoring if table does not exist") def update(filename): conn = sqlite3.connect(filename) cur = conn.cursor() cur.execute("create table if not exists backupsets_done (backupset)") myhostname = socket.gethostname() wsa = WebStorageArchive() backupsets = wsa.get_backupsets(myhostname) # like wse0000107_mesznera_2016-12-06T13:48:13.400565.wstar.gz filename_to_checksum = NtoM("absfile", "checksum") filename_to_checksum.load(filename) filename_to_backupset = NtoM("absfile", "backupset") filename_to_backupset.load(filename) backupsets_done = [row[0] for row in cur.execute("select backupset from backupsets_done").fetchall()] for backupset in backupsets: starttime = time.time() #if backupset in backupsets_done: # print(" backupset %s already done" % backupset) # continue hostname, tag, isoformat_ext = backupset.split("_") isoformat = isoformat_ext[:-9] datestring = dateutil.parser.parse(isoformat) print(hostname, tag, dateutil.parser.parse(isoformat)) data = wsa.get(backupset) for absfile in data["filedata"].keys(): checksum = data["filedata"][absfile]["checksum"] filename_to_checksum.add(absfile=absfile, checksum=checksum) filename_to_backupset.add(absfile=absfile, backupset=backupset) # print(data["filedata"][absfile]) #cur.execute("insert into backupsets_done values (?)", (backupset,)) #conn.commit() logging.info(" done in %0.2f s", time.time()-starttime) filename_to_checksum.save(filename) filename_to_backupset.save(filename) if __name__ == "__main__": filename = "filename_to_checksum_dict.db" #main(filename) update(filename)
gpl-2.0
6,207,287,639,097,450,000
40.139073
114
0.61027
false
3.715311
false
false
false
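The NtoM helper above keeps a bidirectional many-to-many mapping in memory and persists each direction to its own sqlite table via save()/load(). A short usage sketch; note that NtoM.__init__ reads a module-level name called filename in its defining module (it is not a constructor parameter), so that global has to be set on the module first. The import path is an assumption about how the bin/ script would be loaded, and importing it also requires the webstorage package:

import filename_to_checksum_dict as f2c  # assumed import path for the bin/ script

# __init__ references a global 'filename' in its own module, so set it there.
f2c.filename = 'filename_to_checksum_dict.db'

mapping = f2c.NtoM('absfile', 'checksum')
mapping.load(f2c.filename)   # silently ignores missing tables on first run

# add() takes the two key names given to the constructor as keyword arguments.
mapping.add(absfile='/etc/hosts', checksum='deadbeef')
mapping.add(absfile='/etc/hosts', checksum='cafebabe')  # same file, another version
mapping.add(absfile='/etc/motd', checksum='deadbeef')   # same checksum, another file

# Writes both directions: absfile_to_checksum and checksum_to_absfile tables.
mapping.save(f2c.filename)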
ionux/bitforge
bitforge/utils/encoding.py
1
10832
# -*- coding: utf-8 -*- """ Various utilities useful for converting one Bitcoin format to another, including some the human-transcribable format hashed_base58. The MIT License (MIT) Copyright (c) 2013 by Richard Kiss Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import hashlib from .intbytes import byte_to_int, bytes_from_int BASE58_ALPHABET = b'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz' BASE58_BASE = len(BASE58_ALPHABET) BASE58_LOOKUP = dict((c, i) for i, c in enumerate(BASE58_ALPHABET)) class EncodingError(Exception): pass def ripemd160(data): return hashlib.new("ripemd160", data) try: ripemd160(b'').digest() except Exception: # stupid Google App Engine hashlib doesn't support ripemd160 for some stupid reason # import it from pycrypto. You need to add # - name: pycrypto # version: "latest" # to the "libraries" section of your app.yaml from Crypto.Hash.RIPEMD import RIPEMD160Hash as ripemd160 def to_long(base, lookup_f, s): """ Convert an array to a (possibly bignum) integer, along with a prefix value of how many prefixed zeros there are. base: the source base lookup_f: a function to convert an element of s to a value between 0 and base-1. s: the value to convert """ prefix = 0 v = 0 for c in s: v *= base try: v += lookup_f(c) except Exception: raise EncodingError("bad character %s in string %s" % (c, s)) if v == 0: prefix += 1 return v, prefix def from_long(v, prefix, base, charset): """The inverse of to_long. Convert an integer to an arbitrary base. v: the integer value to convert prefix: the number of prefixed 0s to include base: the new base charset: an array indicating what printable character to use for each value. 
""" l = bytearray() while v > 0: try: v, mod = divmod(v, base) l.append(charset(mod)) except Exception: raise EncodingError("can't convert to character corresponding to %d" % mod) l.extend([charset(0)] * prefix) l.reverse() return bytes(l) def to_bytes_32(v): v = from_long(v, 0, 256, lambda x: x) if len(v) > 32: raise ValueError("input to to_bytes_32 is too large") return ((b'\0' * 32) + v)[-32:] if hasattr(int, "to_bytes"): to_bytes_32 = lambda v: v.to_bytes(32, byteorder="big") def from_bytes_32(v): if len(v) != 32: raise ValueError("input to from_bytes_32 is wrong length") return to_long(256, byte_to_int, v)[0] if hasattr(int, "from_bytes"): from_bytes_32 = lambda v: int.from_bytes(v, byteorder="big") def double_sha256(data): """A standard compound hash.""" return hashlib.sha256(hashlib.sha256(data).digest()).digest() def hash160(data): """A standard compound hash.""" return ripemd160(hashlib.sha256(data).digest()).digest() def b2a_base58(s): """Convert binary to base58 using BASE58_ALPHABET. Like Bitcoin addresses.""" v, prefix = to_long(256, byte_to_int, s) s = from_long(v, prefix, BASE58_BASE, lambda v: BASE58_ALPHABET[v]) return s.decode("utf8") def a2b_base58(s): """Convert base58 to binary using BASE58_ALPHABET.""" v, prefix = to_long(BASE58_BASE, lambda c: BASE58_LOOKUP[c], s.encode("utf8")) return from_long(v, prefix, 256, lambda x: x) def b2a_hashed_base58(data): """ A "hashed_base58" structure is a base58 integer (which looks like a string) with four bytes of hash data at the end. Bitcoin does this in several places, including Bitcoin addresses. This function turns data (of type "bytes") into its hashed_base58 equivalent. """ return b2a_base58(data + double_sha256(data)[:4]) def a2b_hashed_base58(s): """ If the passed string is hashed_base58, return the binary data. Otherwise raises an EncodingError. """ data = a2b_base58(s) data, the_hash = data[:-4], data[-4:] if double_sha256(data)[:4] == the_hash: return data raise EncodingError("hashed base58 has bad checksum %s" % s) def is_hashed_base58_valid(base58): """Return True if and only if base58 is valid hashed_base58.""" try: a2b_hashed_base58(base58) except EncodingError: return False return True def wif_to_tuple_of_prefix_secret_exponent_compressed(wif): """ Return a tuple of (prefix, secret_exponent, is_compressed). """ decoded = a2b_hashed_base58(wif) actual_prefix, private_key = decoded[:1], decoded[1:] compressed = len(private_key) > 32 return actual_prefix, from_bytes_32(private_key[:32]), compressed def wif_to_tuple_of_secret_exponent_compressed(wif, allowable_wif_prefixes=[b'\x80']): """Convert a WIF string to the corresponding secret exponent. Private key manipulation. Returns a tuple: the secret exponent, as a bignum integer, and a boolean indicating if the WIF corresponded to a compressed key or not. 
Not that it matters, since we can use the secret exponent to generate both the compressed and uncompressed Bitcoin address.""" actual_prefix, secret_exponent, is_compressed = wif_to_tuple_of_prefix_secret_exponent_compressed(wif) if actual_prefix not in allowable_wif_prefixes: raise EncodingError("unexpected first byte of WIF %s" % wif) return secret_exponent, is_compressed def wif_to_secret_exponent(wif, allowable_wif_prefixes=[b'\x80']): """Convert a WIF string to the corresponding secret exponent.""" return wif_to_tuple_of_secret_exponent_compressed(wif, allowable_wif_prefixes=allowable_wif_prefixes)[0] def is_valid_wif(wif, allowable_wif_prefixes=[b'\x80']): """Return a boolean indicating if the WIF is valid.""" try: wif_to_secret_exponent(wif, allowable_wif_prefixes=allowable_wif_prefixes) except EncodingError: return False return True def secret_exponent_to_wif(secret_exp, compressed=True, wif_prefix=b'\x80'): """Convert a secret exponent (correspdong to a private key) to WIF format.""" d = wif_prefix + to_bytes_32(secret_exp) if compressed: d += b'\01' return b2a_hashed_base58(d) def public_pair_to_sec(public_pair, compressed=True): """Convert a public pair (a pair of bignums corresponding to a public key) to the gross internal sec binary format used by OpenSSL.""" x_str = to_bytes_32(public_pair[0]) if compressed: return bytes_from_int((2 + (public_pair[1] & 1))) + x_str y_str = to_bytes_32(public_pair[1]) return b'\4' + x_str + y_str def sec_to_public_pair(sec): """Convert a public key in sec binary format to a public pair.""" x = from_bytes_32(sec[1:33]) sec0 = sec[:1] if sec0 == b'\4': y = from_bytes_32(sec[33:65]) from ecdsa import is_public_pair_valid from secp256k1 import generator_secp256k1 public_pair = (x, y) # verify this is on the curve if not is_public_pair_valid(generator_secp256k1, public_pair): raise EncodingError("invalid (x, y) pair") return public_pair if sec0 in (b'\2', b'\3'): from ecdsa import public_pair_for_x from secp256k1 import generator_secp256k1 return public_pair_for_x(generator_secp256k1, x, is_even=(sec0 == b'\2')) raise EncodingError("bad sec encoding for public key") def is_sec_compressed(sec): """Return a boolean indicating if the sec represents a compressed public key.""" return sec[:1] in (b'\2', b'\3') def public_pair_to_hash160_sec(public_pair, compressed=True): """Convert a public_pair (corresponding to a public key) to hash160_sec format. This is a hash of the sec representation of a public key, and is used to generate the corresponding Bitcoin address.""" return hash160(public_pair_to_sec(public_pair, compressed=compressed)) def hash160_sec_to_bitcoin_address(hash160_sec, address_prefix=b'\0'): """Convert the hash160 of a sec version of a public_pair to a Bitcoin address.""" return b2a_hashed_base58(address_prefix + hash160_sec) def bitcoin_address_to_hash160_sec_with_prefix(bitcoin_address): """ Convert a Bitcoin address back to the hash160_sec format and also return the prefix. """ blob = a2b_hashed_base58(bitcoin_address) if len(blob) != 21: raise EncodingError("incorrect binary length (%d) for Bitcoin address %s" % (len(blob), bitcoin_address)) if blob[:1] not in [b'\x6f', b'\0']: raise EncodingError("incorrect first byte (%s) for Bitcoin address %s" % (blob[0], bitcoin_address)) return blob[1:], blob[:1] def bitcoin_address_to_hash160_sec(bitcoin_address, address_prefix=b'\0'): """Convert a Bitcoin address back to the hash160_sec format of the public key. 
Since we only know the hash of the public key, we can't get the full public key back.""" hash160, actual_prefix = bitcoin_address_to_hash160_sec_with_prefix(bitcoin_address) if (address_prefix == actual_prefix): return hash160 raise EncodingError("Bitcoin address %s for wrong network" % bitcoin_address) def public_pair_to_bitcoin_address(public_pair, compressed=True, address_prefix=b'\0'): """Convert a public_pair (corresponding to a public key) to a Bitcoin address.""" return hash160_sec_to_bitcoin_address(public_pair_to_hash160_sec( public_pair, compressed=compressed), address_prefix=address_prefix) def is_valid_bitcoin_address(bitcoin_address, allowable_prefixes=b'\0'): """Return True if and only if bitcoin_address is valid.""" try: hash160, prefix = bitcoin_address_to_hash160_sec_with_prefix(bitcoin_address) except EncodingError: return False return prefix in allowable_prefixes
mit
-1,994,404,400,331,621,600
34.631579
108
0.681961
false
3.545663
false
false
false
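b2a_hashed_base58() and a2b_hashed_base58() above implement Base58Check: the payload gets the first four bytes of its double SHA-256 appended before base-58 conversion, and leading zero bytes become leading '1' characters. A self-contained Python 3 sketch of the encode direction, mirroring what hash160_sec_to_bitcoin_address() does (the hash160 value is made up):

import hashlib

ALPHABET = b'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'

def double_sha256(data):
    return hashlib.sha256(hashlib.sha256(data).digest()).digest()

def base58check_encode(payload):
    data = payload + double_sha256(payload)[:4]   # append 4-byte checksum
    n = int.from_bytes(data, 'big')
    digits = bytearray()
    while n > 0:
        n, rem = divmod(n, 58)
        digits.append(ALPHABET[rem])
    # Every leading zero byte of the payload becomes a leading '1'.
    digits.extend(ALPHABET[0:1] * (len(data) - len(data.lstrip(b'\x00'))))
    return bytes(digits[::-1]).decode('ascii')

# Version byte 0x00 plus a (made-up) all-zero HASH160 -> a Bitcoin address.
print(base58check_encode(b'\x00' + bytes(20)))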
DedMemez/ODS-August-2017
dna/DNAVisGroup.py
1
2344
# Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: toontown.dna.DNAVisGroup from panda3d.core import LVector3, LVector3f import DNAGroup import DNABattleCell import DNAUtil class DNAVisGroup(DNAGroup.DNAGroup): COMPONENT_CODE = 2 def __init__(self, name): DNAGroup.DNAGroup.__init__(self, name) self.visibles = [] self.suitEdges = [] self.battleCells = [] def getVisGroup(self): return self def addBattleCell(self, battleCell): self.battleCells.append(battleCell) def addSuitEdge(self, suitEdge): self.suitEdges.append(suitEdge) def addVisible(self, visible): self.visibles.append(visible) def getBattleCell(self, i): return self.battleCells[i] def getNumBattleCells(self): return len(self.battleCells) def getNumSuitEdges(self): return len(self.suitEdges) def getNumVisibles(self): return len(self.visibles) def getSuitEdge(self, i): return self.suitEdges[i] def getVisibleName(self, i): return self.visibles[i] def getVisibles(self): return self.visibles def removeBattleCell(self, cell): self.battleCells.remove(cell) def removeSuitEdge(self, edge): self.suitEdges.remove(edge) def removeVisible(self, visible): self.visibles.remove(visible) def makeFromDGI(self, dgi, dnaStorage): DNAGroup.DNAGroup.makeFromDGI(self, dgi) numEdges = dgi.getUint16() for _ in xrange(numEdges): index = dgi.getUint16() endPoint = dgi.getUint16() self.addSuitEdge(dnaStorage.getSuitEdge(index, endPoint)) numVisibles = dgi.getUint16() for _ in xrange(numVisibles): self.addVisible(DNAUtil.dgiExtractString8(dgi)) numCells = dgi.getUint16() for _ in xrange(numCells): w = dgi.getUint8() h = dgi.getUint8() x, y, z = [ dgi.getInt32() / 100.0 for i in xrange(3) ] self.addBattleCell(DNABattleCell.DNABattleCell(w, h, LVector3f(x, y, z))) def destroy(self): del self.visibles[:] del self.suitEdges[:] del self.battleCells[:] DNAGroup.DNAGroup.destroy(self)
apache-2.0
3,608,324,131,246,925,300
26.962963
85
0.611348
false
3.397101
false
false
false
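makeFromDGI() above decodes each battle cell as a uint8 width, a uint8 height and three int32 coordinates that are divided by 100.0, i.e. positions travel as fixed-point values with two decimal places. A tiny illustration of that convention with struct; little-endian order and this exact field layout are assumptions here, since the real stream format is defined by Panda3D's datagram classes:

import struct

def pack_cell(w, h, x, y, z):
    # uint8 width, uint8 height, three int32 coordinates scaled by 100
    return struct.pack('<BBiii', w, h,
                       int(round(x * 100)), int(round(y * 100)), int(round(z * 100)))

def unpack_cell(buf):
    w, h, xi, yi, zi = struct.unpack('<BBiii', buf)
    return w, h, xi / 100.0, yi / 100.0, zi / 100.0   # mirrors getInt32() / 100.0

print(unpack_cell(pack_cell(20, 20, 1.25, -3.5, 0.0)))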
CopyChat/Plotting
Python/PythonNetCDF.py
1
10821
''' NAME NetCDF with Python PURPOSE To demonstrate how to read and write data with NetCDF files using a NetCDF file from the NCEP/NCAR Reanalysis. Plotting using Matplotlib and Basemap is also shown. PROGRAMMER(S) Chris Slocum REVISION HISTORY 20140320 -- Initial version created and posted online 20140722 -- Added basic error handling to ncdump Thanks to K.-Michael Aye for highlighting the issue REFERENCES netcdf4-python -- http://code.google.com/p/netcdf4-python/ NCEP/NCAR Reanalysis -- Kalnay et al. 1996 http://dx.doi.org/10.1175/1520-0477(1996)077<0437:TNYRP>2.0.CO;2 ''' import datetime as dt # Python standard library datetime module import numpy as np from netCDF4 import Dataset # http://code.google.com/p/netcdf4-python/ import matplotlib.pyplot as plt from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid def ncdump(nc_fid, verb=True): ''' ncdump outputs dimensions, variables and their attribute information. The information is similar to that of NCAR's ncdump utility. ncdump requires a valid instance of Dataset. Parameters ---------- nc_fid : netCDF4.Dataset A netCDF4 dateset object verb : Boolean whether or not nc_attrs, nc_dims, and nc_vars are printed Returns ------- nc_attrs : list A Python list of the NetCDF file global attributes nc_dims : list A Python list of the NetCDF file dimensions nc_vars : list A Python list of the NetCDF file variables ''' def print_ncattr(key): """ Prints the NetCDF file attributes for a given key Parameters ---------- key : unicode a valid netCDF4.Dataset.variables key """ try: print "\t\ttype:", repr(nc_fid.variables[key].dtype) for ncattr in nc_fid.variables[key].ncattrs(): print '\t\t%s:' % ncattr,\ repr(nc_fid.variables[key].getncattr(ncattr)) except KeyError: print "\t\tWARNING: %s does not contain variable attributes" % key # NetCDF global attributes nc_attrs = nc_fid.ncattrs() if verb: print "NetCDF Global Attributes:" for nc_attr in nc_attrs: print '\t%s:' % nc_attr, repr(nc_fid.getncattr(nc_attr)) nc_dims = [dim for dim in nc_fid.dimensions] # list of nc dimensions # Dimension shape information. if verb: print "NetCDF dimension information:" for dim in nc_dims: print "\tName:", dim print "\t\tsize:", len(nc_fid.dimensions[dim]) print_ncattr(dim) # Variable information. nc_vars = [var for var in nc_fid.variables] # list of nc variables if verb: print "NetCDF variable information:" for var in nc_vars: if var not in nc_dims: print '\tName:', var print "\t\tdimensions:", nc_fid.variables[var].dimensions print "\t\tsize:", nc_fid.variables[var].size print_ncattr(var) return nc_attrs, nc_dims, nc_vars nc_f = './CLM45_Micro_UW_SRF.2005120100.for.test.nc' # Your filename nc_fid = Dataset(nc_f, 'r') # Dataset is the class behavior to open the file # and create an instance of the ncCDF4 class nc_attrs, nc_dims, nc_vars = ncdump(nc_fid) # Extract data from NetCDF file lats = nc_fid.variables['xlat'][:] # extract/copy the data lons = nc_fid.variables['xlon'][:] time = nc_fid.variables['time'][:] rsds = nc_fid.variables['rsds'][:] # shape is time, lat, lon as shown above time_idx = 237 # some random day in 2012 # Python and the renalaysis are slightly off in time so this fixes that problem offset = dt.timedelta(hours=48) # List of all times in the file as datetime objects dt_time = [dt.date(1, 1, 1) + dt.timedelta(hours=t/20) - offset\ for t in time] cur_time = dt_time[time_idx] # Plot of global temperature on our random day fig = plt.figure() fig.subplots_adjust(left=0., right=1., bottom=0., top=0.9) # Setup the map. 
See http://matplotlib.org/basemap/users/mapsetup.html # for other projections. m = Basemap(projection='moll', llcrnrlat=-90, urcrnrlat=90,\ llcrnrlon=0, urcrnrlon=360, resolution='c', lon_0=0) m.drawcoastlines() m.drawmapboundary() # Make the plot continuous test=rsds[0,:,:] print test.shape print rsds.shape print lons.shape rsds_cyclic, lons_cyclic = addcyclic(rsds[time_idx,:,:], lons) # Shift the grid so lons go from -180 to 180 instead of 0 to 360. rsds_cyclic, lons_cyclic = shiftgrid(180., rsds_cyclic, lons_cyclic, start=False) # Create 2D lat/lon arrays for Basemap lon2d, lat2d = np.meshgrid(lons_cyclic, lats) # Transforms lat/lon into plotting coordinates for projection x, y = m(lon2d, lat2d) # Plot of rsds temperature with 11 contour intervals cs = m.contourf(x, y, rsds_cyclic, 11, cmap=plt.cm.Spectral_r) cbar = plt.colorbar(cs, orientation='horizontal', shrink=0.5) cbar.set_label("%s (%s)" % (nc_fid.variables['rsds'].var_desc,\ nc_fid.variables['rsds'].units)) plt.title("%s on %s" % (nc_fid.variables['rsds'].var_desc, cur_time)) # Writing NetCDF files # For this example, we will create two NetCDF4 files. One with the global rsds # temperature departure from its value at Darwin, Australia. The other with # the temperature profile for the entire year at Darwin. darwin = {'name': 'Darwin, Australia', 'lat': -12.45, 'lon': 130.83} # Find the nearest latitude and longitude for Darwin lat_idx = np.abs(lats - darwin['lat']).argmin() lon_idx = np.abs(lons - darwin['lon']).argmin() # Simple example: temperature profile for the entire year at Darwin. # Open a new NetCDF file to write the data to. For format, you can choose from # 'NETCDF3_CLASSIC', 'NETCDF3_64BIT', 'NETCDF4_CLASSIC', and 'NETCDF4' w_nc_fid = Dataset('darwin_2012.nc', 'w', format='NETCDF4') w_nc_fid.description = "NCEP/NCAR Reanalysis %s from its value at %s. %s" %\ (nc_fid.variables['rsds'].var_desc.lower(),\ darwin['name'], nc_fid.description) # Using our previous dimension info, we can create the new time dimension # Even though we know the size, we are going to set the size to unknown w_nc_fid.createDimension('time', None) w_nc_dim = w_nc_fid.createVariable('time', nc_fid.variables['time'].dtype,\ ('time',)) # You can do this step yourself but someone else did the work for us. for ncattr in nc_fid.variables['time'].ncattrs(): w_nc_dim.setncattr(ncattr, nc_fid.variables['time'].getncattr(ncattr)) # Assign the dimension data to the new NetCDF file. 
w_nc_fid.variables['time'][:] = time w_nc_var = w_nc_fid.createVariable('rsds', 'f8', ('time')) w_nc_var.setncatts({'long_name': u"mean Daily Air temperature",\ 'units': u"degK", 'level_desc': u'Surface',\ 'var_desc': u"Air temperature",\ 'statistic': u'Mean\nM'}) w_nc_fid.variables['rsds'][:] = rsds[time_idx, lat_idx, lon_idx] w_nc_fid.close() # close the new file # A plot of the temperature profile for Darwin in 2012 fig = plt.figure() plt.plot(dt_time, rsds[:, lat_idx, lon_idx], c='r') plt.plot(dt_time[time_idx], rsds[time_idx, lat_idx, lon_idx], c='b', marker='o') plt.text(dt_time[time_idx], rsds[time_idx, lat_idx, lon_idx], cur_time,\ ha='right') fig.autofmt_xdate() plt.ylabel("%s (%s)" % (nc_fid.variables['rsds'].var_desc,\ nc_fid.variables['rsds'].units)) plt.xlabel("Time") plt.title("%s from\n%s for %s" % (nc_fid.variables['rsds'].var_desc,\ darwin['name'], cur_time.year)) # Complex example: global temperature departure from its value at Darwin departure = rsds[:, :, :] - rsds[:, lat_idx, lon_idx].reshape((time.shape[0],\ 1, 1)) # Open a new NetCDF file to write the data to. For format, you can choose from # 'NETCDF3_CLASSIC', 'NETCDF3_64BIT', 'NETCDF4_CLASSIC', and 'NETCDF4' w_nc_fid = Dataset('rsds.departure.sig995.2012.nc', 'w', format='NETCDF4') w_nc_fid.description = "The departure of the NCEP/NCAR Reanalysis " +\ "%s from its value at %s. %s" %\ (nc_fid.variables['rsds'].var_desc.lower(),\ darwin['name'], nc_fid.description) # Using our previous dimension information, we can create the new dimensions data = {} for dim in nc_dims: w_nc_fid.createDimension(dim, nc_fid.variables[dim].size) data[dim] = w_nc_fid.createVariable(dim, nc_fid.variables[dim].dtype,\ (dim,)) # You can do this step yourself but someone else did the work for us. for ncattr in nc_fid.variables[dim].ncattrs(): data[dim].setncattr(ncattr, nc_fid.variables[dim].getncattr(ncattr)) # Assign the dimension data to the new NetCDF file. 
w_nc_fid.variables['time'][:] = time w_nc_fid.variables['lat'][:] = lats w_nc_fid.variables['lon'][:] = lons # Ok, time to create our departure variable w_nc_var = w_nc_fid.createVariable('rsds_dep', 'f8', ('time', 'lat', 'lon')) w_nc_var.setncatts({'long_name': u"mean Daily Air temperature departure",\ 'units': u"degK", 'level_desc': u'Surface',\ 'var_desc': u"Air temperature departure",\ 'statistic': u'Mean\nM'}) w_nc_fid.variables['rsds_dep'][:] = departure w_nc_fid.close() # close the new file # Rounded maximum absolute value of the departure used for contouring max_dep = np.round(np.abs(departure[time_idx, :, :]).max()+5., decimals=-1) # Generate a figure of the departure for a single day fig = plt.figure() fig.subplots_adjust(left=0., right=1., bottom=0., top=0.9) m = Basemap(projection='moll', llcrnrlat=-90, urcrnrlat=90,\ llcrnrlon=0, urcrnrlon=360, resolution='c', lon_0=0) m.drawcoastlines() m.drawmapboundary() dep_cyclic, lons_cyclic = addcyclic(departure[time_idx, :, :], lons) dep_cyclic, lons_cyclic = shiftgrid(180., dep_cyclic, lons_cyclic, start=False) lon2d, lat2d = np.meshgrid(lons_cyclic, lats) x, y = m(lon2d, lat2d) levels = np.linspace(-max_dep, max_dep, 11) cs = m.contourf(x, y, dep_cyclic, levels=levels, cmap=plt.cm.bwr) x, y = m(darwin['lon'], darwin['lat']) plt.plot(x, y, c='c', marker='o') plt.text(x, y, 'Darwin,\nAustralia', color='r', weight='semibold') cbar = plt.colorbar(cs, orientation='horizontal', shrink=0.5) cbar.set_label("%s departure (%s)" % (nc_fid.variables['rsds'].var_desc,\ nc_fid.variables['rsds'].units)) plt.title("Departure of Global %s from\n%s for %s" %\ (nc_fid.variables['rsds'].var_desc, darwin['name'], cur_time)) plt.show() # Close original NetCDF file. nc_fid.close()
gpl-3.0
2,362,746,802,840,747,500
42.633065
81
0.641253
false
3.204323
false
false
false
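The record above demonstrates the netCDF4 read/write workflow: open a Dataset, inspect dimensions and variables, then create a new file with createDimension/createVariable and assign data. Below is a minimal, self-contained sketch of that round trip; the file name example_rsds.nc, the single time dimension and the rsds variable are invented for the illustration and are not tied to the CLM45/NCEP file used in the record.

import numpy as np
from netCDF4 import Dataset

times = np.arange(10, dtype='f8')   # toy hourly offsets
values = np.random.rand(10)         # toy surface-radiation values

# write: one unlimited dimension, two variables, a few attributes
w_nc = Dataset('example_rsds.nc', 'w', format='NETCDF4')
w_nc.description = 'toy file showing the createDimension/createVariable pattern'
w_nc.createDimension('time', None)  # unlimited, as in the record above
t_var = w_nc.createVariable('time', 'f8', ('time',))
t_var.units = 'hours since 2005-12-01 00:00:00'
r_var = w_nc.createVariable('rsds', 'f8', ('time',))
r_var.units = 'W m-2'
t_var[:] = times
r_var[:] = values
w_nc.close()

# read the file back and inspect it, mirroring the ncdump-style inspection above
r_nc = Dataset('example_rsds.nc', 'r')
print(list(r_nc.variables.keys()))
print(r_nc.variables['rsds'][:])
r_nc.close()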
tek/amino
amino/tree.py
1
14049
import abc from typing import Callable, TypeVar, Generic, Union, cast, Any from amino.logging import Logging from amino import LazyList, Boolean, __, _, Either, Right, Maybe, Left, L, Map, curried from amino.boolean import false, true from amino.tc.base import Implicits from amino.tc.flat_map import FlatMap from amino.func import call_by_name from amino.lazy_list import LazyLists def indent(strings: LazyList[str]) -> LazyList[str]: return strings.map(' ' + _) Data = TypeVar('Data') Data1 = TypeVar('Data1') Sub = TypeVar('Sub') Sub1 = TypeVar('Sub1') A = TypeVar('A') B = TypeVar('B') Z = TypeVar('Z') Key = Union[str, int] class Node(Generic[Data, Sub], Logging, abc.ABC, Implicits, implicits=True, auto=True): @abc.abstractproperty def sub(self) -> Sub: ... @abc.abstractproperty def sub_l(self) -> LazyList['Node[Data, Any]']: ... @abc.abstractmethod def _strings(self) -> LazyList[str]: ... @property def strings(self) -> LazyList[str]: return self._strings() def _show(self) -> str: return self._strings().mk_string('\n') @property def show(self) -> str: return self._show() @abc.abstractmethod def foreach(self, f: Callable[['Node'], None]) -> None: ... @abc.abstractmethod def filter(self, pred: Callable[['Node'], bool]) -> 'Node': ... def filter_not(self, pred: Callable[['Node'], bool]) -> 'Node': return self.filter(lambda a: not pred(a)) @abc.abstractproperty def flatten(self) -> 'LazyList[Any]': ... @abc.abstractmethod def contains(self, target: 'Node') -> Boolean: ... @abc.abstractmethod def lift(self, key: Key) -> 'SubTree': ... def __getitem__(self, key: Key) -> 'SubTree': return self.lift(key) @abc.abstractproperty def s(self) -> 'SubTree': ... @abc.abstractproperty def empty(self) -> Boolean: ... @curried def fold_left(self, z: Z, f: Callable[[Z, 'Node'], Z]) -> Z: z1 = f(z, self) return self.sub_l.fold_left(z1)(lambda z2, a: a.fold_left(z2)(f)) @abc.abstractmethod def replace(self, data: LazyList['Node[Data1, Sub1]']) -> 'Node[Data1, Sub1]': ... @abc.abstractmethod def map_nodes(self, f: Callable[['Node[Data, Sub]'], 'Node[Data, Sub]']) -> 'Node[Data, Sub]': ... class Inode(Generic[Data, Sub], Node[Data, Sub]): @abc.abstractproperty def sub(self) -> LazyList[Any]: ... 
def foreach(self, f: Callable[[Node], None]) -> None: f(self) self.sub_l.foreach(__.foreach(f)) @property def flatten(self) -> LazyList[Any]: return self.sub_l.flat_map(_.flatten).cons(self) def contains(self, target: Node) -> Boolean: return self.sub_l.contains(target) @property def empty(self) -> Boolean: return self.data.empty class ListNode(Generic[Data], Inode[Data, LazyList[Node[Data, Any]]]): def __init__(self, sub: LazyList[Node[Data, Any]]) -> None: self.data = sub @property def sub(self) -> LazyList[Node[Data, Any]]: return self.data @property def sub_l(self) -> LazyList[Node[Data, Any]]: return self.sub @property def _desc(self) -> str: return '[]' def _strings(self) -> LazyList[str]: return indent(self.sub // (lambda a: a._strings())).cons(self._desc) @property def head(self) -> 'SubTree': return self.lift(0) @property def last(self) -> 'SubTree': return self.lift(-1) def __str__(self) -> str: return '{}({})'.format(self.__class__.__name__, self.sub.map(str).mk_string(',')) def __repr__(self) -> str: return str(self) def lift(self, key: Key) -> 'SubTree': return ( SubTreeInvalid(key, 'ListNode index must be int') if isinstance(key, str) else self.sub.lift(key) / L(SubTree.cons)(_, key) | (lambda: SubTreeInvalid(key, 'ListNode index oob')) ) def replace(self, sub: LazyList[Any]) -> Node: return ListNode(sub) def filter(self, pred: Callable[[Node], bool]) -> Node: def filt(n: Node) -> bool: return ( pred(n) if isinstance(n, LeafNode) else not n.empty ) return self.replace(self.sub.map(__.filter(pred)).filter(filt)) @property def s(self) -> 'SubTree': return SubTreeList(self, 'root') def map_nodes(self, f: Callable[['Node[Data, Sub]'], 'Node[Data, Sub]']) -> 'Node[Data, Sub]': return f(ListNode(self.sub.map(lambda a: a.map_nodes(f)))) class MapNode(Generic[Data], Inode[Data, Map[str, Node[Data, Any]]]): def __init__(self, data: Map[str, Node[Data, Any]]) -> None: self.data = data @property def sub(self) -> Map[str, Node[Data, Any]]: return self.data @property def sub_l(self) -> LazyList[Node[Data, Any]]: return LazyList(self.data.v) @property def _desc(self) -> str: return '{}' def _strings(self) -> LazyList[str]: return indent(self.sub_l // (lambda a: a._strings())).cons(self._desc) def __str__(self) -> str: return '{}({})'.format(self.__class__.__name__, self.sub_l) def __repr__(self) -> str: return str(self) # TODO allow int indexes into sub_l def lift(self, key: Key) -> 'SubTree': def err() -> 'SubTree': keys = ', '.join(self.data.keys()) return SubTreeInvalid(key, f'MapNode({self.rule}) invalid key ({keys})') return ( self.data.lift(key) / L(SubTree.cons)(_, key) | err ) def replace(self, sub: Map[str, Node]) -> Node: return MapNode(sub) def filter(self, pred: Callable[[Node], bool]) -> Node: def filt(n: Node) -> bool: return ( pred(n) if isinstance(n, LeafNode) else not n.empty ) return self.replace(self.data.valmap(__.filter(pred)).valfilter(filt)) @property def s(self) -> 'SubTree': return SubTreeMap(self, 'root') def map_nodes(self, f: Callable[['Node[Data, Sub]'], 'Node[Data, Sub]']) -> 'Node[Data, Sub]': return f(MapNode(self.sub.valmap(lambda a: a.map_nodes(f)))) class LeafNode(Generic[Data], Node[Data, None]): def __init__(self, data: Data) -> None: self.data = data def _strings(self) -> LazyList[Data]: return LazyLists.cons(self.data) @property def sub(self) -> None: pass @property def sub_l(self) -> LazyList[Node[Data, Any]]: return LazyList([]) def foreach(self, f: Callable[[Node], None]) -> None: f(self) def filter(self, pred: Callable[[Node], bool]) -> Node: 
return self @property def flatten(self) -> LazyList[Any]: return LazyLists.cons(self) def contains(self, target: Node) -> Boolean: return false def lift(self, key: Key) -> 'SubTree': return SubTreeInvalid(key, 'LeafNode cannot be indexed') def __str__(self) -> str: return '{}({})'.format(self.__class__.__name__, self.data) def __repr__(self) -> str: return str(self) @property def empty(self) -> Boolean: return false @property def s(self) -> 'SubTree': return SubTreeLeaf(self, 'root') def replace(self, sub: Data) -> Node: return LeafNode(sub) def map_nodes(self, f: Callable[['Node[Data, Sub]'], 'Node[Data, Sub]']) -> 'Node[Data, Sub]': return f(self) class TreeFlatMap(FlatMap, tpe=Node): def flat_map(self, fa: Node[A, Any], f: Callable[[A], Node[B, Any]]) -> Node[B, Any]: return ( self.flat_map_inode(fa, f) if isinstance(fa, Inode) else self.flat_map_leaf(fa, f) ) def flat_map_inode(self, fa: Inode[A, Any], f: Callable[[A], Node[B, Any]]) -> Node[B, Any]: def err() -> Inode[A, Any]: raise Exception(f'invalid sub for `TreeFlatMap.flat_map_inode`: {fa}') return ( self.flat_map_map(fa, f) if isinstance(fa, MapNode) else self.flat_map_list(fa, f) if isinstance(fa, ListNode) else err() ) def flat_map_map(self, fa: MapNode[A], f: Callable[[A], Node[B, Any]]) -> Node[B, Any]: return MapNode(fa.sub.valmap(lambda a: self.flat_map(a, f))) def flat_map_list(self, fa: ListNode[A], f: Callable[[A], Node[B, Any]]) -> Node[B, Any]: return ListNode(fa.sub.map(lambda a: self.flat_map(a, f))) def flat_map_leaf(self, fa: LeafNode[A], f: Callable[[A], Node[B, Any]]) -> Node[B, Any]: return f(fa.data) def map(self, fa: Node[A, Any], f: Callable[[A], B]) -> Node[B, Any]: return ( self.map_inode(fa, f) if isinstance(fa, Inode) else self.map_leaf(fa, f) ) def map_inode(self, fa: Inode[A, Any], f: Callable[[A], B]) -> Node[B, Any]: def err() -> Inode[A, Any]: raise Exception(f'invalid sub for `TreeFlatMap.map_inode`: {fa}') return ( self.map_map(fa, f) if isinstance(fa, MapNode) else self.map_list(fa, f) if isinstance(fa, ListNode) else err() ) def map_map(self, fa: MapNode[A], f: Callable[[A], B]) -> Node[B, Any]: return MapNode(fa.data.valmap(lambda a: self.map(a, f))) def map_list(self, fa: ListNode[A], f: Callable[[A], B]) -> Node[B, Any]: return ListNode(fa.sub.map(lambda a: self.map(a, f))) def map_leaf(self, fa: LeafNode[A], f: Callable[[A], B]) -> Node[B, Any]: return LeafNode(f(fa.data)) class SubTree(Implicits, implicits=True, auto=True): @staticmethod def cons(fa: Node, key: Key) -> 'SubTree': return ( # type: ignore cast(SubTree, SubTreeList(fa, key)) if isinstance(fa, ListNode) else SubTreeLeaf(fa, key) if isinstance(fa, LeafNode) else SubTreeMap(fa, key) ) @staticmethod def from_maybe(data: Maybe[Node], key: Key, err: str) -> 'SubTree': return data.cata(SubTree.cons, SubTreeInvalid(key, err)) def __getattr__(self, key: Key) -> 'SubTree': try: return super().__getattr__(key) except AttributeError: return self._getattr(key) @abc.abstractmethod def _getattr(self, key: Key) -> 'SubTree': ... def __getitem__(self, key: Key) -> 'SubTree': return self._getitem(key) @abc.abstractmethod def _getitem(self, key: Key) -> 'SubTree': ... def cata(self, f: Callable[[Node], A], b: Union[A, Callable[[], A]]) -> A: return ( f(self.data) if isinstance(self, SubTreeValid) else call_by_name(b) ) @abc.abstractproperty def e(self) -> Either[str, Node]: ... @abc.abstractproperty def valid(self) -> Boolean: ... @abc.abstractproperty def strings(self) -> LazyList[str]: ... 
@abc.abstractproperty def show(self) -> LazyList[str]: ... @property def rule(self) -> Either[str, str]: return self.e.map(_.rule) class SubTreeValid(SubTree): def __init__(self, data: Node, key: Key) -> None: self.data = data self._key = key def __str__(self) -> str: return '{}({})'.format(self.__class__.__name__, self.data) @property def e(self) -> Either[str, Node]: return Right(self.data) @property def valid(self) -> Boolean: return true @property def strings(self) -> LazyList[str]: return self.data.strings @property def show(self) -> str: return self.data.show class SubTreeList(SubTreeValid): @property def head(self) -> SubTree: return self[0] @property def last(self) -> SubTree: return self[-1] def _getattr(self, key: Key) -> SubTree: return SubTreeInvalid(key, 'cannot access attrs in SubTreeList') def _getitem(self, key: Key) -> SubTree: return self.data.lift(key) def __str__(self) -> str: return '{}({})'.format(self.__class__.__name__, self.data.sub_l.drain.join_comma) @property def _keys(self) -> LazyList[str]: return self.data.k class SubTreeLeaf(SubTreeValid): def err(self, key: Key) -> SubTree: return SubTreeInvalid(key, 'cannot access attrs in SubTreeLeaf') def _getattr(self, key: Key) -> SubTree: return self.err(key) def _getitem(self, key: Key) -> SubTree: return self.err(key) class SubTreeMap(SubTreeValid): def _getattr(self, key: Key) -> SubTree: return self.data.lift(key) def _getitem(self, key: Key) -> SubTree: return self.data.lift(key) @property def _keys(self) -> LazyList[str]: return self.data.k class SubTreeInvalid(SubTree): def __init__(self, key: Key, reason: str) -> None: self.key = key self.reason = reason def __str__(self) -> str: s = 'SubTreeInvalid({}, {})' return s.format(self.key, self.reason) def __repr__(self) -> str: return str(self) @property def valid(self) -> Boolean: return false @property def _error(self) -> str: return 'no subtree `{}`: {}'.format(self.key, self.reason) def _getattr(self, key: Key) -> SubTree: return self def _getitem(self, key: Key) -> SubTree: return self @property def e(self) -> Either[str, Node]: return Left(self._error) @property def strings(self) -> LazyList[str]: return LazyList([]) @property def show(self) -> LazyList[str]: return str(self) __all__ = ('Node', 'Inode', 'LeafNode', 'MapNode', 'LeafNode', 'ListNode')
mit
-5,395,947,376,289,710,000
25.76
110
0.55947
false
3.380414
false
false
false
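amino/tree.py above builds a tree out of three node shapes (MapNode over a Map, ListNode over a LazyList, LeafNode over a value) and equips it with fold, map and filter operations. As a rough, dependency-free illustration of that shape, here is a plain-Python sketch of the same leaf/list/map structure with a depth-first fold; the names Leaf and fold are invented for this sketch and are not part of amino's API.

class Leaf:
    def __init__(self, data):
        self.data = data

def fold(node, z, f):
    # depth-first left fold, analogous in spirit to Node.fold_left above
    if isinstance(node, Leaf):
        return f(z, node.data)
    if isinstance(node, list):    # plays the role of ListNode
        for child in node:
            z = fold(child, z, f)
        return z
    if isinstance(node, dict):    # plays the role of MapNode
        for child in node.values():
            z = fold(child, z, f)
        return z
    raise TypeError('unsupported node type: %r' % type(node))

tree = {'head': Leaf(1), 'body': [Leaf(2), {'x': Leaf(3)}]}
print(fold(tree, 0, lambda acc, v: acc + v))   # 6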
KyleJamesWalker/ansible-modules-core
cloud/amazon/ec2_asg.py
1
34388
#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = """ --- module: ec2_asg short_description: Create or delete AWS Autoscaling Groups description: - Can create or delete AWS Autoscaling Groups - Works with the ec2_lc module to manage Launch Configurations version_added: "1.6" author: "Gareth Rushgrove (@garethr)" options: state: description: - register or deregister the instance required: true choices: ['present', 'absent'] name: description: - Unique name for group to be created or deleted required: true load_balancers: description: - List of ELB names to use for the group required: false availability_zones: description: - List of availability zone names in which to create the group. Defaults to all the availability zones in the region if vpc_zone_identifier is not set. required: false launch_config_name: description: - Name of the Launch configuration to use for the group. See the ec2_lc module for managing these. required: true min_size: description: - Minimum number of instances in group, if unspecified then the current group value will be used. required: false max_size: description: - Maximum number of instances in group, if unspecified then the current group value will be used. required: false desired_capacity: description: - Desired number of instances in group, if unspecified then the current group value will be used. required: false replace_all_instances: description: - In a rolling fashion, replace all instances with an old launch configuration with one from the current launch configuration. required: false version_added: "1.8" default: False replace_batch_size: description: - Number of instances you'd like to replace at a time. Used with replace_all_instances. required: false version_added: "1.8" default: 1 replace_instances: description: - List of instance_ids belonging to the named ASG that you would like to terminate and be replaced with instances matching the current launch configuration. required: false version_added: "1.8" default: None lc_check: description: - Check to make sure instances that are being replaced with replace_instances do not aready have the current launch_config. required: false version_added: "1.8" default: True vpc_zone_identifier: description: - List of VPC subnets to use required: false default: None tags: description: - A list of tags to add to the Auto Scale Group. Optional key is 'propagate_at_launch', which defaults to true. required: false default: None version_added: "1.7" health_check_period: description: - Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health. required: false default: 500 seconds version_added: "1.7" health_check_type: description: - The service you want the health status from, Amazon EC2 or Elastic Load Balancer. 
required: false default: EC2 version_added: "1.7" choices: ['EC2', 'ELB'] default_cooldown: description: - The number of seconds after a scaling activity completes before another can begin. required: false default: 300 seconds version_added: "2.0" wait_timeout: description: - how long before wait instances to become viable when replaced. Used in concjunction with instance_ids option. default: 300 version_added: "1.8" wait_for_instances: description: - Wait for the ASG instances to be in a ready state before exiting. If instances are behind an ELB, it will wait until the ELB determines all instances have a lifecycle_state of "InService" and a health_status of "Healthy". version_added: "1.9" default: yes required: False termination_policies: description: - An ordered list of criteria used for selecting instances to be removed from the Auto Scaling group when reducing capacity. - For 'Default', when used to create a new autoscaling group, the "Default" value is used. When used to change an existent autoscaling group, the current termination policies are mantained required: false default: Default choices: ['OldestInstance', 'NewestInstance', 'OldestLaunchConfiguration', 'ClosestToNextInstanceHour', 'Default'] version_added: "2.0" extends_documentation_fragment: - aws - ec2 """ EXAMPLES = ''' # Basic configuration - ec2_asg: name: special load_balancers: [ 'lb1', 'lb2' ] availability_zones: [ 'eu-west-1a', 'eu-west-1b' ] launch_config_name: 'lc-1' min_size: 1 max_size: 10 desired_capacity: 5 vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ] tags: - environment: production propagate_at_launch: no # Rolling ASG Updates Below is an example of how to assign a new launch config to an ASG and terminate old instances. All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in a rolling fashion with instances using the current launch configuration, "my_new_lc". This could also be considered a rolling deploy of a pre-baked AMI. If this is a newly created group, the instances will not be replaced since all instances will have the current launch configuration. 
- name: create launch config ec2_lc: name: my_new_lc image_id: ami-lkajsf key_name: mykey region: us-east-1 security_groups: sg-23423 instance_type: m1.small assign_public_ip: yes - ec2_asg: name: myasg launch_config_name: my_new_lc health_check_period: 60 health_check_type: ELB replace_all_instances: yes min_size: 5 max_size: 5 desired_capacity: 5 region: us-east-1 To only replace a couple of instances instead of all of them, supply a list to "replace_instances": - ec2_asg: name: myasg launch_config_name: my_new_lc health_check_period: 60 health_check_type: ELB replace_instances: - i-b345231 - i-24c2931 min_size: 5 max_size: 5 desired_capacity: 5 region: us-east-1 ''' import time import logging as log from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * log.getLogger('boto').setLevel(log.CRITICAL) #log.basicConfig(filename='/tmp/ansible_ec2_asg.log',level=log.DEBUG, format='%(asctime)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p') try: import boto.ec2.autoscale from boto.ec2.autoscale import AutoScaleConnection, AutoScalingGroup, Tag from boto.exception import BotoServerError HAS_BOTO = True except ImportError: HAS_BOTO = False ASG_ATTRIBUTES = ('availability_zones', 'default_cooldown', 'desired_capacity', 'health_check_period', 'health_check_type', 'launch_config_name', 'load_balancers', 'max_size', 'min_size', 'name', 'placement_group', 'termination_policies', 'vpc_zone_identifier') INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name') def enforce_required_arguments(module): ''' As many arguments are not required for autoscale group deletion they cannot be mandatory arguments for the module, so we enforce them here ''' missing_args = [] for arg in ('min_size', 'max_size', 'launch_config_name'): if module.params[arg] is None: missing_args.append(arg) if missing_args: module.fail_json(msg="Missing required arguments for autoscaling group create/update: %s" % ",".join(missing_args)) def get_properties(autoscaling_group): properties = dict((attr, getattr(autoscaling_group, attr)) for attr in ASG_ATTRIBUTES) # Ugly hack to make this JSON-serializable. We take a list of boto Tag # objects and replace them with a dict-representation. 
Needed because the # tags are included in ansible's return value (which is jsonified) if 'tags' in properties and isinstance(properties['tags'], list): serializable_tags = {} for tag in properties['tags']: serializable_tags[tag.key] = [tag.value, tag.propagate_at_launch] properties['tags'] = serializable_tags properties['healthy_instances'] = 0 properties['in_service_instances'] = 0 properties['unhealthy_instances'] = 0 properties['pending_instances'] = 0 properties['viable_instances'] = 0 properties['terminating_instances'] = 0 instance_facts = {} if autoscaling_group.instances: properties['instances'] = [i.instance_id for i in autoscaling_group.instances] for i in autoscaling_group.instances: instance_facts[i.instance_id] = {'health_status': i.health_status, 'lifecycle_state': i.lifecycle_state, 'launch_config_name': i.launch_config_name } if i.health_status == 'Healthy' and i.lifecycle_state == 'InService': properties['viable_instances'] += 1 if i.health_status == 'Healthy': properties['healthy_instances'] += 1 else: properties['unhealthy_instances'] += 1 if i.lifecycle_state == 'InService': properties['in_service_instances'] += 1 if i.lifecycle_state == 'Terminating': properties['terminating_instances'] += 1 if i.lifecycle_state == 'Pending': properties['pending_instances'] += 1 properties['instance_facts'] = instance_facts properties['load_balancers'] = autoscaling_group.load_balancers if getattr(autoscaling_group, "tags", None): properties['tags'] = dict((t.key, t.value) for t in autoscaling_group.tags) return properties def elb_dreg(asg_connection, module, group_name, instance_id): region, ec2_url, aws_connect_params = get_aws_connection_info(module) as_group = asg_connection.get_all_groups(names=[group_name])[0] wait_timeout = module.params.get('wait_timeout') props = get_properties(as_group) count = 1 if as_group.load_balancers and as_group.health_check_type == 'ELB': try: elb_connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg=str(e)) else: return for lb in as_group.load_balancers: elb_connection.deregister_instances(lb, instance_id) log.debug("De-registering {0} from ELB {1}".format(instance_id, lb)) wait_timeout = time.time() + wait_timeout while wait_timeout > time.time() and count > 0: count = 0 for lb in as_group.load_balancers: lb_instances = elb_connection.describe_instance_health(lb) for i in lb_instances: if i.instance_id == instance_id and i.state == "InService": count += 1 log.debug("{0}: {1}, {2}".format(i.instance_id, i.state, i.description)) time.sleep(10) if wait_timeout <= time.time(): # waiting took too long module.fail_json(msg = "Waited too long for instance to deregister. 
{0}".format(time.asctime())) def elb_healthy(asg_connection, elb_connection, module, group_name): healthy_instances = [] as_group = asg_connection.get_all_groups(names=[group_name])[0] props = get_properties(as_group) # get healthy, inservice instances from ASG instances = [] for instance, settings in props['instance_facts'].items(): if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy': instances.append(instance) log.debug("ASG considers the following instances InService and Healthy: {0}".format(instances)) log.debug("ELB instance status:") for lb in as_group.load_balancers: # we catch a race condition that sometimes happens if the instance exists in the ASG # but has not yet show up in the ELB try: lb_instances = elb_connection.describe_instance_health(lb, instances=instances) except boto.exception.InvalidInstance: pass for i in lb_instances: if i.state == "InService": healthy_instances.append(i.instance_id) log.debug("{0}: {1}".format(i.instance_id, i.state)) return len(healthy_instances) def wait_for_elb(asg_connection, module, group_name): region, ec2_url, aws_connect_params = get_aws_connection_info(module) wait_timeout = module.params.get('wait_timeout') # if the health_check_type is ELB, we want to query the ELBs directly for instance # status as to avoid health_check_grace period that is awarded to ASG instances as_group = asg_connection.get_all_groups(names=[group_name])[0] if as_group.load_balancers and as_group.health_check_type == 'ELB': log.debug("Waiting for ELB to consider intances healthy.") try: elb_connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg=str(e)) wait_timeout = time.time() + wait_timeout healthy_instances = elb_healthy(asg_connection, elb_connection, module, group_name) while healthy_instances < as_group.min_size and wait_timeout > time.time(): healthy_instances = elb_healthy(asg_connection, elb_connection, module, group_name) log.debug("ELB thinks {0} instances are healthy.".format(healthy_instances)) time.sleep(10) if wait_timeout <= time.time(): # waiting took too long module.fail_json(msg = "Waited too long for ELB instances to be healthy. %s" % time.asctime()) log.debug("Waiting complete. 
ELB thinks {0} instances are healthy.".format(healthy_instances)) def create_autoscaling_group(connection, module): group_name = module.params.get('name') load_balancers = module.params['load_balancers'] availability_zones = module.params['availability_zones'] launch_config_name = module.params.get('launch_config_name') min_size = module.params['min_size'] max_size = module.params['max_size'] desired_capacity = module.params.get('desired_capacity') vpc_zone_identifier = module.params.get('vpc_zone_identifier') set_tags = module.params.get('tags') health_check_period = module.params.get('health_check_period') health_check_type = module.params.get('health_check_type') default_cooldown = module.params.get('default_cooldown') wait_for_instances = module.params.get('wait_for_instances') as_groups = connection.get_all_groups(names=[group_name]) wait_timeout = module.params.get('wait_timeout') termination_policies = module.params.get('termination_policies') if not vpc_zone_identifier and not availability_zones: region, ec2_url, aws_connect_params = get_aws_connection_info(module) try: ec2_connection = connect_to_aws(boto.ec2, region, **aws_connect_params) except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: module.fail_json(msg=str(e)) elif vpc_zone_identifier: vpc_zone_identifier = ','.join(vpc_zone_identifier) asg_tags = [] for tag in set_tags: for k,v in tag.iteritems(): if k !='propagate_at_launch': asg_tags.append(Tag(key=k, value=v, propagate_at_launch=bool(tag.get('propagate_at_launch', True)), resource_id=group_name)) if not as_groups: if not vpc_zone_identifier and not availability_zones: availability_zones = module.params['availability_zones'] = [zone.name for zone in ec2_connection.get_all_zones()] enforce_required_arguments(module) launch_configs = connection.get_all_launch_configurations(names=[launch_config_name]) ag = AutoScalingGroup( group_name=group_name, load_balancers=load_balancers, availability_zones=availability_zones, launch_config=launch_configs[0], min_size=min_size, max_size=max_size, desired_capacity=desired_capacity, vpc_zone_identifier=vpc_zone_identifier, connection=connection, tags=asg_tags, health_check_period=health_check_period, health_check_type=health_check_type, default_cooldown=default_cooldown, termination_policies=termination_policies) try: connection.create_auto_scaling_group(ag) if wait_for_instances: wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances') wait_for_elb(connection, module, group_name) as_group = connection.get_all_groups(names=[group_name])[0] asg_properties = get_properties(as_group) changed = True return(changed, asg_properties) except BotoServerError, e: module.fail_json(msg=str(e)) else: as_group = as_groups[0] changed = False for attr in ASG_ATTRIBUTES: if module.params.get(attr, None) is not None: module_attr = module.params.get(attr) if attr == 'vpc_zone_identifier': module_attr = ','.join(module_attr) group_attr = getattr(as_group, attr) # we do this because AWS and the module may return the same list # sorted differently try: module_attr.sort() except: pass try: group_attr.sort() except: pass if group_attr != module_attr: changed = True setattr(as_group, attr, module_attr) if len(set_tags) > 0: have_tags = {} want_tags = {} for tag in asg_tags: want_tags[tag.key] = [tag.value, tag.propagate_at_launch] dead_tags = [] for tag in as_group.tags: have_tags[tag.key] = [tag.value, tag.propagate_at_launch] if tag.key not in want_tags: changed = True dead_tags.append(tag) if 
dead_tags != []: connection.delete_tags(dead_tags) if have_tags != want_tags: changed = True connection.create_or_update_tags(asg_tags) # handle loadbalancers separately because None != [] load_balancers = module.params.get('load_balancers') or [] if load_balancers and as_group.load_balancers != load_balancers: changed = True as_group.load_balancers = module.params.get('load_balancers') if changed: try: as_group.update() except BotoServerError, e: module.fail_json(msg=str(e)) if wait_for_instances: wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances') wait_for_elb(connection, module, group_name) try: as_group = connection.get_all_groups(names=[group_name])[0] asg_properties = get_properties(as_group) except BotoServerError, e: module.fail_json(msg=str(e)) return(changed, asg_properties) def delete_autoscaling_group(connection, module): group_name = module.params.get('name') groups = connection.get_all_groups(names=[group_name]) if groups: group = groups[0] group.max_size = 0 group.min_size = 0 group.desired_capacity = 0 group.update() instances = True while instances: tmp_groups = connection.get_all_groups(names=[group_name]) if tmp_groups: tmp_group = tmp_groups[0] if not tmp_group.instances: instances = False time.sleep(10) group.delete() while len(connection.get_all_groups(names=[group_name])): time.sleep(5) changed=True return changed else: changed=False return changed def get_chunks(l, n): for i in xrange(0, len(l), n): yield l[i:i+n] def update_size(group, max_size, min_size, dc): log.debug("setting ASG sizes") log.debug("minimum size: {0}, desired_capacity: {1}, max size: {2}".format(min_size, dc, max_size )) group.max_size = max_size group.min_size = min_size group.desired_capacity = dc group.update() def replace(connection, module): batch_size = module.params.get('replace_batch_size') wait_timeout = module.params.get('wait_timeout') group_name = module.params.get('name') max_size = module.params.get('max_size') min_size = module.params.get('min_size') desired_capacity = module.params.get('desired_capacity') lc_check = module.params.get('lc_check') replace_instances = module.params.get('replace_instances') as_group = connection.get_all_groups(names=[group_name])[0] wait_for_new_inst(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances') props = get_properties(as_group) instances = props.get('instances', []) if replace_instances: instances = replace_instances # check to see if instances are replaceable if checking launch configs new_instances, old_instances = get_instances_by_lc(props, lc_check, instances) num_new_inst_needed = desired_capacity - len(new_instances) if lc_check: if num_new_inst_needed == 0 and old_instances: log.debug("No new instances needed, but old instances are present. 
Removing old instances") terminate_batch(connection, module, old_instances, instances, True) as_group = connection.get_all_groups(names=[group_name])[0] props = get_properties(as_group) changed = True return(changed, props) # we don't want to spin up extra instances if not necessary if num_new_inst_needed < batch_size: log.debug("Overriding batch size to {0}".format(num_new_inst_needed)) batch_size = num_new_inst_needed if not old_instances: changed = False return(changed, props) #check if min_size/max_size/desired capacity have been specified and if not use ASG values if min_size is None: min_size = as_group.min_size if max_size is None: max_size = as_group.max_size if desired_capacity is None: desired_capacity = as_group.desired_capacity # set temporary settings and wait for them to be reached # This should get overriden if the number of instances left is less than the batch size. as_group = connection.get_all_groups(names=[group_name])[0] update_size(as_group, max_size + batch_size, min_size + batch_size, desired_capacity + batch_size) wait_for_new_inst(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances') wait_for_elb(connection, module, group_name) as_group = connection.get_all_groups(names=[group_name])[0] props = get_properties(as_group) instances = props.get('instances', []) if replace_instances: instances = replace_instances log.debug("beginning main loop") for i in get_chunks(instances, batch_size): # break out of this loop if we have enough new instances break_early, desired_size, term_instances = terminate_batch(connection, module, i, instances, False) wait_for_term_inst(connection, module, term_instances) wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, 'viable_instances') wait_for_elb(connection, module, group_name) as_group = connection.get_all_groups(names=[group_name])[0] if break_early: log.debug("breaking loop") break update_size(as_group, max_size, min_size, desired_capacity) as_group = connection.get_all_groups(names=[group_name])[0] asg_properties = get_properties(as_group) log.debug("Rolling update complete.") changed=True return(changed, asg_properties) def get_instances_by_lc(props, lc_check, initial_instances): new_instances = [] old_instances = [] # old instances are those that have the old launch config if lc_check: for i in props.get('instances', []): if props['instance_facts'][i]['launch_config_name'] == props['launch_config_name']: new_instances.append(i) else: old_instances.append(i) else: log.debug("Comparing initial instances with current: {0}".format(initial_instances)) for i in props.get('instances', []): if i not in initial_instances: new_instances.append(i) else: old_instances.append(i) log.debug("New instances: {0}, {1}".format(len(new_instances), new_instances)) log.debug("Old instances: {0}, {1}".format(len(old_instances), old_instances)) return new_instances, old_instances def list_purgeable_instances(props, lc_check, replace_instances, initial_instances): instances_to_terminate = [] instances = ( inst_id for inst_id in replace_instances if inst_id in props.get('instances', [])) # check to make sure instances given are actually in the given ASG # and they have a non-current launch config if lc_check: for i in instances: if props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']: instances_to_terminate.append(i) else: for i in instances: if i in initial_instances: instances_to_terminate.append(i) return instances_to_terminate def terminate_batch(connection, module, 
replace_instances, initial_instances, leftovers=False): batch_size = module.params.get('replace_batch_size') min_size = module.params.get('min_size') desired_capacity = module.params.get('desired_capacity') group_name = module.params.get('name') wait_timeout = int(module.params.get('wait_timeout')) lc_check = module.params.get('lc_check') decrement_capacity = False break_loop = False as_group = connection.get_all_groups(names=[group_name])[0] props = get_properties(as_group) desired_size = as_group.min_size new_instances, old_instances = get_instances_by_lc(props, lc_check, initial_instances) num_new_inst_needed = desired_capacity - len(new_instances) # check to make sure instances given are actually in the given ASG # and they have a non-current launch config instances_to_terminate = list_purgeable_instances(props, lc_check, replace_instances, initial_instances) log.debug("new instances needed: {0}".format(num_new_inst_needed)) log.debug("new instances: {0}".format(new_instances)) log.debug("old instances: {0}".format(old_instances)) log.debug("batch instances: {0}".format(",".join(instances_to_terminate))) if num_new_inst_needed == 0: decrement_capacity = True if as_group.min_size != min_size: as_group.min_size = min_size as_group.update() log.debug("Updating minimum size back to original of {0}".format(min_size)) #if are some leftover old instances, but we are already at capacity with new ones # we don't want to decrement capacity if leftovers: decrement_capacity = False break_loop = True instances_to_terminate = old_instances desired_size = min_size log.debug("No new instances needed") if num_new_inst_needed < batch_size and num_new_inst_needed !=0 : instances_to_terminate = instances_to_terminate[:num_new_inst_needed] decrement_capacity = False break_loop = False log.debug("{0} new instances needed".format(num_new_inst_needed)) log.debug("decrementing capacity: {0}".format(decrement_capacity)) for instance_id in instances_to_terminate: elb_dreg(connection, module, group_name, instance_id) log.debug("terminating instance: {0}".format(instance_id)) connection.terminate_instance(instance_id, decrement_capacity=decrement_capacity) # we wait to make sure the machines we marked as Unhealthy are # no longer in the list return break_loop, desired_size, instances_to_terminate def wait_for_term_inst(connection, module, term_instances): batch_size = module.params.get('replace_batch_size') wait_timeout = module.params.get('wait_timeout') group_name = module.params.get('name') lc_check = module.params.get('lc_check') as_group = connection.get_all_groups(names=[group_name])[0] props = get_properties(as_group) count = 1 wait_timeout = time.time() + wait_timeout while wait_timeout > time.time() and count > 0: log.debug("waiting for instances to terminate") count = 0 as_group = connection.get_all_groups(names=[group_name])[0] props = get_properties(as_group) instance_facts = props['instance_facts'] instances = ( i for i in instance_facts if i in term_instances) for i in instances: lifecycle = instance_facts[i]['lifecycle_state'] health = instance_facts[i]['health_status'] log.debug("Instance {0} has state of {1},{2}".format(i,lifecycle,health )) if lifecycle == 'Terminating' or healthy == 'Unhealthy': count += 1 time.sleep(10) if wait_timeout <= time.time(): # waiting took too long module.fail_json(msg = "Waited too long for old instances to terminate. 
%s" % time.asctime()) def wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, prop): # make sure we have the latest stats after that last loop. as_group = connection.get_all_groups(names=[group_name])[0] props = get_properties(as_group) log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop])) # now we make sure that we have enough instances in a viable state wait_timeout = time.time() + wait_timeout while wait_timeout > time.time() and desired_size > props[prop]: log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop])) time.sleep(10) as_group = connection.get_all_groups(names=[group_name])[0] props = get_properties(as_group) if wait_timeout <= time.time(): # waiting took too long module.fail_json(msg = "Waited too long for new instances to become viable. %s" % time.asctime()) log.debug("Reached {0}: {1}".format(prop, desired_size)) return props def main(): argument_spec = ec2_argument_spec() argument_spec.update( dict( name=dict(required=True, type='str'), load_balancers=dict(type='list'), availability_zones=dict(type='list'), launch_config_name=dict(type='str'), min_size=dict(type='int'), max_size=dict(type='int'), desired_capacity=dict(type='int'), vpc_zone_identifier=dict(type='list'), replace_batch_size=dict(type='int', default=1), replace_all_instances=dict(type='bool', default=False), replace_instances=dict(type='list', default=[]), lc_check=dict(type='bool', default=True), wait_timeout=dict(type='int', default=300), state=dict(default='present', choices=['present', 'absent']), tags=dict(type='list', default=[]), health_check_period=dict(type='int', default=300), health_check_type=dict(default='EC2', choices=['EC2', 'ELB']), default_cooldown=dict(type='int', default=300), wait_for_instances=dict(type='bool', default=True), termination_policies=dict(type='list', default='Default') ), ) module = AnsibleModule( argument_spec=argument_spec, mutually_exclusive = [['replace_all_instances', 'replace_instances']] ) if not HAS_BOTO: module.fail_json(msg='boto required for this module') state = module.params.get('state') replace_instances = module.params.get('replace_instances') replace_all_instances = module.params.get('replace_all_instances') region, ec2_url, aws_connect_params = get_aws_connection_info(module) try: connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params) if not connection: module.fail_json(msg="failed to connect to AWS for the given region: %s" % str(region)) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg=str(e)) changed = create_changed = replace_changed = False if state == 'present': create_changed, asg_properties=create_autoscaling_group(connection, module) elif state == 'absent': changed = delete_autoscaling_group(connection, module) module.exit_json( changed = changed ) if replace_all_instances or replace_instances: replace_changed, asg_properties=replace(connection, module) if create_changed or replace_changed: changed = True module.exit_json( changed = changed, **asg_properties ) if __name__ == '__main__': main()
gpl-3.0
-6,694,148,389,157,273,000
40.331731
232
0.645022
false
3.857319
true
false
false
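ec2_asg.py above performs rolling instance replacement by slicing the instance list into batches with get_chunks() and terminating one batch at a time while waiting for replacements to become viable. The short sketch below isolates just that batching idea; the instance ids are example values (two are borrowed from the module's EXAMPLES block) and nothing here talks to AWS.

def get_chunks(items, n):
    # same slicing pattern as the module's get_chunks(), written for Python 3
    for i in range(0, len(items), n):
        yield items[i:i + n]

instance_ids = ['i-b345231', 'i-24c2931', 'i-0a1b2c3d', 'i-9f8e7d6c', 'i-55443322']
for batch in get_chunks(instance_ids, 2):
    # in the real module each batch is terminated and the code then waits for
    # 'viable_instances' to reach the desired size before the next batch
    print('replace batch:', batch)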
sireliah/poniat
menu.py
1
2882
#-*- coding: utf-8 -*- import sys import pygame from pygame.locals import * from utils import * from initial import LoadMenuTextures class MainMenu(LoadMenuTextures): def __init__(self, modes, win_w, win_h): self.showmain = True self.submenu = False self.click = False self.modes = modes LoadMenuTextures.__init__(self, win_w, win_h) self.menuloop() def mousepos(self): self.pos = pygame.mouse.get_pos() def is_inside(self, coords): x, y = self.pos if (x > coords[0] and x < coords[4]) and (y > coords[1] and y < coords[5]): return True else: return False def startbutton(self): if self.is_inside(self.start_coords): self.start.show_button(hover=True) if self.click: self.showmain = False else: self.start.show_button() def aboutbutton(self): if self.is_inside(self.about_coords): self.about.show_button(hover=True) if self.click: self.submenu = True else: self.about.show_button() def gobackbutton(self): if self.is_inside(self.goback_coords): self.goback.show_button(hover=True) if self.click: self.submenu = False else: self.goback.show_button() def exitbutton(self): if self.is_inside(self.exit_coords): self.exit.show_button(hover=True) if self.click: sys.exit() else: self.exit.show_button() def events(self): self.mousepos() self.click = False for event in pygame.event.get(): if event.type == QUIT: print("koniec") if event.type == KEYDOWN: if event.key == K_ESCAPE: exit() if event.key == K_SPACE: pass if event.key == K_RETURN: self.showmain = False if event.key == K_LCTRL: pass elif event.type == MOUSEBUTTONDOWN: self.click = True def menuloop(self): while self.showmain: clear() self.events() self.mainback.show(0, 0) if self.submenu: self.aboutback.show(0, 0) self.gobackbutton() else: self.startbutton() self.aboutbutton() self.exitbutton() self.font.show(u"X: %s, Y: %s" % (self.pos), DARKRED, 10, 30, 1, 1) pygame.display.flip() clear() self.mainback.show(0, 0) self.frame.show(13, 14, 1.0, 1.0) self.font.show(u"Ładuję...", DARKRED, 10, 30, 2, 2) pygame.display.flip()
gpl-3.0
1,126,106,222,080,723,800
27.514851
83
0.498958
false
3.829787
false
false
false
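menu.py above hit-tests the mouse position against packed button coordinate tuples in MainMenu.is_inside. The same check can be expressed with an explicit pygame.Rect, shown below purely as an illustration; the button position and size are made up.

import pygame

button_rect = pygame.Rect(100, 200, 180, 40)   # x, y, width, height (example values)

def is_inside(pos, rect):
    # equivalent to comparing pos against left/right/top/bottom, as the
    # menu's is_inside does with its packed coords tuple
    return rect.collidepoint(pos)

print(is_inside((150, 220), button_rect))   # inside the rectangle
print(is_inside((10, 10), button_rect))     # outside the rectangle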
FeodorM/some_code
some_nice_python_things/weather.py
1
2096
#! /usr/bin/env python3 import pyowm import datetime owm = pyowm.OWM('2642ecf7132b8918b8f073910006483c', language='ru') now = pyowm.timeutils.now().date() tomorrow = pyowm.timeutils.tomorrow().date() def to_human_time(unix): return datetime.datetime.fromtimestamp(unix) def weather_date(weather): return to_human_time(weather.get_reference_time()).date() def temperature_to_str(weather): rain = weather.get_rain() if not rain: rain = 'no rain' return "{}: {}, {}C, {}, humidity: {}%\n".format( to_human_time(weather.get_reference_time()).time(), weather.get_detailed_status(), weather.get_temperature('celsius')['temp'], rain, weather.get_humidity() ) def forecast(): f = owm.three_hours_forecast('Voronezh,RU') weathers = f.get_forecast().get_weathers() if weather_date(weathers[0]) == now: print('Сегодня:\n') for w in (weather for weather in weathers if weather_date(weather) == now): print(temperature_to_str(w)) print('Завтра:\n') for w in (weather for weather in weathers if weather_date(weather) == tomorrow): print(temperature_to_str(w)) def current_weather(): w = owm.weather_at_place('Voronezh,RU').get_weather() print(""" {} Temperature: {}C -- {}C ({}C) Clouds: {}% Rain: {} Humidity: {}% Wind speed: {}m/s Time: {} """.format( w.get_detailed_status(), w.get_temperature('celsius')['temp_min'], w.get_temperature('celsius')['temp_max'], w.get_temperature('celsius')['temp'], w.get_clouds(), w.get_rain(), w.get_humidity(), w.get_wind()['speed'], w.get_reference_time('iso') )) if __name__ == '__main__': import sys arg = '' if len(sys.argv) == 1 else sys.argv[1] if arg == '': current_weather() forecast() elif arg == '-n' or arg == '--now': current_weather() elif arg == '-f' or arg == '--forecast': forecast() else: print('Wrong argument')
mit
2,245,925,329,467,947,500
23.797619
84
0.572732
false
3.194785
false
false
false
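weather.py above converts the unix reference times returned by pyowm into dates (to_human_time / weather_date) and then groups forecast entries into "today" and "tomorrow". The sketch below shows the same timestamp-to-date bucketing without the pyowm dependency; the timestamp values are arbitrary examples.

import datetime

def to_human_time(unix):
    return datetime.datetime.fromtimestamp(unix)

timestamps = [1489305600, 1489316400, 1489402800]   # example unix times
by_date = {}
for ts in timestamps:
    by_date.setdefault(to_human_time(ts).date(), []).append(ts)

for day, entries in sorted(by_date.items()):
    print(day, '->', len(entries), 'forecast entries')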
jeremiedecock/snippets
python/tkinter/python3/cairo_with_pil.py
1
3643
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org) # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # SEE: http://stackoverflow.com/questions/25480853/cairo-with-tkinter # http://effbot.org/tkinterbook/photoimage.htm#patterns # Required Debian package (Debian 8.1 Jessie): python3-pil.imagetk import tkinter as tk import PIL.Image as pil # PIL.Image is a module not a class... import PIL.ImageTk as piltk # PIL.ImageTk is a module not a class... import cairo if tk.TkVersion < 8.6: print("*" * 80) print("WARNING: Tk version {} is installed on your system.".format(tk.TkVersion)) print("Tk < 8.6 only supports three file formats: GIF, PGM and PPM.") print("You need to install Tk >= 8.6 if you want to read JPEG and PNG images!") print("*" * 80) # CAIRO w, h = 800, 600 cairo_surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, w, h) cairo_context = cairo.Context(cairo_surface) # Draw something cairo_context.scale(w, h) cairo_context.rectangle(0, 0, 1, 1) cairo_context.set_source_rgba(1, 0, 0, 0.8) cairo_context.fill() # TKINTER # WARNING: # A Tk window MUST be created before you can call PhotoImage! # See: http://stackoverflow.com/questions/3177231/python-pil-imagetk-photoimage-is-giving-me-a-bus-error # http://stackoverflow.com/questions/1236540/how-do-i-use-pil-with-tkinter root = tk.Tk() # PIL # WARNING: # You must keep a reference to the image object in your Python program, # either by storing it in a global variable, or by attaching it to another # object! # # When a PhotoImage object is garbage-collected by Python (e.g. when you # return from a function which stored an image in a local variable), the # image is cleared even if it’s being displayed by a Tkinter widget. # # To avoid this, the program must keep an extra reference to the image # object. A simple way to do this is to assign the image to a widget # attribute, like this: # # label = Label(image=tk_photo) # label.image = tk_photo # keep a reference! # label.pack() # # (src: http://effbot.org/tkinterbook/photoimage.htm#patterns) # See also http://infohost.nmt.edu/tcc/help/pubs/pil/image-tk.html # WARNING: # "cairo_surface.get_data()" is not yet implemented for Python3 (but it works with Python2). 
# See http://www.cairographics.org/documentation/pycairo/3/reference/surfaces.html#cairo.ImageSurface.get_data pil_image = pil.frombuffer("RGBA", (w,h), cairo_surface.get_data(), "raw", "BGRA", 0, 1) tk_photo = piltk.PhotoImage(pil_image) # TKINTER label = tk.Label(root, image=tk_photo) label.pack() root.mainloop()
mit
2,852,471,698,596,769,000
34.676471
110
0.732344
false
3.332418
false
false
false
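cairo_with_pil.py above stresses that a Tkinter PhotoImage must stay referenced, otherwise it can be garbage-collected while a widget is still displaying it. Below is a compact, runnable illustration of that rule, independent of cairo; the red placeholder image and the make_label helper are invented for the example.

import tkinter as tk
import PIL.Image as pil
import PIL.ImageTk as piltk

def make_label(parent):
    img = pil.new("RGBA", (64, 64), (255, 0, 0, 255))   # solid red square
    photo = piltk.PhotoImage(img)
    label = tk.Label(parent, image=photo)
    label.image = photo   # keep a reference; without this the label may render blank
    label.pack()
    return label

root = tk.Tk()
make_label(root)
root.mainloop()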
ostinelli/pyopenspime
lib/dns/rdataset.py
1
11607
# Copyright (C) 2001-2007 Nominum, Inc. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose with or without fee is hereby granted, # provided that the above copyright notice and this permission notice # appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. """DNS rdatasets (an rdataset is a set of rdatas of a given type and class)""" import random import StringIO import struct import dns.exception import dns.rdatatype import dns.rdataclass import dns.rdata import dns.set # define SimpleSet here for backwards compatibility SimpleSet = dns.set.Set class DifferingCovers(dns.exception.DNSException): """Raised if an attempt is made to add a SIG/RRSIG whose covered type is not the same as that of the other rdatas in the rdataset.""" pass class IncompatibleTypes(dns.exception.DNSException): """Raised if an attempt is made to add rdata of an incompatible type.""" pass class Rdataset(dns.set.Set): """A DNS rdataset. @ivar rdclass: The class of the rdataset @type rdclass: int @ivar rdtype: The type of the rdataset @type rdtype: int @ivar covers: The covered type. Usually this value is dns.rdatatype.NONE, but if the rdtype is dns.rdatatype.SIG or dns.rdatatype.RRSIG, then the covers value will be the rdata type the SIG/RRSIG covers. The library treats the SIG and RRSIG types as if they were a family of types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA). This makes RRSIGs much easier to work with than if RRSIGs covering different rdata types were aggregated into a single RRSIG rdataset. @type covers: int @ivar ttl: The DNS TTL (Time To Live) value @type ttl: int """ __slots__ = ['rdclass', 'rdtype', 'covers', 'ttl'] def __init__(self, rdclass, rdtype, covers=dns.rdatatype.NONE): """Create a new rdataset of the specified class and type. @see: the description of the class instance variables for the meaning of I{rdclass} and I{rdtype}""" super(Rdataset, self).__init__() self.rdclass = rdclass self.rdtype = rdtype self.covers = covers self.ttl = 0 def _clone(self): obj = super(Rdataset, self)._clone() obj.rdclass = self.rdclass obj.rdtype = self.rdtype obj.covers = self.covers obj.ttl = self.ttl return obj def update_ttl(self, ttl): """Set the TTL of the rdataset to be the lesser of the set's current TTL or the specified TTL. If the set contains no rdatas, set the TTL to the specified TTL. @param ttl: The TTL @type ttl: int""" if len(self) == 0: self.ttl = ttl elif ttl < self.ttl: self.ttl = ttl def add(self, rd, ttl=None): """Add the specified rdata to the rdataset. If the optional I{ttl} parameter is supplied, then self.update_ttl(ttl) will be called prior to adding the rdata. @param rd: The rdata @type rd: dns.rdata.Rdata object @param ttl: The TTL @type ttl: int""" # # If we're adding a signature, do some special handling to # check that the signature covers the same type as the # other rdatas in this rdataset. If this is the first rdata # in the set, initialize the covers field. 
# if self.rdclass != rd.rdclass or self.rdtype != rd.rdtype: raise IncompatibleTypes if not ttl is None: self.update_ttl(ttl) if self.rdtype == dns.rdatatype.RRSIG or \ self.rdtype == dns.rdatatype.SIG: covers = rd.covers() if len(self) == 0 and self.covers == dns.rdatatype.NONE: self.covers = covers elif self.covers != covers: raise DifferingCovers if dns.rdatatype.is_singleton(rd.rdtype) and len(self) > 0: self.clear() super(Rdataset, self).add(rd) def union_update(self, other): self.update_ttl(other.ttl) super(Rdataset, self).union_update(other) def intersection_update(self, other): self.update_ttl(other.ttl) super(Rdataset, self).intersection_update(other) def update(self, other): """Add all rdatas in other to self. @param other: The rdataset from which to update @type other: dns.rdataset.Rdataset object""" self.update_ttl(other.ttl) super(Rdataset, self).update(other) def __repr__(self): if self.covers == 0: ctext = '' else: ctext = '(' + dns.rdatatype.to_text(self.covers) + ')' return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \ dns.rdatatype.to_text(self.rdtype) + ctext + ' rdataset>' def __str__(self): return self.to_text() def __eq__(self, other): """Two rdatasets are equal if they have the same class, type, and covers, and contain the same rdata. @rtype: bool""" if not isinstance(other, Rdataset): return False if self.rdclass != other.rdclass or \ self.rdtype != other.rdtype or \ self.covers != other.covers: return False return super(Rdataset, self).__eq__(other) def __ne__(self, other): return not self.__eq__(other) def to_text(self, name=None, origin=None, relativize=True, override_rdclass=None, **kw): """Convert the rdataset into DNS master file format. @see: L{dns.name.Name.choose_relativity} for more information on how I{origin} and I{relativize} determine the way names are emitted. Any additional keyword arguments are passed on to the rdata to_text() method. @param name: If name is not None, emit a RRs with I{name} as the owner name. @type name: dns.name.Name object @param origin: The origin for relative names, or None. @type origin: dns.name.Name object @param relativize: True if names should names be relativized @type relativize: bool""" if not name is None: name = name.choose_relativity(origin, relativize) ntext = str(name) pad = ' ' else: ntext = '' pad = '' s = StringIO.StringIO() if not override_rdclass is None: rdclass = override_rdclass else: rdclass = self.rdclass if len(self) == 0: # # Empty rdatasets are used for the question section, and in # some dynamic updates, so we don't need to print out the TTL # (which is meaningless anyway). # print >> s, '%s%s%s %s' % (ntext, pad, dns.rdataclass.to_text(rdclass), dns.rdatatype.to_text(self.rdtype)) else: for rd in self: print >> s, '%s%s%d %s %s %s' % \ (ntext, pad, self.ttl, dns.rdataclass.to_text(rdclass), dns.rdatatype.to_text(self.rdtype), rd.to_text(origin=origin, relativize=relativize, **kw)) # # We strip off the final \n for the caller's convenience in printing # return s.getvalue()[:-1] def to_wire(self, name, file, compress=None, origin=None, override_rdclass=None, want_shuffle=True): """Convert the rdataset to wire format. @param name: The owner name of the RRset that will be emitted @type name: dns.name.Name object @param file: The file to which the wire format data will be appended @type file: file @param compress: The compression table to use; the default is None. @type compress: dict @param origin: The origin to be appended to any relative names when they are emitted. The default is None. 
@returns: the number of records emitted @rtype: int """ if not override_rdclass is None: rdclass = override_rdclass want_shuffle = False else: rdclass = self.rdclass file.seek(0, 2) if len(self) == 0: name.to_wire(file, compress, origin) stuff = struct.pack("!HHIH", self.rdtype, rdclass, 0, 0) file.write(stuff) return 1 else: if want_shuffle: l = list(self) random.shuffle(l) else: l = self for rd in l: name.to_wire(file, compress, origin) stuff = struct.pack("!HHIH", self.rdtype, rdclass, self.ttl, 0) file.write(stuff) start = file.tell() rd.to_wire(file, compress, origin) end = file.tell() assert end - start < 65536 file.seek(start - 2) stuff = struct.pack("!H", end - start) file.write(stuff) file.seek(0, 2) return len(self) def match(self, rdclass, rdtype, covers): """Returns True if this rdataset matches the specified class, type, and covers""" if self.rdclass == rdclass and \ self.rdtype == rdtype and \ self.covers == covers: return True return False def from_text_list(rdclass, rdtype, ttl, text_rdatas): """Create an rdataset with the specified class, type, and TTL, and with the specified list of rdatas in text format. @rtype: dns.rdataset.Rdataset object """ if isinstance(rdclass, str): rdclass = dns.rdataclass.from_text(rdclass) if isinstance(rdtype, str): rdtype = dns.rdatatype.from_text(rdtype) r = Rdataset(rdclass, rdtype) r.update_ttl(ttl) for t in text_rdatas: rd = dns.rdata.from_text(r.rdclass, r.rdtype, t) r.add(rd) return r def from_text(rdclass, rdtype, ttl, *text_rdatas): """Create an rdataset with the specified class, type, and TTL, and with the specified rdatas in text format. @rtype: dns.rdataset.Rdataset object """ return from_text_list(rdclass, rdtype, ttl, text_rdatas) def from_rdata_list(ttl, rdatas): """Create an rdataset with the specified TTL, and with the specified list of rdata objects. @rtype: dns.rdataset.Rdataset object """ if len(rdatas) == 0: raise ValueError, "rdata list must not be empty" r = None for rd in rdatas: if r is None: r = Rdataset(rd.rdclass, rd.rdtype) r.update_ttl(ttl) first_time = False r.add(rd) return r def from_rdata(ttl, *rdatas): """Create an rdataset with the specified TTL, and with the specified rdata objects. @rtype: dns.rdataset.Rdataset object """ return from_rdata_list(ttl, rdatas)
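# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# A minimal example of the rdataset helpers defined above: from_text() builds an
# Rdataset from text rdatas, add()/update_ttl() keep the lowest TTL seen, and
# match()/to_text() inspect the set. The concrete addresses and TTL values here
# are assumptions chosen only for illustration.
import dns.rdata
import dns.rdataclass
import dns.rdataset
import dns.rdatatype

# Build an IN/A rdataset with a 300-second TTL from two text rdatas.
rrset = dns.rdataset.from_text('IN', 'A', 300, '10.0.0.1', '10.0.0.2')

# Adding with a smaller TTL lowers the set's TTL via update_ttl().
rd = dns.rdata.from_text(rrset.rdclass, rrset.rdtype, '10.0.0.3')
rrset.add(rd, ttl=120)
assert rrset.ttl == 120

# match() compares class, type, and covers; to_text() emits master-file format.
assert rrset.match(dns.rdataclass.IN, dns.rdatatype.A, dns.rdatatype.NONE)
print(rrset.to_text())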
gpl-3.0
7,146,983,658,648,308,000
34.279635
78
0.591023
false
4.012098
false
false
false
killbill/killbill-client-python
killbill/api/account_api.py
1
210059
# coding: utf-8 # # Copyright 2010-2014 Ning, Inc. # Copyright 2014-2020 Groupon, Inc # Copyright 2020-2021 Equinix, Inc # Copyright 2014-2021 The Billing Project, LLC # # The Billing Project, LLC licenses this file to you under the Apache License, version 2.0 # (the "License"); you may not use this file except in compliance with the # License. You may obtain a copy of the License at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Kill Bill Kill Bill is an open-source billing and payments platform # noqa: E501 OpenAPI spec version: 0.22.22-SNAPSHOT Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import re # noqa: F401 # python 2 and python 3 compatibility library import six from killbill.api_client import ApiClient class AccountApi(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def add_account_blocking_state(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501 """Block an account # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.add_account_blocking_state(account_id, body, created_by, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param BlockingState body: (required) :param Str created_by: (required) :param Date requested_date: :param List[Str] plugin_property: :param Str reason: :param Str comment: :return: List[BlockingState] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.add_account_blocking_state_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501 else: (data) = self.add_account_blocking_state_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501 return data def add_account_blocking_state_with_http_info(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501 """Block an account # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.add_account_blocking_state_with_http_info(account_id, body, created_by, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param BlockingState body: (required) :param Str created_by: (required) :param Date requested_date: :param List[Str] plugin_property: :param Str reason: :param Str comment: :return: List[BlockingState] If the method is called asynchronously, returns the request thread. 
""" all_params = ['account_id', 'body', 'created_by', 'requested_date', 'plugin_property', 'reason', 'comment'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method add_account_blocking_state" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params or params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `add_account_blocking_state`") # noqa: E501 # verify the required parameter 'body' is set if ('body' not in params or params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `add_account_blocking_state`") # noqa: E501 # verify the required parameter 'created_by' is set if ('created_by' not in params or params['created_by'] is None): raise ValueError("Missing the required parameter `created_by` when calling `add_account_blocking_state`") # noqa: E501 if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_id` when calling `add_account_blocking_state`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] # noqa: E501 query_params = [] if 'requested_date' in params: query_params.append(('requestedDate', params['requested_date'])) # noqa: E501 if 'plugin_property' in params: query_params.append(('pluginProperty', params['plugin_property'])) # noqa: E501 collection_formats['pluginProperty'] = 'multi' # noqa: E501 header_params = {} if 'created_by' in params: header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501 if 'reason' in params: header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501 if 'comment' in params: header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501 form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/{accountId}/block', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='List[BlockingState]', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def add_email(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501 """Add account email # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.add_email(account_id, body, created_by, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param AccountEmail body: (required) :param Str created_by: (required) :param Str reason: :param Str comment: :return: List[AccountEmail] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.add_email_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501 else: (data) = self.add_email_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501 return data def add_email_with_http_info(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501 """Add account email # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.add_email_with_http_info(account_id, body, created_by, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param AccountEmail body: (required) :param Str created_by: (required) :param Str reason: :param Str comment: :return: List[AccountEmail] If the method is called asynchronously, returns the request thread. """ all_params = ['account_id', 'body', 'created_by', 'reason', 'comment'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method add_email" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params or params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `add_email`") # noqa: E501 # verify the required parameter 'body' is set if ('body' not in params or params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `add_email`") # noqa: E501 # verify the required parameter 'created_by' is set if ('created_by' not in params or params['created_by'] is None): raise ValueError("Missing the required parameter `created_by` when calling `add_email`") # noqa: E501 if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_id` when calling `add_email`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] # noqa: E501 query_params = [] header_params = {} if 'created_by' in params: header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501 if 'reason' in params: header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501 if 'comment' in params: header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501 form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = 
['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/{accountId}/emails', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='List[AccountEmail]', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def close_account(self, account_id=None, created_by=None, **kwargs): # noqa: E501 """Close account # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.close_account(account_id, created_by, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Str created_by: (required) :param Bool cancel_all_subscriptions: :param Bool write_off_unpaid_invoices: :param Bool item_adjust_unpaid_invoices: :param Bool remove_future_notifications: :param Str reason: :param Str comment: :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.close_account_with_http_info(account_id, created_by, **kwargs) # noqa: E501 else: (data) = self.close_account_with_http_info(account_id, created_by, **kwargs) # noqa: E501 return data def close_account_with_http_info(self, account_id=None, created_by=None, **kwargs): # noqa: E501 """Close account # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.close_account_with_http_info(account_id, created_by, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Str created_by: (required) :param Bool cancel_all_subscriptions: :param Bool write_off_unpaid_invoices: :param Bool item_adjust_unpaid_invoices: :param Bool remove_future_notifications: :param Str reason: :param Str comment: :return: None If the method is called asynchronously, returns the request thread. 
""" all_params = ['account_id', 'created_by', 'cancel_all_subscriptions', 'write_off_unpaid_invoices', 'item_adjust_unpaid_invoices', 'remove_future_notifications', 'reason', 'comment'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method close_account" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params or params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `close_account`") # noqa: E501 # verify the required parameter 'created_by' is set if ('created_by' not in params or params['created_by'] is None): raise ValueError("Missing the required parameter `created_by` when calling `close_account`") # noqa: E501 if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_id` when calling `close_account`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] # noqa: E501 query_params = [] if 'cancel_all_subscriptions' in params: query_params.append(('cancelAllSubscriptions', params['cancel_all_subscriptions'])) # noqa: E501 if 'write_off_unpaid_invoices' in params: query_params.append(('writeOffUnpaidInvoices', params['write_off_unpaid_invoices'])) # noqa: E501 if 'item_adjust_unpaid_invoices' in params: query_params.append(('itemAdjustUnpaidInvoices', params['item_adjust_unpaid_invoices'])) # noqa: E501 if 'remove_future_notifications' in params: query_params.append(('removeFutureNotifications', params['remove_future_notifications'])) # noqa: E501 header_params = {} if 'created_by' in params: header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501 if 'reason' in params: header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501 if 'comment' in params: header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501 form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/{accountId}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def create_account(self, body=None, created_by=None, **kwargs): # noqa: E501 """Create account # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_account(body, created_by, async=True) >>> result = thread.get() :param async bool :param Account body: (required) :param Str created_by: (required) :param Str reason: :param Str comment: :return: Account If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.create_account_with_http_info(body, created_by, **kwargs) # noqa: E501 else: (data) = self.create_account_with_http_info(body, created_by, **kwargs) # noqa: E501 return data def create_account_with_http_info(self, body=None, created_by=None, **kwargs): # noqa: E501 """Create account # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_account_with_http_info(body, created_by, async=True) >>> result = thread.get() :param async bool :param Account body: (required) :param Str created_by: (required) :param Str reason: :param Str comment: :return: Account If the method is called asynchronously, returns the request thread. """ all_params = ['body', 'created_by', 'reason', 'comment'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_account" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'body' is set if ('body' not in params or params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `create_account`") # noqa: E501 # verify the required parameter 'created_by' is set if ('created_by' not in params or params['created_by'] is None): raise ValueError("Missing the required parameter `created_by` when calling `create_account`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] header_params = {} if 'created_by' in params: header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501 if 'reason' in params: header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501 if 'comment' in params: header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501 form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='Account', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def create_account_custom_fields(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501 """Add custom fields to account # noqa: E501 # noqa: E501 This method makes 
a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_account_custom_fields(account_id, body, created_by, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param List[CustomField] body: (required) :param Str created_by: (required) :param Str reason: :param Str comment: :return: List[CustomField] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.create_account_custom_fields_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501 else: (data) = self.create_account_custom_fields_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501 return data def create_account_custom_fields_with_http_info(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501 """Add custom fields to account # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_account_custom_fields_with_http_info(account_id, body, created_by, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param List[CustomField] body: (required) :param Str created_by: (required) :param Str reason: :param Str comment: :return: List[CustomField] If the method is called asynchronously, returns the request thread. """ all_params = ['account_id', 'body', 'created_by', 'reason', 'comment'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_account_custom_fields" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params or params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `create_account_custom_fields`") # noqa: E501 # verify the required parameter 'body' is set if ('body' not in params or params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `create_account_custom_fields`") # noqa: E501 # verify the required parameter 'created_by' is set if ('created_by' not in params or params['created_by'] is None): raise ValueError("Missing the required parameter `created_by` when calling `create_account_custom_fields`") # noqa: E501 if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_id` when calling `create_account_custom_fields`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] # noqa: E501 query_params = [] header_params = {} if 'created_by' in params: header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501 if 'reason' in params: header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501 if 'comment' in params: header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501 form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = 
self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/{accountId}/customFields', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='List[CustomField]', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def create_account_tags(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501 """Add tags to account # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_account_tags(account_id, body, created_by, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param List[Str] body: (required) :param Str created_by: (required) :param Str reason: :param Str comment: :return: List[Tag] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.create_account_tags_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501 else: (data) = self.create_account_tags_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501 return data def create_account_tags_with_http_info(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501 """Add tags to account # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_account_tags_with_http_info(account_id, body, created_by, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param List[Str] body: (required) :param Str created_by: (required) :param Str reason: :param Str comment: :return: List[Tag] If the method is called asynchronously, returns the request thread. 
""" all_params = ['account_id', 'body', 'created_by', 'reason', 'comment'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_account_tags" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params or params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `create_account_tags`") # noqa: E501 # verify the required parameter 'body' is set if ('body' not in params or params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `create_account_tags`") # noqa: E501 # verify the required parameter 'created_by' is set if ('created_by' not in params or params['created_by'] is None): raise ValueError("Missing the required parameter `created_by` when calling `create_account_tags`") # noqa: E501 if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_id` when calling `create_account_tags`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] # noqa: E501 query_params = [] header_params = {} if 'created_by' in params: header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501 if 'reason' in params: header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501 if 'comment' in params: header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501 form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/{accountId}/tags', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='List[Tag]', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def create_payment_method(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501 """Add a payment method # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_payment_method(account_id, body, created_by, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param PaymentMethod body: (required) :param Str created_by: (required) :param Bool is_default: :param Bool pay_all_unpaid_invoices: :param List[Str] control_plugin_name: :param List[Str] plugin_property: :param Str reason: :param Str comment: :return: PaymentMethod If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.create_payment_method_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501 else: (data) = self.create_payment_method_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501 return data def create_payment_method_with_http_info(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501 """Add a payment method # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_payment_method_with_http_info(account_id, body, created_by, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param PaymentMethod body: (required) :param Str created_by: (required) :param Bool is_default: :param Bool pay_all_unpaid_invoices: :param List[Str] control_plugin_name: :param List[Str] plugin_property: :param Str reason: :param Str comment: :return: PaymentMethod If the method is called asynchronously, returns the request thread. """ all_params = ['account_id', 'body', 'created_by', 'is_default', 'pay_all_unpaid_invoices', 'control_plugin_name', 'plugin_property', 'reason', 'comment'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_payment_method" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params or params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `create_payment_method`") # noqa: E501 # verify the required parameter 'body' is set if ('body' not in params or params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `create_payment_method`") # noqa: E501 # verify the required parameter 'created_by' is set if ('created_by' not in params or params['created_by'] is None): raise ValueError("Missing the required parameter `created_by` when calling `create_payment_method`") # noqa: E501 if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_id` when calling `create_payment_method`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] # noqa: E501 query_params = [] if 'is_default' in params: query_params.append(('isDefault', params['is_default'])) # noqa: E501 if 'pay_all_unpaid_invoices' in params: query_params.append(('payAllUnpaidInvoices', params['pay_all_unpaid_invoices'])) # noqa: E501 if 'control_plugin_name' in params: 
query_params.append(('controlPluginName', params['control_plugin_name'])) # noqa: E501 collection_formats['controlPluginName'] = 'multi' # noqa: E501 if 'plugin_property' in params: query_params.append(('pluginProperty', params['plugin_property'])) # noqa: E501 collection_formats['pluginProperty'] = 'multi' # noqa: E501 header_params = {} if 'created_by' in params: header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501 if 'reason' in params: header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501 if 'comment' in params: header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501 form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/{accountId}/paymentMethods', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='PaymentMethod', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_account_custom_fields(self, account_id=None, created_by=None, **kwargs): # noqa: E501 """Remove custom fields from account # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_account_custom_fields(account_id, created_by, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Str created_by: (required) :param List[Str] custom_field: :param Str reason: :param Str comment: :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.delete_account_custom_fields_with_http_info(account_id, created_by, **kwargs) # noqa: E501 else: (data) = self.delete_account_custom_fields_with_http_info(account_id, created_by, **kwargs) # noqa: E501 return data def delete_account_custom_fields_with_http_info(self, account_id=None, created_by=None, **kwargs): # noqa: E501 """Remove custom fields from account # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_account_custom_fields_with_http_info(account_id, created_by, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Str created_by: (required) :param List[Str] custom_field: :param Str reason: :param Str comment: :return: None If the method is called asynchronously, returns the request thread. 
""" all_params = ['account_id', 'created_by', 'custom_field', 'reason', 'comment'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_account_custom_fields" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params or params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `delete_account_custom_fields`") # noqa: E501 # verify the required parameter 'created_by' is set if ('created_by' not in params or params['created_by'] is None): raise ValueError("Missing the required parameter `created_by` when calling `delete_account_custom_fields`") # noqa: E501 if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_id` when calling `delete_account_custom_fields`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] # noqa: E501 query_params = [] if 'custom_field' in params: query_params.append(('customField', params['custom_field'])) # noqa: E501 collection_formats['customField'] = 'multi' # noqa: E501 header_params = {} if 'created_by' in params: header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501 if 'reason' in params: header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501 if 'comment' in params: header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501 form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/{accountId}/customFields', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_account_tags(self, account_id=None, created_by=None, **kwargs): # noqa: E501 """Remove tags from account # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_account_tags(account_id, created_by, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Str created_by: (required) :param List[Str] tag_def: :param Str reason: :param Str comment: :return: None If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.delete_account_tags_with_http_info(account_id, created_by, **kwargs) # noqa: E501 else: (data) = self.delete_account_tags_with_http_info(account_id, created_by, **kwargs) # noqa: E501 return data def delete_account_tags_with_http_info(self, account_id=None, created_by=None, **kwargs): # noqa: E501 """Remove tags from account # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_account_tags_with_http_info(account_id, created_by, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Str created_by: (required) :param List[Str] tag_def: :param Str reason: :param Str comment: :return: None If the method is called asynchronously, returns the request thread. """ all_params = ['account_id', 'created_by', 'tag_def', 'reason', 'comment'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_account_tags" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params or params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `delete_account_tags`") # noqa: E501 # verify the required parameter 'created_by' is set if ('created_by' not in params or params['created_by'] is None): raise ValueError("Missing the required parameter `created_by` when calling `delete_account_tags`") # noqa: E501 if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_id` when calling `delete_account_tags`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] # noqa: E501 query_params = [] if 'tag_def' in params: query_params.append(('tagDef', params['tag_def'])) # noqa: E501 collection_formats['tagDef'] = 'multi' # noqa: E501 header_params = {} if 'created_by' in params: header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501 if 'reason' in params: header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501 if 'comment' in params: header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501 form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/{accountId}/tags', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), 
_request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_account(self, account_id=None, **kwargs): # noqa: E501 """Retrieve an account by id # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_account(account_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Bool account_with_balance: :param Bool account_with_balance_and_cba: :param Str audit: :return: Account If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_account_with_http_info(account_id, **kwargs) # noqa: E501 else: (data) = self.get_account_with_http_info(account_id, **kwargs) # noqa: E501 return data def get_account_with_http_info(self, account_id=None, **kwargs): # noqa: E501 """Retrieve an account by id # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_account_with_http_info(account_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Bool account_with_balance: :param Bool account_with_balance_and_cba: :param Str audit: :return: Account If the method is called asynchronously, returns the request thread. """ all_params = ['account_id', 'account_with_balance', 'account_with_balance_and_cba', 'audit'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_account" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params or params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `get_account`") # noqa: E501 if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_id` when calling `get_account`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] # noqa: E501 query_params = [] if 'account_with_balance' in params: query_params.append(('accountWithBalance', params['account_with_balance'])) # noqa: E501 if 'account_with_balance_and_cba' in params: query_params.append(('accountWithBalanceAndCBA', params['account_with_balance_and_cba'])) # noqa: E501 if 'audit' in params: query_params.append(('audit', params['audit'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/{accountId}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='Account', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), 
_return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_account_audit_logs(self, account_id=None, **kwargs): # noqa: E501 """Retrieve audit logs by account id # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_account_audit_logs(account_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :return: List[AuditLog] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_account_audit_logs_with_http_info(account_id, **kwargs) # noqa: E501 else: (data) = self.get_account_audit_logs_with_http_info(account_id, **kwargs) # noqa: E501 return data def get_account_audit_logs_with_http_info(self, account_id=None, **kwargs): # noqa: E501 """Retrieve audit logs by account id # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_account_audit_logs_with_http_info(account_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :return: List[AuditLog] If the method is called asynchronously, returns the request thread. """ all_params = ['account_id'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_account_audit_logs" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params or params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `get_account_audit_logs`") # noqa: E501 if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_id` when calling `get_account_audit_logs`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/{accountId}/auditLogs', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='List[AuditLog]', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_account_audit_logs_with_history(self, account_id=None, **kwargs): # noqa: E501 """Retrieve account audit logs with history by account id # noqa: E501 # noqa: E501 This method makes a 
synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_account_audit_logs_with_history(account_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :return: List[AuditLog] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_account_audit_logs_with_history_with_http_info(account_id, **kwargs) # noqa: E501 else: (data) = self.get_account_audit_logs_with_history_with_http_info(account_id, **kwargs) # noqa: E501 return data def get_account_audit_logs_with_history_with_http_info(self, account_id=None, **kwargs): # noqa: E501 """Retrieve account audit logs with history by account id # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_account_audit_logs_with_history_with_http_info(account_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :return: List[AuditLog] If the method is called asynchronously, returns the request thread. """ all_params = ['account_id'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_account_audit_logs_with_history" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params or params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `get_account_audit_logs_with_history`") # noqa: E501 if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_id` when calling `get_account_audit_logs_with_history`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/{accountId}/auditLogsWithHistory', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='List[AuditLog]', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_account_bundles(self, account_id=None, **kwargs): # noqa: E501 """Retrieve bundles for account # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_account_bundles(account_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Str external_key: :param Str bundles_filter: :param Str audit: :return: List[Bundle] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_account_bundles_with_http_info(account_id, **kwargs) # noqa: E501 else: (data) = self.get_account_bundles_with_http_info(account_id, **kwargs) # noqa: E501 return data def get_account_bundles_with_http_info(self, account_id=None, **kwargs): # noqa: E501 """Retrieve bundles for account # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_account_bundles_with_http_info(account_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Str external_key: :param Str bundles_filter: :param Str audit: :return: List[Bundle] If the method is called asynchronously, returns the request thread. """ all_params = ['account_id', 'external_key', 'bundles_filter', 'audit'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_account_bundles" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params or params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `get_account_bundles`") # noqa: E501 if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_id` when calling `get_account_bundles`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] # noqa: E501 query_params = [] if 'external_key' in params: query_params.append(('externalKey', params['external_key'])) # noqa: E501 if 'bundles_filter' in params: query_params.append(('bundlesFilter', params['bundles_filter'])) # noqa: E501 if 'audit' in params: query_params.append(('audit', params['audit'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/{accountId}/bundles', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='List[Bundle]', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_account_by_key(self, external_key=None, **kwargs): # noqa: E501 """Retrieve an account by external key # noqa: E501 # noqa: E501 This method 
makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_account_by_key(external_key, async=True) >>> result = thread.get() :param async bool :param Str external_key: (required) :param Bool account_with_balance: :param Bool account_with_balance_and_cba: :param Str audit: :return: Account If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_account_by_key_with_http_info(external_key, **kwargs) # noqa: E501 else: (data) = self.get_account_by_key_with_http_info(external_key, **kwargs) # noqa: E501 return data def get_account_by_key_with_http_info(self, external_key=None, **kwargs): # noqa: E501 """Retrieve an account by external key # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_account_by_key_with_http_info(external_key, async=True) >>> result = thread.get() :param async bool :param Str external_key: (required) :param Bool account_with_balance: :param Bool account_with_balance_and_cba: :param Str audit: :return: Account If the method is called asynchronously, returns the request thread. """ all_params = ['external_key', 'account_with_balance', 'account_with_balance_and_cba', 'audit'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_account_by_key" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'external_key' is set if ('external_key' not in params or params['external_key'] is None): raise ValueError("Missing the required parameter `external_key` when calling `get_account_by_key`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] if 'external_key' in params: query_params.append(('externalKey', params['external_key'])) # noqa: E501 if 'account_with_balance' in params: query_params.append(('accountWithBalance', params['account_with_balance'])) # noqa: E501 if 'account_with_balance_and_cba' in params: query_params.append(('accountWithBalanceAndCBA', params['account_with_balance_and_cba'])) # noqa: E501 if 'audit' in params: query_params.append(('audit', params['audit'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='Account', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_account_custom_fields(self, account_id=None, **kwargs): # noqa: E501 """Retrieve account custom fields # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_account_custom_fields(account_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Str audit: :return: List[CustomField] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_account_custom_fields_with_http_info(account_id, **kwargs) # noqa: E501 else: (data) = self.get_account_custom_fields_with_http_info(account_id, **kwargs) # noqa: E501 return data def get_account_custom_fields_with_http_info(self, account_id=None, **kwargs): # noqa: E501 """Retrieve account custom fields # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_account_custom_fields_with_http_info(account_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Str audit: :return: List[CustomField] If the method is called asynchronously, returns the request thread. """ all_params = ['account_id', 'audit'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_account_custom_fields" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params or params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `get_account_custom_fields`") # noqa: E501 if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_id` when calling `get_account_custom_fields`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] # noqa: E501 query_params = [] if 'audit' in params: query_params.append(('audit', params['audit'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/{accountId}/customFields', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='List[CustomField]', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_account_email_audit_logs_with_history(self, account_id=None, account_email_id=None, **kwargs): # noqa: E501 """Retrieve account email audit logs with history by id # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_account_email_audit_logs_with_history(account_id, account_email_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Str account_email_id: (required) :return: List[AuditLog] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_account_email_audit_logs_with_history_with_http_info(account_id, account_email_id, **kwargs) # noqa: E501 else: (data) = self.get_account_email_audit_logs_with_history_with_http_info(account_id, account_email_id, **kwargs) # noqa: E501 return data def get_account_email_audit_logs_with_history_with_http_info(self, account_id=None, account_email_id=None, **kwargs): # noqa: E501 """Retrieve account email audit logs with history by id # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_account_email_audit_logs_with_history_with_http_info(account_id, account_email_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Str account_email_id: (required) :return: List[AuditLog] If the method is called asynchronously, returns the request thread. """ all_params = ['account_id', 'account_email_id'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_account_email_audit_logs_with_history" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params or params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `get_account_email_audit_logs_with_history`") # noqa: E501 # verify the required parameter 'account_email_id' is set if ('account_email_id' not in params or params['account_email_id'] is None): raise ValueError("Missing the required parameter `account_email_id` when calling `get_account_email_audit_logs_with_history`") # noqa: E501 if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_id` when calling `get_account_email_audit_logs_with_history`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 if 'account_email_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_email_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_email_id` when calling `get_account_email_audit_logs_with_history`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] # noqa: E501 if 'account_email_id' in params: path_params['accountEmailId'] = params['account_email_id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return 
self.api_client.call_api( '/1.0/kb/accounts/{accountId}/emails/{accountEmailId}/auditLogsWithHistory', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='List[AuditLog]', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_account_tags(self, account_id=None, **kwargs): # noqa: E501 """Retrieve account tags # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_account_tags(account_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Bool included_deleted: :param Str audit: :return: List[Tag] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_account_tags_with_http_info(account_id, **kwargs) # noqa: E501 else: (data) = self.get_account_tags_with_http_info(account_id, **kwargs) # noqa: E501 return data def get_account_tags_with_http_info(self, account_id=None, **kwargs): # noqa: E501 """Retrieve account tags # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_account_tags_with_http_info(account_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Bool included_deleted: :param Str audit: :return: List[Tag] If the method is called asynchronously, returns the request thread. 
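        A plain synchronous usage sketch (the UUID below is only a placeholder,
        not a real account id):

        >>> tags = api.get_account_tags_with_http_info(
        ...     '00000000-0000-0000-0000-000000000001', included_deleted=True)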
""" all_params = ['account_id', 'included_deleted', 'audit'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_account_tags" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params or params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `get_account_tags`") # noqa: E501 if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_id` when calling `get_account_tags`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] # noqa: E501 query_params = [] if 'included_deleted' in params: query_params.append(('includedDeleted', params['included_deleted'])) # noqa: E501 if 'audit' in params: query_params.append(('audit', params['audit'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/{accountId}/tags', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='List[Tag]', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_account_timeline(self, account_id=None, **kwargs): # noqa: E501 """Retrieve account timeline # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_account_timeline(account_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Bool parallel: :param Str audit: :return: AccountTimeline If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_account_timeline_with_http_info(account_id, **kwargs) # noqa: E501 else: (data) = self.get_account_timeline_with_http_info(account_id, **kwargs) # noqa: E501 return data def get_account_timeline_with_http_info(self, account_id=None, **kwargs): # noqa: E501 """Retrieve account timeline # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_account_timeline_with_http_info(account_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Bool parallel: :param Str audit: :return: AccountTimeline If the method is called asynchronously, returns the request thread. 
""" all_params = ['account_id', 'parallel', 'audit'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_account_timeline" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params or params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `get_account_timeline`") # noqa: E501 if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_id` when calling `get_account_timeline`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] # noqa: E501 query_params = [] if 'parallel' in params: query_params.append(('parallel', params['parallel'])) # noqa: E501 if 'audit' in params: query_params.append(('audit', params['audit'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/{accountId}/timeline', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='AccountTimeline', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_accounts(self, **kwargs): # noqa: E501 """List accounts # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_accounts(async=True) >>> result = thread.get() :param async bool :param Int offset: :param Int limit: :param Bool account_with_balance: :param Bool account_with_balance_and_cba: :param Str audit: :return: List[Account] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_accounts_with_http_info(**kwargs) # noqa: E501 else: (data) = self.get_accounts_with_http_info(**kwargs) # noqa: E501 return data def get_accounts_with_http_info(self, **kwargs): # noqa: E501 """List accounts # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_accounts_with_http_info(async=True) >>> result = thread.get() :param async bool :param Int offset: :param Int limit: :param Bool account_with_balance: :param Bool account_with_balance_and_cba: :param Str audit: :return: List[Account] If the method is called asynchronously, returns the request thread. 
""" all_params = ['offset', 'limit', 'account_with_balance', 'account_with_balance_and_cba', 'audit'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_accounts" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] if 'offset' in params: query_params.append(('offset', params['offset'])) # noqa: E501 if 'limit' in params: query_params.append(('limit', params['limit'])) # noqa: E501 if 'account_with_balance' in params: query_params.append(('accountWithBalance', params['account_with_balance'])) # noqa: E501 if 'account_with_balance_and_cba' in params: query_params.append(('accountWithBalanceAndCBA', params['account_with_balance_and_cba'])) # noqa: E501 if 'audit' in params: query_params.append(('audit', params['audit'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/pagination', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='List[Account]', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_all_custom_fields(self, account_id=None, **kwargs): # noqa: E501 """Retrieve account customFields # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_all_custom_fields(account_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Str object_type: :param Str audit: :return: List[CustomField] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_all_custom_fields_with_http_info(account_id, **kwargs) # noqa: E501 else: (data) = self.get_all_custom_fields_with_http_info(account_id, **kwargs) # noqa: E501 return data def get_all_custom_fields_with_http_info(self, account_id=None, **kwargs): # noqa: E501 """Retrieve account customFields # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_all_custom_fields_with_http_info(account_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Str object_type: :param Str audit: :return: List[CustomField] If the method is called asynchronously, returns the request thread. 
""" all_params = ['account_id', 'object_type', 'audit'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_all_custom_fields" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params or params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `get_all_custom_fields`") # noqa: E501 if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_id` when calling `get_all_custom_fields`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] # noqa: E501 query_params = [] if 'object_type' in params: query_params.append(('objectType', params['object_type'])) # noqa: E501 if 'audit' in params: query_params.append(('audit', params['audit'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/{accountId}/allCustomFields', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='List[CustomField]', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_all_tags(self, account_id=None, **kwargs): # noqa: E501 """Retrieve account tags # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_all_tags(account_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Str object_type: :param Bool included_deleted: :param Str audit: :return: List[Tag] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_all_tags_with_http_info(account_id, **kwargs) # noqa: E501 else: (data) = self.get_all_tags_with_http_info(account_id, **kwargs) # noqa: E501 return data def get_all_tags_with_http_info(self, account_id=None, **kwargs): # noqa: E501 """Retrieve account tags # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_all_tags_with_http_info(account_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Str object_type: :param Bool included_deleted: :param Str audit: :return: List[Tag] If the method is called asynchronously, returns the request thread. 
""" all_params = ['account_id', 'object_type', 'included_deleted', 'audit'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_all_tags" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params or params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `get_all_tags`") # noqa: E501 if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_id` when calling `get_all_tags`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] # noqa: E501 query_params = [] if 'object_type' in params: query_params.append(('objectType', params['object_type'])) # noqa: E501 if 'included_deleted' in params: query_params.append(('includedDeleted', params['included_deleted'])) # noqa: E501 if 'audit' in params: query_params.append(('audit', params['audit'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/{accountId}/allTags', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='List[Tag]', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_blocking_state_audit_logs_with_history(self, blocking_id=None, **kwargs): # noqa: E501 """Retrieve blocking state audit logs with history by id # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_blocking_state_audit_logs_with_history(blocking_id, async=True) >>> result = thread.get() :param async bool :param Str blocking_id: (required) :return: List[AuditLog] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_blocking_state_audit_logs_with_history_with_http_info(blocking_id, **kwargs) # noqa: E501 else: (data) = self.get_blocking_state_audit_logs_with_history_with_http_info(blocking_id, **kwargs) # noqa: E501 return data def get_blocking_state_audit_logs_with_history_with_http_info(self, blocking_id=None, **kwargs): # noqa: E501 """Retrieve blocking state audit logs with history by id # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_blocking_state_audit_logs_with_history_with_http_info(blocking_id, async=True) >>> result = thread.get() :param async bool :param Str blocking_id: (required) :return: List[AuditLog] If the method is called asynchronously, returns the request thread. """ all_params = ['blocking_id'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_blocking_state_audit_logs_with_history" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'blocking_id' is set if ('blocking_id' not in params or params['blocking_id'] is None): raise ValueError("Missing the required parameter `blocking_id` when calling `get_blocking_state_audit_logs_with_history`") # noqa: E501 if 'blocking_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['blocking_id']): # noqa: E501 raise ValueError("Invalid value for parameter `blocking_id` when calling `get_blocking_state_audit_logs_with_history`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'blocking_id' in params: path_params['blockingId'] = params['blocking_id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/block/{blockingId}/auditLogsWithHistory', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='List[AuditLog]', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_blocking_states(self, account_id=None, **kwargs): # noqa: E501 """Retrieve blocking states for account # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_blocking_states(account_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param List[Str] blocking_state_types: :param List[Str] blocking_state_svcs: :param Str audit: :return: List[BlockingState] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_blocking_states_with_http_info(account_id, **kwargs) # noqa: E501 else: (data) = self.get_blocking_states_with_http_info(account_id, **kwargs) # noqa: E501 return data def get_blocking_states_with_http_info(self, account_id=None, **kwargs): # noqa: E501 """Retrieve blocking states for account # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_blocking_states_with_http_info(account_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param List[Str] blocking_state_types: :param List[Str] blocking_state_svcs: :param Str audit: :return: List[BlockingState] If the method is called asynchronously, returns the request thread. """ all_params = ['account_id', 'blocking_state_types', 'blocking_state_svcs', 'audit'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_blocking_states" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params or params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `get_blocking_states`") # noqa: E501 if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_id` when calling `get_blocking_states`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] # noqa: E501 query_params = [] if 'blocking_state_types' in params: query_params.append(('blockingStateTypes', params['blocking_state_types'])) # noqa: E501 collection_formats['blockingStateTypes'] = 'multi' # noqa: E501 if 'blocking_state_svcs' in params: query_params.append(('blockingStateSvcs', params['blocking_state_svcs'])) # noqa: E501 collection_formats['blockingStateSvcs'] = 'multi' # noqa: E501 if 'audit' in params: query_params.append(('audit', params['audit'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/{accountId}/block', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='List[BlockingState]', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_children_accounts(self, account_id=None, **kwargs): # noqa: E501 """List children accounts # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_children_accounts(account_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Bool account_with_balance: :param Bool account_with_balance_and_cba: :param Str audit: :return: List[Account] If the method is called asynchronously, returns the request thread. 
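        Illustrative synchronous usage with balances included (the parent account
        id below is a placeholder):

        >>> children = api.get_children_accounts(
        ...     '00000000-0000-0000-0000-000000000001', account_with_balance=True)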
""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_children_accounts_with_http_info(account_id, **kwargs) # noqa: E501 else: (data) = self.get_children_accounts_with_http_info(account_id, **kwargs) # noqa: E501 return data def get_children_accounts_with_http_info(self, account_id=None, **kwargs): # noqa: E501 """List children accounts # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_children_accounts_with_http_info(account_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Bool account_with_balance: :param Bool account_with_balance_and_cba: :param Str audit: :return: List[Account] If the method is called asynchronously, returns the request thread. """ all_params = ['account_id', 'account_with_balance', 'account_with_balance_and_cba', 'audit'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_children_accounts" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params or params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `get_children_accounts`") # noqa: E501 if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_id` when calling `get_children_accounts`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] # noqa: E501 query_params = [] if 'account_with_balance' in params: query_params.append(('accountWithBalance', params['account_with_balance'])) # noqa: E501 if 'account_with_balance_and_cba' in params: query_params.append(('accountWithBalanceAndCBA', params['account_with_balance_and_cba'])) # noqa: E501 if 'audit' in params: query_params.append(('audit', params['audit'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/{accountId}/children', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='List[Account]', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_emails(self, account_id=None, **kwargs): # noqa: E501 """Retrieve an account emails # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_emails(account_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :return: List[AccountEmail] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_emails_with_http_info(account_id, **kwargs) # noqa: E501 else: (data) = self.get_emails_with_http_info(account_id, **kwargs) # noqa: E501 return data def get_emails_with_http_info(self, account_id=None, **kwargs): # noqa: E501 """Retrieve an account emails # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_emails_with_http_info(account_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :return: List[AccountEmail] If the method is called asynchronously, returns the request thread. """ all_params = ['account_id'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_emails" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params or params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `get_emails`") # noqa: E501 if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_id` when calling `get_emails`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/{accountId}/emails', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='List[AccountEmail]', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_invoice_payments(self, account_id=None, **kwargs): # noqa: E501 """Retrieve account invoice payments # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_invoice_payments(account_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Bool with_plugin_info: :param Bool with_attempts: :param List[Str] plugin_property: :param Str audit: :return: List[InvoicePayment] If the method is called asynchronously, returns the request thread. 
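        Example synchronous call that also returns payment attempts (placeholder
        account id):

        >>> invoice_payments = api.get_invoice_payments(
        ...     '00000000-0000-0000-0000-000000000001', with_attempts=True)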
""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_invoice_payments_with_http_info(account_id, **kwargs) # noqa: E501 else: (data) = self.get_invoice_payments_with_http_info(account_id, **kwargs) # noqa: E501 return data def get_invoice_payments_with_http_info(self, account_id=None, **kwargs): # noqa: E501 """Retrieve account invoice payments # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_invoice_payments_with_http_info(account_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Bool with_plugin_info: :param Bool with_attempts: :param List[Str] plugin_property: :param Str audit: :return: List[InvoicePayment] If the method is called asynchronously, returns the request thread. """ all_params = ['account_id', 'with_plugin_info', 'with_attempts', 'plugin_property', 'audit'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_invoice_payments" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params or params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `get_invoice_payments`") # noqa: E501 if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_id` when calling `get_invoice_payments`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] # noqa: E501 query_params = [] if 'with_plugin_info' in params: query_params.append(('withPluginInfo', params['with_plugin_info'])) # noqa: E501 if 'with_attempts' in params: query_params.append(('withAttempts', params['with_attempts'])) # noqa: E501 if 'plugin_property' in params: query_params.append(('pluginProperty', params['plugin_property'])) # noqa: E501 collection_formats['pluginProperty'] = 'multi' # noqa: E501 if 'audit' in params: query_params.append(('audit', params['audit'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/{accountId}/invoicePayments', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='List[InvoicePayment]', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_invoices_for_account(self, account_id=None, **kwargs): # noqa: E501 """Retrieve account invoices # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_invoices_for_account(account_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Date start_date: :param Date end_date: :param Bool with_migration_invoices: :param Bool unpaid_invoices_only: :param Bool include_voided_invoices: :param Str invoices_filter: :param Str audit: :return: List[Invoice] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_invoices_for_account_with_http_info(account_id, **kwargs) # noqa: E501 else: (data) = self.get_invoices_for_account_with_http_info(account_id, **kwargs) # noqa: E501 return data def get_invoices_for_account_with_http_info(self, account_id=None, **kwargs): # noqa: E501 """Retrieve account invoices # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_invoices_for_account_with_http_info(account_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Date start_date: :param Date end_date: :param Bool with_migration_invoices: :param Bool unpaid_invoices_only: :param Bool include_voided_invoices: :param Str invoices_filter: :param Str audit: :return: List[Invoice] If the method is called asynchronously, returns the request thread. """ all_params = ['account_id', 'start_date', 'end_date', 'with_migration_invoices', 'unpaid_invoices_only', 'include_voided_invoices', 'invoices_filter', 'audit'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_invoices_for_account" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params or params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `get_invoices_for_account`") # noqa: E501 if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_id` when calling `get_invoices_for_account`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] # noqa: E501 query_params = [] if 'start_date' in params: query_params.append(('startDate', params['start_date'])) # noqa: E501 if 'end_date' in params: query_params.append(('endDate', params['end_date'])) # noqa: E501 if 'with_migration_invoices' in params: query_params.append(('withMigrationInvoices', params['with_migration_invoices'])) # noqa: E501 if 'unpaid_invoices_only' in params: query_params.append(('unpaidInvoicesOnly', params['unpaid_invoices_only'])) # noqa: E501 if 'include_voided_invoices' in params: query_params.append(('includeVoidedInvoices', params['include_voided_invoices'])) # noqa: E501 if 'invoices_filter' in params: query_params.append(('invoicesFilter', params['invoices_filter'])) # noqa: E501 if 'audit' in params: query_params.append(('audit', params['audit'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # 
HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/{accountId}/invoices', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='List[Invoice]', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_overdue_account(self, account_id=None, **kwargs): # noqa: E501 """Retrieve overdue state for account # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_overdue_account(account_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :return: OverdueState If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_overdue_account_with_http_info(account_id, **kwargs) # noqa: E501 else: (data) = self.get_overdue_account_with_http_info(account_id, **kwargs) # noqa: E501 return data def get_overdue_account_with_http_info(self, account_id=None, **kwargs): # noqa: E501 """Retrieve overdue state for account # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_overdue_account_with_http_info(account_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :return: OverdueState If the method is called asynchronously, returns the request thread. 
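        Minimal synchronous sketch (placeholder account id):

        >>> overdue = api.get_overdue_account_with_http_info(
        ...     '00000000-0000-0000-0000-000000000001')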
""" all_params = ['account_id'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_overdue_account" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params or params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `get_overdue_account`") # noqa: E501 if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_id` when calling `get_overdue_account`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/{accountId}/overdue', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='OverdueState', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_payment_methods_for_account(self, account_id=None, **kwargs): # noqa: E501 """Retrieve account payment methods # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_payment_methods_for_account(account_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Bool with_plugin_info: :param Bool included_deleted: :param List[Str] plugin_property: :param Str audit: :return: List[PaymentMethod] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_payment_methods_for_account_with_http_info(account_id, **kwargs) # noqa: E501 else: (data) = self.get_payment_methods_for_account_with_http_info(account_id, **kwargs) # noqa: E501 return data def get_payment_methods_for_account_with_http_info(self, account_id=None, **kwargs): # noqa: E501 """Retrieve account payment methods # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_payment_methods_for_account_with_http_info(account_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Bool with_plugin_info: :param Bool included_deleted: :param List[Str] plugin_property: :param Str audit: :return: List[PaymentMethod] If the method is called asynchronously, returns the request thread. 
""" all_params = ['account_id', 'with_plugin_info', 'included_deleted', 'plugin_property', 'audit'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_payment_methods_for_account" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params or params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `get_payment_methods_for_account`") # noqa: E501 if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_id` when calling `get_payment_methods_for_account`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] # noqa: E501 query_params = [] if 'with_plugin_info' in params: query_params.append(('withPluginInfo', params['with_plugin_info'])) # noqa: E501 if 'included_deleted' in params: query_params.append(('includedDeleted', params['included_deleted'])) # noqa: E501 if 'plugin_property' in params: query_params.append(('pluginProperty', params['plugin_property'])) # noqa: E501 collection_formats['pluginProperty'] = 'multi' # noqa: E501 if 'audit' in params: query_params.append(('audit', params['audit'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/{accountId}/paymentMethods', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='List[PaymentMethod]', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_payments_for_account(self, account_id=None, **kwargs): # noqa: E501 """Retrieve account payments # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_payments_for_account(account_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Bool with_attempts: :param Bool with_plugin_info: :param List[Str] plugin_property: :param Str audit: :return: List[Payment] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_payments_for_account_with_http_info(account_id, **kwargs) # noqa: E501 else: (data) = self.get_payments_for_account_with_http_info(account_id, **kwargs) # noqa: E501 return data def get_payments_for_account_with_http_info(self, account_id=None, **kwargs): # noqa: E501 """Retrieve account payments # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_payments_for_account_with_http_info(account_id, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Bool with_attempts: :param Bool with_plugin_info: :param List[Str] plugin_property: :param Str audit: :return: List[Payment] If the method is called asynchronously, returns the request thread. """ all_params = ['account_id', 'with_attempts', 'with_plugin_info', 'plugin_property', 'audit'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_payments_for_account" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params or params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `get_payments_for_account`") # noqa: E501 if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_id` when calling `get_payments_for_account`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] # noqa: E501 query_params = [] if 'with_attempts' in params: query_params.append(('withAttempts', params['with_attempts'])) # noqa: E501 if 'with_plugin_info' in params: query_params.append(('withPluginInfo', params['with_plugin_info'])) # noqa: E501 if 'plugin_property' in params: query_params.append(('pluginProperty', params['plugin_property'])) # noqa: E501 collection_formats['pluginProperty'] = 'multi' # noqa: E501 if 'audit' in params: query_params.append(('audit', params['audit'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/{accountId}/payments', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='List[Payment]', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def modify_account_custom_fields(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501 """Modify custom fields to account # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.modify_account_custom_fields(account_id, body, created_by, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param List[CustomField] body: (required) :param Str created_by: (required) :param Str reason: :param Str comment: :return: None If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.modify_account_custom_fields_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501 else: (data) = self.modify_account_custom_fields_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501 return data def modify_account_custom_fields_with_http_info(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501 """Modify custom fields to account # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.modify_account_custom_fields_with_http_info(account_id, body, created_by, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param List[CustomField] body: (required) :param Str created_by: (required) :param Str reason: :param Str comment: :return: None If the method is called asynchronously, returns the request thread. """ all_params = ['account_id', 'body', 'created_by', 'reason', 'comment'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method modify_account_custom_fields" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params or params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `modify_account_custom_fields`") # noqa: E501 # verify the required parameter 'body' is set if ('body' not in params or params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `modify_account_custom_fields`") # noqa: E501 # verify the required parameter 'created_by' is set if ('created_by' not in params or params['created_by'] is None): raise ValueError("Missing the required parameter `created_by` when calling `modify_account_custom_fields`") # noqa: E501 if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_id` when calling `modify_account_custom_fields`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] # noqa: E501 query_params = [] header_params = {} if 'created_by' in params: header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501 if 'reason' in params: header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501 if 'comment' in params: header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501 form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/{accountId}/customFields', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, 
files=local_var_files, response_type=None, # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def pay_all_invoices(self, account_id=None, created_by=None, **kwargs): # noqa: E501 """Trigger a payment for all unpaid invoices # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.pay_all_invoices(account_id, created_by, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Str created_by: (required) :param Str payment_method_id: :param Bool external_payment: :param Float payment_amount: :param Date target_date: :param List[Str] plugin_property: :param Str reason: :param Str comment: :return: List[Invoice] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.pay_all_invoices_with_http_info(account_id, created_by, **kwargs) # noqa: E501 else: (data) = self.pay_all_invoices_with_http_info(account_id, created_by, **kwargs) # noqa: E501 return data def pay_all_invoices_with_http_info(self, account_id=None, created_by=None, **kwargs): # noqa: E501 """Trigger a payment for all unpaid invoices # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.pay_all_invoices_with_http_info(account_id, created_by, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Str created_by: (required) :param Str payment_method_id: :param Bool external_payment: :param Float payment_amount: :param Date target_date: :param List[Str] plugin_property: :param Str reason: :param Str comment: :return: List[Invoice] If the method is called asynchronously, returns the request thread. 
""" all_params = ['account_id', 'created_by', 'payment_method_id', 'external_payment', 'payment_amount', 'target_date', 'plugin_property', 'reason', 'comment'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method pay_all_invoices" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params or params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `pay_all_invoices`") # noqa: E501 # verify the required parameter 'created_by' is set if ('created_by' not in params or params['created_by'] is None): raise ValueError("Missing the required parameter `created_by` when calling `pay_all_invoices`") # noqa: E501 if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_id` when calling `pay_all_invoices`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] # noqa: E501 query_params = [] if 'payment_method_id' in params: query_params.append(('paymentMethodId', params['payment_method_id'])) # noqa: E501 if 'external_payment' in params: query_params.append(('externalPayment', params['external_payment'])) # noqa: E501 if 'payment_amount' in params: query_params.append(('paymentAmount', params['payment_amount'])) # noqa: E501 if 'target_date' in params: query_params.append(('targetDate', params['target_date'])) # noqa: E501 if 'plugin_property' in params: query_params.append(('pluginProperty', params['plugin_property'])) # noqa: E501 collection_formats['pluginProperty'] = 'multi' # noqa: E501 header_params = {} if 'created_by' in params: header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501 if 'reason' in params: header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501 if 'comment' in params: header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501 form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/{accountId}/invoicePayments', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='List[Invoice]', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def process_payment(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501 """Trigger a payment (authorization, purchase or credit) # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.process_payment(account_id, body, created_by, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param PaymentTransaction body: (required) :param Str created_by: (required) :param Str payment_method_id: :param List[Str] control_plugin_name: :param List[Str] plugin_property: :param Str reason: :param Str comment: :return: Payment If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.process_payment_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501 else: (data) = self.process_payment_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501 return data def process_payment_with_http_info(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501 """Trigger a payment (authorization, purchase or credit) # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.process_payment_with_http_info(account_id, body, created_by, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param PaymentTransaction body: (required) :param Str created_by: (required) :param Str payment_method_id: :param List[Str] control_plugin_name: :param List[Str] plugin_property: :param Str reason: :param Str comment: :return: Payment If the method is called asynchronously, returns the request thread. """ all_params = ['account_id', 'body', 'created_by', 'payment_method_id', 'control_plugin_name', 'plugin_property', 'reason', 'comment'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method process_payment" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params or params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `process_payment`") # noqa: E501 # verify the required parameter 'body' is set if ('body' not in params or params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `process_payment`") # noqa: E501 # verify the required parameter 'created_by' is set if ('created_by' not in params or params['created_by'] is None): raise ValueError("Missing the required parameter `created_by` when calling `process_payment`") # noqa: E501 if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_id` when calling `process_payment`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] # noqa: E501 query_params = [] if 'payment_method_id' in params: query_params.append(('paymentMethodId', params['payment_method_id'])) # noqa: E501 if 'control_plugin_name' in params: query_params.append(('controlPluginName', params['control_plugin_name'])) # noqa: E501 collection_formats['controlPluginName'] = 'multi' # noqa: E501 if 'plugin_property' in params: query_params.append(('pluginProperty', 
params['plugin_property'])) # noqa: E501 collection_formats['pluginProperty'] = 'multi' # noqa: E501 header_params = {} if 'created_by' in params: header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501 if 'reason' in params: header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501 if 'comment' in params: header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501 form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/{accountId}/payments', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='Payment', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def process_payment_by_external_key(self, body=None, external_key=None, created_by=None, **kwargs): # noqa: E501 """Trigger a payment using the account external key (authorization, purchase or credit) # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.process_payment_by_external_key(body, external_key, created_by, async=True) >>> result = thread.get() :param async bool :param PaymentTransaction body: (required) :param Str external_key: (required) :param Str created_by: (required) :param Str payment_method_id: :param List[Str] control_plugin_name: :param List[Str] plugin_property: :param Str reason: :param Str comment: :return: Payment If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.process_payment_by_external_key_with_http_info(body, external_key, created_by, **kwargs) # noqa: E501 else: (data) = self.process_payment_by_external_key_with_http_info(body, external_key, created_by, **kwargs) # noqa: E501 return data def process_payment_by_external_key_with_http_info(self, body=None, external_key=None, created_by=None, **kwargs): # noqa: E501 """Trigger a payment using the account external key (authorization, purchase or credit) # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.process_payment_by_external_key_with_http_info(body, external_key, created_by, async=True) >>> result = thread.get() :param async bool :param PaymentTransaction body: (required) :param Str external_key: (required) :param Str created_by: (required) :param Str payment_method_id: :param List[Str] control_plugin_name: :param List[Str] plugin_property: :param Str reason: :param Str comment: :return: Payment If the method is called asynchronously, returns the request thread. 
""" all_params = ['body', 'external_key', 'created_by', 'payment_method_id', 'control_plugin_name', 'plugin_property', 'reason', 'comment'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method process_payment_by_external_key" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'body' is set if ('body' not in params or params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `process_payment_by_external_key`") # noqa: E501 # verify the required parameter 'external_key' is set if ('external_key' not in params or params['external_key'] is None): raise ValueError("Missing the required parameter `external_key` when calling `process_payment_by_external_key`") # noqa: E501 # verify the required parameter 'created_by' is set if ('created_by' not in params or params['created_by'] is None): raise ValueError("Missing the required parameter `created_by` when calling `process_payment_by_external_key`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] if 'external_key' in params: query_params.append(('externalKey', params['external_key'])) # noqa: E501 if 'payment_method_id' in params: query_params.append(('paymentMethodId', params['payment_method_id'])) # noqa: E501 if 'control_plugin_name' in params: query_params.append(('controlPluginName', params['control_plugin_name'])) # noqa: E501 collection_formats['controlPluginName'] = 'multi' # noqa: E501 if 'plugin_property' in params: query_params.append(('pluginProperty', params['plugin_property'])) # noqa: E501 collection_formats['pluginProperty'] = 'multi' # noqa: E501 header_params = {} if 'created_by' in params: header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501 if 'reason' in params: header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501 if 'comment' in params: header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501 form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/payments', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='Payment', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def rebalance_existing_cba_on_account(self, account_id=None, created_by=None, **kwargs): # noqa: E501 """Rebalance account CBA # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.rebalance_existing_cba_on_account(account_id, created_by, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Str created_by: (required) :param Str reason: :param Str comment: :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.rebalance_existing_cba_on_account_with_http_info(account_id, created_by, **kwargs) # noqa: E501 else: (data) = self.rebalance_existing_cba_on_account_with_http_info(account_id, created_by, **kwargs) # noqa: E501 return data def rebalance_existing_cba_on_account_with_http_info(self, account_id=None, created_by=None, **kwargs): # noqa: E501 """Rebalance account CBA # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.rebalance_existing_cba_on_account_with_http_info(account_id, created_by, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Str created_by: (required) :param Str reason: :param Str comment: :return: None If the method is called asynchronously, returns the request thread. """ all_params = ['account_id', 'created_by', 'reason', 'comment'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method rebalance_existing_cba_on_account" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params or params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `rebalance_existing_cba_on_account`") # noqa: E501 # verify the required parameter 'created_by' is set if ('created_by' not in params or params['created_by'] is None): raise ValueError("Missing the required parameter `created_by` when calling `rebalance_existing_cba_on_account`") # noqa: E501 if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_id` when calling `rebalance_existing_cba_on_account`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] # noqa: E501 query_params = [] header_params = {} if 'created_by' in params: header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501 if 'reason' in params: header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501 if 'comment' in params: header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501 form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/{accountId}/cbaRebalancing', 'PUT', path_params, 
query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def refresh_payment_methods(self, account_id=None, created_by=None, **kwargs): # noqa: E501 """Refresh account payment methods # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.refresh_payment_methods(account_id, created_by, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Str created_by: (required) :param Str plugin_name: :param List[Str] plugin_property: :param Str reason: :param Str comment: :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.refresh_payment_methods_with_http_info(account_id, created_by, **kwargs) # noqa: E501 else: (data) = self.refresh_payment_methods_with_http_info(account_id, created_by, **kwargs) # noqa: E501 return data def refresh_payment_methods_with_http_info(self, account_id=None, created_by=None, **kwargs): # noqa: E501 """Refresh account payment methods # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.refresh_payment_methods_with_http_info(account_id, created_by, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Str created_by: (required) :param Str plugin_name: :param List[Str] plugin_property: :param Str reason: :param Str comment: :return: None If the method is called asynchronously, returns the request thread. 
""" all_params = ['account_id', 'created_by', 'plugin_name', 'plugin_property', 'reason', 'comment'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method refresh_payment_methods" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params or params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `refresh_payment_methods`") # noqa: E501 # verify the required parameter 'created_by' is set if ('created_by' not in params or params['created_by'] is None): raise ValueError("Missing the required parameter `created_by` when calling `refresh_payment_methods`") # noqa: E501 if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_id` when calling `refresh_payment_methods`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] # noqa: E501 query_params = [] if 'plugin_name' in params: query_params.append(('pluginName', params['plugin_name'])) # noqa: E501 if 'plugin_property' in params: query_params.append(('pluginProperty', params['plugin_property'])) # noqa: E501 collection_formats['pluginProperty'] = 'multi' # noqa: E501 header_params = {} if 'created_by' in params: header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501 if 'reason' in params: header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501 if 'comment' in params: header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501 form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/{accountId}/paymentMethods/refresh', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def remove_email(self, account_id=None, email=None, created_by=None, **kwargs): # noqa: E501 """Delete email from account # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.remove_email(account_id, email, created_by, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Str email: (required) :param Str created_by: (required) :param Str reason: :param Str comment: :return: None If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.remove_email_with_http_info(account_id, email, created_by, **kwargs) # noqa: E501 else: (data) = self.remove_email_with_http_info(account_id, email, created_by, **kwargs) # noqa: E501 return data def remove_email_with_http_info(self, account_id=None, email=None, created_by=None, **kwargs): # noqa: E501 """Delete email from account # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.remove_email_with_http_info(account_id, email, created_by, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Str email: (required) :param Str created_by: (required) :param Str reason: :param Str comment: :return: None If the method is called asynchronously, returns the request thread. """ all_params = ['account_id', 'email', 'created_by', 'reason', 'comment'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method remove_email" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params or params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `remove_email`") # noqa: E501 # verify the required parameter 'email' is set if ('email' not in params or params['email'] is None): raise ValueError("Missing the required parameter `email` when calling `remove_email`") # noqa: E501 # verify the required parameter 'created_by' is set if ('created_by' not in params or params['created_by'] is None): raise ValueError("Missing the required parameter `created_by` when calling `remove_email`") # noqa: E501 if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_id` when calling `remove_email`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] # noqa: E501 if 'email' in params: path_params['email'] = params['email'] # noqa: E501 query_params = [] header_params = {} if 'created_by' in params: header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501 if 'reason' in params: header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501 if 'comment' in params: header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501 form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/{accountId}/emails/{email}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), 
collection_formats=collection_formats) def search_accounts(self, search_key=None, **kwargs): # noqa: E501 """Search accounts # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.search_accounts(search_key, async=True) >>> result = thread.get() :param async bool :param Str search_key: (required) :param Int offset: :param Int limit: :param Bool account_with_balance: :param Bool account_with_balance_and_cba: :param Str audit: :return: List[Account] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.search_accounts_with_http_info(search_key, **kwargs) # noqa: E501 else: (data) = self.search_accounts_with_http_info(search_key, **kwargs) # noqa: E501 return data def search_accounts_with_http_info(self, search_key=None, **kwargs): # noqa: E501 """Search accounts # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.search_accounts_with_http_info(search_key, async=True) >>> result = thread.get() :param async bool :param Str search_key: (required) :param Int offset: :param Int limit: :param Bool account_with_balance: :param Bool account_with_balance_and_cba: :param Str audit: :return: List[Account] If the method is called asynchronously, returns the request thread. """ all_params = ['search_key', 'offset', 'limit', 'account_with_balance', 'account_with_balance_and_cba', 'audit'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method search_accounts" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'search_key' is set if ('search_key' not in params or params['search_key'] is None): raise ValueError("Missing the required parameter `search_key` when calling `search_accounts`") # noqa: E501 if 'search_key' in params and not re.search('.*', params['search_key']): # noqa: E501 raise ValueError("Invalid value for parameter `search_key` when calling `search_accounts`, must conform to the pattern `/.*/`") # noqa: E501 collection_formats = {} path_params = {} if 'search_key' in params: path_params['searchKey'] = params['search_key'] # noqa: E501 query_params = [] if 'offset' in params: query_params.append(('offset', params['offset'])) # noqa: E501 if 'limit' in params: query_params.append(('limit', params['limit'])) # noqa: E501 if 'account_with_balance' in params: query_params.append(('accountWithBalance', params['account_with_balance'])) # noqa: E501 if 'account_with_balance_and_cba' in params: query_params.append(('accountWithBalanceAndCBA', params['account_with_balance_and_cba'])) # noqa: E501 if 'audit' in params: query_params.append(('audit', params['audit'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/search/{searchKey}', 'GET', path_params, query_params, header_params, 
body=body_params, post_params=form_params, files=local_var_files, response_type='List[Account]', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def set_default_payment_method(self, account_id=None, payment_method_id=None, created_by=None, **kwargs): # noqa: E501 """Set the default payment method # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.set_default_payment_method(account_id, payment_method_id, created_by, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Str payment_method_id: (required) :param Str created_by: (required) :param Bool pay_all_unpaid_invoices: :param List[Str] plugin_property: :param Str reason: :param Str comment: :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.set_default_payment_method_with_http_info(account_id, payment_method_id, created_by, **kwargs) # noqa: E501 else: (data) = self.set_default_payment_method_with_http_info(account_id, payment_method_id, created_by, **kwargs) # noqa: E501 return data def set_default_payment_method_with_http_info(self, account_id=None, payment_method_id=None, created_by=None, **kwargs): # noqa: E501 """Set the default payment method # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.set_default_payment_method_with_http_info(account_id, payment_method_id, created_by, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Str payment_method_id: (required) :param Str created_by: (required) :param Bool pay_all_unpaid_invoices: :param List[Str] plugin_property: :param Str reason: :param Str comment: :return: None If the method is called asynchronously, returns the request thread. 
""" all_params = ['account_id', 'payment_method_id', 'created_by', 'pay_all_unpaid_invoices', 'plugin_property', 'reason', 'comment'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method set_default_payment_method" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params or params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `set_default_payment_method`") # noqa: E501 # verify the required parameter 'payment_method_id' is set if ('payment_method_id' not in params or params['payment_method_id'] is None): raise ValueError("Missing the required parameter `payment_method_id` when calling `set_default_payment_method`") # noqa: E501 # verify the required parameter 'created_by' is set if ('created_by' not in params or params['created_by'] is None): raise ValueError("Missing the required parameter `created_by` when calling `set_default_payment_method`") # noqa: E501 if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_id` when calling `set_default_payment_method`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 if 'payment_method_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['payment_method_id']): # noqa: E501 raise ValueError("Invalid value for parameter `payment_method_id` when calling `set_default_payment_method`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] # noqa: E501 if 'payment_method_id' in params: path_params['paymentMethodId'] = params['payment_method_id'] # noqa: E501 query_params = [] if 'pay_all_unpaid_invoices' in params: query_params.append(('payAllUnpaidInvoices', params['pay_all_unpaid_invoices'])) # noqa: E501 if 'plugin_property' in params: query_params.append(('pluginProperty', params['plugin_property'])) # noqa: E501 collection_formats['pluginProperty'] = 'multi' # noqa: E501 header_params = {} if 'created_by' in params: header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501 if 'reason' in params: header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501 if 'comment' in params: header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501 form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/{accountId}/paymentMethods/{paymentMethodId}/setDefault', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), 
_preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def transfer_child_credit_to_parent(self, child_account_id=None, created_by=None, **kwargs): # noqa: E501 """Move a given child credit to the parent level # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.transfer_child_credit_to_parent(child_account_id, created_by, async=True) >>> result = thread.get() :param async bool :param Str child_account_id: (required) :param Str created_by: (required) :param Str reason: :param Str comment: :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.transfer_child_credit_to_parent_with_http_info(child_account_id, created_by, **kwargs) # noqa: E501 else: (data) = self.transfer_child_credit_to_parent_with_http_info(child_account_id, created_by, **kwargs) # noqa: E501 return data def transfer_child_credit_to_parent_with_http_info(self, child_account_id=None, created_by=None, **kwargs): # noqa: E501 """Move a given child credit to the parent level # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.transfer_child_credit_to_parent_with_http_info(child_account_id, created_by, async=True) >>> result = thread.get() :param async bool :param Str child_account_id: (required) :param Str created_by: (required) :param Str reason: :param Str comment: :return: None If the method is called asynchronously, returns the request thread. """ all_params = ['child_account_id', 'created_by', 'reason', 'comment'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method transfer_child_credit_to_parent" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'child_account_id' is set if ('child_account_id' not in params or params['child_account_id'] is None): raise ValueError("Missing the required parameter `child_account_id` when calling `transfer_child_credit_to_parent`") # noqa: E501 # verify the required parameter 'created_by' is set if ('created_by' not in params or params['created_by'] is None): raise ValueError("Missing the required parameter `created_by` when calling `transfer_child_credit_to_parent`") # noqa: E501 if 'child_account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['child_account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `child_account_id` when calling `transfer_child_credit_to_parent`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'child_account_id' in params: path_params['childAccountId'] = params['child_account_id'] # noqa: E501 query_params = [] header_params = {} if 'created_by' in params: header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501 if 'reason' in params: header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501 if 'comment' in params: header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501 form_params = [] local_var_files = {} body_params = None 
# HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/{childAccountId}/transferCredit', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def update_account(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501 """Update account # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_account(account_id, body, created_by, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Account body: (required) :param Str created_by: (required) :param Bool treat_null_as_reset: :param Str reason: :param Str comment: :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.update_account_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501 else: (data) = self.update_account_with_http_info(account_id, body, created_by, **kwargs) # noqa: E501 return data def update_account_with_http_info(self, account_id=None, body=None, created_by=None, **kwargs): # noqa: E501 """Update account # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_account_with_http_info(account_id, body, created_by, async=True) >>> result = thread.get() :param async bool :param Str account_id: (required) :param Account body: (required) :param Str created_by: (required) :param Bool treat_null_as_reset: :param Str reason: :param Str comment: :return: None If the method is called asynchronously, returns the request thread. 
""" all_params = ['account_id', 'body', 'created_by', 'treat_null_as_reset', 'reason', 'comment'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method update_account" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params or params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `update_account`") # noqa: E501 # verify the required parameter 'body' is set if ('body' not in params or params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `update_account`") # noqa: E501 # verify the required parameter 'created_by' is set if ('created_by' not in params or params['created_by'] is None): raise ValueError("Missing the required parameter `created_by` when calling `update_account`") # noqa: E501 if 'account_id' in params and not re.search('\\w+-\\w+-\\w+-\\w+-\\w+', params['account_id']): # noqa: E501 raise ValueError("Invalid value for parameter `account_id` when calling `update_account`, must conform to the pattern `/\\w+-\\w+-\\w+-\\w+-\\w+/`") # noqa: E501 collection_formats = {} path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] # noqa: E501 query_params = [] if 'treat_null_as_reset' in params: query_params.append(('treatNullAsReset', params['treat_null_as_reset'])) # noqa: E501 header_params = {} if 'created_by' in params: header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501 if 'reason' in params: header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501 if 'comment' in params: header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501 form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Killbill Api Key', 'Killbill Api Secret', 'basicAuth'] # noqa: E501 return self.api_client.call_api( '/1.0/kb/accounts/{accountId}', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
apache-2.0
8,223,314,462,339,926,000
42.553597
207
0.58277
false
4.037577
false
false
false
nmarley/dash
contrib/zmq/zmq_sub.py
1
5988
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

"""
    ZMQ example using python3's asyncio

    Dash should be started with the command line arguments:
        dashd -testnet -daemon \
                -zmqpubrawtx=tcp://127.0.0.1:28332 \
                -zmqpubrawblock=tcp://127.0.0.1:28332 \
                -zmqpubhashtx=tcp://127.0.0.1:28332 \
                -zmqpubhashblock=tcp://127.0.0.1:28332

    We use the asyncio library here.  `self.handle()` installs itself as a
    future at the end of the function.  Since it never returns with the event
    loop having an empty stack of futures, this creates an infinite loop.  An
    alternative is to wrap the contents of `handle` inside `while True`.

    A blocking example using python 2.7 can be obtained from the git history:
    https://github.com/bitcoin/bitcoin/blob/37a7fe9e440b83e2364d5498931253937abe9294/contrib/zmq/zmq_sub.py
"""

import binascii
import asyncio
import zmq
import zmq.asyncio
import signal
import struct
import sys

if not (sys.version_info.major >= 3 and sys.version_info.minor >= 5):
    print("This example only works with Python 3.5 and greater")
    exit(1)

port = 28332

class ZMQHandler():
    def __init__(self):
        self.loop = asyncio.get_event_loop()
        self.zmqContext = zmq.asyncio.Context()

        self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
        self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashblock")
        self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashchainlock")
        self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashtx")
        self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashtxlock")
        self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashgovernancevote")
        self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashgovernanceobject")
        self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashinstantsenddoublespend")
        self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawblock")
        self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawchainlock")
        self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawchainlocksig")
        self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawtx")
        self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawtxlock")
        self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawtxlocksig")
        self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawgovernancevote")
        self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawgovernanceobject")
        self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawinstantsenddoublespend")
        self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % port)

    async def handle(self) :
        msg = await self.zmqSubSocket.recv_multipart()
        topic = msg[0]
        body = msg[1]
        sequence = "Unknown"
        if len(msg[-1]) == 4:
            msgSequence = struct.unpack('<I', msg[-1])[-1]
            sequence = str(msgSequence)
        if topic == b"hashblock":
            print('- HASH BLOCK ('+sequence+') -')
            print(binascii.hexlify(body).decode("utf-8"))
        elif topic == b"hashchainlock":
            print('- HASH CHAINLOCK ('+sequence+') -')
            print(binascii.hexlify(body).decode("utf-8"))
        elif topic == b"hashtx":
            print ('- HASH TX ('+sequence+') -')
            print(binascii.hexlify(body).decode("utf-8"))
        elif topic == b"hashtxlock":
            print('- HASH TX LOCK ('+sequence+') -')
            print(binascii.hexlify(body).decode("utf-8"))
        elif topic == b"hashgovernancevote":
            print('- HASH GOVERNANCE VOTE ('+sequence+') -')
            print(binascii.hexlify(body).decode("utf-8"))
        elif topic == b"hashgovernanceobject":
            print('- HASH GOVERNANCE OBJECT ('+sequence+') -')
            print(binascii.hexlify(body).decode("utf-8"))
        elif topic == b"hashinstantsenddoublespend":
            print('- HASH IS DOUBLE SPEND ('+sequence+') -')
            print(binascii.hexlify(body).decode("utf-8"))
        elif topic == b"rawblock":
            print('- RAW BLOCK HEADER ('+sequence+') -')
            print(binascii.hexlify(body[:80]).decode("utf-8"))
        elif topic == b"rawchainlock":
            print('- RAW CHAINLOCK ('+sequence+') -')
            print(binascii.hexlify(body[:80]).decode("utf-8"))
        elif topic == b"rawchainlocksig":
            print('- RAW CHAINLOCK SIG ('+sequence+') -')
            print(binascii.hexlify(body[:80]).decode("utf-8"))
        elif topic == b"rawtx":
            print('- RAW TX ('+sequence+') -')
            print(binascii.hexlify(body).decode("utf-8"))
        elif topic == b"rawtxlock":
            print('- RAW TX LOCK ('+sequence+') -')
            print(binascii.hexlify(body).decode("utf-8"))
        elif topic == b"rawtxlocksig":
            print('- RAW TX LOCK SIG ('+sequence+') -')
            print(binascii.hexlify(body).decode("utf-8"))
        elif topic == b"rawgovernancevote":
            print('- RAW GOVERNANCE VOTE ('+sequence+') -')
            print(binascii.hexlify(body).decode("utf-8"))
        elif topic == b"rawgovernanceobject":
            print('- RAW GOVERNANCE OBJECT ('+sequence+') -')
            print(binascii.hexlify(body).decode("utf-8"))
        elif topic == b"rawinstantsenddoublespend":
            print('- RAW IS DOUBLE SPEND ('+sequence+') -')
            print(binascii.hexlify(body).decode("utf-8"))
        # schedule ourselves to receive the next message
        asyncio.ensure_future(self.handle())

    def start(self):
        self.loop.add_signal_handler(signal.SIGINT, self.stop)
        self.loop.create_task(self.handle())
        self.loop.run_forever()

    def stop(self):
        self.loop.stop()
        self.zmqContext.destroy()

daemon = ZMQHandler()
daemon.start()
mit
356,747,117,795,739,300
43.686567
107
0.629092
false
3.532743
false
false
false
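The handler above reads the publisher's sequence counter from the last multipart frame. A minimal stand-alone sketch of that decoding step (Python 3, standard library only; the example frame is fabricated for illustration):

import struct

def decode_sequence(last_frame: bytes) -> str:
    """Return the 4-byte little-endian sequence counter, or 'Unknown'."""
    if len(last_frame) == 4:
        return str(struct.unpack('<I', last_frame)[0])
    return "Unknown"

# Fabricated frame: the integer 7 encoded as a little-endian uint32.
print(decode_sequence(struct.pack('<I', 7)))   # -> "7"
print(decode_sequence(b""))                    # -> "Unknown"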
mpoullet/audio-tools
KissFFT/kiss_fft130/test/testkiss.py
1
3565
#!/usr/bin/env python import math import sys import os import random import struct import popen2 import getopt import numpy pi=math.pi e=math.e j=complex(0,1) doreal=0 datatype = os.environ.get('DATATYPE','float') util = '../tools/fft_' + datatype minsnr=90 if datatype == 'double': fmt='d' elif datatype=='int16_t': fmt='h' minsnr=10 elif datatype=='int32_t': fmt='i' elif datatype=='simd': fmt='4f' sys.stderr.write('testkiss.py does not yet test simd') sys.exit(0) elif datatype=='float': fmt='f' else: sys.stderr.write('unrecognized datatype %s\n' % datatype) sys.exit(1) def dopack(x,cpx=1): x = numpy.reshape( x, ( numpy.size(x),) ) if cpx: s = ''.join( [ struct.pack(fmt*2,c.real,c.imag) for c in x ] ) else: s = ''.join( [ struct.pack(fmt,c.real) for c in x ] ) return s def dounpack(x,cpx): uf = fmt * ( len(x) / struct.calcsize(fmt) ) s = struct.unpack(uf,x) if cpx: return numpy.array(s[::2]) + numpy.array( s[1::2] )*j else: return numpy.array(s ) def make_random(dims=[1]): res = [] for i in range(dims[0]): if len(dims)==1: r=random.uniform(-1,1) if doreal: res.append( r ) else: i=random.uniform(-1,1) res.append( complex(r,i) ) else: res.append( make_random( dims[1:] ) ) return numpy.array(res) def flatten(x): ntotal = numpy.size(x) return numpy.reshape(x,(ntotal,)) def randmat( ndims ): dims=[] for i in range( ndims ): curdim = int( random.uniform(2,5) ) if doreal and i==(ndims-1): curdim = int(curdim/2)*2 # force even last dimension if real dims.append( curdim ) return make_random(dims ) def test_fft(ndims): x=randmat( ndims ) if doreal: xver = numpy.fft.rfftn(x) else: xver = numpy.fft.fftn(x) open('/tmp/fftexp.dat','w').write(dopack( flatten(xver) , True ) ) x2=dofft(x,doreal) err = xver - x2 errf = flatten(err) xverf = flatten(xver) errpow = numpy.vdot(errf,errf)+1e-10 sigpow = numpy.vdot(xverf,xverf)+1e-10 snr = 10*math.log10(abs(sigpow/errpow) ) print 'SNR (compared to NumPy) : %.1fdB' % float(snr) if snr<minsnr: print 'xver=',xver print 'x2=',x2 print 'err',err sys.exit(1) def dofft(x,isreal): dims=list( numpy.shape(x) ) x = flatten(x) scale=1 if datatype=='int16_t': x = 32767 * x scale = len(x) / 32767.0 elif datatype=='int32_t': x = 2147483647.0 * x scale = len(x) / 2147483647.0 cmd='%s -n ' % util cmd += ','.join([str(d) for d in dims]) if doreal: cmd += ' -R ' print cmd p = popen2.Popen3(cmd ) open('/tmp/fftin.dat','w').write(dopack( x , isreal==False ) ) p.tochild.write( dopack( x , isreal==False ) ) p.tochild.close() res = dounpack( p.fromchild.read() , 1 ) open('/tmp/fftout.dat','w').write(dopack( flatten(res) , True ) ) if doreal: dims[-1] = int( dims[-1]/2 ) + 1 res = scale * res p.wait() return numpy.reshape(res,dims) def main(): opts,args = getopt.getopt(sys.argv[1:],'r') opts=dict(opts) global doreal doreal = opts.has_key('-r') if doreal: print 'Testing multi-dimensional real FFTs' else: print 'Testing multi-dimensional FFTs' for dim in range(1,4): test_fft( dim ) if __name__ == "__main__": main()
mit
-8,352,119,809,851,959,000
21.006173
72
0.555961
false
2.861156
true
false
false
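testkiss.py feeds the fft_* tools by packing complex samples as interleaved real/imaginary floats. A Python 3 sketch of that pack/unpack round trip, assuming the single-precision 'f' format used for the float build (this is not the project's exact code):

import struct

fmt = 'f'  # single-precision float, matching DATATYPE=float

def dopack(values):
    # Interleave real and imaginary parts: re0, im0, re1, im1, ...
    return b''.join(struct.pack(fmt * 2, c.real, c.imag) for c in values)

def dounpack(buf):
    count = len(buf) // struct.calcsize(fmt)
    flat = struct.unpack(fmt * count, buf)
    return [complex(r, i) for r, i in zip(flat[::2], flat[1::2])]

data = [complex(1, -1), complex(0.5, 2.0)]
print(dounpack(dopack(data)))  # values round-trip (within float32 precision)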
chrissorchard/malucrawl
malware_crawl/tasks.py
1
2599
from celery import task, chord from .scan import scanners, heavy_scanners from .search import search_engines from .source import sources from datetime import datetime from dateutil.tz import tzutc from models import TopicSet # validator = jsonschema.Draft3Validator(json.loads(pkgutil.get_data("malware_crawl", "malware_discovery_schema.json"))) def complete_crawl(): for source in sources: source.apply_async( link=begin_search.subtask(args=(source,)) ) # todo: repeat old searches @task def begin_search(keywords, source): discovered = datetime.now(tzutc()) ts = TopicSet.objects.create( discovered=discovered, source=source ) for keyword in keywords: topic = ts.topic_set.create( keyword=keyword ) for engine in search_engines: engine.apply_async( args=(keyword,), link=begin_scan.subtask(args=(engine, topic)) ) @task def begin_scan(urls, engine, topic): discovered = datetime.now(tzutc()) search = topic.search_set.create( discovered=discovered, source=engine ) for url in urls: result = search.result_set.create( url=url ) for scanner in scanners: report = result.malwarereport_set.create( reporter=scanner ) scanner.apply_async( args=(url,), link=begin_store.subtask( args=(report,) ) ) """ # Check to see if we should scan heavily def check_opinions(all_opinions, reporters): print all_opinions return False @task def accept_scan(all_opinions, reporters, url, result): if check_opinions(all_opinions, reporters): for scanner in heavy_scanners: report = result.malwarereport_set.create( reporter=scanner ) scanner.apply_async( args=(url,), link=begin_store.subtask( args=(report,) ) ) for opinions, reporter in zip(all_opinions, reporters): begin_store.apply_async( args=(opinions, report) ) """ @task def begin_store(opinions, report): for opinion in opinions: report.opinion_set.create( type=opinion["type"], confidence=opinion["confidence"] ) @task def tprint(content): print content
mit
-8,517,734,512,202,998,000
22.518868
120
0.560215
false
4.086478
false
false
false
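complete_crawl above fans work out by queueing one task per source and attaching the follow-up task as a link callback, which receives the parent task's return value as its first argument. A rough Celery sketch of that pattern; the task names and the in-memory broker are illustrative assumptions, and nothing actually runs until a worker consumes the queue:

from celery import Celery

app = Celery('crawl_sketch', broker='memory://')   # illustrative in-memory broker

@app.task
def fetch_keywords(source):
    # Placeholder for a real "source" task that returns a keyword list.
    return ['keyword-a', 'keyword-b']

@app.task
def begin_search(keywords, source):
    # The linked callback receives the parent task's return value first.
    for keyword in keywords:
        print(source, '->', keyword)

for source_name in ['source-1', 'source-2']:
    fetch_keywords.apply_async(
        args=(source_name,),
        link=begin_search.subtask(args=(source_name,)))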
atvKumar/TheWatcher
mkEmail.py
1
5302
from smtplib import SMTP, SMTP_SSL from smtplib import SMTPException from mimetypes import guess_type from os.path import basename from email.utils import COMMASPACE from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText from email.mime.base import MIMEBase from email.encoders import encode_base64 class EmailConnectionError(Exception): pass class SendEmailError(Exception): pass def get_email(email): if '<' in email: data = email.split('<') email = data[1].split('>')[0].strip() return email.strip() class Email(object): def __init__(self, from_, to, subject, message, message_type='plain', attachments=None, cc=None, bcc=None, message_encoding='us-ascii', multi_to=False, multi_cc=False, multi_bcc=False, multi_attach=False): self.email = MIMEMultipart() self.message = message self.email['From'] = from_ if not multi_to: self.email['To'] = to else: self.email['To'] = COMMASPACE.join(to) self.email['Subject'] = subject self.email['subject'] = subject # Case Sensitive Email-Readers if cc is not None: if not multi_cc: self.email['Cc'] = cc else: self.email['Cc'] = COMMASPACE.join(cc) if bcc is not None: if not multi_bcc: self.email['bcc'] = bcc else: self.email['bcc'] = COMMASPACE.join(bcc) text = MIMEText(message, message_type, message_encoding) self.email.attach(text) if attachments is not None: if multi_attach: for filename in attachments: self.attach(filename) else: self.attach(attachments) def debug(self, mime=False): print 'From : ', self.email['From'] print 'To : ', self.email['To'] print 'Cc : ', self.email['Cc'] print 'Bcc : ', self.email['bcc'] print 'Subject : ', self.email['Subject'] print 'Message :', self.message if mime: print self.email.as_string() def attach(self, filename): mimetype, encoding = guess_type(filename) if mimetype is None: mimetype = 'application/octet-stream' mimetype = mimetype.split('/', 1) fp = open(filename, 'rb') attachment = MIMEBase(mimetype[0], mimetype[1]) attachment.set_payload(fp.read()) fp.close() encode_base64(attachment) attachment.add_header('Content-Disposition', 'attachment', filename=basename(filename)) self.email.attach(attachment) def __str__(self): return self.email.as_string() class EmailConnection(object): def __init__(self, server, username, password, debug=False): if ':' in server: data = server.split(':') self.server = data[0] self.port = int(data[1]) else: self.server = server self.port = 25 self.username = username self.password = password self.connect(debug) def __enter__(self): return self def __exit__(self, exception_type, exception_val, trace): self.close() def connect(self, debug): self.connection = SMTP(host=self.server, port=self.port) if debug: # Debug Information # self.debuglevel = 1 self.connection.set_debuglevel(debug) # identify ourselves, prompting server for supported features self.connection.ehlo() # If we can encrypt this session, do it if self.connection.has_extn('STARTTLS'): self.connection.starttls() self.connection.ehlo() self.connection.esmtp_features['auth'] = 'PLAIN LOGIN' self.connection.login(self.username, self.password) def send(self, message, from_=None, to=None, verify=False): if type(message) == str: if from_ is None or to is None: raise EmailConnectionError('You need to specify `from_` ' 'and `to`') else: from_ = get_email(from_) to = get_email(to) else: from_ = message.email['From'] if 'Cc' not in message.email: message.email['Cc'] = '' if 'bcc' not in message.email: message.email['bcc'] = '' to_emails = list(message.email['To'].split(',')) + \ message.email['Cc'].split(',') + \ 
message.email['bcc'].split(',') to = [get_email(complete_email) for complete_email in to_emails] message = str(message) if verify: for each_email in to_emails: self.connection.verify(each_email) # TODO option - remove emails that failed verification # return self.connection.sendmail(from_, to, message) try: self.connection.sendmail(from_, to, message) except SMTPException: raise SendEmailError('Message Could not be sent!') def close(self): self.connection.close()
apache-2.0
-3,787,192,903,398,085,000
35.068027
77
0.557714
false
4.184688
false
false
false
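The Email class builds a multipart message, guesses each attachment's MIME type, and base64-encodes the payload. A compact Python 3 equivalent of those assembly steps (addresses and the optional attachment path are placeholders):

from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email.encoders import encode_base64
from mimetypes import guess_type
from os.path import basename

def build_message(from_, to, subject, body, attachment_path=None):
    msg = MIMEMultipart()
    msg['From'], msg['To'], msg['Subject'] = from_, to, subject
    msg.attach(MIMEText(body, 'plain', 'us-ascii'))
    if attachment_path:
        mimetype, _ = guess_type(attachment_path)
        maintype, subtype = (mimetype or 'application/octet-stream').split('/', 1)
        part = MIMEBase(maintype, subtype)
        with open(attachment_path, 'rb') as fp:
            part.set_payload(fp.read())
        encode_base64(part)
        part.add_header('Content-Disposition', 'attachment',
                        filename=basename(attachment_path))
        msg.attach(part)
    return msg

print(build_message('a@example.com', 'b@example.com', 'Hi', 'Hello').as_string()[:60])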
thinkle/gourmet
gourmet/plugins/duplicate_finder/recipeMerger.py
1
25533
"""recipeMerger.py This module contains code for handling the 'merging' of duplicate recipes. """ import os.path import time from typing import Union from gettext import gettext as _ from gi.repository import Gtk, Pango from gourmet import convert, gglobals, recipeIdentifier, recipeManager from gourmet.gtk_extras import ratingWidget, mnemonic_manager, dialog_extras NEWER = 1 OLDER = 2 try: current_path = os.path.split(os.path.join(os.getcwd(),__file__))[0] except: current_path = '' def time_to_text (val): curtime = time.time() if val == 0: return 'Unknown' # within 18 hours, return in form 4 hours 23 minutes ago or some such if curtime - val < 18 * 60 * 60: return _("%s ago")%convert.seconds_to_timestring(curtime-val,round_at=1) tupl=time.localtime(val) if curtime - val < 7 * 24 * 60 * 60: return time.strftime('%A %T',tupl) else: return time.strftime('%D %T',tupl) class ConflictError (ValueError): def __init__ (self, conflicts): self.conflicts = conflicts class RecipeMergerDialog: """A dialog to allow the user to merge recipes. """ # These line up to the position of the options in the search-type # combo box in glade... RECIPE_DUP_MODE = 0 ING_DUP_MODE = 1 COMPLETE_DUP_MODE = 2 DUP_INDEX_PAGE = 0 MERGE_PAGE = 1 def __init__ (self, rd=None, in_recipes=None, on_close_callback=None): if rd: self.rd = rd else: self.rd = recipeManager.get_recipe_manager() self.in_recipes = in_recipes self.on_close_callback = on_close_callback self.to_merge = [] # Queue of recipes to be merged... self.ui = Gtk.Builder() self.ui.add_from_file(os.path.join(current_path,'recipeMerger.ui')) self.get_widgets() self.searchTypeCombo.set_active(self.COMPLETE_DUP_MODE) self.mm = mnemonic_manager.MnemonicManager() self.mm.add_builder(self.ui) self.mm.fix_conflicts_peacefully() self.ui.connect_signals( { 'on_searchTypeCombo_changed':lambda *args: self.populate_tree(), 'on_includeDeletedRecipesCheckButton_toggled':lambda *args: self.populate_tree(), 'on_mergeAllButton_clicked':self.merge_all, 'on_cancelMergeButton_clicked':self.cancel_merge, 'on_mergeSelectedButton_clicked':self.merge_selected, 'on_applyButton_clicked':self.apply_merge, 'auto_merge':self.offer_auto_merge, 'close':self.close, } ) def get_widgets (self): for w in [ 'recipeDiffScrolledWindow', 'duplicateRecipeTreeView', 'mergeAllButton','mergeSelectedButton', # buttons on list-dups page (minus close button) 'applyMergeButton','closeMergeButton','cancelMergeButton', # buttons on merge-recs page 'searchTypeCombo','includeDeletedRecipesCheckButton','notebook', 'mergeInfoLabel' ]: setattr(self,w,self.ui.get_object(w)) self.setup_treeview() def setup_treeview (self): renderer = Gtk.CellRendererText() col = Gtk.TreeViewColumn('Recipe',renderer,text=2) self.duplicateRecipeTreeView.append_column(col) self.duplicateRecipeTreeView.insert_column_with_data_func( -1, # position 'Last Modified', # title renderer, # renderer self.time_cell_data_func, # function 3 # data column ) col = Gtk.TreeViewColumn('Duplicates',renderer,text=4) self.duplicateRecipeTreeView.append_column(col) self.duplicateRecipeTreeView.get_selection().set_mode(Gtk.SelectionMode.MULTIPLE) def time_cell_data_func (self, tree_column, cell, model, titer, data_col): """Display time in treeview cell. """ val = model.get_value(titer,data_col) cell.set_property('text',time_to_text(val)) def populate_tree (self): """Populate treeview with duplicate recipes. 
""" #print 'CALL: populate_tree' search_mode =self.searchTypeCombo.get_active() include_deleted = self.includeDeletedRecipesCheckButton.get_active() if search_mode == self.RECIPE_DUP_MODE: dups = self.rd.find_duplicates(by='recipe', recipes=self.in_recipes, include_deleted=include_deleted) elif search_mode == self.ING_DUP_MODE: dups = self.rd.find_duplicates(by='ingredient', recipes=self.in_recipes, include_deleted=include_deleted) else: # == self.COMPLETE_DUP_MODE dups = self.rd.find_complete_duplicates(include_deleted=include_deleted, recipes=self.in_recipes) self.setup_treemodel(dups) self.dups = dups self.duplicateRecipeTreeView.set_model(self.treeModel) def setup_treemodel (self, dups): self.treeModel = Gtk.TreeStore(int,int,str,int,str) # dup_index, rec_id, rec_title, last_modified, number_of_duplicates for dup_index,duplicate_recipes in enumerate(dups): first = duplicate_recipes[0] others = duplicate_recipes[1:] nduplicates = len(duplicate_recipes) r = self.rd.get_rec(first) firstIter = self.treeModel.append( None, (dup_index or 0, first or 0, r.title or '', r.last_modified or 0, str(nduplicates)) ) for o in others: r = self.rd.get_rec(o) self.treeModel.append(firstIter, (dup_index,o,r.title,r.last_modified or 0,'') ) def merge_next_recipe (self, ): if self.to_merge: self.current_dup_index = self.to_merge.pop(0) self.mergeInfoLabel.set_text( 'Merging recipe %(index)s of %(total)s'%{ 'index':self.total_to_merge - len(self.to_merge), 'total':self.total_to_merge }) duplicate_recipes = self.dups[self.current_dup_index] #self.idt = IngDiffTable(self.rd,duplicate_recipes[0],duplicate_recipes[1]) self.current_recs = [self.rd.get_rec(i) for i in duplicate_recipes] last_modified = {'last_modified':[r.last_modified for r in self.current_recs]} self.current_diff_data = recipeIdentifier.diff_recipes(self.rd,self.current_recs) last_modified.update(self.current_diff_data) self.diff_table = DiffTable(last_modified,self.current_recs[0],parent=self.recipeDiffScrolledWindow) self.diff_table.add_ingblocks(self.rd, self.current_recs) if not self.diff_table.idiffs and not self.current_diff_data: # If there are no differences, just merge the recipes... self.apply_merge() return if self.recipeDiffScrolledWindow.get_child(): self.recipeDiffScrolledWindow.remove(self.recipeDiffScrolledWindow.get_child()) self.diff_table.show() #self.idt.show() vb = Gtk.VBox() vb.add(self.diff_table) #vb.add(self.idt) vb.show() #self.recipeDiffScrolledWindow.add_with_viewport(self.diff_table) self.recipeDiffScrolledWindow.add_with_viewport(vb) self.notebook.set_current_page(self.MERGE_PAGE) else: self.notebook.set_current_page(self.DUP_INDEX_PAGE) def do_merge (self, merge_dic, recs, to_keep=None): if not to_keep: to_keep = recs[0] if isinstance(to_keep, int): to_keep = self.rd.get_rec(to_keep) self.rd.modify_rec(to_keep,merge_dic) for r in recs: if r.id != to_keep.id: self.rd.delete_rec(r) def apply_merge (self, *args): #print "CALL: apply_merge" #print 'Apply ',self.diff_table.selected_dic,'on ',self.diff_table.rec self.do_merge(self.diff_table.selected_dic, self.current_recs, to_keep=self.diff_table.rec) self.merge_next_recipe() if not self.to_merge: self.populate_tree() def merge_selected (self, *args): """Merge currently selected row from treeview. 
""" #print "CALL: merge_selected" mod,rows = self.duplicateRecipeTreeView.get_selection().get_selected_rows() dup_indices = [mod[r][0] for r in rows] self.to_merge = [] for d in dup_indices: if d not in self.to_merge: self.to_merge.append(d) self.total_to_merge = len(self.to_merge) self.merge_next_recipe() def merge_all (self, *args): """Merge all rows currently in treeview. """ self.total_to_merge = len(self.dups) self.to_merge = list(range(self.total_to_merge)) self.merge_next_recipe() def offer_auto_merge (self, *args): try: option =dialog_extras.getOption( label=_('Auto-Merge recipes'), options=[ (_('Always use newest recipe'),NEWER), (_('Always use oldest recipe'),OLDER), # The following would be nice to add eventually... #_('Always use longer field'), #_('Ignore differences in ingredient keys') ] ) if not option: return self.do_auto_merge(NEWER) except dialog_extras.UserCancelledError: pass def do_auto_merge (self, mode): if self.recipeDiffScrolledWindow.get_child(): self.recipeDiffScrolledWindow.remove(self.recipeDiffScrolledWindow.get_child()) vb = Gtk.VBox() l = Gtk.Label() l.set_markup('<u>Automatically merged recipes</u>') vb.pack_start(l,expand=False,fill=False); vb.show_all() self.recipeDiffScrolledWindow.add_with_viewport(vb) def do_auto_merge (): kept = self.auto_merge_current_rec(mode) label = Gtk.Label(label='%s'%kept.title) vb.pack_start(label,expand=False,fill=False); label.show() self.cancelMergeButton.hide() self.applyMergeButton.hide() self.closeMergeButton.set_sensitive(False) do_auto_merge() while self.to_merge: self.mergeInfoLabel.set_text( 'Automatically merging recipe %(index)s of %(total)s'%{ 'index':self.total_to_merge - len(self.to_merge), 'total':self.total_to_merge }) self.current_dup_index = self.to_merge.pop(0) duplicate_recipes = self.dups[self.current_dup_index] self.current_recs = [self.rd.get_rec(i) for i in duplicate_recipes] do_auto_merge() while Gtk.events_pending(): Gtk.main_iteration() self.mergeInfoLabel.set_text('Automatically merged %s recipes'%self.total_to_merge) self.closeMergeButton.set_sensitive(True) def auto_merge_current_rec (self, mode): assert(mode in [NEWER, OLDER]) # TODO make this to an enum and type annotate it self.current_recs.sort(key=lambda x: x.last_modified, reverse=(mode==OLDER)) keeper = self.current_recs[0] tossers = self.current_recs[1:] for to_toss in tossers: self.rd.delete_rec(to_toss) return keeper def cancel_merge (self, *args): self.merge_next_recipe() if not self.to_merge: self.populate_tree() def populate_tree_if_possible (self): self.populate_tree() if not self.dups: self.searchTypeCombo.set_active(self.RECIPE_DUP_MODE) self.populate_tree() if not self.dups: self.searchTypeCombo.set_active(self.ING_DUP_MODE) self.populate_tree() def show_if_there_are_dups (self, label=None): self.populate_tree_if_possible() if self.dups: self.show(label=label) else: self.ui.get_object('window1').destroy() def show (self, label=None): if label: messagebox = self.ui.get_object('messagebox') l = Gtk.Label(label=label) l.set_line_wrap(True) infobar = Gtk.InfoBar() infobar.set_message_type(Gtk.MessageType.INFO) infobar.get_content_area().add(l) infobar.show_all() messagebox.pack_start(infobar, True, False) self.ui.get_object('window1').show() def close (self, *args): #print "CALL: close" w = self.ui.get_object('window1') w.hide() w.destroy() if self.on_close_callback: self.on_close_callback(self) class RecipeMerger: """A class to handle recipe merging. 
""" def __init__ (self, rd): self.rd = rd def autoMergeRecipes (self, recs): to_fill,conflicts = recipeIdentifier.merge_recipes(self.rd, recs) if conflicts: raise ConflictError(conflicts) else: to_keep = recs[0] # Update a single recipe with our information... self.rd.modify_rec(to_keep,to_fill) # Delete the other recipes... for r in recs[1:]: self.rd.delete_rec(r.id) def uiMergeRecipes (self, recs): diffs = recipeIdentifier.diff_recipes(self.rd, recs) idiffs = recipeIdentifier.diff_ings(self.rd, r1, r2) if diffs: return DiffTable(diffs,recs[0]) else: return None class DiffTable (Gtk.Table): """A Table displaying differences in a recipe. diff_dic is a dictionary with the differences. {'attribute':(VAL1,VAL2,...)} recipe_object is a recipe object representing one of our duplicate recs, from which we can grab attributes that are not different. dont_choose is a list of attributes whose differences are displayed, but where no choice is offered (such as modification time for the recipe). """ def __init__ (self, diff_dic, recipe_object=None, parent=None, dont_choose=[]): self.idiffs = [] self.diff_dic = diff_dic Gtk.Table.__init__(self) self.selected_dic = {} self.set_col_spacings(6) self.set_row_spacings(6) self.row = 0 self.max_cols = 1 for attr,name,typ in [('last_modified','Last Modified',None)] + gglobals.REC_ATTRS \ + [('image','Image',None)] \ + [(attr,gglobals.TEXT_ATTR_DIC[attr],None) for attr in gglobals.DEFAULT_TEXT_ATTR_ORDER]: if attr in diff_dic: buttons = self.build_options(attr,self.diff_dic[attr]) label = Gtk.Label(label='_'+name+':') label.set_alignment(0.0,0.5) label.set_use_underline(True) label.show() self.attach(label,0,1,self.row,self.row+1,xoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL,yoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL) target = None for col,b in enumerate(buttons): self.setup_widget_size(b,in_col=True) b.show() if not target: target = b label.set_mnemonic_widget(target) self.attach(b,col+1,col+2,self.row,self.row+1,xoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL,yoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL) if col > self.max_cols: self.max_cols = col self.row += 1 elif recipe_object and hasattr(recipe_object,attr) and getattr(recipe_object,attr): att_label = Gtk.Label(label=name+':') att_label.set_use_underline(True) att_label.set_alignment(0,0.5) att_label.show() constructor = get_display_constructor(attr) val = getattr(recipe_object,attr) val_label = constructor(getattr(recipe_object,attr)) val_label.show() self.setup_widget_size(val_label,False) if hasattr(val_label,'set_alignment'): val_label.set_alignment(0,0.5) self.attach(att_label,0,1,self.row,self.row+1,xoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL,yoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL) self.attach(val_label,1,5,self.row,self.row+1,xoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL,yoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL) self.row += 1 self.mm = mnemonic_manager.MnemonicManager() self.mm.add_toplevel_widget(self) self.mm.fix_conflicts_peacefully() self.rec = recipe_object.id def setup_widget_size (self, w, in_col=True): if in_col: w.set_size_request(230,-1) else: w.set_size_request(650,-1) def build_options (self, attribute, values): buttons = [] group_rb = None make_widget = get_display_constructor(attribute) for v in values: rb = Gtk.RadioButton(group=group_rb) if not group_rb: group_rb = rb if v is not None: rb.add(make_widget(v)) else: rb.add(Gtk.Label(label=_("None"))) rb.show_all() buttons.append(rb) 
rb.connect('toggled',self.value_toggled,attribute,v) self.selected_dic[attribute] = values[0] for n,v in enumerate(values): if v: buttons[n].set_active(True) break return buttons def value_toggled (self, rb, attribute, v): self.selected_dic[attribute] = v def add_ingblocks (self, rd, recs): #print 'add_ingblocks for ',[r.id for r in recs] self.rd = rd self.iblock_dic = {} if len(recs) == 1: blocks = recipeIdentifier.format_ingdiff_line(recipeIdentifier.format_ings(recs[0],self.rd)) self.iblock_dic[blocks[0]] = recs[0] else: blocks = [] rec_0 = recs[0] for r in recs[1:]: chunks = self.get_ing_text_blobs(rec_0,r) if not chunks and not blocks: # If there is no diff, in other words, and we # don't yet have any block... chunks = [recipeIdentifier.format_ings(recs[0],self.rd)] elif not chunks: # Otherwise if there are no diffs we just continue # our loop... continue if not blocks: blocks = [chunks[0]] self.iblock_dic[blocks[0]] = rec_0 if chunks and len(chunks) > 1: new_block = chunks[1] if new_block not in blocks: blocks.append(new_block) self.iblock_dic[new_block] = r group_rb = None name = _('Ingredients') if len(blocks) > 1: lab = Gtk.Label(label='_'+_("Ingredients")); lab.set_use_underline(True) for col,block in enumerate(blocks): rb = Gtk.RadioButton( label=_("Recipe")+ ' ' +'%i'%(col+1), group=group_rb ) if not group_rb: group_rb = rb lab.set_mnemonic_widget(rb) if not block: rb.add(Gtk.Label(label=_("None"))) else: for n,txt in enumerate(block): l = Gtk.Label(label=txt) l.set_alignment(0.0,0.0) l.set_use_markup(True) l.set_line_wrap(True); l.set_line_wrap_mode(Pango.WrapMode.WORD) l.show() self.setup_widget_size(l,in_col=True) self.attach(l,col+1,col+2,self.row+1+n,self.row+2+n, xoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL, yoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL) #rb.add(l) rb.connect('toggled',self.ing_value_toggled,block) self.setup_widget_size(rb,in_col=True) rb.show() self.attach(rb,col+1,col+2,self.row,self.row+1,xoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL,yoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL) else: lab = Gtk.Label(label=_("Ingredients")); lab.show() l = Gtk.Label(label=blocks[0]) l.set_alignment(0.0,0.0) l.set_use_markup(True) l.set_line_wrap(True); l.set_line_wrap_mode(Pango.WrapMode.WORD) l.show() self.attach(l,1,5,self.row,self.row+1,xoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL,yoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL) lab.set_alignment(0.0,0.0); lab.show() self.attach(lab,0,1,self.row,self.row+1,xoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL,yoptions=Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL) def ing_value_toggled (self, rb, block): if rb.get_active(): #print 'RB clicked',rb,'for block',block #print 'ING TOGGLED - REC = ', self.rec = self.iblock_dic[block] #print self.rec def get_ing_text_blobs (self, r1, r2): """Return an ing-blurb for r1 and r2 suitable for display.""" idiff = recipeIdentifier.diff_ings(self.rd, r1, r2) if idiff: self.idiffs.append(idiff) def is_line (l): return not (l == '<diff/>') if idiff: ret = [] for igroup in idiff: ret.append((recipeIdentifier.format_ingdiff_line(i) for i in filter(is_line,igroup))) return ret def put_text_in_scrolled_window(text: str) -> Gtk.ScrolledWindow: sw = Gtk.ScrolledWindow() tv = Gtk.TextView() sw.add(tv) tv.get_buffer().set_text(text) tv.set_editable(False) tv.set_wrap_mode(Gtk.WrapMode.WORD) sw.set_policy(Gtk.PolicyType.NEVER,Gtk.PolicyType.AUTOMATIC) tv.show() return sw def make_text_label(text: str, use_markup: bool = 
False) -> Union[Gtk.Label, Gtk.ScrolledWindow]: if not text: return Gtk.Label(label=_('None')) elif len(text) < 30: return Gtk.Label(label=text) elif len(text) < 250: label = Gtk.Label(label=text) if use_markup: label.set_use_markup(use_markup) label.set_line_wrap_mode(Pango.WrapMode.WORD) return label else: return put_text_in_scrolled_window(text) def get_display_constructor (attribute): if attribute == 'rating': return lambda v: ratingWidget.StarImage( ratingWidget.star_generator, value=v, upper=10) elif attribute in ['preptime','cooktime']: return lambda v: Gtk.Label(label=convert.seconds_to_timestring(v)) elif attribute=='image': return lambda v: (v and Gtk.Label(label="An Image") or Gtk.Label(label="No Image")) elif attribute in gglobals.DEFAULT_TEXT_ATTR_ORDER: return make_text_label elif attribute == 'last_modified': return lambda v: Gtk.Label(label=time_to_text(v)) else: return lambda v: v and Gtk.Label(label=v) or Gtk.Label(label=_('None')) if __name__ == '__main__': def test_in_window (widget): """Put widget in window and show it""" w = Gtk.Window() w.add(widget) w.connect('delete-event',Gtk.main_quit) w.show() Gtk.main() def test_difftable (): class FakeRec: pass test_rec = FakeRec() test_rec.title = 'Shloppidy Recipe' test_data = {'rating':[4,7], 'category':['Dessert','Dessert, Cake'], 'cuisine':['American','All-American'], 'preptime':[6000,12000], 'cooktime':[6543,None]} t = DiffTable(test_data,test_rec) t.show() test_in_window(t) print(t.selected_dic) def test_merger (rd, conflicts): recs = [rd.get_rec(i) for i in conflicts] rmerger = RecipeMerger(rd) to_fill,conflict_dic = recipeIdentifier.merge_recipes(rd,recs) if conflict_dic: dt = rmerger.uiMergeRecipes(recs) dt.show() test_in_window(dt) print(dt.selected_dic) elif to_fill: print('Differences in ',conflicts,'can be auto-filled with',to_fill) else: print('No differences in ',conflicts) rd = recipeManager.default_rec_manager() rmd = RecipeMergerDialog(rd) rmd.populate_tree() rmd.show() rmd.ui.get_object('window1').connect('delete-event',Gtk.main_quit) Gtk.main() #dups = rd.find_complete_duplicates() #for d in dups[5:]: # test_merger(rd,d)
gpl-2.0
6,182,747,767,306,343,000
39.464342
180
0.567932
false
3.686011
true
false
false
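Behind the GTK dialog, the auto-merge option simply keeps the newest (or oldest) duplicate by its last_modified timestamp and discards the rest. A GUI-free sketch of that selection logic; the Rec dataclass is invented for illustration and is not the application's recipe type:

from dataclasses import dataclass

NEWER, OLDER = 1, 2

@dataclass
class Rec:
    title: str
    last_modified: float  # seconds since the epoch

def auto_merge(duplicates, mode=NEWER):
    """Return (kept, discarded) for a group of duplicate recipes."""
    assert mode in (NEWER, OLDER)
    ordered = sorted(duplicates, key=lambda r: r.last_modified,
                     reverse=(mode == NEWER))
    return ordered[0], ordered[1:]

dups = [Rec('Cake', 1_600_000_000), Rec('Cake (copy)', 1_700_000_000)]
kept, tossed = auto_merge(dups, NEWER)
print(kept.title, [r.title for r in tossed])  # Cake (copy) ['Cake']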
JoseBlanca/seq_crumbs
test/seq/test_seqio.py
1
10372
# Copyright 2012 Jose Blanca, Peio Ziarsolo, COMAV-Univ. Politecnica Valencia # This file is part of seq_crumbs. # seq_crumbs is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # seq_crumbs is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with seq_crumbs. If not, see <http://www.gnu.org/licenses/>. # pylint: disable=R0201 # pylint: disable=R0904 # pylint: disable=C0111 import os import unittest from cStringIO import StringIO from tempfile import NamedTemporaryFile from subprocess import Popen, PIPE from Bio.SeqRecord import SeqRecord from Bio.Seq import Seq from crumbs.utils.test_utils import TEST_DATA_DIR from crumbs.utils.bin_utils import BIN_DIR from crumbs.seq.seqio import (guess_seq_type, fastaqual_to_fasta, seqio, _write_seqrecords, _read_seqrecords, _itemize_fastx, read_seqs, write_seqs) from crumbs.utils.tags import SEQITEM, SEQRECORD from crumbs.exceptions import IncompatibleFormatError, MalformedFile FASTA = ">seq1\natctagtc\n>seq2\natctagtc\n>seq3\natctagtc\n" QUAL = ">seq1\n30 30 30 30 30 30 30 30\n>seq2\n30 30 30 30 30 30 30 30\n" QUAL += ">seq3\n30 30 30 30 30 30 30 30\n" FASTQ = '@seq1\natcgt\n+\n?????\n@seq2\natcgt\n+\n?????\n@seq3\natcgt\n+\n' FASTQ += '?????\n' class SeqIOTest(unittest.TestCase): 'It tests the seqio functions' @staticmethod def _make_fhand(content=None): 'It makes temporary fhands' if content is None: content = '' fhand = NamedTemporaryFile() fhand.write(content) fhand.flush() return fhand def test_guess_seq_type(self): 'It guesses if the sequence is nucleotide or protein' fpath = os.path.join(TEST_DATA_DIR, 'arabidopsis_genes') assert guess_seq_type(open(fpath)) == 'nucl' fpath = os.path.join(TEST_DATA_DIR, 'pairend2.sfastq') assert guess_seq_type(open(fpath)) == 'nucl' @staticmethod def test_fastaqual_to_fasta(): seq_fhand = StringIO('>seq1\nattct\n>seq2\natc\n') qual_fhand = StringIO('>seq1\n2 2 2 2 2\n>seq2\n2 2 2\n') out_fhand = NamedTemporaryFile() fastaqual_to_fasta(seq_fhand, qual_fhand, out_fhand) fastq = open(out_fhand.name).read() assert fastq == "@seq1\nattct\n+\n#####\n@seq2\natc\n+\n###\n" def test_seqio(self): 'It tets the seqio function' # fastq to fasta out_fhand = NamedTemporaryFile() seqio([self._make_fhand(FASTQ)], out_fhand, 'fasta') assert ">seq1\natcgt" in open(out_fhand.name).read() # fastq to fastq-illumina out_fhand = NamedTemporaryFile() seqio([self._make_fhand(FASTQ)], out_fhand, 'fastq-illumina') assert "@seq1\natcgt\n+\n^^^^" in open(out_fhand.name).read() out_fhand = NamedTemporaryFile() seqio([self._make_fhand(FASTQ), self._make_fhand(FASTQ)], out_fhand, 'fastq-illumina') assert "@seq3\natcgt\n+\n^^^^^\n@seq1" in open(out_fhand.name).read() # fasta to fastq out_fhand = NamedTemporaryFile() try: seqio([self._make_fhand(FASTA)], out_fhand, 'fastq') self.fail("error previously expected") except IncompatibleFormatError as error: assert 'No qualities available' in str(error) # bad_format fastq bad_fastq_fhand = self._make_fhand(FASTQ + 'aklsjhdas') try: seqio([bad_fastq_fhand], out_fhand, 'fasta') self.fail("error previously expected") except MalformedFile as error: assert 'Lengths of sequence and 
quality' in str(error) # genbank to fasta out_fhand = NamedTemporaryFile() genbank_fhand = open(os.path.join(TEST_DATA_DIR, 'sequence.gb')) seqio([genbank_fhand], out_fhand, 'fasta') result = open(out_fhand.name).read() assert '>NM_019354.2' in result class ReadWriteSeqRecordsTest(unittest.TestCase): 'It writes seqrecords in a file' def test_write_empy_seq(self): 'It does not write an empty sequence' seq1 = SeqRecord(Seq('ACTG'), id='seq1') fhand = StringIO() _write_seqrecords([seq1, None, SeqRecord(Seq(''), id='seq2')], fhand, file_format='fasta') fhand.flush() assert fhand.getvalue() == '>seq1\nACTG\n' def test_read_fasta(self): 'It tests the reading of a fasta file' fhand = StringIO('>seq1\nACTG\n') assert not list(_read_seqrecords([fhand]))[0].description class SimpleIOTest(unittest.TestCase): 'It tests the simple input and output read' def test_singleline_itemizer(self): fhand = StringIO('@s1\nACTG\n+\n1234\n' * 1100) seqs = list(_itemize_fastx(fhand)) names = [seq[0] for seq in seqs] assert len(names) == 1100 assert len(set([seq[1][1] for seq in seqs])) == 1 def test_fasta_itemizer(self): 'It tests the fasta itemizer' fhand = StringIO('>s1\nACTG\n>s2 desc\nACTG\n') seqs = list(_itemize_fastx(fhand)) assert seqs == [('s1', ['>s1\n', 'ACTG\n'], {}), ('s2', ['>s2 desc\n', 'ACTG\n'], {})] # with several lines fhand = StringIO('>s1\nACTG\nGTAC\n>s2 desc\nACTG\n') seqs = list(_itemize_fastx(fhand)) assert seqs == [('s1', ['>s1\n', 'ACTGGTAC\n'], {}), ('s2', ['>s2 desc\n', 'ACTG\n'], {})] # With empty lines fhand = StringIO('>s1\nACTG\n\n>s2 desc\nACTG\n') seqs = list(_itemize_fastx(fhand)) assert seqs == [('s1', ['>s1\n', 'ACTG\n'], {}), ('s2', ['>s2 desc\n', 'ACTG\n'], {})] def test_fastq_itemizer(self): 'It tests the fasta itemizer' fhand = StringIO('@s1\nACTG\n+\n1234\n@s2 desc\nACTG\n+\n4321\n') seqs = list(_itemize_fastx(fhand)) assert seqs == [('s1', ['@s1\n', 'ACTG\n', '+\n', '1234\n'], {}), ('s2', ['@s2 desc\n', 'ACTG\n', '+\n', '4321\n'], {})] # Empty line fhand = StringIO('@s1\nACTG\n+\n1234\n\n@s2 desc\nACTG\n+\n4321\n') seqs = list(_itemize_fastx(fhand)) assert seqs == [('s1', ['@s1\n', 'ACTG\n', '+\n', '1234\n'], {}), ('s2', ['@s2 desc\n', 'ACTG\n', '+\n', '4321\n'], {})] # Empty line fhand = StringIO('@s1\nACTG\nATTA\n+\n1234\n1234\n') seqs = list(_itemize_fastx(fhand)) assert seqs == [('s1', ['@s1\n', 'ACTGATTA\n', '+\n', '12341234\n'], {})] def test_seqitems_io(self): 'It checks the different seq class streams IO' fhand = StringIO('>s1\nACTG\n>s2 desc\nACTG\n') seqs = list(read_seqs([fhand], prefered_seq_classes=[SEQITEM])) assert seqs[0].kind == SEQITEM fhand = StringIO() write_seqs(seqs, fhand) assert fhand.getvalue() == '>s1\nACTG\n>s2 desc\nACTG\n' assert seqs[0].object.name == 's1' # SeqRecord fhand = StringIO('>s1\nACTG\n>s2 desc\nACTG\n') seqs = list(read_seqs([fhand], prefered_seq_classes=[SEQRECORD])) assert seqs[0].kind == SEQRECORD fhand = StringIO() write_seqs(seqs, fhand, 'fasta') assert fhand.getvalue() == '>s1\nACTG\n>s2 desc\nACTG\n' # seqitem not possible with different input and output formats fhand = StringIO('>s1\nACTG\n>s2 desc\nACTG\n') try: seqs = list(read_seqs([fhand], out_format='fastq', prefered_seq_classes=[SEQITEM])) self.fail('ValueError expected') except ValueError: pass fhand = StringIO('>s1\nACTG\n>s2 desc\nACTG\n') seqs = list(read_seqs([fhand], out_format='fasta', prefered_seq_classes=[SEQITEM])) fhand = StringIO() write_seqs(seqs, fhand) assert fhand.getvalue() == '>s1\nACTG\n>s2 desc\nACTG\n' class PipingTest(unittest.TestCase): 'It 
tests that we get no error when trying to write in a closed pipe' def test_write_closed_pipe(self): seq_fhand = NamedTemporaryFile(suffix='.fasta') n_seqs = 1000 for i in range(n_seqs): seq_fhand.write('>s\nACTG\n') seq_fhand.flush() in_fpath = seq_fhand.name seq_head = os.path.join(BIN_DIR, 'seq_head') process_seq = Popen([seq_head, '-n', str(n_seqs), in_fpath], stdout=PIPE) stdout = NamedTemporaryFile(suffix='.stdout') process_head = Popen(['head', '-n', '1'], stdin=process_seq.stdout, stdout=stdout) process_seq.stdout.close() # Allow seq_head to receive a SIGPIPE if # head exits. process_head.communicate() assert open(stdout.name).read() == '>s\n' seq_fhand.close() stdout.close() # With SeqRecords gb_fpath = os.path.join(TEST_DATA_DIR, 'sequence.gb') gb_content = open(gb_fpath).read() seq_fhand = NamedTemporaryFile(suffix='.gb') n_seqs = 100 for i in range(n_seqs): seq_fhand.write(gb_content) seq_fhand.flush() in_fpath = seq_fhand.name process_seq = Popen([seq_head, '-n', str(n_seqs), in_fpath], stdout=PIPE) stdout = NamedTemporaryFile(suffix='.stdout') process_head = Popen(['head', '-n', '1'], stdin=process_seq.stdout, stdout=stdout) process_seq.stdout.close() # Allow seq_head to receive a SIGPIPE if # head exits. process_head.communicate() seq_fhand.close() assert 'LOCUS' in open(stdout.name).read() stdout.close() if __name__ == '__main__': #import sys;sys.argv = ['', 'SeqIOTest.test_guess_seq_type'] unittest.main()
gpl-3.0
-4,225,053,976,801,928,000
38.139623
78
0.58253
false
3.237203
true
false
false
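The itemizer tests expect multi-line FASTA entries to be collapsed into one sequence line per record. A small stand-alone sketch of that behaviour (not the library's actual _itemize_fastx implementation):

def itemize_fasta(lines):
    """Yield (name, [header_line, sequence_line]) tuples from FASTA text."""
    name, header, seq = None, None, []
    for line in lines:
        if line.startswith('>'):
            if name is not None:
                yield name, [header, ''.join(seq) + '\n']
            header = line
            name = line[1:].split()[0].strip()
            seq = []
        elif line.strip():
            seq.append(line.strip())
    if name is not None:
        yield name, [header, ''.join(seq) + '\n']

text = '>s1\nACTG\nGTAC\n>s2 desc\nACTG\n'.splitlines(keepends=True)
print(list(itemize_fasta(text)))
# [('s1', ['>s1\n', 'ACTGGTAC\n']), ('s2', ['>s2 desc\n', 'ACTG\n'])]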
ishanatmuz/HangmanMinimalist
hangman.py
1
4431
import random import string import os import platform # Defining the text file containing the list of words WORDLIST_FILENAME = "words.txt" MAX_GUESSES = 8 def loadWords(): # Returns a list of valid words. Words are taken from the file words.txt print "Loading word list from file..." # Open file for reading with no buffering inFile = open(WORDLIST_FILENAME, 'r', 0) # Read the file in single line line = inFile.readline() # Split all the words separated by whitespaces wordlist = string.split(line) print " ", len(wordlist), "words loaded." return wordlist def chooseWord(wordlist): # Choose a word at random which the user have to guess return random.choice(wordlist) def isWordGuessed(secretWord, lettersGuessed): # Checking for the non-existence of any character from the secretWord # The result is stored as True of False result = True; for secretLetter in secretWord: if not secretLetter in lettersGuessed: result = False; break; return result; def getGuessedWord(secretWord, lettersGuessed): # Returns the guessed word in a specific format # Example - the word 'apple' with the guessed characters ['a', 'b','l','s','e'] # would look like this 'a_ _ l _ ' result = "'"; for letter in secretWord: if letter in lettersGuessed: result += letter; else: result += '_ '; result += "'"; return result; def getAvailableLetters(lettersGuessed): # Return the list of letters that are available to be used # The letters returned are in lowercase availableLetters = string.ascii_lowercase; for letter in lettersGuessed: availableLetters = availableLetters.replace(letter, ''); return availableLetters; def clearTerminal(): # Clears the terminal on which the output is being displayed. # Works at least on Windows and Linux, I haven't tested it on Mac OS if platform.system() == 'Windows': os.system('cls') else: os.system('clear') def hangman(secretWord): # Total number of wrong guesses allowed is 8 numberOfGuesses = MAX_GUESSES # The letters guessed by the user lettersGuessed = {} # Welcome message print 'Welcome to the game, Hangman!' print 'I am thinking of a word that is %s letters long.' %(str(len(secretWord))) # Infinite loop which breaks from inside the loop's conditions while True: print '-------------' if not isWordGuessed(secretWord, lettersGuessed): # Word not guessed if numberOfGuesses == 0: # All guesses exhausted, end the game print 'Sorry, you ran out of guesses. The word was %s.' %(secretWord) break else: # Guesses left, Display guesses left and available letters print 'You have %s guesses left.' %(str(numberOfGuesses)) print 'Available letters: %s' %(getAvailableLetters(lettersGuessed)) # Take input from the user guessedLetter = raw_input('Please guess a letter: ') # Clearing the terminal # Can use and cannot use depending on the preference clearTerminal() if guessedLetter in lettersGuessed: # Already guessed letter, display guessed word print 'Oops! You\'ve already guessed that letter:%s' %(getGuessedWord(secretWord, lettersGuessed)) else: # New guess, add to lettersGuessed lettersGuessed[guessedLetter] = True if guessedLetter not in secretWord: # Wrong Guess, decrement number of guesses print 'Oops! That letter is not in my word:%s' %(getGuessedWord(secretWord, lettersGuessed)) numberOfGuesses -= 1 else: # Correct guess print 'Good guess:%s' %(getGuessedWord(secretWord, lettersGuessed)) else: # Word guessed print 'Congratulations, you won!' 
break # Execution sequence of the game # Load the words from file wordlist = loadWords() # Choose a secret word for the user to guess secretWord = chooseWord(wordlist).lower() # Start the game for user hangman(secretWord)
mit
7,097,529,540,876,522,000
36.880342
118
0.61995
false
4.314508
false
false
false
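The hangman script's display helpers are easy to isolate; this Python 3 sketch mirrors getGuessedWord and getAvailableLetters without the game loop:

import string

def guessed_word(secret, guessed):
    # Show guessed letters, keep '_ ' for the rest (same layout as the game).
    return "'" + ''.join(c if c in guessed else '_ ' for c in secret) + "'"

def available_letters(guessed):
    return ''.join(c for c in string.ascii_lowercase if c not in guessed)

guessed = {'a', 'l', 'e'}
print(guessed_word('apple', guessed))   # 'a_ _ le'
print(available_letters(guessed))       # bcdfghijkmnopqrstuvwxyz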
mirestrepo/voxels-at-lems
registration_eval/results/compute_trans_geo_accuracy.py
1
13935
#!/usr/bin/env python # encoding: utf-8 """ compute_transformation_error.py Created by Maria Isabel Restrepo on 2012-09-24. Copyright (c) 2012 . All rights reserved. This script computes the distances betweeen an estimated similarity transformation and its ground truth The transformation is used to transform a "source" coordinate system into a "target coordinate system" To compute the error between the translations, the L2 norm diference translation vectors in the "source coordinate system" is computed. Since distances are preserved under R and T, only scale is applied. The rotation error is computed as the half angle between the normalized queternions i.e acos(|<q1,q2>|) in [0, pi/2] """ import os import sys import logging import argparse import vpcl_adaptor as vpcl import numpy as np from numpy import linalg as LA import transformations as tf import math import matplotlib.pyplot as plt sys.path.append(os.pardir) import reg3d_transformations as reg3d_T LOG = None """Compute the accuracy between the LIDAR fiducial points and corresponding geo-register correspondances""" def compute_ref_accuracy(fid_path, original_corrs_path, geo_tform): #Load fiducial .ply fid = open(fid_path, 'r') fid_points = np.genfromtxt(fid, dtype=float, delimiter=' ', skip_header=9) fid.close() #Load original corrs .ply fid = open(original_corrs_path, 'r') original_corrs = np.genfromtxt(fid, dtype=float, delimiter=' ', skip_header=9) fid.close() #Load transformation #************GEO**************" Tfis = open(geo_tform, 'r') lines = [] lines = Tfis.readlines() scale_geo = float(lines[0]) Ss_geo = tf.scale_matrix(scale_geo) quat_line = lines[1].split(" ") quat_geo = np.array([float(quat_line[3]), float(quat_line[0]), float(quat_line[1]), float(quat_line[2])]) Rs_geo = tf.quaternion_matrix(quat_geo) trans_line = lines[2].split(" ") trans_geo = np.array([float(trans_line[0]), float(trans_line[1]), float(trans_line[2])]) Tfis.close() Hs_geo = Rs_geo.copy() Hs_geo[:3, 3] = trans_geo[:3] Hs_geo = Ss_geo.dot(Hs_geo) LOG.debug("\n******Geo***** \n Scale: \n%s \nR:\n%s \nT:\n%s \nH:\n%s", Ss_geo, Rs_geo, trans_geo, Hs_geo) #Compute the "reference error" #i.e. fiducial points - geo registered correspondances npoints, c = fid_points.shape if npoints != 30: LOG.warn("Number of fiducial point is NOT 30") if c != 3: LOG.error("Fiducial points has the wrong number of dimensions") # import code; code.interact(local=locals()) fid_points_hom = np.hstack((fid_points, np.ones([npoints, 1]))).T original_corrs_hom = np.hstack((original_corrs, np.ones([npoints, 1]))).T geo_corrs_hom = Hs_geo.dot(original_corrs_hom) geo_ref_diff = geo_corrs_hom - fid_points_hom # import pdb; pdb.set_trace() delta_z = np.sqrt(geo_ref_diff[2, :] * geo_ref_diff[2, :]) delta_r = np.sqrt(geo_ref_diff[0, :] * geo_ref_diff[0, :] + geo_ref_diff[1, :] * geo_ref_diff[1, :]) return delta_z, delta_r def compute_geo_accuracy(fid_path, original_corrs_path, geo_tform, trials_root, desc_name, niter, ntrials, percentile=99): #Load fiducial .ply fid = open(fid_path, 'r') fid_points = np.genfromtxt(fid, delimiter=' ', skip_header=9) fid.close() #Load original corrs .ply fid = open(original_corrs_path, 'r') original_corrs = np.genfromtxt(fid, delimiter=' ', skip_header=9) fid.close() #load the geo tranformation GEO = reg3d_T.geo_transformation(geo_tform); #Compute the "reference error" #i.e. 
fiducial points - geo registered correspondances npoints, c = fid_points.shape if npoints != 30: LOG.warn("Number of fiducial point is NOT 30") if c != 3: LOG.error("Fiducial points has the wrong number of dimensions") # import code; code.interact(local=locals()) fid_points_hom = np.hstack((fid_points, np.ones([npoints, 1]))).T original_corrs_hom = np.hstack((original_corrs, np.ones([npoints, 1]))).T geo_corrs_hom = GEO.transform_points(original_corrs_hom) geo_ref_diff = geo_corrs_hom - fid_points_hom # import pdb; pdb.set_trace() delta_z = (geo_ref_diff[2, :] **2) ** (1./2.) delta_r = (geo_ref_diff[0, :] **2 + geo_ref_diff[1, :] **2 )** (1./2.) delta_z_ia = np.zeros([ntrials, npoints]) delta_r_ia = np.zeros([ntrials, npoints]) delta_z_icp = np.zeros([ntrials, npoints]) delta_r_icp = np.zeros([ntrials, npoints]) for trial in range(0, ntrials): print "********Trial", trial, "**********" #Load the transformations for this trial #************Hs**************# #read source to target "Ground Truth" Transformation Tfile = trials_root + "/trial_" + str(trial) + "/Hs_inv.txt" GT_Tform = reg3d_T.gt_transformation(Tfile) src_features_dir = (trials_root + "/trial_" + str(trial) + "/" + desc_name) Tfile_ia = (src_features_dir + "/ia_transformation_" + str(percentile) + "_" + str(niter) + ".txt") Tfile_icp = (src_features_dir + "/icp_transformation_" + str(percentile) + "_" + str(niter) + ".txt") REG_Tform = reg3d_T.pcl_transformation(Tfile_ia, Tfile_icp) Hs_ia_error = REG_Tform.Hs_ia.dot(GT_Tform.Hs) Hs_icp_error = REG_Tform.Hs_icp.dot(GT_Tform.Hs) # transform the points with the residual transformations ia_corrs_hom = Hs_ia_error.dot(original_corrs_hom) icp_corrs_hom = Hs_icp_error.dot(original_corrs_hom) # geo-register geo_ia_corrs_hom = GEO.transform_points(ia_corrs_hom) geo_icp_corrs_hom = GEO.transform_points(icp_corrs_hom) # distances geo_ia_ref_diff = geo_ia_corrs_hom - fid_points_hom geo_icp_ref_diff = geo_icp_corrs_hom - fid_points_hom delta_z_ia[trial, :] = np.sqrt(geo_ia_ref_diff[2, :] ** 2) delta_r_ia[trial, :] = np.sqrt(geo_ia_ref_diff[0, :] ** 2 + geo_ia_ref_diff[1, :] ** 2 ) delta_z_icp[trial, :] = np.sqrt(geo_icp_ref_diff[2, :] ** 2) delta_r_icp[trial, :] = np.sqrt(geo_icp_ref_diff[0, :] ** 2 + geo_icp_ref_diff[1, :] ** 2) # import pdb; pdb.set_trace() return delta_z, delta_r,\ delta_z_ia, delta_r_ia, \ delta_z_icp, delta_r_icp def main(logfile=None): global LOG LOG = setlogging(logfile) descriptors = ["FPFH_30", "SHOT_30"] niter = 500; ntrials = 10; plot_errors = True; if (plot_errors): colors = ['magenta','green']; markers = ['o', 's', '*', '+', '^', 'v'] fid_path = "/data/lidar_providence/downtown_offset-1-financial-dan-pts1.ply" original_corrs_path = "/data/lidar_providence/downtown_offset-1-financial-dan-pts0.ply" trials_root = "/Users/isa/Experiments/reg3d_eval/downtown_dan"; geo_tform = "/data/lidar_providence/downtown_offset-1-financial-dan-Hs.txt" for d_idx in range(0, len(descriptors)): desc_name = descriptors[d_idx] delta_z, delta_r, \ delta_z_ia, delta_r_ia, \ delta_z_icp, delta_r_icp = compute_geo_accuracy(fid_path, original_corrs_path, geo_tform, trials_root, desc_name, niter, ntrials) #sort errors for all trials to get the 70 80 90 % errors delta_z_ia.sort(axis=0) delta_r_ia.sort(axis=0) delta_z_icp.sort(axis=0) delta_r_icp.sort(axis=0) CE_70_ia = delta_r_ia[int(0.7 * ntrials) - 1, :] CE_80_ia = delta_r_ia[int(0.8 * ntrials) - 1, :] CE_90_ia = delta_r_ia[int(0.9 * ntrials) - 1, :] LE_70_ia = delta_z_ia[int(0.7 * ntrials) - 1, :] LE_80_ia = delta_z_ia[int(0.8 * ntrials) - 
1, :] LE_90_ia = delta_z_ia[int(0.9 * ntrials) - 1, :] CE_70_icp = delta_r_icp[int(0.7 * ntrials) - 1, :] CE_80_icp = delta_r_icp[int(0.8 * ntrials) - 1, :] CE_90_icp = delta_r_icp[int(0.9 * ntrials) - 1, :] LE_70_icp = delta_z_icp[int(0.7 * ntrials) - 1, :] LE_80_icp = delta_z_icp[int(0.8 * ntrials) - 1, :] LE_90_icp = delta_z_icp[int(0.9 * ntrials) - 1, :] if (plot_errors): #Plot CE and LE fig_ia_CE = plt.figure() ax_ia_CE = fig_ia_CE.add_subplot(111); plt.hold(True); plt.axis(tight=True); ax_ia_CE.plot(CE_70_ia, "--s", color="green", label= "CE_70"); ax_ia_CE.plot(CE_80_ia, "--^", color="magenta", label= "CE_80"); ax_ia_CE.plot(CE_90_ia, "--*", color="blue", label= "CE_90"); ax_ia_CE.plot( delta_r, "--o", color="cyan", label= "GT"); ax_ia_CE.set_xlabel('Fiducial Marker (index)',fontsize= 20); ax_ia_CE.set_ylabel('Error (meters)',fontsize= 20); ax_ia_CE.legend(loc='best', frameon=False); # ax_ia_CE.set_title('IA CE') fname = trials_root + "/GEO_results/IA_CE_" + desc_name + ".pdf" fig_ia_CE.savefig(fname, transparent=True, pad_inches=5) fig_ia_LE = plt.figure() ax_ia_LE = fig_ia_LE.add_subplot(111); plt.hold(True); plt.axis(tight=True); ax_ia_LE.plot(LE_70_ia, "--s", color="green", label= "LE_70"); ax_ia_LE.plot(LE_80_ia, "--^", color="magenta", label= "LE_80"); ax_ia_LE.plot(LE_90_ia, "--*", color="blue", label= "LE_90"); ax_ia_LE.plot( delta_z, "--o", color="cyan", label= "GT"); ax_ia_LE.set_xlabel('Fiducial Marker (index)',fontsize= 20); ax_ia_LE.set_ylabel('Error (meters)',fontsize= 20); ax_ia_LE.legend(loc='best', frameon=False); # ax_ia_LE.set_title('IA LE') fname = trials_root + "/GEO_results/IA_LE_" + desc_name + ".pdf" fig_ia_LE.savefig(fname, transparent=True, pad_inches=5) fig_icp_CE = plt.figure() ax_icp_CE = fig_icp_CE.add_subplot(111); plt.hold(True); plt.axis(tight=True); ax_icp_CE.plot(CE_70_icp, "--s", color="green", label= "CE_70"); ax_icp_CE.plot(CE_80_icp, "--^", color="magenta", label= "CE_80"); ax_icp_CE.plot(CE_90_icp, "--*", color="blue", label= "CE_90"); ax_icp_CE.plot( delta_r, "--o", color="cyan", label= "GT"); ax_icp_CE.set_xlabel('Fiducial Marker (index)',fontsize= 20); ax_icp_CE.set_ylabel('Error (meters)',fontsize= 20); ax_icp_CE.legend(loc='best', frameon=False); # ax_icp_CE.set_title('ICP CE') fname = trials_root + "/GEO_results/ICP_CE_" + desc_name + ".pdf" fig_icp_CE.savefig(fname, transparent=True, pad_inches=5) fig_icp_LE = plt.figure() ax_icp_LE = fig_icp_LE.add_subplot(111); plt.hold(True); plt.axis(tight=True); ax_icp_LE.plot(LE_70_icp, "--s", color="green", label= "LE_70"); ax_icp_LE.plot(LE_80_icp, "--^", color="magenta", label= "LE_80"); ax_icp_LE.plot(LE_90_icp, "--*", color="blue", label= "LE_90"); ax_icp_LE.plot( delta_z, "--o", color="cyan", label= "GT"); ax_icp_LE.set_xlabel('Fiducial Marker (index)',fontsize= 20); ax_icp_LE.set_ylabel('Error (meters)',fontsize= 20); ax_icp_LE.legend(loc='best', frameon=False); # ax_icp_LE.set_title('ICP LE') fname = trials_root + "/GEO_results/ICP_LE_" + desc_name + ".pdf" fig_icp_LE.savefig(fname, transparent=True, pad_inches=5) # axT.set_xlim((0,505) ); # axT.set_yticks(np.arange(0.0,250.0,20)); # # axT.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, # # ncol=4, mode="expand", borderaxespad=0.) 
# # figT.savefig("/Users/isa/Experiments/reg3d_eval/downtown_dan/T_error.pdf", transparent=True, pad_inches=5) # plt.show(); # import pdb; pdb.set_trace() def setlogging(logfile=None): level = logging.DEBUG logger = logging.getLogger(__name__) logger.setLevel(level) # create formatter and add it to the handlers formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") # create console handler with a higher log level ch = logging.StreamHandler() ch.setLevel(level) ch.setFormatter(formatter) # add the handlers to logger logger.addHandler(ch) # create file handler which logs error messages if logfile: print "Logging to file" fh = logging.FileHandler(logfile) fh.setLevel(level) fh.setFormatter(formatter) logger.addHandler(fh) #test logging logger.debug("debug message") logger.info("info message") logger.warn("warn message") logger.error("error message") logger.critical("critical message") return logger if __name__ == '__main__': # initialize the parser object: parser = argparse.ArgumentParser(description="Export PLY to PCD file") # define options here: parser.add_argument("-v", "--verbose", action='store', type = bool, dest="verbose", default=True, help="Write debug log to log_file") parser.add_argument("-L", "--log", dest="logfile", help="write debug log to log_file") args = parser.parse_args(argv) # set up logging if args.verbose: status = main(args.logfile) else: status = main() sys.exit(status)
bsd-2-clause
5,998,700,725,163,091,000
37.924581
150
0.568497
false
3.043241
false
false
false
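The accuracy script ultimately transforms homogeneous points and measures vertical and radial offsets against fiducial markers. A numpy-only sketch of that error step, with invented points and a plain translation standing in for the real geo-registration transform:

import numpy as np

def geo_errors(H, points, fiducials):
    """Apply 4x4 transform H to Nx3 points and compare against Nx3 fiducials."""
    pts_h = np.hstack([points, np.ones((len(points), 1))]).T     # 4xN homogeneous
    fid_h = np.hstack([fiducials, np.ones((len(fiducials), 1))]).T
    diff = H.dot(pts_h) - fid_h
    delta_z = np.abs(diff[2, :])                  # vertical error
    delta_r = np.hypot(diff[0, :], diff[1, :])    # horizontal (radial) error
    return delta_z, delta_r

H = np.eye(4); H[:3, 3] = [0.5, 0.0, -0.2]        # simple translation for the demo
pts = np.array([[10.0, 20.0, 5.0], [11.0, 21.0, 6.0]])
fid = pts.copy()
print(geo_errors(H, pts, fid))                    # (array([0.2, 0.2]), array([0.5, 0.5]))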
denizs/torchUp
torchup/logging/logger.py
1
1865
import tensorflow as tf
import numpy as np
import scipy.misc
from tensorboardX.src.summary_pb2 import Summary
from tensorboardX import SummaryWriter
try:
    from StringIO import StringIO
except ImportError:
    from io import BytesIO


class Logger(object):
    def __init__(self, log_dir):
        ''' Create a summary writer logging to log_dir '''
        self.writer = tf.summary.FileWriter(log_dir)
        self.writerX = SummaryWriter(log_dir=log_dir)

    def scalar_summary(self, tag, value, step):
        ''' Log scalar value '''
        summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
        self.writer.add_summary(summary, step)

    def add_image(self, tag, img, step):
        ''' Log img '''
        summary = Summary(value=[Summary.Value(tag=tag, image=img)])
        self.writerX.add_summary(summary, step)

    def histo_summary(self, tag, values, step, bins=1000):
        ''' Log a histogram of the tensor of values. '''
        # Create histogram:
        counts, bin_edges = np.histogram(values, bins=bins)

        # Fill the fields of the histogram proto
        hist = tf.HistogramProto()
        hist.min = float(np.min(values))
        hist.max = float(np.max(values))
        hist.num = int(np.prod(values.shape))
        hist.sum = float(np.sum(values))
        hist.sum_squares = float(np.sum(values**2))

        # Drop the start of the first bin
        bin_edges = bin_edges[1:]

        # Add bin edges and counts:
        for edge in bin_edges:
            hist.bucket_limit.append(edge)
        for c in counts:
            hist.bucket.append(c)

        # Create and write Summary
        summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
        self.writer.add_summary(summary, step)
        self.writer.flush()
bsd-2-clause
-5,471,846,208,124,120,000
28.140625
83
0.604826
false
3.813906
false
false
false
HXLStandard/libhxl-python
hxl/model.py
1
48776
"""Main data-model classes for the Humanitarian Exchange Language (HXL). This module defines the basic classes for working with HXL data. Other modules have classes derived from these (e.g. in [hxl.filters](filters.html) or [hxl.io](io.html)). The core class is [Dataset](#hxl.model.Dataset), which defines the operations available on a HXL dataset, including convenience methods for chaining filters. Typical usage: source = hxl.data("https://example.org/data.csv") # returns a hxl.model.Dataset object result = source.with_lines("#country+name=Kenya").sort() # a filtered/sorted view of the data This code is released into the Public Domain and comes with NO WARRANTY. """ import abc, copy, csv, dateutil, hashlib, json, logging, operator, re, six import hxl logger = logging.getLogger(__name__) class TagPattern(object): """Pattern for matching a HXL hashtag and attributes - the pattern "#*" matches any hashtag/attribute combination - the pattern "#*+foo" matches any hashtag with the foo attribute - the pattern "#tag" matches #tag with any attributes - the pattern "#tag+foo" matches #tag with foo among its attributes - the pattern "#tag-foo" matches #tag with foo *not* among its attributes - the pattern "#tag+foo-bar" matches #tag with foo but not bar - the pattern "#tag+foo+bar!" matches #tag with exactly the attributes foo and bar, but *no others* The normal way to create a tag pattern is using the [parse()](#hxl.model.TagPattern.parse) method rather than the constructor: pattern = hxl.model.TagPattern.parse("#affected+f-children") Args: tag: the basic hashtag (without attributes) include_attributes: a list of attributes that must be present exclude_attributes: a list of attributes that must not be present is_absolute: if True, no attributes are allowed except those in _include_attributes_ """ PATTERN = r'^\s*#?({token}|\*)((?:\s*[+-]{token})*)\s*(!)?\s*$'.format(token=hxl.datatypes.TOKEN_PATTERN) """Constant: regular expression to match a HXL tag pattern. """ def __init__(self, tag, include_attributes=[], exclude_attributes=[], is_absolute=False): self.tag = tag self.include_attributes = set(include_attributes) """Set of all attributes that must be present""" self.exclude_attributes = set(exclude_attributes) """Set of all attributes that must not be present""" self.is_absolute = is_absolute """True if this pattern is absolute (no extra attributes allowed)""" def is_wildcard(self): return self.tag == '#*' def match(self, column): """Check whether a Column matches this pattern. @param column: the column to check @returns: True if the column is a match """ if column.tag and (self.is_wildcard() or self.tag == column.tag): # all include_attributes must be present if self.include_attributes: for attribute in self.include_attributes: if attribute not in column.attributes: return False # all exclude_attributes must be absent if self.exclude_attributes: for attribute in self.exclude_attributes: if attribute in column.attributes: return False # if absolute, then only specified attributes may be present if self.is_absolute: for attribute in column.attributes: if attribute not in self.include_attributes: return False return True else: return False def get_matching_columns(self, columns): """Return a list of columns that match the pattern. @param columns: a list of L{hxl.model.Column} objects @returns: a list (possibly empty) """ result = [] for column in columns: if self.match(column): result.append(column) return result def find_column_index(self, columns): """Get the index of the first matching column. 
@param columns: a list of columns to check @returns: the 0-based index of the first matching column, or None for no match """ for i in range(len(columns)): if self.match(columns[i]): return i return None def find_column(self, columns): """Check whether there is a match in a list of columns.""" for column in columns: if self.match(column): return column return None def __repr__(self): s = self.tag if self.include_attributes: for attribute in self.include_attributes: s += '+' + attribute if self.exclude_attributes: for attribute in self.exclude_attributes: s += '-' + attribute return s __str__ = __repr__ @staticmethod def parse(s): """Parse a single tag-pattern string. pattern = TagPattern.parse("#affected+f-children") The [parse_list()](#hxl.model.TagPattern.parse_list) method will call this method to parse multiple patterns at once. Args: s: the tag-pattern string to parse Returns: A TagPattern object """ if not s: # edge case: null value raise hxl.HXLException('Attempt to parse empty tag pattern') elif isinstance(s, TagPattern): # edge case: already parsed return s result = re.match(TagPattern.PATTERN, s) if result: tag = '#' + result.group(1).lower() include_attributes = set() exclude_attributes = set() attribute_specs = re.split(r'\s*([+-])', result.group(2)) for i in range(1, len(attribute_specs), 2): if attribute_specs[i] == '+': include_attributes.add(attribute_specs[i + 1].lower()) else: exclude_attributes.add(attribute_specs[i + 1].lower()) if result.group(3) == '!': is_absolute = True if exclude_attributes: raise ValueError('Exclusions not allowed in absolute patterns') else: is_absolute = False return TagPattern( tag, include_attributes=include_attributes, exclude_attributes=exclude_attributes, is_absolute=is_absolute ) else: raise hxl.HXLException('Malformed tag: ' + s) @staticmethod def parse_list(specs): """Parse a list of tag-pattern strings. If _specs_ is a list of already-parsed TagPattern objects, do nothing. If it's a list of strings, apply [parse()](#hxl.model.TagPattern.parse) to each one. If it's a single string with multiple patterns separated by commas, split the string, then parse the patterns. patterns = TagPattern.parse_list("#affected+f,#inneed+f") # or patterns = TagPattern.parse_list("#affected+f", "#inneed+f") Args: specs: the raw input (a list of strings, or a single string with commas separating the patterns) Returns: A list of TagPattern objects. """ if not specs: return [] if isinstance(specs, six.string_types): specs = specs.split(',') return [TagPattern.parse(spec) for spec in specs] @staticmethod def match_list(column, patterns): """Test if a column matches any of the patterns in a list. This is convenient to use together with [parse_list()](hxl.model.TagPattern.parse_list): patterns = TagPattern.parse_list(["#affected+f", "#inneed+f"]) if TagPattern.match_list(column, patterns): print("The column matched one of the patterns") Args: column: the column to test patterns: a list of zero or more patterns. Returns: True if there is a match """ for pattern in patterns: if pattern.match(column): return True return False class Dataset(object): """Abstract base class for a HXL data source. Any source of parsed HXL data inherits from this class: that includes Dataset, HXLReader, and the various filters in the hxl.old_filters package. The contract of a Dataset is that it will provide a columns property and a next() method to read through the rows. The child class must implement the columns() method as a property and the __iter__() method to make itself iterable. 
""" __metaclass__ = abc.ABCMeta def __init__(self): """Constructor.""" super().__init__() @abc.abstractmethod def __iter__(self): """Get the iterator over the rows. @returns: an iterator that returns L{hxl.model.Row} objects """ raise RuntimeException("child class must implement __iter__() method") @property def is_cached(self): """Test whether the source data is cached (replayable). By default, this is False, but some subclasses may override. @returns: C{True} if the input is cached (replayable); C{False} otherwise. """ return False @property @abc.abstractmethod def columns(self): """Get the column definitions for the dataset. @returns: a list of Column objects. """ raise RuntimeException("child class must implement columns property method") @property def columns_hash(self): """Generate a hash across all of the columns in the dataset. This function helps detect whether two HXL documents are of the same type, even if they contain different data (e.g. the HXL API output for the same humanitarian dataset in two different months or two different countries). It takes into account text headers, hashtags, the order of attributes, and the order of columns. Whitespace is normalised, and null values are treated as empty strings. The MD5 hash digest is generated from a UTF-8 encoded version of each header. @returns: a 32-character hex-formatted MD5 hash string """ md5 = hashlib.md5() for column in self.columns: md5.update(hxl.datatypes.normalise_space(column.header).encode('utf-8')) for column in self.columns: md5.update(hxl.datatypes.normalise_space(column.display_tag).encode('utf-8')) return md5.hexdigest() @property def data_hash(self): """Generate a hash for the entire dataset. This function allows checking if two HXL datasets are functionally identical. It takes into account text headers, hashtags, the order of attributes, and the order of columns. Whitespace is normalised, and null values are treated as empty strings. The MD5 hash digest is generated from a UTF-8 encoded version of each header and data cell. @returns: a 32-character hex-formatted MD5 hash string """ md5 = hashlib.md5() # text header row for column in self.columns: md5.update(hxl.datatypes.normalise_space(column.header).encode('utf-8')) # hashtag row for column in self.columns: md5.update(hxl.datatypes.normalise_space(column.display_tag).encode('utf-8')) # data rows for row in self: for value in row: md5.update(hxl.datatypes.normalise_space(value).encode('utf-8')) return md5.hexdigest() @property def headers(self): """Return a list of header strings (for a spreadsheet row). """ return [column.header if column else '' for column in self.columns] @property def tags(self): """Get all hashtags (without attributes) as a list @returns: a list of base hashtags for the dataset columns """ return [column.tag if column else '' for column in self.columns] @property def display_tags(self): """Return a list of display tags. @returns: a list of strings containing the hashtag and attributes for each column """ return [column.display_tag if column else '' for column in self.columns] @property def has_headers(self): """Report whether any non-empty header strings exist. @returns: C{True} if there is at least one column with a non-empty header string """ for column in self.columns: if column.header: return True return False @property def values(self): """Get all values for the dataset at once, in an array of arrays. This method can be highly inefficient for large datasets. 
@returns: an array of arrays of scalar values """ return [row.values for row in self] def get_value_set(self, tag_pattern=None, normalise=False): """Return the set of all values in a dataset (optionally matching a tag pattern for a single column) Warning: this method can be highly inefficient for large datasets. @param tag_pattern: (optional) return values only for columns matching this tag pattern. @param normalise: (optional) normalise the strings with hxl.datatypes.normalise (default: False) @returns: a Python set of values """ value_set = set([]) if tag_pattern: tag_pattern = TagPattern.parse(tag_pattern) for row in self: if tag_pattern: new_values = row.get_all(tag_pattern) else: new_values = row.values if normalise: new_values = [hxl.datatypes.normalise(s) for s in new_values] else: new_values = [hxl.datatypes.normalise_space(s) for s in new_values] value_set.update(new_values) return value_set def get_column_indices(self, tag_patterns, columns): """Get a list of indices that match the tag patterns provided @param tag_patterns: a list of tag patterns or a string version of the list @param columns: a list of columns @returns: a (possibly-empty) list of 0-based indices """ patterns = TagPattern.parse_list(tag_patterns) indices = [] for i, column in enumerate(columns): for pattern in patterns: if pattern.match(column): indices.push(i) return indices # # Aggregates # def _get_minmax(self, pattern, op): """Calculate the extreme min/max value for a tag pattern Will iterate through the dataset, and use values from multiple matching columns. Uses numbers, dates, or strings for comparison, based on the first non-empty value found. @param pattern: the L{hxl.model.TagPattern} to match @param op: operator_lt or operator_gt @returns: the extreme value according to operator supplied, or None if no values found """ pattern = TagPattern.parse(pattern) result_raw = None # what's actually in the dataset result_normalised = None # normalised version for comparison # Look at every row for row in self: # Look at every matching value in every row for i, value in enumerate(row.get_all(pattern)): # ignore empty values if hxl.datatypes.is_empty(value): continue # make a normalised value for comparison normalised = hxl.datatypes.normalise(value, row.columns[i]) # first non-empty value is always a match if result_normalised is None: result_raw = value result_normalised = normalised else: # try comparing the normalised types first, then strings on failure try: if op(normalised, result_normalised): result_raw = value result_normalised = normalised except TypeError: if op(str(normalised), str(result_normalised)): result_raw = value result_normalised = normalised return result_raw def min(self, pattern): """Calculate the minimum value for a tag pattern Will iterate through the dataset, and use values from multiple matching columns. Uses numbers, dates, or strings for comparison, based on the first non-empty value found. @param pattern: the L{hxl.model.TagPattern} to match @returns: the minimum value according to the '<' operator, or None if no values found """ return self._get_minmax(pattern, operator.lt) def max(self, pattern): """Calculate the maximum value for a tag pattern Will iterate through the dataset, and use values from multiple matching columns. 
@param pattern: the L{hxl.model.TagPattern} to match @returns: the minimum value according to the '<' operator, or None if no values found """ return self._get_minmax(pattern, operator.gt) # # Utility # def validate(self, schema=None, callback=None): """ Validate the current dataset. @param schema (optional) the pre-compiled schema, schema filename, URL, file object, etc. Defaults to a built-in schema. @param callback (optional) a function to call with each error or warning. Defaults to collecting errors in an array and returning them. """ return hxl.schema(schema, callback).validate(self) def recipe(self, recipe): """Parse a recipe (JSON or a list of dicts) and create the appropriate filters. @param recipe: a list of dicts, a single dict, or a JSON literal string. @return: the new end filter. """ import hxl.filters return hxl.filters.from_recipe(self, recipe) # # Filters # def append(self, append_sources, add_columns=True, queries=[]): """Append additional datasets. @param append_sources: a list of sources to append @param add_columns: if True (default), include any extra columns in the append sources @param queries: a list of row queries to select rows for inclusion from the append sources. @returns: a new HXL source for chaining """ import hxl.filters return hxl.filters.AppendFilter(self, append_sources, add_columns=add_columns, queries=queries) def append_external_list(self, source_list_url, add_columns=True, queries=[]): """Append additional datasets from an external list @param source_list_url: URL of a HXL dataset containing a list of sources to append. @param add_columns: if True (default), include any extra columns in the append sources. @param queries: a list of row queries to select rows for inclusion from the append sources. @returns: a new HXL source for chaining """ import hxl.filters logger.debug("Loading append list from %s...", source_list_url) append_sources = hxl.filters.AppendFilter.parse_external_source_list(source_list_url) logger.debug("Done loading") return hxl.filters.AppendFilter(self, append_sources, add_columns=add_columns, queries=queries) def cache(self): """Add a caching filter to the dataset.""" import hxl.filters return hxl.filters.CacheFilter(self) def dedup(self, patterns=[], queries=[]): """Deduplicate a dataset.""" import hxl.filters return hxl.filters.DeduplicationFilter(self, patterns=patterns, queries=queries) def with_columns(self, includes): """Select matching columns.""" import hxl.filters return hxl.filters.ColumnFilter(self, include_tags=includes) def without_columns(self, excludes=None, skip_untagged=False): """Select non-matching columns.""" import hxl.filters return hxl.filters.ColumnFilter(self, exclude_tags=excludes, skip_untagged=skip_untagged) def with_rows(self, queries, mask=[]): """Select matching rows. @param queries: a predicate or list of predicates for rows to include @param mask: a predicate or list of predicates for rows to test (default: [] to test all) @return: a filtered version of the source """ import hxl.filters return hxl.filters.RowFilter(self, queries=queries, reverse=False, mask=mask) def without_rows(self, queries, mask=[]): """Select non-matching rows. 
@param queries: a predicate or list of predicates for rows to ignore @param mask: a predicate or list of predicates for rows to test (default: [] to test all) @return: a filtered version of the source """ import hxl.filters return hxl.filters.RowFilter(self, queries=queries, reverse=True, mask=mask) def sort(self, keys=None, reverse=False): """Sort the dataset (caching).""" import hxl.filters return hxl.filters.SortFilter(self, tags=keys, reverse=reverse) def count(self, patterns=[], aggregators=None, queries=[]): """Count values in the dataset (caching).""" import hxl.filters return hxl.filters.CountFilter( self, patterns=patterns, aggregators=aggregators, queries=queries ) def row_counter(self, queries=[]): """Count the number of rows while streaming.""" import hxl.filters return hxl.filters.RowCountFilter(self, queries=queries) def replace_data(self, original, replacement, pattern=None, use_regex=False, queries=[]): """Replace values in a HXL dataset.""" import hxl.filters replacement = hxl.filters.ReplaceDataFilter.Replacement(original, replacement, pattern, use_regex) return hxl.filters.ReplaceDataFilter(self, [replacement], queries=queries) def replace_data_map(self, map_source, queries=[]): """Replace values in a HXL dataset.""" import hxl.filters replacements = hxl.filters.ReplaceDataFilter.Replacement.parse_map(hxl.data(map_source)) return hxl.filters.ReplaceDataFilter(self, replacements, queries=queries) def add_columns(self, specs, before=False): """Add fixed-value columns to a HXL dataset.""" import hxl.filters return hxl.filters.AddColumnsFilter(self, specs=specs, before=before) def rename_columns(self, specs): """Changes headers and tags on a column.""" import hxl.filters return hxl.filters.RenameFilter(self, specs) def clean_data( self, whitespace=[], upper=[], lower=[], date=[], date_format=None, number=[], number_format=None, latlon=[], purge=False, queries=[] ): """Clean data fields.""" import hxl.filters return hxl.filters.CleanDataFilter( self, whitespace=whitespace, upper=upper, lower=lower, date=date, date_format=date_format, number=number, number_format=number_format, latlon=latlon, purge=purge, queries=queries ) def merge_data(self, merge_source, keys, tags, replace=False, overwrite=False, queries=[]): """Merges values from a second dataset. @param merge_source: the second HXL data source @param keys: a single tagspec or list of tagspecs for the shared keys @param tags: the tags to copy over from the second dataset @param replace: if True, replace existing columns when present @param overwrite: if True, overwrite individual values in existing columns when available @param queries: optional row queries to control the merge """ import hxl.filters return hxl.filters.MergeDataFilter(self, merge_source, keys, tags, replace, overwrite, queries=queries) def expand_lists(self, patterns=None, separator="|", correlate=False, queries=[]): """Expand lists by repeating rows. By default, applies to every column with a +list attribute, and uses "|" as the separator. @param patterns: a single tag pattern or list of tag patterns for columns to expand @param separator: the list-item separator """ import hxl.filters return hxl.filters.ExpandListsFilter(self, patterns=patterns, separator=separator, correlate=correlate, queries=queries) def explode(self, header_attribute='header', value_attribute='value'): """Explodes a wide dataset into a long datasets. 
@param header_attribute: the attribute to add to the hashtag of the column with the former header (default 'header') @param value_attribute: the attribute to add to the hashtag of the column with the former value (default 'value') @return: filtered dataset. @see hxl.filters.ExplodeFilter """ import hxl.filters return hxl.filters.ExplodeFilter(self, header_attribute, value_attribute) def implode(self, label_pattern, value_pattern): """Implodes a long dataset into a wide dataset @param label_pattern: the tag pattern to match the label column @param value_pattern: the tag pattern to match the @return: filtered dataset. @see hxl.filters.ImplodeFilter """ import hxl.filters return hxl.filters.ImplodeFilter(self, label_pattern=label_pattern, value_pattern=value_pattern) def jsonpath(self, path, patterns=[], queries=[], use_json=True): """Parse the value as a JSON expression and extract data from it. See http://goessner.net/articles/JsonPath/ @param path: a JSONPath expression for extracting data @param patterns: a tag pattern or list of patterns for the columns to use (default to all) @param queries: a predicate or list of predicates for the rows to consider. @param use_json: if True, serialise multiple results as JSON lists. @returns: filtered dataset @see: hxl.filters.JSONPathFilter """ import hxl.filters return hxl.filters.JSONPathFilter(self, path, patterns=patterns, queries=queries, use_json=use_json) def fill_data(self, patterns=[], queries=[]): """Fills empty cells in a column using the last non-empty value. @param patterns: a tag pattern or list of patterns for the columns to fill (default to all) @param queries: a predicate or list of predicates for rows to fill (leave any blank that don't match). @return filtered dataset @see hxl.filters.FillFilter """ import hxl.filters return hxl.filters.FillDataFilter(self, patterns=patterns, queries=queries) # # Generators # def gen_raw(self, show_headers=True, show_tags=True): """Generate an array representation of a HXL dataset, one at a time.""" if show_headers: yield self.headers if show_tags: yield self.display_tags for row in self: yield row.values def gen_csv(self, show_headers=True, show_tags=True): """Generate a CSV representation of a HXL dataset, one row at a time.""" class TextOut: """Simple string output source to capture CSV""" def __init__(self): self.data = '' def write(self, s): self.data += s def get(self): data = self.data self.data = '' return data output = TextOut() writer = csv.writer(output) for raw in self.gen_raw(show_headers, show_tags): writer.writerow(raw) yield output.get() def gen_json(self, show_headers=True, show_tags=True, use_objects=False): """Generate a JSON representation of a HXL dataset, one row at a time.""" is_first = True yield "[\n" if use_objects: for row in self: if is_first: is_first = False yield json.dumps(row.dictionary, sort_keys=True, indent=2) else: yield ",\n" + json.dumps(row.dictionary, sort_keys=True, indent=2) else: for raw in self.gen_raw(show_headers, show_tags): if is_first: is_first = False yield json.dumps(raw) else: yield ",\n" + json.dumps(raw) yield "\n]\n" class Column(object): """ The definition of a logical column in the HXL data. 
""" # Regular expression to match a HXL tag PATTERN = r'^\s*(#{token})((?:\s*\+{token})*)\s*$'.format(token=hxl.datatypes.TOKEN_PATTERN) # To tighten debugging (may reconsider later -- not really a question of memory efficiency here) __slots__ = ['tag', 'attributes', 'attribute_list', 'header', 'column_number'] def __init__(self, tag=None, attributes=(), header=None, column_number=None): """ Initialise a column definition. @param tag: the HXL hashtag for the column (default: None) @param attributes: (optional) a sequence of attributes (default: ()) @param header: (optional) the original plaintext header for the column (default: None) @param column_number: (optional) the zero-based column number """ if tag: tag = tag.lower() self.tag = tag self.header = header self.column_number = column_number self.attributes = set([a.lower() for a in attributes]) self.attribute_list = [a.lower() for a in attributes] # to preserve order @property def display_tag(self): """Default display version of a HXL hashtag. Attributes are not sorted. """ return self.get_display_tag(sort_attributes=False) def get_display_tag(self, sort_attributes=False): """ Generate a display version of the column hashtag @param sort_attributes: if True, sort attributes; otherwise, preserve the original order @return the reassembled HXL hashtag string, including language code """ if self.tag: s = self.tag for attribute in sorted(self.attribute_list) if sort_attributes else self.attribute_list: s += '+' + attribute return s else: return '' def has_attribute(self, attribute): """Check if an attribute is present.""" return (attribute in self.attribute_list) def add_attribute(self, attribute): """Add an attribute to the column.""" if attribute not in self.attributes: self.attributes.add(attribute) self.attribute_list.append(attribute) return self def remove_attribute(self, attribute): """Remove an attribute from the column.""" if attribute in self.attributes: self.attributes.remove(attribute) self.attribute_list.remove(attribute) return self def __hash__(self): """Make columns usable in a dictionary. Only the hashtag and attributes are used. """ hash_value = hash(self.tag) for attribute in self.attributes: hash_value += hash(attribute) return hash_value def __eq__(self, other): """Test for comparison with another object. For equality, only the hashtag and attributes have to be the same.""" try: return (self.tag == other.tag and self.attributes == other.attributes) except: return False def __repr__(self): return self.display_tag __str__ = __repr__ @staticmethod def parse(raw_string, header=None, use_exception=False, column_number=None): """ Attempt to parse a full hashtag specification. @param raw_string: the string representation of the tagspec @param header: the text header to include @param use_exception: if True, throw an exception for a malformed tagspec @returns: None if the string is empty, False if it's malformed (and use_exception is False), or a Column object otherwise """ # Already parsed? if isinstance(raw_string, Column): return raw_string # Empty string? 
if hxl.datatypes.is_empty(raw_string): return None # Pattern for a single tag result = re.match(Column.PATTERN, raw_string) if result: tag = result.group(1) attribute_string = result.group(2) if attribute_string: attributes = re.split(r'\s*\+', attribute_string.strip().strip('+')) else: attributes = [] return Column(tag=tag, attributes=attributes, header=header, column_number=column_number) else: if use_exception: raise hxl.HXLException("Malformed tag expression: " + raw_string) else: logger.debug("Not a HXL hashtag spec: %s", raw_string) return False @staticmethod def parse_spec(raw_string, default_header=None, use_exception=False, column_number=None): """Attempt to parse a single-string header/hashtag spec""" # Already parsed? if isinstance(raw_string, Column): return raw_string matches = re.match(r'^(.*)(#.*)$', raw_string) if matches: header = matches.group(1) if matches.group(1) else default_header return Column.parse(matches.group(2), header=header, column_number=column_number) else: return Column.parse('#' + raw_string, header=default_header, column_number=column_number) class Row(object): """ An iterable row of values in a HXL dataset. """ # Predefine the slots for efficiency (may reconsider later) __slots__ = ['columns', 'values', 'row_number', 'source_row_number'] def __init__(self, columns, values=[], row_number=None, source_row_number=None): """ Set up a new row. @param columns: The column definitions (array of Column objects). @param values: (optional) The string values for the row (default: []) @param row_number: (optional) The zero-based logical row number in the input dataset, if available (default: None) @param source_row_number: (optional) The zero-based source row number in the input dataset, if available (default: None) """ self.columns = columns self.values = copy.copy(values) self.row_number = row_number self.source_row_number = source_row_number def append(self, value): """ Append a value to the row. @param value The new value to append. @return The new value """ self.values.append(value) return value def get(self, tag, index=None, default=None, parsed=False): """ Get a single value for a tag in a row. If no index is provided ("None"), return the first non-empty value. @param tag: A TagPattern or a string value for a tag. @param index: The zero-based index if there are multiple values for the tag (default: None) @param default: The default value if not found (default: None). Never parsed, even if parsed=True @param parsed: If true, use attributes as hints to try to parse the value (e.g. number, list, date) @return The value found, or the default value provided. If parsed=True, the return value will be a list (default: False) """ # FIXME - move externally, use for get_all as well, and support numbers and dates def parse(column, value): if parsed: if column.has_attribute('list'): return re.split(r'\s*,\s*', value) else: return [value] return value if type(tag) is TagPattern: pattern = tag else: pattern = TagPattern.parse(tag) for i, column in enumerate(self.columns): if i >= len(self.values): break if pattern.match(column): if index is None: # None (the default) is a special case: it means look # for the first truthy value if self.values[i]: return parse(column, self.values[i]) else: # Otherwise, look for a specific index if index == 0: return parse(column, self.values[i]) else: index = index - 1 return default def get_all(self, tag, default=None): """ Get all values for a specific tag in a row @param tag A TagPattern or a string value for a tag. 
@return An array of values for the HXL hashtag. """ if type(tag) is TagPattern: pattern = tag else: pattern = TagPattern.parse(tag) result = [] for i, column in enumerate(self.columns): if i >= len(self.values): break if pattern.match(column): value = self.values[i] if default is not None and not value: value = default result.append(value) return result def key(self, patterns=None, indices=None): """Generate a unique key tuple for the row, based on a list of tag patterns @param patterns: a list of L{TagPattern} objects, or a parseable string @returns: the key as a tuple (might be empty) """ key = [] # if the user doesn't provide indices, get indices from the pattern if not indices and patterns: indices = get_column_indices(patterns, self.columns) if indices: # if we have indices, use them to build the key for i in indices: if i < len(self.values): key.append(hxl.datatypes.normalise(self.values[i], self.columns[i])) else: # if there are still no indices, use the whole row for the key for i, value in enumerate(self.values): key.append(hxl.datatypes.normalise(value, self.columns[i])) return tuple(key) # make it into a tuple so that it's hashable @property def dictionary(self): """Return the row as a Python dict. The keys will be HXL hashtags and attributes, normalised per HXL 1.1. If two or more columns have the same hashtags and attributes, only the first will be included. @return: The row as a Python dictionary. """ data = {} for i, col in enumerate(self.columns): key = col.get_display_tag(sort_attributes=True) if key and (not key in data) and (i < len(self.values)): data[key] = self.values[i] return data def __getitem__(self, index): """ Array-access method to make this class iterable. @param index The zero-based index of a value to look up. @return The value if it exists. @exception IndexError if the index is out of range. """ return self.values[index] def __str__(self): """ Create a string representation of a row for debugging. """ s = '<Row'; for column_number, value in enumerate(self.values): s += "\n " + str(self.columns[column_number]) + "=" + str(value) s += "\n>" return s class RowQuery(object): """Query to execute against a row of HXL data.""" def __init__(self, pattern, op, value, is_aggregate=False): """Constructor @param pattern: the L{TagPattern} to match in the row @param op: the operator function to use for comparison @param value: the value to compare against @param is_aggregate: if True, the value is a special placeholder like "min" or "max" that needs to be calculated """ self.pattern = TagPattern.parse(pattern) self.op = op self.value = value # if the value is a formula, extract it self.formula = None result = re.match(r'^{{(.+)}}$', hxl.datatypes.normalise_space(value)) if result: self.formula = result.group(1) self.is_aggregate=is_aggregate self.needs_aggregate = False """Need to calculate an aggregate value""" if is_aggregate: self.needs_aggregate = True # calculate later self.date_value = None self.number_value = None self._saved_indices = None def calc_aggregate(self, dataset): """Calculate the aggregate value that we need for the row query Substitute the special values "min" and "max" with aggregates. 
@param dataset: the HXL dataset to use (must be cached) """ if not self.needs_aggregate: logger.warning("no aggregate calculation needed") return # no need to calculate if not dataset.is_cached: raise HXLException("need a cached dataset for calculating an aggregate value") if self.value == 'min': self.value = dataset.min(self.pattern) self.op = operator.eq elif self.value == 'max': self.value = dataset.max(self.pattern) self.op = operator.eq elif self.value == 'not min': self.value = dataset.min(self.pattern) self.op = operator.ne elif self.value == 'not max': self.value = dataset.max(self.pattern) self.op = operator.ne else: raise HXLException("Unrecognised aggregate: {}".format(value)) self.needs_aggregate = False def match_row(self, row): """Check if a key-value pair appears in a HXL row""" # fail if we need an aggregate and haven't calculated it if self.needs_aggregate and not self.aggregate_is_calculated: raise HXLException("must call calc_aggregate before matching an 'is min' or 'is max' condition") # initialise is this is the first time matching for the row query if self._saved_indices is None or self.formula: # if it's a row formula, evaluate first if self.formula: value = hxl.formulas.eval.eval(row, self.formula) else: value = self.value if self.pattern.tag == '#date': try: self.date_value = hxl.datatypes.normalise_date(value) except ValueError: self.date_value = None try: self.number_value = hxl.datatypes.normalise_number(value) except ValueError: self.number_value = None self.string_value = hxl.datatypes.normalise_string(value) # try all the matching column values indices = self._get_saved_indices(row.columns) for i in indices: if i < len(row.values) and self.match_value(row.values[i], self.op): return True return False def match_value(self, value, op): """Try matching as dates, then as numbers, then as simple strings""" if self.date_value is not None: try: return op(hxl.datatypes.normalise_date(value), self.date_value) except ValueError: pass if self.number_value is not None: try: return op(hxl.datatypes.normalise_number(value), self.number_value) except: pass return self.op(hxl.datatypes.normalise_string(value), self.string_value) def _get_saved_indices(self, columns): """Cache the column tests, so that we run them only once.""" # FIXME - assuming that the columns never change self._saved_indices = [] for i in range(len(columns)): if self.pattern.match(columns[i]): self._saved_indices.append(i) return self._saved_indices @staticmethod def parse(query): """Parse a filter expression""" if isinstance(query, RowQuery): # already parsed return query parts = re.split(r'([<>]=?|!?=|!?~|\bis\b)', hxl.datatypes.normalise_string(query), maxsplit=1) pattern = TagPattern.parse(parts[0]) op_name = hxl.datatypes.normalise_string(parts[1]) op = RowQuery.OPERATOR_MAP.get(op_name) value = hxl.datatypes.normalise_string(parts[2]) is_aggregate = False # special handling for aggregates (FIXME) if op_name == 'is' and value in ('min', 'max', 'not min', 'not max'): is_aggregate = True return RowQuery(pattern, op, value, is_aggregate) @staticmethod def parse_list(queries): """Parse a single query spec or a list of specs.""" if queries: if not hasattr(queries, '__len__') or isinstance(queries, six.string_types): # make a list if needed queries = [queries] return [hxl.model.RowQuery.parse(query) for query in queries] else: return [] @staticmethod def match_list(row, queries=None, reverse=False): """See if any query in a list matches a row.""" if not queries: # no queries = pass return True else: # 
otherwise, must match at least one for query in queries: if query.match_row(row): return not reverse return reverse @staticmethod def operator_re(s, pattern): """Regular-expression comparison operator.""" return re.search(pattern, s) @staticmethod def operator_nre(s, pattern): """Regular-expression negative comparison operator.""" return not re.search(pattern, s) @staticmethod def operator_is(s, condition): """Advanced tests Note: this won't be called for aggregate values like "is min" or "is not max"; for these, the aggregate will already be calculated, and a simple comparison operator substituted by L{calc_aggregate}. """ if condition == 'empty': return hxl.datatypes.is_empty(s) elif condition == 'not empty': return not hxl.datatypes.is_empty(s) elif condition == 'number': return hxl.datatypes.is_number(s) elif condition == 'not number': return not hxl.datatypes.is_number(s) elif condition == 'date': return (hxl.datatypes.is_date(s)) elif condition == 'not date': return (hxl.datatypes.is_date(s) is False) else: raise hxl.HXLException('Unknown is condition: {}'.format(condition)) # Constant map of comparison operators OPERATOR_MAP = { '=': operator.eq, '!=': operator.ne, '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, } # Static functions def get_column_indices(tag_patterns, columns): """Get a list of column indices that match the tag patterns provided @param tag_patterns: a list of tag patterns or a string version of the list @param columns: a list of columns @returns: a (possibly-empty) list of 0-based indices """ tag_patterns = TagPattern.parse_list(tag_patterns) columns = [Column.parse(column) for column in columns] indices = [] for i, column in enumerate(columns): for pattern in tag_patterns: if pattern.match(column): indices.append(i) return indices # Extra static initialisation RowQuery.OPERATOR_MAP['~'] = RowQuery.operator_re RowQuery.OPERATOR_MAP['!~'] = RowQuery.operator_nre RowQuery.OPERATOR_MAP['is'] = RowQuery.operator_is # end
unlicense
-8,416,026,671,488,264,000
38.114675
143
0.603247
false
4.380029
false
false
false
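For orientation, the hxl.model record above defines TagPattern, Column, Dataset and Row; a minimal usage sketch follows, assuming the libhxl package that ships this module is installed and importable as hxl (everything else below comes straight from the class definitions shown in the record).

# Minimal usage sketch for the hxl.model classes listed in the record above.
# Assumes the libhxl package is installed ("pip install libhxl").
import hxl.model as model

column = model.Column.parse("#affected+f+children", header="Affected girls")

# "+f" must be present, "+adults" must be absent
print(model.TagPattern.parse("#affected+f-adults").match(column))   # True

# absolute pattern ("!"): only the listed attributes are allowed
print(model.TagPattern.parse("#affected+f!").match(column))         # False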
KDD-OpenSource/geox-young-academy
day-3/Kalman-filter_Mark.py
1
1494
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 11 10:10:24 2017

@author: Mark
"""
import numpy as np
import matplotlib.pyplot as plt

# Define functions
def model(state_0, A, B):
    """Propagate the true state one step with additive model noise."""
    state_1 = A*state_0 + np.random.normal(0, B)
    return state_1

state_null = np.random.normal(0, 0.4)

def observation_function(state, R):
    """Observe the state with additive observation noise."""
    obs = state + np.random.normal(0, R)
    return obs

def forecast(state_0, cov_0, A, B):
    """Forecast step: propagate mean and covariance."""
    state_1 = A*state_0
    cov_1 = A*cov_0*A + B
    return state_1, cov_1

def analysis_formulas(state_1_hat, cov_1_hat, K, H, obs_0):
    """Analysis step: correct the forecast with the observation."""
    state_1 = state_1_hat - K*(H*state_1_hat - obs_0)
    cov_1 = cov_1_hat - K*H*cov_1_hat
    return state_1, cov_1

def kalman_gain(cov_1_hat, H, R):
    K = cov_1_hat*H*(R + H*cov_1_hat*H)**(-1)
    return K

# Initialize model parameters
A = 0.5
H = 1
B = 0.5
R = 0.1
lev = 100

# Synthetic model (truth run and noisy observations)
STATE_real = np.zeros(lev)
OBS_real = np.zeros(lev)
STATE_real[0] = np.random.normal(5, 0.1)
OBS_real[0] = observation_function(STATE_real[0], R)
for i in range(1, lev):  # fill every index, including the last one
    STATE_real[i] = model(STATE_real[i-1], 0.4, 0.01)
    OBS_real[i] = observation_function(STATE_real[i], R)

# Kalman filter
STATE = np.zeros(lev)
COV = np.zeros(lev)
STATE[0] = state_null
COV[0] = B
for i in range(1, lev):
    (state_hat, cov_hat) = forecast(STATE[i-1], COV[i-1], A, B)
    K = kalman_gain(cov_hat, H, R)
    (STATE[i], COV[i]) = analysis_formulas(state_hat, cov_hat, K, H, OBS_real[i])

plt.plot(STATE, label='Kalman estimate')
plt.plot(STATE_real, label='true state')
plt.legend()
plt.show()
mit
167,307,088,535,886,000
21.34375
76
0.613788
false
2.270517
false
false
false
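For reference, the forecast/analysis functions in the Kalman-filter script above implement the standard scalar recursions; the short check below re-derives one step by hand with example numbers (the concrete values are arbitrary and only illustrative).

# One hand-worked step of the scalar Kalman filter above (example numbers only).
A, H, B, R = 0.5, 1.0, 0.5, 0.1
x_prev, P_prev = 1.0, 0.2

x_hat = A * x_prev                      # forecast mean:       x_hat = A x
P_hat = A * P_prev * A + B              # forecast covariance: P_hat = A P A + B
K = P_hat * H / (R + H * P_hat * H)     # Kalman gain
y = 0.8                                 # example observation
x_new = x_hat - K * (H * x_hat - y)     # analysis mean
P_new = P_hat - K * H * P_hat           # analysis covariance
print(x_new, P_new)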
Brazelton-Lab/lab_scripts
edit-esom-class-file.py
1
1891
#! /usr/bin/env python

"""
edit user-provided ESOM class file with new assignments in user-provided file

each line of user-provided file of new assignments should contain a data point number and a class number, separated by tabs

usage:
python edit-esom-class-file.py esom.cls new-assignments.tsv new-class-filename.cls

Copyright:

    edit-esom-class-file.py Append user data to ESOM class file
    Copyright (C) 2016 William Brazelton

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program. If not, see <http://www.gnu.org/licenses/>.
"""

import sys

cls_file = sys.argv[1]
user_file = sys.argv[2]
new_file = sys.argv[3]

# create dictionary of user-provided new assignments:
d = {}
with open(user_file) as user:
    for line in user:
        cols = line.split('\t')
        data_point = cols[0].strip()
        cls_number = cols[1].strip()
        d[data_point] = cls_number.strip('\n')

# iterate through class file, writing new class file with new assignments:
with open(new_file, 'w') as new:
    with open(cls_file) as cls:
        for line in cls:
            if line[0] == '%':
                new.write(line)
            else:
                cols = line.split('\t')
                if cols[0] in d:
                    new.write(str(cols[0]) + '\t' + str(d[cols[0]]) + '\n')
                else:
                    new.write(line)

print('WARNING: if you introduced new classes to this .cls file, you need to manually add them to the header of this new .cls file')
gpl-2.0
-7,825,894,047,534,338,000
31.603448
131
0.710206
false
3.501852
false
false
false
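The assignment file read by the script above is plain tab-separated text; a small sketch of that format and of the lookup dictionary the script builds is shown below (the data-point and class numbers are example values only).

# Example of the tab-separated assignments the script above expects:
# <data point number> TAB <class number>, one pair per line.
example_lines = ["12\t3\n", "57\t1\n"]

d = {}
for line in example_lines:
    cols = line.split('\t')
    d[cols[0].strip()] = cols[1].strip()

print(d)   # {'12': '3', '57': '1'}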
xhqu1981/custodian
custodian/qchem/handlers.py
1
25246
# coding: utf-8 from __future__ import unicode_literals, division import shutil import time """ This module implements error handlers for QChem runs. Currently tested only for B3LYP DFT jobs. """ import copy import glob import json import logging import os import re import tarfile from pymatgen.core.structure import Molecule from pymatgen.io.qchem import QcOutput, QcInput, QcTask from custodian.custodian import ErrorHandler __author__ = "Xiaohui Qu" __version__ = "0.1" __maintainer__ = "Xiaohui Qu" __email__ = "[email protected]" __status__ = "Alpha" __date__ = "12/04/13" class QChemErrorHandler(ErrorHandler): """ Error handler for QChem Jobs. Currently tested only for B3LYP DFT jobs generated by pymatgen. """ def __init__(self, input_file="mol.qcinp", output_file="mol.qcout", ex_backup_list=(), rca_gdm_thresh=1.0E-3, scf_max_cycles=200, geom_max_cycles=200, qchem_job=None): """ Initializes the error handler from a set of input and output files. Args: input_file (str): Name of the QChem input file. output_file (str): Name of the QChem output file. ex_backup_list ([str]): List of the files to backup in addition to input and output file. rca_gdm_thresh (float): The threshold for the prior scf algorithm. If last deltaE is larger than the threshold try RCA_DIIS first, else, try DIIS_GDM first. scf_max_cycles (int): The max iterations to set to fix SCF failure. geom_max_cycles (int): The max iterations to set to fix geometry optimization failure. qchem_job (QchemJob): the managing object to run qchem. """ self.input_file = input_file self.output_file = output_file self.ex_backup_list = ex_backup_list self.rca_gdm_thresh = rca_gdm_thresh self.scf_max_cycles = scf_max_cycles self.geom_max_cycles = geom_max_cycles self.outdata = None self.qcinp = None self.error_step_id = None self.errors = None self.fix_step = None self.qchem_job = qchem_job def check(self): # Checks output file for errors. 
self.outdata = QcOutput(self.output_file).data self.qcinp = QcInput.from_file(self.input_file) self.error_step_id = None self.errors = None self.fix_step = None for i, od in enumerate(self.outdata): if od["has_error"]: self.error_step_id = i self.fix_step = self.qcinp.jobs[i] self.errors = sorted(list(set(od["errors"]))) return True return False def correct(self): self.backup() actions = [] error_rankings = ("pcm_solvent deprecated", "autoz error", "No input text", "Killed", "Insufficient static memory", "Not Enough Total Memory", "NAN values", "Bad SCF convergence", "Geometry optimization failed", "Freq Job Too Small", "Exit Code 134", "Molecular charge is not found", "Molecular spin multipilicity is not found" ) e = self.errors[0] for prio_error in error_rankings: if prio_error in self.errors: e = prio_error break if e == "autoz error": if "sym_ignore" not in self.fix_step.params["rem"]: self.fix_step.disable_symmetry() actions.append("disable symmetry") else: return {"errors": self.errors, "actions": None} elif e == "Bad SCF convergence": act = self.fix_scf() if act: actions.append(act) else: return {"errors": self.errors, "actions": None} elif e == "Geometry optimization failed": act = self.fix_geom_opt() if act: actions.append(act) else: return {"errors": self.errors, "actions": None} elif e == "NAN values": if "xc_grid" not in self.fix_step.params["rem"]: self.fix_step.set_dft_grid(128, 302) actions.append("use tighter grid") else: return {"errors": self.errors, "actions": None} elif e == "No input text": if "sym_ignore" not in self.fix_step.params["rem"]: self.fix_step.disable_symmetry() actions.append("disable symmetry") else: # This indicates something strange occured on the # compute node. Wait for 30 minutes, such that it # won't run too fast to make all the jobs fail if "PBS_JOBID" in os.environ and ("edique" in os.environ["PBS_JOBID"] or "hopque" in os.environ["PBS_JOBID"]): time.sleep(30.0 * 60.0) return {"errors": self.errors, "actions": None} elif e == "Freq Job Too Small": natoms = len(self.fix_step.mol) if "cpscf_nseg" not in self.fix_step.params["rem"] or \ self.fix_step.params["rem"]["cpscf_nseg"] != natoms: self.fix_step.params["rem"]["cpscf_nseg"] = natoms actions.append("use {} segment in CPSCF".format(natoms)) else: return {"errors": self.errors, "actions": None} elif e == "pcm_solvent deprecated": solvent_params = self.fix_step.params.pop("pcm_solvent", None) if solvent_params is not None: self.fix_step.params["solvent"] = solvent_params actions.append("use keyword solvent instead") else: return {"errors": self.errors, "actions": None} elif e == "Exit Code 134": act = self.fix_error_code_134() if act: actions.append(act) else: return {"errors": self.errors, "actions": None} elif e == "Killed": act = self.fix_error_killed() if act: actions.append(act) else: return {"errors": self.errors, "actions": None} elif e == "Insufficient static memory": act = self.fix_insufficient_static_memory() if act: actions.append(act) else: return {"errors": self.errors, "actions": None} elif e == "Not Enough Total Memory": act = self.fix_not_enough_total_memory() if act: actions.append(act) else: return {"errors": self.errors, "actions": None} elif e == "Molecular charge is not found": return {"errors": self.errors, "actions": None} elif e == "Molecular spin multipilicity is not found": return {"errors": self.errors, "actions": None} else: return {"errors": self.errors, "actions": None} self.qcinp.write_file(self.input_file) return {"errors": self.errors, "actions": actions} def 
fix_not_enough_total_memory(self): if self.fix_step.params['rem']["jobtype"] in ["freq", "nmr"]: ncpu = 1 if "-np" in self.qchem_job.current_command: cmd = self.qchem_job.current_command ncpu = int(cmd[cmd.index("-np") + 1]) natoms = len(self.qcinp.jobs[0].mol) times_ncpu_full = int(natoms/ncpu) nsegment_full = ncpu * times_ncpu_full times_ncpu_half = int(natoms/(ncpu/2)) nsegment_half = int((ncpu/2) * times_ncpu_half) if "cpscf_nseg" not in self.fix_step.params["rem"]: self.fix_step.params["rem"]["cpscf_nseg"] = nsegment_full return "Use {} CPSCF segments".format(nsegment_full) elif self.fix_step.params["rem"]["cpscf_nseg"] < nsegment_half: self.qchem_job.select_command("half_cpus", self.qcinp) self.fix_step.params["rem"]["cpscf_nseg"] = nsegment_half return "Use half CPUs and {} CPSCF segments".format(nsegment_half) return None elif not self.qchem_job.is_openmp_compatible(self.qcinp): if self.qchem_job.current_command_name != "half_cpus": self.qchem_job.select_command("half_cpus", self.qcinp) return "half_cpus" else: return None def fix_error_code_134(self): if "thresh" not in self.fix_step.params["rem"]: self.fix_step.set_integral_threshold(thresh=12) return "use tight integral threshold" elif not (self.qchem_job.is_openmp_compatible(self.qcinp) and self.qchem_job.command_available("openmp")): if self.qchem_job.current_command_name != "half_cpus": self.qchem_job.select_command("half_cpus", self.qcinp) return "half_cpus" else: if self.fix_step.params['rem']["jobtype"] in ["freq", "nmr"]: act = self.fix_not_enough_total_memory() return act return None elif self.qchem_job.current_command_name != "openmp": self.qchem_job.select_command("openmp", self.qcinp) return "openmp" else: if self.fix_step.params['rem']["jobtype"] in ["freq", "nmr"]: act = self.fix_not_enough_total_memory() return act return None def fix_insufficient_static_memory(self): if not (self.qchem_job.is_openmp_compatible(self.qcinp) and self.qchem_job.command_available("openmp")): if self.qchem_job.current_command_name != "half_cpus": self.qchem_job.select_command("half_cpus", self.qcinp) return "half_cpus" elif not self.qchem_job.large_static_mem: self.qchem_job.large_static_mem = True # noinspection PyProtectedMember self.qchem_job._set_qchem_memory(self.qcinp) return "Increase Static Memory" else: return None elif self.qchem_job.current_command_name != "openmp": self.qchem_job.select_command("openmp", self.qcinp) return "Use OpenMP" elif not self.qchem_job.large_static_mem: self.qchem_job.large_static_mem = True # noinspection PyProtectedMember self.qchem_job._set_qchem_memory(self.qcinp) return "Increase Static Memory" else: return None def fix_error_killed(self): if not (self.qchem_job.is_openmp_compatible(self.qcinp) and self.qchem_job.command_available("openmp")): if self.qchem_job.current_command_name != "half_cpus": self.qchem_job.select_command("half_cpus", self.qcinp) return "half_cpus" else: return None elif self.qchem_job.current_command_name != "openmp": self.qchem_job.select_command("openmp", self.qcinp) return "Use OpenMP" else: return None def fix_scf(self): comments = self.fix_step.params.get("comment", "") scf_pattern = re.compile(r"<SCF Fix Strategy>(.*)</SCF Fix " r"Strategy>", flags=re.DOTALL) old_strategy_text = re.findall(scf_pattern, comments) if len(old_strategy_text) > 0: old_strategy_text = old_strategy_text[0] od = self.outdata[self.error_step_id] if "Negative Eigen" in self.errors: if "thresh" not in self.fix_step.params["rem"]: self.fix_step.set_integral_threshold(thresh=12) return "use tight 
integral threshold" elif int(self.fix_step.params["rem"]["thresh"]) < 14: self.fix_step.set_integral_threshold(thresh=14) return "use even tighter integral threshold" if len(od["scf_iteration_energies"]) == 0 \ or len(od["scf_iteration_energies"][-1]) <= 10: if 'Exit Code 134' in self.errors: # immature termination of SCF return self.fix_error_code_134() else: return None if od["jobtype"] in ["opt", "ts", "aimd"] \ and len(od["molecules"]) >= 2: strategy = "reset" elif len(old_strategy_text) > 0: strategy = json.loads(old_strategy_text) strategy["current_method_id"] += 1 else: strategy = dict() scf_iters = od["scf_iteration_energies"][-1] if scf_iters[-1][1] >= self.rca_gdm_thresh: strategy["methods"] = ["increase_iter", "rca_diis", "gwh", "gdm", "rca", "core+rca", "fon"] strategy["current_method_id"] = 0 else: strategy["methods"] = ["increase_iter", "diis_gdm", "gwh", "rca", "gdm", "core+gdm", "fon"] strategy["current_method_id"] = 0 strategy["version"] = 2.0 # noinspection PyTypeChecker if strategy == "reset": self.fix_step.set_scf_algorithm_and_iterations( algorithm="diis", iterations=self.scf_max_cycles) if self.error_step_id > 0: self.set_scf_initial_guess("read") else: self.set_scf_initial_guess("sad") if od["jobtype"] in ["opt", "ts"]: self.set_last_input_geom(od["molecules"][-1]) else: assert od["jobtype"] == "aimd" from pymatgen.io.qchem import QcNucVeloc from pymatgen.io.xyz import XYZ scr_dir = od["scratch_dir"] qcnv_filepath = os.path.join(scr_dir, "AIMD", "NucVeloc") qc_md_view_filepath = os.path.join(scr_dir, "AIMD", "View.xyz") qcnv = QcNucVeloc(qcnv_filepath) qc_md_view = XYZ.from_file(qc_md_view_filepath) assert len(qcnv.velocities) == len(qc_md_view.all_molecules) aimd_steps = self.fix_step.params["rem"]["aimd_steps"] elapsed_steps = len(qc_md_view.all_molecules) remaining_steps = aimd_steps - elapsed_steps + 1 self.fix_step.params["rem"]["aimd_steps"] = remaining_steps self.set_last_input_geom(qc_md_view.molecule) self.fix_step.set_velocities(qcnv.velocities[-1]) self.fix_step.params["rem"].pop("aimd_init_veloc", None) traj_num = max([0] + [int(f.split(".")[1]) for f in glob.glob("traj_View.*.xyz")]) dest_view_filename = "traj_View.{}.xyz".format(traj_num + 1) dest_nv_filename = "traj_NucVeloc.{}.txt".format(traj_num + 1) logging.info("Backing up trajectory files to {} and {}." 
.format(dest_view_filename, dest_nv_filename)) shutil.copy(qc_md_view_filepath, dest_view_filename) shutil.copy(qcnv_filepath, dest_nv_filename) if len(old_strategy_text) > 0: comments = scf_pattern.sub("", comments) self.fix_step.params["comment"] = comments if len(comments.strip()) == 0: self.fix_step.params.pop("comment") return "reset" elif strategy["current_method_id"] > len(strategy["methods"])-1: return None else: # noinspection PyTypeChecker method = strategy["methods"][strategy["current_method_id"]] if method == "increase_iter": self.fix_step.set_scf_algorithm_and_iterations( algorithm="diis", iterations=self.scf_max_cycles) self.set_scf_initial_guess("sad") elif method == "rca_diis": self.fix_step.set_scf_algorithm_and_iterations( algorithm="rca_diis", iterations=self.scf_max_cycles) self.set_scf_initial_guess("sad") elif method == "gwh": self.fix_step.set_scf_algorithm_and_iterations( algorithm="diis", iterations=self.scf_max_cycles) self.set_scf_initial_guess("gwh") elif method == "gdm": self.fix_step.set_scf_algorithm_and_iterations( algorithm="gdm", iterations=self.scf_max_cycles) self.set_scf_initial_guess("sad") elif method == "rca": self.fix_step.set_scf_algorithm_and_iterations( algorithm="rca", iterations=self.scf_max_cycles) self.set_scf_initial_guess("sad") elif method == "core+rca": self.fix_step.set_scf_algorithm_and_iterations( algorithm="rca", iterations=self.scf_max_cycles) self.set_scf_initial_guess("core") elif method == "diis_gdm": self.fix_step.set_scf_algorithm_and_iterations( algorithm="diis_gdm", iterations=self.scf_max_cycles) self.fix_step.set_scf_initial_guess("sad") elif method == "core+gdm": self.fix_step.set_scf_algorithm_and_iterations( algorithm="gdm", iterations=self.scf_max_cycles) self.set_scf_initial_guess("core") elif method == "fon": self.fix_step.set_scf_algorithm_and_iterations( algorithm="diis", iterations=self.scf_max_cycles) self.set_scf_initial_guess("sad") natoms = len(od["molecules"][-1]) self.fix_step.params["rem"]["occupations"] = 2 self.fix_step.params["rem"]["fon_norb"] = int(natoms * 0.618) self.fix_step.params["rem"]["fon_t_start"] = 300 self.fix_step.params["rem"]["fon_t_end"] = 300 self.fix_step.params["rem"]["fon_e_thresh"] = 6 self.fix_step.set_integral_threshold(14) self.fix_step.set_scf_convergence_threshold(7) else: raise ValueError("fix method " + method + " is not supported") strategy_text = "<SCF Fix Strategy>" strategy_text += json.dumps(strategy, indent=4, sort_keys=True) strategy_text += "</SCF Fix Strategy>" if len(old_strategy_text) > 0: comments = scf_pattern.sub(strategy_text, comments) else: comments += "\n" + strategy_text self.fix_step.params["comment"] = comments return method def set_last_input_geom(self, new_mol): for i in range(self.error_step_id, -1, -1): qctask = self.qcinp.jobs[i] if isinstance(qctask.mol, Molecule): qctask.mol = copy.deepcopy(new_mol) def set_scf_initial_guess(self, guess="sad"): if "scf_guess" not in self.fix_step.params["rem"] \ or self.error_step_id > 0 \ or self.fix_step.params["rem"]["scf_guess"] != "read": self.fix_step.set_scf_initial_guess(guess) def fix_geom_opt(self): comments = self.fix_step.params.get("comment", "") geom_pattern = re.compile(r"<Geom Opt Fix Strategy>(.*)" r"</Geom Opt Fix Strategy>", flags=re.DOTALL) old_strategy_text = re.findall(geom_pattern, comments) if len(old_strategy_text) > 0: old_strategy_text = old_strategy_text[0] od = self.outdata[self.error_step_id] if 'Lamda Determination Failed' in self.errors and len(od["molecules"])>=2: 
self.fix_step.set_scf_algorithm_and_iterations( algorithm="diis", iterations=self.scf_max_cycles) if self.error_step_id > 0: self.set_scf_initial_guess("read") else: self.set_scf_initial_guess("sad") self.set_last_input_geom(od["molecules"][-1]) if od["jobtype"] == "aimd": aimd_steps = self.fix_step.params["rem"]["aimd_steps"] elapsed_steps = len(od["molecules"]) - 1 remaining_steps = aimd_steps - elapsed_steps + 1 self.fix_step.params["rem"]["aimd_steps"] = remaining_steps if len(old_strategy_text) > 0: comments = geom_pattern.sub("", comments) self.fix_step.params["comment"] = comments if len(comments.strip()) == 0: self.fix_step.params.pop("comment") return "reset" if len(od["molecules"]) <= 10: # immature termination of geometry optimization if 'Exit Code 134' in self.errors: return self.fix_error_code_134() else: return None if len(old_strategy_text) > 0: strategy = json.loads(old_strategy_text) strategy["current_method_id"] += 1 else: strategy = dict() strategy["methods"] = ["increase_iter", "GDIIS", "CartCoords"] strategy["current_method_id"] = 0 if strategy["current_method_id"] > len(strategy["methods"]) - 1: return None else: method = strategy["methods"][strategy["current_method_id"]] if method == "increase_iter": self.fix_step.set_geom_max_iterations(self.geom_max_cycles) self.set_last_input_geom(od["molecules"][-1]) elif method == "GDIIS": self.fix_step.set_geom_opt_use_gdiis(subspace_size=5) self.fix_step.set_geom_max_iterations(self.geom_max_cycles) self.set_last_input_geom(od["molecules"][-1]) elif method == "CartCoords": self.fix_step.set_geom_opt_coords_type("cartesian") self.fix_step.set_geom_max_iterations(self.geom_max_cycles) self.fix_step.set_geom_opt_use_gdiis(0) self.set_last_input_geom(od["molecules"][-1]) else: raise ValueError("fix method" + method + "is not supported") strategy_text = "<Geom Opt Fix Strategy>" strategy_text += json.dumps(strategy, indent=4, sort_keys=True) strategy_text += "</Geom Opt Fix Strategy>" if len(old_strategy_text) > 0: comments = geom_pattern.sub(strategy_text, comments) else: comments += "\n" + strategy_text self.fix_step.params["comment"] = comments return method def backup(self): error_num = max([0] + [int(f.split(".")[1]) for f in glob.glob("error.*.tar.gz")]) filename = "error.{}.tar.gz".format(error_num + 1) logging.info("Backing up run to {}.".format(filename)) tar = tarfile.open(filename, "w:gz") bak_list = [self.input_file, self.output_file] + \ list(self.ex_backup_list) for f in bak_list: if os.path.exists(f): tar.add(f) tar.close() def as_dict(self): return {"@module": self.__class__.__module__, "@class": self.__class__.__name__, "input_file": self.input_file, "output_file": self.output_file, "ex_backup_list": tuple(self.ex_backup_list), "rca_gdm_thresh": self.rca_gdm_thresh, "scf_max_cycles": self.scf_max_cycles, "geom_max_cycles": self.geom_max_cycles, "outdata": self.outdata, "qcinp": self.qcinp.as_dict() if self.qcinp else None, "error_step_id": self.error_step_id, "errors": self.errors, "fix_step": self.fix_step.as_dict() if self.fix_step else None} @classmethod def from_dict(cls, d): h = QChemErrorHandler(input_file=d["input_file"], output_file=d["output_file"], ex_backup_list=d["ex_backup_list"], rca_gdm_thresh=d["rca_gdm_thresh"], scf_max_cycles=d["scf_max_cycles"], geom_max_cycles=d["geom_max_cycles"]) h.outdata = d["outdata"] h.qcinp = QcInput.from_dict(d["qcinp"]) if d["qcinp"] else None h.error_step_id = d["error_step_id"] h.errors = d["errors"] h.fix_step = QcTask.from_dict(d["fix_step"]) if d["fix_step"] else 
None return h
mit
-107,415,132,109,779,500
44.735507
90
0.528202
false
3.812443
false
false
false
sergeneren/anima
anima/env/mayaEnv/plugins/closestPointOnCurve.py
1
8872
# -*- coding: utf-8 -*- # Copyright (c) 2012-2015, Anima Istanbul # # This module is part of anima-tools and is released under the BSD 2 # License: http://www.opensource.org/licenses/BSD-2-Clause import sys import maya.OpenMaya as OpenMaya import maya.OpenMayaMPx as OpenMayaMPx kPluginNodeTypeName = "spClosestPointOnCurve" cpocPluginId = OpenMaya.MTypeId(0x00349) # Node definition class closestPointOnCurve(OpenMayaMPx.MPxNode): # the plugs aInCurve = OpenMaya.MObject() aInPosition = OpenMaya.MObject() aOutPosition = OpenMaya.MObject() aOutPositionX = OpenMaya.MObject() aOutPositionY = OpenMaya.MObject() aOutPositionZ = OpenMaya.MObject() aOutNormal = OpenMaya.MObject() aOutNormalX = OpenMaya.MObject() aOutNormalY = OpenMaya.MObject() aOutNormalZ = OpenMaya.MObject() aOutParam = OpenMaya.MObject() def __init__(self): OpenMayaMPx.MPxNode.__init__(self) def compute(self, plug, dataBlock): if plug == closestPointOnCurve.aOutPosition or plug == closestPointOnCurve.aOutParam: dataHandle = dataBlock.inputValue(closestPointOnCurve.aInCurve) inputAsCurve = dataHandle.asNurbsCurve() #if not inputAsCurve.hasFn(OpenMaya.MFn.kNurbsCurve): # return OpenMaya.kUnknownParameter dataHandle = dataBlock.inputValue(closestPointOnCurve.aInPosition) inPositionAsFloat3 = dataHandle.asFloat3() inPosition = OpenMaya.MPoint( inPositionAsFloat3[0], inPositionAsFloat3[1], inPositionAsFloat3[2] ) # connect the MFnNurbsCurve # and ask the closest point nurbsCurveFn = OpenMaya.MFnNurbsCurve(inputAsCurve) # get and set outPosition outParam = OpenMaya.MScriptUtil() outParam.createFromDouble(0) outParamPtr = outParam.asDoublePtr() # get position and paramater outPosition = nurbsCurveFn.closestPoint( inPosition, True, outParamPtr, 0.001, OpenMaya.MSpace.kWorld ) outputHandle = dataBlock.outputValue( closestPointOnCurve.aOutPosition ) outputHandle.set3Float(outPosition.x, outPosition.y, outPosition.z) # get and set outNormal #outNormal = nurbsCurveFn.normal(parameter, OpenMaya.MSpace.kWorld) #outputHandle = dataBlock.outputValue(closestPointOnCurve.aOutNormal) #outputHandle.set3Float(outNormal.x, outNormal.y, outNormal.z) #outputHandle.set3Float(0, 1, 0 ) # get and set the uvs outputHandle = dataBlock.outputValue(closestPointOnCurve.aOutParam) #outputHandle.setFloat(OpenMaya.MScriptUtil(outParamPtr).asDouble()) outputHandle.setFloat(OpenMaya.MScriptUtil.getDouble(outParamPtr)) dataBlock.setClean(plug) else: return OpenMaya.kUnknownParameter # creator def nodeCreator(): return OpenMayaMPx.asMPxPtr(closestPointOnCurve()) # initializer def nodeInitializer(): tAttr = OpenMaya.MFnTypedAttribute() nAttr = OpenMaya.MFnNumericAttribute() # input curve closestPointOnCurve.aInCurve = tAttr.create( "inCurve", "ic", OpenMaya.MFnData.kNurbsCurve ) tAttr.setStorable(0) closestPointOnCurve.addAttribute(closestPointOnCurve.aInCurve) # input position closestPointOnCurve.aInPositionX = nAttr.create( "inPositionX", "ipx", OpenMaya.MFnNumericData.kFloat, 0.0 ) nAttr.setStorable(1) nAttr.setWritable(1) closestPointOnCurve.addAttribute(closestPointOnCurve.aInPositionX) closestPointOnCurve.aInPositionY = nAttr.create( "inPositionY", "ipy", OpenMaya.MFnNumericData.kFloat, 0.0 ) nAttr.setStorable(1) nAttr.setWritable(1) closestPointOnCurve.addAttribute(closestPointOnCurve.aInPositionY) closestPointOnCurve.aInPositionZ = nAttr.create( "inPositionZ", "ipz", OpenMaya.MFnNumericData.kFloat, 0.0 ) nAttr.setStorable(1) nAttr.setWritable(1) closestPointOnCurve.addAttribute(closestPointOnCurve.aInPositionZ) closestPointOnCurve.aInPosition = nAttr.create( 
"inPosition", "ip", closestPointOnCurve.aInPositionX, closestPointOnCurve.aInPositionY, closestPointOnCurve.aInPositionZ ) nAttr.setStorable(1) nAttr.setKeyable(1) nAttr.setWritable(1) closestPointOnCurve.addAttribute(closestPointOnCurve.aInPosition) # output position closestPointOnCurve.aOutPositionX = nAttr.create( "outPositionX", "opx", OpenMaya.MFnNumericData.kFloat, 0.0 ) nAttr.setStorable(0) nAttr.setWritable(0) closestPointOnCurve.addAttribute(closestPointOnCurve.aOutPositionX) closestPointOnCurve.aOutPositionY = nAttr.create( "outPositionY", "opy", OpenMaya.MFnNumericData.kFloat, 0.0 ) nAttr.setStorable(0) nAttr.setWritable(0) closestPointOnCurve.addAttribute(closestPointOnCurve.aOutPositionY) closestPointOnCurve.aOutPositionZ = nAttr.create( "outPositionZ", "opz", OpenMaya.MFnNumericData.kFloat, 0.0 ) nAttr.setStorable(0) nAttr.setWritable(0) closestPointOnCurve.addAttribute(closestPointOnCurve.aOutPositionZ) closestPointOnCurve.aOutPosition = nAttr.create( "outPosition", "op", closestPointOnCurve.aOutPositionX, closestPointOnCurve.aOutPositionY, closestPointOnCurve.aOutPositionZ ) nAttr.setStorable(0) nAttr.setKeyable(0) nAttr.setWritable(1) closestPointOnCurve.addAttribute(closestPointOnCurve.aOutPosition) # output normal closestPointOnCurve.aOutNormalX = nAttr.create( "outNormalX", "onx", OpenMaya.MFnNumericData.kFloat, 0.0 ) nAttr.setStorable(0) nAttr.setWritable(0) closestPointOnCurve.addAttribute(closestPointOnCurve.aOutNormalX) closestPointOnCurve.aOutNormalY = nAttr.create( "outNormalY", "ony", OpenMaya.MFnNumericData.kFloat, 0.0 ) nAttr.setStorable(0) nAttr.setWritable(0) closestPointOnCurve.addAttribute(closestPointOnCurve.aOutNormalY) closestPointOnCurve.aOutNormalZ = nAttr.create( "outNormalZ", "onz", OpenMaya.MFnNumericData.kFloat, 0.0 ) nAttr.setStorable(0) nAttr.setWritable(0) closestPointOnCurve.addAttribute(closestPointOnCurve.aOutNormalZ) closestPointOnCurve.aOutNormal = nAttr.create( "outNormal", "on", closestPointOnCurve.aOutNormalX, closestPointOnCurve.aOutNormalY, closestPointOnCurve.aOutNormalZ ) nAttr.setStorable(0) nAttr.setKeyable(0) nAttr.setWritable(1) closestPointOnCurve.addAttribute(closestPointOnCurve.aOutNormal) closestPointOnCurve.aOutParam = nAttr.create( "outParam", "opa", OpenMaya.MFnNumericData.kFloat, 0.0 ) nAttr.setStorable(0) nAttr.setKeyable(0) nAttr.setWritable(1) closestPointOnCurve.addAttribute(closestPointOnCurve.aOutParam) closestPointOnCurve.attributeAffects( closestPointOnCurve.aInCurve, closestPointOnCurve.aOutPosition ) closestPointOnCurve.attributeAffects( closestPointOnCurve.aInPosition, closestPointOnCurve.aOutPosition ) closestPointOnCurve.attributeAffects( closestPointOnCurve.aInCurve, closestPointOnCurve.aOutParam ) closestPointOnCurve.attributeAffects( closestPointOnCurve.aInPosition, closestPointOnCurve.aOutParam ) closestPointOnCurve.attributeAffects( closestPointOnCurve.aInCurve, closestPointOnCurve.aOutNormal ) closestPointOnCurve.attributeAffects( closestPointOnCurve.aInPosition, closestPointOnCurve.aOutNormal ) closestPointOnCurve.attributeAffects( closestPointOnCurve.aOutParam, closestPointOnCurve.aOutPosition ) # initialize the script plug-in def initializePlugin(mobject): mplugin = OpenMayaMPx.MFnPlugin(mobject, "Erkan Ozgur Yilmaz","1.0.2") try: mplugin.registerNode( kPluginNodeTypeName, cpocPluginId, nodeCreator, nodeInitializer ) except: sys.stderr.write("Failed to register node: %s" % kPluginNodeTypeName) raise # uninitialize the script plug-in def uninitializePlugin(mobject): mplugin = 
OpenMayaMPx.MFnPlugin(mobject) try: mplugin.deregisterNode(cpocPluginId) except: sys.stderr.write("Failed to deregister node: %s" % kPluginNodeTypeName) raise
bsd-2-clause
-1,998,576,980,713,366,500
32.228464
93
0.679554
false
3.486051
false
false
false
appknox/vendor
ak_vendor/report_sample.py
1
1164
import json from os.path import dirname, abspath from django import template from django.conf import settings from django.template import Template, Context from django.template.engine import Engine from django.core.wsgi import get_wsgi_application from ak_vendor.report import Report settings.configure() application = get_wsgi_application() CUR_DIR = dirname(abspath(__file__)) template.Library() class ReportHTMLExporter: def __init__(self, report): self.report = report def to_html(self): tpl = open('{}/report/report_template.html'.format(CUR_DIR)).read() template = Template(tpl, engine=Engine(libraries={ 'i18n': 'django.templatetags.i18n' })) context = Context({ 'report': self.report }) content = template.render(context) return content def to_html_file(self, path=''): with open('{}/output.html'.format(path), 'w') as file: tpl = self.to_html() file.write(tpl) data = json.load(open('{}/report_sample1.json'.format(CUR_DIR))) report_obj = Report.from_json(data) ReportHTMLExporter(report_obj).to_html_file(CUR_DIR)
mit
2,017,626,688,635,430,700
28.1
75
0.664089
false
3.742765
false
false
false
pajlada/pajbot
pajbot/modules/chat_alerts/cheeralert.py
1
12111
import logging import math from pajbot.managers.handler import HandlerManager from pajbot.modules import BaseModule from pajbot.modules import ModuleSetting from pajbot.modules.chat_alerts import ChatAlertModule log = logging.getLogger(__name__) class CheerAlertModule(BaseModule): ID = __name__.split(".")[-1] NAME = "Cheer Alert" DESCRIPTION = "Prints a message in chat/whispers when a user cheers in your chat" CATEGORY = "Feature" ENABLED_DEFAULT = False PARENT_MODULE = ChatAlertModule SETTINGS = [ ModuleSetting( key="chat_message", label="Enable chat messages for users who cheer bits", type="boolean", required=True, default=True, ), ModuleSetting( key="whisper_message", label="Enable whisper messages for users who cheer bits", type="boolean", required=True, default=False, ), ModuleSetting( key="whisper_after", label="Whisper the message after X seconds", type="number", required=True, placeholder="", default=5, constraints={"min_value": 1, "max_value": 120}, ), ModuleSetting( key="one_bit", label="Chat message for users who cheer 1 or more bits | Available arguments: {username}, {num_bits}", type="text", required=True, placeholder="{username} thank you so much for cheering {num_bits} bits! PogChamp", default="{username} thank you so much for cheering {num_bits} bits! PogChamp", constraints={"max_str_len": 400}, ), ModuleSetting( key="sixnine_bits", label="Chat message for users who cheer 69 bits, leave empty to fallback to the previous bit amount message. | Available arguments: {username}, {num_bits}", type="text", required=True, placeholder="{username} thank you so much for cheering {num_bits} bits! Kreygasm", default="", constraints={"max_str_len": 400}, ), ModuleSetting( key="hundred_bits", label="Chat message for users who cheer 100 or more bits, leave empty to fallback to the previous bit amount message. | Available arguments: {username}, {num_bits}", type="text", required=True, placeholder="{username} thank you so much for cheering {num_bits} bits! PogChamp", default="", constraints={"max_str_len": 400}, ), ModuleSetting( key="fourtwenty_bits", label="Chat message for users who cheer 420 bits, leave empty to fallback to the previous bit amount message. | Available arguments: {username}, {num_bits}", type="text", required=True, placeholder="{username} thank you so much for cheering {num_bits} bits! CiGrip", default="", constraints={"max_str_len": 400}, ), ModuleSetting( key="fivehundred_bits", label="Chat message for users who cheer 500 or more bits, leave empty to fallback to the previous bit amount message. | Available arguments: {username}, {num_bits}", type="text", required=True, placeholder="{username} thank you so much for cheering {num_bits} bits! PogChamp", default="", constraints={"max_str_len": 400}, ), ModuleSetting( key="fifteenhundred_bits", label="Chat message for users who cheer 1500 or more bits, leave empty to fallback to the previous bit amount message. | Available arguments: {username}, {num_bits}", type="text", required=True, placeholder="{username} thank you so much for cheering {num_bits} bits! PogChamp", default="", constraints={"max_str_len": 400}, ), ModuleSetting( key="fivethousand_bits", label="Chat message for users who cheer 5000 or more bits, leave empty to fallback to the previous bit amount message. | Available arguments: {username}, {num_bits}", type="text", required=True, placeholder="{username} thank you so much for cheering {num_bits} bits! 
PogChamp", default="", constraints={"max_str_len": 400}, ), ModuleSetting( key="tenthousand_bits", label="Chat message for users who cheer 10000 or more bits, leave empty to fallback to the previous bit amount message. | Available arguments: {username}, {num_bits}", type="text", required=True, placeholder="{username} thank you so much for cheering {num_bits} bits! PogChamp", default="", constraints={"max_str_len": 400}, ), ModuleSetting( key="twentyfivethousand_bits", label="Chat message for users who cheer 25000 or more bits, leave empty to fallback to the previous bit amount message. | Available arguments: {username}, {num_bits}", type="text", required=True, placeholder="{username} thank you so much for cheering {num_bits} bits! PogChamp", default="", constraints={"max_str_len": 400}, ), ModuleSetting( key="grant_points_per_100_bits", label="Give points to user per 100 bits they cheer. 0 = off", type="number", required=True, placeholder="", default=0, constraints={"min_value": 0, "max_value": 50000}, ), ModuleSetting( key="alert_message_points_given", label="Message to announce points were given to user, leave empty to disable message. If the user cheers less than 100 bits, no message will be sent. | Available arguments: {username}, {points}, {num_bits}", type="text", required=True, default="{username} was given {points} points for cheering {num_bits} bits! FeelsAmazingMan", constraints={"max_str_len": 300}, ), ] def __init__(self, bot): super().__init__(bot) def on_cheer(self, user, num_bits): """ A user just cheered bits. Send the event to the websocket manager, and send a customized message in chat. """ payload = {"username": user.name, "num_bits": num_bits} self.bot.websocket_manager.emit("cheer", payload) if self.settings["chat_message"]: if num_bits >= 25000 and self.settings["twentyfivethousand_bits"] != "": self.bot.say(self.get_phrase("twentyfivethousand_bits", **payload)) elif num_bits >= 10000 and self.settings["tenthousand_bits"] != "": self.bot.say(self.get_phrase("tenthousand_bits", **payload)) elif num_bits >= 5000 and self.settings["fivethousand_bits"] != "": self.bot.say(self.get_phrase("fivethousand_bits", **payload)) elif num_bits >= 1500 and self.settings["fifteenhundred_bits"] != "": self.bot.say(self.get_phrase("fifteenhundred_bits", **payload)) elif num_bits >= 500 and self.settings["fivehundred_bits"] != "": self.bot.say(self.get_phrase("fivehundred_bits", **payload)) elif num_bits == 420 and self.settings["fourtwenty_bits"] != "": self.bot.say(self.get_phrase("fourtwenty_bits", **payload)) elif num_bits >= 100 and self.settings["hundred_bits"] != "": self.bot.say(self.get_phrase("hundred_bits", **payload)) elif num_bits == 69 and self.settings["sixnine_bits"] != "": self.bot.say(self.get_phrase("sixnine_bits", **payload)) elif self.settings["one_bit"] != "": self.bot.say(self.get_phrase("one_bit", **payload)) if self.settings["whisper_message"]: if num_bits >= 25000 and self.settings["twentyfivethousand_bits"] != "": self.bot.execute_delayed( self.settings["whisper_after"], self.bot.whisper, user, self.get_phrase("twentyfivethousand_bits", **payload), ) elif num_bits >= 10000 and self.settings["tenthousand_bits"] != "": self.bot.execute_delayed( self.settings["whisper_after"], self.bot.whisper, user, self.get_phrase("tenthousand_bits", **payload), ) elif num_bits >= 5000 and self.settings["fivethousand_bits"] != "": self.bot.execute_delayed( self.settings["whisper_after"], self.bot.whisper, user, self.get_phrase("fivethousand_bits", **payload), ) elif num_bits >= 
1500 and self.settings["fifteenhundred_bits"] != "": self.bot.execute_delayed( self.settings["whisper_after"], self.bot.whisper, user, self.get_phrase("fifteenhundred_bits", **payload), ) elif num_bits >= 500 and self.settings["fivehundred_bits"] != "": self.bot.execute_delayed( self.settings["whisper_after"], self.bot.whisper, user, self.get_phrase("fivehundred_bits", **payload), ) elif num_bits == 420 and self.settings["fourtwenty_bits"] != "": self.bot.execute_delayed( self.settings["whisper_after"], self.bot.whisper, user, self.get_phrase("fourtwenty_bits", **payload), ) elif num_bits >= 100 and self.settings["hundred_bits"] != "": self.bot.execute_delayed( self.settings["whisper_after"], self.bot.whisper, user, self.get_phrase("hundred_bits", **payload), ) elif num_bits == 69 and self.settings["sixnine_bits"] != "": self.bot.execute_delayed( self.settings["whisper_after"], self.bot.whisper, user, self.get_phrase("sixnine_bits", **payload), ) elif self.settings["one_bit"] != "": self.bot.execute_delayed( self.settings["whisper_after"], self.bot.whisper, user, self.get_phrase("one_bit", **payload), ) if self.settings["grant_points_per_100_bits"] <= 0: return round_number = math.floor(num_bits / 100) if round_number > 0: points_to_grant = round_number * self.settings["grant_points_per_100_bits"] user.points += points_to_grant alert_message = self.settings["alert_message_points_given"] if alert_message != "": self.bot.say(alert_message.format(username=user, points=points_to_grant, num_bits=num_bits)) def on_pubmsg(self, source, tags, **rest): if "bits" not in tags: return try: num_bits = int(tags["bits"]) except ValueError: log.error("BabyRage error occurred with getting the bits integer") return if "display-name" not in tags: log.debug(f"cheeralert requires a display-name, but it is missing: {tags}") return self.on_cheer(source, num_bits) def enable(self, bot): HandlerManager.add_handler("on_pubmsg", self.on_pubmsg) def disable(self, bot): HandlerManager.remove_handler("on_pubmsg", self.on_pubmsg)
mit
-5,770,239,708,818,410,000
43.690037
219
0.546033
false
4.222803
false
false
false
artemharutyunyan/copilot
src/copilot-dashboard/copilot_dashboard/dashboard/views.py
1
3545
import httplib2 import datetime from urllib import urlencode from random import random from django.http import HttpRequest, HttpResponse from django.core.serializers.json import DjangoJSONEncoder from django.utils import simplejson from django.conf import settings import bson.json_util from bson.objectid import ObjectId import models as DB from copilot_dashboard.settings import SETTINGS HTTP = httplib2.Http("/tmp/httplib2-cache") ### Handlers ### def ping(request): """ GET /api/ping Responds with {"ping":"pong"} (HTTP 200) in case the system is working fine Status codes: * 200 - OK """ return json({'ping': 'pong'}) def stats(request): """ GET /api/stats?target={graphite path}[&from={start timestamp}&until={end timestamp}] A simple Graphite proxy Status codes: * 200 - OK * 400 - Missing query parameter * 500 - No such data is available """ try: path = request.GET['target'] except KeyError, e: return json({'error': True}, 400) start = request.GET.get('from', None) end = request.GET.get('until', None) data = mk_graphite_request(path, start, end) return HttpResponse(data, mimetype="application/json") def connections(request): """ GET /api/connections?from={start}[&allactive=true] Lists all connected users in specified timeframe. If 'allactive' is set to 'true', the timeframe will be ignored and instead all currently connected users will be listed. Response (JSON): [ {"_id": "Document ID", "loc": [Longitude, Latitude]}, ... ] Status codes: * 200 - OK * 400 - Missing query parameter (from) """ collection = DB.get_collection('connections') docs = [] query = None if request.GET.get('allactive', 'false') == 'true': query = {'connected': True, 'agent_data.component': 'agent'} else: try: start = datetime.datetime.fromtimestamp(int(request.GET['from'])/1000) except KeyError, e: return json({'error': True}, 400) query = {'updated_at': {'$gte': start}, 'agent_data.component': 'agent'} for doc in collection.find(query, {'_id': 1, 'loc': 1}): doc['loc'] = [coord + random()*0.0004 for coord in doc['loc']] docs.append(doc) return json(docs) def connection_info(request, id): """ GET /api/connection/{connection id} Responds with all data available for the specified connection (except for document's ID and coordinates). Status codes: * 200 - OK * 404 - Given ID did not match any documents """ doc = DB.get_connection(id) if not doc: return json({'error': True}, 404) else: doc['contributions'] = DB.get_contributions(doc['agent_data']['id']) return json(doc) ### Utilites ### def mk_graphite_request(path, start, end): global HTTP query = {'target': path, 'format': 'json', '_salt': str(random)[2:]} if start: query['from'] = start if end: query['until'] = end url = "http://%s:%s/render/?%s" % (SETTINGS['GRAPHITE_HOST'], SETTINGS['GRAPHITE_PORT'], urlencode(query)) headers, content = HTTP.request(url, "GET") return content class EnhancedJSONEncoder(DjangoJSONEncoder): """ Custom JSON encoder which can serialize MongoDB's ObjectId objects. """ def default(self, o, **kwargs): if isinstance(o, ObjectId): return str(o) else: return DjangoJSONEncoder.default(self, o, **kwargs) def json(data, status=200): data = simplejson.dumps(data, cls=EnhancedJSONEncoder, separators=(',', ':')) return HttpResponse(data, mimetype='application/json', status=status)
bsd-3-clause
345,892,791,650,336,500
25.856061
108
0.667137
false
3.643371
false
false
false
petr-devaikin/dancee
helpers/extractor.py
1
5790
# Cut the experiment session in small fragments # Input: ../bin/data/records/{session}/body.csv and skeletok.csv # Output: fragments/{fragment_number}.json and fragments/log.csv import os import numpy import json DELAY = 15 LENGTH = 30 OVERLAP = 0.719999 FREQUENCY = 60 MARGIN = 5 FREQUENCY = 60 CUTOFF_FREQUENCY = 10 buf_length = FREQUENCY / CUTOFF_FREQUENCY kernel = numpy.blackman(buf_length) kernel_summ = numpy.sum(kernel) to_filter = [ 9, 10, 11, #r_shoulder 12, 13, 14, #r_elbow 15, 16, 17, #r_hand 18, 19, 20, #l_shoulder 21, 22, 23, #l_elbow 24, 25, 26, #l_hand 27, 28, 29, #r_hip 30, 31, 32, #r_knee 36, 37, 38, #r_foot 39, 40, 41, #l_hip 42, 43, 44, #l_knee 48, 49, 50 #l_foot ] buffers = [[0] * buf_length] * len(to_filter) values = [0] * len(to_filter) values2 = [0] * len(to_filter) # emg filtering CUTOFF_EMG_FREQUENCY = 6 buf_emg_length = FREQUENCY / CUTOFF_EMG_FREQUENCY kernel_emg = numpy.blackman(buf_emg_length) kernel_emg_summ = numpy.sum(kernel_emg) emg2_buffer = [0] * buf_emg_length # acc filtering CUTOFF_ACC_FREQUENCY = 10 buf_acc_length = FREQUENCY / CUTOFF_ACC_FREQUENCY kernel_acc = numpy.blackman(buf_acc_length) kernel_acc_summ = numpy.sum(kernel_acc) acc_buffer = [[0] * buf_acc_length] * 3 # clean the folder for f in os.listdir("fragments"): os.remove(os.path.join('fragments', f)) # cut fragments record_counter = 0 def cut_fragment(participant, track_number): global record_counter global values global values2 global buffers global emg2_buffer global acc_buffer print "Cut participant " + participant + ", track " + str(track_number) result_data = { 'acc1': [], 'acc2': [], 'acc2_nf': [], 'emg1': [], 'emg2': [], 'emg2_nf': [], 'skeleton': [], 'skeleton_nf': [], } path = "../bin/data/records/" for rec in os.listdir(path): if rec.split(' ')[0] == participant: with open(path + rec + "/body.csv", 'r') as f_read_body: with open(path + rec + "/skeleton.csv", 'r') as f_read_skeleton: i = 0 while i < (DELAY + (OVERLAP + LENGTH) * (track_number + 1) - MARGIN) * FREQUENCY: line_body = f_read_body.readline().strip().split('\t') line_skeleton = f_read_skeleton.readline().strip().split('\t') values3 = [0] * len(to_filter) if i >= (DELAY + OVERLAP + (OVERLAP + LENGTH) * track_number) * FREQUENCY: values = [float(line_skeleton[j]) for j in to_filter] for j in range(2, len(values), 3): if values[j] > 1.4: values2[j - 2] = values[j - 2] values2[j - 1] = values[j - 1] values2[j] = values[j] for j in range(len(values)): buffers[j].append(values2[j]) buffers[j] = buffers[j][1:] for k in range(buf_length): values3[j] += buffers[j][k] * kernel[k] values3[j] /= kernel_summ #emg filtering emg2 = float(line_body[7]) emg2_nf = emg2 emg2_buffer.append(emg2) emg2_buffer = emg2_buffer[1:] emg2 = 0 for k in range(buf_emg_length): emg2 += emg2_buffer[k] * kernel_emg[k] emg2 /= kernel_emg_summ line_body[7] = str(emg2) #acc filtering acc_values = [float(v) for v in line_body[3:6]] for j in range(3): v = float(line_body[3 + j]) acc_buffer[j].append(v) acc_buffer[j] = acc_buffer[j][1:] v2 = 0 for k in range(buf_acc_length): v2 += acc_buffer[j][k] * kernel_acc[k] v2 /= kernel_acc_summ line_body[j + 3] = str(v2) if i >= (DELAY + OVERLAP + (OVERLAP + LENGTH) * track_number + MARGIN) * FREQUENCY: result_data['acc1'].append([float(v) - 512 for v in line_body[0:3]]) result_data['acc2'].append([float(v) - 512 for v in line_body[3:6]]) result_data['acc2_nf'].append(acc_values) result_data['emg1'].append(float(line_body[6])) result_data['emg2'].append(float(line_body[7])) result_data['emg2_nf'].append(emg2_nf) 
result_data['skeleton'].append({ 'r_shoulder': values3[0:3], 'r_elbow': values3[3:6], 'r_hand': values3[6:9], 'l_shoulder': values3[9:12], 'l_elbow': values3[12:15], 'l_hand': values3[15:18], 'r_hip': values3[18:21], 'r_knee': values3[21:24], 'r_foot': values3[24:27], 'l_hip': values3[27:30], 'l_knee': values3[30:33], 'l_foot': values3[33:36] }) result_data['skeleton_nf'].append({ 'r_shoulder': values[0:3], 'r_elbow': values[3:6], 'r_hand': values[6:9], 'l_shoulder': values[9:12], 'l_elbow': values[12:15], 'l_hand': values[15:18], 'r_hip': values[18:21], 'r_knee': values[21:24], 'r_foot': values[24:27], 'l_hip': values[27:30], 'l_knee': values[30:33], 'l_foot': values[33:36] }) i += 1 with open('fragments/' + str(record_counter) + '.json', "w") as f_write: json.dump(result_data, f_write) break else: print "Cannot find data for participant ", participant, "\n" return None record_counter += 1 return record_counter - 1 with open('selftest/results.txt', 'r') as f: with open('fragments/log.csv', 'w') as log: log.write('Participant\tTrack number\tTrack order\tValence\tArousal\tFragment\n') participant = -1 track_number = 0 for line in f: ar = line.strip().split(' '); if ar[0] != participant: track_number = 0 participant = ar[0] track_real_number = ar[1] valence = ar[2] arousal = ar[3] record = cut_fragment(participant, track_number) log.write(participant + '\t' + track_real_number + '\t' + str(track_number) + '\t' + valence + '\t' + arousal + '\t' + str(record) + '\n') track_number += 1 #break
gpl-3.0
-8,201,075,262,819,803,000
25.199095
141
0.580656
false
2.549538
false
false
false
quantumgraph/qgprofiler
qgprofiler/qg_profile_aggregator.py
1
4240
from node import Node, NodeList from .qg_profiler import QGProfiler from .helper import get_real_file_path, get_file_type, xml_scanner, read_attributes_from_xml, merge_attributes import glob import json class QGProfileAggregator(object): def __init__(self, in_file_path, out_file_path): self.root_node = Node('i_am_root', None, {}) self.in_file_path = get_real_file_path(in_file_path) get_file_type(out_file_path) self.out_file_path = get_real_file_path(out_file_path) def add_json(self, _json): new_node = self.make_node_from_json(_json, self.root_node) new_node_list = NodeList() new_node_list.append(new_node) self.merge_node_list_to_node(self.root_node, new_node_list) def merge_node_list_to_node(self, main_node, node_list): for node in node_list: index = main_node.is_child_in_children(node.get_name()) if index == -1: main_node.add_child(node) else: existing_node = main_node.get_child(index) existing_node.increment_value_by(node.get_value()) existing_node.increment_count_by(node.get_count()) existing_node.set_aggregate_attr(merge_attributes(node.get_aggregate_attr(), existing_node.get_aggregate_attr())) existing_node.update_over_head(node.get_over_head()) self.merge_node_list_to_node(existing_node, node.get_children()) def make_node_from_json(self, _json, parent_node): name = _json['name'] value = _json['value'] count = _json['count'] children = _json['children'] attributes = _json.get('attributes', {}) over_head = _json.get('overhead', 0) new_node = Node(name, parent_node, attributes) new_node.set_value(value) new_node.set_count(count) new_node.set_over_head(over_head) for child in children: child_node = self.make_node_from_json(child, new_node) new_node.add_child(child_node) return new_node def add_xml(self, _xml): current_node = self.root_node xml_gen = xml_scanner(_xml) for each in xml_gen: if each[0] == 'START': name = str(each[2]['name']) value = float(each[2]['value']) count = int(each[2]['count']) over_head = float(each[2].get('overhead', 0)) attributes = read_attributes_from_xml(each[2].get('attributes', {})) index = current_node.is_child_in_children(name) if index == -1: new_node = Node(name, current_node, attributes) new_node.set_value(value) new_node.set_count(count) new_node.set_over_head(over_head) current_node.add_child(new_node) current_node = new_node else: current_node = current_node.get_child(index) current_node.increment_value_by(value) current_node.increment_count_by(count) current_node.set_aggregate_attr(merge_attributes(attributes, current_node.get_aggregate_attr())) current_node.update_over_head(over_head) elif each[0] == 'END': current_node = current_node.get_parent() def generate_file(self, rounding_no=6): for file_path in glob.iglob(self.in_file_path): filename = file_path.split('/')[-1] if filename.endswith('.json'): with open(file_path, 'r') as f: raw_json = f.read() _json = json.loads(raw_json) self.add_json(_json) elif filename.endswith('.xml'): with open(file_path, 'r') as f: _xml = f.read() self.add_xml(_xml) qg_profiler = QGProfiler('test', self.out_file_path) if len(self.root_node.get_children()) == 1: qg_profiler.root_node = self.root_node.get_child(0) else: qg_profiler.root_node = self.root_node qg_profiler.generate_file(rounding_no)
mit
7,139,093,304,680,659,000
43.631579
129
0.556132
false
3.617747
false
false
false
macmanes-lab/MCBS913
code/Junhong Chen/generateProtineSeq.py
1
4902
""" Author: Junhong Chen """ from Bio.Alphabet import IUPAC from Bio.Seq import Seq from Bio import SeqIO from sys import argv import os path = argv[1] class CDS: def __init__(self,gff): self.data = dict() self.fname = gff def parse(self): file = open(self.fname,"r") for elem in file: if "CDS" in elem: tmp = elem.split() ind = tmp.index("CDS") if tmp[0] in self.data: self.data[tmp[0]].append((int(tmp[ind+1]),int(tmp[ind+2]),tmp[ind+4])) else: self.data[tmp[0]] = [(int(tmp[ind+1]),int(tmp[ind+2]),tmp[ind+4])] def getContigName(self): return self.data.keys() def getContigNumber(self): return len(self.data) def getContigCDSIndex(self,name): if name in self.data: return self.data[name] else: print "No indices for that contig ID: ", name #return self.data[name.split(".")[0]] def getContigCDSSize(self,name): return len(self.getContigCDSIndex(name)) class RefSeq: def __init__(self,fast): self.fname = fast self.data = dict() self.result = dict() self.cds = CDS(fast.split(".")[0]+".gff") def parse(self): fast = SeqIO.parse(open(self.fname),"fasta") for elem in fast: tmp = elem.id.split("|")[3] if tmp in self.data: print "ATTENTION: same contig ID in: " + self.fname else: self.data[tmp] = str(elem.seq) def getContigSeq(self,name): if name in self.data: return self.data[name] else: print "Can NOT find the contig: "+name def getContigData(self): return self.data def getContigID(self): return self.data.keys() def getContigCDSSize(self,name): return self.cds.getContigCDSSize(name) def translate(self,mode = IUPAC.ambiguous_dna): self.cds.parse() contig = self.data.keys() for name in contig: ind = self.cds.getContigCDSIndex(name) sq = self.data[name] ret = [] for tup in ind: myseq = sq[tup[0]-1:tup[1]] #store Seq Object if tup[2] == "+": ret.append(Seq(myseq, mode).translate()) else: ret.append(Seq(myseq, mode).reverse_complement().translate()) self.result[name] = ret return self.result def getCDSSeq(self,name,index): sq = self.data[name] ind = self.cds.getContigCDSIndex(name)[index] print self.cds.getContigName(); return sq[ind[0]-1:ind[1]] def compareProtineSeq(path): refd = RefSeq(path+".fastd") refd.parse() refa = RefSeq(path+".fasta") refa.parse() refat = refa.translate() refdt = refd.translate() #print refat["NC_008752.1"][3] #print refdt["NC_008752.1"][3] #print refa.getCDSSeq("NC_008752.1",3) #print refd.getCDSSeq("NC_008752.1",3) id = refd.getContigID() ret = dict() for name in id: mis = [] l = refa.getContigCDSSize(name) stat = 0 for i in range(l): if refat[name][i] in refdt[name][i]: stat = stat + 1 else: mis.append(i) ret[name] = (l,stat,mis) def sum(x): ret = 0. for el in x: ret = ret + el*1. return ret mis = [x[1] for x in ret.values()] tot = [x[0] for x in ret.values()] return sum(mis)/sum(tot) #return ret def getFilesinCWD(path): if path[-1] is not "/": path = path + "/" ref = [] files = [f for f in os.listdir(path)] for i in range(1,5): for fo in files: f = fo.split(".")[0] if f not in ref and f.startswith(str(i)+"-"): ref.append(f) ret = [path+tp for tp in ref] return ret def doCompare(path): fpath = getFilesinCWD(path) retp = [f.split("/")[-1] for f in fpath] ret = [] for p in fpath: ret.append(compareProtineSeq(p)) return retp,ret if __name__ == "__main__": print doCompare(path) ##refa = RefSeq(path+".fasta") #refa.parse() #print refa.getCDSSeq("NC_008752",0)
mit
-7,779,518,581,467,217,000
20.5
90
0.467768
false
3.617712
false
false
false
openre/openre
openre/agent/domain/__init__.py
1
1623
# -*- coding: utf-8 -*- """ Domain. Creates one process for one openre domain. Receives and forwards spikes (pub), receives commands from the server (req-rep) and relays the result of command execution. """ from openre.agent.decorators import daemonize from openre.agent.helpers import daemon_stop import logging import signal from openre.agent.args import parse_args from openre.agent.domain.args import parser from openre.agent.domain.domain import Agent def run(): args = parse_args(parser) def sigterm(signum, frame): signum_to_str = dict( (k, v) for v, k in reversed(sorted(signal.__dict__.items())) if v.startswith('SIG') and not v.startswith('SIG_') ) logging.debug( 'Got signal.%s. Clean and exit.', signum_to_str.get(signum, signum) ) exit(0) @daemonize( args.pid_file, signal_map={ signal.SIGTERM: sigterm, signal.SIGINT: sigterm, }, ) def start(): """ Start the server """ logging.info('Start OpenRE.Agent domain') agent = Agent(vars(args)) agent.run() def stop(): """ Stop the server """ logging.info('Stop OpenRE.Agent domain') daemon_stop(args.pid_file) if args.action == 'start': start() elif args.action == 'stop': stop() elif args.action == 'restart': stop() start()
mit
-6,384,374,142,818,930,000
24.413793
74
0.580054
false
3.096639
false
false
false
ttm/pingosom
pingosom2.py
1
50613
#-*- coding: utf-8 -*- import numpy as n, random, os, sys, time from scipy.io import wavfile as w tfoo=time.time() H=n.hstack V=n.vstack f_a = 44100. # Hz, sampling frequency ############## 2.2.1 Lookup table (LUT) Lambda_tilde=Lt=1024.*16 # Sine fooXY=n.linspace(0,2*n.pi,Lt,endpoint=False) S_i=n.sin(fooXY) # one period of the sine with T samples # Square: Q_i=n.hstack( ( n.ones(Lt/2)*-1 , n.ones(Lt/2) ) ) # Triangular: foo=n.linspace(-1,1,Lt/2,endpoint=False) Tr_i=n.hstack( ( foo , foo*-1 ) ) # Sawtooth: D_i=n.linspace(-1,1,Lt) def v(f=220,d=2.,tab=S_i,fv=2.,nu=2.,tabv=S_i): if nu==13.789987: return n.zeros(int(f_a*d)) Lambda=n.floor(f_a*d) ii=n.arange(Lambda) Lv=float(len(tabv)) Gammav_i=n.floor((ii*fv*Lv)/f_a) # indices for the LUT Gammav_i=n.array(Gammav_i,n.int) # vibrato variation pattern for each sample Tv_i=tabv[Gammav_i%int(Lv)] # frequency in Hz at each sample F_i=f*( 2.**( Tv_i*nu/12. ) ) # movement through the table per sample D_gamma_i=F_i*(Lt/float(f_a)) Gamma_i=n.cumsum(D_gamma_i) # total movement through the table Gamma_i=n.floor( Gamma_i) # now the indices Gamma_i=n.array( Gamma_i, dtype=n.int) # now the indices return tab[Gamma_i%int(Lt)] # look the indices up in the table def A(fa=2.,V_dB=10.,d=2.,taba=S_i): # Use with: v(d=XXX)*A(d=XXX) Lambda=n.floor(f_a*d) ii=n.arange(Lambda) Lt=float(len(taba)) Gammaa_i=n.floor(ii*fa*Lt/f_a) # indices for the LUT Gammaa_i=n.array(Gammaa_i,n.int) # amplitude variation at each sample A_i=taba[Gammaa_i%int(Lt)] A_i=1+A_i*(1- 10.**(V_dB/20.)) return A_i def adsr(som,A=10.,D=20.,S=-20.,R=100.,xi=1e-2): """ADSR envelope with A attack in milliseconds, D decay in milliseconds, S sustain given as the number of decibels below, and R release in milliseconds. Note that the total duration is given by the sound itself, and that the duration of the sustain segment is the difference between the total duration and the durations of the A, D and R parts.""" a_S=10**(S/20.) Lambda=len(som) Lambda_A=int(A*f_a*0.001) Lambda_D=int(D*f_a*0.001) Lambda_R=int(R*f_a*0.001) Lambda_S=Lambda - Lambda_A - Lambda_D - Lambda_R ii=n.arange(Lambda_A,dtype=n.float) A=ii/(Lambda_A-1) A_i=A # ok ii=n.arange(Lambda_A,Lambda_D+Lambda_A,dtype=n.float) D=1-(1-a_S)*( ( ii-Lambda_A )/( Lambda_D-1) ) A_i=n.hstack( (A_i, D ) ) S=n.ones(Lambda-Lambda_R-(Lambda_A+Lambda_D),dtype=n.float)*a_S A_i=n.hstack( ( A_i, S ) ) ii=n.arange(Lambda-Lambda_R,Lambda,dtype=n.float) R=a_S-a_S*((ii-(Lambda-Lambda_R))/(Lambda_R-1)) A_i=n.hstack( (A_i,R) ) return som*A_i triadeM=[0.,4.,7.]
def ac(f=220.,notas=[0.,4.,7.,12.],tab=Q_i,d=2.,nu=0,fv=2.): acorde=adsr(v(tab=tab,d=d,f=f*2.**(notas[-1]/12.),nu=nu,fv=fv)) for na in notas[:-1]: acorde+=adsr(v(tab=tab,d=d,f=f*2**(na/12.),nu=nu,fv=fv)) return acorde*10 def N(arr,xx=1.): r=arr r = (((r-r.min())/(r.max()-r.min()))*2-1)*xx return n.int16(r * float(2**15-1)) def NN(arr): return 2*((arr-arr.min())/(arr.max()-arr.min()))-1 vozes="f3,f2,f1,f5,m5,m1,m3".split(",") def fala(frase="Semicondutor livre",ss=160): arq=frase.split()[0] #os.system("espeak -vpt-pt+%s -w%s.wav -g110 -p99 -s110 -b=1 '%s'"%(random.sample(vozes,1)[0],arq,frase)) os.system(u"espeak -vpt-pt+%s -w%s.wav -p99 -b=1 '%s' -s%i"%(random.sample(vozes,1)[0],arq,frase,ss)) #os.system(u"espeak "+ frase +(u" -vpt-pt+%s -w%s.wav -p99 -b=1 -s%i"%(random.sample(vozes,1)[0],arq,ss))) #os.system("espeak -vpt-pt+%s -w%s.wav -g110 -p99 -s130 -b=1 '%s'"%(random.sample(vozes,1)[0],arq,frase)) ff=w.read("%s.wav"%(arq,))[1] ff_=n.fft.fft(ff) s=ff2=n.fft.ifft( n.hstack((ff_,n.zeros(len(ff_)) )) ).real sc_aud=((s-s.min())/(s.max()-s.min()))*2.-1. return sc_aud*10 #### # ruidos Lambda = 100000 # Lambda sempre par # diferença das frequências entre coeficiêntes vizinhos: df = f_a/float(Lambda) coefs = n.exp(1j*n.random.uniform(0, 2*n.pi, Lambda)) # real par, imaginaria impar coefs[Lambda/2+1:] = n.real(coefs[1:Lambda/2])[::-1] - 1j * \ n.imag(coefs[1:Lambda/2])[::-1] coefs[0] = 0. # sem bias coefs[Lambda/2] = 1. # freq max eh real simplesmente # as frequências relativas a cada coeficiente # acima de Lambda/2 nao vale fi = n.arange(coefs.shape[0])*df f0 = 15. # iniciamos o ruido em 15 Hz i0 = n.floor(f0/df) # primeiro coef a valer coefs[:i0] = n.zeros(i0) f0 = fi[i0] # obtenção do ruído em suas amostras temporais ruido = n.fft.ifft(coefs) r = n.real(ruido) r = ((r-r.min())/(r.max()-r.min()))*2-1 rb=r r = n.int16(r * float(2**15-1)) w.write('branco.wav', f_a, r) fator = 10.**(-6/20.) alphai = fator**(n.log2(fi[i0:]/f0)) c = n.copy(coefs) c[i0:] = c[i0:]*alphai # real par, imaginaria impar c[Lambda/2+1:] = n.real(c[1:Lambda/2])[::-1] - 1j * \ n.imag(c[1:Lambda/2])[::-1] # realizando amostras temporais do ruído marrom ruido = n.fft.ifft(c) r = n.real(ruido) r = ((r-r.min())/(r.max()-r.min()))*2-1 rm=r r = n.int16(r * float(2**15-1)) w.write('marrom.wav', f_a, r) ### 2.53 Ruído azul # para cada oitava, ganhamos 3dB fator = 10.**(3/20.) alphai = fator**(n.log2(fi[i0:]/f0)) c = n.copy(coefs) c[i0:] = c[i0:]*alphai # real par, imaginaria impar c[Lambda/2+1:] = n.real(c[1:Lambda/2])[::-1] - 1j * \ n.imag(c[1:Lambda/2])[::-1] # realizando amostras temporais do ruído azul ruido = n.fft.ifft(c) r = n.real(ruido) r = ((r-r.min())/(r.max()-r.min()))*2-1 ra=r r = n.int16(r * float(2**15-1)) w.write('azul.wav', f_a, r) ### 2.54 Ruido violeta # a cada oitava, ganhamos 6dB fator = 10.**(6/20.) alphai = fator**(n.log2(fi[i0:]/f0)) c = n.copy(coefs) c[i0:] = c[i0:]*alphai # real par, imaginaria impar c[Lambda/2+1:] = n.real(c[1:Lambda/2])[::-1] - 1j * \ n.imag(c[1:Lambda/2])[::-1] ruido = n.fft.ifft(c) r = n.real(ruido) r = ((r-r.min())/(r.max()-r.min()))*2-1 rv=r r = n.int16(r * float(2**15-1)) w.write('violeta.wav', f_a, r) ### 2.51 Ruído rosa # a cada oitava, perde-se 3dB fator = 10.**(-3/20.) 
alphai = fator**(n.log2(fi[i0:]/f0)) c = n.copy(coefs) c[i0:] = coefs[i0:]*alphai # real par, imaginaria impar c[Lambda/2+1:] = n.real(c[1:Lambda/2])[::-1] - 1j * \ n.imag(c[1:Lambda/2])[::-1] ruido = n.fft.ifft(c) r = n.real(ruido) r = ((r-r.min())/(r.max()-r.min()))*2-1 rr=r r = n.int16(r * float(2**15-1)) w.write('rosa.wav', f_a, r) fator = 10.**(-9/20.) alphai = fator**(n.log2(fi[i0:]/f0)) c = n.copy(coefs) c[i0:] = c[i0:]*alphai # real par, imaginaria impar c[Lambda/2+1:] = n.real(c[1:Lambda/2])[::-1] - 1j * \ n.imag(c[1:Lambda/2])[::-1] # realizando amostras temporais do ruído marrom ruido = n.fft.ifft(c) r = n.real(ruido) r = ((r-r.min())/(r.max()-r.min()))*2-1 rp=r r = n.int16(r * float(2**15-1)) w.write('preto.wav', f_a, r) #w.write('respira.wav', f_a, N(H(( # rr[:int(f_a*.5)], # rm[:int(f_a*.5)], # rr[:int(f_a*.5)], # rm[:int(f_a*.5)], # rr[:int(f_a*.5)], # rm[:int(f_a*.5)], # )))) # #w.write('respira2.wav', f_a, N(H(( # rp[:int(f_a*.5)], # rm[:int(f_a*.5)], # rp[:int(f_a*.5)], # rm[:int(f_a*.5)], # rp[:int(f_a*.5)], # rm[:int(f_a*.5)], # )))) # # #w.write('respira3.wav', f_a, N(H(( # adsr(rr[:int(f_a*.5)],S=-.5,A=360.), # 5.*adsr(rm[:int(f_a*.5)],S=-.5,A=360.), # adsr(rr[:int(f_a*.5)],S=-.5,A=360.), # 5.*adsr(rm[:int(f_a*.5)],S=-.5,A=360.), # adsr(rr[:int(f_a*.5)],S=-.5,A=360.), # 5.*adsr(rm[:int(f_a*.5)],S=-.5,A=360.), # )))) # # #w.write('respira4.wav', f_a, N(H(( # adsr(rr[:int(f_a*.5)],S=-.5,A=360.), # adsr(rb[:int(f_a*.5)],S=-.5,A=360.), # adsr(rr[:int(f_a*.5)],S=-.5,A=360.), # adsr(rb[:int(f_a*.5)],S=-.5,A=360.), # adsr(rr[:int(f_a*.5)],S=-.5,A=360.), # adsr(rb[:int(f_a*.5)],S=-.5,A=360.), # )))) # # #w.write('respira5.wav', f_a, N(H(( # adsr(rr[:int(f_a*.5)],S=-.5,A=360.), # adsr(rv[:int(f_a*.5)],S=-.5,A=360.), # adsr(rr[:int(f_a*.5)],S=-.5,A=360.), # adsr(rv[:int(f_a*.5)],S=-.5,A=360.), # adsr(rr[:int(f_a*.5)],S=-.5,A=360.), # adsr(rv[:int(f_a*.5)],S=-.5,A=360.), # )))) # # #w.write('respira6.wav', f_a, N(H(( # adsr(rr[:int(f_a*.2)],S=-.5,A=160.,R=10.), # adsr(rv[:int(f_a*.2)],S=-.5,A=160.,R=10.), # adsr(rr[:int(f_a*.2)],S=-.5,A=160.,R=10.), # adsr(rv[:int(f_a*.2)],S=-.5,A=160.,R=10.), # adsr(rr[:int(f_a*.2)],S=-.5,A=160.,R=10.), # adsr(rv[:int(f_a*.2)],S=-.5,A=160.,R=10.), # adsr(rr[:int(f_a*.2)],S=-.5,A=160.,R=10.), # adsr(rv[:int(f_a*.2)],S=-.5,A=160.,R=10.), # adsr(rr[:int(f_a*.2)],S=-.5,A=160.,R=10.), # adsr(rv[:int(f_a*.2)],S=-.5,A=160.,R=10.), # adsr(rr[:int(f_a*.2)],S=-.5,A=160.,R=10.), # adsr(rv[:int(f_a*.2)],S=-.5,A=160.,R=10.), # adsr(rr[:int(f_a*.2)],S=-.5,A=160.,R=10.), # adsr(rv[:int(f_a*.2)],S=-.5,A=160.,R=10.), # adsr(rr[:int(f_a*.2)],S=-.5,A=160.,R=10.), # adsr(rv[:int(f_a*.2)],S=-.5,A=160.,R=10.), # )))) # # #f0=110. #s=n.zeros(4*f_a) #kk=(2*n.pi/10)*2. # uma volta #aa=20. # 10. dB #for i in xrange(10): # 10 harmonicas # ff=f0*(1+i) # n_oitavas=n.log2(ff/f0) # a_=10.**((n_oitavas*(-25.+aa*n.cos(kk*i))/20.)) # s+=v(f=ff,d=4.,nu=0.)*a_ # #w.write('pisca.wav', f_a, N(H(( # s[:f_a/2], n.zeros(f_a/2), # s[:f_a/2], n.zeros(f_a/2), # s[:f_a/2], n.zeros(f_a/2), # s[:f_a/2], n.zeros(f_a/2), # )))) # # # #f0=1100. #s=n.zeros(4*f_a) #kk=(2*n.pi/10)*2. # uma volta #aa=20. # 10. dB #for i in xrange(10): # 10 harmonicas # ff=f0*(1+i) # n_oitavas=n.log2(ff/f0) # a_=10.**((n_oitavas*(-25.+aa*n.cos(kk*i))/20.)) # s+=v(f=ff,d=4.,nu=0.)*a_ # #w.write('pisca2.wav', f_a, N(H(( # s[:f_a/2], n.zeros(f_a/2), # s[:f_a/2], n.zeros(f_a/2), # s[:f_a/2], n.zeros(f_a/2), # s[:f_a/2], n.zeros(f_a/2), # )))) # # # #f0=11000. #s=n.zeros(4*f_a) #kk=(2*n.pi/10)*2. # uma volta #aa=20. 
# 10. dB #for i in xrange(10): # 10 harmonicas # ff=f0*(1+i) # n_oitavas=n.log2(ff/f0) # a_=10.**((n_oitavas*(-25.+aa*n.cos(kk*i))/20.)) # s+=v(f=ff,d=4.,nu=0.)*a_ # #w.write('pisca3.wav', f_a, N(H(( # s[:f_a/2], n.zeros(f_a/2), # s[:f_a/2], n.zeros(f_a/2), # s[:f_a/2], n.zeros(f_a/2), # s[:f_a/2], n.zeros(f_a/2), # )))) # # # #f0=410. #s=n.zeros(4*f_a) #kk=(2*n.pi/10)*2. # uma volta #aa=20. # 10. dB #for i in xrange(10): # 10 harmonicas # ff=f0*(1+i) # n_oitavas=n.log2(ff/f0) # a_=10.**((n_oitavas*(-25.+aa*n.cos(kk*i))/20.)) # s+=adsr(v(f=ff,d=4.,nu=0.)*a_,S=-5.) # #w.write('pisca4.wav', f_a, N(H(( # s[:f_a/2], n.zeros(f_a/2), # s[:f_a/2], n.zeros(f_a/2), # s[:f_a/2], n.zeros(f_a/2), # s[:f_a/2], n.zeros(f_a/2), # )))) ##### PISCA TTMPPC #f0=110. #s=n.zeros(4*f_a) #kk=(2*n.pi/10)*2. # uma volta #aa=20. # 10. dB #for i in xrange(10): # 10 harmonicas # ff=f0*(1+i) # n_oitavas=n.log2(ff/f0) # a_=10.**((n_oitavas*(-25.+aa*n.cos(kk*i))/20.)) # s+=v(f=ff,d=4.,nu=0.)*a_ # #w.write('pisca_.wav', f_a, N(H(( # s[:f_a/8], n.zeros(f_a/2), # )))) # # # #f0=1100. #s=n.zeros(4*f_a) #kk=(2*n.pi/10)*2. # uma volta #aa=20. # 10. dB #for i in xrange(10): # 10 harmonicas # ff=f0*(1+i) # n_oitavas=n.log2(ff/f0) # a_=10.**((n_oitavas*(-25.+aa*n.cos(kk*i))/20.)) # s+=v(f=ff,d=4.,nu=0.)*a_ # #w.write('pisca2_.wav', f_a, N(H(( # s[:f_a/8], n.zeros(f_a/2), # )))) # # # #f0=11000. #s=n.zeros(4*f_a) #kk=(2*n.pi/10)*2. # uma volta #aa=20. # 10. dB #for i in xrange(10): # 10 harmonicas # ff=f0*(1+i) # n_oitavas=n.log2(ff/f0) # a_=10.**((n_oitavas*(-25.+aa*n.cos(kk*i))/20.)) # s+=v(f=ff,d=4.,nu=0.)*a_ # #w.write('pisca3_.wav', f_a, N(H(( # s[:f_a/8], n.zeros(f_a/2), # )))) # # # #f0=410. #s=n.zeros(4*f_a) #kk=(2*n.pi/10)*2. # uma volta #aa=20. # 10. dB #for i in xrange(10): # 10 harmonicas # ff=f0*(1+i) # n_oitavas=n.log2(ff/f0) # a_=10.**((n_oitavas*(-25.+aa*n.cos(kk*i))/20.)) # s+=adsr(v(f=ff,d=4.,nu=0.)*a_,S=-5.) # #w.write('pisca4_.wav', f_a, N(H(( # s[:f_a/8], n.zeros(f_a/2), # )))) # ##### END TTMPPC w.write('comendo6.wav', f_a, N(fala("O melhor que voce faz com a sua boca, eh servir de toca, para outra cabessa. Nao que voce meressa, esta oportunidade, que vem com a idade, de se curtir em mim.",ss=3500))) w.write('comendo7.wav', f_a, N(fala("Diga aonde voce vai, que eu vou varrendo, diga aonda voce vai, que eu vou varrendo. Vou varrendo, vou varrendo vou varrendo. 
Vou varrendo, vou varrendo, vou varrendo.",ss=3500))) # # #w.write('comendo.wav', f_a, N(fala("mahnamnahamhahamnahamhanhamnanhnahamha"))) #w.write('comendo2.wav', f_a, N(fala("manamnaamaamnaamanamnannaama"))) #w.write('comendo3.wav', f_a, N(fala("mnmnmmnmnmnnnm"))) #w.write('comendo4.wav', f_a, N(fala("mnmnmm nmnm nn nmnmnmn"))) #w.write('comendo5.wav', f_a, N(fala("mnhmnhmm nhmhnm nn nhmhnmhnhmn"))) # # #w.write('chorando_.wav', f_a, N(fala("bbbbuaaa bbbbbuaaa bbbbuaaa bbbuaaa"))) # # #w.write('chorando_2.wav', f_a, N(fala("buaaa bbuaaa buaaa buaaa"))) # # # #w.write('chorando_3.wav', f_a, N(fala("buaaa nheee ee ee nheeee e eeeee bbuaaa buaaa nheeeee eee eeeee buaaa"))) # # #w.write('chorando_4.wav', f_a, N(fala("buaaa nheee ee hhh hhh hhh ee nheeehhhh h hh hhe e eeeee bbuhhh h hh haaa buaaa nhhhh hhh eeeee eee hhhhhh h heeeee buaaa"))) # w.write('coma.wav', f_a, N(H(( v(f=1000.*(3./2),nu=0.,d=0.5), n.zeros(int(f_a*0.5)), v(f=1000.*(3./2),nu=0.,d=0.5), n.zeros(int(f_a*0.5)), v(f=1000.*(3./2),nu=0.,d=0.5), n.zeros(int(f_a*0.5)), v(f=1000.*(3./2),nu=0.,d=0.5), n.zeros(int(f_a*0.5)), )),.3)) w.write('coma2.wav', f_a, N(H(( v(f=1000.*2.*(3./2),nu=0.,d=0.5), n.zeros(int(f_a*0.5)), v(f=1000.*2.*(3./2),nu=0.,d=0.5), n.zeros(int(f_a*0.5)), v(f=1000.*2.*(3./2),nu=0.,d=0.5), n.zeros(int(f_a*0.5)), v(f=1000.*2.*(3./2),nu=0.,d=0.5), n.zeros(int(f_a*0.5)), )),.3)) w.write('coma3.wav', f_a, N(H(( v(f=1000.*2.*(3./2),nu=0.,d=0.1), v(f=1000.*2.*(3./2),nu=0.,d=0.3), n.zeros(int(f_a*1.5)), v(f=1000.*2.*(3./2),nu=0.,d=0.1), v(f=1000.*2.*(3./2),nu=0.,d=0.3), n.zeros(int(f_a*1.5)), v(f=1000.*2.*(3./2),nu=0.,d=0.1), v(f=1000.*2.*(3./2),nu=0.,d=0.3), n.zeros(int(f_a*1.5)), v(f=1000.*2.*(3./2),nu=0.,d=0.1), v(f=1000.*2.*(3./2),nu=0.,d=0.3), n.zeros(int(f_a*1.5)), )),.3)) w.write('coma4.wav', f_a, N(H(( v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*0.1)), v(f=1000.*2.*(3./2),nu=0.,d=0.3), n.zeros(int(f_a*1.5)), v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*0.1)), v(f=1000.*2.*(3./2),nu=0.,d=0.3), n.zeros(int(f_a*1.5)), v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*0.1)), v(f=1000.*2.*(3./2),nu=0.,d=0.3), n.zeros(int(f_a*1.5)), v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*0.1)), v(f=1000.*2.*(3./2),nu=0.,d=0.3), n.zeros(int(f_a*1.5)), )),.3)) w.write('coma5.wav', f_a, N(H(( v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*1.5)), v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*1.5)), v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*1.5)), v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*1.5)), )),.3)) w.write('coma6.wav', f_a, N(H(( v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*2.5)), v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*2.5)), v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*2.5)), v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*2.5)), )),.3)) w.write('coma7.wav', f_a, N(H(( v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*3.5)), v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*3.5)), v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*3.5)), v(f=1000.*2.*(3./2),nu=0.,d=0.1), n.zeros(int(f_a*3.5)), )),.3)) w.write('coma8.wav', f_a, N(H(( v(f=1000.*2.*(3./2),nu=2.,d=0.1,tab=Tr_i), n.zeros(int(f_a*3.5)), v(f=1000.*2.*(3./2),nu=2.,d=0.1,tab=Tr_i), n.zeros(int(f_a*3.5)), v(f=1000.*2.*(3./2),nu=2.,d=0.1,tab=Tr_i), n.zeros(int(f_a*3.5)), v(f=1000.*2.*(3./2),nu=2.,d=0.1,tab=Tr_i), n.zeros(int(f_a*3.5)), )),.3)) w.write('respira7.wav', f_a, N(H(( adsr(rr[ :int(f_a*1.5)],S=-.5,A=360.), 5.*adsr(rm[:int(f_a*1.5)],S=-.5,A=360.), adsr(rr[ :int(f_a*1.5)],S=-.5,A=360.), 
5.*adsr(rm[:int(f_a*1.5)],S=-.5,A=360.), adsr(rr[ :int(f_a*1.5)],S=-.5,A=360.), 5.*adsr(rm[:int(f_a*1.5)],S=-.5,A=360.), )))) w.write('respira8.wav', f_a, N(H(( adsr(rr[ :int(f_a*2.5)],S=-.5,A=360.), 5.*adsr(rm[:int(f_a*2.5)],S=-.5,A=360.), adsr(rr[ :int(f_a*2.5)],S=-.5,A=360.), 5.*adsr(rm[:int(f_a*2.5)],S=-.5,A=360.), adsr(rr[ :int(f_a*2.5)],S=-.5,A=360.), 5.*adsr(rm[:int(f_a*2.5)],S=-.5,A=360.), )))) w.write('respira9.wav', f_a, N(H(( adsr(rr[ :int(f_a*2.5)],S=-.5,A=1160.), adsr(rb[:int(f_a*2.5)],S=-.5,A=1160.), adsr(rr[ :int(f_a*2.5)],S=-.5,A=1160.), adsr(rb[:int(f_a*2.5)],S=-.5,A=1160.), adsr(rr[ :int(f_a*2.5)],S=-.5,A=1160.), adsr(rb[:int(f_a*2.5)],S=-.5,A=1160.), )))) w.write('respira91.wav', f_a, N(H(( adsr(ra[ :int(f_a*2.5)],S=-.5,A=1160.), adsr(rb[:int(f_a*2.5)],S=-.5,A=1160.), adsr(ra[ :int(f_a*2.5)],S=-.5,A=1160.), adsr(rb[:int(f_a*2.5)],S=-.5,A=1160.), adsr(ra[ :int(f_a*2.5)],S=-.5,A=1160.), adsr(rb[:int(f_a*2.5)],S=-.5,A=1160.), )))) w.write('respira92.wav', f_a, N(H(( adsr(ra[ :int(f_a*2.5)],S=-.5,A=1160.), adsr(rv[:int(f_a*2.5)],S=-.5,A=1160.), adsr(ra[ :int(f_a*2.5)],S=-.5,A=1160.), adsr(rv[:int(f_a*2.5)],S=-.5,A=1160.), adsr(ra[ :int(f_a*2.5)],S=-.5,A=1160.), adsr(rv[:int(f_a*2.5)],S=-.5,A=1160.), )))) w.write('dormindo.wav', f_a, N(H(( adsr(ra[ :int(f_a*2.5)],S=-.5,A=1160.), adsr(rv[:int(f_a*2.5)],S=-.5,A=1160.), adsr(ra[ :int(f_a*2.5)],S=-.5,A=1160.), adsr(rv[:int(f_a*2.5)],S=-.5,A=1160.), adsr(ra[ :int(f_a*2.5)],S=-.5,A=1160.), adsr(rv[:int(f_a*2.5)],S=-.5,A=1160.), )))) # arroto3 arroto6 arroto 9 92 w.write('dormindo2.wav', f_a, N(H(( adsr(ra,S=-.5,A=1760.), adsr(rv,S=-.5,A=1760.), adsr(ra,S=-.5,A=1760.), adsr(rv,S=-.5,A=1760.), adsr(ra,S=-.5,A=1760.), adsr(rv,S=-.5,A=1760.), )))) w.write('dormindo2.wav', f_a, N(H(( adsr(ra,S=-.5,A=1760.), adsr(rv,S=-.5,A=1760.), adsr(ra,S=-.5,A=1760.), adsr(rv,S=-.5,A=1760.), adsr(ra,S=-.5,A=1760.), adsr(rv,S=-.5,A=1760.), )))) ronco=H(( adsr(rp[:int(f_a*0.040)],A=3.,S=-3.,R=10.), adsr(rp[:int(f_a*0.040)],A=3.,S=-3.,R=10.), adsr(rp[:int(f_a*0.040)],A=3.,S=-3.,R=10.), adsr(rp[:int(f_a*0.040)],A=3.,S=-3.,R=10.), adsr(rp[:int(f_a*0.040)],A=3.,S=-3.,R=10.), adsr(rp[:int(f_a*0.040)],A=3.,S=-3.,R=10.), adsr(rp[:int(f_a*0.040)],A=3.,S=-3.,R=10.), )) w.write('dormindo3.wav', f_a, N(H(( ronco,n.zeros(f_a), ronco,n.zeros(f_a), ronco,n.zeros(f_a), ronco,n.zeros(f_a), ronco,n.zeros(f_a), )))) w.write('dormindo4.wav', f_a, N(H(( adsr(ra,S=-.5,A=1760.),ronco,n.zeros(f_a), adsr(rv,S=-.5,A=1760.),ronco,n.zeros(f_a), adsr(ra,S=-.5,A=1760.),ronco,n.zeros(f_a), adsr(rv,S=-.5,A=1760.),ronco,n.zeros(f_a), adsr(ra,S=-.5,A=1760.),ronco,n.zeros(f_a), adsr(rv,S=-.5,A=1760.),ronco,n.zeros(f_a), )))) w.write('dormindo5.wav', f_a, N(H(( adsr(ra,S=-.5,A=1760.),10*ronco,n.zeros(f_a), adsr(rv,S=-.5,A=1760.),10*ronco,n.zeros(f_a), adsr(ra,S=-.5,A=1760.),10*ronco,n.zeros(f_a), adsr(rv,S=-.5,A=1760.),10*ronco,n.zeros(f_a), adsr(ra,S=-.5,A=1760.),10*ronco,n.zeros(f_a), adsr(rv,S=-.5,A=1760.),10*ronco,n.zeros(f_a), )))) w.write('dormindo6.wav', f_a, N(H(( adsr(ra,S=-.5,A=1760.),5*ronco,n.zeros(f_a), adsr(rv,S=-.5,A=1760.),5*ronco,n.zeros(f_a), adsr(ra,S=-.5,A=1760.),5*ronco,n.zeros(f_a), adsr(rv,S=-.5,A=1760.),5*ronco,n.zeros(f_a), adsr(ra,S=-.5,A=1760.),5*ronco,n.zeros(f_a), adsr(rv,S=-.5,A=1760.),5*ronco,n.zeros(f_a), )))) w.write('dormindo7.wav', f_a, N(H(( adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a), adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a), 
adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a), adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a), adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a), adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a), )))) ronco2=H(( adsr(rm[:int(f_a*0.040)],A=5.,S=-3.,R=10.), adsr(rm[:int(f_a*0.040)],A=5.,S=-3.,R=10.), adsr(rm[:int(f_a*0.040)],A=5.,S=-3.,R=10.), adsr(rm[:int(f_a*0.040)],A=5.,S=-3.,R=10.), adsr(rm[:int(f_a*0.040)],A=5.,S=-3.,R=10.), adsr(rm[:int(f_a*0.040)],A=5.,S=-3.,R=10.), adsr(rm[:int(f_a*0.040)],A=5.,S=-3.,R=10.), )) w.write('dormindo8.wav', f_a, N(H(( adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a),ronco2, adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a),ronco2, adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a),ronco2, adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a),ronco2, adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a),ronco2, adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),n.zeros(f_a),ronco2, )))) w.write('dormindo9.wav', f_a, N(H(( adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.,nu=3.,fv=.2),ronco2, adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.,nu=3.,fv=.2),ronco2, adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.,nu=3.,fv=.2),ronco2, adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.,nu=3.,fv=.2),ronco2, adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.,nu=3.,fv=.2),ronco2, adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.,nu=3.,fv=.2),ronco2, )))) w.write('dormindo91.wav', f_a, N(H(( adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i),ronco2, adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i),ronco2, adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i),ronco2, adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i),ronco2, adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i),ronco2, adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i),ronco2, )))) w.write('dormindo92.wav', f_a, N(H(( adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i[::-1]),ronco2, adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i[::-1]),ronco2, adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i[::-1]),ronco2, adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i[::-1]),ronco2, adsr(ra,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i[::-1]),ronco2, adsr(rv,S=-.5,A=1760.)+H((n.zeros(len(ra)-len(ronco)),5*ronco)),v(440.*2,nu=3.,fv=.2,tabv=D_i[::-1]),ronco2, )))) w.write('porta_abre.wav', f_a, N(v(200,fv=1./(7*2.),d=1.0,nu=20.))) w.write('porta_abre2.wav', f_a, N(v(800,fv=1./(7*2.),d=1.0,nu=20.))) w.write('porta_abre3.wav', f_a, N(v(800,fv=1.,d=.5,nu=20.,tabv=D_i))) w.write('porta_abre4.wav', f_a, N(v(1800,fv=1.,d=.5,nu=20.,tabv=D_i,tab=Tr_i))) w.write('porta_abre5.wav', f_a, N(v(2800,fv=1.,d=.5,nu=20.,tabv=D_i,tab=Tr_i))) w.write('porta_abre6.wav', f_a, 
N(v(2800,fv=1.,d=.5,nu=2.,tabv=D_i,tab=Tr_i))) w.write('porta_abre7.wav', f_a, N(v(1800,fv=1.,d=.5,nu=20.,tabv=D_i,tab=D_i))) w.write('porta_abre8.wav', f_a, N(v(1800,fv=1.,d=.5,nu=20.,tabv=D_i,tab=Q_i))) w.write('porta_fecha.wav', f_a, N(v(200,fv=1./(7*2.),d=1.0,nu=20. , tabv=S_i*-1))) w.write('porta_fecha2.wav', f_a, N(v(800,fv=1./(7*2.),d=1.0,nu=20. , tabv=S_i*-1))) w.write('porta_fecha3.wav', f_a, N(v(800,fv=1.,d=.5,nu=20.,tabv=D_i))) w.write('porta_fecha4.wav', f_a, N(v(1800,fv=1.,d=.5,nu=20.,tabv=D_i,tab=Tr_i*-1))) w.write('porta_fecha5.wav', f_a, N(v(2800,fv=1.,d=.5,nu=20.,tabv=D_i,tab=Tr_i*-1))) w.write('porta_fecha6.wav', f_a, N(v(2800,fv=1.,d=.5,nu=2.,tabv=D_i,tab=Tr_i *-1))) w.write('porta_fecha7.wav', f_a, N(v(1800,fv=1.,d=.5,nu=20.,tabv=D_i,tab=D_i *-1))) w.write('porta_fecha8.wav', f_a, N(v(1800,fv=1.,d=.5,nu=20.,tabv=D_i,tab=Q_i *-1))) w.write('clique.wav', f_a, N(n.array([0]*100+[1]+[0]*10000))) w.write('clique2.wav', f_a, N(adsr(v(fv=20,d=.2),S=-3.))) w.write('clique3.wav', f_a, N(adsr(v(fv=20,d=.2,tab=Tr_i),S=-3.))) w.write('clique4.wav', f_a, N(adsr(v(f=1000.,fv=20,d=.2,tab=Tr_i),S=-3.))) w.write('clique5.wav', f_a, N(adsr(v(f=660.,fv=20,d=.2,tab=Tr_i),S=-3.))) w.write('seleciona.wav', f_a, N(adsr(v(f=460.,fv=1.,d=.1,tab=Tr_i),S=-3.,R=10.))) w.write('seleciona2.wav', f_a, N(adsr(v(f=460.,fv=10.,d=.1,tab=Tr_i),S=-3.,R=10.))) w.write('cancela.wav', f_a, N(adsr(v(f=460.,fv=100.,d=.1,tab=Tr_i),S=-3.,R=10.))) w.write('cancela2.wav', f_a, N(adsr(v(f=40.,fv=100.,d=.1,tab=Tr_i),S=-3.,R=10.))) w.write('msgPos.wav', f_a, N(H(( adsr(v(f=440.*(3/4.),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.), adsr(v(f=440.,fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.), )))) w.write('msgNeg.wav', f_a, N(H(( adsr(v(f=440.*(3/4.),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.), adsr(v(f=440.,fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.), adsr(v(f=440.*(3/4.),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.), )))) w.write('msgPos2.wav', f_a, N(H(( adsr(v(f=840.*(3/4.),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.), adsr(v(f=840.,fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.), )))) w.write('msgNeg2.wav', f_a, N(H(( adsr(v(f=840.*(3/4.),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.), adsr(v(f=840.,fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.), adsr(v(f=840.*(3/4.),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.), )))) w.write('msgNeg3.wav', f_a, N(H(( adsr(v(f=840.,fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.), adsr(v(f=840.*(3/4.),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.), )))) w.write('msgPos3.wav', f_a, N(H(( adsr(v(f=840.*(3/4.),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.), adsr(v(f=840.,fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.), adsr(v(f=840.*(2**(4./12)),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.), adsr(v(f=840.,fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.), )))) w.write('msgPos4.wav', f_a, N(H(( adsr(v(f=840.*(3/4.),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.), adsr(v(f=840.,fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.), adsr(v(f=840.*(2**(4./12)),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.), adsr(v(f=840.*(2**(7./12)),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.), adsr(v(f=840.,fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.), )))) w.write('msgNeg4.wav', f_a, N(H(( adsr(v(f=840.,fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.), adsr(v(f=840.*(2**(-6./12)),fv=0.,nu=0.,d=.1,tab=Tr_i),S=-3.,R=10.), )))) w.write('perda.wav', f_a, N(H(( adsr(v(f=840.,fv=0.,nu=0.,d=.1, tab=D_i),S=-3.,R=10.), adsr(v(f=840.*(2**(-6./12)),fv=0.,nu=0.,d=.1,tab=D_i),S=-3.,R=10.), )))) w.write('ganho.wav', f_a, N(H(( adsr(v(f=840.*(2**(-7./12)),fv=0.,nu=0.,d=.1,tab=D_i),S=-3.,R=10.), adsr(v(f=840.,fv=0.,nu=0.,d=.1, tab=D_i),S=-3.,R=10.), )))) 
w.write('ganho2.wav', f_a, N(H(( adsr(v(f=840.,fv=0.,nu=0.,d=.075, tab=D_i),S=-3.,R=10.,A=5.,D=10.), adsr(v(f=840.,fv=0.,nu=0.,d=.025, tab=D_i),S=-3.,R=10.,A=5.,D=10.), adsr(v(f=840.,fv=0.,nu=0.,d=.05, tab=D_i),S=-3.,R=10.,A=5.,D=10.), adsr(v(f=840.,fv=0.,nu=0.,d=.05, tab=D_i),S=-3.,R=5.,A=5.,D=10.), )))) w.write('ganho3.wav', f_a, N(H(( adsr(v(f=240.,fv=0.,nu=0.,d=.75, tab=D_i),S=-9.,R=10.,A=5.,D=610.), adsr(v(f=240.*(2.**(-7/12.)),fv=0.,nu=0.,d=.25, tab=D_i),S=-9.,R=10.,A=5.,D=210.), adsr(v(f=240.*(2.**(4/12.)),fv=0.,nu=0.,d=.5, tab=D_i), S=-9.,R=10.,A=5., D=410.), adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0.,d=.5, tab=D_i), S=-9.,R=5.,A=5., D=410.), )))) w.write('ganho4.wav', f_a, N(H(( adsr(v(f=240.,fv=0.,nu=0.,d=.175, tab=D_i),S=-9.,R=10.,A=5.,D=60.), adsr(v(f=240.*(2.**(-7/12.)),fv=0.,nu=0.,d=.25, tab=D_i),S=-9.,R=10.,A=5.,D=210.), adsr(v(f=240.*(2.**(4/12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=5.,A=5., D=40.), )))) w.write('ganho5.wav', f_a, N(H(( adsr(v(f=240.,fv=0.,nu=0.,d=.175, tab=D_i),S=-9.,R=10.,A=5.,D=60.), adsr(v(f=240.*(2.**(7/12.)),fv=0.,nu=0.,d=.25, tab=D_i),S=-9.,R=10.,A=5.,D=210.), adsr(v(f=240.*(2.**(4/12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=5.,A=5., D=40.), )))) w.write('ganho6.wav', f_a, N(H(( adsr(v(f=240.,fv=0.,nu=0.,d=.175, tab=D_i),S=-9.,R=10.,A=5.,D=60.), adsr(v(f=240.*(2.**(7/12.)),fv=0.,nu=0.,d=.25, tab=D_i),S=-9.,R=10.,A=5.,D=210.), adsr(v(f=240.*(2.**(4/12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(12./12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=5.,A=5., D=40.), )))) w.write('perda2.wav', f_a, N(H(( adsr(v(f=240.,fv=0.,nu=0.,d=.25, tab=D_i),S=-9.,R=10.,A=5.,D=60.)+ adsr(v(f=240.*(2.**(6/12.)),fv=0.,nu=0.,d=.25, tab=D_i),S=-9.,R=10.,A=5.,D=210.), adsr(v(f=240.*(2.**(4/12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=10.,A=5., D=40.)+ adsr(v(f=240.*(2.**(3./12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=5.,A=5., D=40.), )))) w.write('perda3.wav', f_a, N(H(( adsr(v(f=240.,fv=0.,nu=0.,d=.25, tab=D_i),S=-9.,R=10.,A=5.,D=60.)+ adsr(v(f=240.*(2.**(6/12.)),fv=0.,nu=0.,d=.25, tab=D_i),S=-9.,R=10.,A=5.,D=210.), )))) w.write('perda4.wav', f_a, N(H(( adsr(v(f=240.*(2.**(4/12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=10.,A=5., D=40.)+ adsr(v(f=240.*(2.**(3./12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=5.,A=5., D=40.), )))) w.write('perda5.wav', f_a, N(H(( adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=10.,A=5., D=40.)+ adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.105, tab=D_i), S=-9.,R=5.,A=5., D=40.), )))) w.write('ganhoX.wav', f_a, N(H(( adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=D_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.), )))) w.write('ganhoX2.wav', f_a, N(H(( adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.), 
adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(6/12.)),fv=0.,nu=0., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(-1/12.)),fv=0.,nu=0., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(6./12.)),fv=0.,nu=0.,d=.065, tab=D_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.), )))) w.write('ganhoX3.wav', f_a, N(H(( adsr(v(f=240.*(2.**(0/12.)),fv=0. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=10.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(6/12.)),fv=0. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(0/12.)),fv=0. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(-1/12.)),fv=0. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=10.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(6./12.)),fv=0. ,nu=10.,d=.065, tab=D_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(0./12.)),fv=0. ,nu=10.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.), )))) w.write('perdaX4.wav', f_a, N(H(( adsr(v(f=240.*(2.**(0/12.)) , fv=100. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(11./12.)),fv=100.,nu=10.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(6/12.)), fv=100. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(0/12.)) , fv=100. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(-1/12.)), fv=100. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(11./12.)),fv=100.,nu=10.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(6./12.)), fv=100. ,nu=10.,d=.065, tab=D_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(0./12.)), fv=100. ,nu=10.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.), )))) w.write('perdaX5.wav', f_a, N(H(( adsr(v(f=240.*(2.**(0/12.)) , fv=200. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(11./12.)),fv=200.,nu=10.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(6/12.)), fv=200. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(0/12.)) , fv=200. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(-1/12.)), fv=200. ,nu=10., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(11./12.)),fv=200.,nu=10.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(6./12.)), fv=200. ,nu=10.,d=.065, tab=D_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(0./12.)), fv=200. 
,nu=10.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.), )))) w.write('videogame.wav', f_a, N(H(( adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(4./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(7/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(7/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(4/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(7./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.65, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(11/12.)),fv=0.,nu=0., d=.65, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.65, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(6./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(-1./12.)),fv=0.,nu=0.,d=.65, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.5, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(14/12.)),fv=0.,nu=0., d=.065, tab=Q_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(16/12.)),fv=0.,nu=0., d=.5, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(17./12.)),fv=0.,nu=0.,d=.065, tab=Q_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(19./12.)),fv=0.,nu=0.,d=.65, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(12./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), )))) w.write('videogame2.wav', f_a, N(H(( adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065*4, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(4./12.)),fv=0.,nu=0.,d=.0652*2, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(7/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(7/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(4/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(7./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(11/12.)),fv=0.,nu=0., d=.065*4, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(6./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(-1./12.)),fv=0.,nu=0.,d=.065*2, tab=Tr_i), 
S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(14/12.)),fv=0.,nu=0., d=.065*4, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(16/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(17./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(19./12.)),fv=0.,nu=0.,d=.065*2, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(12./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), )))) w.write('videogame3.wav', f_a, N(H(( adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065*4, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(4./12.)),fv=0.,nu=0.,d=.0652*2, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(7/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(7/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(4/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065*3, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(7./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065*3, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(11/12.)),fv=0.,nu=0., d=.065*4, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(6./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(-1./12.)),fv=0.,nu=0.,d=.065*2, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065*3, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(14/12.)),fv=0.,nu=0., d=.065*4, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(16/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(17./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(19./12.)),fv=0.,nu=0.,d=.065*2, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(12./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), )))) w.write('videogame4.wav', f_a, N(H(( adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065*4, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(4./12.)),fv=0.,nu=0.,d=.0652*2, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(7/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(7/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(4/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065*3, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(7./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), 
S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065*5, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065*3, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(11/12.)),fv=0.,nu=0., d=.065*4, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(0./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(2./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(-1./12.)),fv=0.,nu=0.,d=.065*2, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(0/12.)),fv=0.,nu=0., d=.065*3, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(11./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(12/12.)),fv=0.,nu=0., d=.065*4, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(14/12.)),fv=0.,nu=0., d=.065*4, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(16/12.)),fv=0.,nu=0., d=.065, tab=Tr_i), S=-9.,R=10.,A=5., D=40.), adsr(v(f=240.*(2.**(17./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(19./12.)),fv=0.,nu=0.,d=.065*2, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), adsr(v(f=240.*(2.**(12./12.)),fv=0.,nu=0.,d=.065, tab=Tr_i), S=-9.,R=5.,A=5., D=40.), )))) # abre todos os gritoFala* # passa por um passa bandas que soh passa uns medios # salva como tv_gritoFala* # #c = n.zeros(len(coefs)) #c[1000:10000] = n.exp(1j*n.random.uniform(0, 2*n.pi, 9000)) # ## real par, imaginaria impar #c[Lambda/2+1:] = n.real(c[1:Lambda/2])[::-1] - 1j * \ # n.imag(c[1:Lambda/2])[::-1] # #resp_imp= n.fft.ifft(c) #resp_imp_= n.real(resp_imp) #import os # #ll=os.listdir(".") #ll=[lll for lll in ll if "gritoFala" in lll] #for i in ll: # print i # foo=n.convolve(w.read("%s"%(i,))[1],resp_imp) # w.write('tv_%s'%(i,), f_a, N(foo)) # print i #
unlicense
4,547,374,845,288,080,000
45.390826
216
0.425859
false
1.993063
false
false
false
AkihikoITOH/capybara
capybara/capybara.py
1
1091
#!/bin/python
# -*- coding: utf-8 -*-

import os

from amazon_wrapper import AmazonWrapper
from rakuten_wrapper import RakutenWrapper


class Capybara:
    # Thin facade over the per-service API wrappers (Amazon, Rakuten).
    def __init__(self, config_dir=None, tokens_dir=None):
        self.wrappers = {}
        self.wrappers['amazon'] = AmazonWrapper()
        self.wrappers['rakuten'] = RakutenWrapper()
        # Point each wrapper at its service-specific config and token files.
        for service, wrapper in self.wrappers.items():
            config_filename = './%s_config.json' % service
            tokens_filename = './%s_tokens.tsv' % service
            config_path = os.path.normpath(os.path.join(os.getcwd(), config_dir, config_filename))
            tokens_path = os.path.normpath(os.path.join(os.getcwd(), tokens_dir, tokens_filename))
            wrapper.setup(config_path, tokens_path)

    def get(self, service=None, item=None):
        # Delegate the lookup of `item` to the wrapper registered for `service`.
        return self.wrappers[service].access_wrapper({'item': item})

    def isAvailable(self, service=None):
        # A service counts as available when a wrapper is registered for it.
        if service is None:
            return False
        try:
            if self.wrappers[service]:
                return True
        except:
            return False
mit
-303,462,010,463,526,900
33.09375
98
0.613199
false
3.855124
true
false
false
eoinof/stem
test/unit/exit_policy/rule.py
1
10901
""" Unit tests for the stem.exit_policy.ExitPolicyRule class. """ import unittest from stem.exit_policy import AddressType, ExitPolicyRule class TestExitPolicyRule(unittest.TestCase): def test_accept_or_reject(self): self.assertTrue(ExitPolicyRule("accept *:*").is_accept) self.assertFalse(ExitPolicyRule("reject *:*").is_accept) invalid_inputs = ( "accept", "reject", "accept *:*", "accept\t*:*", "accept\n*:*", "acceptt *:*", "rejectt *:*", "blarg *:*", " *:*", "*:*", "", ) for rule_arg in invalid_inputs: self.assertRaises(ValueError, ExitPolicyRule, rule_arg) def test_str_unchanged(self): # provides a series of test inputs where the str() representation should # match the input rule test_inputs = ( "accept *:*", "reject *:*", "accept *:80", "accept *:80-443", "accept 127.0.0.1:80", "accept 87.0.0.1/24:80", "accept 156.5.38.3/255.255.0.255:80", "accept [FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF]:80", "accept [FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF]/32:80", ) for rule_arg in test_inputs: rule = ExitPolicyRule(rule_arg) self.assertEquals(rule_arg, rule.rule) self.assertEquals(rule_arg, str(rule)) def test_str_changed(self): # some instances where our rule is valid but won't match our str() representation test_inputs = { "accept 10.0.0.1/32:80": "accept 10.0.0.1:80", "accept 192.168.0.1/255.255.255.0:80": "accept 192.168.0.1/24:80", "accept [::]/32:*": "accept [0000:0000:0000:0000:0000:0000:0000:0000]/32:*", "accept [::]/128:*": "accept [0000:0000:0000:0000:0000:0000:0000:0000]:*", } for rule_arg, expected_str in test_inputs.items(): rule = ExitPolicyRule(rule_arg) self.assertEquals(rule_arg, rule.rule) self.assertEquals(expected_str, str(rule)) def test_valid_wildcard(self): test_inputs = { "reject *:*": (True, True), "reject *:80": (True, False), "accept 192.168.0.1:*": (False, True), "accept 192.168.0.1:80": (False, False), "reject 127.0.0.1/0:*": (False, True), "reject 127.0.0.1/16:*": (False, True), "reject 127.0.0.1/32:*": (False, True), "reject [0000:0000:0000:0000:0000:0000:0000:0000]/0:80": (False, False), "reject [0000:0000:0000:0000:0000:0000:0000:0000]/64:80": (False, False), "reject [0000:0000:0000:0000:0000:0000:0000:0000]/128:80": (False, False), "accept 192.168.0.1:0-65535": (False, True), "accept 192.168.0.1:1-65535": (False, True), "accept 192.168.0.1:2-65535": (False, False), "accept 192.168.0.1:1-65534": (False, False), } for rule_arg, attr in test_inputs.items(): is_address_wildcard, is_port_wildcard = attr rule = ExitPolicyRule(rule_arg) self.assertEquals(is_address_wildcard, rule.is_address_wildcard()) self.assertEquals(is_port_wildcard, rule.is_port_wildcard()) def test_invalid_wildcard(self): test_inputs = ( "reject */16:*", "reject 127.0.0.1/*:*", "reject *:0-*", "reject *:*-15", ) for rule_arg in test_inputs: self.assertRaises(ValueError, ExitPolicyRule, rule_arg) def test_wildcard_attributes(self): rule = ExitPolicyRule("reject *:*") self.assertEquals(AddressType.WILDCARD, rule.address_type) self.assertEquals(None, rule.address) self.assertEquals(None, rule.mask) self.assertEquals(None, rule.masked_bits) self.assertEquals(1, rule.min_port) self.assertEquals(65535, rule.max_port) def test_valid_ipv4_addresses(self): test_inputs = { "0.0.0.0": ("0.0.0.0", "255.255.255.255", 32), "127.0.0.1/32": ("127.0.0.1", "255.255.255.255", 32), "192.168.0.50/24": ("192.168.0.50", "255.255.255.0", 24), "255.255.255.255/0": ("255.255.255.255", "0.0.0.0", 0), } for rule_addr, attr in test_inputs.items(): address, mask, masked_bits = attr rule = ExitPolicyRule("accept %s:*" % rule_addr) 
self.assertEquals(AddressType.IPv4, rule.address_type) self.assertEquals(address, rule.address) self.assertEquals(mask, rule.mask) self.assertEquals(masked_bits, rule.masked_bits) def test_invalid_ipv4_addresses(self): test_inputs = ( "256.0.0.0", "-1.0.0.0", "0.0.0", "0.0.0.", "0.0.0.a", "127.0.0.1/-1", "127.0.0.1/33", ) for rule_addr in test_inputs: self.assertRaises(ValueError, ExitPolicyRule, "accept %s:*" % rule_addr) def test_valid_ipv6_addresses(self): test_inputs = { "[fe80:0000:0000:0000:0202:b3ff:fe1e:8329]": ("FE80:0000:0000:0000:0202:B3FF:FE1E:8329", "FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF", 128), "[FE80::0202:b3ff:fe1e:8329]": ("FE80:0000:0000:0000:0202:B3FF:FE1E:8329", "FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF", 128), "[0000:0000:0000:0000:0000:0000:0000:0000]/0": ("0000:0000:0000:0000:0000:0000:0000:0000", "0000:0000:0000:0000:0000:0000:0000:0000", 0), "[::]": ("0000:0000:0000:0000:0000:0000:0000:0000", "FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF", 128), } for rule_addr, attr in test_inputs.items(): address, mask, masked_bits = attr rule = ExitPolicyRule("accept %s:*" % rule_addr) self.assertEquals(AddressType.IPv6, rule.address_type) self.assertEquals(address, rule.address) self.assertEquals(mask, rule.mask) self.assertEquals(masked_bits, rule.masked_bits) def test_invalid_ipv6_addresses(self): test_inputs = ( "fe80::0202:b3ff:fe1e:8329", "[fe80::0202:b3ff:fe1e:8329", "fe80::0202:b3ff:fe1e:8329]", "[fe80::0202:b3ff:fe1e:832g]", "[fe80:::b3ff:fe1e:8329]", "[fe80::b3ff::fe1e:8329]", "[fe80::0202:b3ff:fe1e:8329]/-1", "[fe80::0202:b3ff:fe1e:8329]/129", ) for rule_addr in test_inputs: self.assertRaises(ValueError, ExitPolicyRule, "accept %s:*" % rule_addr) def test_valid_ports(self): test_inputs = { "0": (0, 0), "1": (1, 1), "80": (80, 80), "80-443": (80, 443), } for rule_port, attr in test_inputs.items(): min_port, max_port = attr rule = ExitPolicyRule("accept 127.0.0.1:%s" % rule_port) self.assertEquals(min_port, rule.min_port) self.assertEquals(max_port, rule.max_port) def test_invalid_ports(self): test_inputs = ( "65536", "a", "5-3", "5-", "-3", ) for rule_port in test_inputs: self.assertRaises(ValueError, ExitPolicyRule, "accept 127.0.0.1:%s" % rule_port) def test_is_match_wildcard(self): test_inputs = { "reject *:*": { ("192.168.0.1", 80): True, ("0.0.0.0", 80): True, ("255.255.255.255", 80): True, ("FE80:0000:0000:0000:0202:B3FF:FE1E:8329", 80): True, ("[FE80:0000:0000:0000:0202:B3FF:FE1E:8329]", 80): True, ("192.168.0.1", None): True, (None, 80): True, (None, None): True, }, "reject 255.255.255.255/0:*": { ("192.168.0.1", 80): True, ("0.0.0.0", 80): True, ("255.255.255.255", 80): True, ("FE80:0000:0000:0000:0202:B3FF:FE1E:8329", 80): False, ("[FE80:0000:0000:0000:0202:B3FF:FE1E:8329]", 80): False, ("192.168.0.1", None): True, (None, 80): False, (None, None): False, }, } for rule_arg, matches in test_inputs.items(): rule = ExitPolicyRule(rule_arg) for match_args, expected_result in matches.items(): self.assertEquals(expected_result, rule.is_match(*match_args)) # port zero is special in that exit policies can include it, but it's not # something that we can match against rule = ExitPolicyRule("reject *:*") self.assertRaises(ValueError, rule.is_match, "127.0.0.1", 0) def test_is_match_ipv4(self): test_inputs = { "reject 192.168.0.50:*": { ("192.168.0.50", 80): True, ("192.168.0.51", 80): False, ("192.168.0.49", 80): False, (None, 80): False, ("192.168.0.50", None): True, }, "reject 0.0.0.0/24:*": { ("0.0.0.0", 80): True, ("0.0.0.1", 80): True, ("0.0.0.255", 80): True, 
("0.0.1.0", 80): False, ("0.1.0.0", 80): False, ("1.0.0.0", 80): False, (None, 80): False, ("0.0.0.0", None): True, }, } for rule_arg, matches in test_inputs.items(): rule = ExitPolicyRule(rule_arg) for match_args, expected_result in matches.items(): self.assertEquals(expected_result, rule.is_match(*match_args)) def test_is_match_ipv6(self): test_inputs = { "reject [FE80:0000:0000:0000:0202:B3FF:FE1E:8329]:*": { ("FE80:0000:0000:0000:0202:B3FF:FE1E:8329", 80): True, ("fe80:0000:0000:0000:0202:b3ff:fe1e:8329", 80): True, ("[FE80:0000:0000:0000:0202:B3FF:FE1E:8329]", 80): True, ("FE80:0000:0000:0000:0202:B3FF:FE1E:8330", 80): False, ("FE80:0000:0000:0000:0202:B3FF:FE1E:8328", 80): False, (None, 80): False, ("FE80:0000:0000:0000:0202:B3FF:FE1E:8329", None): True, }, "reject [FE80:0000:0000:0000:0202:B3FF:FE1E:8329]/112:*": { ("FE80:0000:0000:0000:0202:B3FF:FE1E:8329", 80): True, ("FE80:0000:0000:0000:0202:B3FF:FE1E:0000", 80): True, ("FE80:0000:0000:0000:0202:B3FF:FE1E:FFFF", 80): True, ("FE80:0000:0000:0000:0202:B3FF:FE1F:8329", 80): False, ("FE81:0000:0000:0000:0202:B3FF:FE1E:8329", 80): False, (None, 80): False, ("FE80:0000:0000:0000:0202:B3FF:FE1E:8329", None): True, }, } for rule_arg, matches in test_inputs.items(): rule = ExitPolicyRule(rule_arg) for match_args, expected_result in matches.items(): self.assertEquals(expected_result, rule.is_match(*match_args)) def test_is_match_port(self): test_inputs = { "reject *:80": { ("192.168.0.50", 80): True, ("192.168.0.50", 81): False, ("192.168.0.50", 79): False, (None, 80): True, ("192.168.0.50", None): False, }, "reject *:80-85": { ("192.168.0.50", 79): False, ("192.168.0.50", 80): True, ("192.168.0.50", 83): True, ("192.168.0.50", 85): True, ("192.168.0.50", 86): False, (None, 83): True, ("192.168.0.50", None): False, }, } for rule_arg, matches in test_inputs.items(): rule = ExitPolicyRule(rule_arg) for match_args, expected_result in matches.items(): self.assertEquals(expected_result, rule.is_match(*match_args))
lgpl-3.0
8,623,108,772,148,325,000
32.336391
86
0.566278
false
2.887682
true
false
false
mskwark/PconsC3
extra/arne/MSA/find-intradom.py
1
1381
#!/usr/bin/env python
# Find all contacts between domains.
# NOTE: the original script was left unfinished (it used undefined variables and
# broke off at "if c_x < domain"); the argument handling, the positional contact-file
# argument, and the boundary check below are a minimal, assumed completion so that
# the file parses and runs.

import sys, os, re, string
import argparse
from os.path import expanduser

home = expanduser("~")
sys.path.append(home + '/bioinfo-toolbox/parsing')
sys.path.append(home + '/git/bioinfo-toolbox/parsing')

import parse_contacts
import numpy as np
import matplotlib
matplotlib.use('Agg')


def find_interdomain_contacts(c_filename, start, end, sep, domain):
    contacts = parse_contacts.parse(open(c_filename, 'r'), sep)
    contacts_np = parse_contacts.get_numpy_cmap(contacts)
    contacts_np = contacts_np[start:end, start:end]

    for i in range(len(contacts)):
        score = contacts[i][0]
        c_x = contacts[i][1] - 1
        c_y = contacts[i][2] - 1

        # only look at contacts within given range
        # default: take full sequence range into account
        if c_x < start or c_x >= end:
            continue
        if c_y < start or c_y >= end:
            continue

        # skip residue pairs that are trivially close in sequence
        pos_diff = abs(c_x - c_y)
        too_close = pos_diff < sep
        if too_close:
            continue

        # Assumed reading of the unfinished "if c_x < domain" line: report contacts
        # that bridge the (single) domain boundary given by --domain.
        if c_x < domain <= c_y:
            print score, c_x + 1, c_y + 1


if __name__ == "__main__":
    p = argparse.ArgumentParser(description='Plot protein residue contact maps.')
    p.add_argument('contact_file')  # assumed positional argument for the contact map file
    p.add_argument('-t', '--threshold', default=-1, type=float)
    p.add_argument('--start', default=0, type=int)
    p.add_argument('--end', default=-1, type=int)
    p.add_argument('--sep', default=5, type=int)
    p.add_argument('--domain', default=-1, type=int)
    args = p.parse_args()
    find_interdomain_contacts(args.contact_file, args.start, args.end, args.sep, args.domain)
gpl-2.0
-7,908,435,588,187,494,000
26.078431
81
0.631427
false
3.204176
false
false
false
soroushmehr/sampleRNN_ICLR2017
models/three_tier/three_tier.py
1
35718
""" RNN Audio Generation Model Three-tier model, Quantized input For more info: $ python three_tier.py -h How-to-run example: sampleRNN$ pwd /u/mehris/sampleRNN sampleRNN$ \ THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python -u \ models/three_tier/three_tier.py --exp AXIS1 --seq_len 512 --big_frame_size 8 \ --frame_size 2 --weight_norm True --emb_size 64 --skip_conn False --dim 32 \ --n_rnn 2 --rnn_type LSTM --learn_h0 False --q_levels 16 --q_type linear \ --batch_size 128 --which_set MUSIC To resume add ` --resume` to the END of the EXACTLY above line. You can run the resume code as many time as possible, depending on the TRAIN_MODE. (folder name, file name, flags, their order, and the values are important) """ from time import time from datetime import datetime print "Experiment started at:", datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M') exp_start = time() import os, sys, glob sys.path.insert(1, os.getcwd()) import argparse import itertools import numpy numpy.random.seed(123) np = numpy import random random.seed(123) import theano import theano.tensor as T import theano.ifelse import lasagne import scipy.io.wavfile import lib LEARNING_RATE = 0.001 ### Parsing passed args/hyperparameters ### def get_args(): def t_or_f(arg): ua = str(arg).upper() if 'TRUE'.startswith(ua): return True elif 'FALSE'.startswith(ua): return False else: raise ValueError('Arg is neither `True` nor `False`') def check_non_negative(value): ivalue = int(value) if ivalue < 0: raise argparse.ArgumentTypeError("%s is not non-negative!" % value) return ivalue def check_positive(value): ivalue = int(value) if ivalue < 1: raise argparse.ArgumentTypeError("%s is not positive!" % value) return ivalue def check_unit_interval(value): fvalue = float(value) if fvalue < 0 or fvalue > 1: raise argparse.ArgumentTypeError("%s is not in [0, 1] interval!" % value) return fvalue # No default value here. Indicate every single arguement. parser = argparse.ArgumentParser( description='three_tier.py\nNo default value! Indicate every argument.') # TODO: Fix the descriptions # Hyperparameter arguements: parser.add_argument('--exp', help='Experiment name', type=str, required=False, default='_') parser.add_argument('--seq_len', help='How many samples to include in each\ Truncated BPTT pass', type=check_positive, required=True) parser.add_argument('--big_frame_size', help='How many samples per big frame',\ type=check_positive, required=True) parser.add_argument('--frame_size', help='How many samples per frame',\ type=check_positive, required=True) parser.add_argument('--weight_norm', help='Adding learnable weight normalization\ to all the linear layers (except for the embedding layer)',\ type=t_or_f, required=True) parser.add_argument('--emb_size', help='Size of embedding layer (> 0)', type=check_positive, required=True) # different than two_tier parser.add_argument('--skip_conn', help='Add skip connections to RNN', type=t_or_f, required=True) parser.add_argument('--dim', help='Dimension of RNN and MLPs',\ type=check_positive, required=True) parser.add_argument('--n_rnn', help='Number of layers in the stacked RNN', type=check_positive, choices=xrange(1,6), required=True) parser.add_argument('--rnn_type', help='GRU or LSTM', choices=['LSTM', 'GRU'],\ required=True) parser.add_argument('--learn_h0', help='Whether to learn the initial state of RNN',\ type=t_or_f, required=True) parser.add_argument('--q_levels', help='Number of bins for quantization of\ audio samples. 
Should be 256 for mu-law.',\ type=check_positive, required=True) parser.add_argument('--q_type', help='Quantization in linear-scale, a-law-companding,\ or mu-law compandig. With mu-/a-law quantization level shoud be set as 256',\ choices=['linear', 'a-law', 'mu-law'], required=True) parser.add_argument('--which_set', help='ONOM, BLIZZ, MUSIC, or HUCK', choices=['ONOM', 'BLIZZ', 'MUSIC', 'HUCK'], required=True) parser.add_argument('--batch_size', help='size of mini-batch', type=check_positive, choices=[64, 128, 256], required=True) parser.add_argument('--debug', help='Debug mode', required=False, default=False, action='store_true') parser.add_argument('--resume', help='Resume the same model from the last\ checkpoint. Order of params are important. [for now]',\ required=False, default=False, action='store_true') args = parser.parse_args() # NEW # Create tag for this experiment based on passed args tag = reduce(lambda a, b: a+b, sys.argv).replace('--resume', '').replace('/', '-').replace('--', '-').replace('True', 'T').replace('False', 'F') tag += '-lr'+str(LEARNING_RATE) print "Created experiment tag for these args:" print tag return args, tag args, tag = get_args() SEQ_LEN = args.seq_len # How many samples to include in each truncated BPTT pass #print "------------------previous SEQ_LEN:", SEQ_LEN # TODO: test incremental training #SEQ_LEN = 512 + 256 #print "---------------------------new SEQ_LEN:", SEQ_LEN BIG_FRAME_SIZE = args.big_frame_size # how many samples per big frame FRAME_SIZE = args.frame_size # How many samples per frame OVERLAP = BIG_FRAME_SIZE WEIGHT_NORM = args.weight_norm EMB_SIZE = args.emb_size SKIP_CONN = args.skip_conn DIM = args.dim # Model dimensionality. BIG_DIM = DIM # Dimensionality for the slowest level. N_RNN = args.n_rnn # How many RNNs to stack in the frame-level model N_BIG_RNN = N_RNN # how many RNNs to stack in the big-frame-level model RNN_TYPE = args.rnn_type H0_MULT = 2 if RNN_TYPE == 'LSTM' else 1 LEARN_H0 = args.learn_h0 Q_LEVELS = args.q_levels # How many levels to use when discretizing samples. e.g. 256 = 8-bit scalar quantization Q_TYPE = args.q_type # log- or linear-scale WHICH_SET = args.which_set BATCH_SIZE = args.batch_size RESUME = args.resume assert SEQ_LEN % BIG_FRAME_SIZE == 0,\ 'seq_len should be divisible by big_frame_size' assert BIG_FRAME_SIZE % FRAME_SIZE == 0,\ 'big_frame_size should be divisible by frame_size' N_FRAMES = SEQ_LEN / FRAME_SIZE # Number of frames in each truncated BPTT pass if Q_TYPE == 'mu-law' and Q_LEVELS != 256: raise ValueError('For mu-law Quantization levels should be exactly 256!') # Fixed hyperparams GRAD_CLIP = 1 # Elementwise grad clip threshold BITRATE = 16000 # Other constants #TRAIN_MODE = 'iters' # To use PRINT_ITERS and STOP_ITERS TRAIN_MODE = 'time' # To use PRINT_TIME and STOP_TIME #TRAIN_MODE = 'time-iters' # To use PRINT_TIME for validation, # and (STOP_ITERS, STOP_TIME), whichever happened first, for stopping exp. #TRAIN_MODE = 'iters-time' # To use PRINT_ITERS for validation, # and (STOP_ITERS, STOP_TIME), whichever happened first, for stopping exp. PRINT_ITERS = 10000 # Print cost, generate samples, save model checkpoint every N iterations. STOP_ITERS = 100000 # Stop after this many iterations PRINT_TIME = 90*60 # Print cost, generate samples, save model checkpoint every N seconds. STOP_TIME = 60*60*24*3 # Stop after this many seconds of actual training (not including time req'd to generate samples etc.) N_SEQS = 20 # Number of samples to generate every time monitoring. 
RESULTS_DIR = 'results_3t' FOLDER_PREFIX = os.path.join(RESULTS_DIR, tag) Q_ZERO = numpy.int32(Q_LEVELS//2) # Discrete value correponding to zero amplitude epoch_str = 'epoch' iter_str = 'iter' lowest_valid_str = 'lowest valid cost' corresp_test_str = 'correponding test cost' train_nll_str, valid_nll_str, test_nll_str = \ 'train NLL (bits)', 'valid NLL (bits)', 'test NLL (bits)' if args.debug: import warnings warnings.warn('----------RUNNING IN DEBUG MODE----------') TRAIN_MODE = 'time' PRINT_TIME = 100 STOP_TIME = 3000 STOP_ITERS = 1000 ### Create directories ### # FOLDER_PREFIX: root, contains: # log.txt, __note.txt, train_log.pkl, train_log.png [, model_settings.txt] # FOLDER_PREFIX/params: saves all checkpoint params as pkl # FOLDER_PREFIX/samples: keeps all checkpoint samples as wav # FOLDER_PREFIX/best: keeps the best parameters, samples, ... if not os.path.exists(FOLDER_PREFIX): os.makedirs(FOLDER_PREFIX) PARAMS_PATH = os.path.join(FOLDER_PREFIX, 'params') if not os.path.exists(PARAMS_PATH): os.makedirs(PARAMS_PATH) SAMPLES_PATH = os.path.join(FOLDER_PREFIX, 'samples') if not os.path.exists(SAMPLES_PATH): os.makedirs(SAMPLES_PATH) BEST_PATH = os.path.join(FOLDER_PREFIX, 'best') if not os.path.exists(BEST_PATH): os.makedirs(BEST_PATH) lib.print_model_settings(locals(), path=FOLDER_PREFIX, sys_arg=True) ### Import the data_feeder ### # Handling WHICH_SET if WHICH_SET == 'ONOM': from datasets.dataset import onom_train_feed_epoch as train_feeder from datasets.dataset import onom_valid_feed_epoch as valid_feeder from datasets.dataset import onom_test_feed_epoch as test_feeder elif WHICH_SET == 'BLIZZ': from datasets.dataset import blizz_train_feed_epoch as train_feeder from datasets.dataset import blizz_valid_feed_epoch as valid_feeder from datasets.dataset import blizz_test_feed_epoch as test_feeder elif WHICH_SET == 'MUSIC': from datasets.dataset import music_train_feed_epoch as train_feeder from datasets.dataset import music_valid_feed_epoch as valid_feeder from datasets.dataset import music_test_feed_epoch as test_feeder elif WHICH_SET == 'HUCK': from datasets.dataset import huck_train_feed_epoch as train_feeder from datasets.dataset import huck_valid_feed_epoch as valid_feeder from datasets.dataset import huck_test_feed_epoch as test_feeder def load_data(data_feeder): """ Helper function to deal with interface of different datasets. `data_feeder` should be `train_feeder`, `valid_feeder`, or `test_feeder`. 
""" return data_feeder(BATCH_SIZE, SEQ_LEN, OVERLAP, Q_LEVELS, Q_ZERO, Q_TYPE) ### Creating computation graph ### def big_frame_level_rnn(input_sequences, h0, reset): """ input_sequences.shape: (batch size, n big frames * BIG_FRAME_SIZE) h0.shape: (batch size, N_BIG_RNN, BIG_DIM) reset.shape: () output[0].shape: (batch size, n frames, DIM) output[1].shape: same as h0.shape output[2].shape: (batch size, seq len, Q_LEVELS) """ frames = input_sequences.reshape(( input_sequences.shape[0], input_sequences.shape[1] // BIG_FRAME_SIZE, BIG_FRAME_SIZE )) # Rescale frames from ints in [0, Q_LEVELS) to floats in [-2, 2] # (a reasonable range to pass as inputs to the RNN) frames = (frames.astype('float32') / lib.floatX(Q_LEVELS/2)) - lib.floatX(1) frames *= lib.floatX(2) # Initial state of RNNs learned_h0 = lib.param( 'BigFrameLevel.h0', numpy.zeros((N_BIG_RNN, H0_MULT*BIG_DIM), dtype=theano.config.floatX) ) # Handling LEARN_H0 learned_h0.param = LEARN_H0 learned_h0 = T.alloc(learned_h0, h0.shape[0], N_BIG_RNN, H0_MULT*BIG_DIM) learned_h0 = T.unbroadcast(learned_h0, 0, 1, 2) h0 = theano.ifelse.ifelse(reset, learned_h0, h0) # Handling RNN_TYPE # Handling SKIP_CONN if RNN_TYPE == 'GRU': rnns_out, last_hidden = lib.ops.stackedGRU('BigFrameLevel.GRU', N_BIG_RNN, BIG_FRAME_SIZE, BIG_DIM, frames, h0=h0, weightnorm=WEIGHT_NORM, skip_conn=SKIP_CONN) elif RNN_TYPE == 'LSTM': rnns_out, last_hidden = lib.ops.stackedLSTM('BigFrameLevel.LSTM', N_BIG_RNN, BIG_FRAME_SIZE, BIG_DIM, frames, h0=h0, weightnorm=WEIGHT_NORM, skip_conn=SKIP_CONN) output = lib.ops.Linear( 'BigFrameLevel.Output', BIG_DIM, DIM * BIG_FRAME_SIZE / FRAME_SIZE, rnns_out, initialization='he', weightnorm=WEIGHT_NORM ) output = output.reshape((output.shape[0], output.shape[1] * BIG_FRAME_SIZE / FRAME_SIZE, DIM)) independent_preds = lib.ops.Linear( 'BigFrameLevel.IndependentPreds', BIG_DIM, Q_LEVELS * BIG_FRAME_SIZE, rnns_out, initialization='he', weightnorm=WEIGHT_NORM ) independent_preds = independent_preds.reshape((independent_preds.shape[0], independent_preds.shape[1] * BIG_FRAME_SIZE, Q_LEVELS)) return (output, last_hidden, independent_preds) def frame_level_rnn(input_sequences, other_input, h0, reset): """ input_sequences.shape: (batch size, n frames * FRAME_SIZE) other_input.shape: (batch size, n frames, DIM) h0.shape: (batch size, N_RNN, DIM) reset.shape: () output.shape: (batch size, n frames * FRAME_SIZE, DIM) """ frames = input_sequences.reshape(( input_sequences.shape[0], input_sequences.shape[1] // FRAME_SIZE, FRAME_SIZE )) # Rescale frames from ints in [0, Q_LEVELS) to floats in [-2, 2] # (a reasonable range to pass as inputs to the RNN) frames = (frames.astype('float32') / lib.floatX(Q_LEVELS/2)) - lib.floatX(1) frames *= lib.floatX(2) gru_input = lib.ops.Linear( 'FrameLevel.InputExpand', FRAME_SIZE, DIM, frames, initialization='he', weightnorm=WEIGHT_NORM, ) + other_input # Initial state of RNNs learned_h0 = lib.param( 'FrameLevel.h0', numpy.zeros((N_RNN, H0_MULT*DIM), dtype=theano.config.floatX) ) # Handling LEARN_H0 learned_h0.param = LEARN_H0 learned_h0 = T.alloc(learned_h0, h0.shape[0], N_RNN, H0_MULT*DIM) learned_h0 = T.unbroadcast(learned_h0, 0, 1, 2) #learned_h0 = T.patternbroadcast(learned_h0, [False] * learned_h0.ndim) h0 = theano.ifelse.ifelse(reset, learned_h0, h0) # Handling RNN_TYPE # Handling SKIP_CONN if RNN_TYPE == 'GRU': rnns_out, last_hidden = lib.ops.stackedGRU('FrameLevel.GRU', N_RNN, DIM, DIM, gru_input, h0=h0, weightnorm=WEIGHT_NORM, skip_conn=SKIP_CONN) elif RNN_TYPE == 'LSTM': rnns_out, last_hidden = 
lib.ops.stackedLSTM('FrameLevel.LSTM', N_RNN, DIM, DIM, gru_input, h0=h0, weightnorm=WEIGHT_NORM, skip_conn=SKIP_CONN) output = lib.ops.Linear( 'FrameLevel.Output', DIM, FRAME_SIZE * DIM, rnns_out, initialization='he', weightnorm=WEIGHT_NORM ) output = output.reshape((output.shape[0], output.shape[1] * FRAME_SIZE, DIM)) return (output, last_hidden) def sample_level_predictor(frame_level_outputs, prev_samples): """ frame_level_outputs.shape: (batch size, DIM) prev_samples.shape: (batch size, FRAME_SIZE) output.shape: (batch size, Q_LEVELS) """ # Handling EMB_SIZE if EMB_SIZE == 0: # no support for one-hot in three_tier and one_tier. prev_samples = lib.ops.T_one_hot(prev_samples, Q_LEVELS) # (BATCH_SIZE*N_FRAMES*FRAME_SIZE, FRAME_SIZE, Q_LEVELS) last_out_shape = Q_LEVELS elif EMB_SIZE > 0: prev_samples = lib.ops.Embedding( 'SampleLevel.Embedding', Q_LEVELS, EMB_SIZE, prev_samples) # (BATCH_SIZE*N_FRAMES*FRAME_SIZE, FRAME_SIZE, EMB_SIZE), f32 last_out_shape = EMB_SIZE else: raise ValueError('EMB_SIZE cannot be negative.') prev_samples = prev_samples.reshape((-1, FRAME_SIZE * last_out_shape)) out = lib.ops.Linear( 'SampleLevel.L1_PrevSamples', FRAME_SIZE * last_out_shape, DIM, prev_samples, biases=False, initialization='he', weightnorm=WEIGHT_NORM ) out += frame_level_outputs # out = T.nnet.relu(out) # commented out to be similar to two_tier out = lib.ops.Linear('SampleLevel.L2', DIM, DIM, out, initialization='he', weightnorm=WEIGHT_NORM) out = T.nnet.relu(out) # L3 out = lib.ops.Linear('SampleLevel.L3', DIM, DIM, out, initialization='he', weightnorm=WEIGHT_NORM) out = T.nnet.relu(out) # Output # We apply the softmax later out = lib.ops.Linear('SampleLevel.Output', DIM, Q_LEVELS, out, weightnorm=WEIGHT_NORM) return out sequences = T.imatrix('sequences') h0 = T.tensor3('h0') big_h0 = T.tensor3('big_h0') reset = T.iscalar('reset') mask = T.matrix('mask') if args.debug: # Solely for debugging purposes. # Maybe I should set the compute_test_value=warn from here. 
sequences.tag.test_value = numpy.zeros((BATCH_SIZE, SEQ_LEN+OVERLAP), dtype='int32') h0.tag.test_value = numpy.zeros((BATCH_SIZE, N_RNN, H0_MULT*DIM), dtype='float32') big_h0.tag.test_value = numpy.zeros((BATCH_SIZE, N_RNN, H0_MULT*BIG_DIM), dtype='float32') reset.tag.test_value = numpy.array(1, dtype='int32') mask.tag.test_value = numpy.ones((BATCH_SIZE, SEQ_LEN+OVERLAP), dtype='float32') big_input_sequences = sequences[:, :-BIG_FRAME_SIZE] input_sequences = sequences[:, BIG_FRAME_SIZE-FRAME_SIZE:-FRAME_SIZE] target_sequences = sequences[:, BIG_FRAME_SIZE:] target_mask = mask[:, BIG_FRAME_SIZE:] big_frame_level_outputs, new_big_h0, big_frame_independent_preds = big_frame_level_rnn(big_input_sequences, big_h0, reset) frame_level_outputs, new_h0 = frame_level_rnn(input_sequences, big_frame_level_outputs, h0, reset) prev_samples = sequences[:, BIG_FRAME_SIZE-FRAME_SIZE:-1] prev_samples = prev_samples.reshape((1, BATCH_SIZE, 1, -1)) prev_samples = T.nnet.neighbours.images2neibs(prev_samples, (1, FRAME_SIZE), neib_step=(1, 1), mode='valid') prev_samples = prev_samples.reshape((BATCH_SIZE * SEQ_LEN, FRAME_SIZE)) sample_level_outputs = sample_level_predictor( frame_level_outputs.reshape((BATCH_SIZE * SEQ_LEN, DIM)), prev_samples ) cost = T.nnet.categorical_crossentropy( T.nnet.softmax(sample_level_outputs), target_sequences.flatten() ) cost = cost.reshape(target_sequences.shape) cost = cost * target_mask # Don't use these lines; could end up with NaN # Specially at the end of audio files where mask is # all zero for some of the shorter files in mini-batch. #cost = cost.sum(axis=1) / target_mask.sum(axis=1) #cost = cost.mean(axis=0) # Use this one instead. cost = cost.sum() cost = cost / target_mask.sum() # By default we report cross-entropy cost in bits. 
# Switch to nats by commenting out this line: # log_2(e) = 1.44269504089 cost = cost * lib.floatX(numpy.log2(numpy.e)) ip_cost = lib.floatX(numpy.log2(numpy.e)) * T.nnet.categorical_crossentropy( T.nnet.softmax(big_frame_independent_preds.reshape((-1, Q_LEVELS))), target_sequences.flatten() ) ip_cost = ip_cost.reshape(target_sequences.shape) ip_cost = ip_cost * target_mask ip_cost = ip_cost.sum() ip_cost = ip_cost / target_mask.sum() ### Getting the params, grads, updates, and Theano functions ### #params = lib.get_params(cost, lambda x: hasattr(x, 'param') and x.param==True) #ip_params = lib.get_params(ip_cost, lambda x: hasattr(x, 'param') and x.param==True\ # and 'BigFrameLevel' in x.name) #other_params = [p for p in params if p not in ip_params] #params = ip_params + other_params #lib.print_params_info(params, path=FOLDER_PREFIX) # #grads = T.grad(cost, wrt=params, disconnected_inputs='warn') #grads = [T.clip(g, lib.floatX(-GRAD_CLIP), lib.floatX(GRAD_CLIP)) for g in grads] # #updates = lasagne.updates.adam(grads, params, learning_rate=LEARNING_RATE) ########### all_params = lib.get_params(cost, lambda x: hasattr(x, 'param') and x.param==True) ip_params = lib.get_params(ip_cost, lambda x: hasattr(x, 'param') and x.param==True\ and 'BigFrameLevel' in x.name) other_params = [p for p in all_params if p not in ip_params] all_params = ip_params + other_params lib.print_params_info(ip_params, path=FOLDER_PREFIX) lib.print_params_info(other_params, path=FOLDER_PREFIX) lib.print_params_info(all_params, path=FOLDER_PREFIX) ip_grads = T.grad(ip_cost, wrt=ip_params, disconnected_inputs='warn') ip_grads = [T.clip(g, lib.floatX(-GRAD_CLIP), lib.floatX(GRAD_CLIP)) for g in ip_grads] other_grads = T.grad(cost, wrt=other_params, disconnected_inputs='warn') other_grads = [T.clip(g, lib.floatX(-GRAD_CLIP), lib.floatX(GRAD_CLIP)) for g in other_grads] grads = T.grad(cost, wrt=all_params, disconnected_inputs='warn') grads = [T.clip(g, lib.floatX(-GRAD_CLIP), lib.floatX(GRAD_CLIP)) for g in grads] ip_updates = lasagne.updates.adam(ip_grads, ip_params) other_updates = lasagne.updates.adam(other_grads, other_params) updates = lasagne.updates.adam(grads, all_params) # Training function(s) ip_train_fn = theano.function( [sequences, big_h0, reset, mask], [ip_cost, new_big_h0], updates=ip_updates, on_unused_input='warn' ) other_train_fn = theano.function( [sequences, big_h0, h0, reset, mask], [cost, new_big_h0, new_h0], updates=other_updates, on_unused_input='warn' ) train_fn = theano.function( [sequences, big_h0, h0, reset, mask], [cost, new_big_h0, new_h0], updates=updates, on_unused_input='warn' ) # Validation and Test function, hence no updates ip_test_fn = theano.function( [sequences, big_h0, reset, mask], [ip_cost, new_big_h0], on_unused_input='warn' ) other_test_fn = theano.function( [sequences, big_h0, h0, reset, mask], [cost, new_big_h0, new_h0], on_unused_input='warn' ) test_fn = theano.function( [sequences, big_h0, h0, reset, mask], [cost, new_big_h0, new_h0], on_unused_input='warn' ) # Sampling at big frame level big_frame_level_generate_fn = theano.function( [sequences, big_h0, reset], big_frame_level_rnn(sequences, big_h0, reset)[0:2], on_unused_input='warn' ) # Sampling at frame level big_frame_level_outputs = T.matrix('big_frame_level_outputs') frame_level_generate_fn = theano.function( [sequences, big_frame_level_outputs, h0, reset], frame_level_rnn(sequences, big_frame_level_outputs.dimshuffle(0,'x',1), h0, reset), on_unused_input='warn' ) # Sampling at audio sample level frame_level_outputs = 
T.matrix('frame_level_outputs') prev_samples = T.imatrix('prev_samples') sample_level_generate_fn = theano.function( [frame_level_outputs, prev_samples], lib.ops.softmax_and_sample( sample_level_predictor( frame_level_outputs, prev_samples ) ), on_unused_input='warn' ) # Uniform [-0.5, 0.5) for half of initial state for generated samples # to study the behaviour of the model and also to introduce some diversity # to samples in a simple way. [it's disabled] fixed_rand_h0 = numpy.random.rand(N_SEQS//2, N_RNN, H0_MULT*DIM) fixed_rand_h0 -= 0.5 fixed_rand_h0 = fixed_rand_h0.astype('float32') fixed_rand_big_h0 = numpy.random.rand(N_SEQS//2, N_RNN, H0_MULT*DIM) fixed_rand_big_h0 -= 0.5 fixed_rand_big_h0 = fixed_rand_big_h0.astype('float32') def generate_and_save_samples(tag): def write_audio_file(name, data): data = data.astype('float32') data -= data.min() data /= data.max() data -= 0.5 data *= 0.95 scipy.io.wavfile.write( os.path.join(SAMPLES_PATH, name+'.wav'), BITRATE, data) total_time = time() # Generate N_SEQS' sample files, each 5 seconds long N_SECS = 5 LENGTH = N_SECS*BITRATE if not args.debug else 100 samples = numpy.zeros((N_SEQS, LENGTH), dtype='int32') samples[:, :BIG_FRAME_SIZE] = Q_ZERO # First half zero, others fixed random at each checkpoint big_h0 = numpy.zeros( (N_SEQS-fixed_rand_big_h0.shape[0], N_BIG_RNN, H0_MULT*BIG_DIM), dtype='float32' ) big_h0 = numpy.concatenate((big_h0, fixed_rand_big_h0), axis=0) h0 = numpy.zeros( (N_SEQS-fixed_rand_h0.shape[0], N_RNN, H0_MULT*DIM), dtype='float32' ) h0 = numpy.concatenate((h0, fixed_rand_h0), axis=0) big_frame_level_outputs = None frame_level_outputs = None for t in xrange(BIG_FRAME_SIZE, LENGTH): if t % BIG_FRAME_SIZE == 0: big_frame_level_outputs, big_h0 = big_frame_level_generate_fn( samples[:, t-BIG_FRAME_SIZE:t], big_h0, numpy.int32(t == BIG_FRAME_SIZE) ) if t % FRAME_SIZE == 0: frame_level_outputs, h0 = frame_level_generate_fn( samples[:, t-FRAME_SIZE:t], big_frame_level_outputs[:, (t / FRAME_SIZE) % (BIG_FRAME_SIZE / FRAME_SIZE)], h0, numpy.int32(t == BIG_FRAME_SIZE) ) samples[:, t] = sample_level_generate_fn( frame_level_outputs[:, t % FRAME_SIZE], samples[:, t-FRAME_SIZE:t] ) total_time = time() - total_time log = "{} samples of {} seconds length generated in {} seconds." log = log.format(N_SEQS, N_SECS, total_time) print log, for i in xrange(N_SEQS): samp = samples[i] if Q_TYPE == 'mu-law': from datasets.dataset import mu2linear samp = mu2linear(samp) elif Q_TYPE == 'a-law': raise NotImplementedError('a-law is not implemented') write_audio_file("sample_{}_{}".format(tag, i), samp) def monitor(data_feeder): """ Cost and time of test_fn on a given dataset section. Pass only one of `valid_feeder` or `test_feeder`. Don't pass `train_feed`. :returns: Mean cost over the input dataset (data_feeder) Total time spent """ _total_time = time() _h0 = numpy.zeros((BATCH_SIZE, N_RNN, H0_MULT*DIM), dtype='float32') _big_h0 = numpy.zeros((BATCH_SIZE, N_RNN, H0_MULT*BIG_DIM), dtype='float32') _costs = [] _data_feeder = load_data(data_feeder) for _seqs, _reset, _mask in _data_feeder: _cost, _big_h0, _h0 = test_fn(_seqs, _big_h0, _h0, _reset, _mask) _costs.append(_cost) return numpy.mean(_costs), time() - _total_time print "Wall clock time spent before training started: {:.2f}h"\ .format((time()-exp_start)/3600.) print "Training!" total_iters = 0 total_time = 0. last_print_time = 0. 
last_print_iters = 0 costs = [] lowest_valid_cost = numpy.finfo(numpy.float32).max corresponding_test_cost = numpy.finfo(numpy.float32).max new_lowest_cost = False end_of_batch = False epoch = 0 h0 = numpy.zeros((BATCH_SIZE, N_RNN, H0_MULT*DIM), dtype='float32') big_h0 = numpy.zeros((BATCH_SIZE, N_RNN, H0_MULT*BIG_DIM), dtype='float32') # Initial load train dataset tr_feeder = load_data(train_feeder) ### Handling the resume option: if RESUME: # Check if checkpoint from previous run is not corrupted. # Then overwrite some of the variables above. iters_to_consume, res_path, epoch, total_iters,\ [lowest_valid_cost, corresponding_test_cost, test_cost] = \ lib.resumable(path=FOLDER_PREFIX, iter_key=iter_str, epoch_key=epoch_str, add_resume_counter=True, other_keys=[lowest_valid_str, corresp_test_str, test_nll_str]) # At this point we saved the pkl file. last_print_iters = total_iters print "### RESUMING JOB FROM EPOCH {}, ITER {}".format(epoch, total_iters) # Consumes this much iters to get to the last point in training data. consume_time = time() for i in xrange(iters_to_consume): tr_feeder.next() consume_time = time() - consume_time print "Train data ready in {:.2f}secs after consuming {} minibatches.".\ format(consume_time, iters_to_consume) lib.load_params(res_path) print "Parameters from last available checkpoint loaded." while True: # THIS IS ONE ITERATION if total_iters % 500 == 0: print total_iters, total_iters += 1 try: # Take as many mini-batches as possible from train set mini_batch = tr_feeder.next() except StopIteration: # Mini-batches are finished. Load it again. # Basically, one epoch. tr_feeder = load_data(train_feeder) # and start taking new mini-batches again. mini_batch = tr_feeder.next() epoch += 1 end_of_batch = True print "[Another epoch]", seqs, reset, mask = mini_batch start_time = time() cost, big_h0, h0 = train_fn(seqs, big_h0, h0, reset, mask) total_time += time() - start_time #print "This cost:", cost, "This h0.mean()", h0.mean() costs.append(cost) # Monitoring step if (TRAIN_MODE=='iters' and total_iters-last_print_iters == PRINT_ITERS) or \ (TRAIN_MODE=='time' and total_time-last_print_time >= PRINT_TIME) or \ (TRAIN_MODE=='time-iters' and total_time-last_print_time >= PRINT_TIME) or \ (TRAIN_MODE=='iters-time' and total_iters-last_print_iters >= PRINT_ITERS) or \ end_of_batch: # 0. Validation print "\nValidation!", valid_cost, valid_time = monitor(valid_feeder) print "Done!" # 1. Test test_time = 0. # Only when the validation cost is improved get the cost for test set. if valid_cost < lowest_valid_cost: lowest_valid_cost = valid_cost print "\n>>> Best validation cost of {} reached. Testing!"\ .format(valid_cost), test_cost, test_time = monitor(test_feeder) print "Done!" # Report last one which is the lowest on validation set: print ">>> test cost:{}\ttotal time:{}".format(test_cost, test_time) corresponding_test_cost = test_cost new_lowest_cost = True # 2. 
Stdout the training progress print_info = "epoch:{}\ttotal iters:{}\twall clock time:{:.2f}h\n" print_info += ">>> Lowest valid cost:{}\t Corresponding test cost:{}\n" print_info += "\ttrain cost:{:.4f}\ttotal time:{:.2f}h\tper iter:{:.3f}s\n" print_info += "\tvalid cost:{:.4f}\ttotal time:{:.2f}h\n" print_info += "\ttest cost:{:.4f}\ttotal time:{:.2f}h" print_info = print_info.format(epoch, total_iters, (time()-exp_start)/3600, lowest_valid_cost, corresponding_test_cost, numpy.mean(costs), total_time/3600, total_time/total_iters, valid_cost, valid_time/3600, test_cost, test_time/3600) print print_info tag = "e{}_i{}_t{:.2f}_tr{:.4f}_v{:.4f}" tag = tag.format(epoch, total_iters, total_time/3600, numpy.mean(cost), valid_cost) tag += ("_best" if new_lowest_cost else "") # 3. Save params of model (IO bound, time consuming) # If saving params is not successful, there shouldn't be any trace of # successful monitoring step in train_log as well. print "Saving params!", lib.save_params( os.path.join(PARAMS_PATH, 'params_{}.pkl'.format(tag)) ) print "Done!" # 4. Save and graph training progress (fast) training_info = {epoch_str : epoch, iter_str : total_iters, train_nll_str : numpy.mean(costs), valid_nll_str : valid_cost, test_nll_str : test_cost, lowest_valid_str : lowest_valid_cost, corresp_test_str : corresponding_test_cost, 'train time' : total_time, 'valid time' : valid_time, 'test time' : test_time, 'wall clock time' : time()-exp_start} lib.save_training_info(training_info, FOLDER_PREFIX) print "Train info saved!", y_axis_strs = [train_nll_str, valid_nll_str, test_nll_str] lib.plot_traing_info(iter_str, y_axis_strs, FOLDER_PREFIX) print "And plotted!" # 5. Generate and save samples (time consuming) # If not successful, we still have the params to sample afterward print "Sampling!", # Generate samples generate_and_save_samples(tag) print "Done!" if total_iters-last_print_iters == PRINT_ITERS \ or total_time-last_print_time >= PRINT_TIME: # If we are here b/c of onom_end_of_batch, we shouldn't mess # with costs and last_print_iters costs = [] last_print_time += PRINT_TIME last_print_iters += PRINT_ITERS end_of_batch = False new_lowest_cost = False print "Validation Done!\nBack to Training..." if (TRAIN_MODE=='iters' and total_iters == STOP_ITERS) or \ (TRAIN_MODE=='time' and total_time >= STOP_TIME) or \ ((TRAIN_MODE=='time-iters' or TRAIN_MODE=='iters-time') and \ (total_iters == STOP_ITERS or total_time >= STOP_TIME)): print "Done! Total iters:", total_iters, "Total time: ", total_time print "Experiment ended at:", datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M') print "Wall clock time spent: {:.2f}h"\ .format((time()-exp_start)/3600) sys.exit()
mit
-2,786,688,481,508,566,500
37.406452
148
0.59379
false
3.416029
true
false
false
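Note: the sampling code in the record above converts generated sample indices back to linear audio through a mu2linear helper imported from datasets.dataset, which is not included in this file. A minimal sketch of mu-law companding and expansion in NumPy — the helper names and the 8-bit (mu=255) assumption are illustrative, not the repository's actual implementation:

import numpy as np

def linear2mu(x, mu=255):
    # Map float samples in [-1, 1] to integer codes in [0, mu] (mu-law companding).
    y = np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)
    return np.round((y + 1) / 2 * mu).astype('int32')

def mu2linear(codes, mu=255):
    # Inverse mapping: integer codes in [0, mu] back to float samples in [-1, 1].
    y = 2 * codes.astype('float32') / mu - 1
    return np.sign(y) / mu * ((1 + mu) ** np.abs(y) - 1)

samples = np.array([-1.0, -0.1, 0.0, 0.1, 1.0], dtype='float32')
print(mu2linear(linear2mu(samples)))  # approximately recovers the input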
RosesTheN00b/BudgetButlerWeb
butler_offline/views/einzelbuchungen/uebersicht_monat.py
1
8451
from butler_offline.viewcore.state import persisted_state from butler_offline.core import time from butler_offline.viewcore import request_handler from butler_offline.viewcore import viewcore from butler_offline.core.report import ReportGenerator from butler_offline.viewcore.converter import datum_to_string def _handle_request(request): context = viewcore.generate_base_context('monatsuebersicht') einzelbuchungen = persisted_state.database_instance().einzelbuchungen monate = sorted(einzelbuchungen.get_monate(), reverse=True) context['monate'] = monate if not monate: return viewcore.generate_error_context('monatsuebersicht', 'Keine Ausgaben erfasst') selected_item = context['monate'][0] if request.method == "POST": selected_item = request.values['date'] month = int(float(selected_item.split("_")[1])) year = int(float(selected_item.split("_")[0])) table_data_selection = einzelbuchungen.select().select_month(month).select_year(year) table_ausgaben = table_data_selection.select_ausgaben() table_einnahmen = table_data_selection.select_einnahmen() ''' Berechnung der Ausgaben für das Kreisdiagramm ''' ausgaben_liste = [] ausgaben_labels = [] ausgaben_data = [] ausgaben_colors = [] for kategorie, row in table_ausgaben.group_by_kategorie().iterrows(): ausgaben_labels.append(kategorie) ausgaben_data.append("%.2f" % abs(row.Wert)) ausgaben_colors.append("#" + einzelbuchungen.get_farbe_fuer(kategorie)) ausgaben_liste.append((kategorie, "%.2f" % row.Wert, einzelbuchungen.get_farbe_fuer(kategorie))) context['ausgaben'] = ausgaben_liste context['ausgaben_labels'] = ausgaben_labels context['ausgaben_data'] = ausgaben_data context['ausgaben_colors'] = ausgaben_colors ''' Berechnung der Einnahmen für das Kreisdiagramm ''' einnahmen_liste = [] einnahmen_labels = [] einnahmen_data = [] einnahmen_colors = [] for kategorie, row in table_einnahmen.group_by_kategorie().iterrows(): einnahmen_labels.append(kategorie) einnahmen_data.append("%.2f" % abs(row.Wert)) einnahmen_colors.append("#" + einzelbuchungen.get_farbe_fuer(kategorie)) einnahmen_liste.append((kategorie, "%.2f" % row.Wert, einzelbuchungen.get_farbe_fuer(kategorie))) context['einnahmen'] = einnahmen_liste context['einnahmen_labels'] = einnahmen_labels context['einnahmen_data'] = einnahmen_data context['einnahmen_colors'] = einnahmen_colors zusammenfassung = table_data_selection.get_month_summary() for tag, kategorien_liste in zusammenfassung: for einheit in kategorien_liste: einheit['farbe'] = einzelbuchungen.get_farbe_fuer(einheit['kategorie']) context['zusammenfassung'] = zusammenfassung ausgaben_monat = table_ausgaben.sum() context['gesamt'] = "%.2f" % ausgaben_monat einnahmen_monat = table_einnahmen.sum() context['gesamt_einnahmen'] = "%.2f" % einnahmen_monat selected_date = str(year) + "_" + str(month).rjust(2, "0") context['selected_date'] = selected_date context['selected_year'] = year if einnahmen_monat >= abs(ausgaben_monat): context['color_uebersicht_gruppe_1'] = "gray" context['name_uebersicht_gruppe_1'] = 'Gedeckte Ausgaben' context['wert_uebersicht_gruppe_1'] = '%.2f' % abs(ausgaben_monat) context['color_uebersicht_gruppe_2'] = "lightgreen" context['name_uebersicht_gruppe_2'] = 'Einnahmenüberschuss' context['wert_uebersicht_gruppe_2'] = '%.2f' % (einnahmen_monat + ausgaben_monat) else: context['color_uebersicht_gruppe_1'] = "gray" context['name_uebersicht_gruppe_1'] = 'Gedeckte Ausgaben' context['wert_uebersicht_gruppe_1'] = '%.2f' % einnahmen_monat context['color_uebersicht_gruppe_2'] = "red" context['name_uebersicht_gruppe_2'] = 
'Ungedeckte Ausgaben' context['wert_uebersicht_gruppe_2'] = '%.2f' % ((ausgaben_monat + einnahmen_monat) * -1) einnahmen_jahr = einzelbuchungen.select().select_einnahmen().select_year(year).sum() ausgaben_jahr = einzelbuchungen.select().select_ausgaben().select_year(year).sum() if einnahmen_jahr >= abs(ausgaben_jahr): context['color_uebersicht_jahr_gruppe_1'] = "gray" context['name_uebersicht_jahr_gruppe_1'] = 'Gedeckte Einnahmen' context['wert_uebersicht_jahr_gruppe_1'] = '%.2f' % abs(ausgaben_jahr) context['color_uebersicht_jahr_gruppe_2'] = "lightgreen" context['name_uebersicht_jahr_gruppe_2'] = 'Einnahmenüberschuss' context['wert_uebersicht_jahr_gruppe_2'] = '%.2f' % (einnahmen_jahr + ausgaben_jahr) else: context['color_uebersicht_jahr_gruppe_1'] = "gray" context['name_uebersicht_jahr_gruppe_1'] = 'Gedeckte Ausgaben' context['wert_uebersicht_jahr_gruppe_1'] = '%.2f' % einnahmen_jahr context['color_uebersicht_jahr_gruppe_2'] = "red" context['name_uebersicht_jahr_gruppe_2'] = 'Ungedeckte Ausgaben' context['wert_uebersicht_jahr_gruppe_2'] = '%.2f' % ((ausgaben_jahr + einnahmen_jahr) * -1) return context def index(request): return request_handler.handle_request(request, _handle_request, 'einzelbuchungen/uebersicht_monat.html') def _abrechnen(request): context = viewcore.generate_base_context('monatsuebersicht') date = time.today() year = date.year month = date.month quantity = 60 if request.method == 'POST': if 'date' in request.values: str_year, str_month = request.values['date'].split('_') year = int(str_year) month = int(str_month) if 'quantity' in request.values: quantity = int(request.values['quantity']) einzelbuchungen = persisted_state.database_instance().einzelbuchungen generator = ReportGenerator('Monatsübersicht für ' + str(month) + '/' + str(year), quantity) table_data_selection = einzelbuchungen.select().select_month(month).select_year(year) table_ausgaben = table_data_selection.select_ausgaben() table_einnahmen = table_data_selection.select_einnahmen() if _is_selected(request, 'zusammenfassung_einnahmen'): data = {} for kategorie, row in table_einnahmen.group_by_kategorie().iterrows(): data[kategorie] = row.Wert generator.add_half_line_elements({'Einnahmen': data}) if _is_selected(request, 'zusammenfassung_ausgaben'): data = {} for kategorie, row in table_ausgaben.group_by_kategorie().iterrows(): data[kategorie] = row.Wert generator.add_half_line_elements({'Ausgaben': data}) if _is_selected(request, 'einnahmen'): generator.add_halfline('') generator.add_halfline('') generator.add_halfline('----Einnahmen----') zusammenfassung = table_einnahmen.zusammenfassung() compiled_zusammenfassung = {} for tag, kategorien_liste in zusammenfassung: compiled_zusammenfassung[datum_to_string(tag)] = {} for einheit in kategorien_liste: compiled_zusammenfassung[datum_to_string(tag)][einheit['name']] = float(einheit['summe']) generator.add_half_line_elements(compiled_zusammenfassung) if _is_selected(request, 'ausgaben'): generator.add_halfline('') generator.add_halfline('') generator.add_halfline('----Ausgaben----') zusammenfassung = table_ausgaben.zusammenfassung() compiled_zusammenfassung = {} for tag, kategorien_liste in zusammenfassung: compiled_zusammenfassung[datum_to_string(tag)] = {} for einheit in kategorien_liste: compiled_zusammenfassung[datum_to_string(tag)][einheit['name']] = float(einheit['summe']) generator.add_half_line_elements(compiled_zusammenfassung) page = '' for line in generator.generate_pages(): page = page + '<br>' + line context['abrechnungstext'] = '<pre>' + page + 
'</pre>' context['element_titel'] = 'Abrechnung vom {month}/{year}'.format(month=month, year=year) return context def _is_selected(request, name): if request.method != 'POST': return True if name in request.values: return True return False def abrechnen(request): return request_handler.handle_request(request, _abrechnen, 'shared/present_abrechnung.html')
agpl-3.0
-3,474,251,232,784,731,600
41.437186
108
0.665483
false
2.730359
false
false
false
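Note: the view in the record above aggregates bookings per category via group_by_kategorie() and then formats "%.2f" values for the pie charts. A rough, self-contained sketch of that aggregation pattern with pandas — the column names Kategorie and Wert follow the usage above, the sample rows are made up:

import pandas as pd

buchungen = pd.DataFrame({
    'Kategorie': ['Essen', 'Miete', 'Essen'],
    'Wert': [-12.50, -800.00, -7.30],
})

# Sum per category, then format the absolute value like the view does for chart data.
for kategorie, row in buchungen.groupby('Kategorie').sum().iterrows():
    print(kategorie, "%.2f" % abs(row.Wert))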
schmodd/forecast.py
forecast.py
1
4251
#!/usr/bin/env python # -*- coding: utf-8 -*- # todo: add alerts, colors import requests import json import datetime import argparse import sys from prettytable import PrettyTable # surf to https://developer.forecast.io/ for an api key # use http://dbsgeo.com/latlon/ to get coordinates for your location API_KEY='' LAT='' LONG='' LIMIT=24 # limit hourly forecast output (48 max) #some api settings UNITS='si' # auto possibly shows wrong measuring unit LANG='en' def formatDatetime(unixTime, outputFormat='%d. %b. %H:%M'): return datetime.datetime.fromtimestamp(unixTime).strftime(outputFormat) def getMeasuringUnit(): return '\N{DEGREE SIGN}F' if UNITS == 'us' else '\N{DEGREE SIGN}C' def getPrecip(probability, type): probability = '{:3.0f} {:1}'.format(probability * 100, '%') return '{:} {:>5}'.format(probability, '-') if type == 0 else '{:} {:>5}'.format(probability, type) def showDaily(measuring_unit): HEAD = ['Date', 'Temp min', 'Temp max', 'HUM', 'SR', 'SS', 'Precip', 'Summary'] table = PrettyTable(HEAD, border = False, padding_width = 2) table.align='r' table.align['Date'] = 'l' table.align['Summary'] = 'l' for day in result['daily']['data']: table.add_row([formatDatetime(day['time'], '%d. %b.'), '{:4.2f} {:2}'.format(day['temperatureMin'], measuring_unit), '{:4.2f} {:2}'.format(day['temperatureMax'], measuring_unit), '{:3.0f} {:1}'.format(day['humidity'] * 100, '%'), formatDatetime(day['sunriseTime'], '%H:%M'), formatDatetime(day['sunsetTime'], '%H:%M'), getPrecip(day['precipProbability'], day['precipType'] if day['precipProbability'] > 0 else 0), day['summary']]) print('\n', end='') print(table) def showHourly(measuring_unit): HEAD = ['Date', 'Temp', 'HUM', 'Precip', 'Summary'] table = PrettyTable(HEAD, border = False, padding_width = 2) table.align='r' table.align['Date'] = 'l' table.align['Summary'] = 'l' for hour in result['hourly']['data'][0:LIMIT]: table.add_row([formatDatetime(hour['time'], '%d. %b. %H:%M'), '{:4.2f} {:2}'.format(hour['temperature'], measuring_unit), '{:3.0f} {:1}'.format(hour['humidity'] * 100, '%'), getPrecip(hour['precipProbability'], hour['precipType'] if hour['precipProbability'] > 0 else 0), hour['summary']]) print('\n', end='') print(table) if __name__ == '__main__': if not API_KEY or not LAT or not LONG: sys.exit("aborted! 
please make sure api-key and coordinates are specified") parser = argparse.ArgumentParser(description='weather forecast powered by forecast.io') group = parser.add_mutually_exclusive_group() group.add_argument('-df', help='daily forecast', action='store_true') group.add_argument('-hf', help='hourly forecast', action='store_true') args = parser.parse_args() BASE_URL = 'https://api.forecast.io/forecast/' SETTINGS = API_KEY + '/' + LAT + ',' + LONG + '?units=' + UNITS + '&lang='+ LANG + '&exclude=flags,minutely,' URL = BASE_URL + SETTINGS HTTP_HEADERS = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:33.0) Gecko/20100101 Firefox/33.0', 'Accept-Encoding': 'gzip'} MEAS_UNIT = getMeasuringUnit() if args.df: URL += 'hourly,currently' elif args.hf: URL += 'daily,currently' else: URL += 'hourly,daily' result = requests.get(URL, headers=HTTP_HEADERS) if result.status_code == 200: result = result.json() if args.df: showDaily(MEAS_UNIT) elif args.hf: showHourly(MEAS_UNIT) else: print('{:} {:10}'.format('\n date:', formatDatetime(result['currently']['time'])), end='') print('{:} {:6.2f} {:2}'.format(' | temp:', result['currently']['temperature'], MEAS_UNIT), end='') print('{:} {:2.0f} {:1}'.format(' | humidity:', result['currently']['humidity'] * 100, '%'), end='') print('{:} {:}'.format(' | precipitation:', getPrecip(result['currently']['precipProbability'], result['currently']['precipType'] if result['currently']['precipProbability'] > 0 else 0))) else: print('aborted! problems connecting to forecast.io')
mit
-898,684,992,294,098,300
40.676471
117
0.605034
false
3.352524
false
false
false
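Note: both showDaily and showHourly in the record above build their output with PrettyTable, using border=False, a fixed padding width and per-column alignment. A minimal standalone example of that table setup (the row values are made up):

from prettytable import PrettyTable

table = PrettyTable(['Date', 'Temp', 'Summary'], border=False, padding_width=2)
table.align = 'r'            # right-align everything by default
table.align['Date'] = 'l'    # keep date and summary left-aligned
table.align['Summary'] = 'l'
table.add_row(['01. Jan. 12:00', '{:4.2f} {:2}'.format(3.5, '\N{DEGREE SIGN}C'), 'Partly cloudy'])
print(table)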
repotvsupertuga/tvsupertuga.repository
script.module.python.koding.aio/lib/koding/tutorials.py
1
7420
# -*- coding: utf-8 -*- # script.module.python.koding.aio # Python Koding AIO (c) by TOTALREVOLUTION LTD ([email protected]) # Python Koding AIO is licensed under a # Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License. # You should have received a copy of the license along with this # work. If not, see http://creativecommons.org/licenses/by-nc-nd/4.0. # Please make sure you've read and understood the license, this code can NOT be used commercially # and it can NOT be modified and redistributed. If you're found to be in breach of this license # then any affected add-ons will be blacklisted and will not be able to work on the same system # as any other add-ons which use this code. Thank you for your cooperation. import os import re import sys import urllib import xbmc import xbmcaddon import xbmcgui import xbmcplugin import xbmcvfs from directory import Add_Dir from filetools import Text_File from vartools import Find_In_Text from guitools import Text_Box, Show_Busy, Keyboard from systemtools import Sleep_If_Window_Active from video import Play_Video from web import Open_URL dialog = xbmcgui.Dialog() py_path = 'special://home/addons/script.module.python.koding.aio/lib/koding' video_base = 'http://totalrevolution.tv/videos/python_koding/' #---------------------------------------------------------------- def Grab_Tutorials(): """ internal command ~""" import re full_array = [] dirs,files = xbmcvfs.listdir(py_path) # Check all the modules for functions with tutorial info for file in files: file_path = os.path.join(py_path,file) if file.endswith('.py') and file != 'tutorials.py': content = Text_File(file_path,'r').replace('\r','') # content_array = re.compile('# TUTORIAL #\ndef (.+?)\(').findall(content) content_array = Find_In_Text(content=content, start='# TUTORIAL #\ndef ', end='\(', show_errors=False) if content_array: for item in content_array: item = item.strip() full_array.append('%s~%s'%(item,file_path)) content_array = Find_In_Text(content=content, start='# TUTORIAL #\nclass ', end='\(', show_errors=False) if content_array: for item in content_array: item = item.strip() full_array.append('%s~%s'%(item,file_path)) # Return a list of tutorials Add_Dir('[COLOR=gold]CREATE YOUR FIRST ADD-ON[/COLOR]',video_base+'Create_Addon.mov','play_video', folder=False, icon='', fanart='', description='How to create your own add-on using the Python Koding framework.') for item in sorted(full_array,key=str.lower): name, filepath = item.split('~') filepath = urllib.quote(filepath) Add_Dir(name=name.upper().replace('_',' '), url='%s~%s'%(name,filepath), mode='show_tutorial', folder=False, icon='', fanart='', description='Instructions for how to use the %s function.'%name) #---------------------------------------------------------------- def Show_Tutorial(url): """ internal command ~""" name, filepath = url.split('~') filepath = urllib.unquote(filepath) readfile = Text_File(filepath,'r').replace('\r','') try: raw_find = Find_In_Text(content=readfile, start='# TUTORIAL #\ndef %s' % name,end='~"""')[0] except: raw_find = Find_In_Text(content=readfile, start='# TUTORIAL #\nclass %s' % name,end='~"""')[0] # Check if an example code segment exists in the comments if 'EXAMPLE CODE:' in raw_find: code = re.findall(r'(?<=EXAMPLE CODE:)(?s)(.*$)', raw_find)[0] code = code.replace('script.module.python.koding.aio','temp_replace_string') code = code.replace('koding.','').strip() code = code.replace('temp_replace_string','script.module.python.koding.aio') else: code = None # Check if a 
video exists in the comments internetstate = xbmc.getInfoLabel('System.InternetState') if internetstate: video_page = Open_URL(video_base) extension = Find_In_Text(video_page, name, '"', False) if extension != '' and extension != None: video = video_base+name+extension[0] else: video = None else: video = None counter = 0 removal_string = '' final_header = '' newline = '' temp_raw = raw_find.splitlines() for line in temp_raw: if counter == 0: removal_string += line if '[' in line: replace_file = Find_In_Text(content=line,start='\[',end='\]') for item in replace_file: line = line.replace(item,'') if ',' in line: header_extension = line.split(',') for item in header_extension: if '=' in item: item = item.split('=')[0] final_header += item+',' final_header = 'koding.'+name+final_header[:-2]+')' else: final_header = 'koding.'+name+line[:-1] else: removal_string += '\n'+line counter += 1 if counter == 2: break if final_header.endswith('))'): final_header = final_header[:-1] if final_header.startswith('koding.User_Info'): final_header = 'koding.User_Info()' full_text = raw_find.replace(removal_string,'').strip() # Initialise the dialog select dialog_array = ['Documentation'] if code: dialog_array.append('Run Example Code') if video: dialog_array.append('Watch Video') # If there's more than one item we show a dialog select otherwise we just load up the text window if len(dialog_array) > 1: choice = dialog.select(name, dialog_array) if choice >= 0: choice = dialog_array[choice] if choice == 'Documentation': Text_Box(final_header,full_text .replace('AVAILABLE PARAMS:','[COLOR=dodgerblue]AVAILABLE PARAMS:[/COLOR]') .replace('EXAMPLE CODE:','[COLOR=dodgerblue]EXAMPLE CODE:[/COLOR]') .replace('IMPORTANT:','[COLOR=gold]IMPORTANT:[/COLOR]') .replace('CODE:','[COLOR=dodgerblue]CODE:[/COLOR]') .replace('AVAILABLE VALUES:','[COLOR=dodgerblue]AVAILABLE VALUES:[/COLOR]') .replace('WARNING:','[COLOR=red]WARNING:[/COLOR]')) elif choice == 'Run Example Code': codefile = filepath.split(os.sep) codefile = codefile[len(codefile)-1].replace('.py','') exec('from %s import *' % codefile) # exec('from %s import %s' % (codefile, params["name"])) exec(code) elif choice == 'Watch Video': Play_Video(video) if choice < 0: return else: Text_Box(final_header,full_text .replace('AVAILABLE PARAMS:','[COLOR=dodgerblue]AVAILABLE PARAMS:[/COLOR]') .replace('EXAMPLE CODE:','[COLOR=dodgerblue]EXAMPLE CODE:[/COLOR]') .replace('IMPORTANT:','[COLOR=gold]IMPORTANT:[/COLOR]') .replace('CODE:','[COLOR=dodgerblue]CODE:[/COLOR]') .replace('AVAILABLE VALUES:','[COLOR=dodgerblue]AVAILABLE VALUES:[/COLOR]') .replace('WARNING:','[COLOR=red]WARNING:[/COLOR]'))
gpl-2.0
-4,688,523,344,214,192,000
42.893491
216
0.588434
false
3.776986
false
false
false
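Note: Grab_Tutorials and Show_Tutorial in the record above scan module source for text between markers such as '# TUTORIAL #\ndef ' and '(' using the koding Find_In_Text helper. A rough stand-in for that pattern with plain re — this is the same idea, not the helper's actual implementation:

import re

def find_between(content, start, end):
    # Return every substring that sits between the start and end markers.
    return re.findall(re.escape(start) + r'(.*?)' + re.escape(end), content, re.DOTALL)

source = "# TUTORIAL #\ndef Text_Box(title, message):\n    pass\n"
print(find_between(source, '# TUTORIAL #\ndef ', '('))  # ['Text_Box']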
juliushaertl/i3pystatus
i3pystatus/updates/__init__.py
1
3829
import threading from i3pystatus import SettingsBase, Module, formatp from i3pystatus.core.util import internet, require class Backend(SettingsBase): settings = () updates = 0 class Updates(Module): """ Generic update checker. To use select appropriate backend(s) for your system. For list of all available backends see :ref:`updatebackends`. Left clicking on the module will refresh the count of upgradeable packages. This may be used to dismiss the notification after updating your system. .. rubric:: Available formatters * `{count}` — Sum of all available updates from all backends. * For each backend registered there is one formatter named after the backend, multiple identical backends do not accumulate, but overwrite each other. * For example, `{Cower}` (note capitcal C) is the number of updates reported by the cower backend, assuming it has been registered. .. rubric:: Usage example :: from i3pystatus import Status from i3pystatus.updates import pacman, cower status = Status(standalone=True) status.register("updates", format = "Updates: {count}", format_no_updates = "No updates", backends = [pacman.Pacman(), cower.Cower()]) status.run() """ interval = 3600 settings = ( ("backends", "Required list of backends used to check for updates."), ("format", "Format used when updates are available. " "May contain formatters."), ("format_no_updates", "String that is shown if no updates are available." " If not set the module will be hidden if no updates are available."), ("format_working", "Format used while update queries are run. By default the same as ``format``."), "color", "color_no_updates", "color_working", ("interval", "Default interval is set to one hour."), ) required = ("backends",) backends = None format = "Updates: {count}" format_no_updates = None format_working = None color = "#00DD00" color_no_updates = "#FFFFFF" color_working = None on_leftclick = "run" def init(self): if not isinstance(self.backends, list): self.backends = [self.backends] if self.format_working is None: # we want to allow an empty format self.format_working = self.format self.color_working = self.color_working or self.color self.data = { "count": 0 } self.condition = threading.Condition() self.thread = threading.Thread(target=self.update_thread, daemon=True) self.thread.start() def update_thread(self): self.check_updates() while True: with self.condition: self.condition.wait(self.interval) self.check_updates() @require(internet) def check_updates(self): self.output = { "full_text": formatp(self.format_working, **self.data).strip(), "color": self.color_working, } updates_count = 0 for backend in self.backends: updates = backend.updates updates_count += updates self.data[backend.__class__.__name__] = updates if updates_count == 0: self.output = {} if not self.format_no_updates else { "full_text": self.format_no_updates, "color": self.color_no_updates, } return self.data["count"] = updates_count self.output = { "full_text": formatp(self.format, **self.data).strip(), "color": self.color, } def run(self): with self.condition: self.condition.notify()
mit
3,242,534,360,093,105,000
30.628099
107
0.597857
false
4.348864
false
false
false
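Note: the Updates module in the record above only requires each backend to expose an updates count; check_updates() sums them and keys the per-backend values by class name. A small standalone sketch of that contract — DummyBackend is made up, real backends such as pacman or cower query the package manager instead:

class DummyBackend(object):
    """Stand-in for a Backend subclass: only needs an `updates` count."""
    @property
    def updates(self):
        return 3

backends = [DummyBackend(), DummyBackend()]
count = 0
data = {}
for backend in backends:
    count += backend.updates
    data[backend.__class__.__name__] = backend.updates  # identical class names overwrite
print(count, data)  # 6 {'DummyBackend': 3}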
git-pedro-77/PROYECTOFINALPYTHON
proyectoITSAE/ec/edu/itsae/dao/ventaDao.py
1
2654
# coding:utf-8 ''' Created on 27/1/2015 @author: Programacion ''' from ec.edu.itsae.conn import DBcon #from flask import redirect, url_for import json class VentaDao(DBcon.DBcon):#heredando ''' classdocs ''' def __init__(self): ''' Constructor ''' pass#sirve cuando no hay implementacion en el metodo def reportarventa(self): con=self.conexion().connect().cursor() #capturando de la clase DBcon con.execute(" select * from venta ") reporte=con.fetchall() return reporte #despues del return no se debe colocar nada def grabarVenta(self, vendedor, turno, fechaventa,gestion ): con=self.conexion().connect() sql= """insert into venta(vendedor, turno, fechaventa, gestion) values ('%s','%s', '%s','%s') """ %(vendedor, turno, fechaventa,gestion ) #print sql Para imprimir nuestra consulta para poder ver with con: cursor=con.cursor() cursor.execute(sql)#aqui debe estar sql para que se ejecute el insert #deber actualizar y eliminar ''' def eliminarCliente(self,datoelim): con=self.conexion().connect() sql= """ delete from cliente where id_cliente= %i """ %int(datoelim) #print sql Para imprimir nuestra consulta para poder ver with con: cursor=con.cursor() cursor.execute(sql)''' def buscarVentaFactura(self, datobusca): con=self.conexion().connect().cursor() con.execute(""" select CONCAT (nombre,' ', apellido) as value, id_cliente as id from cliente where upper(CONCAT (nombre,' ', apellido)) like upper('%s') """ %("%"+datobusca+"%") ) reporte=con.fetchall() columna=('value', 'id') lista=[] for row in reporte: lista.append(dict(zip(columna,row))) return json.dumps(lista, indent=2) def buscarVentaDato(self, datobuscado): con=self.conexion().connect().cursor() sql=""" select * from cliente where upper(CONCAT (nombre,' ', apellido)) like upper('%s') """ %("%"+datobuscado+"%") con.execute(sql) reporte=con.fetchall() return reporte def validarventa(self, datot): con=self.conexion().connect().cursor() sql=""" select * from personas p, trabajador t where t.idpersona=%i """ %(datot) con.execute(sql) reporte=con.fetchall() return reporte
gpl-2.0
-8,052,328,900,098,061,000
32.051282
187
0.558779
false
3.451235
false
false
false
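Note: grabarVenta and the query helpers in the record above interpolate user-supplied values directly into the SQL string, which is fragile and open to SQL injection. A hedged sketch of the same insert using DB-API parameter binding instead — it assumes the connection returned by self.conexion().connect() is DB-API compatible (e.g. psycopg2 or MySQLdb with the %s paramstyle), and the function name is hypothetical:

def grabar_venta_segura(con, vendedor, turno, fechaventa, gestion):
    # Let the driver quote/escape the values instead of building the SQL by hand.
    sql = ("insert into venta(vendedor, turno, fechaventa, gestion) "
           "values (%s, %s, %s, %s)")
    with con:
        cursor = con.cursor()
        cursor.execute(sql, (vendedor, turno, fechaventa, gestion))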
erfannoury/capgen-lasagne
another no-ft-ln-hs-largelr.py
1
13876
from __future__ import division, print_function import logging import numpy as np import scipy as sc import skimage from skimage import transform import theano import theano.tensor as T import lasagne import sys import cPickle as pickle from datetime import datetime from collections import OrderedDict from mscoco_threaded_iter import COCOCaptionDataset sys.path.append('/home/noury/codevault/Recipes/modelzoo/') sys.path.append('/home/noury/codevault/seq2seq-lasagne/') from resnet50 import build_model from CustomLSTMLayer import LNLSTMLayer from HierarchicalSoftmax import HierarchicalSoftmaxLayer from LayerNormalization import LayerNormalizationLayer sys.setrecursionlimit(10000) if __name__ == '__main__': logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) formatter = logging.Formatter('%(asctime)s %(message)s', '%m/%d/%Y %I:%M:%S %p') fh = logging.FileHandler('another_no_ft_ln_hs_largelr.log') fh.setLevel(logging.DEBUG) fh.setFormatter(formatter) sh = logging.StreamHandler(sys.stdout) sh.setLevel(logging.INFO) sh.setFormatter(formatter) logger.addHandler(fh) logger.addHandler(sh) logger.info('Loading the ResNet50 model.') # First get the ResNet50 model resnet_weights_path = '/home/noury/modelzoo/resnet50.pkl' resnet = build_model() model_params = pickle.load(open(resnet_weights_path, 'rb')) lasagne.layers.set_all_param_values(resnet['prob'], model_params['values']) mean_im = model_params['mean_image'].reshape((1, 3, 224, 224)).astype(np.float32) # Load the files needed for the MS COCO Captions dataset train_images_path = '/home/noury/datasets/mscoco/train2014' valid_images_path = '/home/noury/datasets/mscoco/val2014' train_annotations_filepath = '/home/noury/datasets/mscoco/annotations/captions_train2014.json' valid_annotations_filepath = '/home/noury/datasets/mscoco/annotations/captions_val2014.json' coco_captions = pickle.load(open('coco_captions_trainval2014.pkl', 'rb')) train_buckets = coco_captions['train buckets'] valid_buckets = coco_captions['valid buckets'] wordset = coco_captions['raw wordset'] word2idx = {} word2idx['<PAD>'] = 0 word2idx['<GO>'] = 1 word2idx['<EOS>'] = 2 for i, w in enumerate(wordset): word2idx[w] = i+3 idx2word = map(lambda x: x[0], sorted(word2idx.items(), key=lambda x: x[1])) bucket_minibatch_sizes = {16:256, 32:128, 64:64} logger.info('Creating global variables') CONTINUE = False HIDDEN_SIZE = 2048 EMBEDDING_SIZE = 300 WORD_SIZE = len(idx2word) DENSE_SIZE = 1024 ORDER_VIOLATION_COEFF = 10.0 L2_COEFF = 1e-3 RNN_GRAD_CLIP = 64 TOTAL_MAX_NORM = 128 RECURR_LR = theano.shared(np.float32(0.001), 'recurrent lr') EPOCH_LR_COEFF = np.float32(0.5) NUM_EPOCHS = 15 validation_losses = [] total_loss_values = [] order_embedding_loss_values = [] l2_values = [] recurrent_norm_values = [] validation_total_loss_values = [] validation_order_embedding_loss_values = [] validation_l2_values = [] logger.info('Building the network.') im_features = lasagne.layers.get_output(resnet['pool5']) im_features = T.flatten(im_features, outdim=2) # batch size, number of features cap_out_var = T.imatrix('cap_out') # batch size, seq len cap_in_var = T.imatrix('cap_in') # batch size, seq len mask_var = T.bmatrix('mask_var') # batch size, seq len l_hid = lasagne.layers.InputLayer((None, HIDDEN_SIZE), input_var=im_features, name="l_hid") gate = lasagne.layers.Gate(W_in=lasagne.init.Normal(0.02), W_hid=lasagne.init.Orthogonal(), W_cell=lasagne.init.Normal(), b=lasagne.init.Constant(0.0)) cell_gate = lasagne.layers.Gate(W_in=lasagne.init.Normal(0.02), W_hid=lasagne.init.Orthogonal(), 
W_cell=None, b=lasagne.init.Constant(0.0), nonlinearity=lasagne.nonlinearities.tanh) forget_gate = lasagne.layers.Gate(W_in=lasagne.init.Normal(0.02), W_hid=lasagne.init.Orthogonal(), W_cell=lasagne.init.Normal(), b=lasagne.init.Constant(5.0)) l_in = lasagne.layers.InputLayer((None, None), cap_in_var, name="l_in") l_mask = lasagne.layers.InputLayer((None, None), mask_var, name="l_mask") l_emb = lasagne.layers.EmbeddingLayer(l_in, input_size=WORD_SIZE, output_size=EMBEDDING_SIZE, name="l_emb") l_lstm = LNLSTMLayer(l_emb, HIDDEN_SIZE, ingate=gate, forgetgate=forget_gate, cell=cell_gate, outgate=gate, hid_init=l_hid, peepholes=False, grad_clipping=RNN_GRAD_CLIP, mask_input=l_mask, precompute_input=False, alpha_init=lasagne.init.Constant(0.1), # as suggested by Ryan Kiros on Twitter normalize_cell=False, name="l_lstm") # batch size, seq len, hidden size l_reshape = lasagne.layers.ReshapeLayer(l_lstm, (-1, [2]), name="l_reshape") # batch size * seq len, hidden size l_fc = lasagne.layers.DenseLayer(l_reshape, DENSE_SIZE, b=lasagne.init.Constant(5.0), nonlinearity=lasagne.nonlinearities.rectify, name="l_fc") l_drp = lasagne.layers.DropoutLayer(l_fc, 0.3, name="l_drp") l_hs = HierarchicalSoftmaxLayer(l_drp, WORD_SIZE, name="l_hs") # batch size * seq len, WORD SIZE l_slice = lasagne.layers.SliceLayer(l_lstm, -1, axis=1, name="l_slice") if CONTINUE: import glob param_values = glob.glob('another_no_ft_ln_hs_largelr_param_values_*.pkl') max_epoch = max(map(lambda x: int(x[len('another_no_ft_ln_hs_largelr_param_values_'):-len('.pkl')]), param_values)) logger.info('Continue training from epoch {}'.format(max_epoch + 1)) logger.info('Setting previous parameter values from epoch {}'.format(max_epoch)) logger.info('Setting model weights from epoch {}'.format(max_epoch)) param_values_file = 'another_no_ft_ln_hs_largelr_param_values_{}.pkl'.format(max_epoch) param_values = pickle.load(open(param_values_file, 'rb')) lasagne.layers.set_all_param_values(l_hs, param_values['recurrent']) lasagne.layers.set_all_param_values(resnet['pool5'], param_values['resnet']) RECURR_LR = theano.shared(np.float32(param_values['lr']), 'recurrent lr') [total_loss_values, order_embedding_loss_values, l2_values, recurrent_norm_values]= pickle.load(open('another_no_ft_ln_hs_largelr_training_losses.pkl', 'rb')) [validation_total_loss_values, validation_order_embedding_loss_values, validation_l2_values] = pickle.load(open('another_no_ft_ln_hs_largelr_validation_losses.pkl', 'rb')) [validation_losses, recurr_lr_val] = pickle.load(open('another_no_ft_ln_hs_largelr_artifacts.pkl', 'rb')) logger.info('Creating output and loss variables') prediction = lasagne.layers.get_output(l_hs, deterministic=False) flat_cap_out_var = T.flatten(cap_out_var, outdim=1) flat_mask_var = T.flatten(lasagne.layers.get_output(l_mask), outdim=1) loss = T.mean(lasagne.objectives.categorical_crossentropy(prediction, flat_cap_out_var)[flat_mask_var.nonzero()]) caption_features = lasagne.layers.get_output(l_slice, deterministic=False) order_embedding_loss = T.pow(T.maximum(0, caption_features - im_features), 2).mean() l2 = lasagne.regularization.regularize_network_params(l_hs, lasagne.regularization.l2) total_loss = loss + ORDER_VIOLATION_COEFF * order_embedding_loss + L2_COEFF * l2 deterministic_prediction = lasagne.layers.get_output(l_hs, deterministic=True) deterministic_captions = lasagne.layers.get_output(l_slice, deterministic=True) deterministic_loss = T.mean(lasagne.objectives.categorical_crossentropy(deterministic_prediction, 
flat_cap_out_var)[flat_mask_var.nonzero()]) deterministic_order_embedding_loss = T.pow(T.maximum(0, deterministic_captions - im_features), 2).mean() deterministic_l2 = lasagne.regularization.regularize_network_params(l_hs, lasagne.regularization.l2) deterministic_total_loss = deterministic_loss + ORDER_VIOLATION_COEFF * deterministic_order_embedding_loss \ + L2_COEFF * deterministic_l2 logger.info('Getting all parameters and creating update rules.') recurrent_params = lasagne.layers.get_all_params(l_hs, trainable=True) recurrent_grads = T.grad(total_loss, recurrent_params) recurrent_grads, recurrent_norm = lasagne.updates.total_norm_constraint(recurrent_grads, TOTAL_MAX_NORM, return_norm=True) recurrent_updates = lasagne.updates.rmsprop(recurrent_grads, recurrent_params, learning_rate=RECURR_LR) logger.info("Creating the Theano function for Adam update") train_fun = theano.function([resnet['input'].input_var, cap_in_var, mask_var, cap_out_var], [total_loss, order_embedding_loss, l2, recurrent_norm], updates=recurrent_updates) logger.info("Creating the evaluation Theano function") eval_fun = theano.function([resnet['input'].input_var, cap_in_var, mask_var, cap_out_var], [deterministic_total_loss, deterministic_order_embedding_loss, deterministic_l2]) logger.info('Loading the COCO Captions training and validation sets.') coco_train = COCOCaptionDataset(train_images_path, train_annotations_filepath, train_buckets, bucket_minibatch_sizes, word2idx, mean_im, True) coco_valid = COCOCaptionDataset(valid_images_path, valid_annotations_filepath, valid_buckets, bucket_minibatch_sizes, word2idx, mean_im, False) logger.info("Starting the training process...") START = 1 if CONTINUE: START = max_epoch + 1 for e in xrange(START, NUM_EPOCHS + 1): logger.info("Starting epoch".format(e)) if len(validation_losses) > 2 and \ validation_losses[-3] < validation_losses[-1] and \ validation_losses[-2] < validation_losses[-1]: RECURR_LR.set_value(RECURR_LR.get_value() * EPOCH_LR_COEFF) logger.info("Lowering the learning rate to {}".format(RECURR_LR.get_value())) logger.info("Starting training on epoch {} with LR = {}".format(e, RECURR_LR.get_value())) mb = 0 now = datetime.now() for im, cap_in, cap_out in coco_train: tl, oe, el2, recn = train_fun(im, cap_in, (cap_in > 0).astype(np.int8), cap_out) logger.debug("Epoch: {}, Minibatch: {}, Total Loss: {}, Order-embedding loss: {}, L2 value: {}, Recurrent norm: {}".format(e, mb, tl, oe, el2, recn)) total_loss_values.append(tl) order_embedding_loss_values.append(oe) l2_values.append(el2) recurrent_norm_values.append(recn) mb += 1 logger.info("Training epoch {} took {}.".format(e, datetime.now() - now)) logger.info("Epoch {} results:".format(e)) logger.info("\t\tMean total loss: {}".format(np.mean(total_loss_values[-mb:]))) logger.info("\t\tMean order embedding loss: {}".format(np.mean(order_embedding_loss_values[-mb:]))) logger.info("\t\tMean l2 value: {}".format(np.mean(l2_values[-mb:]))) logger.info("\t\tMean Recurrent norm: {}".format(np.mean(recurrent_norm_values[-mb:]))) logger.info("Saving model parameters for epoch {}".format(e)) pickle.dump({'resnet':lasagne.layers.get_all_param_values(resnet['pool5']), 'recurrent':lasagne.layers.get_all_param_values(l_hs), 'mean image':mean_im, 'lr':RECURR_LR.get_value()}, open('another_no_ft_ln_hs_largelr_param_values_{}.pkl'.format(e), 'wb'), protocol=-1) logger.info("Saving loss values for epoch {}".format(e)) pickle.dump([total_loss_values, order_embedding_loss_values, l2_values, recurrent_norm_values], 
open('another_no_ft_ln_hs_largelr_training_losses.pkl', 'wb'), protocol=-1) logger.info("Validating the model on epoch {} on the validation set.".format(e)) mb = 0 now = datetime.now() for im, cap_in, cap_out in coco_valid: tl, oe, el2 = eval_fun(im, cap_in, (cap_in > 0).astype(np.int8), cap_out) logger.debug("Validation epoch: {}, Minibatch: {}, Validation total loss: {}, Validation order-embedding loss: {}, Validation l2 value: {}".format(e, mb, tl, oe, el2)) validation_total_loss_values.append(tl) validation_order_embedding_loss_values.append(oe) validation_l2_values.append(el2) mb += 1 logger.info("Validating epoch {} took {}.".format(e, datetime.now() - now)) logger.info("Epoch {} validation results:".format(e)) logger.info("\t\tValidation mean total loss: {}".format(np.mean(validation_total_loss_values[-mb:]))) logger.info("\t\tValidation mean order-embedding loss: {}".format(np.mean(validation_order_embedding_loss_values[-mb:]))) logger.info("\t\tValidation mean l2 value: {}".format(np.mean(validation_l2_values[-mb:]))) validation_losses.append(np.mean(validation_total_loss_values[-mb:])) logger.info("Saving validation loss values for epoch {}".format(e)) pickle.dump([validation_total_loss_values, validation_order_embedding_loss_values, validation_l2_values], open('another_no_ft_ln_hs_largelr_validation_losses.pkl', 'wb'), protocol=-1) pickle.dump([validation_losses, RECURR_LR.get_value()], open('another_no_ft_ln_hs_largelr_artifacts.pkl', 'wb'), protocol=-1)
mit
2,184,633,508,584,648,400
55.868852
179
0.652061
false
3.374514
false
false
false
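Note: the training script in the record above adds an order-embedding term, T.pow(T.maximum(0, caption_features - im_features), 2).mean(), so caption vectors are penalised whenever they exceed the image vector in any coordinate. The same penalty in plain NumPy, with made-up vectors:

import numpy as np

def order_embedding_penalty(captions, images):
    # Only coordinates where the caption exceeds the image contribute.
    return np.mean(np.maximum(0.0, captions - images) ** 2)

caps = np.array([[0.2, 0.5], [1.0, 0.1]], dtype='float32')
imgs = np.array([[0.4, 0.4], [0.8, 0.3]], dtype='float32')
print(order_embedding_penalty(caps, imgs))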
goulu/Goulib
Goulib/plot.py
1
4898
""" plotable rich object display on IPython/Jupyter notebooks """ __author__ = "Philippe Guglielmetti" __copyright__ = "Copyright 2015, Philippe Guglielmetti" __credits__ = [] __license__ = "LGPL" # import matplotlib and set backend once for all from . import itertools2 import os import io import sys import logging import base64 import matplotlib if os.getenv('TRAVIS'): # are we running https://travis-ci.org/ automated tests ? matplotlib.use('Agg') # Force matplotlib not to use any Xwindows backend elif sys.gettrace(): # http://stackoverflow.com/questions/333995/how-to-detect-that-python-code-is-being-executed-through-the-debugger matplotlib.use('Agg') # because 'QtAgg' crashes python while debugging else: pass # matplotlib.use('pdf') #for high quality pdf, but doesn't work for png, svg ... logging.info('matplotlib backend is %s' % matplotlib.get_backend()) class Plot(object): """base class for plotable rich object display on IPython notebooks inspired from http://nbviewer.ipython.org/github/ipython/ipython/blob/3607712653c66d63e0d7f13f073bde8c0f209ba8/docs/examples/notebooks/display_protocol.ipynb """ def _plot(self, ax, **kwargs): """abstract method, must be overriden :param ax: `matplotlib.axis` :return ax: `matplotlib.axis` after plot """ raise NotImplementedError( 'objects derived from plot.PLot must define a _plot method') return ax def render(self, fmt='svg', **kwargs): return render([self], fmt, **kwargs) # call global function def save(self, filename, **kwargs): return save([self], filename, **kwargs) # call global function # for IPython notebooks def _repr_html_(self): """default rich format is svg plot""" try: return self._repr_svg_() except NotImplementedError: pass # this returns the same as _repr_png_, but is Table compatible buffer = self.render('png') s = base64.b64encode(buffer).decode('utf-8') return '<img src="data:image/png;base64,%s">' % s def html(self, **kwargs): from IPython.display import HTML return HTML(self._repr_html_(**kwargs)) def svg(self, **kwargs): from IPython.display import SVG return SVG(self._repr_svg_(**kwargs)) def _repr_svg_(self, **kwargs): return self.render(fmt='svg', **kwargs).decode('utf-8') def png(self, **kwargs): from IPython.display import Image return Image(self._repr_png_(**kwargs), embed=True) def _repr_png_(self, **kwargs): return self.render(fmt='png', **kwargs) def plot(self, **kwargs): """ renders on IPython Notebook (alias to make usage more straightforward) """ return self.svg(**kwargs) def render(plotables, fmt='svg', **kwargs): """renders several Plot objects""" import matplotlib.pyplot as plt # extract optional arguments used for rasterization printargs, kwargs = itertools2.dictsplit( kwargs, ['dpi', 'transparent', 'facecolor', 'background', 'figsize'] ) ylim = kwargs.pop('ylim', None) xlim = kwargs.pop('xlim', None) title = kwargs.pop('title', None) fig, ax = plt.subplots() labels = kwargs.pop('labels', [None] * len(plotables)) # slightly shift the points to make superimposed curves more visible offset = kwargs.pop('offset', 0) for i, obj in enumerate(plotables): if labels[i] is None: labels[i] = str(obj) if not title: try: title = obj._repr_latex_() # check that title can be used in matplotlib from matplotlib.mathtext import MathTextParser parser = MathTextParser('path').parse(title) except Exception as e: title = labels[i] ax = obj._plot(ax, label=labels[i], offset=i * offset, **kwargs) if ylim: plt.ylim(ylim) if xlim: plt.xlim(xlim) ax.set_title(title) if len(labels) > 1: ax.legend() output = io.BytesIO() fig.savefig(output, 
format=fmt, **printargs) data = output.getvalue() plt.close(fig) return data def png(plotables, **kwargs): from IPython.display import Image return Image(render(plotables, 'png', **kwargs), embed=True) def svg(plotables, **kwargs): from IPython.display import SVG return SVG(render(plotables, 'svg', **kwargs)) plot = svg def save(plotables, filename, **kwargs): ext = filename.split('.')[-1].lower() kwargs.setdefault('dpi', 600) # force good quality return open(filename, 'wb').write(render(plotables, ext, **kwargs))
lgpl-3.0
139,288,707,879,006,940
29.6
161
0.611066
false
3.799845
false
false
false
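Note: render() in the record above draws into an in-memory buffer, and _repr_html_ falls back to a base64 data URI when SVG is unavailable. The same save-to-BytesIO-and-embed pattern in isolation, with a throwaway figure and the Agg backend forced so it also works headless, as the module does under Travis:

import base64
import io

import matplotlib
matplotlib.use('Agg')  # no display needed
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])
buf = io.BytesIO()
fig.savefig(buf, format='png')
plt.close(fig)
html = '<img src="data:image/png;base64,%s">' % base64.b64encode(buf.getvalue()).decode('utf-8')
print(html[:60], '...')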
nuxeh/morph
morphlib/plugins/deploy_plugin.py
1
29928
# Copyright (C) 2013-2015 Codethink Limited # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 2 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program. If not, see <http://www.gnu.org/licenses/>. import json import logging import os import shutil import sys import tarfile import tempfile import uuid import cliapp import morphlib class DeployPlugin(cliapp.Plugin): def enable(self): group_deploy = 'Deploy Options' self.app.settings.boolean(['upgrade'], 'specify that you want to upgrade an ' 'existing cluster. Deprecated: use the ' '`morph upgrade` command instead', group=group_deploy) self.app.add_subcommand( 'deploy', self.deploy, arg_synopsis='CLUSTER [DEPLOYMENT...] [SYSTEM.KEY=VALUE]') self.app.add_subcommand( 'upgrade', self.upgrade, arg_synopsis='CLUSTER [DEPLOYMENT...] [SYSTEM.KEY=VALUE]') def disable(self): pass def deploy(self, args): '''Deploy a built system image or a set of images. Command line arguments: * `CLUSTER` is the name of the cluster to deploy. * `DEPLOYMENT...` is the name of zero or more deployments in the morphology to deploy. If none are specified then all deployments in the morphology are deployed. * `SYSTEM.KEY=VALUE` can be used to assign `VALUE` to a parameter named `KEY` for the system identified by `SYSTEM` in the cluster morphology (see below). This will override parameters defined in the morphology. Morph deploys a set of systems listed in a cluster morphology. "Deployment" here is quite a general concept: it covers anything where a system image is taken, configured, and then put somewhere where it can be run. The deployment mechanism is quite flexible, and can be extended by the user. A cluster morphology defines a list of systems to deploy, and for each system a list of ways to deploy them. It contains the following fields: * **name**: MUST be the same as the basename of the morphology filename, sans .morph suffix. * **kind**: MUST be `cluster`. * **systems**: a list of systems to deploy; the value is a list of mappings, where each mapping has the following keys: * **morph**: the system morphology to use in the specified commit. * **deploy**: a mapping where each key identifies a system and each system has at least the following keys: * **type**: identifies the type of development e.g. (kvm, nfsboot) (see below). * **location**: where the deployed system should end up at. The syntax depends on the deployment type (see below). Any additional item on the dictionary will be added to the environment as `KEY=VALUE`. * **deploy-defaults**: allows multiple deployments of the same system to share some settings, when they can. Default settings will be overridden by those defined inside the deploy mapping. 
# Example name: cluster-foo kind: cluster systems: - morph: devel-system-x86_64-generic.morph deploy: cluster-foo-x86_64-1: type: kvm location: kvm+ssh://user@host/x86_64-1/x86_64-1.img HOSTNAME: cluster-foo-x86_64-1 DISK_SIZE: 4G RAM_SIZE: 4G VCPUS: 2 - morph: devel-system-armv7-highbank deploy-defaults: type: nfsboot location: cluster-foo-nfsboot-server deploy: cluster-foo-armv7-1: HOSTNAME: cluster-foo-armv7-1 cluster-foo-armv7-2: HOSTNAME: cluster-foo-armv7-2 Each system defined in a cluster morphology can be deployed in multiple ways (`type` in a cluster morphology). Morph provides the following types of deployment: * `tar` where Morph builds a tar archive of the root file system. * `rawdisk` where Morph builds a raw disk image and sets up the image with a bootloader and configuration so that it can be booted. Disk size is set with `DISK_SIZE` (see below). * `virtualbox-ssh` where Morph creates a VirtualBox disk image, and creates a new virtual machine on a remote host, accessed over ssh. Disk and RAM size are set with `DISK_SIZE` and `RAM_SIZE` (see below). * `kvm`, which is similar to `virtualbox-ssh`, but uses libvirt and KVM instead of VirtualBox. Disk and RAM size are set with `DISK_SIZE` and `RAM_SIZE` (see below). * `nfsboot` where Morph creates a system to be booted over a network. * `ssh-rsync` where Morph copies a binary delta over to the target system and arranges for it to be bootable. This requires `system-version-manager` from the tbdiff chunk * `initramfs`, where Morph turns the system into an initramfs image, suitable for being used as the early userland environment for a system to be able to locate more complicated storage for its root file-system, or on its own for diskless deployments. There are additional extensions that currently live in the Baserock definitions repo (baserock:baserock/definitions). These include: * `image-package` where Morph creates a tarball that includes scripts that can be used to make disk images outside of a Baserock environment. The example in definitions.git will create scripts for generating disk images and installing to existing disks. * `sdk` where Morph generates something resembing a BitBake SDK, which provides a toolchain for building software to target a system built by Baserock, from outside of a Baserock environment. This creates a self-extracting shell archive which you pass a directory to extract to, and inside that has a shell snippet called environment-setup-$TARGET which can be used to set environment variables to use the toolchain. * `pxeboot` where Morph temporarily network-boots the system you are deploying, so it can install a more permanent system onto local storage. In addition to the deployment type, the user must also give a value for `location`. Its syntax depends on the deployment types. The deployment types provided by Morph use the following syntaxes: * `tar`: pathname to the tar archive to be created; for example, `/home/alice/testsystem.tar` * `rawdisk`: pathname to the disk image to be created; for example, `/home/alice/testsystem.img` * `virtualbox-ssh` and `kvm`: a custom URL scheme that provides the target host machine (the one that runs VirtualBox or `kvm`), the name of the new virtual machine, and the location on the target host of the virtual disk file. The target host is accessed over ssh. 
For example, `vbox+ssh://[email protected]/testsys/home/alice/testsys.vdi` or `kvm+ssh://[email protected]/testsys/home/alice/testys.img` where * `[email protected]` is the target as given to ssh, **from within the development host** (which may be different from the target host's normal address); * `testsys` is the new VM's name; * `/home/alice/testsys.vdi` and `/home/alice/testys.img` are the pathnames of the disk image files on the target host. * `nfsboot`: the address of the nfsboot server. (Note this is just the _address_ of the trove, _not_ `user@...`, since `root@` will automatically be prepended to the server address.) In addition to the `location`parameter, deployments can take additional `KEY=VALUE` parameters. These can be provided in the following ways: 1. In the cluster definition file, e.g. ... systems: - morph: systems/foo-system.morph deploy: foo: HOSTNAME: foo 2. In the environment before running e.g. `HOSTNAME=foo morph deploy ...` 3. On the command-line e.g. `morph deploy clusters/foo.morph foo.HOSTNAME=foo` For any boolean `KEY=VALUE` parameters, allowed values are: +ve `yes`, `1`, `true`; -ve `no`, `0`, `false`; The following `KEY=VALUE` parameters are supported for `rawdisk`, `virtualbox-ssh` and `kvm` and deployment types: * `DISK_SIZE=X` to set the size of the disk image. `X` should use a suffix of `K`, `M`, or `G` (in upper or lower case) to indicate kilo-, mega-, or gigabytes. For example, `DISK_SIZE=100G` would create a 100 gigabyte disk image. **This parameter is mandatory**. The `kvm` and `virtualbox-ssh` deployment types support an additional parameter: * `RAM_SIZE=X` to set the size of virtual RAM for the virtual machine. `X` is interpreted in the same was as `DISK_SIZE`, and defaults to `1G`. * `AUTOSTART=<VALUE>` - allowed values are `yes` and `no` (default) For the `nfsboot` write extension, * the following `KEY=VALUE` pairs are mandatory * `NFSBOOT_CONFIGURE=yes` (or any non-empty value). This enables the `nfsboot` configuration extension (see below) which MUST be used when using the `nfsboot` write extension. * `HOSTNAME=<STRING>` a unique identifier for that system's `nfs` root when it's deployed on the nfsboot server - the extension creates a directory with that name for the `nfs` root, and stores kernels by that name for the tftp server. * the following `KEY=VALUE` pairs are optional * `VERSION_LABEL=<STRING>` - set the name of the system version being deployed, when upgrading. Defaults to "factory". Each deployment type is implemented by a **write extension**. The ones provided by Morph are listed above, but users may also create their own by adding them in the same git repository and branch as the system morphology. A write extension is a script that does whatever is needed for the deployment. A write extension is passed two command line parameters: the name of an unpacked directory tree that contains the system files (after configuration, see below), and the `location` parameter. Regardless of the type of deployment, the image may be configured for a specific deployment by using **configuration extensions**. The extensions are listed in the system morphology file: ... configuration-extensions: - set-hostname The above specifies that the extension `set-hostname` is to be run. Morph will run all the configuration extensions listed in the system morphology, and no others. (This way, configuration is more easily tracked in git.) 
Configuration extensions are scripts that get the unpacked directory tree of the system as their parameter, and do whatever is needed to configure the tree. Morph provides the following configuration extension built in: * `set-hostname` sets the hostname of the system to the value of the `HOSTNAME` variable. * `nfsboot` configures the system for nfsbooting. This MUST be used when deploying with the `nfsboot` write extension. Any `KEY=VALUE` parameters given in `deploy` or `deploy-defaults` sections of the cluster morphology, or given through the command line are set as environment variables when either the configuration or the write extension runs (except `type` and `location`). Deployment configuration is stored in the deployed system as /baserock/deployment.meta. THIS CONTAINS ALL ENVIRONMENT VARIABLES SET DURING DEPLOYMENT, so make sure you have no sensitive information in your environment that is being leaked. As a special case, any environment/deployment variable that contains 'PASSWORD' in its name is stripped out and not stored in the final system. ''' # Nasty hack to allow deploying things of a different architecture def validate(self, root_artifact): pass morphlib.buildcommand.BuildCommand._validate_architecture = validate if not args: raise cliapp.AppException( 'Too few arguments to deploy command (see help)') # Raise an exception if there is not enough space in tempdir # / for the path and 0 for the minimum size is a no-op # it exists because it is complicated to check the available # disk space given dirs may be on the same device morphlib.util.check_disk_available( self.app.settings['tempdir'], self.app.settings['tempdir-min-space'], '/', 0) ws = morphlib.workspace.open('.') sb = morphlib.sysbranchdir.open_from_within('.') cluster_filename = morphlib.util.sanitise_morphology_path(args[0]) cluster_filename = sb.relative_to_root_repo(cluster_filename) build_uuid = uuid.uuid4().hex build_command = morphlib.buildcommand.BuildCommand(self.app) build_command = self.app.hookmgr.call('new-build-command', build_command) loader = morphlib.morphloader.MorphologyLoader() name = morphlib.git.get_user_name(self.app.runcmd) email = morphlib.git.get_user_email(self.app.runcmd) build_ref_prefix = self.app.settings['build-ref-prefix'] root_repo_dir = morphlib.gitdir.GitDirectory( sb.get_git_directory_name(sb.root_repository_url)) cluster_text = root_repo_dir.read_file(cluster_filename) cluster_morphology = loader.load_from_string(cluster_text, filename=cluster_filename) if cluster_morphology['kind'] != 'cluster': raise cliapp.AppException( "Error: morph deployment commands are only supported for " "cluster morphologies.") # parse the rest of the args all_subsystems = set() all_deployments = set() deployments = set() for system in cluster_morphology['systems']: all_deployments.update(system['deploy'].iterkeys()) if 'subsystems' in system: all_subsystems.update(loader._get_subsystem_names(system)) for item in args[1:]: if not item in all_deployments: break deployments.add(item) env_vars = args[len(deployments) + 1:] self.validate_deployment_options( env_vars, all_deployments, all_subsystems) if self.app.settings['local-changes'] == 'include': bb = morphlib.buildbranch.BuildBranch(sb, build_ref_prefix) pbb = morphlib.buildbranch.pushed_build_branch( bb, loader=loader, changes_need_pushing=False, name=name, email=email, build_uuid=build_uuid, status=self.app.status) with pbb as (repo, commit, original_ref): self.deploy_cluster(build_command, cluster_morphology, root_repo_dir, repo, commit, 
env_vars, deployments) else: repo = sb.get_config('branch.root') ref = sb.get_config('branch.name') commit = root_repo_dir.resolve_ref_to_commit(ref) self.deploy_cluster(build_command, cluster_morphology, root_repo_dir, repo, commit, env_vars, deployments) self.app.status(msg='Finished deployment') def validate_deployment_options( self, env_vars, all_deployments, all_subsystems): for var in env_vars: for subsystem in all_subsystems: if subsystem == var: raise cliapp.AppException( 'Cannot directly deploy subsystems. Create a top ' 'level deployment for the subsystem %s instead.' % subsystem) if (not any(deployment in var for deployment in all_deployments) and not subsystem in var): raise cliapp.AppException( 'Variable referenced a non-existent deployment ' 'name: %s' % var) def deploy_cluster(self, build_command, cluster_morphology, root_repo_dir, repo, commit, env_vars, deployments): # Create a tempdir for this deployment to work in deploy_tempdir = tempfile.mkdtemp( dir=os.path.join(self.app.settings['tempdir'], 'deployments')) try: for system in cluster_morphology['systems']: self.deploy_system(build_command, deploy_tempdir, root_repo_dir, repo, commit, system, env_vars, deployments, parent_location='') finally: shutil.rmtree(deploy_tempdir) def deploy_system(self, build_command, deploy_tempdir, root_repo_dir, build_repo, ref, system, env_vars, deployment_filter, parent_location): sys_ids = set(system['deploy'].iterkeys()) if deployment_filter and not \ any(sys_id in deployment_filter for sys_id in sys_ids): return old_status_prefix = self.app.status_prefix system_status_prefix = '%s[%s]' % (old_status_prefix, system['morph']) self.app.status_prefix = system_status_prefix try: # Find the artifact to build morph = morphlib.util.sanitise_morphology_path(system['morph']) srcpool = build_command.create_source_pool(build_repo, ref, morph) artifact = build_command.resolve_artifacts(srcpool) deploy_defaults = system.get('deploy-defaults', {}) for system_id, deploy_params in system['deploy'].iteritems(): if not system_id in deployment_filter and deployment_filter: continue deployment_status_prefix = '%s[%s]' % ( system_status_prefix, system_id) self.app.status_prefix = deployment_status_prefix try: user_env = morphlib.util.parse_environment_pairs( os.environ, [pair[len(system_id)+1:] for pair in env_vars if pair.startswith(system_id)]) final_env = dict(deploy_defaults.items() + deploy_params.items() + user_env.items()) is_upgrade = ('yes' if self.app.settings['upgrade'] else 'no') final_env['UPGRADE'] = is_upgrade deployment_type = final_env.pop('type', None) if not deployment_type: raise morphlib.Error('"type" is undefined ' 'for system "%s"' % system_id) location = final_env.pop('location', None) if not location: raise morphlib.Error('"location" is undefined ' 'for system "%s"' % system_id) morphlib.util.sanitize_environment(final_env) self.check_deploy(root_repo_dir, ref, deployment_type, location, final_env) system_tree = self.setup_deploy(build_command, deploy_tempdir, root_repo_dir, ref, artifact, deployment_type, location, final_env) for subsystem in system.get('subsystems', []): self.deploy_system(build_command, deploy_tempdir, root_repo_dir, build_repo, ref, subsystem, env_vars, [], parent_location=system_tree) if parent_location: deploy_location = os.path.join(parent_location, location.lstrip('/')) else: deploy_location = location self.run_deploy_commands(deploy_tempdir, final_env, artifact, root_repo_dir, ref, deployment_type, system_tree, deploy_location) finally: 
self.app.status_prefix = system_status_prefix finally: self.app.status_prefix = old_status_prefix def upgrade(self, args): '''Upgrade an existing set of instances using built images. See `morph help deploy` for documentation. ''' if not args: raise cliapp.AppException( 'Too few arguments to upgrade command (see `morph help ' 'deploy`)') if self.app.settings['upgrade']: raise cliapp.AppException( 'Running `morph upgrade --upgrade` does not make sense.') self.app.settings['upgrade'] = True self.deploy(args) def check_deploy(self, root_repo_dir, ref, deployment_type, location, env): # Run optional write check extension. These are separate from the write # extension because it may be several minutes before the write # extension itself has the chance to raise an error. try: self._run_extension( root_repo_dir, deployment_type, '.check', [location], env) except morphlib.extensions.ExtensionNotFoundError: pass def setup_deploy(self, build_command, deploy_tempdir, root_repo_dir, ref, artifact, deployment_type, location, env): # deployment_type, location and env are only used for saving metadata # Create a tempdir to extract the rootfs in system_tree = tempfile.mkdtemp(dir=deploy_tempdir) try: # Unpack the artifact (tarball) to a temporary directory. self.app.status(msg='Unpacking system for configuration') if build_command.lac.has(artifact): f = build_command.lac.get(artifact) elif build_command.rac.has(artifact): build_command.cache_artifacts_locally([artifact]) f = build_command.lac.get(artifact) else: raise cliapp.AppException('Deployment failed as system is' ' not yet built.\nPlease ensure' ' the system is built before' ' deployment.') tf = tarfile.open(fileobj=f) tf.extractall(path=system_tree) self.app.status( msg='System unpacked at %(system_tree)s', system_tree=system_tree) self.app.status( msg='Writing deployment metadata file') metadata = self.create_metadata( artifact, root_repo_dir, deployment_type, location, env) metadata_path = os.path.join( system_tree, 'baserock', 'deployment.meta') with morphlib.savefile.SaveFile(metadata_path, 'w') as f: json.dump(metadata, f, indent=4, sort_keys=True, encoding='unicode-escape') return system_tree except Exception: shutil.rmtree(system_tree) raise def run_deploy_commands(self, deploy_tempdir, env, artifact, root_repo_dir, ref, deployment_type, system_tree, location): # Extensions get a private tempdir so we can more easily clean # up any files an extension left behind deploy_private_tempdir = tempfile.mkdtemp(dir=deploy_tempdir) env['TMPDIR'] = deploy_private_tempdir try: # Run configuration extensions. self.app.status(msg='Configure system') names = artifact.source.morphology['configuration-extensions'] for name in names: self._run_extension( root_repo_dir, name, '.configure', [system_tree], env) # Run write extension. self.app.status(msg='Writing to device') self._run_extension( root_repo_dir, deployment_type, '.write', [system_tree, location], env) finally: # Cleanup. self.app.status(msg='Cleaning up') shutil.rmtree(deploy_private_tempdir) def _report_extension_stdout(self, line): self.app.status(msg=line.replace('%s', '%%')) def _report_extension_stderr(self, error_list): def cb(line): error_list.append(line) sys.stderr.write('%s\n' % line) return cb def _report_extension_logger(self, name, kind): return lambda line: logging.debug('%s%s: %s', name, kind, line) def _run_extension(self, gd, name, kind, args, env): '''Run an extension. The ``kind`` should be either ``.configure`` or ``.write``, depending on the kind of extension that is sought. 
The extension is found either in the git repository of the system morphology (repo, ref), or with the Morph code. ''' error_list = [] with morphlib.extensions.get_extension_filename(name, kind) as fn: ext = morphlib.extensions.ExtensionSubprocess( report_stdout=self._report_extension_stdout, report_stderr=self._report_extension_stderr(error_list), report_logger=self._report_extension_logger(name, kind), ) returncode = ext.run(fn, args, env=env, cwd=gd.dirname) if returncode == 0: logging.info('%s%s succeeded', name, kind) else: message = '%s%s failed with code %s: %s' % ( name, kind, returncode, '\n'.join(error_list)) raise cliapp.AppException(message) def create_metadata(self, system_artifact, root_repo_dir, deployment_type, location, env): '''Deployment-specific metadata. The `build` and `deploy` operations must be from the same ref, so full info on the root repo that the system came from is in /baserock/${system_artifact}.meta and is not duplicated here. We do store a `git describe` of the definitions.git repo as a convenience for post-upgrade hooks that we may need to implement at a future date: the `git describe` output lists the last tag, which will hopefully help us to identify which release of a system was deployed without having to keep a list of SHA1s somewhere or query a Trove. ''' def remove_passwords(env): is_password = morphlib.util.env_variable_is_password return { k:v for k, v in env.iteritems() if not is_password(k) } meta = { 'system-artifact-name': system_artifact.name, 'configuration': remove_passwords(env), 'deployment-type': deployment_type, 'location': location, 'definitions-version': { 'describe': root_repo_dir.describe(), }, 'morph-version': { 'ref': morphlib.gitversion.ref, 'tree': morphlib.gitversion.tree, 'commit': morphlib.gitversion.commit, 'version': morphlib.gitversion.version, }, } return meta
gpl-2.0
-7,916,551,351,011,835,000
42.882698
79
0.57351
false
4.669683
true
false
false
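As a quick illustration of the deployment variable handling described in the morph deploy plugin above, the following standalone Python sketch mimics how deploy-defaults, per-deployment parameters and command-line KEY=VALUE pairs are merged before the configuration and write extensions run. The deployment name 'mysystem', all example values and the CLI invocation shown in the comment are hypothetical; only the merge order and the popping of 'type' and 'location' follow the deploy_system() logic above.

# Standalone sketch (not part of morph): assemble the environment for one
# deployment the same way deploy_system() does. All concrete names and
# values below are made up for illustration.
deploy_defaults = {'HOSTNAME': 'baserock', 'INSTALL_FILES': 'manifests'}
deploy_params = {'type': 'rawdisk', 'location': '/src/mysystem.img',
                 'DISK_SIZE': '4G'}
system_id = 'mysystem'

# Command-line pairs are prefixed with the deployment name, e.g. something
# along the lines of: morph deploy cluster.morph mysystem mysystem.HOSTNAME=devbox
cli_pairs = ['mysystem.HOSTNAME=devbox']
user_env = dict(pair[len(system_id) + 1:].split('=', 1)
                for pair in cli_pairs if pair.startswith(system_id + '.'))

final_env = {}
final_env.update(deploy_defaults)   # lowest priority
final_env.update(deploy_params)     # per-deployment parameters
final_env.update(user_env)          # command-line overrides win

deployment_type = final_env.pop('type')   # consumed by morph itself
location = final_env.pop('location')      # not exported to extensions
final_env['UPGRADE'] = 'no'               # 'yes' when run via `morph upgrade`

print(deployment_type, location)
print(final_env)
# -> rawdisk /src/mysystem.img
# -> {'HOSTNAME': 'devbox', 'INSTALL_FILES': 'manifests', 'DISK_SIZE': '4G', 'UPGRADE': 'no'}

The remaining final_env is what the .configure and .write extensions would see as environment variables; as noted in the docstring above, any variable whose name contains 'PASSWORD' is additionally stripped before the metadata is written.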
pravsripad/jumeg
examples/connectivity/plot_grouped_connectivity_circle.py
2
1374
#!/usr/bin/env python
'''
Example showing how to read grouped aparc labels from yaml file and
plot grouped connectivity circle with these labels.

Author: Praveen Sripad <[email protected]>
'''

import numpy as np

from jumeg import get_jumeg_path
from jumeg.connectivity import plot_grouped_connectivity_circle
import yaml

labels_fname = get_jumeg_path() + '/data/desikan_label_names.yaml'
yaml_fname = get_jumeg_path() + '/data/desikan_aparc_cortex_based_grouping.yaml'
replacer_dict_fname = get_jumeg_path() + '/data/replacer_dictionaries.yaml'

with open(labels_fname, 'r') as f:
    label_names = yaml.safe_load(f)['label_names']

with open(replacer_dict_fname, 'r') as f:
    replacer_dict = yaml.safe_load(f)['replacer_dict_aparc']

# make a random matrix with 68 nodes
# use simple seed for reproducibility
np.random.seed(42)
con = np.random.random((68, 68))
con[con < 0.5] = 0.

indices = (np.array((1, 2, 3)), np.array((5, 6, 7)))

plot_grouped_connectivity_circle(yaml_fname, con, label_names,
                                 labels_mode='replace',
                                 replacer_dict=replacer_dict,
                                 out_fname='example_grouped_con_circle.png',
                                 colorbar_pos=(0.1, 0.1),
                                 n_lines=10, colorbar=True,
                                 colormap='viridis')
bsd-3-clause
-3,088,841,760,635,333,600
35.157895
80
0.624454
false
3.202797
false
false
false
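A small follow-on sketch for the plotting example above: it symmetrises the random test matrix (real connectivity matrices are usually symmetric), drops self-connections, and lists the connections that survive the 0.5 threshold. Reading n_lines=10 as "draw roughly the ten strongest connections" is an assumption about plot_grouped_connectivity_circle, not something taken from its documentation; only numpy is used here.

# Illustrative follow-up only; nothing here is taken from the jumeg API docs.
import numpy as np

np.random.seed(42)
con = np.random.random((68, 68))
con = (con + con.T) / 2.          # symmetrise the test matrix
np.fill_diagonal(con, 0.)         # drop self-connections
con[con < 0.5] = 0.

upper = np.triu(con, k=1)
print('connections above threshold:', np.count_nonzero(upper))

# the ten strongest connections (assumed to be roughly what n_lines=10 draws)
flat_idx = np.argsort(upper, axis=None)[-10:]
rows, cols = np.unravel_index(flat_idx, upper.shape)
print(list(zip(rows.tolist(), cols.tolist())))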