max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
_unittests/ut_grabber/test_box_mock_render.py | sdpython/pymmails | 3 | 12785651 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
@brief test log(time=2s)
"""
import os
import unittest
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import get_temp_folder
from pymmails import MailBoxMock, EmailMessageRenderer, EmailMessageListRenderer
class TestMessageBoxMock(unittest.TestCase):
def test_box_mock_render(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
data = os.path.abspath(os.path.join(os.path.dirname(__file__), "data"))
temp = get_temp_folder(__file__, "temp_render_mock_list_mail")
box = MailBoxMock(data, b"unittestunittest", fLOG)
box.login()
folders = box.folders()
assert len(folders) == 1
fLOG(folders)
mails = list(box.enumerate_mails_in_folder("trav"))
box.logout()
email_render = EmailMessageRenderer()
def tempf(message, location, prev_mail, next_mail):
email_render.render(location, message, None,
file_css="mail_style.css",
prev_mail=prev_mail, next_mail=next_mail)
return ""
mails = list((m, tempf) for m in mails)
render = EmailMessageListRenderer(
title="list of mails", email_renderer=email_render, fLOG=fLOG)
res = render.render(iter=mails, location=temp)
render.flush()
# fLOG(res[0])
exp = ('<a href="d_2015-08-01_p_noreply-at-voyages-sncf-com_ii_8de6a63addb7c03407bc6f0caabd967e.html">' +
'2015/08/01 -\n Voyages-sncf.com</a>')
if exp not in res[0]:
raise Exception(res[0])
def test_box_mock_write(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
data = os.path.abspath(os.path.join(os.path.dirname(__file__), "data"))
temp = get_temp_folder(__file__, "temp_write_mock_list_mail")
box = MailBoxMock(data, b"unittestunittest", fLOG)
box.login()
folders = box.folders()
assert len(folders) == 1
fLOG(folders)
mails = list(box.enumerate_mails_in_folder("trav"))
box.logout()
email_render = EmailMessageRenderer()
render = EmailMessageListRenderer(
title="list of mails", email_renderer=email_render, fLOG=fLOG)
res = render.write(iter=mails, location=temp, filename="essai.html")
render.flush()
with open(res[0], "r", encoding="utf8") as f:
content = f.read()
exp = ('<a href="d_2015-12-20_p_noreply-at-voyages-sncf-com_ii_1bb6fa70421145bed927e00c5e292277.html">' +
'2015/12/20 -\n Voyages-sncf.com</a>')
if exp not in content:
raise Exception(content)
if 'list of mails</h1>' not in content:
raise Exception(content)
allfiles = render.BufferWrite.listfiles()
assert len(allfiles) > 0
allfiles.sort()
with open(allfiles[0], "r", encoding="utf8") as f:
content = f.read()
if '<a href="d_2015-08-01_p_noreply-at-voyages-sncf-com_ii_8de6a63addb7c03407bc6f0caabd967e.html"><--</a>' not in content:
raise Exception(content)
if __name__ == "__main__":
unittest.main()
| 2.296875 | 2 |
blogSite/blog/templatetags/blog_tags.py | sharafx2qeshta/Django-3-by-example | 0 | 12785652 | # this file is for custom template tags
from django import template
from ..models import Post
from django.db.models import Count
# the two imports below are used for custom filters
from django.utils.safestring import mark_safe
import markdown
register = template.Library()
'''
Each module that contains template tags needs to define a variable called
register to be a valid tag library. This variable is an instance of template.Library,
and it's used to register your own template tags and filters.
'''
@register.simple_tag
def total_posts():  # returns the number of published posts
return Post.published.count()
@register.inclusion_tag('blog/post/latest_posts.html')  # you should create this template file to control
# the way the returned data will be displayed
def show_latest_posts(count=5):
latest_posts = Post.published.order_by('-publish')[:count]
return {'latest_posts': latest_posts}
@register.simple_tag
def get_most_commented_posts(count=5):
return Post.published.annotate(
total_comments=Count('comments')
).order_by('-total_comments')[:count]
'''
After adding a new template tags module, you will need to restart
the Django development server in order to use the new tags and
filters in templates.
'''
'''
custom tag types:
1-simple_tag: Processes the data and returns a string
2-inclusion_tag: Processes the data and returns a rendered template
'''
'''
to use custom tags in html files, load this tag library with {% load blog_tags %} at the top of the file
to call any tag function inside an html file, just use {% functionName %} anywhere in the html file
if the function has parameters, you can call it with {% functionName param1 param2 %} and so on
'''
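'''
illustrative only: a minimal template snippet using the tags defined above. the surrounding
markup and the template it would live in are hypothetical, not part of this project:
    {% load blog_tags %}
    <p>{% total_posts %} posts published so far.</p>
    {% show_latest_posts 3 %}
'''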
@register.filter(name='markdown')
def markdown_format(text):
return mark_safe(markdown.markdown(text))
| 2.71875 | 3 |
irrd/server/whois/query_parser.py | morrowc/irrd | 0 | 12785653 | import logging
import re
from typing import Optional
import ujson
from IPy import IP
from ordered_set import OrderedSet
from irrd import __version__
from irrd.conf import get_setting, RPKI_IRR_PSEUDO_SOURCE
from irrd.mirroring.nrtm_generator import NRTMGenerator, NRTMGeneratorException
from irrd.rpki.status import RPKIStatus
from irrd.rpsl.rpsl_objects import (OBJECT_CLASS_MAPPING, RPKI_RELEVANT_OBJECT_CLASSES)
from irrd.server.query_resolver import QueryResolver, RouteLookupType, InvalidQueryException
from irrd.storage.database_handler import DatabaseHandler, RPSLDatabaseResponse
from irrd.storage.preload import Preloader
from irrd.storage.queries import DatabaseStatusQuery
from irrd.utils.validators import parse_as_number, ValidationError
from .query_response import WhoisQueryResponseType, WhoisQueryResponseMode, WhoisQueryResponse
from ..access_check import is_client_permitted
logger = logging.getLogger(__name__)
class WhoisQueryParser:
"""
Parser for all whois-style queries.
This parser distinguishes RIPE-style, e.g. '-K 192.0.2.1' or '-i mnt-by FOO'
from IRRD-style, e.g. '!oFOO'. Query processing is mostly handled by
QueryResolver, with a few exceptions that are whois-specific.
Some query flags, particularly -k/!! and -s/!s retain state across queries,
so a single instance of this object should be created per session, with
handle_query() being called for each individual query.
"""
def __init__(self, client_ip: str, client_str: str, preloader: Preloader,
database_handler: DatabaseHandler) -> None:
self.multiple_command_mode = False
self.timeout = 30
self.key_fields_only = False
self.client_ip = client_ip
self.client_str = client_str
self.database_handler = database_handler
self.query_resolver = QueryResolver(
preloader=preloader,
database_handler=database_handler,
)
def handle_query(self, query: str) -> WhoisQueryResponse:
"""
Process a single query. Always returns a WhoisQueryResponse object.
Not thread safe - only one call must be made to this method at the same time.
"""
self.key_fields_only = False
if query.startswith('!'):
try:
return self.handle_irrd_command(query[1:])
except InvalidQueryException as exc:
logger.info(f'{self.client_str}: encountered parsing error while parsing query "{query}": {exc}')
return WhoisQueryResponse(
response_type=WhoisQueryResponseType.ERROR_USER,
mode=WhoisQueryResponseMode.IRRD,
result=str(exc)
)
except Exception as exc:
logger.error(f'An exception occurred while processing whois query "{query}": {exc}', exc_info=exc)
return WhoisQueryResponse(
response_type=WhoisQueryResponseType.ERROR_INTERNAL,
mode=WhoisQueryResponseMode.IRRD,
result='An internal error occurred while processing this query.'
)
try:
return self.handle_ripe_command(query)
except InvalidQueryException as exc:
logger.info(f'{self.client_str}: encountered parsing error while parsing query "{query}": {exc}')
return WhoisQueryResponse(
response_type=WhoisQueryResponseType.ERROR_USER,
mode=WhoisQueryResponseMode.RIPE,
result=str(exc)
)
except Exception as exc:
logger.error(f'An exception occurred while processing whois query "{query}": {exc}', exc_info=exc)
return WhoisQueryResponse(
response_type=WhoisQueryResponseType.ERROR_INTERNAL,
mode=WhoisQueryResponseMode.RIPE,
result='An internal error occurred while processing this query.'
)
def handle_irrd_command(self, full_command: str) -> WhoisQueryResponse:
"""Handle an IRRD-style query. full_command should not include the first exclamation mark. """
if not full_command:
raise InvalidQueryException('Missing IRRD command')
command = full_command[0]
parameter = full_command[1:]
response_type = WhoisQueryResponseType.SUCCESS
result = None
# A is not tested here because it is already handled in handle_irrd_routes_for_as_set
queries_with_parameter = list('tg6ijmnors')
if command in queries_with_parameter and not parameter:
raise InvalidQueryException(f'Missing parameter for {command} query')
if command == '!':
self.multiple_command_mode = True
result = None
response_type = WhoisQueryResponseType.NO_RESPONSE
elif full_command.upper() == 'FNO-RPKI-FILTER':
self.query_resolver.disable_rpki_filter()
result = 'Filtering out RPKI invalids is disabled for !r and RIPE style ' \
'queries for the rest of this connection.'
elif full_command.upper() == 'FNO-SCOPE-FILTER':
self.query_resolver.disable_out_of_scope_filter()
result = 'Filtering out out-of-scope objects is disabled for !r and RIPE style ' \
'queries for the rest of this connection.'
elif command == 'v':
result = self.handle_irrd_version()
elif command == 't':
self.handle_irrd_timeout_update(parameter)
elif command == 'g':
result = self.handle_irrd_routes_for_origin_v4(parameter)
if not result:
response_type = WhoisQueryResponseType.KEY_NOT_FOUND
elif command == '6':
result = self.handle_irrd_routes_for_origin_v6(parameter)
if not result:
response_type = WhoisQueryResponseType.KEY_NOT_FOUND
elif command == 'a':
result = self.handle_irrd_routes_for_as_set(parameter)
if not result:
response_type = WhoisQueryResponseType.KEY_NOT_FOUND
elif command == 'i':
result = self.handle_irrd_set_members(parameter)
if not result:
response_type = WhoisQueryResponseType.KEY_NOT_FOUND
elif command == 'j':
result = self.handle_irrd_database_serial_range(parameter)
elif command == 'J':
result = self.handle_irrd_database_status(parameter)
elif command == 'm':
result = self.handle_irrd_exact_key(parameter)
if not result:
response_type = WhoisQueryResponseType.KEY_NOT_FOUND
elif command == 'n':
self.handle_user_agent(parameter)
elif command == 'o':
result = self.handle_inverse_attr_search('mnt-by', parameter)
if not result:
response_type = WhoisQueryResponseType.KEY_NOT_FOUND
elif command == 'r':
result = self.handle_irrd_route_search(parameter)
if not result:
response_type = WhoisQueryResponseType.KEY_NOT_FOUND
elif command == 's':
result = self.handle_irrd_sources_list(parameter)
else:
raise InvalidQueryException(f'Unrecognised command: {command}')
return WhoisQueryResponse(
response_type=response_type,
mode=WhoisQueryResponseMode.IRRD,
result=result,
)
def handle_irrd_timeout_update(self, timeout: str) -> None:
"""!timeout query - update timeout in connection"""
try:
timeout_value = int(timeout)
except ValueError:
raise InvalidQueryException(f'Invalid value for timeout: {timeout}')
if timeout_value > 0 and timeout_value <= 1000:
self.timeout = timeout_value
else:
raise InvalidQueryException(f'Invalid value for timeout: {timeout}')
def handle_irrd_routes_for_origin_v4(self, origin: str) -> str:
"""!g query - find all originating IPv4 prefixes from an origin, e.g. !gAS65537"""
return self._routes_for_origin(origin, 4)
def handle_irrd_routes_for_origin_v6(self, origin: str) -> str:
"""!6 query - find all originating IPv6 prefixes from an origin, e.g. !6as65537"""
return self._routes_for_origin(origin, 6)
def _routes_for_origin(self, origin: str, ip_version: Optional[int]=None) -> str:
"""
Resolve all route(6)s prefixes for an origin, returning a space-separated list
of all originating prefixes, not including duplicates.
"""
try:
origin_formatted, _ = parse_as_number(origin)
except ValidationError as ve:
raise InvalidQueryException(str(ve))
prefixes = self.query_resolver.routes_for_origin(origin_formatted, ip_version)
return ' '.join(prefixes)
def handle_irrd_routes_for_as_set(self, set_name: str) -> str:
"""
!a query - find all originating prefixes for all members of an AS-set, e.g. !a4AS-FOO or !a6AS-FOO
"""
ip_version: Optional[int] = None
if set_name.startswith('4'):
set_name = set_name[1:]
ip_version = 4
elif set_name.startswith('6'):
set_name = set_name[1:]
ip_version = 6
if not set_name:
raise InvalidQueryException('Missing required set name for A query')
prefixes = self.query_resolver.routes_for_as_set(set_name, ip_version)
return ' '.join(prefixes)
def handle_irrd_set_members(self, parameter: str) -> str:
"""
!i query - find all members of an as-set or route-set, possibly recursively.
e.g. !iAS-FOO for non-recursive, !iAS-FOO,1 for recursive
"""
recursive = False
if parameter.endswith(',1'):
recursive = True
parameter = parameter[:-2]
members = self.query_resolver.members_for_set(parameter, recursive=recursive)
return ' '.join(members)
def handle_irrd_database_serial_range(self, parameter: str) -> str:
"""
!j query - database serial range
This query is legacy and only available in whois, so resolved
directly here instead of in the query resolver.
"""
if parameter == '-*':
sources = self.query_resolver.sources_default if self.query_resolver.sources_default else self.query_resolver.all_valid_sources
else:
sources = [s.upper() for s in parameter.split(',')]
invalid_sources = [s for s in sources if s not in self.query_resolver.all_valid_sources]
query = DatabaseStatusQuery().sources(sources)
query_results = self.database_handler.execute_query(query, refresh_on_error=True)
result_txt = ''
for query_result in query_results:
source = query_result['source'].upper()
keep_journal = 'Y' if get_setting(f'sources.{source}.keep_journal') else 'N'
serial_newest = query_result['serial_newest_mirror']
fields = [
source,
keep_journal,
f'0-{serial_newest}' if serial_newest else '-',
]
if query_result['serial_last_export']:
fields.append(str(query_result['serial_last_export']))
result_txt += ':'.join(fields) + '\n'
for invalid_source in invalid_sources:
result_txt += f'{invalid_source.upper()}:X:Database unknown\n'
return result_txt.strip()
def handle_irrd_database_status(self, parameter: str) -> str:
"""!J query - database status"""
if parameter == '-*':
sources = None
else:
sources = [s.upper() for s in parameter.split(',')]
results = self.query_resolver.database_status(sources)
return ujson.dumps(results, indent=4)
def handle_irrd_exact_key(self, parameter: str):
"""!m query - exact object key lookup, e.g. !maut-num,AS65537"""
try:
object_class, rpsl_pk = parameter.split(',', maxsplit=1)
except ValueError:
raise InvalidQueryException(f'Invalid argument for object lookup: {parameter}')
query = self.query_resolver.key_lookup(object_class, rpsl_pk)
return self._flatten_query_output(query)
def handle_irrd_route_search(self, parameter: str):
"""
!r query - route search with various options:
!r192.0.2.0/24 returns all exact matching objects
!r192.0.2.0/24,o returns space-separated origins of all exact matching objects
!r192.0.2.0/24,l returns all one-level less specific objects, not including exact
!r192.0.2.0/24,L returns all less specific objects, including exact
!r192.0.2.0/24,M returns all more specific objects, not including exact
"""
option: Optional[str] = None
if ',' in parameter:
address, option = parameter.split(',')
else:
address = parameter
try:
address = IP(address)
except ValueError:
raise InvalidQueryException(f'Invalid input for route search: {parameter}')
lookup_types = {
None: RouteLookupType.EXACT,
'o': RouteLookupType.EXACT,
'l': RouteLookupType.LESS_SPECIFIC_ONE_LEVEL,
'L': RouteLookupType.LESS_SPECIFIC_WITH_EXACT,
'M': RouteLookupType.MORE_SPECIFIC_WITHOUT_EXACT,
}
try:
lookup_type = lookup_types[option]
except KeyError:
raise InvalidQueryException(f'Invalid route search option: {option}')
result = self.query_resolver.route_search(address, lookup_type)
if option == 'o':
prefixes = [r['parsed_data']['origin'] for r in result]
return ' '.join(prefixes)
return self._flatten_query_output(result)
def handle_irrd_sources_list(self, parameter: str) -> Optional[str]:
"""
!s query - set used sources
!s-lc returns all enabled sources, space separated
!sripe,nttcom limits sources to ripe and nttcom
"""
if parameter == '-lc':
return ','.join(self.query_resolver.sources)
sources = parameter.upper().split(',')
self.query_resolver.set_query_sources(sources)
return None
def handle_irrd_version(self):
"""!v query - return version"""
return f'IRRd -- version {__version__}'
def handle_ripe_command(self, full_query: str) -> WhoisQueryResponse:
"""
Process RIPE-style queries. Any query that is not explicitly an IRRD-style
query (i.e. starts with exclamation mark) is presumed to be a RIPE query.
"""
full_query = re.sub(' +', ' ', full_query)
components = full_query.strip().split(' ')
result = None
response_type = WhoisQueryResponseType.SUCCESS
while len(components):
component = components.pop(0)
if component.startswith('-'):
command = component[1:]
try:
if command == 'k':
self.multiple_command_mode = True
elif command in ['l', 'L', 'M', 'x']:
result = self.handle_ripe_route_search(command, components.pop(0))
if not result:
response_type = WhoisQueryResponseType.KEY_NOT_FOUND
break
elif command == 'i':
result = self.handle_inverse_attr_search(components.pop(0), components.pop(0))
if not result:
response_type = WhoisQueryResponseType.KEY_NOT_FOUND
break
elif command == 's':
self.handle_ripe_sources_list(components.pop(0))
elif command == 'a':
self.handle_ripe_sources_list(None)
elif command == 'T':
self.handle_ripe_restrict_object_class(components.pop(0))
elif command == 't':
result = self.handle_ripe_request_object_template(components.pop(0))
break
elif command == 'K':
self.handle_ripe_key_fields_only()
elif command == 'V':
self.handle_user_agent(components.pop(0))
elif command == 'g':
result = self.handle_nrtm_request(components.pop(0))
elif command in ['F', 'r']:
continue # These flags disable recursion, but IRRd never performs recursion anyways
else:
raise InvalidQueryException(f'Unrecognised flag/search: {command}')
except IndexError:
raise InvalidQueryException(f'Missing argument for flag/search: {command}')
else: # assume query to be a free text search
result = self.handle_ripe_text_search(component)
return WhoisQueryResponse(
response_type=response_type,
mode=WhoisQueryResponseMode.RIPE,
result=result,
)
def handle_ripe_route_search(self, command: str, parameter: str) -> str:
"""
-l/L/M/x query - route search for:
-x 192.0.2.0/24 returns all exact matching objects
-l 192.0.2.0/24 returns all one-level less specific objects, not including exact
-L 192.0.2.0/24 returns all less specific objects, including exact
-M 192.0.2.0/24 returns all more specific objects, not including exact
"""
try:
address = IP(parameter)
except ValueError:
raise InvalidQueryException(f'Invalid input for route search: {parameter}')
lookup_types = {
'x': RouteLookupType.EXACT,
'l': RouteLookupType.LESS_SPECIFIC_ONE_LEVEL,
'L': RouteLookupType.LESS_SPECIFIC_WITH_EXACT,
'M': RouteLookupType.MORE_SPECIFIC_WITHOUT_EXACT,
}
lookup_type = lookup_types[command]
result = self.query_resolver.route_search(address, lookup_type)
return self._flatten_query_output(result)
def handle_ripe_sources_list(self, sources_list: Optional[str]) -> None:
"""-s/-a parameter - set sources list. Empty list enables all sources. """
if sources_list:
sources = sources_list.upper().split(',')
self.query_resolver.set_query_sources(sources)
else:
self.query_resolver.set_query_sources(None)
def handle_ripe_restrict_object_class(self, object_classes) -> None:
"""-T parameter - restrict object classes for this query, comma-seperated"""
self.query_resolver.set_object_class_filter_next_query(object_classes.split(','))
def handle_ripe_request_object_template(self, object_class) -> str:
"""-t query - return the RPSL template for an object class"""
return self.query_resolver.rpsl_object_template(object_class)
def handle_ripe_key_fields_only(self) -> None:
"""-K paramater - only return primary key and members fields"""
self.key_fields_only = True
def handle_ripe_text_search(self, value: str) -> str:
result = self.query_resolver.rpsl_text_search(value)
return self._flatten_query_output(result)
def handle_user_agent(self, user_agent: str):
"""-V/!n parameter/query - set a user agent for the client"""
self.query_resolver.user_agent = user_agent
logger.info(f'{self.client_str}: user agent set to: {user_agent}')
def handle_nrtm_request(self, param):
try:
source, version, serial_range = param.split(':')
except ValueError:
raise InvalidQueryException('Invalid parameter: must contain three elements')
try:
serial_start, serial_end = serial_range.split('-')
serial_start = int(serial_start)
if serial_end == 'LAST':
serial_end = None
else:
serial_end = int(serial_end)
except ValueError:
raise InvalidQueryException(f'Invalid serial range: {serial_range}')
if version not in ['1', '3']:
raise InvalidQueryException(f'Invalid NRTM version: {version}')
source = source.upper()
if source not in self.query_resolver.all_valid_sources:
raise InvalidQueryException(f'Unknown source: {source}')
if not is_client_permitted(self.client_ip, f'sources.{source}.nrtm_access_list'):
raise InvalidQueryException('Access denied')
try:
return NRTMGenerator().generate(source, version, serial_start, serial_end, self.database_handler)
except NRTMGeneratorException as nge:
raise InvalidQueryException(str(nge))
def handle_inverse_attr_search(self, attribute: str, value: str) -> str:
"""
-i/!o query - inverse search for attribute values
e.g. `-i mnt-by FOO` finds all objects where (one of the) maintainer(s) is FOO,
as does `!oFOO`. Restricted to designated lookup fields.
"""
result = self.query_resolver.rpsl_attribute_search(attribute, value)
return self._flatten_query_output(result)
def _flatten_query_output(self, query_response: RPSLDatabaseResponse) -> str:
"""
Flatten an RPSL database response into a string with object text
for easy passing to a WhoisQueryResponse.
"""
if self.key_fields_only:
result = self._filter_key_fields(query_response)
else:
result = ''
for obj in query_response:
result += obj['object_text']
if (
self.query_resolver.rpki_aware and
obj['source'] != RPKI_IRR_PSEUDO_SOURCE and
obj['object_class'] in RPKI_RELEVANT_OBJECT_CLASSES
):
comment = ''
if obj['rpki_status'] == RPKIStatus.not_found:
comment = ' # No ROAs found, or RPKI validation not enabled for source'
result += f'rpki-ov-state: {obj["rpki_status"].name}{comment}\n'
result += '\n'
return result.strip('\n\r')
def _filter_key_fields(self, query_response) -> str:
results: OrderedSet[str] = OrderedSet()
for obj in query_response:
result = ''
rpsl_object_class = OBJECT_CLASS_MAPPING[obj['object_class']]
fields_included = rpsl_object_class.pk_fields + ['members', 'mp-members']
for field_name in fields_included:
field_data = obj['parsed_data'].get(field_name)
if field_data:
if isinstance(field_data, list):
for item in field_data:
result += f'{field_name}: {item}\n'
else:
result += f'{field_name}: {field_data}\n'
results.add(result)
return '\n'.join(results)
| 1.851563 | 2 |
ci/clean-cluster.py | azatoth/telepresence | 0 | 12785654 | #!/usr/bin/env python3
"""
Delete old deployments and services with test-prefixed names. This is used to
clean up the Telepresence test cluster, as Telepresence tests currently leak.
"""
import argparse
import datetime
import json
from subprocess import check_output
from typing import Dict, List
def get_kubectl() -> List[str]:
"""Get correct kubectl command"""
k8s_namespace = str(
check_output([
"kubectl", "config", "view", "--minify=true",
"-o=jsonpath={.contexts[0].context.namespace}"
]).strip(), "ascii"
)
if k8s_namespace:
return ["kubectl", "--namespace", k8s_namespace]
return ["kubectl"]
KUBECTL = get_kubectl()
def get_now() -> datetime.datetime:
"""Get current date/time in UTC"""
return datetime.datetime.now(tz=datetime.timezone.utc)
def parse_k8s_timestamp(timestamp: str) -> datetime.datetime:
"""Get date/time in UTC from k8s timestamp"""
fmt = "%Y-%m-%dT%H:%M:%SZ"
naive = datetime.datetime.strptime(timestamp, fmt)
return naive.replace(tzinfo=datetime.timezone.utc)
def get_kubectl_json(cmd: List[str]) -> Dict:
"""Call kubectl and parse resulting JSON"""
output = str(check_output(KUBECTL + cmd + ["-o", "json"]), "utf-8")
return json.loads(output)
def get_resources(kind: str, prefix="",
min_age=datetime.timedelta(seconds=0)) -> List[str]:
"""
Return names of k8s resources with the given name prefix and minimum age
"""
now = get_now()
resources = get_kubectl_json(["get", kind])["items"]
names = []
for resource in resources:
name = resource["metadata"]["name"]
if kind == "svc" and name == "kubernetes":
continue
if not name.startswith(prefix):
continue
timestamp_str = resource["metadata"]["creationTimestamp"]
timestamp = parse_k8s_timestamp(timestamp_str)
age = now - timestamp
if age < min_age:
continue
names.append("{}/{}".format(kind, name))
return names
def seconds(value: str) -> datetime.timedelta:
"""Return a timedelta with the given number of seconds"""
try:
return datetime.timedelta(seconds=int(value))
except ValueError:
message = "Invalid age in seconds: {}".format(value)
raise argparse.ArgumentTypeError(message)
def main():
"""Clean up the current Kubernetes cluster"""
parser = argparse.ArgumentParser(
allow_abbrev=False, # can make adding changes not backwards compatible
description=__doc__
)
parser.add_argument(
"--prefix",
default="testing-",
help="prefix for resource name [testing-]"
)
parser.add_argument(
"--min-age",
type=seconds,
default="86400",
help="minimum age in seconds"
)
parser.add_argument(
"--dry-run", action="store_true", help="don't really delete anything"
)
args = parser.parse_args()
names = [
name
for kind in ("svc", "deploy")
for name in get_resources(kind, args.prefix, args.min_age)
]
if not names:
print("Nothing to clean up.")
return
if args.dry_run:
print("Would clean up:")
else:
print("Cleaning up:")
for name in names:
print(" {}".format(name))
if not args.dry_run:
check_output(KUBECTL + ["delete"] + names)
if __name__ == "__main__":
main()
| 2.390625 | 2 |
cibyl/utils/dicts.py | rhos-infra/cibyl | 3 | 12785655 | """
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
import logging
from cibyl.models.attribute import AttributeDictValue
LOG = logging.getLogger(__name__)
def subset(dictionary, keys):
"""Creates a new dictionary from items from another one. A new
dictionary is formed by extracting the keys explicitly indicated. If one of
the given keys is not present on the dictionary, it is ignored. The
original dictionary is left untouched.
:param dictionary: The dictionary to extract items from.
:type dictionary: dict
:param keys: The keys to get from the dictionary.
:type keys: list
:return: The new dictionary.
:rtype: dict
"""
result = {}
for key in keys:
# Do not crash if a key is not present
if key not in dictionary:
message = "Ignoring key '%s' not found in dictionary: %s"
LOG.debug(message, key, dictionary)
continue
result[key] = dictionary[key]
return result
def nsubset(dictionary, keys):
"""Creates a new dictionary from items from another one. The 'n' stands
for 'negative', meaning that the keys form an excluded list. All keys
from the other dictionary will be extracted except for the ones explicitly
indicated. The original dictionary is left untouched.
:param dictionary: The dictionary to extract items from.
:type dictionary: dict
:param keys: The keys to not get from the dictionary.
:type keys: list
:return: The new dictionary.
:rtype: dict
"""
result = {}
for key in dictionary.keys():
# Ignore keys on the excluded list
if key in keys:
continue
result[key] = dictionary[key]
return result
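# Illustrative example (not part of the original module): given
# d = {'a': 1, 'b': 2, 'c': 3},
#   subset(d, ['a', 'c', 'x'])  returns {'a': 1, 'c': 3}  (the missing key 'x' is ignored)
#   nsubset(d, ['a', 'c'])      returns {'b': 2}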
def chunk_dictionary_into_lists(dictionary: dict, size: int = 300) -> list:
"""It returns a list of sub lists. Each one with the size indicated
in the 'size' parameter where every element is the key of the dictionary
provided. If the size is less than the quantity provided, it creates
just one sublist with those keys.
"""
chunked_list = []
for chunk_max_value in range(
0,
len(list(dictionary.keys())),
size
):
chunked_list.append(
list(
dictionary.keys()
)[chunk_max_value:chunk_max_value + size]
)
return chunked_list
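# Illustrative example (not part of the original module):
#   chunk_dictionary_into_lists({'a': 1, 'b': 2, 'c': 3}, size=2) returns [['a', 'b'], ['c']]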
def intersect_models(dict1, dict2):
"""Combine two dictionaries that are returned from a source method call to
keep only those models that are present in both. It assumes that the models
present in both dictionaries are identical and takes them for the first
input dictionary.
:param dict1: The first dictionary with models.
:type dict1: dict
:param dict2: The second dictionary with models.
:type dict2: dict
:return: A new dictionary that contains only the models present in both
input dictionaries.
:rtype: dict
"""
intersection = dict1.keys() & dict2.keys()
models = {key: dict1[key] for key in intersection}
for key, model in models.items():
# make sure that all the information present in models present in both
# dictionaries is incorporated
model.merge(dict2[key])
return AttributeDictValue(dict1.name, attr_type=dict1.attr_type,
value=models)
| 2.28125 | 2 |
braponto-back/core/serializers.py | TheRodrigoBraga/Braponto | 0 | 12785656 | <reponame>TheRodrigoBraga/Braponto<filename>braponto-back/core/serializers.py
from rest_framework import serializers
from .models import Funcionario
from .models import Registro
class FuncionarioSerializer(serializers.ModelSerializer):
class Meta:
model = Funcionario
fields = ['id', 'matricula', 'nome']
class FuncionarioSimpleSerializer(serializers.ModelSerializer):
class Meta:
model = Funcionario
fields = ['id', 'matricula', 'nome']
class RegistroSerializer(serializers.ModelSerializer):
class Meta:
model = Registro
fields = ['id', 'funcionario', 'dia', 'primeiro_registro', 'segundo_registro', 'terceiro_registro', 'quarto_registro']
| 2.03125 | 2 |
sagas/ofbiz/finder.py | samlet/stack | 3 | 12785657 | <reponame>samlet/stack
import sagas.ofbiz.connector
from py4j.java_gateway import java_import
class Finder(object):
def __init__(self, oc):
self.oc=oc
java_import(oc.j, 'org.apache.ofbiz.service.ServiceUtil')
java_import(oc.j, 'org.apache.ofbiz.base.util.UtilDateTime')
java_import(oc.j, 'org.apache.ofbiz.entity.util.*')
self.user=self.default_user()
def success(self, ret):
return self.oc.j.ServiceUtil.isSuccess(ret)
def hash_map(self, *args):
arg_len = len(args)
if arg_len % 2 == 1:
raise ValueError("You must pass an even sized array to the toMap method (size = " + str(arg_len) + ")")
m = self.oc.j.HashMap()
i = 0
while i < arg_len:
m[args[i]] = args[i + 1]
i = i + 2
return m
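# Illustrative example (not executed; the key/value pair is the one from the commented
# call in find() below): self.hash_map("testingId", "PERF_TEST_1") builds a java HashMap
# equivalent to {"testingId": "PERF_TEST_1"}; an odd number of arguments raises ValueError.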
def default_user(self):
return self.oc.gateway.getUserLogin()
def find(self, entity, inputs):
# inputs=oc.jmap(testingId="PERF_TEST_1")
ret = self.oc.call("performFind", userLogin=self.user, entityName=entity, inputFields=inputs)
if self.oc.j.ServiceUtil.isSuccess(ret):
listIt = ret['listIt']
foundElements = listIt.getCompleteList()
return (True, foundElements)
else:
return (False, self.oc.j.ServiceUtil.getErrorMessage(ret))
def find_one(self, entity, params):
return self.oc.delegator.findOne(entity, params, True)
def find_list(self, entity, limit=20, offset=0):
findOptions = self.oc.j.EntityFindOptions()
findOptions.setLimit(limit)
findOptions.setOffset(offset)
rows = self.oc.delegator.findList(entity, None, None, None, findOptions, False)
return rows
def now(self):
UtilDateTime = self.oc.j.UtilDateTime
nowTimestamp = UtilDateTime.nowTimestamp()
return nowTimestamp
def create(self, entity, *args):
# print(hash_map(*args))
return self.oc.delegator.create(entity, self.hash_map(*args))
| 1.984375 | 2 |
log_parser/row_parsers.py | LightRevan/log_parse | 0 | 12785658 | <reponame>LightRevan/log_parse<gh_stars>0
# -*- coding: utf-8 -*-
__author__ = 'lightrevan'
import re
import functools
import collections
import datetime as dt
class RowParsingError(LookupError):
pass
def not_none_transform(match):
if match is None:
raise RowParsingError
else:
return match
date_transform = lambda match: dt.datetime.strptime(not_none_transform(match), '%y-%m-%d %H:%M:%S,%f')
int_timestamp = ('^\d+', lambda x: int(not_none_transform(x)))
class AbstractRowParser(object):
@classmethod
def _compile_pattern(cls, pattern):
return pattern if isinstance(pattern, re._pattern_type) else re.compile(pattern)
def parse_row(self, row):
raise NotImplementedError
def check_match(self, row):
raise NotImplementedError
def has_pattern(self, name):
raise NotImplementedError
class MultiPatternRowParser(AbstractRowParser):
def __init__(self, match_pattern, **kwargs):
assert 'timestamp' in kwargs, 'Must have timestamp pattern in row parser'
self._patterns = {'match': (self._compile_pattern(match_pattern), lambda x: x)}
for name, data in kwargs.items():
if isinstance(data, tuple):
pattern, transform = data
else:
pattern = data
transform = not_none_transform
self._patterns[name] = (self._compile_pattern(pattern), transform)
def parse_row(self, row):
res = {}
for name, data in self._patterns.items():
pattern, transform = data
match = pattern.search(row)
res[name] = transform(match.group(0) if match else None)
return res
def check_match(self, row):
pattern, transform = self._patterns['match']
match = pattern.search(row)
return transform(match.group(0) if match else None)
def has_pattern(self, name):
return name in self._patterns
class SinglePatternRowParser(AbstractRowParser):
def __init__(self, match_pattern, row_pattern, group_transforms=None):
self._match_pattern = self._compile_pattern(match_pattern)
self._row_pattern = self._compile_pattern(row_pattern)
assert 'timestamp' in self._row_pattern.groupindex, 'Must have timestamp pattern in row parser'
self._group_transforms = {name: not_none_transform for name in self._row_pattern.groupindex}
if group_transforms is not None:
self._group_transforms.update(group_transforms)
def parse_row(self, row):
params_match = self._row_pattern.search(row)
match = self._match_pattern.search(row)
res = {'match': match.group(0) if match else None}
for name, transform in self._group_transforms.items():
res[name] = transform(params_match.group(name) if params_match else None)
return res
def check_match(self, row):
match = self._match_pattern.search(row)
return match.group(0) if match else None
def has_pattern(self, name):
return name in self._row_pattern.groupindex
class SimpleRowGetter(object):
def __init__(self, f, row_parser):
self._f = f
self.row_parser = row_parser
def __iter__(self):
return self
def next(self):
row = self._f.next().strip()
return row, self.row_parser.parse_row(row)
class MergingRowGetter(SimpleRowGetter):
def __init__(self, *args):
super(MergingRowGetter, self).__init__(*args)
self._next_row = None
self._next_params = None
def next(self):
if self._next_row is None:
row, params = '', None
searching_next = False
else:
row, params = self._next_row, self._next_params
self._next_row = None
searching_next = True
need_recheck = False
row_valid = False
try:
parse_row = self._f.next().strip(' \n')
while not row_valid:
try:
parse_params = self.row_parser.parse_row(parse_row)
if searching_next:
self._next_row = parse_row
self._next_params = parse_params
row_valid = True
else:
row += ('\n' if row else '') + parse_row
params = parse_params
parse_row = self._f.next().strip(' \n')
searching_next = True
except RowParsingError:
row += ('\n' if row else '') + parse_row
parse_row = self._f.next().strip(' \n')
need_recheck = True
except StopIteration as e:
if self._next_row is None and not row:
raise e
finally:
if need_recheck:
params['match'] = self.row_parser.check_match(row)
return row, params
| 2.546875 | 3 |
chemex/experiments/relaxation_nz.py | gbouvignies/ChemEx | 10 | 12785659 | """
15N T1
======
Analyzes 15N T1 experiments. This keeps the spin system purely in-phase
throughout, and is calculated using the (1n)×(1n), single-spin matrix,
where n is the number of states::
{ Iz(a), Iz(b), ... }
References
----------
Kay, Nicholson, Delaglio, Bax, and Torchia. J Mag Reson (1992) 97:359-375
Note
----
A sample configuration file for this module is available using the command::
$ chemex config relaxation_nz
"""
import functools as ft
import numpy as np
import chemex.experiments.helper as ceh
import chemex.helper as ch
import chemex.nmr.liouvillian as cnl
_SCHEMA = {
"type": "object",
"properties": {
"experiment": {
"type": "object",
"properties": {
"observed_state": {"type": "string", "pattern": "[a-z]", "default": "a"}
},
}
},
}
def read(config):
ch.validate(config, _SCHEMA)
config["basis"] = cnl.Basis(type="iz", spin_system="nh")
config["fit"] = _fit_this()
return ceh.load_experiment(config=config, pulse_seq_cls=PulseSeq)
def _fit_this():
return {
"rates": ["r1_i_{observed_state}"],
"model_free": ["tauc_{observed_state}", "s2_{observed_state}"],
}
class PulseSeq:
def __init__(self, config, propagator):
self.prop = propagator
settings = config["experiment"]
self.prop.detection = f"[iz_{settings['observed_state']}]"
@ft.lru_cache(maxsize=10000)
def calculate(self, times, params_local):
self.prop.update(params_local)
start = self.prop.get_equilibrium()
delays = self.prop.delays(times)
return np.array([self.prop.detect(delay @ start) for delay in delays])
| 2.25 | 2 |
bin/filter_msa.py | CFIA-NCFAD/scovtree | 0 | 12785660 | <reponame>CFIA-NCFAD/scovtree
#!/usr/bin/env python3
import logging
import sys
from collections import defaultdict
from pathlib import Path
from typing import Dict, Set, Tuple, Mapping, Optional
import pandas as pd
import typer
from Bio.SeqIO.FastaIO import SimpleFastaParser
from rich.logging import RichHandler
def main(input_fasta: Path = typer.Option(..., help='FASTA with sequences to filter'),
input_metadata: Path = typer.Option(..., help='Metadata for input sequences'),
lineage_report: Path = typer.Option(..., help='Pangolin lineage report'),
ref_name: str = typer.Option('MN908947.3'),
country: Optional[str] = typer.Option(None, help='Preferentially filter for samples from this country'),
max_seqs: int = typer.Option(10000, help='Max number of sequences to filter down to'),
output_fasta: Path = typer.Option(Path('filtered.fasta'), help='Output filtered sequences FASTA'),
output_metadata: Path = typer.Option(Path('metadata.filtered.tsv'), help='Output filtered metadata table')):
"""Filter MSA FASTA for user specified and higher quality public sequences up to `max_seqs`"""
from rich.traceback import install
install(show_locals=True, width=120, word_wrap=True)
logging.basicConfig(
format="%(message)s",
datefmt="[%Y-%m-%d %X]",
level=logging.INFO,
handlers=[RichHandler(rich_tracebacks=True, tracebacks_show_locals=True)],
)
logging.info(f'Reading metadata table "{input_metadata}".')
df = pd.read_table(input_metadata, index_col=0)
nrow = df.shape[0]
if nrow <= max_seqs:
logging.info(f'MSA sequences ({nrow}) <= {max_seqs}. Creating symlinks...')
make_symlinks(input_fasta, output_fasta, input_metadata, output_metadata)
logging.info(f'Done!')
sys.exit(0)
logging.info(f'Filtering. {nrow} > {max_seqs} sequences.')
sample_seq = read_fasta(input_fasta)
seq_samples = seq_to_samples(sample_seq)
keep_samples = init_samples_to_keep(lineage_report, ref_name)
if country and 'country' in df.columns:
keep_samples = keep_seqs_from_country(df, country, keep_samples, max_seqs)
elif country:
logging.warning(f'Country "{country}" to preferentially select sequences from '
f'specified, but no column "country" in metadata dataframe!')
df_less_n_gaps, keep_samples = quality_filter(keep_samples, seq_samples, df)
if (df_less_n_gaps.shape[0] + len(keep_samples)) <= max_seqs:
keep_samples |= set(df_less_n_gaps['sample'])
else:
keep_samples = sampling_lineages(df_less_n_gaps, keep_samples, max_seqs)
logging.info(f'Sampled {(len(keep_samples))} samples from top quality sequences.')
logging.info(f'Writing {len(keep_samples)} of {len(seq_samples)} sequences to "{output_fasta}".')
write_fasta(output_fasta, keep_samples, sample_seq)
df.loc[keep_samples & set(df.index), :].to_csv(output_metadata, sep='\t', index=True)
logging.info(f'Done!')
def sampling_lineages(df: pd.DataFrame, keep_samples: Set[str], max_seqs: int) -> Set[str]:
df_lineages_count = df['lineage'].value_counts(ascending=True).to_frame('count')
n_lineages = df_lineages_count.shape[0]
seqs_per_lineages = int((max_seqs - len(keep_samples)) / n_lineages)
for i, (lineage, row) in enumerate(df_lineages_count.iterrows()):
seqs_in_lineages = df[df['lineage'] == lineage]
if row['count'] < seqs_per_lineages:
logging.info(
f'No need to sample lineage "{lineage}" (sequences count={row["count"]}; less than {seqs_per_lineages} seqs per lineage)')
keep_samples |= set(seqs_in_lineages['sample'])
else:
logging.info(
f'Sampling lineage "{lineage}" sequences (sequences count={row["count"]}; greater than {seqs_per_lineages} seqs per lineage)')
keep_samples |= set(seqs_in_lineages['sample'].sample(n=seqs_per_lineages))
if n_lineages < i + 1:
seqs_per_lineages = (max_seqs - len(keep_samples)) / (n_lineages - i + 1)
return keep_samples
def keep_seqs_from_country(df: pd.DataFrame, country: str, keep_samples: Set[str], max_seqs: int) -> Set[str]:
country_matching_samples = set(df[df.country.str.contains(country, case=False)].index)
n_country_keep = len(country_matching_samples | keep_samples)
if n_country_keep <= max_seqs:
logging.info(f'Keeping {n_country_keep} sequences matching country "{country}".')
keep_samples |= country_matching_samples
else:
logging.info(f'{len(country_matching_samples)} country "{country}" samples '
f'and {len(keep_samples)} user sequences greater than '
f'{max_seqs} threshold. Randomly sampling all '
f'sequences based on quality.')
return keep_samples
def quality_filter(
keep_samples: Set[str],
seq_samples: Mapping[str, Set[str]],
df: pd.DataFrame
) -> Tuple[pd.DataFrame, Set[str]]:
seq_recs = []
for seq, samples in seq_samples.items():
if samples & keep_samples:
keep_samples |= samples
continue
seq = seq.upper()
sample: str = list(samples)[0]
lineage: str = df.loc[sample, 'Pango_lineage']
seq_recs.append(dict(
sample=sample,
lineage=lineage,
seq_n=seq.count('N'),
seq_gap=seq.count('-'),
))
df_seq_recs = pd.DataFrame(seq_recs)
# TODO: more flexible and dynamic sequence quality filtering based on total number of sequences and number of sequences required so that sequences aren't unnecessarily filtered out [peterk87 2021-06-22]
df_percentile_75 = df_seq_recs.describe()
seq_n_75 = df_percentile_75.loc['75%', 'seq_n']
seq_gap_75 = df_percentile_75.loc['75%', 'seq_gap']
df_less_n_gaps = df_seq_recs.query('seq_n <= @seq_n_75 and seq_gap <= @seq_gap_75')
return df_less_n_gaps, keep_samples
def write_fasta(fasta_output: Path, keep_samples: Set[str], sample_seq: Dict[str, str]) -> None:
with open(fasta_output, 'w') as fout:
for sample in keep_samples:
fout.write(f'>{sample}\n{sample_seq[sample]}\n')
def init_samples_to_keep(lineage_report: Path, ref_name: str) -> Set[str]:
df_lineage_report = pd.read_csv(lineage_report, index_col=0)
keep_samples = set(df_lineage_report.index.astype(str))
keep_samples.add(ref_name)
return keep_samples
def make_symlinks(input_fasta: Path,
fasta_output: Path,
metadata_input: Path,
metadata_output: Path):
metadata_output.symlink_to(metadata_input.resolve())
logging.info(f'Created symlink "{metadata_output}" to "{metadata_input}"')
fasta_output.symlink_to(Path(input_fasta).resolve())
logging.info(f'Created symlink "{fasta_output}" to "{input_fasta}"')
def read_fasta(fasta: Path) -> Dict[str, str]:
out = {}
with open(fasta) as fin:
for strain, seq in SimpleFastaParser(fin):
out[strain] = seq
return out
def seq_to_samples(sample_seq: Dict[str, str]) -> Mapping[str, Set[str]]:
seq_samples = defaultdict(set)
for sample, seq in sample_seq.items():
seq_samples[seq].add(sample)
return seq_samples
if __name__ == '__main__':
typer.run(main)
| 1.953125 | 2 |
python/chap_0/0.5.5.py | RyodoTanaka/Cording_Matrix | 0 | 12785661 | <filename>python/chap_0/0.5.5.py<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
x={1,2,3,4,5}
ret={i**2 for i in x}
print ret
| 2.4375 | 2 |
functions.py | theSlayer4089/EasyFNBotGlitch | 2 | 12785662 | import fortnitepy,fortniteAPI
async def SetCosmeticMSG(self,message):
msg = message.content.upper().strip()
args = msg.split(" ")
Lang = self.DefaultLang
if "--LANG=" in msg:
msg = msg + " "
Lang = GetValue(msg,"--LANG="," ")
msg = msg.replace("--LANG=" + Lang, "").strip()
Lang = Lang.lower()
if args[0] == "!SKIN":
Item = GetName("!SKIN",msg)
Item = await fortniteAPI.GetSkin(Item,Lang)
elif args[0] == "!BACKPACK":
Item = GetName("!BACKPACK",msg)
Item = await fortniteAPI.GetBackpack(Item,Lang)
elif args[0] == "!PICKAXE":
Item = GetName("!PICKAXE",msg)
Item = await fortniteAPI.GetPickaxe(Item,Lang)
elif args[0] == "!EMOJI":
Item = GetName("!EMOJI",msg)
Item = await fortniteAPI.GetEmoji(Item,Lang)
elif args[0] == "!EMOTE":
Item = GetName("!EMOTE",msg)
Item = await fortniteAPI.GetEmote(Item,Lang)
if "status" in Item:
await message.reply("Can't find this item")
return
else:
v = []
if msg.count("--") != 0:
if Item["variants"][Lang]: #Make sure that the item has variants
for Variant in GetValues(msg):
VariantChannelName = (Variant.split("=")[0])[2:]
Variant = Variant.split("=")[1]
for variant in Item["variants"][Lang]:
if variant["type"].upper() == VariantChannelName:
for tag in variant["options"]:
if tag["name"].upper() == Variant:
v.append(create_variant(variant["channel"],tag["tag"],item=Item["backendType"]))
else: #The item has no variants
await message.reply("Can't find any variants for this item")
asset=f'{str(Item["path"]).replace("FortniteGame/Content","/Game")}.{Item["id"]}'
if args[0] == "!SKIN":
await self.user.party.me.set_outfit(asset=asset,variants=v)
elif args[0] == "!BACKPACK":
await self.user.party.me.set_backpack(asset=asset,variants=v)
elif args[0] == "!PICKAXE":
await self.user.party.me.set_pickaxe(asset=asset,variants=v)
elif args[0] == "!EMOJI":
await self.user.party.me.set_emote(asset=asset)
elif args[0] == "!EMOTE":
await self.user.party.me.set_emote(asset=asset)
await message.reply(f'{Item["type"].capitalize()} set to {Item["Names"][Lang]}')
def GetName(Name,Message):
if Message.count("--") != 0:
Item = GetValue(Message,f'{Name} ',"--")
else:
Item = Message[(len(Name) + 1):]
return Item.strip()
def create_variant(VariantChannelName,Variant,item="AthenaCharacter"):
return {'item': item,'channel': VariantChannelName,'variant': Variant}
def GetValue(fullLine,startWith,endWith):
startIndex = fullLine.index(startWith) + len(startWith)
endIndex = fullLine[startIndex:].index(endWith) + startIndex
return fullLine[startIndex:endIndex]
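# Illustrative example (not executed): GetValue("!SKIN SOMESKIN --LANG=EN ", "--LANG=", " ")
# returns "EN" - the text between the end of "--LANG=" and the next space. "SOMESKIN" is a
# placeholder, not a real cosmetic name.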
def GetValues(fullLine):
Variants = []
for Variant in range(0,fullLine.count("--")):
try:
startIndex = fullLine.index("--")
ValueStartIndex = fullLine[startIndex:].index("=") + startIndex + 1
try:
endIndex = fullLine[ValueStartIndex:].index("--") + ValueStartIndex
except:
endIndex = len(fullLine)
Variants.append(fullLine[startIndex:endIndex])
fullLine = fullLine.replace(fullLine[startIndex:endIndex],"")
except:
return None
return Variants
| 2.453125 | 2 |
test/test_add_group.py | Zaichkov/python_training | 0 | 12785663 | <gh_stars>0
from model.group import Group
import pytest
import re
# @pytest.mark.parametrize("group", test_data, ids=[repr(x) for x in test_data])
def test_add_group(app, orm, json_groups, check_ui):
group = json_groups
old_groups = orm.get_group_list()
app.group.create(group)
new_groups = orm.get_group_list()
old_groups.append(group)
assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
if check_ui:
ui_list = app.group.get_group_list()
orm_list = app.group.make_list_like_ui(new_groups)
assert sorted(orm_list, key=Group.id_or_max) == sorted(ui_list, key=Group.id_or_max)
| 2.46875 | 2 |
tests/cloud_client_test.py | dgorissen/dronekit-python | 0 | 12785664 | <filename>tests/cloud_client_test.py
import unittest
import os
from droneapi.lib.CloudClient import *
class CloudClientTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
api_key = os.environ['DRONEAPI_KEY']
self.api = CloudClient(api_key)
super(CloudClientTest, self).__init__(*args, **kwargs)
def setUp(self):
"""Create simple data set with headers"""
pass
def tearDown(self):
"""Teardown."""
pass
def test_unhandled_endpoint(self):
self.assertRaises(CloudError, self.api.bogus)
def test_mission_endpoint(self):
self.api.mission()
def test_mission_static_map(self):
self.api.mission_staticMap()
def test_mission_by_id_endpoint(self):
self.api.mission_(1141)
def test_mission_by_id_analysis_endpoint(self):
self.api.mission_analysis(1141)
def test_mission_by_id_geo_endpoint(self):
self.api.mission_geo(1141)
def test_mission_by_id_messages_endpoint(self):
self.api.mission_messages(1141)
def test_mission_by_id_parameters_endpoint(self):
self.api.mission_parameters(1141)
def test_mission_by_id_dseries_endpoint(self):
self.api.mission_dseries(1141)
def test_vehicle_endpoint(self):
self.api.vehicle()
def test_vehicle_by_id_endpoint(self):
self.api.vehicle_(218)
def test_user_endpoint(self):
self.api.user()
def test_user_by_login_endpoint(self):
self.api.user('mrpollo')
if __name__ == '__main__':
unittest.main()
| 2.65625 | 3 |
bundle/fpga_simu.py | davidbrochart/bundle | 4 | 12785665 | from pyclk import Sig, Reg, In, Out, List, Module
from .memory import memory
from .ddr2fpga import ddr2fpga
from .fpga2ddr import fpga2ddr
from .iterator import iterator
from .functions import func
from .fpga_state import FPGA_state
from random import randint
import asyncio
import numpy as np
class Simu(Module):
def __init__(self, fpga_config):
self.func_layout = fpga_config.func_layout
self.mem_nb = fpga_config.config['mem_nb']
self.ddr2fpga_nb = fpga_config.config['ddr2fpga_nb']
self.fpga2ddr_nb = fpga_config.config['fpga2ddr_nb']
self.func_nb = fpga_config.config['func_nb']
self.iter_nb = fpga_config.config['iter_nb']
self.mem_depth = fpga_config.config['mem_depth']
self.chunk_array = [[0 for j in range(fpga_config.config['mem_depth'])] for i in range(fpga_config.config['mem_nb'])]
#self.chunk_array = [np.zeros(fpga_config.config['mem_depth'], dtype=np.uint64) for i in range(fpga_config.config['mem_nb'])]
self.cycle_nb = -1
self.randmax = 2
self.trace = None
# memories
self.u_mem = List()
self.s_mem_wena = List()
self.s_mem_addr = List()
self.s_mem_din = List()
self.s_mem_dout = List()
for i in range(self.mem_nb):
self.s_mem_wena[i] = Sig()
self.s_mem_addr[i] = Sig()
self.s_mem_din[i] = Sig()
self.s_mem_dout[i] = Sig()
self.u_mem[i] = _ = memory(self.mem_depth)
_.i_wena (self.s_mem_wena[i])
_.i_addr (self.s_mem_addr[i])
_.i_din (self.s_mem_din[i])
_.o_dout (self.s_mem_dout[i])
# ddr2fpga
self.u_ddr2fpga = List()
self.s_ddr2fpga_mem_i = List()
self.s_ddr2fpga_data_nb = List()
self.s_ddr2fpga_done = List()
self.s_ddr2fpga_wena = List()
self.s_ddr2fpga_addr = List()
self.s_ddr2fpga_din = List()
for i in range(self.ddr2fpga_nb):
self.s_ddr2fpga_mem_i[i] = Sig()
self.s_ddr2fpga_data_nb[i] = Sig()
self.s_ddr2fpga_done[i] = Sig()
self.s_ddr2fpga_wena[i] = Sig()
self.s_ddr2fpga_addr[i] = Sig()
self.s_ddr2fpga_din[i] = Sig()
self.u_ddr2fpga[i] = _ = ddr2fpga()
_.i_data_nb (self.s_ddr2fpga_data_nb[i])
_.o_done (self.s_ddr2fpga_done[i])
_.o_mem_wena (self.s_ddr2fpga_wena[i])
_.o_mem_addr (self.s_ddr2fpga_addr[i])
_.o_mem_din (self.s_ddr2fpga_din[i])
# fpga2ddr
self.s_fpga2ddr_mem_i = List()
self.s_fpga2ddr_data_nb = List()
self.s_fpga2ddr_done = List()
self.s_fpga2ddr_addr = List()
self.s_fpga2ddr_mem_dout = List()
self.u_fpga2ddr = List()
for i in range(self.fpga2ddr_nb):
self.s_fpga2ddr_mem_dout[i] = Sig()
self.s_fpga2ddr_addr[i] = Sig()
self.s_fpga2ddr_mem_i[i] = Sig()
self.s_fpga2ddr_data_nb[i] = Sig()
self.s_fpga2ddr_done[i] = Sig()
self.u_fpga2ddr[i] = _ = fpga2ddr()
_.i_data_nb (self.s_fpga2ddr_data_nb[i])
_.o_done (self.s_fpga2ddr_done[i])
_.o_mem_addr (self.s_fpga2ddr_addr[i])
_.i_mem_dout (self.s_fpga2ddr_mem_dout[i])
# iterators
self.u_iter = List()
self.s_iter_data_nb = List()
self.s_iter_done = List()
self.s_iter_raddr = List()
self.s_iter_waddr = List()
self.s_iter_wena = List()
self.s_iter_arg_valid = List()
self.s_iter_res_valid = List()
for i in range(self.iter_nb):
self.s_iter_data_nb[i] = Sig()
self.s_iter_done[i] = Sig()
self.s_iter_raddr[i] = Sig()
self.s_iter_waddr[i] = Sig()
self.s_iter_wena[i] = Sig()
self.s_iter_arg_valid[i] = Sig()
self.s_iter_res_valid[i] = Sig()
self.u_iter[i] = _ = iterator()
_.i_data_nb (self.s_iter_data_nb[i])
_.o_done (self.s_iter_done[i])
_.o_raddr (self.s_iter_raddr[i])
_.o_waddr (self.s_iter_waddr[i])
_.o_wena (self.s_iter_wena[i])
_.o_arg_valid (self.s_iter_arg_valid[i])
_.i_res_valid (self.s_iter_res_valid[i])
# functions
self.u_func = List()
self.s_func_arg0 = List()
self.s_func_arg1 = List()
self.s_func_arg_valid = List()
self.s_func_res = List()
self.s_func_res_valid = List()
i = 0
for fname, fnb in self.func_layout.items():
for j in range(fnb):
self.s_func_arg0[i] = Sig()
self.s_func_arg1[i] = Sig()
self.s_func_arg_valid[i] = Sig()
self.s_func_res[i] = Sig()
self.s_func_res_valid[i] = Sig()
self.u_func[i] = _ = func(fname)
_.i_arg0 (self.s_func_arg0[i])
_.i_arg1 (self.s_func_arg1[i])
_.i_arg_valid (self.s_func_arg_valid[i])
_.o_res (self.s_func_res[i])
_.o_res_valid (self.s_func_res_valid[i])
i += 1
self.s_iter_rmem0_i = List()
self.s_iter_rmem1_i = List()
self.s_iter_wmem_i = List()
self.s_iter_func_i = List()
for i in range(self.iter_nb):
self.s_iter_rmem0_i[i] = Sig()
self.s_iter_rmem1_i[i] = Sig()
self.s_iter_wmem_i[i] = Sig()
self.s_iter_func_i[i] = Sig()
self.state = FPGA_state(fpga_config)
self.config = fpga_config.config
def logic(self):
# DDR <-> memory
for i in range(self.mem_nb):
self.s_mem_addr[i].d = 0
self.s_mem_din[i].d = 0
self.s_mem_wena[i].d = 0
for i in range(self.fpga2ddr_nb):
self.s_mem_addr[self.s_fpga2ddr_mem_i[i].d].d += self.s_fpga2ddr_addr[i].d
self.s_fpga2ddr_mem_dout[i].d = self.s_mem_dout[self.s_fpga2ddr_mem_i[i].d].d
for i in range(self.ddr2fpga_nb):
self.s_mem_wena[self.s_ddr2fpga_mem_i[i].d].d += self.s_ddr2fpga_wena[i].d
self.s_mem_addr[self.s_ddr2fpga_mem_i[i].d].d += self.s_ddr2fpga_addr[i].d
self.s_mem_din[self.s_ddr2fpga_mem_i[i].d].d += self.s_ddr2fpga_din[i].d
# memory <-> iterator <-> function
for i in range(self.func_nb):
self.s_func_arg_valid[i].d = 0
self.s_func_arg0[i].d = 0
self.s_func_arg1[i].d = 0
for i in range(self.iter_nb):
self.s_mem_addr[self.s_iter_rmem0_i[i].d].d += self.s_iter_raddr[i].d
self.s_mem_addr[self.s_iter_rmem1_i[i].d].d += self.s_iter_raddr[i].d
self.s_mem_addr[self.s_iter_wmem_i[i].d].d += self.s_iter_waddr[i].d
self.s_mem_wena[self.s_iter_wmem_i[i].d].d += self.s_iter_wena[i].d
self.s_func_arg_valid[self.s_iter_func_i[i].d].d += self.s_iter_arg_valid[i].d
self.s_iter_res_valid[i].d = self.s_func_res_valid[self.s_iter_func_i[i].d].d
if self.s_iter_data_nb[i].d != 0:
self.s_mem_din[self.s_iter_wmem_i[i].d].d += self.s_func_res[self.s_iter_func_i[i].d].d
if self.s_iter_arg_valid[i].d == 1:
self.s_func_arg0[self.s_iter_func_i[i].d].d += self.s_mem_dout[self.s_iter_rmem0_i[i].d].d
self.s_func_arg1[self.s_iter_func_i[i].d].d += self.s_mem_dout[self.s_iter_rmem1_i[i].d].d
def set_cycle_nb(self, cycle_nb=-1):
self.cycle_nb = cycle_nb
def set_trace(self, trace):
self.trace = trace
async def op(self, iter_i, func_i, rmem0_i, rmem1_i, wmem_i, data_nb):
# operation request
self.s_iter_data_nb[iter_i].d = data_nb
self.s_iter_func_i[iter_i].d = func_i
self.s_iter_rmem0_i[iter_i].d = rmem0_i
self.s_iter_rmem1_i[iter_i].d = rmem1_i
self.s_iter_wmem_i[iter_i].d = wmem_i
clkNb = randint(1, self.randmax)
self.run(clkNb=clkNb, trace=self.trace)
# operation completion check
# software is polling, run the FPGA
done = False
while not done:
if (self.cycle_nb >= 0) and (self.time >= self.cycle_nb):
return
if self.s_iter_done[iter_i].d == 1:
self.s_iter_data_nb[iter_i].d = 0
done = True
else:
done = False
clkNb = randint(1, self.randmax)
self.run(clkNb=clkNb, trace=self.trace)
await asyncio.sleep(0)
async def ddr2fpga(self, ddr2fpga_i, mem_i, array_ptr, data_nb):
# memory write
self.s_ddr2fpga_mem_i[ddr2fpga_i].d = mem_i
self.s_ddr2fpga_data_nb[ddr2fpga_i].d = data_nb
self.u_ddr2fpga[ddr2fpga_i].array_ptr = array_ptr
clkNb = randint(1, self.randmax)
self.run(clkNb=clkNb, trace=self.trace)
# memory copy completion check
# software is polling, run the FPGA
done = False
while not done:
if (self.cycle_nb >= 0) and (self.time >= self.cycle_nb):
return
if self.s_ddr2fpga_done[ddr2fpga_i].d == 1:
self.s_ddr2fpga_data_nb[ddr2fpga_i].d = 0
done = True
else:
done = False
clkNb = randint(1, self.randmax)
self.run(clkNb=clkNb, trace=self.trace)
await asyncio.sleep(0)
async def fpga2ddr(self, fpga2ddr_i, mem_i, array_ptr, data_nb):
# memory read
self.s_fpga2ddr_mem_i[fpga2ddr_i].d = mem_i
self.s_fpga2ddr_data_nb[fpga2ddr_i].d = data_nb
self.u_fpga2ddr[fpga2ddr_i].array_ptr = array_ptr
clkNb = randint(1, self.randmax)
self.run(clkNb=clkNb, trace=self.trace)
# memory copy completion check
# software is polling, run the FPGA
done = False
while not done:
if (self.cycle_nb >= 0) and (self.time >= self.cycle_nb):
return
if self.s_fpga2ddr_done[fpga2ddr_i].d == 1:
self.s_fpga2ddr_data_nb[fpga2ddr_i].d = 0
done = True
else:
done = False
clkNb = randint(1, self.randmax)
self.run(clkNb=clkNb, trace=self.trace)
await asyncio.sleep(0)
| 2.328125 | 2 |
dandi/metadata.py | AlmightyYakob/dandi-cli | 0 | 12785666 | from datetime import datetime
import os.path as op
import re
from uuid import uuid4
from . import models
from .pynwb_utils import (
_get_pynwb_metadata,
get_neurodata_types,
get_nwb_version,
ignore_benign_pynwb_warnings,
metadata_cache,
)
from .utils import ensure_datetime
from . import __version__, get_logger
from .dandiset import Dandiset
lgr = get_logger()
@metadata_cache.memoize_path
def get_metadata(path):
"""Get selected metadata from a .nwb file or a dandiset directory
If a directory given and it is not a Dandiset, None is returned
Parameters
----------
path: str or Path
Returns
-------
dict
"""
# when we run in parallel, these annoying warnings appear
ignore_benign_pynwb_warnings()
path = str(path) # for Path
meta = dict()
if op.isdir(path):
try:
dandiset = Dandiset(path)
return dandiset.metadata
except ValueError as exc:
lgr.debug("Failed to get metadata for %s: %s", path, exc)
return None
# First read out possibly available versions of specifications for NWB(:N)
meta["nwb_version"] = get_nwb_version(path)
# PyNWB might fail to load because of missing extensions.
# There is a new initiative of establishing registry of such extensions.
# Not yet sure if PyNWB is going to provide "native" support for needed
# functionality: https://github.com/NeurodataWithoutBorders/pynwb/issues/1143
# So meanwhile, hard-coded workaround for data types we care about
ndtypes_registry = {
"AIBS_ecephys": "allensdk.brain_observatory.ecephys.nwb",
"ndx-labmetadata-abf": "ndx_dandi_icephys",
}
tried_imports = set()
while True:
try:
meta.update(_get_pynwb_metadata(path))
break
except KeyError as exc: # ATM there is
lgr.debug("Failed to read %s: %s", path, exc)
import re
res = re.match(r"^['\"\\]+(\S+). not a namespace", str(exc))
if not res:
raise
ndtype = res.groups()[0]
if ndtype not in ndtypes_registry:
raise ValueError(
"We do not know which extension provides %s. "
"Original exception was: %s. " % (ndtype, exc)
)
import_mod = ndtypes_registry[ndtype]
lgr.debug("Importing %r which should provide %r", import_mod, ndtype)
if import_mod in tried_imports:
raise RuntimeError(
"We already tried importing %s to provide %s, but it seems it didn't help"
% (import_mod, ndtype)
)
tried_imports.add(import_mod)
__import__(import_mod)
meta["nd_types"] = get_neurodata_types(path)
return meta
def parse_age(age):
"""
Convert a human-friendly duration string into an ISO 8601 duration
Parameters
----------
age : str
Returns
-------
str
"""
m = re.fullmatch(r"(\d+)\s*(y(ear)?|m(onth)?|w(eek)?|d(ay)?)s?", age, flags=re.I)
if m:
qty = int(m.group(1))
unit = m.group(2)[0].upper()
return f"P{qty}{unit}"
else:
raise ValueError(age)
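# For illustration, a few example conversions (these follow directly from the
# regex above; unparseable strings raise ValueError):
#     parse_age("2 years")  -> "P2Y"
#     parse_age("10 Weeks") -> "P10W"
#     parse_age("7d")       -> "P7D"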
def extract_age(metadata):
try:
dob = ensure_datetime(metadata["date_of_birth"])
start = ensure_datetime(metadata["session_start_time"])
except (KeyError, TypeError, ValueError):
try:
duration = parse_age(metadata["age"])
except (KeyError, TypeError, ValueError):
return ...
else:
if start < dob:
raise ValueError("session_start_time precedes date_of_birth")
duration = timedelta2duration(start - dob)
return models.PropertyValue(value=duration, unitText="Years from birth")
def timedelta2duration(delta):
"""
Convert a datetime.timedelta to ISO 8601 duration format
Parameters
----------
delta : datetime.timedelta
Returns
-------
str
"""
s = "P"
if delta.days:
s += f"{delta.days}D"
if delta.seconds or delta.microseconds:
sec = delta.seconds
if delta.microseconds:
# Don't add when microseconds is 0, so that sec will be an int then
sec += delta.microseconds / 1000000
s += f"T{sec}S"
if s == "P":
s += "0D"
return s
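# For illustration, example conversions (derived from the rules above):
#     timedelta2duration(timedelta(days=1))             -> "P1D"
#     timedelta2duration(timedelta(days=1, seconds=30)) -> "P1DT30S"
#     timedelta2duration(timedelta(0))                  -> "P0D"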
def extract_sex(metadata):
value = metadata.get("sex", None)
if value is not None:
value = value.lower()
if value in ["m", "male"]:
value_id = "http://purl.obolibrary.org/obo/PATO_0000384"
value = "Male"
elif value in ["f", "female"]:
value_id = "http://purl.obolibrary.org/obo/PATO_0000383"
value = "Female"
elif value in ["unknown"]:
value_id = None
value = "Unknown"
elif value in ["other"]:
value_id = None
value = "Other"
elif value.startswith("http"):
value_id = value
value = None
else:
raise ValueError(f"Cannot interpret sex field: {value}")
return models.SexType(identifier=value_id, name=value)
else:
return ...
def extract_species(metadata):
value = metadata.get("species", None)
if value is not None:
value = value.lower()
if "mouse" in value or value.startswith("mus"):
value_id = "http://purl.obolibrary.org/obo/NCBITaxon_10090"
value = "House mouse"
elif "human" in value or value.startswith("homo"):
value_id = "http://purl.obolibrary.org/obo/NCBITaxon_9606"
value = "Human"
elif "rat" in value:
value_id = "http://purl.obolibrary.org/obo/NCBITaxon_10117"
value = "House rat"
elif "mulatta" in value or "rhesus" in value:
value_id = "http://purl.obolibrary.org/obo/NCBITaxon_9544"
value = "Rhesus monkey"
elif "jacchus" in value:
value_id = "http://purl.obolibrary.org/obo/NCBITaxon_9483"
value = "Common marmoset"
elif "melanogaster" in value or "fruit fly" in value:
value_id = "http://purl.obolibrary.org/obo/NCBITaxon_7227"
value = "Common fruit fly"
elif value.startswith("http"):
value_id = value
value = None
else:
raise ValueError(f"Cannot interpret species field: {value}")
        if value is not None:
            value = value.capitalize()
        return models.SpeciesType(identifier=value_id, name=value)
else:
return ...
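# For illustration, how the species mapping behaves (derived from the branches
# above):
#     extract_species({"species": "mus musculus"})
#         -> SpeciesType(identifier=".../NCBITaxon_10090", name="House mouse")
#     extract_species({}) -> Ellipsis (field is then left unset by extract_model)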
def extract_assay_type(metadata):
if "assayType" in metadata:
return [models.AssayType(identifier="assayType", name=metadata["assayType"])]
else:
return ...
def extract_anatomy(metadata):
if "anatomy" in metadata:
return [models.Anatomy(identifier="anatomy", name=metadata["anatomy"])]
else:
return ...
def extract_model(modelcls, metadata, **kwargs):
m = modelcls.unvalidated()
for field in m.__fields__.keys():
value = kwargs.get(field, extract_field(field, metadata))
if value is not Ellipsis:
setattr(m, field, value)
# return modelcls(**m.dict())
return m
def extract_wasDerivedFrom(metadata):
return [
extract_model(models.BioSample, metadata, identifier=metadata.get("subject_id"))
]
def extract_digest(metadata):
if "digest" in metadata:
return models.Digest(
value=metadata["digest"],
cryptoType=models.DigestType[metadata["digest_type"]],
)
else:
return ...
FIELD_EXTRACTORS = {
"wasDerivedFrom": extract_wasDerivedFrom,
"age": extract_age,
"sex": extract_sex,
"assayType": extract_assay_type,
"anatomy": extract_anatomy,
"digest": extract_digest,
"species": extract_species,
}
def extract_field(field, metadata):
if field in FIELD_EXTRACTORS:
return FIELD_EXTRACTORS[field](metadata)
else:
return metadata.get(field, ...)
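# For illustration, how extract_model uses this dispatch: for each field of the
# target model it calls extract_field(field, metadata); fields with a dedicated
# extractor (see FIELD_EXTRACTORS) are computed, everything else is looked up
# verbatim in the metadata dict, and Ellipsis means "leave unset". E.g.
#     extract_field("sex", {"sex": "F"}) -> SexType(..., name="Female")
#     extract_field("keywords", {})      -> Ellipsis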
def nwb2asset(nwb_path, digest=None, digest_type=None):
start_time = datetime.now().astimezone()
metadata = get_metadata(nwb_path)
if digest is not None:
metadata["digest"] = digest
metadata["digest_type"] = digest_type
metadata["contentSize"] = op.getsize(nwb_path)
metadata["encodingFormat"] = "application/x-nwb"
asset = metadata2asset(metadata)
end_time = datetime.now().astimezone()
asset.wasGeneratedBy = models.Activity(
identifier=str(uuid4()),
name="Metadata generation",
description="Metadata generated by DANDI cli",
wasAssociatedWith=models.Software(
identifier={"propertyID": "RRID", "value": "SCR_019009"},
name="DANDI Command Line Interface",
description=f"dandi-cli {__version__}",
version=__version__,
url="https://github.com/dandi/dandi-cli",
),
startedAt=start_time,
endedAt=end_time,
)
return asset
def metadata2asset(metadata):
return extract_model(models.AssetMeta, metadata)
"""
The following section converts the metadata schema from the current girder dandiset
model to the new schema in dandi-cli. This section should be removed
after the migration to the new schema is finished.
"""
mapping = {
"identifier": ["identifier"],
"name": ["name"],
"description": ["description"],
"contributors": ["contributor"],
"sponsors": ["contributor", ["Sponsor"]],
"license": ["license"],
"keywords": ["keywords"],
"project": ["generatedBy"],
"conditions_studied": ["about"],
"associated_anatomy": ["about"],
"protocols": ["protocol"],
"ethicsApprovals": ["ethicsApproval"],
"access": ["access"],
"associatedData": ["relatedResource", "IsDerivedFrom"],
"publications": ["relatedResource", "IsDescribedBy"],
"age": ["variableMeasured"],
"organism": ["variableMeasured"],
"sex": ["variableMeasured"],
"number_of_subjects": ["assetsSummary", "numberOfSubjects"],
"number_of_cells": ["assetsSummary", "numberOfCells"],
"number_of_tissue_samples": ["assetsSummary", "numberOfSamples"],
}
def toContributor(value):
if not isinstance(value, list):
value = [value]
out = []
for item in value:
contrib = {}
if "name" in item:
name = item["name"].split()
item["name"] = f"{name[-1]}, {' '.join(name[:-1])}"
if "roles" in item:
roles = []
for role in item["roles"]:
tmp = role.split()
if len(tmp) > 1:
roles.append("".join([val.capitalize() for val in tmp]))
else:
roles.append(tmp.pop())
contrib["roleName"] = roles
del item["roles"]
if "awardNumber" in item:
contrib["awardNumber"] = item["awardNumber"]
del item["awardNumber"]
if "orcid" in item:
if item["orcid"]:
contrib["identifier"] = models.PropertyValue(
value=item["orcid"], propertyID="ORCID"
)
else:
contrib["identifier"] = models.PropertyValue()
del item["orcid"]
if "affiliations" in item:
item["affiliation"] = item["affiliations"]
del item["affiliations"]
contrib.update(**{f"{k}": v for k, v in item.items()})
out.append(contrib)
return out
def convertv1(data):
oldmeta = data["dandiset"] if "dandiset" in data else data
newmeta = {}
for oldkey, value in oldmeta.items():
if oldkey in ["language", "altid", "number_of_slices"]:
continue
if oldkey not in mapping:
raise KeyError(f"Could not find {oldkey}")
if len(mapping[oldkey]) == 0:
newkey = f"schema:{oldkey}"
else:
newkey = mapping[oldkey][0]
if oldkey in ["contributors", "sponsors"]:
value = toContributor(value)
if oldkey == "access":
value = [
models.AccessRequirements(
status=models.AccessType.Open, email=value["access_contact_email"]
)
]
if oldkey == "identifier":
value = models.PropertyValue(value=value, propertyID="DANDI")
if len(mapping[oldkey]) == 2:
extra = mapping[oldkey][1]
if newkey == "contributor":
extrakey = "roleName"
if oldkey == "sponsors":
extrakey = "roleName"
if oldkey in ["publications", "associatedData"]:
extrakey = "relation"
if not isinstance(value, list):
value = [value]
out = []
for item in value:
if isinstance(item, dict):
out.append({k: v for k, v in item.items()})
else:
present = False
for val in out:
if item in val.values():
present = True
if not present:
out.append({"url": item})
value = out
if oldkey in [
"number_of_subjects",
"number_of_cells",
"number_of_tissue_samples",
]:
value = {extra: value}
extrakey = None
if isinstance(value, list):
for val in value:
if extrakey:
val[extrakey] = extra
if isinstance(value, dict):
if extrakey:
value[extrakey] = extra
if newkey == "variableMeasured":
if oldkey in ["age", "sex"]:
vm = {"name": oldkey}
if oldkey == "sex":
vm["value"] = value
else:
if "maximum" in value:
if "days" in value["maximum"]:
value["units"] = "days"
if "Gestational" in value["maximum"]:
value["units"] = "Gestational Week"
value["maximum"] = value["maximum"].split()[-1]
if value["maximum"].startswith("P"):
value["maximum"] = value["maximum"][1:-1]
value["units"] = value["maximum"][-1]
if "None" not in value["maximum"]:
value["maximum"] = float(value["maximum"].split()[0])
if "minimum" in value:
if "days" in value["minimum"]:
value["units"] = "days"
if "Gestational" in value["minimum"]:
value["units"] = "Gestational Week"
value["minimum"] = value["minimum"].split()[-1]
if value["minimum"].startswith("P"):
value["minimum"] = value["minimum"][1:-1]
value["units"] = value["minimum"][-1]
if "None" not in value["minimum"]:
value["minimum"] = float(value["minimum"].split()[0])
value["unitText"] = value["units"]
del value["units"]
vm.update(**value)
else:
newvalues = []
for val in value:
if "species" in val:
newvalues.append(val["species"])
vm = {"name": "species", "value": newvalues}
value = vm
if newkey not in newmeta:
newmeta[newkey] = value
else:
curvalue = newmeta[newkey]
if not isinstance(curvalue, list):
newmeta[newkey] = [curvalue]
if not isinstance(value, list):
value = [value]
newmeta[newkey].extend(value)
if "assetsSummary" in newmeta:
del newmeta["assetsSummary"]
if "variableMeasured" in newmeta:
del newmeta["variableMeasured"]
return newmeta
def migrate2newschema(meta):
newmeta = convertv1(meta)
dandimeta = models.DandiMeta.unvalidated(**newmeta)
return dandimeta
| 1.96875 | 2 |
src/user.py | livibetter/twimonial | 0 | 12785667 | import logging
import os
from google.appengine.api import memcache
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
from twimonial.models import User, Twimonial
from twimonial.ui import render_write
import config
class UserPage(webapp.RequestHandler):
def get(self, screen_name):
if config.CACHE:
# Check cache first
cached_page = memcache.get(screen_name, 'userpage')
if cached_page:
self.response.out.write(cached_page)
return
user = User.get_by_screen_name(screen_name)
if not user:
self.error(404)
rendered_page = render_write({'screen_name': screen_name},
'user_404.html', self.request, self.response)
return
user.check_profile_image()
tmpl_values = {
'user': user.dictize(),
}
# Send out and cache it
rendered_page = render_write(tmpl_values, 'user.html', self.request,
self.response)
if config.CACHE:
memcache.set(screen_name, rendered_page, config.CACHE_TIME_USERPAGE,
namespace='userpage')
def head(self, screen_name):
pass
class UserListPage(webapp.RequestHandler):
def get(self, screen_name, screen_names_string):
limit = 10
screen_names = [name for name in screen_names_string.split('-') if name][:limit]
screen_names.sort()
screen_names_string = '-'.join(screen_names)
if config.CACHE:
# Check cache first
cached_page = memcache.get(screen_names_string, 'userlist_%s' % screen_name)
if cached_page:
self.response.out.write(cached_page)
return
user = User.get_by_screen_name(screen_name)
if not user:
self.error(404)
rendered_page = render_write({'screen_name': screen_name},
'user_404.html', self.request, self.response)
return
twimonials = [t.dictize() for t in Twimonial.get_tos_from(screen_names, user)]
missings = []
t_screen_names = [t['to_user']['screen_name'].lower() for t in twimonials]
for name in screen_names:
if name.lower() not in t_screen_names:
missings.append(name)
tmpl_values = {
'user': user,
'twimonials': twimonials,
'missings': ', '.join(missings),
}
# Send out and cache it
rendered_page = render_write(tmpl_values, 'userlist.html', self.request,
self.response)
if config.CACHE:
memcache.set(screen_names_string, rendered_page,
config.CACHE_TIME_USERLISTPAGE, namespace='userlist_%s' % screen_name)
application = webapp.WSGIApplication([
('/user/([_a-zA-Z0-9]+)', UserPage),
('/userlist/([_a-zA-Z0-9]+)/([-_a-zA-Z0-9]+)', UserListPage),
],
debug=config.DEBUG)
def main():
run_wsgi_app(application)
if __name__ == "__main__":
main()
| 2.0625 | 2 |
route.py | LemonPi/Pathtreker | 1 | 12785668 | import queue
import networkx
from math import radians, sin, cos, sqrt, asin
from util import hav
def shortest_path(g, source, dest):
"""Return single source single destination shortest path using A* search.
    Haversine distance can be used as the heuristic; as written the inner
    heuristic returns 0, so the search reduces to Dijkstra's algorithm.
Arguments:
g -- networkx graph loaded from shapefile
source -- source intersection's index in g
dest -- destination intersection's index in g
"""
def heuristic(a,b):
"""Return heuristic distance between two nodes in g.
Haversine distance guranteed to be shorter than actual distance,
since it's the shortest distance between points on a sphere
(which the earth approximates).
Arguments:
a -- one node index in g
b -- another node index in g
"""
# lat and lon internally stored in degrees, convert and call function
# lona, lonb, lata, latb = [g.node[a]['lon'], g.node[b]['lon'], g.node[a]['lat'], g.node[b]['lat']]
return 0 # hav((lona, lata), (lonb, latb))
# frontier of nodes to explore
exploring = queue.PriorityQueue()
# property maps which will be built and returned outside
# actual cost to node
cost = {}
# which immediate node was the shortest path from
parent = {}
# queue.PriorityQueue expects put(priority, data) we store node index as data
exploring.put((0,source))
parent[source] = None
cost[source] = 0
while not exploring.empty():
u_cost, u = exploring.get()
if u == dest:
break
for v in g[u]:
new_cost = cost[u] + g[u][v]['length']
if v not in cost or new_cost < cost[v]:
# relax edge with new_cost
cost[v] = new_cost
parent[v] = u
                heuristic_cost = new_cost + heuristic(v, dest)
# doesn't matter if v's already in exploring queue with higher cost
# we'll have duplicate nodes, but those won't affect correctness
# since they'll be explored after the cheaper ones are explored,
# they won't yield any new shorter paths
exploring.put((heuristic_cost,v))
return cost, parent | 3.6875 | 4 |
package/spack-curl/package.py | ctuning/ck-spack | 1 | 12785669 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by <NAME>, <EMAIL>, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import sys
class Curl(AutotoolsPackage):
"""cURL is an open source command line tool and library for
transferring data with URL syntax"""
homepage = "http://curl.haxx.se"
# URL must remain http:// so Spack can bootstrap curl
url = "http://curl.haxx.se/download/curl-7.60.0.tar.bz2"
version('7.60.0', 'bd2aabf78ded6a9aec8a54532fd6b5d7')
version('7.59.0', 'a2192804f7c2636a09320416afcf888e')
version('7.56.0', 'e0caf257103e0c77cee5be7e9ac66ca4')
version('7.54.0', '89bb7ba87384dfbf4f1a3f953da42458')
version('7.53.1', 'fb1f03a142236840c1a77c035fa4c542')
version('7.52.1', 'dd014df06ff1d12e173de86873f9f77a')
version('7.50.3', 'bd177fd6deecce00cfa7b5916d831c5e')
version('7.50.2', '6e161179f7af4b9f8b6ea21420132719')
version('7.50.1', '015f6a0217ca6f2c5442ca406476920b')
version('7.49.1', '6bb1f7af5b58b30e4e6414b8c1abccab')
version('7.47.1', '9ea3123449439bbd960cd25cf98796fb')
version('7.46.0', '9979f989a2a9930d10f1b3deeabc2148')
version('7.45.0', '62c1a352b28558f25ba6209214beadc8')
version('7.44.0', '6b952ca00e5473b16a11f05f06aa8dae')
version('7.43.0', '11bddbb452a8b766b932f859aaeeed39')
version('7.42.1', '296945012ce647b94083ed427c1877a8')
variant('nghttp2', default=False, description='build nghttp2 library (requires C++11)')
variant('libssh2', default=False, description='enable libssh2 support')
variant('libssh', default=False, description='enable libssh support') # , when='7.58:')
variant('darwinssl', default=sys.platform == 'darwin', description="use Apple's SSL/TLS implementation")
conflicts('+libssh', when='@:7.57.99')
    # On OSX with --with-ssh the configure step fails with
    # "one or more libs available at link-time are not available run-time"
    # unless libssh is installed externally (e.g. via homebrew), even
    # though spack isn't supposed to know about such a libssh installation.
# C.f. https://github.com/spack/spack/issues/7777
conflicts('platform=darwin', when='+libssh2')
conflicts('platform=darwin', when='+libssh')
conflicts('platform=linux', when='+darwinssl')
depends_on('openssl', when='~darwinssl')
depends_on('zlib')
depends_on('nghttp2', when='+nghttp2')
depends_on('libssh2', when='+libssh2')
depends_on('libssh', when='+libssh')
def configure_args(self):
spec = self.spec
args = ['--with-zlib={0}'.format(spec['zlib'].prefix)]
if spec.satisfies('+darwinssl'):
args.append('--with-darwinssl')
else:
args.append('--with-ssl={0}'.format(spec['openssl'].prefix))
args += self.with_or_without('nghttp2')
args += self.with_or_without('libssh2')
args += self.with_or_without('libssh')
return args
| 1.773438 | 2 |
mlcomp/__version__.py | gorogoroyasu/mlcomp | 0 | 12785670 | <filename>mlcomp/__version__.py<gh_stars>0
__version__ = '19.10'
| 1.023438 | 1 |
atest/testdata/keywords/PositionalOnly.py | bhirsz/robotframework | 7,073 | 12785671 | def one_argument(arg, /):
return arg.upper()
def three_arguments(a, b, c, /):
return '-'.join([a, b, c])
def with_normal(posonly, /, normal):
return posonly + '-' + normal
def defaults(required, optional='default', /):
return required + '-' + optional
def types(first: int, second: float, /):
return first + second
def kwargs(x, /, **y):
return '%s, %s' % (x, ', '.join('%s: %s' % item for item in y.items()))
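# For illustration, how the positional-only marker ('/') behaves here:
#     one_argument('hello')        -> 'HELLO'
#     one_argument(arg='hello')    -> TypeError (arg is positional-only)
#     with_normal('a', normal='b') -> 'a-b'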
| 3.15625 | 3 |
rx/internal/basic.py | mmpio/RxPY | 4,342 | 12785672 | from typing import Any
from datetime import datetime
# Defaults
def noop(*args, **kw):
"""No operation. Returns nothing"""
pass
def identity(x: Any) -> Any:
"""Returns argument x"""
return x
def default_now() -> datetime:
return datetime.utcnow()
def default_comparer(x: Any, y: Any) -> bool:
return x == y
def default_sub_comparer(x, y):
return x - y
def default_key_serializer(x: Any) -> str:
return str(x)
def default_error(err) -> Exception:
if isinstance(err, BaseException):
raise err
else:
raise Exception(err)
| 2.953125 | 3 |
wxgigo/wxmp/api/webauth.py | rfancn/wxgigo | 0 | 12785673 | #!/usr/bin/env python
# coding=utf-8
"""
Copyright (C) 2010-2013, <NAME> <<EMAIL>>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Library General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
from __future__ import absolute_import
import requests
import json
import urllib
from celery import shared_task, Task
from sdk.constants import *
from sdk.web.helper import WebHelper
class BaseWeb(Task):
abstract = True
web_helper = WebHelper(Task.app.db)
app_id, app_key = Task.app.db.hmget(WXMP_CONFIG, 'APP_ID', 'APP_KEY')
#class get_access_token(BaseWeb):
# def run(self, open_id):
# return self.web_helper.get_access_token(open_id)
class auth(BaseWeb):
"""
Authorization to obtain web access token
@param: code
@return: if succeed, returns openid
"""
def run(self, code):
if not self.app_id or not self.app_key:
print "No app_id or app_key when doing web authentication"
return None
url = 'https://api.weixin.qq.com/sns/oauth2/access_token?' \
'appid={0}&secret={1}&code={2}&' \
'grant_type=authorization_code'.format(self.app_id, self.app_key, code)
try:
resp = requests.get(url).json()
except Exception,e:
print "Failed to do web authentication because of:{0}".format(e)
return None
if not isinstance(resp, dict):
print "Invalid response format when do web authentication"
return None
if 'errcode' in resp.keys() and (resp['errcode'] != 0):
print "Error response when do web authentication: {0}".format(resp['errmsg'])
return None
if not self.web_helper.save_auth_info(resp):
return None
return resp['openid']
class get_auth_url(BaseWeb):
def run(self, redirect_url, scope):
if not self.app_id:
print "Failed to get app_id in get_auth_url()"
return None
auth_url = 'https://open.weixin.qq.com/connect/oauth2/authorize?' \
'appid={0}&redirect_uri={1}&response_type=code' \
'&scope={2}#wechat_redirect'.format(self.app_id, urllib.quote_plus(redirect_url), scope)
return auth_url
class get_user_info(BaseWeb):
def refresh_access_token(self, open_id):
if not self.app_id:
print "Failed to get app_id when refresh web access token"
return None
refresh_token = self.web_helper.get_refresh_token(open_id)
if not refresh_token:
return None
url = 'https://api.weixin.qq.com/sns/oauth2/refresh_token?' \
'appid={0}&grant_type=refresh_token&refresh_token={1}'.format(
self.app_id, refresh_token
)
try:
resp = requests.get(url).json()
except Exception,e:
print "Failed to get refresh web access token because of:{0}".format(e)
return None
if not isinstance(resp, dict):
print "Invalid response format when refresh web access token"
return None
if 'errcode' in resp.keys() and (resp['errcode'] != 0):
print "Error response when refresh web access token: {0}".format(resp['errmsg'])
return None
        # resp is an authentication info dict containing the following:
#
# {
# "access_token":"ACCESS_TOKEN",
# "expires_in":7200,
# "refresh_token":"<PASSWORD>_TOKEN",
# "openid":"OPENID",
# "scope":"SCOPE"
# }
if not self.web_helper.save_auth_info(resp):
return None
return resp['access_token']
def run(self, open_id):
access_token = self.web_helper.get_access_token(open_id)
# first time check if we can get valid access_token from db
if not access_token:
# may be access_token expired, try refresh it
print "Failed to get valid access_token from db, try to refresh it..."
access_token = self.refresh_access_token(open_id)
# second time check after refresh
if not access_token:
print "Failed to get access_token after refresh"
return None
url = 'https://api.weixin.qq.com/sns/userinfo?' \
'access_token={0}&openid={1}&lang=zh_CN'.format(access_token, open_id)
try:
resp = requests.get(url)
# Important: Must not use requests.response.json() method here
# otherwise, requests will doing ascii encode against the unicode string
resp = json.loads(resp.content)
except Exception,e:
print "Failed to get userinfo because of:{0}".format(e)
return None
if not isinstance(resp, dict):
print "Invalid response format when get userinfo from Weixin server"
return None
if 'errcode' in resp.keys() and (resp['errcode'] != 0):
print "Error response when get userinfo info from Weixin server: {0}".format(resp['errmsg'])
return None
return resp | 2.234375 | 2 |
src/simod/writers/xml_writer.py | Mcamargo85/SiMo-Discoverer | 12 | 12785674 | <filename>src/simod/writers/xml_writer.py
import uuid
from lxml import etree
from lxml.builder import ElementMaker # lxml only !
# --------------- General methods ----------------
from simod.configuration import QBP_NAMESPACE_URI
def create_file(output_file, element):
# file_exist = os.path.exists(output_file)
with open(output_file, 'wb') as f:
f.write(element)
f.close()
# -------------- kernel --------------
def print_parameters(bpmn_input, output_file, parameters):
my_doc = xml_template(parameters.get('arrival_rate'),
parameters.get('resource_pool'),
parameters.get('elements_data'),
parameters.get('sequences'),
parameters.get('instances'),
parameters.get('start_time'))
# insert timetable
if parameters.get('time_table') is not None:
ns = {'qbp': QBP_NAMESPACE_URI}
childs = parameters['time_table'].findall('qbp:timetable', namespaces=ns)
node = my_doc.find('qbp:timetables', namespaces=ns)
for i, child in enumerate(childs):
node.insert((i + 1), child)
# Append parameters to the bpmn model
root = append_parameters(bpmn_input, my_doc)
create_file(output_file, etree.tostring(root, pretty_print=True))
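# For illustration, a minimal `parameters` dict accepted by print_parameters
# (values are placeholders; keys match the lookups above, and any block whose
# key is missing is simply omitted from the generated simulation info):
#     parameters = {
#         'arrival_rate': {'dname': 'EXPONENTIAL',
#                          'dparams': {'mean': 60, 'arg1': 0, 'arg2': 0}},
#         'resource_pool': [...], 'elements_data': [...], 'sequences': [...],
#         'instances': 100, 'start_time': '2019-10-01T09:00:00.000Z',
#         'time_table': None,
#     }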
def xml_template(arrival_rate=None, resource_pool=None, elements_data=None, sequences=None, instances=None,
start_time=None):
E = ElementMaker(namespace=QBP_NAMESPACE_URI,
nsmap={'qbp': QBP_NAMESPACE_URI})
PROCESSSIMULATIONINFO = E.processSimulationInfo
ARRIVALRATEDISTRIBUTION = E.arrivalRateDistribution
TIMEUNIT = E.timeUnit
TIMETABLES = E.timetables
RESOURCES = E.resources
RESOURCE = E.resource
ELEMENTS = E.elements
ELEMENT = E.element
DURATION = E.durationDistribution
RESOURCESIDS = E.resourceIds
RESOURCESID = E.resourceId
SEQUENCEFLOWS = E.sequenceFlows
SEQUENCEFLOW = E.sequenceFlow
rootid = "qbp_" + str(uuid.uuid4())
arrival_doc = None
if arrival_rate:
arrival_doc = ARRIVALRATEDISTRIBUTION(
TIMEUNIT("seconds"),
type=arrival_rate['dname'],
mean=str(arrival_rate['dparams']['mean']),
arg1=str(arrival_rate['dparams']['arg1']),
arg2=str(arrival_rate['dparams']['arg2']))
resources_doc = None
if resource_pool:
resources_doc = RESOURCES(
*[
RESOURCE(
id=res['id'],
name=res['name'],
totalAmount=res['total_amount'],
costPerHour=res['costxhour'],
timetableId=res['timetable_id']) for res in resource_pool
]
)
elements_doc = None
if elements_data:
elements_doc = ELEMENTS(
*[
ELEMENT(
DURATION(
TIMEUNIT("seconds"),
type=e['type'],
mean=e['mean'],
arg1=e['arg1'],
arg2=e['arg2']
),
RESOURCESIDS(
RESOURCESID(str(e['resource']))
),
id=e['id'], elementId=e['elementid']
) for e in elements_data
]
)
sequences_doc = None
if sequences:
sequences_doc = SEQUENCEFLOWS(
*[
SEQUENCEFLOW(
elementId=seq['elementid'],
executionProbability=str(seq['prob'])) for seq in sequences
]
)
docs = list(filter(lambda doc: doc is not None, [arrival_doc, resources_doc, elements_doc, sequences_doc]))
my_doc = PROCESSSIMULATIONINFO(
TIMETABLES(),
*docs,
id=rootid,
processInstances=str(instances) if instances else "",
startDateTime=start_time if start_time else "",
currency="EUR"
)
return my_doc
def append_parameters(bpmn_input, my_doc):
node = etree.fromstring(etree.tostring(my_doc, pretty_print=True))
tree = etree.parse(bpmn_input)
root = tree.getroot()
froot = etree.fromstring(etree.tostring(root, pretty_print=True))
froot.append(node)
return froot
| 2.40625 | 2 |
hkpy/hkpyo/model/hko_model.py | renan-souza/hkpy | 7 | 12785675 | <gh_stars>1-10
###
# Copyright (c) 2019-present, IBM Research
# Licensed under The MIT License [see LICENSE for details]
###
import json
from collections import Counter
from typing import TypeVar, Union, Dict
from hkpy import hkfy
CONCEPT_IRI = "http://brl.ibm.com/ontologies/hko#Concept"
PROPERTY_IRI = "http://brl.ibm.com/ontologies/hko#Property"
INDIVIDUAL_IRI = "http://brl.ibm.com/ontologies/hko#Individual"
#TOP_CONTEXT_IRI = "http://brl.ibm.com/ontology/hko$Everything"
TOP_CONTEXT_IRI = None
TOP_CONCEPT_IRI = "http://brl.ibm.com/ontologies/hko#Entity"
TOP_PROPERTY_IRI = "http://brl.ibm.com/ontologies/hko#property"
HKOContext = TypeVar('HKOContext')
HKOProperty = TypeVar('HKOProperty')
HKOContextManager = TypeVar("HKOContextManager")
HKOLiteral = TypeVar('Union[str,int,float]')
HKO_NUMBER_DATATYPE = "http://brl.ibm.com/ontologies/hko#Number"
HKO_STRING_DATATYPE = "http://brl.ibm.com/ontologies/hko#String"
class HKOElement:
"""Any syntactic element should extend this class"""
def __init__(self):
pass
class HKOContextElement(HKOElement):
def __init__(self, context: HKOContext) -> None:
super().__init__()
self.context = context
class HKONamedElement(HKOContextElement):
"""All named elements are contextualized """
def __init__(self, iri: str, context: HKOContext) -> None:
super().__init__(context=context)
self.iri = iri
def __str__(self) -> str:
return self.iri
#do not add hash and eq.
class HKOConceptExpression(HKOElement):
pass
class HKOConcept(HKOConceptExpression, HKONamedElement):
def __init__(self, iri: str, context: HKOContext):
HKONamedElement.__init__(self, iri, context)
def __eq__(self, o: object) -> bool:
if isinstance(o, HKOConcept):
#it might happen that the same iri can identify concepts/properties and individuals
return self.iri == o.iri
else:
return super().__eq__(o)
def __hash__(self) -> int:
return hash(str(HKOConcept)+self.iri)
class HKOExistsExpression(HKOConceptExpression):
def __init__(self, property: HKOProperty, concept: HKOConceptExpression):
super().__init__()
self.property = property
self.concept = concept
def __str__(self) -> str:
return f"""(exists {self.property} {self.concept})"""
def __eq__(self, o: object) -> bool:
return isinstance(o, HKOExistsExpression) and o.concept == self.concept and o.property == self.property
def __hash__(self) -> int:
return hash(hash(HKOExistsExpression) + self.property.__hash__() + self.concept.__hash__())
class HKOForallExpression(HKOConceptExpression):
def __init__(self, property: HKOProperty, concept: HKOConceptExpression):
super().__init__()
self.property = property
self.concept = concept
def __str__(self) -> str:
return f"""(forall {self.property} {self.concept})"""
def __eq__(self, o: object) -> bool:
return isinstance(o, HKOForallExpression) and o.concept == self.concept and o.property == self.property
def __hash__(self) -> int:
return hash(hash(HKOForallExpression) + self.property.__hash__() + self.concept.__hash__())
class HKOConjunctionExpression(HKOConceptExpression):
def __init__(self, *concepts: HKOConceptExpression):
super().__init__()
self.concepts = tuple(concepts)
def __str__(self) -> str:
return f"""(and {' '.join(map(lambda x : str(x), self.concepts))})"""
def __eq__(self, o: object) -> bool:
return isinstance(o, HKOConjunctionExpression) and Counter(o.concepts) == Counter(self.concepts)
def __hash__(self) -> int:
#TODO: check if self.concepts should be changed to a set implementation
return hash(hash(HKOConjunctionExpression) + sum(hash(e) for e in self.concepts))
class HKODisjunctionExpression(HKOConceptExpression):
def __init__(self, *concepts: HKOConceptExpression):
super().__init__()
self.concepts = concepts
def __str__(self) -> str:
return f"""(or {' '.join(map(lambda x : str(x), self.concepts))})"""
def __eq__(self, o: object) -> bool:
return isinstance(o, HKODisjunctionExpression) and Counter(o.concepts) == Counter(self.concepts)
def __hash__(self) -> int:
#TODO: check if self.concepts should be changed to a set implementation
return hash(hash(HKODisjunctionExpression) + sum(hash(e) for e in self.concepts))
class HKOConceptNegationExpression(HKOConceptExpression):
def __init__(self, concept_expr: HKOConceptExpression):
super().__init__()
self.concept_expr = concept_expr
def __str__(self) -> str:
return f"""(not {self.concept_expr})"""
# class HKOPropertyExpression(HKOElement):
# context: HKOContext
#
# def __init__(self, context: HKOContext):
# super().__init__()
# self.context = context
class HKOIndividual(HKONamedElement):
def __init__(self, iri: str, context: HKOContext):
HKONamedElement.__init__(self, iri, context)
def __str__(self) -> str:
return self.iri
def __eq__(self, o: object) -> bool:
if isinstance(o, HKOIndividual):
#it might happen that the same iri can identify concepts/properties and individuals
return self.iri == o.iri
else:
return super().__eq__(o)
def __hash__(self) -> int:
return hash(str(HKOIndividual)+self.iri)
class HKOProperty(HKONamedElement):
def __init__(self, iri: str, context: HKOContext):
HKONamedElement.__init__(self, iri, context)
def __str__(self) -> str:
return self.iri
def __eq__(self, o: object) -> bool:
if isinstance(o, HKOProperty):
#it might happen that the same iri can identify concepts/properties and individuals
return self.iri == o.iri
else:
return super().__eq__(o)
def __hash__(self) -> int:
return hash(str(HKOProperty)+self.iri)
# class HKORoleExpression(HKOPropertyExpression) :
# property: HKOProperty
# //role: HKORole
# concept: HKOConceptExpression
#
# constructor(iri: string, context: HKOContext, property: HKOProperty, concept: HKOConcept)
# super(context)
# self.property = property
# //self.role = role
# self.concept = concept
#
#
class HKOAxiom(HKOContextElement):
def __init__(self, context: HKOContext):
super().__init__(context=context)
class HKOSubConceptAxiom(HKOAxiom):
def __init__(self, context: HKOContext, sub: HKOConceptExpression, sup: HKOConceptExpression):
assert(isinstance(context, HKOContext))
assert(isinstance(sub, HKOConceptExpression))
assert(isinstance(sup, HKOConceptExpression))
super().__init__(context)
self.sub = sub
self.sup = sup
def __str__(self) -> str:
return f"""(subconcept {self.sub} {self.sup})"""
def __eq__(self, o: object) -> bool:
return isinstance(o, HKOSubConceptAxiom) and o.context == self.context and o.sub == self.sub and o.sup == self.sup
def __hash__(self) -> int:
return hash(hash(HKOSubConceptAxiom) + hash(self.context) + hash(self.sub)+ hash(self.sup))
class HKOEquivalentConceptAxiom(HKOAxiom):
def __init__(self, context: HKOContext, conceptA: HKOConceptExpression, conceptB: HKOConceptExpression):
assert(isinstance(context, HKOContext))
assert(isinstance(conceptA, HKOConceptExpression))
assert(isinstance(conceptB, HKOConceptExpression))
super().__init__(context)
self.conceptA = conceptA
self.conceptB = conceptB
def __str__(self) -> str:
return f"""(eqconcept {self.conceptA} {self.conceptB})"""
def __eq__(self, o: object) -> bool:
return isinstance(o, HKOEquivalentConceptAxiom) and o.context == self.context and o.conceptA == self.conceptA and o.conceptB == self.conceptB
def __hash__(self) -> int:
return hash(str(HKOEquivalentConceptAxiom) + hash(self.context) + hash(self.conceptA)+ hash(self.conceptB))
class HKOAssertion(HKOContextElement):
def __init__(self, context: HKOContext):
super().__init__(context=context)
class HKOConceptAssertion(HKOAssertion):
def __init__(self, context: HKOContext, concept: HKOConceptExpression, individual: HKOIndividual):
super().__init__(context)
assert(isinstance(concept,HKOConceptExpression))
assert(isinstance(individual,HKOIndividual))
self.concept = concept
self.individual = individual
def __str__(self) -> str:
return f"""({self.concept} {self.individual})"""
def __eq__(self, o: object) -> bool:
return isinstance(o, HKOConceptAssertion) \
and o.context == self.context \
and o.concept == self.concept \
and o.individual == self.individual
def __hash__(self) -> int:
return hash(hash(HKOConceptAssertion) +
self.context.__hash__()
+ self.concept.__hash__()
+ self.individual.__hash__())
class HKOPropertyAssertion(HKOAssertion):
def __init__(self, context: HKOContext, property: HKOProperty, arg1: HKOIndividual, arg2: Union[HKOIndividual, str, int, float]):
assert(isinstance(property,HKOProperty))
assert(isinstance(arg1,HKOIndividual))
assert(isinstance(arg2,HKOIndividual) or isinstance(arg2,str) or isinstance(arg2,int) or isinstance(arg2,float))
super().__init__(context)
self.property = property
self.arg1 = arg1
#self.arg2 = arg2
self.arg2 = arg2 if isinstance(arg2, HKOIndividual) else str(arg2)
def __str__(self) -> str:
if isinstance(self.arg2, HKOIndividual):
return f"""({self.property} {self.arg1} {self.arg2})"""
else:
return f"""({self.property} {self.arg1} "{str(self.arg2)}")"""
def __eq__(self, o: object) -> bool:
return isinstance(o, HKOPropertyAssertion) \
and o.context == self.context \
and o.property == self.property \
and o.arg1 == self.arg1 \
and o.arg2 == self.arg2
def __hash__(self) -> int:
return hash(hash(HKOConceptAssertion)
+ self.context.__hash__()
+ self.property.__hash__()
+ self.arg1.__hash__()
+ self.arg2.__hash__())
class HKOContext(HKONamedElement):
def __init__(self, iri: str, context: HKOContext, *elements: HKOElement):
super().__init__(iri, context)
self.elements = list(elements)
def __eq__(self, o: object) -> bool:
#simple implementation
return isinstance(o, HKOContext) and o.iri == self.iri
def __hash__(self) -> int:
return hash(str(HKOContext) + self.iri)
def addAxiom(self, axiom: HKOAxiom):
self.elements.append(axiom)
def axioms(self):
return self.elements
def addAssertion(self, assertion: HKOAxiom):
self.elements.append(assertion)
def __str__(self) -> str:
str_elements = ',\n'.join(map(lambda x : str(x), self.elements))
return f"""{self.iri}:[ {str_elements} ]"""
TOP_CONTEXT: HKOContext = HKOContext(TOP_CONTEXT_IRI, None)
class HKOContextBuilder:
def __init__(self, context):
self.context = context
def getHKOConcept(self, iri: str) -> HKOConcept:
return HKOConcept(iri, self.context)
def getHKOProperty(self, iri: str) -> HKOProperty:
return HKOProperty(iri, self.context)
def getHKOExistsExpression(self, property: HKOProperty, concept: HKOConceptExpression) -> HKOExistsExpression:
return HKOExistsExpression(property, concept)
def getHKOForallExpression(self, property: HKOProperty, concept: HKOConceptExpression) -> HKOExistsExpression:
return HKOForallExpression(property, concept)
def getHKOConjunctionExpression(self, *concepts: HKOConceptExpression) -> HKOConjunctionExpression:
return HKOConjunctionExpression(*concepts)
def getHKODisjunctionExpression(self, *concepts: HKOConceptExpression) -> HKODisjunctionExpression:
return HKODisjunctionExpression(*concepts)
def getHKOConceptNegationExpression(self, concept: HKOConceptExpression) -> HKOConceptNegationExpression:
return HKOConceptNegationExpression(concept)
def getHKOSubConceptAxiom(self, sub: HKOConcept, sup: HKOConceptExpression) -> HKOSubConceptAxiom:
return HKOSubConceptAxiom(self.context, sub, sup)
def getHKOEquivalentConceptAxiom(self, conceptA: HKOConcept,
conceptB: HKOConceptExpression) -> HKOEquivalentConceptAxiom:
return HKOEquivalentConceptAxiom(self.context, conceptA, conceptB)
def getHKOIndividual(self, iri) -> HKOIndividual:
return HKOIndividual(iri, self.context)
def getHKOConceptAssertion(self, concept : HKOConceptExpression, individual : HKOIndividual) -> HKOConceptAssertion:
return HKOConceptAssertion(self.context, concept, individual)
def getHKOPropertyAssertion(self, property: HKOProperty, arg1: HKOIndividual, arg2: Union[HKOIndividual, str, int, float]):
return HKOPropertyAssertion(self.context, property, arg1, arg2)
class HKOContextManager:
_manager_singleton : HKOContextManager = None
def __init__(self):
self.loaded_contexts: Dict[str, HKOContext] = {}
    @staticmethod
    def get_global_context_manager() -> HKOContextManager:
        if not HKOContextManager._manager_singleton:
            HKOContextManager._manager_singleton = HKOContextManager()
        return HKOContextManager._manager_singleton
def getHKOContext(self, iri: str, parent: Union[str,HKOContext] = TOP_CONTEXT) -> HKOContext:
if iri not in self.loaded_contexts:
return None
context = HKOContext(iri, parent)
return context
def createHKOContext(self, iri: str, parent: Union[str,HKOContext] = TOP_CONTEXT) -> HKOContext:
if iri in self.loaded_contexts:
raise Exception('Existing context already loaded.')
context = HKOContext(iri, parent)
self.loaded_contexts[iri] = context
return context
def getHKOContextBuilder(self, context=TOP_CONTEXT) -> HKOContextBuilder:
return HKOContextBuilder(context=context)
def addAxiom(self, context: HKOContext, *axioms: HKOAxiom) -> None:
for axiom in axioms : axiom.context = context
context.elements.extend(axioms)
def addAssertion(self, context: HKOContext, *assertions: HKOAxiom) -> None:
for assertion in assertions: assertion.context = context
context.elements.extend(assertions)
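    # For illustration, a typical flow with this manager (the IRIs and file
    # path below are placeholders):
    #     mgr = HKOContextManager.get_global_context_manager()
    #     ctx = mgr.createHKOContext("http://example.org/ctx")
    #     cb = mgr.getHKOContextBuilder(ctx)
    #     person = cb.getHKOConcept("http://example.org/Person")
    #     agent = cb.getHKOConcept("http://example.org/Agent")
    #     mgr.addAxiom(ctx, cb.getHKOSubConceptAxiom(person, agent))
    #     mgr.saveHKOContextToFile(ctx, "ctx.json")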
def saveHKOContextToFile(self, context : HKOContext, file_path : str):
with open(file_path, mode="w") as f:
from hkpy.hkpyo.converters.HKOWriterHKG import HKOWriterHKG
writer = HKOWriterHKG()
entities = writer.writeHKOContext(context)
buffer = {}
for x in entities:
buffer[x.id_] = x.to_dict()
json_entities = list(buffer.values())
f.write(json.dumps(json_entities))
def readHKOContextFromFile(self, context_iri : str, file_path : str) -> HKOContext:
with open(file_path, mode="r") as f:
file_data = f.read()
file_data_json = json.loads(file_data)
hkg_graph = {}
for e in file_data_json:
hke = hkfy(e)
hkg_graph[hke.id_] = hke
hkg_context = hkg_graph.get('<' + context_iri + '>', None)
            if hkg_context is None:
                raise Exception('Context iri is not present in the file.')
            del hkg_graph[hkg_context.id_]
from hkpy.hkpyo.converters.HKOReaderHKG import HKOContextExpandable
hko_pcontext = HKOContext(hkg_context.id_[1:-1], HKOContextExpandable(iri=hkg_context.parent))
from hkpy.hkpyo.converters.HKOReaderHKG import HKOReaderHKG
reader = HKOReaderHKG()
reader.readHKOintoContextFromHKGJson(file_data_json, self.getHKOContextBuilder(hko_pcontext))
return hko_pcontext | 2.203125 | 2 |
plot_clustering.py | lthUniBonn/AWERA | 0 | 12785676 | <gh_stars>0
from AWERA import config, reference_locs, ChainAWERA
import time
from AWERA.utils.convenience_utils import write_timing_info
since = time.time()
if __name__ == '__main__':
add_plot_eval_dir = 'clustering/'
# read config from jobnumber
# 8 small jobs
# 4 big jobs
# settings_id = int(os.environ['SETTINGS_ID'])
# test_final_setup_settings = training_settings[10:12] # 5000, 1000
# settings = training_settings[settings_id]
settings = {
'Data': {'n_locs': 5000,
'location_type': 'europe'},
'Clustering': {
'n_clusters': 8,
'training': {
'n_locs': 5000,
'location_type': 'europe'
}
},
}
settings['General'] = {'use_memmap': True}
settings['IO'] = {
'result_dir': "/cephfs/user/s6lathim/AWERA_results/",
'format': {
'plot_output':
add_plot_eval_dir + config.IO.format.plot_output,
'plot_output_data':
add_plot_eval_dir + config.IO.format.plot_output_data,
'training_plot_output':
add_plot_eval_dir + config.IO.format.training_plot_output,
# 'power_curve':
# add_plot_eval_dir + config.IO.format.power_curve,
# 'cut_wind_speeds':
# add_plot_eval_dir + config.IO.format.cut_wind_speeds,
# 'refined_cut_wind_speeds':
# add_plot_eval_dir + config.IO.format.refined_cut_wind_speeds,
# 'optimizer_history':
# add_plot_eval_dir + config.IO.format.optimizer_history,
}
}
settings['Processing'] = {'n_cores': 15}
print(settings)
# Single locaion evaluation
loc = reference_locs[0]
sample_id = 27822
# Update settings to config
config.update(settings)
# Initialise AWERA eval with chosen config
awera = ChainAWERA(config)
working_title = 'Plotting clustering'
n_clusters_list = [8, 16] # 80
for n_clusters in n_clusters_list:
prod_settings = {
'Clustering': {'n_clusters': n_clusters}}
awera.config.update(prod_settings)
# awera.cluster_frequency_maps(use_rel='cluster')
# awera.cluster_frequency_maps(use_rel='loc')
# # ----------------------------------------
# awera.visualize_clustering_flow(loc=loc, sample_id=sample_id)
# awera.original_vs_cluster_wind_profile_shapes(loc=loc,
# sample_id=sample_id,
# x_lim=(-12, 17),
# y_lim=(-12, 12))
# awera.plot_cluster_shapes(scale_back_sf=False,
# x_lim_profiles=[-2.2, 3.2],
# y_lim_profiles=[-1.7, 1.7])
data = awera.cluster_pc_projection(return_data=True)
awera.analyse_pc(data=data)
# ----------------------------------------
print('{} clusters done.'.format(n_clusters))
print('------------------------------ Time:')
write_timing_info('{} AWERA run finished.'.format(working_title),
time.time() - since)
# --------------------------------------
print('Done.')
print('------------------------------ Config:')
print(awera.config)
print('------------------------------ Time:')
write_timing_info('{} AWERA run finished.'.format(working_title),
time.time() - since)
# plt.show()
| 1.8125 | 2 |
filter_provider.py | aniloutdo/Fitness-Gadgets | 2 | 12785677 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from numpy import arange
class AcceptanceTester:
"""Tests data points for acceptance into dataset_container."""
def __init__(self, tester_type):
"""Initialize with the type of data to test.
Parameters
----------
tester_type : string
The type of data that validity should be tested for.
Returns
-------
None
"""
self._tester_map = {'heartrate': self._test_heartrate,
'intensity': self._test_intensity,
'steps': self._test_steps}
self._tester = self._tester_map.get(tester_type, self._test_accept_all)
def __call__(self, Datapoint):
"""Pass a Datapoint to test, returns acceptance based on the type set.
Parameters
----------
Datapoint : Datapoint
The Datapoint to test for acceptance
Returns
-------
bool
Whether or not the value is valid.
"""
return self._tester(Datapoint)
def _test_accept_all(self, Datapoint):
"""Accept all datapoints.
Parameters
----------
Datapoint : Datapoint
The Datapoint being tested.
Returns
-------
True
This function always returns True to accept all data points.
"""
return True
def _test_heartrate(self, Datapoint):
"""Test HR datapoints for acceptance. Do not accept values that match
the following:
        * HR >= 255
* HR <= 0
Parameters
----------
Datapoint : Datapoint
The Datapoint to test for acceptance
Returns
-------
bool
Whether or not the value is valid.
"""
return not Datapoint.value >= 255 and not Datapoint.value <= 0
def _test_intensity(self, Datapoint):
"""Test intensity datapoints for acceptance. Do not accept values that
match the following:
        * intensity >= 255
Parameters
----------
Datapoint : Datapoint
The Datapoint to test for acceptance
Returns
-------
bool
Whether or not the value is valid.
"""
return not Datapoint.value >= 255
def _test_steps(self, Datapoint):
"""Test step datapoints for acceptance. Do not accept values that match
the following:
* steps < 0
Parameters
----------
Datapoint : Datapoint
The Datapoint to test for acceptance
Returns
-------
bool
Whether or not the value is valid.
"""
return not Datapoint.value < 0
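# For illustration, how the tester is used (a Datapoint is expected to expose a
# `.value` attribute; `point` below is a placeholder):
#     accept_hr = AcceptanceTester('heartrate')
#     accept_hr(point)   # False for point.value >= 255 or <= 0, True otherwise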
class DatasetFilter:
"""A class providing dataset filtering."""
def __init__(self):
"""Initialize the filters.
Parameters
----------
None
Returns
-------
None
"""
self._filter_map = {'heartrate': self._filter_hr}
self._filter_params = {}
self._filters = []
def add_filter(self, filtername, **kwargs):
"""Add a filter to be applied to the dataset. The first parameter
selects the filter type, any other parameters must be named and will be
stored and passed to the filter function as parameters.
Parameters
----------
filtername : string
The name of the filter to add.
kwargs : dict
Any other named parameters will be stored and passed to the
filter when it is called.
Returns
-------
None
"""
if filtername in self._filter_map and not filtername in self._filters:
self._filters.append(filtername)
self._filter_params[filtername] = kwargs
def __call__(self, timestamps, values):
"""Apply the filters that have been set up for this provider to the
dataset passed and return the resulting dataset.
Parameters
----------
timestamps : numpy.array
The timestamps of the data to be filtered
values : numpy.array
The values of the data to be filtered
"""
for filtername in self._filters:
timestamps, values = self._filter_map[filtername](timestamps, values,
**self._filter_params[filtername])
return timestamps, values
def count(self):
"""Returns the number of filter functions applied to data.
Parameters
----------
None
Returns
int
The number of filters stored
"""
return len(self._filters)
def _filter_hr(self, timestamps, values, delta_doublefilter=3):
"""A heartrate-specific filter. It checks if a value is twice as high as
the ones preceding and following it, within the delta_doublefilter
environment, and divides the values matching by two.
Parameters
----------
timestamps : numpy.array
The timestamps of the data to be filtered
values : numpy.array
The values of the data to be filtered
delta_doublefilter : int
The delta around double the value of a heartrate value for which
the filter will still be applied.
Returns
-------
None
"""
for i in arange(1, len(values) - 1):
diff_lower = abs((float(values[i])/2.) - values[i - 1])
diff_upper = abs((float(values[i])/2.) - values[i + 1])
if (diff_lower < delta_doublefilter)*\
(diff_upper < delta_doublefilter):
values[i] /= 2.
return timestamps, values
| 3.265625 | 3 |
conjur/controller/hostfactory_controller.py | cyberark/conjur-api-python3 | 16 | 12785678 | <gh_stars>10-100
# -*- coding: utf-8 -*-
"""
HostFactoryController
This Module represents the Presentation Layer for the HostFactory command
"""
import http
import logging
import sys
# Internals
from conjur.errors import MissingRequiredParameterException
from conjur.data_object.create_token_data import CreateTokenData
from conjur.data_object.create_host_data import CreateHostData
from conjur.logic.hostfactory_logic import HostFactoryLogic
# pylint: disable=too-few-public-methods,logging-fstring-interpolation
class HostFactoryController:
"""
HostFactoryController
This class represents the Presentation Layer for the HostFactory command
"""
def __init__(self, hostfactory_logic: HostFactoryLogic):
self.hostfactory_logic = hostfactory_logic
def create_token(self, create_token_data: CreateTokenData):
"""
Method that facilitates create token call to the logic
"""
if create_token_data is None:
raise MissingRequiredParameterException('Missing required parameters')
logging.debug(f"Creating token for hostfactory '{create_token_data.host_factory}'...")
result = self.hostfactory_logic.create_token(create_token_data)
sys.stdout.write(result + '\n')
logging.debug("Successfully created token for hostfactory "
f"'{create_token_data.host_factory}'")
def create_host(self, create_host_data: CreateHostData):
"""
        Method that facilitates the create host call to the logic
"""
if create_host_data is None:
raise MissingRequiredParameterException('Missing required parameters')
logging.debug(f"Creating host: '{create_host_data.host_id}'...")
result = self.hostfactory_logic.create_host(create_host_data)
sys.stdout.write(result + '\n')
logging.debug("Successfully created host using hostfactory: host_id:"
f"'{create_host_data.host_id}'")
def revoke_token(self, token: str):
"""
Method that facilitates token revocation call to the logic
"""
if token is None:
raise MissingRequiredParameterException('Missing required parameters')
logging.debug("Attempting to revoke a token")
response = self.hostfactory_logic.revoke_token(token)
if response == http.HTTPStatus.NO_CONTENT:
sys.stdout.write(f'Token \'{token}\' has been revoked.\n')
logging.debug(f'Successfully revoked token'
f', return code: {response}')
| 2.515625 | 3 |
test.py | EMUN123/TTF-xunlei | 0 | 12785679 | <reponame>EMUN123/TTF-xunlei<filename>test.py<gh_stars>0
import os
import re
# In the following code, I made almost every important mistake with re...
file_list = os.listdir(r'C:\Users\HZB\Downloads')
file_str = "match".join(file_list)
'''
re can't deal with the English (ASCII) "()" characters as-is, so we need to switch them into something else
'''
file_str = file_str.replace('(', '')
file_str = file_str.replace(')', '')
print(file_str)
'''
re can't effectively compile blank spaces and tabs
'''
'''
re also can't directly compile '$ < > /'... and maybe other special characters,
as they stand for different meanings in re
'''
'''
we need to use [\u4e00-\u9fa5]+ to match Chinese characters
'''
'''
we add 'r' before the pattern to be compiled to avoid backslash-escaping issues (a raw string literal)
'''
regex = re.compile(r'match([\u4e00-\u9fa5]+.*?)\.crdownload')
if regex.findall(file_str):
print('yes') | 2.703125 | 3 |
optionvisualizer/utilities.py | GBERESEARCH/optionvisualizer | 8 | 12785680 | <gh_stars>1-10
"""
Utility functions for refreshing parameters
"""
import copy
import numpy as np
import scipy.stats as si
from optionvisualizer.visualizer_params import vis_params_dict
# pylint: disable=invalid-name
class Utils():
"""
Utility functions for refreshing parameters
"""
@staticmethod
def _init_params(inputs):
"""
Initialise parameter dictionary
Parameters
----------
inputs : Dict
Dictionary of parameters supplied to the function.
Returns
-------
params : Dict
Dictionary of parameters.
"""
# Copy the default parameters
params = copy.deepcopy(vis_params_dict)
# For all the supplied arguments
for key, value in inputs.items():
# Replace the default parameter with that provided
params[key] = value
return params
@staticmethod
def refresh_dist_params(opt_params, params):
"""
Calculate various parameters and distributions
Returns
-------
Various
Assigns parameters to the object
"""
# Cost of carry as risk free rate less dividend yield
params['b'] = opt_params['r'] - opt_params['q']
params['carry'] = np.exp(
(params['b'] - opt_params['r']) * opt_params['T'])
params['discount'] = np.exp(-opt_params['r'] * opt_params['T'])
with np.errstate(divide='ignore'):
params['d1'] = (
(np.log(opt_params['S'] / opt_params['K'])
+ (params['b'] + (0.5 * opt_params['sigma'] ** 2))
* opt_params['T'])
/ (opt_params['sigma'] * np.sqrt(opt_params['T'])))
params['d2'] = (
(np.log(opt_params['S'] / opt_params['K'])
+ (params['b'] - (0.5 * opt_params['sigma'] ** 2))
* opt_params['T'])
/ (opt_params['sigma'] * np.sqrt(opt_params['T'])))
# standardised normal density function
params['nd1'] = (
(1 / np.sqrt(2 * np.pi)) * (np.exp(-params['d1'] ** 2 * 0.5)))
# Cumulative normal distribution function
params['Nd1'] = si.norm.cdf(params['d1'], 0.0, 1.0)
params['minusNd1'] = si.norm.cdf(-params['d1'], 0.0, 1.0)
params['Nd2'] = si.norm.cdf(params['d2'], 0.0, 1.0)
params['minusNd2'] = si.norm.cdf(-params['d2'], 0.0, 1.0)
return params
@staticmethod
def refresh_combo_params(params, inputs):
"""
Set parameters for use in various pricing functions
Parameters
----------
        params : Dict
            Dictionary of parameters.
        inputs : Dict
            Dictionary of arguments supplied by the calling method,
            used to refresh the data.
Returns
-------
Various
Runs methods to fix input parameters and reset defaults
if no data provided
"""
default_values = copy.deepcopy(vis_params_dict)
# Certain combo payoffs (found in the mod_payoffs list) require
# specific default parameters
if params['combo_payoff'] in params['mod_payoffs']:
# For each parameter in the combo parameters dictionary
# corresponding to this combo payoff
for parameter in params[
'combo_parameters'][params['combo_payoff']]:
# If the parameter has been supplied as an input
if parameter in inputs.keys():
# Set this value in the parameter dictionary
params[parameter] = inputs[parameter]
# Otherwise if the parameter is in the mod_params list
elif parameter in params['mod_params']:
# Try to extract this from the combo dict default
try:
params[parameter] = params['combo_dict'][str(
params['combo_payoff'])][str(parameter)]
# Otherwise
except KeyError:
# Set to the standard default value
params[parameter] = default_values[str(parameter)]
# Otherwise
else:
# Set to the standard default value
params[parameter] = default_values[str(parameter)]
# For all the other combo_payoffs
else:
# For each parameter in the combo parameters dictionary
# corresponding to this combo payoff
for parameter in params[
'combo_parameters'][params['combo_payoff']]:
# If the parameter has been supplied as an input
if parameter in inputs.keys():
# Set this value in the parameter dictionary
params[parameter] = inputs[parameter]
# Otherwise
else:
# Set to the standard default value
params[parameter] = default_values[str(parameter)]
return params
@staticmethod
def barrier_factors(params):
"""
Calculate the barrier option specific parameters
Returns
-------
Various
Assigns parameters to the object
"""
# Cost of carry as risk free rate less dividend yield
params['b'] = params['r'] - params['q']
barrier_factors = {}
barrier_factors['mu'] = ((
params['b'] - ((params['sigma'] ** 2) / 2))
/ (params['sigma'] ** 2))
barrier_factors['lambda'] = (
np.sqrt(barrier_factors['mu'] ** 2 + (
(2 * params['r']) / params['sigma'] ** 2)))
barrier_factors['z'] = (
(np.log(params['H'] / params['S'])
/ (params['sigma'] * np.sqrt(params['T'])))
+ (barrier_factors['lambda']
* params['sigma']
* np.sqrt(params['T'])))
barrier_factors['x1'] = (
np.log(params['S'] / params['K'])
/ (params['sigma'] * np.sqrt(params['T']))
+ ((1 + barrier_factors['mu'])
* params['sigma']
* np.sqrt(params['T'])))
barrier_factors['x2'] = (
np.log(params['S'] / params['H'])
/ (params['sigma']
* np.sqrt(params['T']))
+ ((1 + barrier_factors['mu'])
* params['sigma']
* np.sqrt(params['T'])))
barrier_factors['y1'] = (
np.log((params['H'] ** 2)
/ (params['S']
* params['K']))
/ (params['sigma']
* np.sqrt(params['T']))
+ ((1 + barrier_factors['mu'])
* params['sigma']
* np.sqrt(params['T'])))
barrier_factors['y2'] = (
np.log(params['H'] / params['S'])
/ (params['sigma']
* np.sqrt(params['T']))
+ ((1 + barrier_factors['mu'])
* params['sigma']
* np.sqrt(params['T'])))
params['carry'] = np.exp((params['b'] - params['r']) * params['T'])
barrier_factors['A'] = (
(params['phi']
* params['S']
* params['carry']
* si.norm.cdf((params['phi']
* barrier_factors['x1']), 0.0, 1.0))
- (params['phi']
* params['K']
* np.exp(-params['r']
* params['T'])
* si.norm.cdf(
((params['phi'] * barrier_factors['x1'])
- (params['phi'] * params['sigma']
* np.sqrt(params['T']))), 0.0, 1.0)))
barrier_factors['B'] = (
(params['phi']
* params['S']
* params['carry']
* si.norm.cdf((params['phi']
* barrier_factors['x2']), 0.0, 1.0))
- (params['phi']
* params['K']
* np.exp(-params['r']
* params['T'])
* si.norm.cdf(
((params['phi'] * barrier_factors['x2'])
- (params['phi'] * params['sigma']
* np.sqrt(params['T']))), 0.0, 1.0)))
barrier_factors['C'] = (
(params['phi']
* params['S']
* params['carry']
* ((params['H'] / params['S'])
** (2 * (barrier_factors['mu'] + 1)))
* si.norm.cdf((params['eta']
* barrier_factors['y1']), 0.0, 1.0))
- (params['phi']
* params['K']
* np.exp(-params['r']
* params['T'])
* ((params['H'] / params['S'])
** (2 * barrier_factors['mu']))
* si.norm.cdf(
((params['eta'] * barrier_factors['y1'])
- (params['eta'] * params['sigma']
* np.sqrt(params['T']))), 0.0, 1.0)))
barrier_factors['D'] = (
(params['phi'] * params['S'] * params['carry']
* ((params['H'] / params['S'])
** (2 * (barrier_factors['mu'] + 1)))
* si.norm.cdf((params['eta']
* barrier_factors['y2']), 0.0, 1.0))
- (params['phi']
* params['K']
* np.exp(-params['r']
* params['T'])
* ((params['H'] / params['S'])
** (2 * barrier_factors['mu']))
* si.norm.cdf(
((params['eta'] * barrier_factors['y2'])
- (params['eta'] * params['sigma']
* np.sqrt(params['T']))), 0.0, 1.0)))
barrier_factors['E'] = (
(params['R'] * np.exp(-params['r'] * params['T']))
* (si.norm.cdf(
((params['eta'] * barrier_factors['x2'])
- (params['eta']
* params['sigma']
* np.sqrt(params['T']))), 0.0, 1.0)
- (((params['H'] / params['S'])
** (2 * barrier_factors['mu']))
* si.norm.cdf(
((params['eta'] * barrier_factors['y2'])
- (params['eta'] * params['sigma']
* np.sqrt(params['T']))), 0.0, 1.0))))
barrier_factors['F'] = (
params['R'] * (((params['H'] / params['S'])
** (barrier_factors['mu']
+ barrier_factors['lambda']))
* (si.norm.cdf((params['eta']
* barrier_factors['z']), 0.0, 1.0))
+ (((params['H'] / params['S'])
** (barrier_factors['mu']
- barrier_factors['lambda']))
* si.norm.cdf(
((params['eta'] * barrier_factors['z'])
- (2 * params['eta']
* barrier_factors['lambda']
* params['sigma']
* np.sqrt(params['T']))), 0.0, 1.0))))
return barrier_factors, params
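

# --- Illustrative usage sketch (not part of the original module) ---
# Assumes vis_params_dict supplies sensible defaults; the option inputs below
# (spot, strike, expiry, rates, vol) are arbitrary example values.
if __name__ == "__main__":
    opt_params = {'S': 100.0, 'K': 105.0, 'T': 0.5, 'r': 0.01, 'q': 0.0, 'sigma': 0.2}
    params = Utils._init_params({})                      # start from the library defaults
    params = Utils.refresh_dist_params(opt_params, params)
    print(params['d1'], params['d2'], params['Nd1'])     # Black-Scholes intermediates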
| 2.65625 | 3 |
C-logging/logging_example3.py | johnehunt/PythonCleanCode | 1 | 12785681 | # Using the logger.exception() method
import logging
logger = logging.getLogger()
try:
print('starting')
x = 1 / 0
print(x)
except Exception:
logger.exception('an exception message')
print('Done')
| 3.09375 | 3 |
examples/pytorch/gin/gin.py | ydwu4/dgl-hack | 0 | 12785682 | <gh_stars>0
"""
How Powerful are Graph Neural Networks
https://arxiv.org/abs/1810.00826
https://openreview.net/forum?id=ryGs6iA5Km
Author's implementation: https://github.com/weihua916/powerful-gnns
"""
import torch
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import dgl
from dgl.nn.pytorch.conv import GINConv
from dgl.nn.pytorch.glob import SumPooling, AvgPooling, MaxPooling
import egl
from egl import ContextManager
from dgl.utils import expand_as_pair
class ApplyNodeFunc(nn.Module):
"""Update the node feature hv with MLP, BN and ReLU."""
def __init__(self, mlp):
super(ApplyNodeFunc, self).__init__()
self.mlp = mlp
self.bn = nn.BatchNorm1d(self.mlp.output_dim)
def forward(self, h):
h = self.mlp(h)
h = self.bn(h)
h = F.relu(h)
return h
class MLP(nn.Module):
"""MLP with linear output"""
def __init__(self, num_layers, input_dim, hidden_dim, output_dim):
"""MLP layers construction
        Parameters
        ----------
num_layers: int
The number of linear layers
input_dim: int
The dimensionality of input features
hidden_dim: int
The dimensionality of hidden units at ALL layers
output_dim: int
The number of classes for prediction
"""
super(MLP, self).__init__()
self.linear_or_not = True # default is linear model
self.num_layers = num_layers
self.output_dim = output_dim
if num_layers < 1:
raise ValueError("number of layers should be positive!")
elif num_layers == 1:
# Linear model
self.linear = nn.Linear(input_dim, output_dim)
else:
# Multi-layer model
self.linear_or_not = False
self.linears = torch.nn.ModuleList()
self.batch_norms = torch.nn.ModuleList()
self.linears.append(nn.Linear(input_dim, hidden_dim))
for layer in range(num_layers - 2):
self.linears.append(nn.Linear(hidden_dim, hidden_dim))
self.linears.append(nn.Linear(hidden_dim, output_dim))
for layer in range(num_layers - 1):
self.batch_norms.append(nn.BatchNorm1d((hidden_dim)))
def forward(self, x):
if self.linear_or_not:
# If linear model
return self.linear(x)
else:
# If MLP
h = x
for i in range(self.num_layers - 1):
h = F.relu(self.batch_norms[i](self.linears[i](h)))
return self.linears[-1](h)
class GIN(nn.Module):
"""GIN model"""
def __init__(self, num_layers, num_mlp_layers, input_dim, hidden_dim,
output_dim, final_dropout, learn_eps, graph_pooling_type,
neighbor_pooling_type):
"""model parameters setting
        Parameters
        ----------
num_layers: int
The number of linear layers in the neural network
num_mlp_layers: int
The number of linear layers in mlps
input_dim: int
The dimensionality of input features
hidden_dim: int
The dimensionality of hidden units at ALL layers
output_dim: int
The number of classes for prediction
final_dropout: float
dropout ratio on the final linear layer
learn_eps: boolean
If True, learn epsilon to distinguish center nodes from neighbors
If False, aggregate neighbors and center nodes altogether.
neighbor_pooling_type: str
how to aggregate neighbors (sum, mean, or max)
graph_pooling_type: str
how to aggregate entire nodes in a graph (sum, mean or max)
"""
super(GIN, self).__init__()
self.num_layers = num_layers
self.learn_eps = learn_eps
# List of MLPs
self.ginlayers = torch.nn.ModuleList()
self.batch_norms = torch.nn.ModuleList()
for layer in range(self.num_layers - 1):
if layer == 0:
mlp = MLP(num_mlp_layers, input_dim, hidden_dim, hidden_dim)
else:
mlp = MLP(num_mlp_layers, hidden_dim, hidden_dim, hidden_dim)
self.ginlayers.append(
GINConv(ApplyNodeFunc(mlp), neighbor_pooling_type, 0, self.learn_eps))
self.batch_norms.append(nn.BatchNorm1d(hidden_dim))
# Linear function for graph poolings of output of each layer
# which maps the output of different layers into a prediction score
self.linears_prediction = torch.nn.ModuleList()
for layer in range(num_layers):
if layer == 0:
self.linears_prediction.append(
nn.Linear(input_dim, output_dim))
else:
self.linears_prediction.append(
nn.Linear(hidden_dim, output_dim))
self.drop = nn.Dropout(final_dropout)
if graph_pooling_type == 'sum':
self.pool = SumPooling()
elif graph_pooling_type == 'mean':
self.pool = AvgPooling()
elif graph_pooling_type == 'max':
self.pool = MaxPooling()
else:
raise NotImplementedError
def forward(self, g, h):
# list of hidden representation at each layer (including input)
hidden_rep = [h]
for i in range(self.num_layers - 1):
h = self.ginlayers[i](g, h)
h = self.batch_norms[i](h)
h = F.relu(h)
hidden_rep.append(h)
score_over_layer = 0
# perform pooling over all nodes in each graph in every layer
for i, h in enumerate(hidden_rep):
pooled_h = self.pool(g, h)
score_over_layer += self.drop(self.linears_prediction[i](pooled_h))
return score_over_layer
class EglGINConv(nn.Module):
r"""Graph Isomorphism Network layer from paper `How Powerful are Graph
Neural Networks? <https://arxiv.org/pdf/1810.00826.pdf>`__.
.. math::
h_i^{(l+1)} = f_\Theta \left((1 + \epsilon) h_i^{l} +
\mathrm{aggregate}\left(\left\{h_j^{l}, j\in\mathcal{N}(i)
\right\}\right)\right)
Parameters
----------
apply_func : callable activation function/layer or None
If not None, apply this function to the updated node feature,
the :math:`f_\Theta` in the formula.
aggregator_type : str
Aggregator type to use (``sum``, ``max`` or ``mean``).
init_eps : float, optional
Initial :math:`\epsilon` value, default: ``0``.
learn_eps : bool, optional
If True, :math:`\epsilon` will be a learnable parameter.
"""
def __init__(self,
apply_func,
aggregator_type,
init_eps=0,
learn_eps=False):
super(EglGINConv, self).__init__()
self.apply_func = apply_func
self.aggregator_type = aggregator_type
# to specify whether eps is trainable or not.
if learn_eps:
self.eps = th.nn.Parameter(th.FloatTensor([init_eps]))
else:
self.register_buffer('eps', th.FloatTensor([init_eps]))
self.cm = ContextManager(dgl.backend.run_egl)
def forward(self, graph, feat):
r"""Compute Graph Isomorphism Network layer.
Parameters
----------
graph : DGLGraph
The graph.
feat : torch.Tensor or pair of torch.Tensor
If a torch.Tensor is given, the input feature of shape :math:`(N, D_{in})` where
:math:`D_{in}` is size of input feature, :math:`N` is the number of nodes.
If a pair of torch.Tensor is given, the pair must contain two tensors of shape
:math:`(N_{in}, D_{in})` and :math:`(N_{out}, D_{in})`.
If ``apply_func`` is not None, :math:`D_{in}` should
fit the input dimensionality requirement of ``apply_func``.
Returns
-------
torch.Tensor
The output feature of shape :math:`(N, D_{out})` where
:math:`D_{out}` is the output dimensionality of ``apply_func``.
If ``apply_func`` is None, :math:`D_{out}` should be the same
as input dimensionality.
"""
graph = graph.local_var()
dgl_context = dgl.utils.to_dgl_context(feat.device)
g = graph._graph.get_immutable_gidx(dgl_context)
feat_src, feat_dst = expand_as_pair(feat)
with self.cm.zoomIn(namespace=[self, torch], graph=g, node_feats={'fsrc':feat_src, 'fdst':feat_dst}) as v:
if self.aggregator_type == 'sum':
rst = sum([nb.fsrc for nb in v.innbs])
elif self.aggregator_type == 'mean':
rst = self.cm.mean([nb.fsrc for nb in v.innbs])
elif self.aggregator_type == 'max':
rst = self.cm.max([nb.fsrc for nb in v.innbs])
else:
                raise NotImplementedError('Cannot find aggregator type', self.aggregator_type)
# Temp workaround for rst = (1 + self.eps) * v.fdst + rst
rst = v.fdst + self.eps * v.fdst + rst
self.cm.collect_output(rst)
rst = self.cm.zoomOut()
if self.apply_func is not None:
rst = self.apply_func(rst)
return rst
class EglGIN(nn.Module):
"""EglGIN model"""
def __init__(self, num_layers, num_mlp_layers, input_dim, hidden_dim,
output_dim, final_dropout, learn_eps, graph_pooling_type,
neighbor_pooling_type):
"""model parameters setting
        Parameters
        ----------
num_layers: int
The number of linear layers in the neural network
num_mlp_layers: int
The number of linear layers in mlps
input_dim: int
The dimensionality of input features
hidden_dim: int
The dimensionality of hidden units at ALL layers
output_dim: int
The number of classes for prediction
final_dropout: float
dropout ratio on the final linear layer
learn_eps: boolean
If True, learn epsilon to distinguish center nodes from neighbors
If False, aggregate neighbors and center nodes altogether.
neighbor_pooling_type: str
how to aggregate neighbors (sum, mean, or max)
graph_pooling_type: str
how to aggregate entire nodes in a graph (sum, mean or max)
"""
super(EglGIN, self).__init__()
self.num_layers = num_layers
self.learn_eps = learn_eps
# List of MLPs
self.ginlayers = torch.nn.ModuleList()
self.batch_norms = torch.nn.ModuleList()
for layer in range(self.num_layers - 1):
if layer == 0:
mlp = MLP(num_mlp_layers, input_dim, hidden_dim, hidden_dim)
else:
mlp = MLP(num_mlp_layers, hidden_dim, hidden_dim, hidden_dim)
self.ginlayers.append(
EglGINConv(ApplyNodeFunc(mlp), neighbor_pooling_type, 0, self.learn_eps))
self.batch_norms.append(nn.BatchNorm1d(hidden_dim))
# Linear function for graph poolings of output of each layer
# which maps the output of different layers into a prediction score
self.linears_prediction = torch.nn.ModuleList()
for layer in range(num_layers):
if layer == 0:
self.linears_prediction.append(
nn.Linear(input_dim, output_dim))
else:
self.linears_prediction.append(
nn.Linear(hidden_dim, output_dim))
self.drop = nn.Dropout(final_dropout)
if graph_pooling_type == 'sum':
self.pool = SumPooling()
elif graph_pooling_type == 'mean':
self.pool = AvgPooling()
elif graph_pooling_type == 'max':
self.pool = MaxPooling()
else:
raise NotImplementedError
def forward(self, g, h):
# list of hidden representation at each layer (including input)
hidden_rep = [h]
for i in range(self.num_layers - 1):
h = self.ginlayers[i](g, h)
h = self.batch_norms[i](h)
h = F.relu(h)
hidden_rep.append(h)
score_over_layer = 0
# perform pooling over all nodes in each graph in every layer
for i, h in enumerate(hidden_rep):
pooled_h = self.pool(g, h)
score_over_layer += self.drop(self.linears_prediction[i](pooled_h))
return score_over_layer
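

# --- Illustrative smoke test (not part of the original file) ---
# Assumes the classic DGLGraph construction API available in the DGL fork this
# repo targets; all sizes below are arbitrary example values.
if __name__ == '__main__':
    g = dgl.DGLGraph()
    g.add_nodes(6)
    g.add_edges([0, 1, 2, 3, 4, 5], [1, 2, 3, 4, 5, 0])
    feat = torch.randn(6, 10)
    model = GIN(num_layers=3, num_mlp_layers=2, input_dim=10, hidden_dim=16,
                output_dim=2, final_dropout=0.5, learn_eps=False,
                graph_pooling_type='sum', neighbor_pooling_type='sum')
    print(model(g, feat).shape)  # one row of class scores per graph, e.g. (1, 2)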
| 3.265625 | 3 |
motor_controls/getArduinoData2.py | ArifSohaib/AutonomousRobotChallenge | 13 | 12785683 | # import curses and GPIO
import curses
import serial
import time
from picamera.array import PiRGBArray
from picamera import PiCamera
import cv2
import numpy as np
ser = serial.Serial("/dev/ttyUSB0", "9600")
serLidar = serial.Serial("/dev/ttyACM0", "115200")
cap = cv2.VideoCapture(0)
piCam = False
#check if picamera exists
try:
camera = PiCamera()
camera.resolution = (224,224)
camera.framerate = 20
rawCapture = PiRGBArray(camera, size=(224,224))
piCam = True
except:
print("Pi camera does not exist, using USB camera")
# Get the curses window, turn off echoing of keyboard to screen, turn on
# instant (no waiting) key response, and use special values for cursor keys
screen = curses.initscr()
curses.noecho()
curses.cbreak()
screen.keypad(True)
keyRec = open('key_strokes.txt','w+')
train_data = []
try:
while True:
distString = serLidar.readline()
dist = 1000
try:
dist = int(distString.decode("utf-8"))
except:
print("can't convert dist")
if piCam == True:
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
image_np = np.array(frame.array)
rawCapture.truncate(0)
char = screen.getch()
key = [0,0,0,0,1]
if char == ord('x'):
np.save("train_data.npy", train_data)
ser.write(b'5')
keyRec.close()
curses.nocbreak(); screen.keypad(0); curses.echo()
curses.endwin()
break
elif char == ord('w') and dist > 100:
ser.write(b'1')
key = [1,0,0,0,0]
elif char == ord('s') and dist > 100:
ser.write(b'2')
key = [0,1,0,0,0]
elif char == ord('a') and dist > 100:
ser.write(b'3')
key = [0,0,1,0,0]
elif char == ord('d') and dist > 100:
ser.write(b'4')
key = [0,0,0,1,0]
elif char == ord(' '):
ser.write(b'5')
key = [0,0,0,0,1]
val_dict = {"input":key, "image":image_np}
train_data.append(val_dict)
keyRec.write(str(key)+"\n")
if len(train_data) % 100 == 0:
np.save("train_data.npy", train_data)
#no pi camera, using USB
else:
ret, image_np = cap.read()
char = screen.getch()
key = [0,0,0,0,1]
if char == ord('x'):
np.save("train_data.npy", train_data)
ser.write(b'5')
keyRec.close()
curses.nocbreak(); screen.keypad(0); curses.echo()
curses.endwin()
break
elif char == ord('w') and dist > 100:
ser.write(b'1')
key = [1,0,0,0,0]
elif char == ord('s') and dist > 100:
ser.write(b'2')
key = [0,1,0,0,0]
elif char == ord('a') and dist > 100:
ser.write(b'3')
key = [0,0,1,0,0]
elif char == ord('d') and dist > 100:
ser.write(b'4')
key = [0,0,0,1,0]
elif char == ord(' '):
ser.write(b'5')
key = [0,0,0,0,1]
val_dict = {"input":key, "image":image_np}
train_data.append(val_dict)
keyRec.write(str(key)+"\n")
if len(train_data) % 100 == 0:
np.save("train_data.npy", train_data)
finally:
#Close down curses properly, inc turn echo back on!
keyRec.close()
curses.nocbreak(); screen.keypad(0); curses.echo()
curses.endwin()
| 2.796875 | 3 |
mitmirror/data/security/authorization.py | Claayton/mitmirror-api | 0 | 12785684 | <reponame>Claayton/mitmirror-api
"""Caso de uso: Authorization"""
from typing import Type
from fastapi import Request as RequestFastApi
import jwt
from mitmirror.data.interfaces import UserRepositoryInterface
from mitmirror.domain.usecases import AuthorizationInterface
from mitmirror.errors import HttpUnauthorized, HttpForbidden
from mitmirror.config import SECRET_KEY
class Authorization(AuthorizationInterface):
"""
    Class responsible for authorizing users in the system.
"""
def __init__(self, user_repository: Type[UserRepositoryInterface]) -> None:
self.__user_repository = user_repository
def token_required(self, request: RequestFastApi):
"""
        Serves as a dependency for routes that require authorization.
        :param request: Request that must carry the Authorization header with the access token.
        :return: the authenticated user's information.
"""
try:
token = request.headers["Authorization"]
except (KeyError, TypeError) as error:
raise HttpUnauthorized(
message="Essa requisicao necessita de um token de acesso!, error"
) from error
try:
data = jwt.decode(jwt=token, key=SECRET_KEY, algorithms="HS256")
current_user = self.__user_repository.get_user(email=data["email"])
except Exception as error: # pylint: disable=W0703
            raise HttpForbidden(message="Token invalido ou expirado!") from error
return current_user
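

# --- Illustrative usage sketch (not part of the original module) ---
# Shows how token_required can be wired as a FastAPI dependency.
# _FakeUserRepository and the route path are assumptions made for this sketch.
if __name__ == "__main__":
    from fastapi import Depends, FastAPI

    class _FakeUserRepository:
        """Minimal stand-in for a UserRepositoryInterface implementation."""

        def get_user(self, email: str):
            return {"email": email}

    app = FastAPI()
    authorization = Authorization(user_repository=_FakeUserRepository())

    @app.get("/me")
    def who_am_i(current_user=Depends(authorization.token_required)):
        return {"user": current_user}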
| 2.390625 | 2 |
examples/tridimensional.py | rennelou/rust-fdmbpm | 0 | 12785685 | <reponame>rennelou/rust-fdmbpm<filename>examples/tridimensional.py
import h5py
import optical_waveguide
import numpy as np
import matplotlib.pyplot as plt
origin = 'lower'
lines = 50
output_filename = "tridimensional_result.h5"
x_axis = optical_waveguide.get_axis(40, 0.4)
y_axis = optical_waveguide.get_axis(40, 0.4)
z_axis = optical_waveguide.get_axis(200, 0.5)
core = optical_waveguide.get_core(3.377, 3.38, 8, 20, 20)
beam = optical_waveguide.get_beam(5.4636, 0.0, 4, 20, 20)
simulation = optical_waveguide.get_simulation(core, beam, z_axis, x_axis, y_axis)
optical_waveguide.run(simulation, output_filename)
with h5py.File(output_filename, "r") as f:
[zdelta, ydelta, xdelta] = f['deltas'][()]
data = f['intensity'][()]
core = f['core'][()]
xdepht = data[0][0].size
ydepht = data[0].size / xdepht
zdepht = data.size / (ydepht * xdepht)
y = np.arange(0., ydepht * ydelta, ydelta)
x = np.arange(0., xdepht * xdelta, xdelta)
X, Y = np.meshgrid(x, y)
zstep = zdepht / 4
fig1, axs = plt.subplots(1, 4, constrained_layout=True)
for i in range(4):
index = int(i * zstep)
Z = data[index]
ax = axs[i]
cs = ax.contourf(X, Y, Z, 10, origin=origin)
cs1 = ax.contour(cs, levels=cs.levels[::2], origin=origin)
C = core[index]
cs3 = ax.contour(X, Y, C, origin=origin)
if i == 3:
        cbar = fig1.colorbar(cs, ax=ax)  # intensity colorbar
cbar.ax.set_ylabel('intensity')
cbar.add_lines(cs1)
plt.show()
| 2.078125 | 2 |
applications/tenant/fields.py | dev-easyshares/mighty | 0 | 12785686 | <reponame>dev-easyshares/mighty
tenant = ('group', 'user', 'sync', 'invitation', 'roles')
#tenant_alternate = ('tenant', 'alternate', 'position')
role = ('group', 'name', 'is_immutable', 'number',)
tenant_invitation = ('group', 'email', 'by', 'roles', 'tenant', 'status', 'token') | 1.34375 | 1 |
src/transformer_deploy/backends/pytorch_utils.py | dumpmemory/transformer-deploy | 698 | 12785687 | <filename>src/transformer_deploy/backends/pytorch_utils.py
# Copyright 2022, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utils related to Pytorch inference.
"""
from typing import Callable, Dict, Tuple
import torch
from torch.onnx import TrainingMode
from transformers import AutoConfig, PreTrainedModel
def infer_classification_pytorch(
model: PreTrainedModel, run_on_cuda: bool
) -> Callable[[Dict[str, torch.Tensor]], torch.Tensor]:
"""
Perform Pytorch inference for classification task
:param model: Pytorch model (transformers)
:param run_on_cuda: True if should be ran on GPU
:return: a function to perform inference
"""
def infer(inputs: Dict[str, torch.Tensor]) -> torch.Tensor:
model_output = model(**inputs).logits.detach() # noqa: F821
if run_on_cuda:
torch.cuda.synchronize()
return model_output
return infer
def infer_feature_extraction_pytorch(
model: PreTrainedModel, run_on_cuda: bool
) -> Callable[[Dict[str, torch.Tensor]], torch.Tensor]:
"""
Perform Pytorch inference for feature extraction task
:param model: Pytorch model (sentence-transformers)
:param run_on_cuda: True if should be ran on GPU
:return: a function to perform inference
"""
def infer(inputs: Dict[str, torch.Tensor]) -> torch.Tensor:
model_output = model(**inputs).detach() # noqa: F821
if run_on_cuda:
torch.cuda.synchronize()
return model_output
return infer
def get_model_size(path: str) -> Tuple[int, int]:
"""
Find number of attention heads and hidden layer size of a model
:param path: path to model
    :return: tuple of # of attention heads and hidden layer size (0 if not found)
"""
config = AutoConfig.from_pretrained(pretrained_model_name_or_path=path)
num_attention_heads = getattr(config, "num_attention_heads", 0)
hidden_size = getattr(config, "hidden_size", 0)
return num_attention_heads, hidden_size
# TODO manage encoder / decoder architecture + cache
def convert_to_onnx(
model_pytorch: torch.nn.Module,
output_path: str,
inputs_pytorch: Dict[str, torch.Tensor],
quantization: bool,
var_output_seq: bool,
) -> None:
"""
Convert a Pytorch model to an ONNX graph by tracing the provided input inside the Pytorch code.
Pytorch sometimes fails to infer output tensor shape of models
In ONNX graph, some axis name may be marked like "Divoutput_dim_1" which is a generated name,
and there may be a warning:
** "WARNING: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference
for the exported graph. Please consider adding it in symbolic function." **
ex.: https://discuss.pytorch.org/t/bidirectional-lstm-and-onnx-runtime-warnings/136374
:param model_pytorch: Pytorch model (transformers)
:param output_path: where to save ONNX file
:param inputs_pytorch: Tensor, can be dummy data, shape is not important as we declare all axes as dynamic.
        Should be on the same device as the model (CPU or GPU)
:param quantization: model is quantized
:param var_output_seq: variable size sequence
"""
if quantization:
try:
from pytorch_quantization.nn import TensorQuantizer
except ImportError:
raise ImportError(
"It seems that pytorch-quantization is not yet installed. "
"It is required when you enable the quantization flag and use CUDA device."
"Please find installation instructions on "
"https://github.com/NVIDIA/TensorRT/tree/main/tools/pytorch-quantization or use:\n"
"pip3 install git+ssh://[email protected]/NVIDIA/TensorRT#egg=pytorch-quantization\\&"
"subdirectory=tools/pytorch-quantization/"
)
TensorQuantizer.use_fb_fake_quant = True
if hasattr(model_pytorch, "config") and hasattr(model_pytorch.config, "use_cache"):
use_cache = getattr(model_pytorch.config, "use_cache")
setattr(model_pytorch.config, "use_cache", False)
# dynamic axis == variable length axis
dynamic_axis = dict()
for k in inputs_pytorch.keys():
if var_output_seq:
# seq axis name is fixed to be matched with output seq axis name (for output shape prediction)
dynamic_axis[k] = {0: "batch_size", 1: "sequence"}
else:
# if there is no specific requirement, each axis name is unique, fix some issue on T5 model
dynamic_axis[k] = {0: "batch_size", 1: f"sequence-{k}"}
dynamic_axis["output"] = {0: "batch_size"}
if var_output_seq:
dynamic_axis["output"][1] = "sequence"
# replace int64 input tensors by int32 -> for ONNX Runtime binding API and expected by TensorRT engine
for k, v in inputs_pytorch.items():
if not isinstance(v, torch.Tensor):
continue
if v.dtype in [torch.long, torch.int64]:
inputs_pytorch[k] = v.type(torch.int32)
with torch.no_grad():
torch.onnx.export(
model_pytorch, # model to optimize
args=tuple(inputs_pytorch.values()), # tuple of multiple inputs
f=output_path, # output path / file object
opset_version=13, # the ONNX version to use, >= 13 supports channel quantized model
do_constant_folding=True, # simplify model (replace constant expressions)
input_names=list(inputs_pytorch.keys()), # input names
output_names=["output"], # output axis name, hard coded so only 1 output supported
            dynamic_axes=dynamic_axis,  # declare dynamic axes for each input / output
training=TrainingMode.EVAL, # always put the model in evaluation mode
verbose=False,
)
if quantization:
TensorQuantizer.use_fb_fake_quant = False
if hasattr(model_pytorch, "config") and hasattr(model_pytorch.config, "use_cache"):
setattr(model_pytorch.config, "use_cache", use_cache)
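

# --- Illustrative usage sketch (not part of the original module) ---
# Exports a Hugging Face classifier to ONNX; the model name, output file name
# and example sentence are arbitrary choices made for this sketch.
if __name__ == "__main__":
    from transformers import AutoModelForSequenceClassification, AutoTokenizer

    model_name = "bert-base-uncased"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSequenceClassification.from_pretrained(model_name)
    encodings = tokenizer("hello world", return_tensors="pt")
    inputs_pytorch = {
        "input_ids": encodings["input_ids"],
        "attention_mask": encodings["attention_mask"],
    }
    convert_to_onnx(
        model_pytorch=model,
        output_path="model.onnx",
        inputs_pytorch=inputs_pytorch,
        quantization=False,
        var_output_seq=False,
    )
    print(get_model_size(model_name))  # (num_attention_heads, hidden_size)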
| 2 | 2 |
lucid4keras/optimizers.py | smgpulse007/lucid4keras | 15 | 12785688 | <reponame>smgpulse007/lucid4keras
import numpy as np
def rmsprop(grads, cache=None, decay_rate=0.95):
if cache is None:
cache = np.zeros_like(grads)
cache = decay_rate * cache + (1 - decay_rate) * grads ** 2
step = -grads / np.sqrt(cache + 1e-8)
return step, cache
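# Illustrative usage (assumption: `grads` is a NumPy array of gradients of some
# objective with respect to the image being optimized):
#   step, cache = rmsprop(grads, cache)   # carry `cache` across iterations
#   image += learning_rate * step         # `step` already carries the minus sign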
def adam(grads, cache_m, cache_v, iters, lr, beta1, beta2):
if cache_m is None:
cache_m = np.zeros_like(grads)
if cache_v is None:
cache_v = np.zeros_like(grads)
iters += 1
cache_m = (beta1 * cache_m) + (1. - beta1) * grads
cache_v = (beta2 * cache_v) + (1 - beta2) * (grads * grads)
mc = cache_m / (1. - (beta1 ** iters))
vc = cache_v / (1. - (beta2 ** iters))
lr_t = lr * np.sqrt(1. - (beta2 ** iters)) / (1. - (beta1 ** iters)) #learning rate
step = (lr_t * mc)/ (np.sqrt(vc) + 1e-8)
return step, cache_m, cache_v, iters | 2.296875 | 2 |
gs/group/messages/topic/base/latestpost.py | groupserver/gs.group.messages.topic.base | 0 | 12785689 | # -*- coding: utf-8 -*-
############################################################################
#
# Copyright © 2013, 2015 OnlineGroups.net and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
############################################################################
from __future__ import absolute_import, unicode_literals
from zope.cachedescriptors.property import Lazy
from zope.component import createObject
from gs.group.base import GroupViewlet
class LatestPost(GroupViewlet):
def __init__(self, messages, request, view, manager):
super(LatestPost, self).__init__(messages, request, view, manager)
@Lazy
def topic(self):
retval = [post for post in self.view.topic if not(post['hidden'])]
return retval
@Lazy
def relativeUrl(self):
retval = ''
if self.topic:
lastPost = self.topic[-1]
url = '{groupUrl}/messages/topic/{lastPostId}/'\
'#post-{lastPostId}'
retval = url.format(groupUrl=self.groupInfo.relativeURL,
lastPostId=lastPost['post_id'])
return retval
@Lazy
def authorInfo(self):
if self.topic:
lastPost = self.topic[-1]
authorId = lastPost['author_id']
else:
authorId = ''
retval = createObject('groupserver.UserFromId', self.context,
authorId)
return retval
@Lazy
def lastPostDate(self):
retval = None
if self.topic:
retval = self.topic[-1]['date']
return retval
@Lazy
def show(self):
retval = len(self.topic) > 1
return retval
| 1.960938 | 2 |
reward/batcher/prioritized_replay_batcher.py | lgvaz/torchrl | 5 | 12785690 | <gh_stars>1-10
import reward.utils as U
from reward.batcher import ReplayBatcher
from reward.utils.buffers import PrReplayBuffer
# TODO: Fix args and kwargs
class PrReplayBatcher(ReplayBatcher):
def __init__(
self,
*args,
min_pr=0.01,
pr_factor=0.6,
is_factor=1.,
rbuff_fn=PrReplayBuffer,
**kwargs
):
self.min_pr = min_pr
self._pr_factor = pr_factor
self._is_factor = is_factor
# TODO: Remove *args and **kwargs
super().__init__(*args, rbuff_fn=rbuff_fn, **kwargs)
def _create_rbuff(self, rbuff_fn):
return rbuff_fn(
maxlen=self.maxlen,
num_envs=self.runner.num_envs,
min_pr=self.min_pr,
pr_factor=self._pr_factor,
is_factor=self._is_factor,
)
@property
def pr_factor(self):
return self.rbuff.get_pr_factor(step=self.num_steps)
@property
def is_factor(self):
return self.rbuff.get_is_factor(step=self.num_steps)
def update_pr(self, idx, pr):
self.rbuff.update_pr(idx=idx, pr=pr, step=self.num_steps)
def get_is_weight(self, idx):
return self.rbuff.get_is_weight(idx=idx, step=self.num_steps)
def write_logs(self, logger):
super().write_logs(logger=logger)
logger.add_log("ExpReplay/alpha", self.pr_factor)
logger.add_log("ExpReplay/beta", self.is_factor)
| 2.109375 | 2 |
templateMatching_classes.py | Fs-agadir/StageDetect | 0 | 12785691 | # Copyright (c) 2021, Fs-Agadir
# All rights reserved.
import numpy as np
import pylab as plt
import cv2
#perform template matching in images to detect GCPs
class templateMatch:
#define template position and size and search area position
def __init__(self, template_size_x=300, template_size_y=300,
search_area_x=0, search_area_y=0, plot_results=False):
self.template_size_x = template_size_x
self.template_size_y = template_size_y
self.search_area_x = search_area_x
self.search_area_y = search_area_y
self.plot_results=plot_results
#define template at image point position (of corresponding GCP)
def getTemplateAtImgpoint(self, img, img_pts, template_width=10, template_height=10):
#consideration that row is y and column is x
        #note: the template extends symmetrically around the point of interest
template_img = []
anchor_pts = []
for pt in img_pts:
if img_pts.shape[1] > 2:
template_width_for_cut_left = pt[2]/2
template_width_for_cut_right = pt[2]/2 + 1
elif template_width > 0:
template_width_for_cut_left = template_width/2
template_width_for_cut_right = template_width/2 + 1
else:
print 'missing template size assignment'
if img_pts.shape[1] > 2:
template_height_for_cut_lower = pt[3]/2
template_height_for_cut_upper = pt[3]/2 + 1
elif template_height > 0:
template_height_for_cut_lower = template_height/2
template_height_for_cut_upper = template_height/2 + 1
else:
print 'missing template size assignment'
cut_anchor_x = pt[0] - template_width_for_cut_left
cut_anchor_y = pt[1] - template_height_for_cut_lower
            #handle cases where the template reaches the image borders (template gets cut)
if pt[1] + template_height_for_cut_upper > img.shape[0]:
template_height_for_cut_upper = np.int(img.shape[0] - pt[1])
if pt[1] - template_height_for_cut_lower < 0:
template_height_for_cut_lower = np.int(pt[1])
cut_anchor_y = 0
if pt[0] + template_width_for_cut_right > img.shape[1]:
template_width_for_cut_right = np.int(img.shape[1] - pt[0])
if pt[0] - template_width_for_cut_left < 0:
template_width_for_cut_left = np.int(pt[0])
cut_anchor_x = 0
template = img[pt[1]-template_height_for_cut_lower:pt[1]+template_height_for_cut_upper,
pt[0]-template_width_for_cut_left:pt[0]+template_width_for_cut_right]
#template_img = np.dstack((template_img, template))
template_img.append(template)
anchor_pts.append([cut_anchor_x, cut_anchor_y])
anchor_pts = np.asarray(anchor_pts, dtype=np.float32)
#template_img = np.delete(template_img, 0, axis=2)
return template_img, anchor_pts #anchor_pts defines position of lower left of template in image
#template matching for automatic detection of image coordinates of GCPs
def performTemplateMatch(self, img_extracts, template_img, anchor_pts):
new_img_pts = []
template_nbr = 0
count_pts = 0
while template_nbr < len(template_img):
template_array = np.asarray(template_img[template_nbr])
if (type(img_extracts) is list and len(img_extracts) > 1) or (type(img_extracts) is tuple and len(img_extracts.shape) > 2):
img_extract = img_extracts[template_nbr]
else:
img_extract = img_extracts
res = cv2.matchTemplate(img_extract, template_array, cv2.TM_CCORR_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res) #min_loc for TM_SQDIFF
match_position_x = max_loc[0] + template_array.shape[1]/2
match_position_y = max_loc[1] + template_array.shape[0]/2
del min_val, min_loc
if max_val > 0.9:
new_img_pts.append([match_position_x + anchor_pts[template_nbr,0],
match_position_y + anchor_pts[template_nbr,1]])
count_pts = count_pts + 1
template_nbr = template_nbr + 1
if self.plot_results:
plt.subplot(131),plt.imshow(res,cmap = 'gray')
plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
plt.plot(match_position_x-template_array.shape[1]/2, match_position_y-template_array.shape[0]/2, "r.", markersize=10)
plt.subplot(132),plt.imshow(img_extract,cmap = 'gray')
plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
plt.plot(match_position_x, match_position_y, "r.", markersize=10)
plt.subplot(133),plt.imshow(template_array,cmap = 'gray')
plt.title('Template'), plt.xticks([]), plt.yticks([])
plt.show()
new_img_pts = np.asarray(new_img_pts, dtype=np.float32)
new_img_pts = new_img_pts.reshape(count_pts, 2)
return new_img_pts
#perform template matching
def templateMatching(self, img_pts, img_for_template, img_for_search):
#if no search area defined, squared search are triple of template size
if self.search_area_x == 0 and self.search_area_y == 0:
search_area = np.ones((img_pts.shape[0], 2)) * self.template_size_x * 3
#search_area_...: if only in x direction defined, used as same size for y (squared search area)
elif self.search_area_x > 0 and self.search_area_y == 0:
search_area = np.ones((img_pts.shape[0], 2)) * self.search_area_x
elif self.search_area_x == 0 and self.search_area_y > 0:
search_area = np.ones((img_pts.shape[0], 2)) * self.search_area_y
else:
search_area_x = np.ones((img_pts.shape[0], 1)) * self.search_area_x
search_area_y = np.ones((img_pts.shape[0], 1)) * self.search_area_y
search_area = np.hstack((search_area_x.reshape(search_area_x.shape[0],1),
search_area_y.reshape(search_area_y.shape[0],1)))
#if template size defined only in one direction, squared template size are calculated
if self.template_size_x == 30 and self.template_size_y == 30:
template_sizes = np.ones((img_pts.shape[0], 2)) * self.template_size_x
#else side specific template size
else:
template_size_x = np.ones((img_pts.shape[0], 1)) * self.template_size_x
template_size_y = np.ones((img_pts.shape[0], 1)) * self.template_size_y
template_sizes = np.hstack((template_size_x.reshape(template_size_x.shape[0],1),
template_size_y.reshape(template_size_y.shape[0],1)))
#calculate template with corresponding template size
template_prepare = np.hstack((img_pts, template_sizes))
pt_templates, _ = self.getTemplateAtImgpoint(img_for_template, template_prepare)
#approximation of template position for subsequent images
searchArea_perPoint = np.hstack((img_pts, search_area))
searchArea_clip, anchor_pts = self.getTemplateAtImgpoint(img_for_search, searchArea_perPoint)
#perform template matching
img_pts_matched = self.performTemplateMatch(searchArea_clip, pt_templates, anchor_pts)
return img_pts_matched
#quality measure of detected templates considering distance pattern
def pt_distances(self, pts):
#pts: id and x y coordinates of image points (np array)
pt_distances_sum = []
for pt in pts:
pt_for_dist = np.ones((pts.shape[0], pts.shape[1])) * pt
pt_dist = np.sqrt(np.square(pt_for_dist[:,1] - pts[:,1]) + np.square(pt_for_dist[:,2] - pts[:,2]))
pt_distances_sum.append([pt[0], np.sum(pt_dist)])
return pt_distances_sum
#plot matched points into corresponding image
def plot_pts(self, img, points, switchColRow=False, plt_title='', output_save=False, output_img=None,
edgecolor='blue'):
plt.clf()
plt.figure(frameon=False)
plt.gray()
if switchColRow:
plt.plot([p[1] for p in points],
[p[0] for p in points],
marker='o', ms=5, color='none', markeredgecolor=edgecolor, markeredgewidth=1)
else:
plt.plot([p[0] for p in points],
[p[1] for p in points],
marker='o', ms=5, color='none', markeredgecolor=edgecolor, markeredgewidth=1)
plt.title(plt_title)
plt.axis('off')
plt.imshow(img)
if not output_save:
plt.waitforbuttonpress()
plt.close()
else:
plt.savefig(output_img, dpi=600)
| 2.71875 | 3 |
predict.py | hdvvip/AI_Image_Classifier | 0 | 12785692 | <reponame>hdvvip/AI_Image_Classifier
from functions_col import import_func, process_image, imshow, predict, save_checkpoint, load_checkpoint, load_train_data, load_val_data, train_model, get_key_from_value
import argparse
import pandas as pd
import numpy as np
import torch
from torch import nn,optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from workspace_utils import active_session
from PIL import Image
import json
# Main
if __name__ == '__main__':
# Set up ArgumentParser to retrieve data from command line
parser = argparse.ArgumentParser(
description="AI Image Classfication App"
)
# Add parameters
parser.add_argument('data_input', help="Path to Input File for ex: ..../image_07090.jpg")
parser.add_argument('checkpoint', help="File Path where trained model saved to", default='checkpointTrainedModel.pth')
parser.add_argument('--top_k', help="Top Predicted Classes", default=5, type=int)
parser.add_argument('--category_names', help="file hold real name of classes", default='cat_to_name.json')
parser.add_argument('--gpu', help="Train on GPU", action='store_true')
#Parser the arguments
args = parser.parse_args()
# get values parsed from command line
data_input = args.data_input
checkpoint = args.checkpoint
top_k = args.top_k
category_names = args.category_names
gpu = args.gpu
# Get real names
cat_to_name = None
with open(category_names, 'r') as f:
cat_to_name = json.load(f)
    # automatically set up the device to use: gpu or cpu
device = None
# if(torch.cuda.is_available()):
# print("GPU available: YES \nRunning On: GPU \n")
# else:
# print("GPU available: NO \nRunning On: CPU \n")
if (gpu):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
else:
device = torch.device("cpu")
if(gpu and device == torch.device("cuda")):
print("GPU available: YES \nRunning On: GPU \n")
else:
check_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if(check_device == torch.device("cuda")):
print("GPU available: YES \nRunning On: CPU \n")
else:
print("GPU available: NO \nRunning On: CPU \n")
# Load model
model = load_checkpoint(checkpoint)
model.to(device)
# Image path
image_path = data_input
# Get Image Label
label = image_path.split('/')[2]
# Predict the flower class based on iamge_path
top_p, top_class = predict(image_path, model, device, top_k)
# Map the predicted class to real label based on train_data.class_to_idx
# The reason is: train_data = datasets.ImageFolder(train_dir, transform=train_transform)
# train_data map 0,1,2,3,4,5... to images based on the folder position from top to bottom
# NOT the real label of image
# Hence train_data.class_to_idx saved information about real label of the image
# Therefore map the label 0,1,2,3,4,5... back to the real label of the iamge
# model.class_to_idx == train_data.class_to_idx ==> TRUE
lable_name_dictionary = model.class_to_idx
top_class_mapped = [get_key_from_value(lable_name_dictionary, class_name) for class_name in top_class]
# Get predicted top_class_name and real class name
top_class_name = [cat_to_name.get(class_name) for class_name in top_class_mapped]
# label_name = cat_to_name.get(str(label))
print(top_p)
print(top_class_name)
| 2.640625 | 3 |
runtime/stdlib/jitlog.py | cheery/lever | 136 | 12785693 | <gh_stars>100-1000
# There is a convenient PYPYLOG=jit-log-opt:logfile
# to enable jit logging from outside.
# But I like having the option to
# enable it from the inside.
from rpython.rtyper.lltypesystem import rffi, lltype, llmemory
from rpython.rlib.rjitlog import rjitlog
from rpython.rlib import jit
from space import *
import fs
module = Module(u'jitlog', {}, frozen=True)
def builtin(fn):
name = fn.__name__.rstrip('_').decode('utf-8')
module.setattr_force(name, Builtin(fn, name))
return fn
@builtin
@signature(fs.File)
def enable(fileobj):
try:
rjitlog.enable_jitlog(rffi.r_long(fileobj.fd))
except rjitlog.JitlogError as error:
raise unwind(LError(
error.msg.decode('utf-8')))
return null
@builtin
@signature()
@jit.dont_look_inside
def disable():
rjitlog.disable_jitlog()
return null
| 1.8125 | 2 |
tests/integration/operators_test/asinh_test.py | gglin001/popart | 61 | 12785694 | # Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import numpy as np
import popart
import torch
from op_tester import op_tester
def test_asinh(op_tester):
# create test data
# Notice: as asinh(x) = ln(x + sqrt(x^2 + 1)), absolute precision
# deteriorates for larger negative numbers as you will have ln(0.0001).
d1 = np.array([
-30.0, -20.12, -2.2, -1.5, -0.2, 0.0, 0.234, 1.0, 1.2, 2.0, 3.0, 10.0,
100.0, 2001.0
],
dtype=np.float32)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
o = builder.aiOnnx.asinh([i1])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
out = np.arcsinh(d1)
return [out]
op_tester.setPatterns(['DecomposeBinaryConstScalar'],
enableRuntimeAsserts=False)
op_tester.run(init_builder, reference, 'infer')
def test_asinh_inplace(op_tester):
# create test data
d1 = np.array([
-30.0, -20.12, -2.2, -1.5, -0.2, 0.0, 0.234, 1.0, 1.2, 2.0, 3.0, 10.0,
100.0, 2001.0
],
dtype=np.float32)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
o = builder.aiOnnx.asinh([i1])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
out = np.arcsinh(d1)
return [out]
op_tester.setPatterns(['InPlace', 'DecomposeBinaryConstScalar'],
enableRuntimeAsserts=False)
op_tester.run(init_builder, reference, 'infer')
def test_asinh_grad(op_tester):
# create test data
d1 = np.array([
-20.12, -2.2, -1.5, -0.2, 0.0, 0.234, 1.0, 1.2, 2.0, 3.0, 10.0, 100.0,
2001.0
],
dtype=np.float32)
def derivative_asinh(x):
return 1 / (np.sqrt(np.power(x, 2) + 1))
def init_builder(builder):
i1 = builder.addInputTensor(d1)
o = builder.aiOnnx.asinh([i1])
builder.addOutputTensor(o)
return [
o,
popart.reservedGradientPrefix() + i1,
popart.reservedGradientPrefix() + o,
]
def reference(ref_data):
out = np.arcsinh(d1)
d__o = derivative_asinh(d1) * ref_data.getOutputTensorGrad(0)
return [out, d__o, None]
op_tester.setPatterns([
'SubtractArg1GradOp', 'LogGradOp', 'SqrtGradOp', 'PowArg0GradOp',
'DecomposeBinaryConstScalar'
],
enableRuntimeAsserts=False)
op_tester.run(init_builder, reference, 'train')
| 2.1875 | 2 |
actions.py | ElectricMaxxx/jon-knows-rasa | 0 | 12785695 | <filename>actions.py
import logging
import json
from rasa_core.actions import Action
from rasa_core.events import AllSlotsReset
class ActionWhoKnowsTopic(Action):
def name(self):
return 'action_who_knows_topic'
def run(self, dispatcher, tracker, domain):
topic = tracker.get_slot('topic') if tracker.get_slot('topic') else None
if topic is None:
dispatcher.utter_message('Ich habe kein Topic bekommen')
return []
topic = str(topic)
bests = []
with open('./data/skills.json') as f:
skillData = json.load(f)
if 'skills' not in skillData:
dispatcher.utter_message('Keine Skills sinds vorhanden')
return []
for persistedTopic in skillData['skills']:
if topic.lower() != persistedTopic.lower() or len(skillData['skills'][persistedTopic]) == 0: continue
for user in skillData['skills'][persistedTopic]: bests.append(user)
if len(bests) == 0:
dispatcher.utter_message('Kein Kollege weiß etwas zun Thema '+topic)
else:
bestsString = ''
for user in bests:
bestsString += user['name']+' (Score: '+str(user['score'])+'), '
if bestsString.endswith(", "): bestsString = bestsString[:-2]
dispatcher.utter_message('Die folgenden Kollegen meinen Ahnung zu haben: '+bestsString)
return [AllSlotsReset()]
class ActionIForgot(Action):
def name(self):
return 'action_forgotten'
def run(self, dispatcher, tracker, domain):
dispatcher.utter_message('Action forgotten')
return []
class ActionClaimToKnowTopic(Action):
def name(self):
return 'action_claim_to_know_topic'
def run(self, dispatcher, tracker, domain):
topic = tracker.get_slot('topic')
user = tracker.get_slot('user')
if topic is None:
dispatcher.utter_message('No topic given')
return []
if user is None:
dispatcher.utter_message('No user given')
return []
topic = str(topic)
with open('./data/skills.json') as f:
skillData = json.load(f)
if 'skills' not in skillData:
dispatcher.utter_message('Keine Skills sinds vorhanden')
return []
if topic not in skillData['skills']:
skillData['skills'][topic] = []
dispatcher.utter_message('Das Topic '+topic+' ist noch nicht bekannt, wird angelegt')
persistedTopic = skillData['skills'][topic]
        foundUser = False
        for entry in persistedTopic:
            if entry['name'] == user:
                entry['score'] = entry['score'] + 1
                dispatcher.utter_message('User ' + user + ': Score um eins erhöht für Topic: ' + topic)
                foundUser = True
                break
        if not foundUser:
            skillData['skills'][topic].append({"name": user, "score": 1})
            dispatcher.utter_message('User ' + user + ' wurde für das Topic vermerkt: ' + topic)
        # persist the updated skill data so the claim is not lost
        with open('./data/skills.json', 'w') as f:
            json.dump(skillData, f)
        return [AllSlotsReset()]
def dump(self, obj):
for attr in dir(obj):
print("obj.%s = %r" % (attr, getattr(obj, attr)))
class ActionTopicsInCategory(Action):
def name(self):
return 'action_topics_in_category'
def run(self, dispatcher, tracker, domain):
category = tracker.get_slot('category') if tracker.get_slot('category') else 'None'
category = str(category)
with open('./data/skills.json') as f:
skillData = json.load(f)
if 'categories' not in skillData:
dispatcher.utter_message('Keine Skills sinds vorhanden')
return []
for persistedCategory in skillData['categories']:
if persistedCategory.lower() != category.lower():
continue
topics = skillData['categories'][persistedCategory]
if len(topics) == 0:
dispatcher.utter_message('Kein Topic gefunden in Kategroie: '+category)
return []
topicsString = ''
for topic in topics:
topicsString += ', '+topic
dispatcher.utter_message('Folgen Topics habe ich in Kategoie '+category+' gefunden: '+topicsString)
return [AllSlotsReset()]
categories = ''
for category in skillData['categories']:
categories += ', '+category
dispatcher.utter_message('Keine Kategorie mit dem name '+category+' gefunden, wähle doch eine von '+categories)
return [AllSlotsReset()] | 2.5 | 2 |
Python/kepco.py | ultpnml/ult_instruments | 1 | 12785696 | <gh_stars>1-10
#Controls KEPCO BHK 2000-0.1MG high voltage power supply
#For now, it only reads. It does not set voltage/current.
import visa
class kepco:
#The primary address is assumed to be 6
def __init__(self, address = 6, gpib_num = 1):
self.primary_id = 'GPIB' + str(gpib_num) + '::' +str(address) +'::INSTR'
    #Sends a raw SCPI query to the instrument and returns the response
def query(self, message):
rm = visa.ResourceManager()
if self.primary_id in rm.list_resources():
inst = rm.open_resource(self.primary_id)
answer = inst.query(message)
inst.control_ren(6)
inst.close()
rm.close()
return answer
    #Measures voltage
def read_voltage(self):
return float(self.query('MEAS:SCAL:VOLT?'))
#Measures current
def read_current(self):
return float(self.query('MEAS:SCAL:CURR?'))
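

# --- Illustrative usage sketch (not part of the original module) ---
# Requires the BHK supply to be reachable on the GPIB bus; the address values
# below mirror the class defaults.
if __name__ == "__main__":
    supply = kepco(address=6, gpib_num=1)
    print("Voltage [V]:", supply.read_voltage())
    print("Current [A]:", supply.read_current())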
| 2.890625 | 3 |
src/pythontemplate/example.py | colms/pythontemplate | 0 | 12785697 | import argparse
def add_one(num: int) -> int:
if not num:
return 0
return num + 1
def add_with_args(num: int) -> int:
added = add_one(num)
return added
def parse_command_line_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description='My python template.')
parser.add_argument('--foo', type=int, help='foo help')
args = parser.parse_args()
return args
def main() -> None:
args = parse_command_line_args()
added = add_with_args(args.foo)
print(f'added: {added}')
if __name__ == '__main__':
main()
| 3.734375 | 4 |
test_80.py | VinkDong/scripts | 0 | 12785698 | <filename>test_80.py
from werkzeug.wrappers import Request, Response
@Request.application
def application(request):
return Response('Hello World! 80 port')
if __name__ == '__main__':
from werkzeug.serving import run_simple
run_simple('0.0.0.0', 80, application) | 2 | 2 |
examples/stable_radical_optimization/stable_radical_molecule_state.py | dmdu/rlmolecule | 0 | 12785699 | <reponame>dmdu/rlmolecule
from typing import Optional, Sequence
import rdkit
from rdkit import Chem
from rdkit.Chem import Mol, MolToSmiles
from rlmolecule.molecule.molecule_state import MoleculeState
from rlmolecule.molecule.builder.builder import AddNewAtomsAndBonds
class StableRadMoleculeState(MoleculeState):
"""
A State implementation which uses simple transformations (such as adding a bond) to define a
graph of molecules that can be navigated.
Molecules are stored as rdkit Mol instances, and the rdkit-generated SMILES string is also stored for
efficient hashing.
"""
def __init__(
self,
molecule: Mol,
builder: any,
force_terminal: bool = False,
smiles: Optional[str] = None,
) -> None:
super(StableRadMoleculeState, self).__init__(molecule, builder, force_terminal, smiles)
def get_next_actions(self) -> Sequence['StableRadMoleculeState']:
result = []
if not self._forced_terminal:
if self.num_atoms < self.builder.max_atoms:
result.extend(
(StableRadMoleculeState(molecule, self.builder) for molecule in self.builder(self.molecule)))
if self.num_atoms >= self.builder.min_atoms:
result.extend((StableRadMoleculeState(radical, self.builder, force_terminal=True)
for radical in build_radicals(self.molecule)))
return result
def build_radicals(starting_mol):
"""Build organic radicals. """
generated_smiles = set()
for i, atom in enumerate(starting_mol.GetAtoms()):
if AddNewAtomsAndBonds._get_free_valence(atom) > 0:
rw_mol = rdkit.Chem.RWMol(starting_mol)
rw_mol.GetAtomWithIdx(i).SetNumRadicalElectrons(1)
Chem.SanitizeMol(rw_mol)
smiles = Chem.MolToSmiles(rw_mol)
if smiles not in generated_smiles:
# This makes sure the atom ordering is standardized
yield Chem.MolFromSmiles(smiles)
generated_smiles.add(smiles)
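

# --- Illustrative usage sketch (not part of the original module) ---
# Enumerates the radicals generated from a small example molecule (ethane);
# the exact SMILES produced depend on the builder's valence bookkeeping.
if __name__ == "__main__":
    mol = Chem.MolFromSmiles("CC")
    for radical in build_radicals(mol):
        print(MolToSmiles(radical))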
| 2.625 | 3 |
packages/core/minos-microservice-common/minos/common/datetime.py | sorasful/minos-python | 247 | 12785700 | from datetime import (
datetime,
timezone,
)
def current_datetime() -> datetime:
"""Get current datetime in `UTC`.
:return: A ``datetime`` instance.
"""
return datetime.now(tz=timezone.utc)
NULL_DATETIME = datetime.max.replace(tzinfo=timezone.utc)
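

# Illustrative usage (not part of the original module): current_datetime always
# returns a timezone-aware UTC value, and NULL_DATETIME acts as a "max" sentinel.
if __name__ == "__main__":
    now = current_datetime()
    print(now.isoformat(), now.tzinfo)
    print(now < NULL_DATETIME)  # True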
| 3.140625 | 3 |
pyroms_toolbox/pyroms_toolbox/BGrid_GFDL/__init__.py | dcherian/pyroms | 1 | 12785701 | """
BGrid_GFDL module
"""
from BGrid_GFDL import BGrid_GFDL
from get_nc_BGrid_GFDL import get_nc_BGrid_GFDL
from make_remap_grid_file import make_remap_grid_file
from get_coast_line import get_coast_line
from plot_coast_line import plot_coast_line
from get_Bgrid_proj import get_Bgrid_proj
from flood import flood
| 1.203125 | 1 |
neverlose/models/base/event.py | neverlosecc/api-wrapper | 2 | 12785702 | <reponame>neverlosecc/api-wrapper
from pydantic import BaseModel
class BaseEvent(BaseModel):
unique_id: int
signature: str
| 1.632813 | 2 |
painting.py | applejenny66/PythonDrawing | 0 | 12785703 | # painting.py
import os
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
from preprocess import Kmeans
from monitor import Monitor
import tkinter as tk
import csv
from utils import BlankImg
class Painting():
def __init__(self, K, shape):
#self.radius = 3
self.K = K
self.size = shape
self.count = 0
self.fixcount = 0
self.brush_size = 3
self.img = np.zeros((self.size))
for i in range(0, self.size[0]):
for j in range(0, self.size[1]):
self.img[i, j, 0] = self.img[i, j, 1] = self.img[i, j, 2] = 255
def Painting(self):
img = BlankImg(self.size)
self.color_list = []
for i in range(0, self.K):
filename = "./points/" + str(i) + "_point.csv"
with open(filename, newline='') as csvfile:
rows = csv.reader(csvfile)
for row in rows:
print(row)
if (len(row) != 2):
r, g, b = int(row[3]), int(row[4]), int(row[5])
self.color_list.append((r, g, b))
else:
x = int(row[0])
y = int(row[1])
for a in range(x-self.brush_size, x+self.brush_size):
for b in range(y-self.brush_size, y+self.brush_size):
if (a >= 0 and a <= self.size[0]-1):
if (b >= 0 and b <= self.size[1]-1):
img[a, b, 0] = r
img[a ,b ,1] = g
img[a ,b, 2] = b
save_name = "./painting/" + str(i) + ".png"
cv.imwrite(save_name, img)
words = "finished " + str(i)
print (words)
return (self.color_list)
def DectectImg(self, targetname, comparename):
target_img = cv.imread(targetname)
compare_img = cv.imread(comparename)
different_img = BlankImg(self.size)
for x in range(0, self.size[0]):
for y in range(0, self.size[1]):
if (int(target_img[x, y, 0]) != int(compare_img[x, y, 0])):
different_img[x, y, 0] = target_img[x, y, 0]
different_img[x, y, 1] = target_img[x, y, 1]
different_img[x, y, 2] = target_img[x, y, 2]
else:
if (int(target_img[x, y, 1]) != int(compare_img[x, y, 1])):
different_img[x, y, 0] = target_img[x, y, 0]
different_img[x, y, 1] = target_img[x, y, 1]
different_img[x, y, 2] = target_img[x, y, 2]
else:
if (int(target_img[x, y, 2]) != int(compare_img[x, y, 2])):
different_img[x, y, 0] = target_img[x, y, 0]
different_img[x, y, 1] = target_img[x, y, 1]
different_img[x, y, 2] = target_img[x, y, 2]
save_name = "./difference/" + str(self.count) + ".png"
cv.imwrite(save_name, different_img)
self.count += 1
"""
def DectectImg(self, targetname, comparedname):
targetimg = cv.imread(targetname)
comparedimg = cv.imread(comparedname)
print (type(targetimg))
print (type(comparedimg))
fiximg = np.zeros((self.size))
for x in range(0, self.size[0]):
for y in range(0, self.size[1]):
if (targetimg[x, y, 0] == comparedimg[x, y, 0] and \
targetimg[x, y, 1] == comparedimg[x, y, 1] and \
targetimg[x, y, 2] == comparedimg[x, y, 2]):
fiximg[x, y, 0] = fiximg[x, y, 1] = fiximg[x, y, 2] = 255
else:
fiximg[x, y, 0] = targetimg[x, y, 0]
fiximg[x, y, 1] = targetimg[x, y, 1]
fiximg[x, y, 2] = targetimg[x, y, 2]
save_name = "./fixpoint/" + str(self.fixcount) + "_fix.png"
cv.imwrite(save_name, fiximg)
print ("save name: ", save_name)
self.fixcount += 1
return (save_name)
"""
if __name__ == "__main__":
K = 298
filename = "K_298_1_2.png"
img = cv.imread(filename)
size = img.shape
new = Painting(K, size)
#filename = "./points/0_line.csv"
color_list = new.Painting()
comparename = "./painting/297.png"
new.DectectImg(filename, comparename)
print ("finished.")
| 3.015625 | 3 |
pages/views.py | Garinmckayl/monet | 0 | 12785704 |
import re
import requests
from accounts.models import Company, CustomUser
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect, render
from django.urls import reverse
from django.views import View
from django.views.generic import DetailView, ListView, TemplateView
from django.views.generic.edit import CreateView, UpdateView
from pages.models import Auction, Bid, DataSource
class HomePageView(TemplateView):
template_name = 'pages/home.html'
def get_context_data(self, *args, **kwargs):
context = super(HomePageView, self).get_context_data(*args, **kwargs)
context['users'] = CustomUser.objects.all()
return context
class AboutPageView(TemplateView):
template_name = 'pages/about.html'
# @login_required
class DashboardPageView(TemplateView):
template_name = 'pages/dashboard.html'
def get_context_data(self, *args, **kwargs):
context = super(DashboardPageView, self).get_context_data(
*args, **kwargs)
return context
class AuctionListView(ListView):
model = Auction
def get_context_data(self, *args, **kwargs):
context = super(AuctionListView, self).get_context_data(
*args, **kwargs)
# company=Company.objects.get(user=self.request.user)
# context['company'] = company
return context
class MyAuctionDetailView(View):
def get(self, request, *args, **kwargs):
my_auction = Auction.objects.filter(user=request.user).first()
if my_auction:
return render(request, 'pages/my-auction-detail.html', {"my_auction": my_auction})
else:
return redirect('auction-create')
class AuctionCreateView(CreateView):
model = Auction
fields = ['description', 'starting_price', 'category', 'active']
def form_valid(self, form):
company = Company.objects.filter(user=self.request.user).first()
if company:
form.instance.user = self.request.user
form.instance.company = company
return super().form_valid(form)
else:
return redirect('company-create')
class AuctionUpdateView(UpdateView):
model = Auction
fields = ['description', 'starting_price', 'category', 'active']
template_name_suffix = '_update_form'
class AuctionDetailView(DetailView):
context_object_name = 'auction'
queryset = Auction.objects.all()
def get_context_data(self, *args, **kwargs):
context = super(AuctionDetailView, self).get_context_data(
*args, **kwargs)
company = Company.objects.get(user=self.request.user)
context['company'] = company
return context
class StripeConnectionView(View):
# stripe.api_key = "<KEY>"
# stripe.client_id = "ca_L3q4MuEPR0JHtn2AlFe5bbf8TqrZDAcq"
def get(self, request, *args, **kwargs):
# get the company of the user
company, created = Company.objects.get_or_create(user=request.user)
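        # Rough flow (as implemented below): create the company on Codat via the
        # REST API, persist the returned Codat id on our DataSource record, then
        # redirect the user to Codat's hosted link page to connect their platform.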
# at this time, the Company should be created at Codat
baseUrl = "https://api-uat.codat.io"
authHeaderValue = "Basic bDRlbDRiWDhwdGdhbzVYR1c2d2dxV0s2NHpEa3NOYTlIQk9wOVFEZQ=="
# Add your authorization header
headers = {"Authorization": authHeaderValue}
# TODO first create the company
data = {"name": "Recipe test company"}
response = requests.post(
'https://api.codat.io/companies', json=data, headers=headers)
data = response.json()
data_source, created = DataSource.objects.get_or_create(
company=company)
data_source.codat_id = data['id']
data_source.save()
redirect_url = data['redirect']
# url = stripe.OAuth.authorize_url(scope='read_write')
# company=Company.objects.first()
# data, created= DataSource.objects.update_or_create(company=company, url=url)
# print("this is the url ........................", url)
# return render(request, "pages/stripe-connection.html")
return redirect(redirect_url)
class DatasourceView(View):
def get(self, request, *args, **kwargs):
# get the company of the user
company, created = Company.objects.get_or_create(user=request.user)
# at this time, the Company should be created at Codat
baseUrl = "https://api-uat.codat.io"
authHeaderValue = "Basic bDRlbDRiWDhwdGdhbzVYR1c2d2dxV0s2NHpEa3NOYTlIQk9wOVFEZQ=="
# Add your authorization header
headers = {"Authorization": authHeaderValue}
data_source, created = DataSource.objects.get_or_create(
company=company)
codat_id = data_source.codat_id
print('codat id ....................', codat_id)
response = requests.get(
'https://api.codat.io/companies/'+codat_id, headers=headers)
data = response.json()
print('data, ........', data)
print('hey .................',
data['dataConnections'][0]['status'] == 'Linked')
if data['dataConnections'][0]['status'] == 'Linked':
            data_source.codat_id = data['id']
data_source.platform = data['platform']
data_source.redirect = data['redirect']
data_source.last_sync = data['lastSync']
data_source.status = data['dataConnections'][0]['status']
data_source.save()
return render(request, 'pages/data-source.html', {'data_source': data_source})
else:
return redirect('company-create')
def get_context_data(self, *args, **kwargs):
context = super(DatasourceView, self).get_context_data(
*args, **kwargs)
company = Company.objects.get(user=self.request.user)
context['company'] = company
return context
class BidCreateView(CreateView):
model = Bid
fields = ['user', 'auction', 'bid_price']
def get_absolute_url(self):
return reverse('bid-detail', kwargs={'pk': self.pk})
def get_context_data(self, *args, **kwargs):
context = super(BidCreateView, self).get_context_data(*args, **kwargs)
company = Company.objects.get(user=self.request.user)
context['company'] = company
return context
class BidDetailView(DetailView):
context_object_name = 'bid'
queryset = Bid.objects.all()
def get_context_data(self, *args, **kwargs):
context = super(BidDetailView, self).get_context_data(*args, **kwargs)
company = Company.objects.get(user=self.request.user)
context['company'] = company
return context
| 2.109375 | 2 |
examples/diameter-app2/bromelia_hss.py | post-cyberlabs/bromelia | 25 | 12785705 | # -*- coding: utf-8 -*-
"""
examples.bromelia_hss
~~~~~~~~~~~~~~~~~~~~~
This module contains an example on how to setup a dummy HSS
by using the Bromelia class features of bromelia library.
:copyright: (c) 2020 <NAME>.
:license: MIT, see LICENSE for more details.
"""
import os
import sys
basedir = os.path.dirname(os.path.abspath(__file__))
examples_dir = os.path.dirname(basedir)
bromelia_dir = os.path.dirname(examples_dir)
sys.path.insert(0, bromelia_dir)
from bromelia import Bromelia
from bromelia.avps import *
from bromelia.constants import *
from bromelia.etsi_3gpp_s6a_s6d.avps import *
from bromelia.etsi_3gpp_s6a_s6d.messages import CancelLocationAnswer as CLA
from bromelia.etsi_3gpp_s6a_s6d.messages import CancelLocationRequest as CLR
#: Application initialization
config_file = os.path.join(basedir, "bromelia_hss_config.yaml")
app = Bromelia(config_file=config_file)
app.load_messages_into_application_id([CLA, CLR], DIAMETER_APPLICATION_S6a_S6d)
CLR = app.s6a_s6d.CLR #: Creating CLR alias
if __name__ == "__main__":
app.run() #: It will be blocked until connection has been established
clr = CLR(user_name="123456789012345",
clr_flags=2,
destination_host=app.configs[0]["PEER_NODE_HOSTNAME"],
supported_features=[
VendorIdAVP(VENDOR_ID_3GPP),
FeatureListIdAVP(1),
FeatureListAVP(134217728)])
cla = app.send_message(clr)
| 2.015625 | 2 |
setup.py | OscarSilvaOfficial/fast_orm | 2 | 12785706 | import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="fast_sql_manager",
version="0.1.5",
author="<NAME>",
author_email="<EMAIL>",
description="Um pacote simples para realizar operações no banco",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/OscarSilvaOfficial/easy_sql",
packages=setuptools.find_packages(),
install_requires=[
'six>=1.15.0',
'mysqlclient>=2.0.3',
'mysql-connector-python>=8.0.22',
'mysql>=0.0.2'
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
) | 1.421875 | 1 |
cq/views.py | furious-luke/django-cq | 31 | 12785707 | from django.db import transaction
from rest_framework import viewsets
from .serializers import TaskSerializer, CreateTaskSerializer
from .models import Task
class TaskViewSet(viewsets.ModelViewSet):
queryset = Task.objects.all()
serializer_class = TaskSerializer
def get_serializer(self, data=None, *args, **kwargs):
if getattr(self, 'creating', False):
return CreateTaskSerializer(data=data)
return super().get_serializer(data, *args, **kwargs)
def create(self, request, *args, **kwargs):
self.creating = True
with transaction.atomic():
return super().create(request, *args, **kwargs)
| 2.140625 | 2 |
Code/Python/Kamaelia/Kamaelia/BaseIPC.py | sparkslabs/kamaelia_orig | 12 | 12785708 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
# Licensed to the BBC under a Contributor Agreement: RJL
"""\
Base IPC class. Subclass it to create your own IPC classes.
When doing so, make sure you set the following:
- Its doc string, so a string explanation can be generated for an
instance of your subclass.
- 'Parameters' class attribute to a list of named parameters you accept at creation,
prefixing optional parameters with "?", e.g. "?depth"
For example
-----------
A custom IPC class to report a theft taking place! ::
class Theft(Kamaelia.BaseIPC.IPC):
\"\"\"Something has been stolen!\"\"\"
Parameters = ["?who","what"]
So what happens when we use it? ::
>>> ipc = Theft(who="Sam", what="sweeties")
>>> ipc.__doc__
'Something has been stolen!'
>>> ipc.who
'Sam'
>>> ipc.what
'sweeties'
"""
class IPC(object):
"""explanation %(foo)s did %(bar)s"""
Parameters = [] # ["foo", "bar"]
def __init__(self, **kwds):
super(IPC, self).__init__()
for param in self.Parameters:
optional = False
if param[:1] == "?":
param = param[1:]
optional = True
if not ( param in kwds ) :
if not optional:
raise ValueError(param + " not given as a parameter to " + str(self.__class__.__name__))
else:
self.__dict__[param] = None
else:
self.__dict__[param] = kwds[param]
del kwds[param]
for additional in kwds.keys():
raise ValueError("Unknown parameter " + additional + " to " + str(self.__class__.__name__))
self.__dict__.update(kwds)
def __str__(self):
return self.__class__.__doc__ % self.__dict__
| 2.359375 | 2 |
paranoia_data.py | reckenrode/ParanoiaCharGen | 1 | 12785709 | import operator
specs = {
'Management': ['Bootlicking', 'Chutzpah', 'Hygiene', 'Con Games',
'Interrogation', 'Intimidation', 'Moxie', 'Oratory'],
'Stealth': ['Concealment', 'Disguise', 'High Alert',
'Security Systems', 'Shadowing', 'Sleight of Hand',
'Sneaking', 'Surveillance'],
'Violence': ['Agility', 'Energy Weapons', 'Demolition',
'Field Weapons', 'Fine Manipulation', 'Hand Weapons',
'Projectile Weapons', 'Thrown Weapons',
'Unarmed Combat', 'Vehicular Combat'],
'Hardware': ['Bot Ops & Maintenance', 'Chemical Engineering',
'Electronic Engineering', 'Habitat Engineering',
'Mechanical Engineering', 'Nuclear Engineering',
'Vehicle Ops & Maintenance',
'Weapons & Armor Maintenance'],
'Software': ['Bot Programming', 'C-Bay', 'Data Analysis',
'Data Search', 'Financial Systems', 'Hacking',
'Operating Systems', 'Vehicle Programming'],
'Wetware': ['Biosciences', 'Bioweapons', 'Cloning', 'Medical',
'Outdoor Life', 'Pharmatherapy', 'Psychotherapy',
'Suggestion']
}
action_skills = ['Management', 'Stealth', 'Violence']
knowledge_skills = ['Hardware', 'Software', 'Wetware']
groups = {
'Armed Forces': {
'weight': 3,
'firms': ['Ammunition Fresheners', 'Armed Forces Friends Network',
'Bodyguard Communications Liaisons', 'Blast Shield Maintenance',
'Crowd Control', 'Sensitivity Trainers', 'Threat Assessors',
'Tool & Die Works', 'Vulture Squadron Recruiters'],
'specs': ['Demolition', 'Energy Weapons', 'Hand Weapons', 'Projectile Weapons',
'Thrown Weapons', 'Unarmed Combat', 'Vehicle Ops & Maintenance'],
'societies': [(3, 'Anti-Mutant'), (3, 'Death Leopard'), (3, 'Frankenstein Destroyers'),
(3, 'PURGE'), (1, 'Communists'), (1, 'FCCC-P'), (1, 'Free Enterprise'),
(1, 'Pro Tech'), (1, 'Psion'), (1, 'Illuminati'), (1, 'Spy')]
},
'Central Processing Unit': {
'weight': 2,
        'firms': ['116 Emergency Systems', 'Credit License Checkers', 'Facility Surveillance',
'Form Facilitators', 'Form Inventory Officers', 'Form Disposal Advisors',
'Pocket Protector Refurbishers', 'Security System Installers',
'Volunteer Collection Agencies'],
'specs': ['Security Systems', 'Electronic Engineering', 'Bot Programming',
'Data Analysis', 'Financial Systems', 'Data Search', 'Vehicle Programming'],
'societies': [(4, 'Computer Phreaks'), (4, 'Corpore Metal'), (2, 'FCCC-P'),
(2, 'Sierra Club'), (1, 'Anti-Mutant'), (1, 'Communists'), (1, 'Pro Tech'),
(1, 'Psion'), (1, 'PURGE'), (1, 'Illuminati'), (1, 'Spy')]
},
'HPD & Mind Control': {
'weight': 3,
'firms': ['Entertainment Scouting Agencies', 'History Purifiers',
'News Services', 'Public Hating Coordination',
'Psyche Ward Administration', 'Sector Expansion Surveyors',
'Semantics Control', 'Singalong Agents', 'Subliminals Police'],
'specs': ['Bootlicking', 'Chutzpah', 'Con Games', 'Moxie', 'Bot Ops & Maintenance',
'Pharmatherapy', 'Medical'],
'societies': [(2, 'Anti-Mutant'), (2, 'FCCC-P'), (3, 'Humanists'),
(4, 'Romantics'), (2, 'Sierra Club'), (1, 'Communists'), (1, 'Mystics'),
(1, 'Psion'), (1, 'PURGE'), (1, 'Illuminati'), (1, 'Spy')]
},
'Internal Security': {
'weight': 2,
'firms': ['Crowd Control', 'Forensic Analysis', 'Glee Quota Adjutants',
'Re-Education Client Procurement', 'Surveillance Operatives',
'Termination Center Janitorial', 'Thought Surveyors',
'Threat Assessors', 'Treason Scene Cleanup'],
'specs': ['Interrogation', 'Intimidation', 'Security Systems',
'Surveillance', 'Energy Weapons', 'Hand Weapons', 'Unarmed Combat'],
'societies': [(3, 'Anti-Mutant'), (3, 'Death Leopard'), (3, 'FCCC-P'),
(3, 'Frankenstein Destroyers'), (1, 'Communists'), (1, 'Free Enterprise'), (1, 'Pro Tech'),
(1, 'Psion'), (1, 'PURGE'), (1, 'Illuminati'), (1, 'Spy')]
},
'Production, Logistics & Commissary': {
'weight': 3,
'firms': ['Armored Autocar Escorts', 'BLUE Room Caterers', 'Equipment Assembly Control',
'Field Logistics Advisors', 'Food Vat Control', 'Inventory System Updaters',
'Printing Office Field Checkers', 'Storage Media Integrity Assessors',
'Warehouse System Inspectors'],
'specs': ['Chutzpah', 'Con Games', 'Bot Ops & Maintenance', 'Habitat Engineering',
'Vehicle Ops & Maintenance', 'Data Search', 'Biosciences'],
'societies': [(5, 'Free Enterprise'), (3, 'Humanists'), (2, 'Mystics'),
(2, 'Romantics'), (1, 'Communists'), (1, 'Pro Tech'), (1, 'Psion'),
(1, 'Sierra Club'), (1, 'Illuminati'), (1, 'Spy')]
},
'Power Services': {
'weight': 2,
'firms': ['Battery Backup', 'Bum Radius Assessors', 'Circuit Maintenance',
'Fuel Cell Replenishment', 'Fuel Rod Disposal Consultants',
'Odor Fresheners', 'Power Oscillation Professionals', 'Safe Atoms Initiative',
'Wire Supply Checkers'],
'specs': ['Data Analysis', 'Data Search', 'Chemical Engineering',
'Electronic Engineering', 'Habitat Engineering', 'Mechanical Engineering',
'Nuclear Engineering'],
'societies': [(2, 'Computer Phreaks'), (2, 'Death Leopard'), (2, 'FCCC-P'),
(2, 'Frankenstein Destroyers'), (2, 'Free Enterprise'), (2, 'Mystics'), (2, 'Pro Tech'),
(2, 'PURGE'), (1, 'Communists'), (1, 'Illuminati'), (1, 'Spy')]
},
'Research & Design': {
'weight': 2,
'firms': ['Biological Niceness Indexers', 'Bot Processing', 'Drug Interaction Testers',
'Field Data Collectors', 'Goo Cleanup', 'RoboPsych Auditing',
                  'Scientist Sanity Checkers', 'Vehicle Therapists',
'Weapon Effectiveness Assessors'],
'specs': ['Chemical Engineering', 'Mechanical Engineering', 'Nuclear Engineering',
'Bot Programming', 'Vehicle Programming', 'Bioweapons', 'Cloning'],
'societies': [(3, 'Computer Phreaks'), (3, 'Corpore Metal'), (3, 'Pro Tech'),
(3, 'Psion'), (3, 'PURGE'), (1, 'FCCC-P'), (1, 'Communists'),
(1, 'Illuminati'), (1, 'Spy')]
},
'Technical Services': {
'weight': 2,
'firms': ['Bedding Inspectors', 'Clone Tank Support Services',
'Consolidated Motorized Transport (CMT)', 'Fuel Cell Replenishment',
'MemoMax Quality Assurance', 'Medical Services', 'Paint Control',
'Slime Identification', 'Tech Support'],
'specs': ['Chemical Engineering', 'Electronic Engineering', 'Habitat Engineering',
'Vehicle Ops & Maintenance', 'Bot Programming', 'Vehicle Programming',
'Pharmatherapy'],
'societies': [(2, 'Computer Phreaks'), (2, 'Corpore Metal'), (2, 'Death Leopard'),
(2, 'Frankenstein Destroyers'), (2, 'Mystics'), (2, 'Pro Tech'), (2, 'Psion'),
(2, 'Sierra Club'), (1, 'Communists'), (1, 'Illuminati'), (1, 'Spy')]
},
'Industrial spy or saboteur': {
'weight': 1,
'firms': [],
'specs': []
}
}
weighted_groups = reduce(operator.add, [[g for w in xrange(v['weight'])] for g, v in groups.iteritems()])
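# Each group name is repeated once per point of its 'weight', so a uniform
# random pick from weighted_groups reproduces the intended weighted odds
# (written with the Python 2 built-ins reduce/xrange/iteritems).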
powers = {
'classic': ['Charm', 'Corrosion', 'Detect Mutant Power', 'Electroshock', 'Empathy',
'Energy Field', 'Hypersenses', 'Levitation', 'Machine Empathy',
'Matter Eater', 'Mental Blast', 'Polymorphism', 'Puppeteer',
'Pyrokinesis', 'Regeneration', 'Slippery Skin', 'Telekinesis',
'Teleportation', 'Uncanny Luck', 'X-Ray Vision'],
'straight': ['Adhesive Skin', 'Adrenalin Control', 'Bureaucratic Intuition',
'Charm', 'Death Simulation', 'Deep Thought', 'Electroshock',
'Empathy', 'Energy Field', 'Hypersenses', 'Machine Empathy',
'Matter Eater', 'Mechanical Intuition', 'Mental Blast',
'Pyrokinesis', 'Regeneration', 'Rubbery Bones', 'Toxic Metabolism',
'Uncanny Luck', 'Ventriloquist'],
'zap': ['Absorption', 'Chameleon', 'Charm', 'Desolidity', 'Electroshock',
'Energy Field', 'Growth', 'Levitation', 'Machine Empathy',
'Matter Eater', 'Mental Blast', 'Polymorphism', 'Puppeteer',
'Pyrokinesis', 'Regeneration', 'Shrinking', 'Telekinesis',
            'Teleportation', 'Transmutation', 'X-Ray Vision']
}
mutant_experience_powers = {
'classic': ["Charm", "Corrosion", "Detect Mutant Power",
"Electroshock", "Empathy", "Energy Field",
"Hypersenses", "Levitation", "Machine Empathy",
"Matter Eater", "Mental Blast", "Polymorphism",
"Puppeteer", "Pyrokinesis", "Regeneration",
"Slippery Skin", "Telekinesis", "Teleportation",
"Uncanny Luck", "X-Ray Vision", "Call Bots",
"Chromativariation", "Creeping Madness", "Environmental Control",
"Gravity Manipulation", "Haze", "Hyperreflexes",
"Jump!", "Light Control", "Magnetize",
"Pouches", "Push Mutant Powers", "Radioactivity",
"Scream", "Sculpt", "Second Skin", "Speed", "Spikes",
"Stench", "Stretchy"],
'straight': ["Adhesive Skin", "Adrenalin Control", "Bureaucratic Intuition",
"Charm", "Death Simulation", "Deep Thought",
"Electroshock", "Empathy", "Energy Field",
"Hypersenses", "Machine Empathy", "Matter Eater",
"Mechanical Intuition", "Mental Blast", "Pyrokinesis",
"Regeneration", "Rubbery Bones", "Toxic Metabolism",
"Uncanny Luck", "Ventriloquist", "Adaptive Metabolism",
"Call Bots", "Clean Slate", "Creeping Madness",
"Environmental Control", "Find Location", "Forgettable",
"Haze", "Hyperreflexes", "Jump!",
"Light Control", "Mind Sense", "Pouches",
"Psychometry", "Push Mutant Powers", "Radioactivity",
"Second Skin", "Speed", "Stench",
"Stretchy"],
'zap': ["Absorption", "Chameleon", "Charm",
"Desolidity", "Electroshock", "Energy Field",
"Growth", "Levitation", "Machine Empathy",
"Matter Eater", "Mental Blast", "Polymorphism",
"Puppeteer", "Pyrokinesis", "Regeneration",
"Shrinking", "Telekinesis", "Teleportation",
"Transmutation", "X-Ray Vision", "Acidic Spit",
"Bouncy", "Chromativariation", "Creeping Madness",
"Cryokinesis", "Enervating Darkness", "Gravity Manipulation",
"Haze", "Hyperreflexes", "Jump!",
"Magnetize", "Radioactivity", "Scream",
"Sculpt", "Second Skin", "Speed",
"Spikes", "Stasis", "Stench",
"Stretchy"]
}
# A skill of 'M' indicates the next is mandatory for that society
societyskills = [# Uncommon Unlikely Unhealthy
['Anti-Mutant', 'Power Studies', 'Comic Book Trivia', 'Twitchtalk'],
['Computer Phreaks', 'Cash Hacking', 'Jargon', ('Hacking', 'programming skills')],
['Communists', 'Demolition', 'Tractor Maintenance', ('M', 'Propaganda')],
['Corpore Metal', 'Cyborging', 'Botspotting', 'Bioweapons'],
['Death Leopard', ('M', 'Demolition'), ('Action Movies', 'Partying'), 'Gambling'],
['FCCC-P', 'Alpha Complex History', 'priestly skills', 'Meeting Machine Empaths'],
['Frankenstein Destroyers', 'Demolition', 'toolmaking skills', 'programming skills'],
['Free Enterprise', 'Haggling', 'Advertising & Marketing', ('Bribery', 'Forgery')],
                 ['Humanists', 'Martial Arts', ('hobbies', 'languages'), 'Old Reckoning Cultures'],
['Mystics', ('M', 'Drug Procurement'), ('Meditation', 'Partying'), 'Old Reckoning Drugs'],
['Pro Tech', 'Experimental Equipment Repair', 'Video Games', 'WMD'],
['Psion', 'Power Studies', 'Comic Book Trivia', 'Twitchtalk'],
['PURGE', ('M', 'Demolition'), 'Gloating', ('Bioweapons', 'Twitchtalk')],
['Romantics', 'Archival Studies', ('Cooking', 'Knitting', 'Music'), ('M', 'Old Reckoning Cultures')],
                 ['Sierra Club', ('Survival', 'Wild Lore', 'Travel'), ('Birdwatching', 'Botany', 'Spoor Recognition'), 'Bioweapons']
] | 1.648438 | 2 |
tardis/gui/tests/test_gui.py | saksham-kaushal/tardis | 1 | 12785710 | <filename>tardis/gui/tests/test_gui.py<gh_stars>1-10
import os
import pytest
from tardis.io.config_reader import Configuration
from tardis.simulation import Simulation
import astropy.units as u
from tardis.gui import interface
from tardis.gui.widgets import Tardis
from tardis.gui.datahandler import SimpleTableModel
from PyQt5 import QtWidgets
@pytest.fixture(scope='module')
def refdata(tardis_ref_data):
def get_ref_data(key):
return tardis_ref_data[os.path.join(
'test_simulation', key)]
return get_ref_data
@pytest.fixture(scope='module')
def config():
return Configuration.from_yaml(
'tardis/io/tests/data/tardis_configv1_verysimple.yml')
@pytest.fixture(scope='module')
def simulation_one_loop(
atomic_data_fname, config,
tardis_ref_data, generate_reference):
config.atom_data = atomic_data_fname
config.montecarlo.iterations = 2
config.montecarlo.no_of_packets = int(4e4)
config.montecarlo.last_no_of_packets = int(4e4)
simulation = Simulation.from_config(config)
simulation.run()
return simulation
def test_gui(simulation_one_loop):
simulation = simulation_one_loop
app = QtWidgets.QApplication([])
tablemodel = SimpleTableModel
win = Tardis(tablemodel)
win.show_model(simulation)
app.quit()
| 1.984375 | 2 |
techk/apps/base/configs.py | danilovergara/fullstack | 0 | 12785711 | <filename>techk/apps/base/configs.py
scraper_url = "http://books.toscrape.com/" # The URL to be scraped | 1.476563 | 1 |
Task/Even-or-odd/Python/even-or-odd-1.py | LaudateCorpus1/RosettaCodeData | 5 | 12785712 | <reponame>LaudateCorpus1/RosettaCodeData<filename>Task/Even-or-odd/Python/even-or-odd-1.py<gh_stars>1-10
>>> def is_odd(i): return bool(i & 1)
>>> def is_even(i): return not is_odd(i)
>>> [(j, is_odd(j)) for j in range(10)]
[(0, False), (1, True), (2, False), (3, True), (4, False), (5, True), (6, False), (7, True), (8, False), (9, True)]
>>> [(j, is_even(j)) for j in range(10)]
[(0, True), (1, False), (2, True), (3, False), (4, True), (5, False), (6, True), (7, False), (8, True), (9, False)]
>>>
| 3.390625 | 3 |
ML_service/run_classifier_app.py | ykpgrr/Hate-Speech-Detection | 4 | 12785713 | import os
import pickle
import re
import string
from flask import Flask, request, jsonify
CUR_DIR = os.path.dirname(__file__)
STOP_WORDS = pickle.load(open(
os.path.join(CUR_DIR,
'pkl_objects',
'stopwords.pkl'), 'rb'))
VECTORIZER = pickle.load(open(
os.path.join(CUR_DIR,
'pkl_objects',
'vectorizer.pkl'), 'rb'))
CLF = pickle.load(open(
os.path.join(CUR_DIR,
'pkl_objects',
'classifier.pkl'), 'rb'))
LABEL_DICT = {0: 'The tweet contains hate speech',
1: 'The tweet is not offensive',
2: 'The tweet uses offensive language but not hate speech'}
app = Flask(__name__)
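# Example request against the endpoint below (assuming the service is running
# locally on port 5000, as in the __main__ block):
#   curl -X POST http://localhost:5000/analyse/sentiment \
#        -H "Content-Type: application/json" \
#        -d '{"sentence": "some tweet text"}'
# The JSON response carries the mapped label text and the confidence (0-100).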
def preprocess_tweet(tweet):
tweet = tweet.lower()
# Remove urls
tweet = re.sub('((www\.[^\s]+)|(https?://[^\s]+))', '', tweet)
# Remove usernames
tweet = re.sub('@[^\s]+', '', tweet)
# Remove white space
tweet = tweet.strip()
# Remove hashtags
tweet = re.sub(r'#([^\s]+)', '', tweet)
# Remove stopwords
tweet = " ".join([word for word in tweet.split(' ') if word not in STOP_WORDS])
# Remove punctuation
tweet = "".join(l for l in tweet if l not in string.punctuation)
return tweet
@app.route("/analyse/sentiment", methods=['POST'])
def classify_tweet():
sentence = request.get_json()['sentence']
sentence_to_clf = preprocess_tweet(sentence)
sentence_to_clf = VECTORIZER.transform([sentence_to_clf])
label = CLF.predict(sentence_to_clf)[0]
confidence = max(CLF.predict_proba(sentence_to_clf)[0]) * 100
return jsonify(
sentence=LABEL_DICT[label],
polarity=confidence
)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000)
| 2.859375 | 3 |
src/users/models/componentsschemasmicrosoft_graph_onenoteentitybasemodelallof1.py | peombwa/Sample-Graph-Python-Client | 0 | 12785714 | <filename>src/users/models/componentsschemasmicrosoft_graph_onenoteentitybasemodelallof1.py<gh_stars>0
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ComponentsschemasmicrosoftGraphOnenoteentitybasemodelallof1(Model):
"""onenoteEntityBaseModel.
:param self:
:type self: str
"""
_attribute_map = {
'self': {'key': 'self', 'type': 'str'},
}
    def __init__(self, self_property=None):
        # the wire property is literally named "self", so it is accepted here as `self_property`
        super(ComponentsschemasmicrosoftGraphOnenoteentitybasemodelallof1, self).__init__()
        self.self = self_property
| 2.09375 | 2 |
testdata/python/setup/setup.py | lade-io/jet | 18 | 12785715 | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
REQUIRES = ['numpy', 'pytest', 'flask']
config = {
'description': 'Fibonacci webservice (with neural network)',
'author': '<NAME>',
'url': 'https://github.com/soerendip/fibo',
'download_url': 'https://github.com/soerendip/fibo',
'author_email': '<EMAIL>',
'version': '0.1',
    'install_requires': REQUIRES,
'name': 'fibo',
'packages': ['fibo'],
'description': 'A webservice for fibonacci numbers.',
'platform': 'Linux'
}
setup(**config)
| 1.460938 | 1 |
venv/lib/python3.8/site-packages/azureml/_tracing/_tracer.py | amcclead7336/Enterprise_Data_Science_Final | 0 | 12785716 | <reponame>amcclead7336/Enterprise_Data_Science_Final
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import os
from ._constants import USER_FACING_NAME, TRACEPARENT_ENV_VAR
from ._span import Span
from ._vendored import _execution_context as execution_context
class AmlTracer:
def __init__(self, span_processors):
self._span_processors = span_processors
def start_as_current_span(self, name, parent=None, user_facing_name=None):
parent = parent or self.__class__._get_ambient_parent()
span = Span(name, parent, self._span_processors)
self.__class__.decorate_span(span, user_facing_name)
span.__enter__()
return span
def start_span(self, name, parent=None, user_facing_name=None):
span = Span(name, parent, self._span_processors)
self.__class__.decorate_span(span, user_facing_name)
return span
@staticmethod
def decorate_span(span, user_facing_name):
if user_facing_name:
span.attributes[USER_FACING_NAME] = user_facing_name
@staticmethod
def _get_ambient_parent():
current_parent = execution_context.get_current_span()
if current_parent:
return current_parent
traceparent = os.environ.get(TRACEPARENT_ENV_VAR, '').split('-')
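        # A W3C traceparent value has four '-'-separated fields
        # (version-traceid-parentid-flags); anything else is treated as absent.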
if not traceparent or len(traceparent) != 4:
return None
return Span._from_traceparent(*traceparent)
class DefaultTraceProvider:
def __init__(self, tracer):
self._tracer = tracer
def get_tracer(self, name):
return self._tracer
def get_current_span(self):
return execution_context.get_current_span()
| 2.0625 | 2 |
wpca/tests/test_utils.py | radicamc/wpca | 123 | 12785717 | from itertools import chain, combinations
import numpy as np
from numpy.testing import assert_allclose
from wpca.tests.tools import assert_allclose_upto_sign
from wpca.utils import orthonormalize, random_orthonormal, weighted_mean
def test_orthonormalize():
rand = np.random.RandomState(42)
X = rand.randn(3, 4)
X2 = orthonormalize(X)
assert_allclose_upto_sign(X[0] / np.linalg.norm(X[0]), X2[0])
assert_allclose(np.dot(X2, X2.T), np.eye(X2.shape[0]), atol=1E-15)
def test_random_orthonormal():
def check_random_orthonormal(N, M, rows):
X = random_orthonormal(N, M, rows=rows, random_state=42)
assert X.shape == (N, M)
if rows:
C = np.dot(X, X.T)
else:
C = np.dot(X.T, X)
assert_allclose(C, np.eye(C.shape[0]), atol=1E-15)
for M in [5]:
for N in range(1, M + 1):
yield check_random_orthonormal, N, M, True
yield check_random_orthonormal, M, N, False
def test_weighted_mean():
def check_weighted_mean(shape, axis):
rand = np.random.RandomState(0)
x = rand.rand(*shape)
w = rand.rand(*shape)
wm = weighted_mean(x, w, axis)
assert_allclose(wm, np.average(x, axis, w))
assert_allclose(wm, (w * x).sum(axis) / w.sum(axis))
for ndim in range(1, 5):
shape = tuple(range(3, 3 + ndim))
axis_tuples = chain(*(combinations(range(ndim), nax)
for nax in range(ndim + 1)))
for axis in chain([None], range(ndim), axis_tuples):
yield check_weighted_mean, shape, axis
| 2.375 | 2 |
notebooks/utils.py | djfedos/djfedos-boostrap | 0 | 12785718 | def find_max(list_of_numbers:list):
max_number = list_of_numbers[0]
for number in list_of_numbers:
if number > max_number:
max_number = number
return(max_number) | 3.796875 | 4 |
src/bxcommon/models/blockchain_peer_info.py | dolphinridercrypto/bxcommon | 12 | 12785719 | <reponame>dolphinridercrypto/bxcommon<filename>src/bxcommon/models/blockchain_peer_info.py
from dataclasses import dataclass
from typing import Optional
from bxcommon.utils.blockchain_utils.eth import eth_common_constants
@dataclass
class BlockchainPeerInfo:
ip: str
port: int
node_public_key: Optional[str] = None
blockchain_protocol_version: int = eth_common_constants.ETH_PROTOCOL_VERSION
connection_established: bool = False
def __repr__(self):
return f"BlockchainPeerInfo(ip address: {self.ip}, " \
f"port: {self.port}, " \
f"node public key: {self.node_public_key}, " \
f"blockchain protocol version: {self.blockchain_protocol_version})"
def __eq__(self, other) -> bool:
return (
isinstance(other, BlockchainPeerInfo)
and other.port == self.port
and other.ip == self.ip
)
def __hash__(self):
return hash(f"{self.ip}:{self.port}")
| 2.453125 | 2 |
Camera_comments.py | JoseZalez/Houdini-scripts | 19 | 12785720 | <filename>Camera_comments.py<gh_stars>10-100
#Add parameters to the camera
#https://www.linkedin.com/in/jose-gonzalezvfx/
import os
import sys
from PySide2.QtWidgets import QDialog, QApplication, QLineEdit, QLabel, QPushButton, QCheckBox, QHBoxLayout, QVBoxLayout
from PySide2.QtCore import Qt
class UI(QDialog):
""""""
def __init__(self, parent=None):
"""Constructor"""
super(UI, self).__init__(parent)
main_layout = QVBoxLayout()
self.setWindowTitle("Add parameters to camera")
#Keep the window on top always
self.setWindowFlags(self.windowFlags() | Qt.WindowStaysOnTopHint)
#Get Houdini window style and apply to interface
self.setStyleSheet(hou.qt.styleSheet())
self.setProperty("houdiniStyle", True)
#Create a path input
camera_layout = QHBoxLayout()
lbl = QLabel("Camera node:")
self.camera = QLineEdit("")
camera_layout.addWidget(lbl)
camera_layout.addWidget(self.camera)
camera_layout.setSpacing(10)
#Create an extension input
solver_layout = QHBoxLayout()
lbl = QLabel("Node:")
self.solver = QLineEdit("")
solver_layout.addWidget(lbl)
solver_layout.addWidget(self.solver)
solver_layout.setSpacing(10)
#Set a button to start
self.buttonCreate = QPushButton('Create')
        #Set a button to add new parameters
self.buttonAdd = QPushButton('Add')
#Set a button to delete old parms
self.buttonReset = QPushButton('Reset')
#Add all the layout together
main_layout.addLayout(camera_layout, stretch=1)
main_layout.addLayout(solver_layout, stretch=1)
main_layout.addWidget(self.buttonCreate)
main_layout.addWidget(self.buttonAdd)
main_layout.addWidget(self.buttonReset)
self.setLayout(main_layout)
#Start the main code
self.buttonCreate.clicked.connect(self.createcomment)
self.buttonAdd.clicked.connect(self.add)
self.buttonReset.clicked.connect(self.reset)
def getCameraNode(self):
camera_path = self.camera.text()
return hou.node(camera_path)
def gettext(self):
#Gets the camera and solver node
solver_path = self.solver.text()
solver = hou.node(solver_path)
camera = self.getCameraNode()
#Creates a list of parms from the selected node
all_parms = solver.parms()
#Initialize a dictionary and a list for the parms
thisdict ={}
parmlist=[]
i=0
#Iterates for each paramater, adds the name with the value in the dictionary, also adds the name to the list
filter_word=["Interpolation","Position","Value","Visualization","Control"]
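        #Consecutive parms sharing the same label are treated as the components
        #of one vector parm: the previous scalar entry is dropped and a single
        #parmTuple lookup is stored instead.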
previousname=""
for parm in all_parms:
name=parm.name()
long_name=parm.description()
if not any(x in long_name for x in filter_word):
if "enable" not in name:
if long_name == previousname:
vectorname=name[:-1]
del parmlist[-1]
i-=1
thisdict[str(i)] = "data+=" + "'"+ long_name +": "+"'"+" + "+ 'str(solver.parmTuple({}).eval())'.format("'"+vectorname+"'") + "+" + "'\\n'"
previousname=long_name
long_name+=" (vector) "
else:
thisdict[str(i)] = "data+=" + "'"+ long_name +": "+"'"+" + "+ 'str(solver.parm({}).eval())'.format("'"+name+"'") + "+" + "'\\n'"
previousname=long_name
parmlist.append(long_name)
i+=1
text = 'data=""' + "\n"+'solver = hou.node({})'.format("'"+self.solver.text()+"'") +"\n"
#Shows a list of all the parameters for the user to select which he wants
selected = hou.ui.selectFromList(parmlist, exclusive=False, title='Import parameters', column_header="Parameters", num_visible_rows=10, clear_on_cancel=False)
#Iterates for all the parms with the values from the ditionary and appends it to a string with a line jump
for x in range(len(selected)):
index = str(selected[x])
text += thisdict[index] + '\n'
return text
def createcomment(self):
text_out=self.gettext()
camera = self.getCameraNode()
text_out+="return data"
if not camera.parm("vcomment"):
#Add a string parameter to the camera input
ptg = camera.parmTemplateGroup()
parm_folder = hou.FolderParmTemplate('folder', 'Notes')
parmtemplate=hou.StringParmTemplate('vcomment', 'Text', 1)
parmtemplate.setTags({"editor": "1","editorlang": "python"})
parm_folder.addParmTemplate(parmtemplate)
ptg.append(parm_folder)
camera.setParmTemplateGroup(ptg)
#Set the paramaters with the values in the string parameter as a expression
camera.parm("vcomment").setExpression(text_out, hou.exprLanguage.Python)
else:
hou.ui.displayMessage("Please click 'Reset' to create new parameters or 'Add' to add new parameters", buttons=('OK',), severity=hou.severityType.Message, default_choice=0, close_choice=0, title="Select a node",details_expanded=False)
def add(self):
text_out=self.gettext()
camera = self.getCameraNode()
#Set the paramaters with the values in the string parameter
current_text=camera.parm("vcomment").expression()
old_out=current_text.split("\n", 1)[1]
new_text=text_out+old_out
camera.parm("vcomment").setExpression(new_text, hou.exprLanguage.Python)
def reset(self):
#Deletes the folder and the comment stored in the camera node
camera = self.getCameraNode()
ptg = camera.parmTemplateGroup()
folder_to_delete = ptg.findFolder('Notes')
ptg.remove(folder_to_delete)
camera.setParmTemplateGroup(ptg)
#Starts the script window for the user
app = QApplication.instance()
if app is None:
app = QApplication(sys.argv)
CommentUI = UI()
CommentUI.show()
| 2.65625 | 3 |
examples/turbulence.py | andimarafioti/python-acoustics | 1 | 12785721 | import numpy as np
from acoustics.turbulence import Gaussian2DTemp, VonKarman2DTemp, Comparison, Field2D
def main():
mu_0 = np.sqrt(10.0**(-6))
correlation_length = 1.0 # Typical correlation length for Gaussian spectrum.
x = 20.0
y = 0.0
z = 40.0
plane = (1,0,1)
#f_resolution = wavenumber_resolution / (2.0*np.pi)
spatial_resolution = 0.05
N = 100
min_wavenumber = 0.01
max_wavenumber = 10.0
wavenumber_resolution = (max_wavenumber - min_wavenumber) / N
"""Create an object to describe an Gaussian turbulence spectrum."""
g = Gaussian2DTemp(plane=plane, a=correlation_length, mu_0=mu_0, wavenumber_resolution=wavenumber_resolution, max_mode_order=N)
"""Create an object to describe a VonKarman turbulence spectrum."""
s = VonKarman2DTemp(plane=plane, a=correlation_length, mu_0=mu_0, wavenumber_resolution=wavenumber_resolution, max_mode_order=N)
g.plot_mode_amplitudes('Gaussian2DTemp_mode_amplitudes.png')
s.plot_mode_amplitudes('VonKarman2DTemp_mode_amplitudes.png')
c = Comparison([g, s])
c.plot_mode_amplitudes('Gaussian2DTemp_and_VonKarman2DTemp_mode_amplitudes.png')
field_g = Field2D(x=x, y=y, z=z, spatial_resolution=spatial_resolution, spectrum=g)
field_s = Field2D(x=x, y=y, z=z, spatial_resolution=spatial_resolution, spectrum=s)
field_g.generate().plot('Gaussian2DTemp_field.png')
field_s.generate().plot('VonKarman2DTemp_field.png')
if __name__ == '__main__':
main() | 3.1875 | 3 |
LightSail.py | CFP106020008/Light-Sail-Simulation | 0 | 12785722 | <filename>LightSail.py
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 24 01:09:23 2021
@author: <EMAIL>
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import solve_ivp
import matplotlib.cm as cm
from matplotlib.animation import FuncAnimation
import matplotlib as mpl
from matplotlib.gridspec import GridSpec
# Fundamental Constants
Msun = 2e30 # Solar Mass (k)
G = 6.67e-11 # Gravitational Constant
AU = 1.5e11 # Astronmical Unit (m)
rE = 1.*AU # Orbital radius of Earth (m)
Rsun = 7e8 # Radius of the sun (m)
yr2s = 86400*365 # Conversion of year to second
#%%
# Adjustable parameters
# Simulation properties
tmax = 1e0*yr2s # Simulation time
# Sail properties
aE = 1e-3 # Maxium acceleration of the sail at 1AU.
delay = 0.*yr2s # How long does the sail wait
# on the earth orbit before starting manuver
perihelion = 10*Rsun
# Visualization properties
Box_size = 3e11 # Size of the plot
frames = int(1e3) # Output frames
Tracing = False # Viewing the sail with tracing mode.
SAVE_VIDEO = False # Whether you want to save the video
#%%
def initial_condition():
def cicular(k):
v_sun_circular = np.sqrt(G*Msun/k)
return [0, -k, v_sun_circular, 0]
def eliptical():
a = AU
R_init = a*2-1*Rsun
v = np.sqrt(G*Msun*(2/R_init-1/a))
return [R_init, 0, 0, v]
return cicular(AU)
y_0 = initial_condition()
#%%
def Etot(x, y, vx, vy):
r = np.sqrt(x**2 + y**2)
K = 0.5*(vx**2 + vy**2)
return K - G*Msun/r
#%%
def Decide_Pointing(t, x, y, vx, vy):
r = np.array([x, y])
v = np.array([vx, vy])
rhat = r/np.linalg.norm(r)
vhat = v/np.linalg.norm(v)
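    # Pointing strategy: while t < delay the sail is kept perpendicular to the
    # sun line, so the radiation-pressure term (proportional to dot(r, phat)) is
    # zero and the craft coasts; afterwards it is tilted against the velocity to
    # spiral inward until `perihelion`, then along the velocity to gain speed.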
Acc = False
if t > delay:
if np.linalg.norm(r)>perihelion and Acc == False:
phat = (rhat - vhat)/np.sqrt(2)
else:
Acc = True
phat = (rhat + vhat)/np.sqrt(2)
else:
phat = np.array([-rhat[1], rhat[0]])
return phat
#%%
def function(t, y):
r_vec = y[:2]
r = np.linalg.norm(r_vec)
v_vec = y[2:]
phat = Decide_Pointing(t, y[0], y[1], y[2], y[3])
dxdt = v_vec[0]
dydt = v_vec[1]
a_rp = aE*rE**2/r**2*np.dot(r_vec, phat)/np.linalg.norm(r_vec)*phat
a_g = -G*Msun/r**3*r_vec
a = a_rp + a_g
dvxdt = a[0]
dvydt = a[1]
return np.array([dxdt, dydt, dvxdt, dvydt])
#%%
# Solving the orbit
sol = solve_ivp(fun=function,
t_span=[0, tmax],
y0=y_0,
t_eval=np.linspace(0,tmax,frames),
method='LSODA')
t = sol.t
Data = sol.y
x = Data[0,:]
y = Data[1,:]
vx = Data[2,:]
vy = Data[3,:]
#%%
# Visualization Setup
COLOR = '#303030'
LineColor = 'silver'
#fig, ax = plt.subplots(facecolor=COLOR)
fig = plt.figure(figsize = (8, 4.5), facecolor=COLOR)
gs = GridSpec(2, 4, figure=fig)
# Picture
ax = fig.add_subplot(gs[:, :2])
ax.set_facecolor(COLOR)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.spines['bottom'].set_color(COLOR)
ax.spines['top'].set_color(COLOR)
ax.spines['right'].set_color(COLOR)
ax.spines['left'].set_color(COLOR)
# Solar system bodies
sun = plt.Circle((0, 0), Rsun, color='y')
mercury = plt.Circle((0, 0), 0.387*AU, edgecolor='cyan', fill=False)
venus = plt.Circle((0, 0), 0.723*AU, edgecolor='y', fill=False)
earth = plt.Circle((0, 0), 1.*AU, edgecolor='skyblue', fill=False)
mars = plt.Circle((0, 0), 1.524*AU, edgecolor='r', fill=False)
ax.add_patch(sun)
ax.add_patch(mercury)
ax.add_patch(venus)
ax.add_patch(earth)
ax.add_patch(mars)
ax.set_aspect('equal', 'box')
line, = ax.plot(x[0], y[0], color='silver', linestyle='-', linewidth=1)
dot, = ax.plot([], [], color='silver', marker='o', markersize=1, markeredgecolor='w', linestyle='')
#Vel = ax.text(0.05, 0.9, 'Velocity: {:.2e} m/s'.format(np.sqrt(vx[0]**2 + vy[0]**2)), horizontalalignment='left',
# verticalalignment='top', transform=ax.transAxes, color='w')
#E_tot = ax.text(0.05, 0.85, 'Specific Total Energy: {:.2e} J/kg'.format(Etot(x[0], y[0], vx[0], vy[0])), horizontalalignment='left',
# verticalalignment='top', transform=ax.transAxes, color='w')
#Time = ax.text(0.05, 0.95, 'Time: {:.2f} yr'.format(t[0]/86400/365), horizontalalignment='left',
# verticalalignment='top', transform=ax.transAxes, color='w')
ax.set_xlim([-Box_size,Box_size])
ax.set_ylim([-Box_size,Box_size])
#%%
# Velocity Plot
ax1 = fig.add_subplot(gs[0, 2:])
ax1.set_facecolor(COLOR)
velline, = ax1.plot(t[0]/yr2s, np.sqrt(vx[0]**2+vy[0]**2), color='silver')
ax1.spines['bottom'].set_color(LineColor)
ax1.spines['top'].set_color(LineColor)
ax1.spines['right'].set_color(LineColor)
ax1.spines['left'].set_color(LineColor)
ax1.set_xlim([0,tmax/yr2s])
ax1.set_ylim([0,np.max(np.sqrt(vx**2+vy**2))*1.2])
ax1.tick_params(labelcolor=LineColor, labelsize='medium', width=3, colors=LineColor)
ax1.ticklabel_format(axis='y', style='sci', useMathText=True, scilimits=(4,5))
ax1.set_xlabel('Time (yr)')
ax1.set_ylabel('Velocity (m/s)')
ax1.xaxis.label.set_color(LineColor)
ax1.yaxis.label.set_color(LineColor)
#%%
# Energy Plot
ax2 = fig.add_subplot(gs[1, 2:])
ax2.set_facecolor(COLOR)
Etotline, = ax2.plot(t[0]/yr2s, Etot(x[0], y[0], vx[0], vy[0]), color='silver')
ax2.spines['bottom'].set_color(LineColor)
ax2.spines['top'].set_color(LineColor)
ax2.spines['right'].set_color(LineColor)
ax2.spines['left'].set_color(LineColor)
ax2.set_xlim([0, tmax/yr2s])
ax2.set_ylim([np.min(Etot(x, y, vx, vy))*1.2, np.max(Etot(x, y, vx, vy))*1.2])
ax2.tick_params(labelcolor=LineColor, labelsize='medium', width=3, colors=LineColor)
ax2.ticklabel_format(style='sci', useMathText=True)
ax2.set_xlabel('Time (yr)')
ax2.set_ylabel('Specific total energy (J/kg)')
ax2.xaxis.label.set_color(LineColor)
ax2.yaxis.label.set_color(LineColor)
plt.tight_layout()
#%%
ms2AUyr = 86400*365/1.5e11
def update(i):
dot.set_data(x[i], y[i])
line.set_data(x[:i], y[:i])
velline.set_data(t[:i]/yr2s, np.sqrt(vx[:i]**2+vy[:i]**2))
Etotline.set_data(t[:i]/yr2s, Etot(x[:i], y[:i], vx[:i], vy[:i]))
r = np.sqrt(x[i]**2 + y[i]**2)
if Tracing:
ax.set_xlim([-1.5*r,1.5*r])
ax.set_ylim([-1.5*r,1.5*r])
O1 = ax.add_patch(sun)
O2 = ax.add_patch(mercury)
O3 = ax.add_patch(venus)
O4 = ax.add_patch(earth)
O5 = ax.add_patch(mars)
#Vel.set_text('Velocity: {:.2e} m/s'.format(np.sqrt(vx[i]**2 + vy[i]**2)))
#Vel.set_text('Velocity: {:.2e} AU/yr'.format(np.sqrt(vx[i]**2 + vy[i]**2)*ms2AUyr))
#E_tot.set_text('Total Energy: {:.2e} J/kg'.format(Etot(x[i], y[i], vx[i], vy[i])))
#Time.set_text('Time: {:.2f} yr'.format(t[i]/86400/365))
return [dot, line, velline, Etotline, O1, O2, O3, O4, O5]
ani = FuncAnimation(fig=fig,
func=update,
frames=frames,
interval=10000/frames,
blit=True,
repeat=False)
if SAVE_VIDEO:
ani.save("sail.mp4", dpi=300, savefig_kwargs={'facecolor':COLOR})
plt.show()
| 2.875 | 3 |
tests/test_0434.py | msztylko/2020ify-leetcoding | 0 | 12785723 | import ctypes
import pytest
c_lib = ctypes.CDLL('../solutions/0434-segment-string/segment-string.so')
@pytest.mark.parametrize('str, ans',
[(b'Hello, my name is John', 5),
(b'Hello', 1),
(b"love live! mu'sic forever", 4),
(b"", 0)])
def test_segment_string(str, ans):
out = c_lib.countSegments(str)
assert out == ans
| 2.421875 | 2 |
polyaxon/hpsearch/tasks/health.py | elyase/polyaxon | 0 | 12785724 | <gh_stars>0
from checks import health_task
from polyaxon.celery_api import celery_app
from polyaxon.settings import HPCeleryTasks
@celery_app.task(name=HPCeleryTasks.HP_HEALTH, ignore_result=False)
def hp_health(x, y):
return health_task.health_task(x, y)
| 1.59375 | 2 |
openhgnn/trainerflow/GATNE_trainer.py | clearhanhui/OpenHGNN | 0 | 12785725 | import torch as th
from tqdm import tqdm
from . import BaseFlow, register_flow
from ..models import build_model
from ..models.GATNE import NSLoss
import torch
from tqdm.auto import tqdm
from numpy import random
import dgl
from ..sampler.GATNE_sampler import NeighborSampler, generate_pairs
@register_flow("GATNE_trainer")
class GATNE(BaseFlow):
def __init__(self, args):
super(GATNE, self).__init__(args)
self.model = build_model(self.model_name).build_model_from_args(self.args, self.hg).to(self.device)
self.train_pairs = None
self.train_dataloader = None
self.nsloss = None
self.neighbor_sampler = None
self.orig_val_hg = self.task.val_hg
self.orig_test_hg = self.task.test_hg
self.preprocess()
self.train()
def preprocess(self):
assert len(self.hg.ntypes) == 1
bidirected_hg = dgl.to_bidirected(dgl.to_simple(self.hg.to('cpu')))
all_walks = []
for etype in self.hg.etypes:
nodes = torch.unique(bidirected_hg.edges(etype=etype)[0]).repeat(self.args.rw_walks)
traces, types = dgl.sampling.random_walk(
bidirected_hg, nodes, metapath=[etype] * (self.args.rw_length - 1)
)
all_walks.append(traces)
self.train_pairs = generate_pairs(all_walks, self.args.window_size, self.args.num_workers)
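        # generate_pairs turns the per-edge-type random walks into
        # (centre, context, edge-type) skip-gram style training pairs using the
        # configured window size.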
self.neighbor_sampler = NeighborSampler(bidirected_hg, [self.args.neighbor_samples])
self.train_dataloader = torch.utils.data.DataLoader(
self.train_pairs,
batch_size=self.args.batch_size,
collate_fn=self.neighbor_sampler.sample,
shuffle=True,
num_workers=self.args.num_workers,
pin_memory=True,
)
self.nsloss = NSLoss(self.hg.num_nodes(), self.args.neg_size, self.args.dim).to(self.device)
self.optimizer = torch.optim.Adam(
[{"params": self.model.parameters()}, {"params": self.nsloss.parameters()}], lr=self.args.learning_rate
)
return
def train(self):
best_score = 0
patience = 0
for self.epoch in range(self.args.max_epoch):
self._full_train_step()
cur_score = self._full_test_step()
if cur_score > best_score:
best_score = cur_score
patience = 0
else:
patience += 1
if patience > self.args.patience:
self.logger.train_info(f'Early Stop!\tEpoch:{self.epoch:03d}.')
break
def _full_train_step(self):
self.model.train()
random.shuffle(self.train_pairs)
data_iter = tqdm(
self.train_dataloader,
desc="epoch %d" % self.epoch,
total=(len(self.train_pairs) + (self.args.batch_size - 1)) // self.args.batch_size,
)
avg_loss = 0.0
for i, (block, head_invmap, tails, block_types) in enumerate(data_iter):
self.optimizer.zero_grad()
# embs: [batch_size, edge_type_count, embedding_size]
block_types = block_types.to(self.device)
embs = self.model(block[0].to(self.device))[head_invmap]
embs = embs.gather(
1, block_types.view(-1, 1, 1).expand(embs.shape[0], 1, embs.shape[2])
)[:, 0]
loss = self.nsloss(
block[0].dstdata[dgl.NID][head_invmap].to(self.device),
embs,
tails.to(self.device),
)
loss.backward()
self.optimizer.step()
avg_loss += loss.item()
post_fix = {
"epoch": self.epoch,
"iter": i,
"avg_loss": avg_loss / (i + 1),
"loss": loss.item(),
}
data_iter.set_postfix(post_fix)
def _full_test_step(self):
self.model.eval()
# {'1': {}, '2': {}}
final_model = dict(
zip(self.hg.etypes, [th.empty(self.hg.num_nodes(), self.args.dim) for _ in range(len(self.hg.etypes))]))
for i in tqdm(range(self.hg.num_nodes()), desc='Evaluating...'):
train_inputs = (
torch.tensor([i for _ in range(len(self.hg.etypes))])
.unsqueeze(1)
.to(self.device)
) # [i, i]
train_types = (
torch.tensor(list(range(len(self.hg.etypes)))).unsqueeze(1).to(self.device)
) # [0, 1]
pairs = torch.cat(
(train_inputs, train_inputs, train_types), dim=1
) # (2, 3)
(
train_blocks,
train_invmap,
fake_tails,
train_types,
) = self.neighbor_sampler.sample(pairs)
node_emb = self.model(train_blocks[0].to(self.device))[train_invmap]
node_emb = node_emb.gather(
1,
train_types.to(self.device)
.view(-1, 1, 1)
.expand(node_emb.shape[0], 1, node_emb.shape[2]),
)[:, 0]
for j in range(len(self.hg.etypes)):
final_model[self.hg.etypes[j]][i] = node_emb[j].detach()
metric = {}
score = []
for etype in self.hg.etypes:
self.task.val_hg = dgl.edge_type_subgraph(self.orig_val_hg, [etype])
self.task.test_hg = dgl.edge_type_subgraph(self.orig_test_hg, [etype])
for split in ['test', 'valid']:
n_embedding = {self.hg.ntypes[0]: final_model[etype].to(self.device)}
res = self.task.evaluate(n_embedding=n_embedding, mode=split)
metric[split] = res
if split == 'valid':
score.append(res.get('roc_auc'))
self.logger.train_info(etype + self.logger.metric2str(metric))
avg_score = sum(score) / len(score)
return avg_score
| 2.078125 | 2 |
STACK/minStack.py | rajansh87/Algorithms-Implementations | 1 | 12785726 | class Stack:
def __init__(self):
self.min = None
self.stack = list()
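    # Min-stack trick: pushing an item below the current minimum stores the
    # encoded value 2*item - old_min (always smaller than item) and updates
    # self.min; popping a stored value below self.min restores the previous
    # minimum as 2*self.min - stored_value.
    # Example: push 5 then 3 -> stack [5, 1], min 3; pop -> 1 < 3, so min
    # becomes 2*3 - 1 = 5 again.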
# function to get minimum element of the stack
def getMin(self):
if len(self.stack) == 0:
return -1
else:
return self.min
# function to return the top element
def top(self):
if len(self.stack) == 0:
return -1
top = self.stack[-1]
if self.min > top:
return self.min
else:
return top
# function to pop the element from the stack
def pop(self):
if len(self.stack) == 0:
return
top = self.stack[-1]
self.stack.pop()
if top < self.min:
self.min = 2*self.min-top
# function to append element in the array
def push(self, item):
if len(self.stack) == 0:
self.min = item
self.stack.append(item)
return
if item < self.min:
self.stack.append(2*item - self.min)
self.min = item
else:
self.stack.append(item)
| 3.921875 | 4 |
c/exam/B.py | wangrunlin/lanqiao | 0 | 12785727 | n = 2020
cnt = 0
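# Count how many times the digit 2 appears among the integers 1..2020 by
# repeatedly taking the last decimal digit and dropping it with floor division.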
for i in range(2, n+1):
tmp = i
while tmp > 1:
if tmp % 10 == 2:
cnt += 1
        tmp //= 10
print(cnt)
| 3.25 | 3 |
Building/urls.py | LukaszHoszowski/Django_ProEstate | 1 | 12785728 | <gh_stars>1-10
from django.urls import path
from Building.views import BuildingListView, BuildingDetailView, \
BuildingCartographyView, BuildingCoopView, BuildingPhotosView, BuildingDocsView, BuildingPhotosCreate, \
BuildingDocsCreate, BuildingFlatsView, FlatDetailView, FlatUpdateView, FlatAddUserUpdate, FlatDeleteUserUpdate, \
MeasureUpdateView
app_name = 'Building'
urlpatterns = [
# Building urls
path('', BuildingListView.as_view(), name='buildings'),
path('<slug:slug>/', BuildingDetailView.as_view(), name='building_details'),
path('<slug:slug>/flats/', BuildingFlatsView.as_view(), name='building_flats'),
path('<slug:slug>/cartography/', BuildingCartographyView.as_view(), name='building_cartography'),
path('<slug:slug>/coop/', BuildingCoopView.as_view(), name='building_coop'),
path('<slug:slug>/photos/', BuildingPhotosView.as_view(), name='building_photos'),
path('<slug:slug>/add_photos/', BuildingPhotosCreate.as_view(), name='building_photos_add'),
path('<slug:slug>/documents/', BuildingDocsView.as_view(), name='building_documents'),
path('<slug:slug>/add_docs/', BuildingDocsCreate.as_view(), name='building_docs_add'),
# Flat urls
path('<slug:slug>/<int:pk>', FlatDetailView.as_view(), name='flat_details'),
path('<slug:slug>/<int:pk>/update', FlatUpdateView.as_view(), name='flat_update'),
path('add/user_to_flat/<int:pk>/', FlatAddUserUpdate.as_view(), name='flat_add_user'),
path('del/user_from_flat/<int:pk>/', FlatDeleteUserUpdate.as_view(), name='flat_delete_user'),
path('update/measure/<int:pk>/', MeasureUpdateView.as_view(), name='measure_update'),
]
| 1.867188 | 2 |
provider_daft/app/main.py | madpin/renthub | 0 | 12785729 | # import logging
from pathlib import Path
from enum import Enum
import uvicorn
from fastapi import FastAPI
from custom_logger import CustomizeLogger
import schemas
from mappings.daft_listings import get_daft_search_result
from mappings.listing_details import get_listing_details
# logger = logging.getLogger(__name__)
config_path=Path(__file__).with_name("custom_logger.json")
def create_app() -> FastAPI:
app = FastAPI(title='CustomLogger', debug=False)
logger = CustomizeLogger.make_logger(config_path)
app.logger = logger
return app
app = create_app()
# app = FastAPI()
@app.get("/search_result/", response_model=schemas.SearchResultList)
async def search_result():
result = await get_daft_search_result()
return result
class DaftMethodListing(str, Enum):
json_details = "json_details"
selenium = "selenium"
@app.get("/listing_details/", response_model=schemas.DaftListing)
async def daft_listing(url, method: DaftMethodListing):
result = await get_listing_details(url)
return result
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=8000)
| 2.328125 | 2 |
src/JaccardWikipediaHeart.py | nik7273/computational-medical-knowledge | 0 | 12785730 | # -*- coding: utf-8 -*-
#GET JACCARD SIMILARITY OF ALL HEART RELATED ARTICLES ON WIKIPEDIA (BETWEEN THEMSELVES)
from utils import jaccard
import wikipedia, codecs, nltk
from findRelevantArticles import findRelevantArticles
from pprint import pprint
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import subprocess as sub #Running BASH script within python???
import matplotlib.pyplot as plt
import numpy as np
lemma = nltk.WordNetLemmatizer()
relArticles = findRelevantArticles("Heart Attack")
articlefilelist = []
wordslist = ['../STEMI_words','../NSTEMI_words','../WIKI_words']
for article in relArticles:
articlefilename = "content_"+str(article)+".txt"
with codecs.open(articlefilename,'wb', 'utf-8') as outfile:
content = wikipedia.page(article).content
        content = [lemma.lemmatize(word) for word in word_tokenize(content)]
content = set(content)
for word in content:
print>>outfile,word
articlefilelist.append(articlefilename)
for piece in wordslist:
articlefilelist.append(piece)
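# Pairwise Jaccard similarity between every pair of word files: the Wikipedia
# article dumps plus the STEMI/NSTEMI/WIKI word lists appended above.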
matrix = np.matrix([[jaccard(i,j) for i in articlefilelist] for j in articlefilelist])
print matrix
with open('jaccardVals', 'wb') as outfile:
print>>outfile,matrix
| 2.96875 | 3 |
test.py | mu2019/pyrestrecord | 0 | 12785731 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import restrecord
a=restrecord.Meta()
a.a = 'abc'
a.b = 1
print(a['a'],a.a,a.b)
del a.a
print(a.a)
print(dir(a))
print('record')
rc = restrecord.Record((['a','b'],[1,2]))
print(rc.keys())
print(rc[0], rc.a)
| 2.421875 | 2 |
cogs/mod.py | jeraldlyh/horizon | 2 | 12785732 | import asyncio
import discord
import datetime
import pytz
import random
import colorsys
import os
from discord.ext import commands
from cogs.utils.embed import passembed
from cogs.utils.embed import errorembed
class Mod(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.case = {}
def check_me(ctx):
        return str(ctx.message.author.id) == os.getenv("OWNER_ID")
# Purge message
@commands.command()
@commands.has_any_role('Server Moderator')
async def purge(self, ctx, amount=100):
amount = int(amount)
await ctx.channel.purge(limit=amount+1)
pembed = passembed(description='{0} messages have been deleted.'.format(amount))
await ctx.send(embed=pembed, delete_after=25)
@purge.error
async def purge_error(self, ctx, error):
if isinstance(error, commands.CheckFailure):
return
# Ban command
@commands.command()
@commands.has_any_role('Server Moderator')
async def ban(self, ctx, user: discord.Member, *, reason: str=None):
if reason is None:
reason = 'no reason'
await user.ban(reason=reason)
pembed = passembed(description='{0} has been banned by {1} due to {2}.'.format(user, ctx.message.author, reason))
await ctx.send(embed=pembed)
# Logging
for channel in ctx.guild.channels:
if channel.name == 'mod-logs':
guild_id = ctx.message.guild.id
if guild_id in self.case:
self.case[guild_id]+=1
embed=discord.Embed(color=discord.Color.red())
embed.timestamp=datetime.datetime.now(tz=pytz.timezone('Asia/Singapore'))
embed.set_author(name='Case #{0} | Ban | {1}'.format(int(self.case.get(guild_id)), user), icon_url=user.avatar_url)
embed.add_field(name='User',value='{0}'.format(user.mention), inline=True)
embed.add_field(name='Moderator',value='{0}'.format(ctx.message.author.mention), inline=True)
embed.add_field(name='Reason', value='{0}'.format(reason), inline=True)
embed.set_footer(text='ID: {0}'.format(user.id))
await channel.send(embed=embed)
else:
self.case[guild_id]=0
self.case[guild_id]+=1
print(self.case)
embed=discord.Embed(color=discord.Color.red())
embed.timestamp=datetime.datetime.now(tz=pytz.timezone('Asia/Singapore'))
embed.set_author(name='Case #{0} | Ban | {1}'.format(int(self.case.get(guild_id)), user), icon_url=user.avatar_url)
embed.add_field(name='User',value='{0}'.format(user.mention), inline=True)
embed.add_field(name='Moderator',value='{0}'.format(ctx.message.author.mention), inline=True)
embed.add_field(name='Reason', value='{0}'.format(reason), inline=True)
embed.set_footer(text='ID: {0}'.format(user.id))
await channel.send(embed=embed)
@ban.error
async def ban_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
eembed = errorembed(description='Please indicate the User you wish to ban.')
return await ctx.send(embed=eembed)
elif isinstance(error, commands.BadArgument):
eembed = errorembed(description='Invalid User. Please tag the User you wish to ban.')
return await ctx.send(embed=eembed)
elif isinstance(error, commands.CheckFailure):
return
# Force ban command
@commands.command()
@commands.has_any_role('Server Moderator')
async def forceban(self, ctx, id: int, *, reason: str=None):
if reason is None:
reason = 'no reason'
try:
            limitedUser = await self.bot.fetch_user(id)
            await ctx.guild.ban(limitedUser, reason=reason)
            pembed = passembed(description='{0} has been banned by {1} due to {2}.'.format(limitedUser, ctx.message.author, reason))
            await ctx.send(embed=pembed)
except Exception as e:
if 'Unknown User' in str(e):
eembed = errorembed(description='User ID could not be found. Please input a valid User ID.')
await ctx.send(embed=eembed)
@forceban.error
async def forceban_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
eembed = errorembed(description='Please indicate the User you wish to force ban.')
return await ctx.send(embed=eembed)
elif isinstance(error, commands.BadArgument):
eembed = errorembed(description='User ID is invalid. Please input a valid User ID.')
return await ctx.send(embed=eembed)
elif isinstance(error, commands.CheckFailure):
return
# Unban command
@commands.command()
@commands.has_any_role('Server Moderator')
async def unban(self, ctx, id: int):
try:
banuser = await self.bot.fetch_user(id)
await ctx.guild.unban(banuser)
            pembed = passembed(description='{0} has been unbanned by {1}.'.format(banuser, ctx.message.author))
await ctx.send(embed=pembed)
except Exception as e:
if 'Unknown Ban' in str(e):
eembed = errorembed(description='{0} {1} is not banned in the server. Please check again.'.format(ctx.message.author.mention, banuser))
await ctx.send(embed=eembed)
elif 'Unknown User' in str(e):
eembed = errorembed(description='User ID could not be found. Please input a valid User ID.')
await ctx.send(embed=eembed)
# Logging
for channel in ctx.guild.channels:
if channel.name == 'mod-logs':
guild_id = ctx.message.guild.id
if guild_id in self.case:
self.case[guild_id]+=1
print(self.case)
embed=discord.Embed(color=discord.Color.red())
embed.timestamp=datetime.datetime.now(tz=pytz.timezone('Asia/Singapore'))
embed.set_author(name='Case #{0} | Unban | {1}'.format(int(self.case.get(guild_id)), banuser), icon_url=banuser.avatar_url)
embed.add_field(name='User',value='{0}'.format(banuser.mention), inline=True)
embed.add_field(name='Moderator',value='{0}'.format(ctx.message.author.mention), inline=True)
embed.set_footer(text='ID: {0}'.format(banuser.id))
await channel.send(embed=embed)
else:
self.case[guild_id]=0
self.case[guild_id]+=1
print(self.case)
embed=discord.Embed(color=discord.Color.red())
embed.timestamp=datetime.datetime.now(tz=pytz.timezone('Asia/Singapore'))
embed.set_author(name='Case #{0} | Unban | {1}'.format(int(self.case.get(guild_id)), banuser), icon_url=banuser.avatar_url)
embed.add_field(name='User',value='{0}'.format(banuser.mention), inline=True)
embed.add_field(name='Moderator',value='{0}'.format(ctx.message.author.mention), inline=True)
embed.set_footer(text='ID: {0}'.format(banuser.id))
await channel.send(embed=embed)
@unban.error
async def unban_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
eembed = errorembed(description='Please indicate the User ID you wish to unban.')
return await ctx.send(embed=eembed)
elif isinstance(error, commands.BadArgument):
eembed = errorembed(description='User ID is either not banned or invalid/not found. Please input a valid User ID.')
return await ctx.send(embed=eembed)
elif isinstance(error, commands.CheckFailure):
return
# Mute command
@commands.command()
@commands.has_any_role('Server Moderator')
async def mute(self, ctx, user: discord.Member, reason: str=None, time: int=5):
# If not specified, defaulted as 5 minutes.
secs = time * 60
if reason is None:
reason = 'no reason'
for channel in ctx.guild.channels:
if isinstance(channel, discord.TextChannel):
                await channel.set_permissions(user, overwrite=discord.PermissionOverwrite(send_messages=False))
            elif isinstance(channel, discord.VoiceChannel):
                await channel.set_permissions(user, overwrite=discord.PermissionOverwrite(connect=False))
pembed = passembed(description='{0} has been muted for {1} minutes due to {2}.'.format(user, time, reason))
await ctx.send(embed=pembed)
# Logging
for channel in ctx.guild.channels:
if channel.name == 'mod-logs':
guild_id = ctx.message.guild.id
if guild_id in self.case:
self.case[guild_id]+=1
print(self.case)
embed=discord.Embed(color=discord.Color.red())
embed.timestamp=datetime.datetime.now(tz=pytz.timezone('Asia/Singapore'))
embed.set_author(name='Case #{0} | Mute | {1}'.format(int(self.case.get(guild_id)), user.name), icon_url=user.avatar_url)
embed.add_field(name='User',value='{0}'.format(user.mention), inline=True)
embed.add_field(name='Moderator',value='{0}'.format(ctx.message.author.mention), inline=True)
embed.add_field(name='Length', value='{0} mins'.format(time), inline=True)
embed.add_field(name='Reason', value='{0}'.format(reason), inline=True)
embed.set_footer(text='ID: {0}'.format(user.id))
await channel.send(embed=embed)
else:
self.case[guild_id]=0
self.case[guild_id]+=1
print(self.case)
embed=discord.Embed(color=discord.Color.red())
embed.timestamp=datetime.datetime.now(tz=pytz.timezone('Asia/Singapore'))
case = self.case.get(guild_id)
embed.set_author(name='Case #{0} | Mute | {1}'.format(int(self.case.get(guild_id)), user.name), icon_url=user.avatar_url)
embed.add_field(name='User',value='{0}'.format(user.mention), inline=True)
embed.add_field(name='Moderator',value='{0}'.format(ctx.message.author.mention), inline=True)
embed.add_field(name='Length', value='{0} mins'.format(time), inline=True)
embed.add_field(name='Reason', value='{0}'.format(reason), inline=True)
embed.set_footer(text='ID: {0}'.format(user.id))
await channel.send(embed=embed)
await asyncio.sleep(secs)
for channel in ctx.guild.channels:
if isinstance(channel, discord.TextChannel):
                await channel.set_permissions(user, overwrite=None)
            elif isinstance(channel, discord.VoiceChannel):
                await channel.set_permissions(user, overwrite=None)
pembed = passembed(description='{0} has been unmuted in the server.'.format(user))
await ctx.send(embed=pembed)
# Logging
for channel in ctx.guild.channels:
if channel.name == 'mod-logs':
guild_id = ctx.message.guild.id
if guild_id in self.case:
self.case[guild_id]+=1
print(self.case)
embed=discord.Embed(color=discord.Color.red())
embed.timestamp=datetime.datetime.now(tz=pytz.timezone('Asia/Singapore'))
embed.set_author(name='Case #{0} | Unmute | {1}'.format(int(self.case.get(guild_id)), user.name), icon_url=user.avatar_url)
embed.add_field(name='User',value='{0}'.format(user.mention), inline=True)
embed.add_field(name='Moderator',value='{0}'.format(self.bot.user.mention), inline=True)
embed.add_field(name='Reason', value='timeout', inline=True)
embed.set_footer(text='ID: {0}'.format(user.id))
await channel.send(embed=embed)
else:
self.case[guild_id]=0
self.case[guild_id]+=1
print(self.case)
embed=discord.Embed(color=discord.Color.red())
embed.timestamp=datetime.datetime.now(tz=pytz.timezone('Asia/Singapore'))
case = self.case.get(guild_id)
embed.set_author(name='Case #{0} | Unmute | {1}'.format(int(self.case.get(guild_id)), user.name), icon_url=user.avatar_url)
embed.add_field(name='User',value='{0}'.format(user.mention), inline=True)
embed.add_field(name='Moderator',value='{0}'.format(self.bot.user.mention), inline=True)
embed.add_field(name='Reason', value='timeout', inline=True)
embed.set_footer(text='ID: {0}'.format(user.id))
await channel.send(embed=embed)
@mute.error
async def mute_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
eembed = errorembed(description='Please indicate the user you wish to mute.')
return await ctx.send(embed=eembed)
elif isinstance(error, commands.BadArgument):
eembed = errorembed(description='User could not be found. Please tag a valid User.')
return await ctx.send(embed=eembed)
elif isinstance(error, commands.CheckFailure):
return
# Unmute command
@commands.command()
@commands.has_any_role('Server Moderator')
async def unmute(self, ctx, user: discord.Member, reason: str=None):
if reason is None:
reason = 'no reason'
for channel in ctx.guild.channels:
if isinstance(channel, discord.TextChannel):
                await channel.set_permissions(user, send_messages=None)
            elif isinstance(channel, discord.VoiceChannel):
                await channel.set_permissions(user, connect=None)
pembed = passembed(description='{0} has been unmuted in the server.'.format(user))
await ctx.send(embed=pembed)
# Logging
for channel in ctx.guild.channels:
if channel.name == 'mod-logs':
guild_id = ctx.message.guild.id
if guild_id in self.case:
self.case[guild_id]+=1
print(self.case)
embed=discord.Embed(color=discord.Color.red())
embed.timestamp=datetime.datetime.now(tz=pytz.timezone('Asia/Singapore'))
embed.set_author(name='Case #{0} | Unmute | {1}'.format(int(self.case.get(guild_id)), user.name), icon_url=user.avatar_url)
embed.add_field(name='User',value='{0}'.format(user.mention), inline=True)
embed.add_field(name='Moderator',value='{0}'.format(ctx.message.author.mention), inline=True)
embed.add_field(name='Reason', value='{0}'.format(reason), inline=True)
embed.set_footer(text='ID: {0}'.format(user.id))
await channel.send(embed=embed)
else:
self.case[guild_id]=0
self.case[guild_id]+=1
print(self.case)
embed=discord.Embed(color=discord.Color.red())
embed.timestamp=datetime.datetime.now(tz=pytz.timezone('Asia/Singapore'))
case = self.case.get(guild_id)
embed.set_author(name='Case #{0} | Unmute | {1}'.format(int(self.case.get(guild_id)), user.name), icon_url=user.avatar_url)
embed.add_field(name='User',value='{0}'.format(user.mention), inline=True)
embed.add_field(name='Moderator',value='{0}'.format(ctx.message.author.mention), inline=True)
embed.add_field(name='Reason', value='{0}'.format(reason), inline=True)
embed.set_footer(text='ID: {0}'.format(user.id))
await channel.send(embed=embed)
@unmute.error
async def unmute_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
eembed = errorembed(description='Please indicate the user you wish to unmute.')
return await ctx.send(embed=eembed)
elif isinstance(error, commands.BadArgument):
eembed = errorembed(description='User could not be found. Please tag a valid User.')
return await ctx.send(embed=eembed)
elif isinstance(error, commands.CheckFailure):
return
# Announce command
@commands.command()
@commands.has_any_role('Server Moderator')
async def announce(self, ctx, channel: discord.TextChannel, *,message: str):
await channel.send(message)
@announce.error
async def announce_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
eembed = errorembed(description='Please specify text channel in the command.')
return await ctx.send(embed=eembed)
elif isinstance(error, commands.BadArgument):
eembed = errorembed(description='Channel could not be found in the server. Please specify the correct text channel.')
return await ctx.send(embed=eembed)
elif 'message is a required argument' in str(error):
eembed = errorembed(description='Please indicate your message in the command.')
return await ctx.send(embed=eembed)
elif isinstance(error, commands.CheckFailure):
return
# Embed announce command
@commands.command()
@commands.has_any_role('Server Moderator')
async def emannounce(self, ctx, channel: discord.TextChannel, *, message: str):
r, g, b = tuple(int(x * 255) for x in colorsys.hsv_to_rgb(random.random(), 1, 1))
embed=discord.Embed(description="{0}".format(message), color=discord.Color((r << 16) + (g << 8) + b), icon_url=self.bot.user.avatar_url)
#embed.timestamp=datetime.datetime.now(tz=pytz.timezone('Asia/Singapore'))
#embed.set_footer(text='Announced by {0}'.format(ctx.message.author), icon_url=ctx.message.author.avatar_url)
await channel.send(embed=embed)
@emannounce.error
async def emannounce_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
eembed = errorembed(description='Please specify text channel in the command.')
return await ctx.send(embed=eembed)
elif isinstance(error, commands.BadArgument):
eembed = errorembed(description='Channel could not be found. Please specify the correct text channel.')
return await ctx.send(embed=eembed)
elif 'message is a required argument' in str(error):
eembed = errorembed(description='Please indicate your message in the command.')
return await ctx.send(embed=eembed)
elif isinstance(error, commands.CheckFailure):
return
# Set watching status
@commands.command()
@commands.check(check_me)
async def watching(self, ctx, *name: str):
type = discord.ActivityType.watching
        activity = discord.Activity(name=' '.join(name), type=type)
await self.bot.change_presence(activity=activity)
pembed = passembed(description='Status has been updated.')
await ctx.send(embed=pembed, delete_after=5)
#@watching.error
#async def watching_error(self, ctx, error):
# if isinstance(error, commands.CheckFailure):
# return
# Resets status of bot
@commands.command()
@commands.check(check_me)
async def reset(self, ctx):
        await self.bot.change_presence(activity=discord.Activity(name=f'{str(len(self.bot.users))} users in FortniteAsia', type=2))
pembed = passembed(description='Status has been reseted.')
await ctx.send(embed=pembed, delete_after=5)
@reset.error
async def reset_error(self, ctx, error):
if isinstance(error, commands.CheckFailure):
return
# Lock command
@commands.command()
@commands.has_any_role('Server Moderator')
async def lock(self, ctx, channelname: discord.TextChannel=None):
overwrite = discord.PermissionOverwrite(send_messages=False)
# Can be used without specifying channel name
if channelname is None:
await ctx.message.channel.set_permissions(ctx.guild.default_role, overwrite=overwrite)
pembed = passembed(description='{0} has been locked by {1}.'.format(ctx.channel.mention, ctx.message.author))
await ctx.send(embed=pembed)
else:
await channelname.set_permissions(ctx.guild.default_role, overwrite=overwrite)
pembed = passembed(description='{0} has been locked by {1}.'.format(channelname.mention, ctx.message.author))
await ctx.send(embed=pembed)
@lock.error
async def lock_error(self, ctx, error):
if isinstance(error, commands.CheckFailure):
return
# Unlock command
@commands.command()
@commands.has_any_role('Server Moderator')
async def unlock(self, ctx, channelname: discord.TextChannel=None):
overwrite = discord.PermissionOverwrite(send_messages=True)
# Can be used without specifying channel name
if channelname is None:
await ctx.message.channel.set_permissions(ctx.guild.default_role, overwrite=overwrite)
pembed = passembed(description='{0} has been unlocked by {1}.'.format(ctx.channel.mention, ctx.message.author))
await ctx.send(embed=pembed)
else:
await channelname.set_permissions(ctx.guild.default_role, overwrite=overwrite)
pembed = passembed(description='{0} has been unlocked by {1}.'.format(channelname.mention, ctx.message.author))
await ctx.send(embed=pembed)
@unlock.error
async def unlock_error(self, ctx, error):
if isinstance(error, commands.CheckFailure):
return
# Adding the cog to main script
def setup(bot):
bot.add_cog(Mod(bot))
| 2.359375 | 2 |
src/main/python/programmingtheiot/cda/sim/TemperatureSensorSimTask.py | Taowyoo/-constrained-device-app | 0 | 12785733 | <reponame>Taowyoo/-constrained-device-app<gh_stars>0
#####
#
# This class is part of the Programming the Internet of Things project.
#
# It is provided as a simple shell to guide the student and assist with
# implementation for the Programming the Internet of Things exercises,
# and designed to be modified by the student as needed.
#
import logging
from programmingtheiot.cda.sim.BaseSensorSimTask import BaseSensorSimTask
from programmingtheiot.cda.sim.SensorDataGenerator import SensorDataGenerator
from programmingtheiot.common import ConfigConst
from programmingtheiot.data.SensorData import SensorData
class TemperatureSensorSimTask(BaseSensorSimTask):
"""
Implementation of TemperatureSensorSimTask
"""
def __init__(self, sensorType: int = SensorData.TEMP_SENSOR_TYPE, dataSet=None,
minVal: float = SensorDataGenerator.LOW_NORMAL_INDOOR_TEMP,
maxVal: float = SensorDataGenerator.HI_NORMAL_INDOOR_TEMP):
"""
Init TemperatureSensorSimTask by using super class constructor with values especially for TemperatureSensorSimTask
:param sensorType: Sensor Type, here is temperature sensor
:param dataSet: Using local generated default data set
:param minVal: Using default indoor temperature from SensorDataGenerator
:param maxVal: Using default indoor temperature from SensorDataGenerator
"""
super(TemperatureSensorSimTask, self).__init__(sensorType=sensorType, dataSet=dataSet, minVal=minVal, maxVal=maxVal)
self._sensorName = ConfigConst.TEMP_SENSOR_NAME
pass
| 3.28125 | 3 |
attic/gen.py | lojikil/coastML | 6 | 12785734 | <filename>attic/gen.py<gh_stars>1-10
#@(#) lazy person's way of generating a lot of classes:
#@(#) write the code, then grep for returns, then write
#@(#) the classes. This is why I want to make a functional
#@(#) language with ADTs to handle this sort of thing...
#@(#)
#@(#) Usage:
#@(#)
#@(#) `grep 'return Token' boot.py | awk 'BEGIN {FS="return "} {print $2}' > tokens`
#@(#) then `python3 gen.py`
tmpl3 = """
class {0}(Token):
def __init__(self, c, l, o):
self.lexeme = c
self.line = l
self.offset = o
def __repr__(self):
return "{0}({{0}})".format(self.lexeme)
def __str__(self):
return self.lexeme"""
tmpl2 = """
class {0}(Token):
def __init__(self, l, o):
self.line = l
self.offset = o
def __repr__(self):
return "{0}()"
def __str__(self):
return self.lexeme"""
with open('tokens') as fh:
data = fh.readlines()
generate_cache = []
for d in data:
line = d.split('(')
if line[0] in generate_cache:
continue
cnt = len(line[1].split(','))
print("\n#", line[0], cnt)
generate_cache.append(line[0])
if cnt == 2:
print(tmpl2.format(line[0]))
elif cnt == 3:
print(tmpl3.format(line[0]))
| 2.6875 | 3 |
models/prov.py | LvanWissen/saa-bevolkingsregisters | 0 | 12785735 | <filename>models/prov.py
"""
Bio ontology to be used in the enrichment of SAA indices by Golden Agents.
"""
from rdflib import Dataset, Graph, Namespace
from rdflib import XSD, RDF, RDFS, OWL
from rdflib import URIRef, BNode, Literal
from rdfalchemy.rdfSubject import rdfSubject
from rdfalchemy.rdfsSubject import rdfsSubject
from rdfalchemy import rdfSingle, rdfMultiple, rdfContainer
prov = Namespace("http://www.w3.org/ns/prov#")
########
# PROV #
########
class Entity(rdfsSubject):
rdf_type = prov.Entity
class Derivation(rdfsSubject):
rdf_type = prov.Derivation
hadActivity = rdfSingle(prov.hadActivity, range_type=prov.Activity)
entity = rdfMultiple(prov.entity, range_type=prov.Entity)
class Activity(rdfsSubject):
rdf_type = prov.Activity
wasAssociatedWith = rdfSingle(prov.wasAssociatedWith,
range_type=prov.Agent)
qualifiedAssociation = rdfSingle(prov.qualifiedAssociation,
range_type=prov.Association)
comment = rdfSingle(RDFS.comment)
class Association(rdfsSubject):
rdf_type = prov.Association
hadPlan = rdfSingle(prov.hadPlan, range_type=prov.Plan)
agent = rdfSingle(prov.agent, range_type=prov.Agent)
comment = rdfSingle(RDFS.comment)
class Plan(rdfsSubject):
rdf_type = prov.Plan
comment = rdfSingle(RDFS.comment)
class Agent(rdfsSubject):
rdf_type = prov.Agent | 2.328125 | 2 |
test_driven_development/main.py | steph409/snippets_of_code | 0 | 12785736 | <gh_stars>0
import logging
import re
def add(input: str = None) -> str:
'''adds numbers from a string'''
if not isinstance(input, str):
raise TypeError(f"The {input=} is of incorrect type")
logging.info(f"received string value {input}")
if not re.match("^[0-9,\n]*$", input):
raise ValueError(f"The {input=} is not valid")
if not input: # == "":
return "0"
return str(sum([int(i) for i in re.split(r",|\n", input)]))
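
# Hedged usage example (not part of the original kata file): exercises the
# behaviour of add() implemented above -- empty input, comma/newline separators,
# and rejection of invalid input.
if __name__ == "__main__":
    assert add("") == "0"
    assert add("1,2,3") == "6"
    assert add("1\n2,3") == "6"
    try:
        add("1,a")
    except ValueError:
        pass  # non-numeric characters are rejected as expected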
| 3.40625 | 3 |
rip/ripping_guide.py | My-Little-Cable-Co/rip | 0 | 12785737 | <filename>rip/ripping_guide.py
import json
from pathlib import Path
from typing import Dict, List, Optional
from jsonschema.validators import validator_for
from ripping_guide_schema import RIPPING_GUIDE_SCHEMA
class Ripable:
def __init__(self, name, disc_title, disc_chapters, file_path):
self.name = name
self.disc_title = disc_title
self.disc_chapters = disc_chapters
self.file_path = file_path
def __str__(self) -> str:
return f'{self.name}: Title {self.disc_title}, Chapter(s) {self.disc_chapters}, saving to "{self.file_path}".'
class RippingGuide:
def __init__(self, ripping_guide_dict={}, *args, **kwargs):
self._ripping_guide_dict = ripping_guide_dict
def set_disc_id(self, disc_id):
self._ripping_guide_dict['disc_id'] = str(disc_id)
def set_title(self, title):
self._ripping_guide_dict['title'] = str(title)
@staticmethod
def from_json(json_string) -> Optional['RippingGuide']:
# ensure the string is parsable JSON
try:
ripping_guide_dict = json.loads(json_string)
except json.decoder.JSONDecodeError:
print('ERROR: File contents were not valid JSON.')
return None
# ensure the JSON conforms to the Ripping Guide Schema
validator_class = validator_for(RIPPING_GUIDE_SCHEMA)
validator = validator_class(RIPPING_GUIDE_SCHEMA)
if not validator.is_valid(instance=ripping_guide_dict):
print(ripping_guide_dict)
print('ERROR: JSON did not adhere to Ripping Guide schema.')
return None
return RippingGuide(ripping_guide_dict=ripping_guide_dict)
@staticmethod
def retrieve_from_internet(disc_id) -> Optional['RippingGuide']:
raise NotImplementedError('This method is TBD')
# ripping_guide_response = requests.get(f'https://example.com/{disc_id}')
# if ripping_guide_response.status_code == 200:
# RippingGuide.from_json(ripping_guide_response.json())
@staticmethod
def from_path(file_path) -> Optional['RippingGuide']:
file_candidate = Path(file_path)
if not file_candidate.is_file():
return None
return RippingGuide.from_json(file_candidate.read_text())
def to_json(self) -> str:
return json.dumps(self._ripping_guide_dict, indent=2)
@property
def title(self) -> Optional[str]:
return self._ripping_guide_dict.get('title')
@property
def features(self) -> List[Dict]:
return self._ripping_guide_dict.get('features', [])
@property
def episodes(self) -> List[Dict]:
return self._ripping_guide_dict.get('episodes', [])
def add_feature(self, feature: Dict):
if 'features' not in self._ripping_guide_dict:
self._ripping_guide_dict['features'] = []
self._ripping_guide_dict['features'].append(feature)
def add_episode(self, episode: Dict):
if 'episodes' not in self._ripping_guide_dict:
self._ripping_guide_dict['episodes'] = []
self._ripping_guide_dict['episodes'].append(episode)
def ripables(self) -> List[Ripable]:
things_to_rip = []
for feature in self.features:
feature_ripable = Ripable(
name=feature['feature_title'],
disc_title=feature['title'],
disc_chapters=feature['chapters'],
file_path=f"{feature['feature_title']}/{feature['filename']}"
)
things_to_rip.append(feature_ripable)
for special_feature in feature.get('special_features', []):
special_feature_ripable = Ripable(
name=special_feature['special_feature_title'],
disc_title=special_feature['title'],
disc_chapters=special_feature['chapters'],
file_path=f"{feature['feature_title']}/{special_feature['filename']}"
)
things_to_rip.append(special_feature_ripable)
for episode in self.episodes:
episode_ripable = Ripable(
name=' '.join(episode['filename'].split('.')[0:-1]),
disc_title=episode['title'],
disc_chapters=episode['chapters'],
file_path=f"{episode['show_title']}/Season {episode['season']}/{episode['filename']}"
)
things_to_rip.append(episode_ripable)
return things_to_rip
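
# Hedged usage sketch (not part of the original module): building a guide in code
# and listing what would be ripped. The field names follow the dicts consumed by
# ripables() above; constructing the guide directly bypasses the JSON-schema
# validation that from_json() performs.
if __name__ == "__main__":
    guide = RippingGuide()
    guide.set_title("Example Disc")
    guide.add_episode({
        "show_title": "Example Show",
        "season": 1,
        "title": 3,            # disc title number
        "chapters": "1-5",     # disc chapter range
        "filename": "Example Show - s01e01.mkv",
    })
    for ripable in guide.ripables():
        print(ripable)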
| 2.71875 | 3 |
dumbpm/shared/shared.py | poros/dumbpm | 7 | 12785738 | <reponame>poros/dumbpm
from pandas import DataFrame
def compute_stats(duration: list[int]) -> DataFrame:
"""Statistics to visualize for the result of a Monte Carlo simulation."""
return DataFrame(duration, columns=["Duration"]).describe(
percentiles=[0.5, 0.75, 0.90, 0.99]
)
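
# Hedged usage example (not part of the original module): durations would normally
# come from a Monte Carlo simulation; a small hard-coded sample illustrates the
# summary table compute_stats() returns.
if __name__ == "__main__":
    sample_durations = [10, 12, 9, 15, 11, 14]
    print(compute_stats(sample_durations))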
| 2.9375 | 3 |
src/test_acceptance.py | AviBuckman/Vulnerability-Checker | 0 | 12785739 | <gh_stars>0
import pytest
import os
from CSV_handler import CSVHandler, DataHandler
from checker import Checker
import csv
libsrpm = ['dbus-tools-1.12.8-9.el8.x86_64']
libsdpkg = [
{'name': 'accountsservice', 'version': '0.6.45-1ubuntu1', 'architecture': 'amd64'}
]
def test():
data_handler: DataHandler = CSVHandler()
checker: Checker = Checker(data_handler)
if checker.PackageOrganized["manager"] == "rpm":
checker.scan_library_list(libsrpm)
t1 = open('./resources/acceptanceFiles/realOutputRpm.csv', 'r')
t3 = open('./resources/acceptanceFiles/realOutputStringsRpm.csv', 'r')
else:
checker.scan_library_list(libsdpkg)
t1 = open('./resources/acceptanceFiles/realOutputUbuntu.csv', 'r')
t3 = open('./resources/acceptanceFiles/realOutputStringsUbuntu.csv', 'r')
t2 = open('./resources/vulnerability.csv', 'r')
t4 = open('./resources/strings.csv', 'r')
fileone = t1.readlines()
filetwo = t2.readlines()
fileone2 = t3.readlines()
filetwo2 = t4.readlines()
t1.close()
t2.close()
t3.close()
t4.close()
vul = equals(fileone, filetwo)
st = equals(fileone2, filetwo2)
assert vul and st
def equals(fileone, filetwo):
x = 0
for i in fileone:
if i not in filetwo and x != 0:
print(i)
return False
x = x+1
return True
def remove_files():
os.remove('./resources/vulnerability.csv')
os.remove('./resources/strings.csv')
@pytest.yield_fixture(autouse=True, scope='module')
def teardown():
yield
print("tearing down...")
remove_files()
| 2.234375 | 2 |
dragnet/dll/models.py | mozilla/dragnet | 1 | 12785740 | from django.db import models
from django.db.models.signals import pre_save
from django.dispatch import receiver
from django.contrib.auth.models import User
import datetime
class File(models.Model):
"""The actual DLL file itself"""
STATUS_UNKNOWN = 'unknown'
STATUS_VALID = 'valid'
STATUS_MALWARE = 'malware'
STATUS_LIKELY_VALID = 'likely_valid'
STATUS_LIKELY_MALWARE = 'likely_malware'
STATUS_CHOICES = (
(STATUS_UNKNOWN, 'Unknown'),
(STATUS_VALID, 'Valid'),
(STATUS_LIKELY_VALID, 'Likely Valid'),
(STATUS_LIKELY_MALWARE, 'Likely Malware'),
(STATUS_MALWARE, 'Malware'),
)
PLATFORM_WINDOWS = 'Windows'
PLATFORM_LINUX = 'Linux'
PLATFORM_MAC = 'Mac OS X'
PLATFORM_CHOICES = (
(PLATFORM_WINDOWS, 'Windows'),
(PLATFORM_LINUX, 'Linux'),
(PLATFORM_MAC, 'Mac OS X')
)
date_created = models.DateTimeField(default=datetime.datetime.utcnow)
date_modified = models.DateTimeField(default=datetime.datetime.utcnow,
auto_now=True)
created_by = models.ForeignKey(User, related_name="created_by")
modified_by = models.ForeignKey(User, related_name="modified_by")
file_name = models.CharField(max_length=200)
common_name = models.CharField(max_length=200, blank=True, null=True)
version = models.CharField(max_length=100, blank=True, null=True)
platform = models.CharField(max_length=10, choices=PLATFORM_CHOICES,
blank=True)
vendor = models.CharField(max_length=200, blank=True, null=True)
distributors = models.CharField(max_length=200, blank=True, null=True)
md5_hash = models.CharField(max_length=32, blank=True, null=True)
debug = models.CharField(max_length=60, blank=True, null=True)
debug_filename = models.CharField(max_length=60, blank=True, null=True)
status = models.CharField(max_length=10, choices=STATUS_CHOICES)
released = models.DateField(blank=True, null=True)
obsolete = models.BooleanField(default=False)
replaced_by = models.CharField(max_length=200, blank=True, null=True)
details = models.TextField(blank=True, null=True)
def __unicode__(self):
return self.file_name
class Meta:
unique_together = ('file_name', 'debug_filename', 'debug')
class Comment(models.Model):
"""Comments users have made on given DLL files"""
user = models.ForeignKey(User)
dll = models.ForeignKey(File)
date = models.DateTimeField(default=datetime.datetime.utcnow,
auto_now=True)
comment = models.TextField()
class FileHistory(models.Model):
"""A historical record of the DLL file and the changes made to it over
time"""
dll = models.ForeignKey(File)
user = models.ForeignKey(User)
date_changed = models.DateTimeField(auto_now=True)
field = models.CharField(max_length=40)
original_state = models.CharField(max_length=200, blank=True, null=True)
changed_state = models.CharField(max_length=200, blank=True, null=True)
@receiver(pre_save, sender=File)
def compare_history(sender, instance, **kwargs):
if not File.objects.filter(pk=instance.pk).exists():
return sender
EVALUATE = ('file_name', 'common_name', 'vendor', 'distributors',
'md5_hash', 'debug', 'status', 'released', 'obsolete',
'replaced_by', 'details', )
existing = File.objects.get(pk=instance.id)
for key in EVALUATE:
if (getattr(existing, key) != getattr(instance, key) and
any([getattr(existing, key), getattr(instance, key)])):
user = User.objects.get(pk=instance.modified_by_id)
FileHistory.objects.create(user=user,
dll=existing,
field=key,
original_state=getattr(existing, key),
changed_state=getattr(instance, key))
return sender
| 2.140625 | 2 |
thiefringer/thiefringer.py | KyuzoM/thiefringer | 0 | 12785741 | <gh_stars>0
#!/usr/bin/env python
'''
ThiefRinger alarm system - Onion Omega specific python package, motion detection, SMS notification.
'''
from __future__ import print_function
import signal
import sys
import os
import argparse
import json
import threading
try:
from Queue import Queue
except (ImportError, ModuleNotFoundError):
from queue import Queue
import onionGpio
import gpydem
import ooutils
class ThiefRinger(object):
'''
ThiefRinger alarm object.
'''
def __init__(self, opt):
self.opt = opt
with open(self.opt.config, 'r') as f:
config = json.load(f)
self.main_ctl = threading.Event()
self.thread_ctl = threading.Event()
self.thread_msgq = Queue(maxsize=2)
self.threads = []
# setup OS signal handlers
signal.signal(signal.SIGINT, self.signal_terminate)
signal.signal(signal.SIGTERM, self.signal_terminate)
#signal.signal(signal.SIGKILL, self.signal_terminate)
# config defaults
self.cfg_PIR = config.get('PIR', {})
self.cfg_GSM = config.get('GSM', {})
self.cfg_Battery = config.get('Battery', {})
def run(self):
'''
Run alarm system main loop - blocking.
Start alarm system, wait for external signal, stop alarm system.
'''
if self.opt.verbose:
print('run')
self.stop_threads()
self.start_threads()
while not self.main_ctl.wait(60.0):
pass
self.stop_threads(wait=True)
def terminate(self):
'''
Terminate alarm system main loop - nonblocking.
'''
if self.opt.verbose:
print('terminate')
self.main_ctl.set()
def signal_terminate(self, *args):
'''
Catch process exit signals.
@param *args: [list] List of additional arguments
'''
if self.opt.verbose:
print('signal_terminate')
self.terminate()
def start_threads(self):
'''
Start alarm system - nonblocking.
'''
if self.opt.verbose:
print('start_threads')
self.thread_ctl.clear()
self.thread_msgq = Queue()#self.thread_msgq.clear()
self.threads = [
threading.Thread(target=self.PIR_motion, args=(self.cfg_PIR, self.thread_ctl, self.thread_msgq), name='PIR_motion'),
threading.Thread(target=self.GSM_modem, args=(self.cfg_GSM, self.thread_ctl, self.thread_msgq), name='GSM_modem'),
threading.Thread(target=self.Battery_monitor, args=(self.cfg_Battery, self.thread_ctl, self.thread_msgq), name='Battery_monitor')
]
self.threads.append(
threading.Thread(target=self.thread_heartbeat, args=(self.threads, self.thread_ctl))
)
if self.opt.verbose:
print('num of threads: %d' % len(self.threads))
for t in self.threads:
t.start()
def stop_threads(self, wait=False):
'''
Stop alarm system - nonblocking.
@param wait: [bool] Wait until all alarms are sent or not
'''
if self.opt.verbose:
print('stop_threads')
self.thread_ctl.set()
if wait:
self.thread_msgq.join()
for t in self.threads:
t.join()
def PIR_motion(self, cfg, ctl, msgq):
'''
PIR motion detector main loop.
@param cfg: [dict] Configurations
@param ctl: [object] Threading control object (Event)
@param msgq: [object] Message Queue
'''
if self.opt.verbose:
print('PIR_motion')
# config defaults
cfg_pin = cfg.get('pin', -1)
cfg_active_value = cfg.get('active_value', '0')
cfg_alarm = cfg.get('alarm', {})
cfg_alarm_number = cfg_alarm.get('number', '')
cfg_alarm_message = cfg_alarm.get('message', 'ALERT')
if sys.version_info[0] == 2:
cfg_alarm_number = str(cfg_alarm_number)
cfg_alarm_message = str(cfg_alarm_message)
# PIR motion detector GPIO pin init
gpio = onionGpio.OnionGpio(cfg_pin)
if gpio.setInputDirection():
raise RuntimeError("Could not set GPIO direction to input!")
prev_value = '0' if cfg_active_value == '1' else '1'
# infinite loop
malert = False
while not ctl.wait(0.1):
value = gpio.getValue().strip()
if prev_value != value:
if (value == cfg_active_value) and (not malert):
malert = True
if self.opt.verbose:
print("PIR_motion ALERT: motion detected!")
msgq.put((cfg_alarm_number, cfg_alarm_message), block=True, timeout=0.1)
prev_value = value
if self.opt.verbose:
print('PIR_motion exit')
def GSM_modem(self, cfg, ctl, msgq):
'''
GSM 3G USB modem main loop.
@param cfg: [dict] Configurations
@param ctl: [object] Threading control object (Event)
@param msgq: [object] Message Queue
'''
if self.opt.verbose:
print('GSM_modem')
# config defaults
cfg_modem_type = cfg.get('modem_type', '')
cfg_dev_id = cfg.get('dev_id', '/dev/ttyS0')
cfg_baudrate = cfg.get('baudrate', 9600)
cfg_PIN = cfg.get('PIN', '')
cfg_re_timeout = cfg.get('re_timeout', 1)
# GSM 3G USB modem serial port init
gsm = gpydem.Modem.get(cfg_modem_type, cfg_dev_id, baudrate=cfg_baudrate, PIN=cfg_PIN, re_timeout=cfg_re_timeout)
# infinite loop
while not ctl.wait(1.0):
if not msgq.empty():
number, message = msgq.get(block=True, timeout=0.1)
msgq.task_done()
if self.opt.verbose:
print('GSM_modem sending SMS: {0}, {1}'.format(number, message))
gsm.sendSMS(number, message)
if self.opt.verbose:
print('GSM_modem SMS sent.')
if self.opt.verbose:
print('GSM_modem exit')
def Battery_monitor(self, cfg, ctl, msgq):
'''
LiPo battery level check main loop.
@param cfg: [dict] Configurations
@param ctl: [object] Threading control object (Event)
@param msgq: [object] Message Queue
'''
if self.opt.verbose:
print('Battery_monitor')
# config defaults
cfg_frequency = cfg.get('frequency', 60)
cfg_timeout = cfg.get('timeout', 1)
cfg_vmax = cfg.get('vmax', 4.2)
cfg_vmin = cfg.get('vmin', 3.5)
cfg_vpthreshold = cfg.get('vpthreshold', 20)
cfg_alarm = cfg.get('alarm', {})
cfg_alarm_number = cfg_alarm.get('number', '')
cfg_alarm_message = cfg_alarm.get('message', 'BATTERY')
if sys.version_info[0] == 2:
cfg_alarm_number = str(cfg_alarm_number)
cfg_alarm_message = str(cfg_alarm_message)
# infinite loop
battery = ooutils.ABattery()
id = -1
vprevious = -1
valert = False
while not ctl.wait(cfg_frequency):
id = battery.percentage(vmax=cfg_vmax, vmin=cfg_vmin)
opcode, vactual = battery.wait(id, timeout=cfg_timeout)
if self.opt.verbose:
print('Battery_monitor opcode: {0}, result: {1}'.format(opcode, vactual))
battery.terminate(id)
if opcode != ooutils.ABattery.OP_SUCCESS:
continue
if self.opt.verbose:
print('Battery_monitor level: {0} %'.format(vactual))
if vprevious != vactual:
vprevious = vactual
if vactual <= cfg_vpthreshold:
if self.opt.verbose:
print('Battery_monitor level low!')
if not valert:
valert = True # send notification only once
msgq.put((cfg_alarm_number, cfg_alarm_message), block=True, timeout=0.1)
#return # stop further measurement
battery.terminate(id)
if self.opt.verbose:
print('Battery_monitor exit')
def thread_heartbeat(self, threads, ctl):
'''
Thread heartbeat main loop.
@param threads: [list] Threads to check periodically
@param ctl: [object] Threading control object (Event)
'''
if self.opt.verbose:
print('thread_heartbeat')
# infinite loop
while not ctl.wait(1.0):
for t in threads:
if not t.is_alive():
print('Thread {0} is dead! See previous messages for more details.'.format(t.name), file=sys.stderr)
self.terminate()
if self.opt.verbose:
print('thread_heartbeat exit')
def main():
'''
Main ThiefRinger function.
'''
arg_parser = argparse.ArgumentParser(description='ThiefRinger alarm system.')
arg_parser.add_argument('-c', '--config', type=str, default=os.path.join(os.path.abspath(os.path.dirname(__file__)), '.thiefringer.json'), help='Config file')
arg_parser.add_argument('-v', '--verbose', action='store_true', help='Increase verbosity')
opt = arg_parser.parse_args()
try:
alarm = ThiefRinger(opt)
rc = alarm.run()
except KeyboardInterrupt:
alarm.terminate()
except Exception as e:
print(str(e), file=sys.stderr)
print("\033[91mUnexpected error happened! Please see the details above.\033[0m", file=sys.stderr)
sys.exit(1)
else:
sys.exit(rc)
if __name__ == '__main__':
main()
| 2.265625 | 2 |
modules/shared/domain/repository/unit_of_work.py | eduardolujan/hexagonal_architecture_django | 6 | 12785742 | # -*- coding: utf-8 -*-
# from __future__ import annotations
from typing import Optional, TypeVar
from abc import ABC, abstractmethod
UnitOfWork = TypeVar('UnitOfWork', bound='UnitOfWork')
class UnitOfWork(ABC):
"""
Unit of work
"""
@abstractmethod
def commit(self):
"""
Commit transaction
"""
raise NotImplementedError("Not implemented yet")
@abstractmethod
def add(self, entity) -> None:
"""
Add entity to transactions
@param entity:
@type entity:
@return: None
"""
raise NotImplementedError("Not implemented yet")
@abstractmethod
def flush(self) -> None:
"""
Remove all transactions
@return: None
"""
raise NotImplementedError("Not implemented yet")
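
# Hedged illustration (not part of the original module): a minimal in-memory
# implementation of the UnitOfWork contract defined above, useful in tests or as
# a starting point for a persistence-backed unit of work.
class InMemoryUnitOfWork(UnitOfWork):
    """Collects entities and 'commits' them to an in-memory list."""

    def __init__(self):
        self._pending = []
        self.committed = []

    def add(self, entity) -> None:
        # Register the entity in the current transaction
        self._pending.append(entity)

    def flush(self) -> None:
        # Drop all pending (uncommitted) entities
        self._pending.clear()

    def commit(self):
        # Persist pending entities, then clear the transaction
        self.committed.extend(self._pending)
        self.flush()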
| 3.34375 | 3 |
RunToolkit/for_str/transform.py | Ijustwantyouhappy/RunToolkit | 0 | 12785743 | <reponame>Ijustwantyouhappy/RunToolkit
# -*- coding: utf-8 -*-
# @Time : 2019/10/4 1:51
# @Author : Run
# @File : transform.py
# @Software : PyCharm
from RunToolkit.for_str import split_to_words
def camel(s: str) -> str:
"""
Converts a string to camelcase.
"""
tmp = ' '.join(split_to_words(s)).title().replace(' ', '')
return tmp[0].lower() + tmp[1:]
def kebab(s: str) -> str:
"""
Converts a string to kebab case.
Break the string into lowercase words and combine them with `-`.
"""
return '-'.join(split_to_words(s)).lower()
def snake(s: str) -> str:
"""
Converts a string to snake case.
Break the string into lowercase words and combine them with `_`.
"""
return '_'.join(split_to_words(s)).lower()
if __name__ == "__main__":
print(camel('some_database_field_name')) # 'someDatabaseFieldName'
print(camel('Some label that needs to be camelized')) # 'someLabelThatNeedsToBeCamelized'
print(camel('some-javascript-property')) # 'someJavascriptProperty'
print(camel('some-mixed_string with spaces_underscores-and-hyphens')) # 'someMixedStringWithSpacesUnderscoresAndHyphens'
print(camel('AllThe-small Things'))
print()
print(kebab('camelCase')) # 'camel-case'
print(kebab('some text')) # 'some-text'
print(kebab('some-mixed_string With spaces_underscores-and-hyphens')) # 'some-mixed-string-with-spaces-underscores-and-hyphens'
print(kebab('AllThe-small Things'))
print()
print(snake('camelCase')) # 'camel_case'
print(snake('some text')) # 'some_text'
print(snake('some-mixed_string With spaces_underscores-and-hyphens')) # 'some_mixed_string_with_spaces_underscores_and_hyphens'
print(snake('AllThe-small Things')) # "all_the_smal_things"
print()
| 3.421875 | 3 |
quero/offer/serializers.py | felipewove/quero-listar | 0 | 12785744 | from django.contrib.auth.models import Group, User
from rest_framework import serializers
from .models import Campus, Course, Offer, University
class UserSerializer(serializers.ModelSerializer):
""" Serialization of User model to render as JSON.
The depth is responsible to render/show information of foreign key
"""
class Meta:
model = User
fields = ["id", "username", "email", "groups"]
depth = 1
class GroupSerializer(serializers.ModelSerializer):
""" Serialization of Group model to render as JSON.
"""
class Meta:
model = Group
fields = ["id", "name"]
class OfferSerializer(serializers.ModelSerializer):
""" Serialization of Offer model to render as JSON.
The depth is responsible to render/show information of foreign key
"""
course = serializers.PrimaryKeyRelatedField(queryset=Course.objects.all())
class Meta:
model = Offer
depth = 3
fields = "__all__"
class CourseSerializer(serializers.ModelSerializer):
""" Serialization of Course model to render as JSON.
The depth is responsible to render/show information of foreign key
"""
campus = serializers.PrimaryKeyRelatedField(queryset=Campus.objects.all())
class Meta:
model = Course
depth = 2
fields = "__all__"
class CampusSerializer(serializers.ModelSerializer):
""" Serialization of Campus model to render as JSON.
The depth is responsible to render/show information of foreign key
"""
university = serializers.PrimaryKeyRelatedField(queryset=University.objects.all())
class Meta:
model = Campus
depth = 1
fields = "__all__"
class UniversitySerializer(serializers.ModelSerializer):
""" Serialization of University model to render as JSON.
"""
class Meta:
model = University
fields = "__all__"
| 2.5 | 2 |
chap11/Member03.py | ytianjin/GitTest | 0 | 12785745 | <filename>chap11/Member03.py
# Gym membership class (version 3)
class Member:
"""健身房会员类(第三版)"""
def __init__(self, no: int, name: str, weight: float) -> None:
"""构造函数"""
self.__no = no
self.__name = name
self.__weight = weight
def lose_weight(self, loss: float) -> None:
"""减重loss千克"""
self.__weight -= loss
def print(self) -> None:
"""打印输出数据"""
print('{}: {} {}kg'.format(self.__no, self.__name, self.__weight))
# 测试会员类
yamada = Member(15, '山田太郎', 72.7)
sekine = Member(37, '关根信彦', 65.3)
yamada.lose_weight(3.5)  # Yamada loses 3.5 kg
sekine.lose_weight(5.3)  # Sekine loses 5.3 kg
yamada.print()
sekine.print() | 3.515625 | 4 |
xc/xc7/tests/serdes/generate_tests.py | bl0x/symbiflow-arch-defs | 183 | 12785746 | #!/usr/bin/env python3
"""
Creates the header file for the OSERDES test with the correct configuration
of the DATA_WIDTH and DATA_RATE
"""
import argparse
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--input', required=True, help="Input top file to be generated"
)
parser.add_argument(
'--output', required=True, help="Output top file to be generated"
)
parser.add_argument(
'--data_width', required=True, help="Data width of the OSERDES"
)
parser.add_argument(
'--data_rate', required=True, help="Data rate of the OSERDES"
)
args = parser.parse_args()
with open(args.input, "r") as f:
lines = f.read().splitlines()
with open(args.output, 'w') as f:
print('`define DATA_WIDTH_DEFINE {}'.format(args.data_width), file=f)
print('`define DATA_RATE_DEFINE \"{}\"'.format(args.data_rate), file=f)
for line in lines:
print(line, file=f)
if __name__ == "__main__":
main()
| 2.78125 | 3 |
bot/dialogs/phrases.py | sen-den/fcspm-ksu-bot | 1 | 12785747 | """
Any phrases used by bot.
Tips:
Avoid prefix commands as keywords wor different sates: shortest prefix will intercept handling
"""
EDIT_EMOJI = '🖋 '
MAP_EMOJI = '🗺 '
DOC_EMOJI = '🗒 '
BELL_EMOJI = '🔔 '
TICK_EMOJI = '✔️ '
CROSS_EMOJI = '❌ '
SIGN_EMOJI = '❗ '
WEB_EMOJI = '🌐 '
HOW_TO_ORDER_CERTIFICATE = 'Довідку можна замовити в деканаті, використавши форму на сайті або за допомогою бота'
ORDER_CERTIFICATE_HASHTAG = '#ЗАМОВЛЕННЯ_ДОВІДКИ'
THANKS = 'Дякую!'
WHERE_IS_PREFIX = MAP_EMOJI + 'Де знаходиться'
WHERE_IS_SOMETHING = WHERE_IS_PREFIX + '...?'
WHERE_IS_FCSPM_DEANS_OFFICE = WHERE_IS_PREFIX + ' деканат на факультеті комп`ютерних наук, фізики та математики'
ORDER_CERTIFICATE_PREFIX = DOC_EMOJI + 'Замовити довідку'
ORDER_CERTIFICATE_WITH_BOT = ORDER_CERTIFICATE_PREFIX + ' через бота'
ORDER_CERTIFICATE_WITH_DATA = DOC_EMOJI + 'Підтвердити замовлення через бота з введеними даними'
BELLS_SCHEDULE = BELL_EMOJI + 'Розклад дзвінків'
PUT_NAME = TICK_EMOJI + 'Змінити ПІП'
POST_NAME = SIGN_EMOJI + 'Ввесити ПІП'
PUT_SPECIALITY = TICK_EMOJI + 'Змінити спеціальність'
POST_SPECIALITY = SIGN_EMOJI + 'Обрати спеціальність'
PUT_COURSE = TICK_EMOJI + 'Змінити курс'
POST_COURSE = SIGN_EMOJI + 'Обрати курс'
PUT_COMMENT = EDIT_EMOJI + 'Змінити коментар'
POST_COMMENT = EDIT_EMOJI + 'Додати коментар'
ORDER_CERTIFICATE_COMMANDS = [
ORDER_CERTIFICATE_WITH_BOT,
PUT_NAME, POST_NAME,
PUT_SPECIALITY, POST_SPECIALITY,
PUT_COURSE, POST_COURSE,
PUT_COMMENT, POST_COMMENT
]
INPUT_PROMPT = 'Введіть, будь ласка'
FULFILL = 'Необхідно заповнити інформацію'
UPDATE_OR_REQUEST = 'За необхідності можна виправити, якщо інформація вірна - замовити'
CANCEL = 'Скасувати'
SORRY = 'Вибачте, я нічого не знайшов для вас'
BELLS_SCHEDULE_TEXT = '''```
Розклад дзвінків:
І пара: 08.30 – 09.50
ІІ пара: 10.00 – 11.20
ІІІ пара: 11.50 – 13.10
ІV пара: 13.30 – 14.50
V пара: 15.00 – 16.20
VІ пара: 16.30 – 17.50
```'''
KSPU_ORDER_CERTIFICATE_PAGE = 'http://www.kspu.edu/About/Faculty/FPhysMathemInformatics/storinki_faculti' \
'/OrderCertificateFromDeansOffice.aspx '
| 1.8125 | 2 |
src/dcos-migrate.py | Ganasagar/migration-tools-repo | 0 | 12785748 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import sys
from dcos_migrate.cmd import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
run()
| 1.710938 | 2 |
python/hackathon/src/parser/name_extractor.py | FISHackathon2020/RAN | 0 | 12785749 | <gh_stars>0
import spacy
from spacy.matcher import Matcher
def extract_name_with_rules(resume_text):
# load pre-trained model
nlp = spacy.load('en_core_web_sm')
# initialize matcher with a vocab
matcher = Matcher(nlp.vocab)
nlp_text = nlp(resume_text)
# First name and Last name are always Proper Nouns
pattern = [{'POS': 'PROPN'}, {'POS': 'PROPN'}]
matcher.add('NAME', None, pattern)
matches = matcher(nlp_text)
for match_id, start, end in matches:
span = nlp_text[start:end]
return span.text | 3.109375 | 3 |
src/PlayVideo.py | jluckenbaugh2/Deepcut | 1 | 12785750 | """
Modification History:
Date: 3/16/2021
Time: 6:00PM
Description:
Will extract the phonemes from a TextGrid file and change them into
syllables using the ARPABET dictionary. Two functions will be used
to do this process. First function, get_phoneme, will get all of the
phonemes and their timings from the TextGrid file into arrays. The second
function, phoneme_to_syllables, will convert the set of phonemes into
syllables. The syllabifier package will be used for the conversion.
The syllable codes and syllable timings are saved.
Current inputs:
TextGrid File
Current output:
Size of the phoneme interval
Output all of the phonemes from the phoneme intervals.
NOTES:
The code only works iff all TextGrid files are formatted the same as the current input
Currently using Anaconda interpreter in base/root environment
Packages Downloaded:
Download the packages via cmd or anaconda cmd
ARPABET dictionary: https://github.com/vgautam/arpabet-syllabifier/blob/master/tests/tmp.ipynb
Textgrid Tool: https://github.com/kylebgorman/textgrid
CMD download: pip install TextGrid
"""
# importing pyglet module
import pyglet
def play_video(video_file):
# width / height of window
width = 720
height = 720
# creating a window
title = "demo"
window = pyglet.window.Window(width, height, title)
# video path
vidPath ="facetest.mp4"
# creating a media player object
player = pyglet.media.Player()
source = pyglet.media.StreamingSource()
MediaLoad = pyglet.media.load(vidPath)
# add this media in the queue
player.queue(MediaLoad)
# play the video
player.play()
# on draw event
@window.event
def on_draw():
# clea the window
window.clear()
# if player sorce exist
# and video format exist
if player.source and player.source.video_format:
# get the texture of video and
# make surface to display on the screen
player.get_texture().blit(0, 0)
# # key press event
# @window.event
# def on_key_press(symbol, modifier):
# # key "p" get press
# if symbol == pyglet.window.key.P:
# # pause the video
# player.pause()
# # printing message
# print("Video is paused")
# # key "r" get press
# if symbol == pyglet.window.key.R:
# # resume the video
# player.play()
# # printing message
# print("Video is resumed")
# run the pyglet application
pyglet.app.run()
| 3.59375 | 4 |