max_stars_repo_path (stringlengths 3–269) | max_stars_repo_name (stringlengths 4–119) | max_stars_count (int64 0–191k) | id (stringlengths 1–7) | content (stringlengths 6–1.05M) | score (float64 0.23–5.13) | int_score (int64 0–5) |
---|---|---|---|---|---|---|
Heapsort/Heapsort.py | CrypticCalamari/Algorithms | 0 | 12795451 | #!/usr/bin/python
import random
class Heapsort:
"""Heapsort"""
@staticmethod
def bubble_down(a, begin, end):
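# Sift the value at `root` down the subtree: keep swapping it with its larger child until the max-heap property holds.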
root = begin
child = root * 2 + 1
while (child <= end):
if ((child + 1 <= end) and (a[child] < a[child + 1])):
child += 1
if (a[root] < a[child]):
a[root], a[child] = a[child], a[root]
root = child
child = root * 2 + 1
else:
return
@staticmethod
def heapify_max(a):
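# Build a max-heap in place by sifting down every parent node, starting from the last parent and moving toward the root.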
end = len(a) - 1
begin = (end - 1) // 2
while (begin >= 0):
Heapsort.bubble_down(a, begin, end)
begin -= 1
@staticmethod
def sort(a):
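# Repeatedly swap the heap root (current maximum) to the end of the array, then restore the heap on the shrunken prefix.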
end = len(a) - 1
Heapsort.heapify_max(a)
while (end > 0):
a[0], a[end] = a[end], a[0]
end -= 1
Heapsort.bubble_down(a, 0, end)
@staticmethod
def verify(a):
prev = a[0]
for i in range(1, len(a)):
if prev > a[i]:
print "FAIL: Array not sorted properly"
return
prev = a[i]
print "--SUCCESS: Sorted!--"
if __name__ == "__main__":
test = []
random.seed()
for i in range(20):
test.append(random.randint(0, 100))
print test
Heapsort.sort(test)
print test
Heapsort.verify(test)
| 4.09375 | 4 |
tests/unit/pipelines.py | ethanjli/phylline | 0 | 12795452 | <reponame>ethanjli/phylline
"""Test the pipelines module."""
# Builtins
# Packages
from phylline.links.clocked import DelayedEventLink
from phylline.links.events import EventLink
from phylline.links.links import ChunkedStreamLink
from phylline.links.loopback import TopLoopbackLink
from phylline.links.streams import StreamLink
from phylline.pipelines import AutomaticPipeline, ManualPipeline, PipelineBottomCoupler
from phylline.pipes import AutomaticPipe
import pytest
from tests.unit.links.links import HIGHER_CHUNKED_STREAM, LOWER_CHUNKED_STREAM
from tests.unit.pipes import (
assert_bottom_events,
write_bottom_chunked_buffers,
write_top_events
)
def make_pipeline_singular(pipeline_type):
"""Make a singular pipeline."""
pipeline = pipeline_type(
ChunkedStreamLink()
)
assert isinstance(pipeline.bottom, ChunkedStreamLink)
assert isinstance(pipeline.top, ChunkedStreamLink)
return pipeline
def make_pipeline_short(pipeline_type):
"""Make a short pipeline."""
pipeline = pipeline_type(
ChunkedStreamLink(),
EventLink(),
)
assert isinstance(pipeline.bottom, ChunkedStreamLink)
assert isinstance(pipeline.top, EventLink)
return pipeline
def make_pipeline_long(pipeline_type):
"""Make a long pipeline."""
pipeline = pipeline_type(
StreamLink(),
StreamLink(),
StreamLink(),
ChunkedStreamLink(),
EventLink(),
EventLink(),
EventLink()
)
assert isinstance(pipeline.bottom, StreamLink)
assert isinstance(pipeline.top, EventLink)
return pipeline
def make_pipeline_delayed(pipeline_type):
"""Make a short pipeline."""
pipeline = pipeline_type(
StreamLink(),
ChunkedStreamLink(),
DelayedEventLink(),
EventLink(),
EventLink()
)
assert isinstance(pipeline.bottom, StreamLink)
assert isinstance(pipeline.top, EventLink)
return pipeline
def make_pipeline_events(pipeline_type):
"""Make a events-only pipeline."""
pipeline = pipeline_type(
EventLink(),
EventLink(),
EventLink(),
EventLink()
)
assert isinstance(pipeline.bottom, EventLink)
assert isinstance(pipeline.top, EventLink)
return pipeline
def make_pipeline_nested(pipeline_type):
"""Make a nested pipeline."""
pipeline = pipeline_type(
make_pipeline_singular(pipeline_type),
make_pipeline_events(pipeline_type),
EventLink(),
AutomaticPipe(EventLink(), EventLink()),
make_pipeline_events(pipeline_type)
)
return pipeline
@pytest.mark.parametrize('pipeline_factory', [
make_pipeline_singular,
make_pipeline_short,
make_pipeline_long,
make_pipeline_nested
])
def test_manual_pipeline(pipeline_factory):
"""Exercise ManualPipeline's interface."""
print('Testing Manual Pipeline with factory {}:'.format(pipeline_factory.__name__))
pipeline = pipeline_factory(ManualPipeline)
print(pipeline)
# Read/write on links with directional sync
write_bottom_chunked_buffers(pipeline.bottom)
assert pipeline.sync_up() is None
assert_bottom_events(pipeline.top)
write_top_events(pipeline.top)
assert pipeline.sync_down() is None
result = pipeline.bottom.to_write()
print('Pipeline bottom wrote to stream: {}'.format(result))
assert result == HIGHER_CHUNKED_STREAM
# Read/write on links with bidirectional sync
write_bottom_chunked_buffers(pipeline.bottom)
assert pipeline.sync() is None
assert_bottom_events(pipeline.top)
write_top_events(pipeline.top)
assert pipeline.sync() is None
result = pipeline.bottom.to_write()
print('Pipeline bottom wrote to stream: {}'.format(result))
assert result == HIGHER_CHUNKED_STREAM
# Read/write on pipeline with bidirectional sync
write_bottom_chunked_buffers(pipeline)
assert pipeline.sync() is None
assert_bottom_events(pipeline)
write_top_events(pipeline)
assert pipeline.sync() is None
result = pipeline.to_write()
print('Pipeline bottom wrote to stream: {}'.format(result))
assert result == HIGHER_CHUNKED_STREAM
def test_manual_pipeline_clocked():
"""Exercise ManualPipeline's clock functionality."""
print('Testing Manual Pipeline with factory make_pipeline_delayed:')
pipeline = make_pipeline_delayed(ManualPipeline)
print(pipeline)
assert pipeline.update_clock(0) is None
write_bottom_chunked_buffers(pipeline.bottom)
assert pipeline.sync() == 1.0
assert not pipeline.top.has_receive()
assert pipeline.update_clock(0.5) == 1.0
assert not pipeline.top.has_receive()
assert pipeline.update_clock(0.99) == 1.0
assert not pipeline.top.has_receive()
assert pipeline.update_clock(1.0) is None
assert_bottom_events(pipeline.top)
print('Resetting clock...')
assert pipeline.update_clock(0) is None
write_top_events(pipeline.top)
assert pipeline.sync() == 1.0
assert pipeline.update_clock(0.5) == 1.0
assert not pipeline.bottom.to_write()
assert pipeline.update_clock(0.75) == 1.0
assert not pipeline.bottom.to_write()
assert pipeline.update_clock(1.5) is None
result = pipeline.bottom.to_write()
assert result == HIGHER_CHUNKED_STREAM
@pytest.mark.parametrize('pipeline_factory', [
make_pipeline_singular,
make_pipeline_short,
make_pipeline_long,
make_pipeline_nested
])
def test_automatic_pipeline(pipeline_factory):
"""Exercise AutomaticPipeline's interface."""
print('Testing Automatic Pipeline with factory {}:'.format(pipeline_factory.__name__))
automatic_pipeline = pipeline_factory(AutomaticPipeline)
print(automatic_pipeline)
# Read/write on links
write_bottom_chunked_buffers(automatic_pipeline.bottom)
assert_bottom_events(automatic_pipeline.top)
write_top_events(automatic_pipeline.top)
result = automatic_pipeline.bottom.to_write()
print('Pipeline bottom wrote to stream: {}'.format(result))
assert result == HIGHER_CHUNKED_STREAM
# Read/write on pipeline
write_bottom_chunked_buffers(automatic_pipeline)
assert_bottom_events(automatic_pipeline)
write_top_events(automatic_pipeline)
result = automatic_pipeline.to_write()
print('Pipeline bottom wrote to stream: {}'.format(result))
assert result == HIGHER_CHUNKED_STREAM
def test_automatic_pipeline_clocked():
"""Exercise AutomaticPipeline's clock functionality."""
print('Testing Automatic Pipeline with factory make_pipeline_delayed:')
pipeline = make_pipeline_delayed(AutomaticPipeline)
print(pipeline)
assert pipeline.update_clock(0) is None
write_bottom_chunked_buffers(pipeline.bottom)
assert pipeline.update_clock(0) == 1.0
assert not pipeline.top.has_receive()
assert pipeline.update_clock(0.5) == 1.0
assert not pipeline.top.has_receive()
assert pipeline.update_clock(0.99) == 1.0
assert not pipeline.top.has_receive()
assert pipeline.update_clock(1.0) is None
assert_bottom_events(pipeline.top)
print('Resetting clock...')
assert pipeline.update_clock(0) is None
write_top_events(pipeline.top)
assert pipeline.update_clock(0) == 1.0
assert pipeline.update_clock(0.5) == 1.0
assert not pipeline.bottom.to_write()
assert pipeline.update_clock(0.75) == 1.0
assert not pipeline.bottom.to_write()
assert pipeline.update_clock(1.5) is None
result = pipeline.bottom.to_write()
assert result == HIGHER_CHUNKED_STREAM
def make_pipeline_loopback(pipeline_factory):
"""Make a long pipeline with a loopback at the top."""
manual_pipeline = pipeline_factory(
StreamLink(),
StreamLink(),
StreamLink(),
ChunkedStreamLink(),
EventLink(),
EventLink(),
EventLink(),
TopLoopbackLink()
)
assert isinstance(manual_pipeline.bottom, StreamLink)
assert isinstance(manual_pipeline.top, TopLoopbackLink)
return manual_pipeline
def test_manual_pipeline_loopback():
"""Exercise ManualPipeline's interface."""
print('Testing Manual Pipeline with factory make_pipeline_loopback:')
manual_pipeline = make_pipeline_loopback(ManualPipeline)
print(manual_pipeline)
write_bottom_chunked_buffers(manual_pipeline.bottom)
assert manual_pipeline.sync() is None
result = manual_pipeline.bottom.to_write()
print('Pipeline bottom wrote to stream: {}'.format(result))
assert result == LOWER_CHUNKED_STREAM
def test_automatic_pipeline_loopback():
"""Exercise ManualPipeline's interface."""
print('Testing Automatic Pipeline with factory make_pipeline_loopback:')
automatic_pipeline = make_pipeline_loopback(AutomaticPipeline)
print(automatic_pipeline)
write_bottom_chunked_buffers(automatic_pipeline.bottom)
result = automatic_pipeline.bottom.to_write()
print('Pipeline bottom wrote to stream: {}'.format(result))
assert result == LOWER_CHUNKED_STREAM
def assert_loopback_below(stack, payload):
"""Asset that the stack has correct below-loopback behavior."""
stack.send(payload)
assert stack.has_receive()
result = stack.receive()
print('Loopback received: {}'.format(result))
assert result.data == payload
def test_loopback_pipeline_bottom_coupler_stream():
"""Test for pipeline bottom coupling on streams."""
print('Testing byte buffer loopback with PipelineBottomCoupler...')
pipeline_one = make_pipeline_nested(AutomaticPipeline)
pipeline_two = make_pipeline_loopback(AutomaticPipeline)
coupler = PipelineBottomCoupler(pipeline_one, pipeline_two)
print(coupler)
payload = b'\1\2\3\4'
assert_loopback_below(pipeline_one.top, payload)
assert_loopback_below(pipeline_one, payload)
assert_loopback_below(coupler.pipeline_one, payload)
def test_loopback_pipeline_bottom_coupler_event():
"""Test for pipeline bottom coupling on events."""
print('Testing event loopback with PipelineBottomCoupler...')
pipeline_one = make_pipeline_events(AutomaticPipeline)
pipeline_two = AutomaticPipeline(
make_pipeline_events(AutomaticPipeline), TopLoopbackLink()
)
coupler = PipelineBottomCoupler(pipeline_one, pipeline_two)
print(coupler)
payload = b'\1\2\3\4'
assert_loopback_below(pipeline_one.top, payload)
assert_loopback_below(pipeline_one, payload)
assert_loopback_below(coupler.pipeline_one, payload)
| 2.28125 | 2 |
fullcyclepy/tests/test_utils.py | dfoderick/fullcyclemining | 26 | 12795453 | <filename>fullcyclepy/tests/test_utils.py<gh_stars>10-100
import unittest
import datetime
import backend.fcmutils as utils
import messaging.schema as schema
class TestUtilityFunctions(unittest.TestCase):
def test_safe_string_null(self):
nullstring = utils.safestring(None)
self.assertFalse(nullstring)
def test_safe_string_other(self):
astring = utils.safestring(b'test')
self.assertTrue(astring)
def test_formattime(self):
dtnow = utils.formattime(datetime.datetime.now())
self.assertTrue(dtnow)
def test_deserializelist(self):
thelist = ['{"miner_type":"test", "minerid":"test"}']
los = utils.deserializelist_withschema(schema.MinerInfoSchema(), thelist)
self.assertTrue(len(los) > 0)
def test_deserializelist_string(self):
thelist = ['{"miner_type":"test", "minerid":"test"}']
los = utils.deserializelistofstrings(thelist, schema.MinerInfoSchema())
self.assertTrue(len(los) > 0)
| 2.265625 | 2 |
src/oci/apm_traces/models/query_result_metadata_summary.py | Manny27nyc/oci-python-sdk | 249 | 12795454 | <gh_stars>100-1000
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class QueryResultMetadataSummary(object):
"""
Summary containing the metadata about the query result set.
"""
def __init__(self, **kwargs):
"""
Initializes a new QueryResultMetadataSummary object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param query_result_row_type_summaries:
The value to assign to the query_result_row_type_summaries property of this QueryResultMetadataSummary.
:type query_result_row_type_summaries: list[oci.apm_traces.models.QueryResultRowTypeSummary]
:param source_name:
The value to assign to the source_name property of this QueryResultMetadataSummary.
:type source_name: str
:param query_results_grouped_by:
The value to assign to the query_results_grouped_by property of this QueryResultMetadataSummary.
:type query_results_grouped_by: list[oci.apm_traces.models.QueryResultsGroupedBySummary]
:param query_results_ordered_by:
The value to assign to the query_results_ordered_by property of this QueryResultMetadataSummary.
:type query_results_ordered_by: list[oci.apm_traces.models.QueryResultsOrderedBySummary]
:param time_series_interval_in_mins:
The value to assign to the time_series_interval_in_mins property of this QueryResultMetadataSummary.
:type time_series_interval_in_mins: int
"""
self.swagger_types = {
'query_result_row_type_summaries': 'list[QueryResultRowTypeSummary]',
'source_name': 'str',
'query_results_grouped_by': 'list[QueryResultsGroupedBySummary]',
'query_results_ordered_by': 'list[QueryResultsOrderedBySummary]',
'time_series_interval_in_mins': 'int'
}
self.attribute_map = {
'query_result_row_type_summaries': 'queryResultRowTypeSummaries',
'source_name': 'sourceName',
'query_results_grouped_by': 'queryResultsGroupedBy',
'query_results_ordered_by': 'queryResultsOrderedBy',
'time_series_interval_in_mins': 'timeSeriesIntervalInMins'
}
self._query_result_row_type_summaries = None
self._source_name = None
self._query_results_grouped_by = None
self._query_results_ordered_by = None
self._time_series_interval_in_mins = None
@property
def query_result_row_type_summaries(self):
"""
Gets the query_result_row_type_summaries of this QueryResultMetadataSummary.
A collection of QueryResultRowTypeSummary objects that describe the type and properties of the individual row elements of the query rows
being returned. The ith element in this list contains the QueryResultRowTypeSummary of the ith key value pair in the QueryResultRowData map.
:return: The query_result_row_type_summaries of this QueryResultMetadataSummary.
:rtype: list[oci.apm_traces.models.QueryResultRowTypeSummary]
"""
return self._query_result_row_type_summaries
@query_result_row_type_summaries.setter
def query_result_row_type_summaries(self, query_result_row_type_summaries):
"""
Sets the query_result_row_type_summaries of this QueryResultMetadataSummary.
A collection of QueryResultRowTypeSummary objects that describe the type and properties of the individual row elements of the query rows
being returned. The ith element in this list contains the QueryResultRowTypeSummary of the ith key value pair in the QueryResultRowData map.
:param query_result_row_type_summaries: The query_result_row_type_summaries of this QueryResultMetadataSummary.
:type: list[oci.apm_traces.models.QueryResultRowTypeSummary]
"""
self._query_result_row_type_summaries = query_result_row_type_summaries
@property
def source_name(self):
"""
Gets the source_name of this QueryResultMetadataSummary.
Source of the query result set (traces, spans, etc).
:return: The source_name of this QueryResultMetadataSummary.
:rtype: str
"""
return self._source_name
@source_name.setter
def source_name(self, source_name):
"""
Sets the source_name of this QueryResultMetadataSummary.
Source of the query result set (traces, spans, etc).
:param source_name: The source_name of this QueryResultMetadataSummary.
:type: str
"""
self._source_name = source_name
@property
def query_results_grouped_by(self):
"""
Gets the query_results_grouped_by of this QueryResultMetadataSummary.
Columns or attributes of the query rows which are group by values. This is a list of ResultsGroupedBy summary objects,
and the list will contain as many elements as the attributes and aggregate functions in the group by clause in the select query.
:return: The query_results_grouped_by of this QueryResultMetadataSummary.
:rtype: list[oci.apm_traces.models.QueryResultsGroupedBySummary]
"""
return self._query_results_grouped_by
@query_results_grouped_by.setter
def query_results_grouped_by(self, query_results_grouped_by):
"""
Sets the query_results_grouped_by of this QueryResultMetadataSummary.
Columns or attributes of the query rows which are group by values. This is a list of ResultsGroupedBy summary objects,
and the list will contain as many elements as the attributes and aggregate functions in the group by clause in the select query.
:param query_results_grouped_by: The query_results_grouped_by of this QueryResultMetadataSummary.
:type: list[oci.apm_traces.models.QueryResultsGroupedBySummary]
"""
self._query_results_grouped_by = query_results_grouped_by
@property
def query_results_ordered_by(self):
"""
Gets the query_results_ordered_by of this QueryResultMetadataSummary.
Order by which the query results are organized. This is a list of queryResultsOrderedBy summary objects, and the list
will contain more than one OrderedBy summary object, if the sort was multidimensional.
:return: The query_results_ordered_by of this QueryResultMetadataSummary.
:rtype: list[oci.apm_traces.models.QueryResultsOrderedBySummary]
"""
return self._query_results_ordered_by
@query_results_ordered_by.setter
def query_results_ordered_by(self, query_results_ordered_by):
"""
Sets the query_results_ordered_by of this QueryResultMetadataSummary.
Order by which the query results are organized. This is a list of queryResultsOrderedBy summary objects, and the list
will contain more than one OrderedBy summary object, if the sort was multidimensional.
:param query_results_ordered_by: The query_results_ordered_by of this QueryResultMetadataSummary.
:type: list[oci.apm_traces.models.QueryResultsOrderedBySummary]
"""
self._query_results_ordered_by = query_results_ordered_by
@property
def time_series_interval_in_mins(self):
"""
Gets the time_series_interval_in_mins of this QueryResultMetadataSummary.
Interval for the time series function in minutes.
:return: The time_series_interval_in_mins of this QueryResultMetadataSummary.
:rtype: int
"""
return self._time_series_interval_in_mins
@time_series_interval_in_mins.setter
def time_series_interval_in_mins(self, time_series_interval_in_mins):
"""
Sets the time_series_interval_in_mins of this QueryResultMetadataSummary.
Interval for the time series function in minutes.
:param time_series_interval_in_mins: The time_series_interval_in_mins of this QueryResultMetadataSummary.
:type: int
"""
self._time_series_interval_in_mins = time_series_interval_in_mins
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 2.03125 | 2 |
signal_interpreter_server/tests/unit_tests/test_json_parser.py | mtormane/signal-interpreter-server | 0 | 12795455 | import json
import os
from unittest.mock import patch, mock_open
import pytest
from signal_interpreter_server.json_parser import JsonParser
from signal_interpreter_server.exceptions import SignalError
@pytest.mark.parametrize("identifier, expected_result", [
("11", "ECU Reset"),
("99", "Not existing"),
])
def test_get_signal_title(identifier, expected_result):
jason_parser = JsonParser()
jason_parser.data = {"services": [{"title": "ECU Reset", "id": "11"}]}
if identifier != '99':
assert jason_parser.get_signal_title(identifier) == expected_result
else:
with pytest.raises(SignalError):
jason_parser.get_signal_title(identifier)
def test_load_file_with_fixture(tmpdir):
tmp_db = {"services": [{"title": "ECU Reset", "id": "11"}]}
filepath = os.path.join(tmpdir, "tmp_json.json")
with open(filepath, 'w') as jfile:
json.dump(tmp_db, jfile)
jason_parser = JsonParser()
jason_parser.load_file(filepath)
assert isinstance(jason_parser.data, dict)
assert jason_parser.data == tmp_db
def test_load_file_simple():
with patch("builtins.open",
mock_open(read_data='{"services": [{"title": "ECU Reset", "id": "11"}]}')):
json_parser = JsonParser()
json_parser.load_file("path/to/json/file")
assert json_parser.data == {"services": [{"title": "ECU Reset", "id": "11"}]}
def test_load_file_wrong_type():
with patch("builtins.open", mock_open(read_data="This is wrong data!")):
with pytest.raises(ValueError):
json_parser = JsonParser()
json_parser.load_file("path/to/json/file")
| 2.375 | 2 |
test/test_gpu.py | tranlethaison/learnRL | 0 | 12795456 | <reponame>tranlethaison/learnRL
if __name__ == "__main__":
print("> Test tensorflow-gpu")
import tensorflow as tf
is_gpu_available = tf.test.is_gpu_available()
print(">> __version__: ", tf.__version__)
print(">> is_gpu_available:", is_gpu_available)
| 2.3125 | 2 |
examples/with_signals.py | vBLFTePebWNi6c/Flask-Shell2HTTP | 0 | 12795457 | # web imports
from flask import Flask
from blinker import Namespace # or from flask.signals import Namespace
from flask_executor import Executor
from flask_executor.futures import Future
from flask_shell2http import Shell2HTTP
# Flask application instance
app = Flask(__name__)
# application factory
executor = Executor(app)
shell2http = Shell2HTTP(app, executor, base_url_prefix="/cmd/")
ENDPOINT = "echo"
CMD = "echo"
# Signal Handling
signal_handler = Namespace()
my_signal = signal_handler.signal(f"on_{CMD}_complete")
# ..or any other name of your choice
@my_signal.connect
def my_callback_fn(extra_callback_context, future: Future):
"""
Will be invoked on every process completion
"""
print("Process completed ?:", future.done())
print("Result: ", future.result())
shell2http.register_command(
endpoint=ENDPOINT, command_name=CMD, callback_fn=my_signal.send
)
# Test Runner
if __name__ == "__main__":
app.testing = True
c = app.test_client()
# request new process
data = {"args": ["Hello", "Friend!"]}
c.post(f"cmd/{ENDPOINT}", json=data)
# request new process
data = {"args": ["Bye", "Friend!"]}
c.post(f"cmd/{ENDPOINT}", json=data)
| 2.578125 | 3 |
synergy/db/dao/unit_of_work_dao.py | eggsandbeer/scheduler | 0 | 12795458 | __author__ = '<NAME>'
from threading import RLock
from bson.objectid import ObjectId
from pymongo import ASCENDING
from pymongo.errors import DuplicateKeyError as MongoDuplicateKeyError
from synergy.system import time_helper
from synergy.system.time_qualifier import *
from synergy.system.decorator import thread_safe
from synergy.scheduler.scheduler_constants import COLLECTION_UNIT_OF_WORK, TYPE_MANAGED
from synergy.conf import context
from synergy.db.error import DuplicateKeyError
from synergy.db.model import unit_of_work
from synergy.db.model.unit_of_work import UnitOfWork
from synergy.db.manager import ds_manager
QUERY_GET_FREERUN_SINCE = lambda timeperiod, unprocessed_only: {
unit_of_work.TIMEPERIOD: {'$gte': timeperiod},
unit_of_work.UNIT_OF_WORK_TYPE: unit_of_work.TYPE_FREERUN,
unit_of_work.STATE: {'$ne': unit_of_work.STATE_PROCESSED if unprocessed_only else None}
}
class UnitOfWorkDao(object):
""" Thread-safe Data Access Object from units_of_work table/collection """
def __init__(self, logger):
super(UnitOfWorkDao, self).__init__()
self.logger = logger
self.lock = RLock()
self.ds = ds_manager.ds_factory(logger)
@thread_safe
def get_one(self, key):
""" method finds unit_of_work record and returns it to the caller"""
if not isinstance(key, ObjectId):
# cast key to ObjectId
key = ObjectId(key)
query = {'_id': key}
collection = self.ds.connection(COLLECTION_UNIT_OF_WORK)
document = collection.find_one(query)
if document is None:
msg = 'Unit_of_work with ID=%s was not found' % str(key)
self.logger.warn(msg)
raise LookupError(msg)
return UnitOfWork.from_json(document)
@thread_safe
def get_reprocessing_candidates(self, since=None):
""" method queries Unit Of Work whose <start_timeperiod> is younger than <since>
and who could be candidates for re-processing """
collection = self.ds.connection(COLLECTION_UNIT_OF_WORK)
query = {unit_of_work.STATE: {'$in': [unit_of_work.STATE_IN_PROGRESS,
unit_of_work.STATE_INVALID,
unit_of_work.STATE_REQUESTED]},
unit_of_work.UNIT_OF_WORK_TYPE: TYPE_MANAGED}
if since is None:
cursor = collection.find(query).sort('_id', ASCENDING)
candidates = [UnitOfWork.from_json(document) for document in cursor]
else:
candidates = []
yearly_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_YEARLY, since)
query[unit_of_work.START_TIMEPERIOD] = {'$gte': yearly_timeperiod}
cursor = collection.find(query).sort('_id', ASCENDING)
for document in cursor:
uow = UnitOfWork.from_json(document)
if uow.process_name not in context.process_context:
# this is a decommissioned process
continue
time_qualifier = context.process_context[uow.process_name].time_qualifier
if time_qualifier == QUALIFIER_REAL_TIME:
time_qualifier = QUALIFIER_HOURLY
process_specific_since = time_helper.cast_to_time_qualifier(time_qualifier, since)
if process_specific_since <= uow.start_timeperiod:
candidates.append(uow)
if len(candidates) == 0:
raise LookupError('MongoDB has no reprocessing candidates units of work')
return candidates
@thread_safe
def get_by_params(self, process_name, timeperiod, start_obj_id, end_obj_id):
""" method finds unit_of_work record and returns it to the caller"""
query = {unit_of_work.PROCESS_NAME: process_name,
unit_of_work.TIMEPERIOD: timeperiod,
unit_of_work.START_OBJ_ID: start_obj_id,
unit_of_work.END_OBJ_ID: end_obj_id}
collection = self.ds.connection(COLLECTION_UNIT_OF_WORK)
document = collection.find_one(query)
if document is None:
raise LookupError('Unit_of_work satisfying query %r was not found' % query)
return UnitOfWork.from_json(document)
@thread_safe
def update(self, instance):
""" method finds unit_of_work record and change its status"""
assert isinstance(instance, UnitOfWork)
collection = self.ds.connection(COLLECTION_UNIT_OF_WORK)
document = instance.document
if instance.db_id:
document['_id'] = ObjectId(instance.db_id)
instance.db_id = collection.save(document, safe=True)
return instance.db_id
@thread_safe
def insert(self, instance):
""" inserts a unit of work into MongoDB.
:raises DuplicateKeyError: if such record already exist """
assert isinstance(instance, UnitOfWork)
collection = self.ds.connection(COLLECTION_UNIT_OF_WORK)
try:
return collection.insert(instance.document, safe=True)
except MongoDuplicateKeyError as e:
exc = DuplicateKeyError(instance.process_name,
instance.start_timeperiod,
instance.start_id,
instance.end_id,
e)
raise exc
@thread_safe
def remove(self, uow_id):
assert isinstance(uow_id, (str, ObjectId))
collection = self.ds.connection(COLLECTION_UNIT_OF_WORK)
return collection.remove(uow_id, safe=True)
@thread_safe
def run_query(self, query):
""" method runs the query and returns a list of filtered UnitOfWork records """
cursor = self.ds.filter(COLLECTION_UNIT_OF_WORK, query)
return [UnitOfWork.from_json(document) for document in cursor]
def recover_from_duplicatekeyerror(self, e):
""" method tries to recover from DuplicateKeyError """
if isinstance(e, DuplicateKeyError):
try:
return self.get_by_params(e.process_name, e.timeperiod, e.start_id, e.end_id)
except LookupError as e:
self.logger.error('Unable to recover from DuplicateKeyError error due to %s' % e.message, exc_info=True)
else:
msg = 'Unable to recover from DuplicateKeyError due to unspecified unit_of_work primary key'
self.logger.error(msg)
| 2.0625 | 2 |
c_translator/formative/m_ifelse.py | mahudu97/ANSI-C_Compiler | 0 | 12795459 | def main():
a=0
b=0
if(1<2):
a = 1
else:
a = 2
if(1>2):
b = 1
else:
b = 2
return a + b
# Boilerplate
if __name__ == "__main__":
import sys
ret=main()
sys.exit(ret)
| 3.375 | 3 |
models/Stereo/PSMNetDown.py | pidan1231239/SR-Stereo | 2 | 12795460 | import os
import time
import torch.optim as optim
import torch
import torch.nn.functional as F
import torch.nn as nn
from evaluation import evalFcn
from utils import myUtils
from .RawPSMNet import stackhourglass as rawPSMNet
from .RawPSMNet_TieCheng import stackhourglass as rawPSMNet_TieCheng
from ..Model import Model
from .. import SR
import collections
import torch.nn.parallel as P
from .PSMNet import *
class RawPSMNetDown(RawPSMNetScale):
def __init__(self, maxdisp, dispScale, multiple):
super(RawPSMNetDown, self).__init__(maxdisp, dispScale, multiple)
self.pool = nn.AvgPool2d((2, 2))
# input: RGB value range 0~1
# outputs: disparity range 0~self.maxdisp * self.dispScale / 2
def forward(self, left, right):
outDispHighs = super(RawPSMNetDown, self).forward(left, right)
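# Average-pool the full-resolution disparity maps down to half resolution; the disparity values are halved as well so they stay consistent with the smaller pixel grid.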
outDispLows = myUtils.forNestingList(outDispHighs, lambda disp: self.pool(disp) / 2)
return outDispHighs, outDispLows
class PSMNetDown(PSMNet):
# dataset: only used for suffix of saveFolderName
def __init__(self, maxdisp=192, dispScale=1, cuda=True, half=False, stage='unnamed', dataset=None,
saveFolderSuffix=''):
super(PSMNetDown, self).__init__(maxdisp, dispScale, cuda, half, stage, dataset, saveFolderSuffix)
self.outputMaxDisp = self.outputMaxDisp // 2
self.getModel = RawPSMNetDown
def loss(self, outputs, gts, kitti=False, outputMaxDisp=None):
if outputMaxDisp is not None:
raise Exception('Error: outputMaxDisp of PSMNetDown has no use!')
losses = []
for output, gt, outputMaxDisp in zip(outputs, gts, (self.outputMaxDisp * 2, self.outputMaxDisp)):
losses.append(super(PSMNetDown, self).loss(
output, gt, kitti=kitti, outputMaxDisp=outputMaxDisp
) if gt is not None else None)
return losses
def trainOneSide(self, imgL, imgR, gts, returnOutputs=False, kitti=False, weights=(1, 0)):
self.optimizer.zero_grad()
outDispHighs, outDispLows = self.model.forward(imgL, imgR)
losses = self.loss((outDispHighs, outDispLows), gts, kitti=kitti)
loss = sum([weight * loss for weight, loss in zip(weights, losses) if loss is not None])
with self.amp_handle.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
self.optimizer.step()
dispOuts = []
if returnOutputs:
with torch.no_grad():
dispOuts.append(outDispHighs[2].detach() / (self.outputMaxDisp * 2))
dispOuts.append(outDispLows[2].detach() / self.outputMaxDisp)
losses = [loss] + losses
return [loss.data.item() for loss in losses], dispOuts
def train(self, batch, returnOutputs=False, kitti=False, weights=(1, 0), progress=0):
myUtils.assertBatchLen(batch, 8)
self.trainPrepare()
losses = myUtils.NameValues()
outputs = collections.OrderedDict()
imgL, imgR = batch.highResRGBs()
for inputL, inputR, gts, process, side in zip(
(imgL, imgR), (imgR, imgL),
zip(batch.highResDisps(), batch.lowResDisps()),
(lambda im: im, myUtils.flipLR),
('L', 'R')
):
if not all([gt is None for gt in gts]):
lossesList, outputsList = self.trainOneSide(
*process((inputL, inputR, gts)),
returnOutputs=returnOutputs,
kitti=kitti,
weights=weights
)
for suffix, loss in zip(('', 'DispHigh', 'Disp'), lossesList):
if loss is not None:
losses['loss' + suffix + side] = loss
if returnOutputs:
for suffix, output in zip(('High', 'Low'), outputsList):
outputs['outputDisp' + suffix + side] = process(output)
return losses, outputs
def test(self, batch, evalType='l1', returnOutputs=False, kitti=False):
myUtils.assertBatchLen(batch, 8)
batch = myUtils.Batch(batch.highResRGBs() + batch.lowestResDisps(), cuda=batch.cuda, half=batch.half)
scores, outputs, rawOutputs = super(PSMNetDown, self).test(batch, evalType, returnOutputs, kitti)
for rawOutputsSide, side in zip(rawOutputs, ('L', 'R')):
if rawOutputsSide is not None:
(outDispHigh, outDispLow) = rawOutputsSide
if returnOutputs:
if outDispHigh is not None:
outputs['outputDispHigh' + side] = outDispHigh / (self.outputMaxDisp * 2)
return scores, outputs, rawOutputs
| 2.125 | 2 |
learned_optimization/population/examples/simple_cnn/common.py | Sohl-Dickstein/learned_optimization | 70 | 12795461 | # coding=utf-8
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common code for the simple cnn example."""
import functools
import os
from absl import logging
from flax import serialization
import haiku as hk
import jax
import jax.numpy as jnp
from learned_optimization import filesystem
import numpy as onp
import optax
import tensorflow_datasets as tfds
HKTree = hk.data_structures.to_immutable_dict({}).__class__
# We use flax for serialization but haiku's data struct is not registered.
def _ty_to_state_dict(v):
return serialization.to_state_dict(
{k: v for k, v in hk.data_structures.to_mutable_dict(v).items()})
def _ty_from_state_dict(target, d):
return HKTree(
**
{k: serialization.from_state_dict(target[k], v) for (k, v) in d.items()})
serialization.register_serialization_state(
HKTree, _ty_to_state_dict, _ty_from_state_dict, override=True)
def hk_forward_fn(batch):
"""Forward function for haiku."""
x = batch["image"].astype(jnp.float32) / 255.
mlp = hk.Sequential([
hk.Conv2D(64, (3, 3), stride=2),
jax.nn.relu,
hk.Conv2D(64, (3, 3), stride=1),
jax.nn.relu,
hk.Conv2D(64, (3, 3), stride=2),
jax.nn.relu,
hk.Conv2D(64, (3, 3), stride=1),
jax.nn.relu,
functools.partial(jnp.mean, axis=(1, 2)),
hk.Linear(10),
])
return mlp(x)
@jax.jit
def loss(params, key, batch):
net = hk.transform(hk_forward_fn)
logits = net.apply(params, key, batch)
labels = jax.nn.one_hot(batch["label"], 10)
softmax_xent = -jnp.sum(labels * jax.nn.log_softmax(logits))
softmax_xent /= labels.shape[0]
return softmax_xent
@jax.jit
def update(params, key, state, batch, meta_params):
opt = optax.adam(meta_params["learning_rate"])
l, grad = jax.value_and_grad(loss)(params, key, batch)
updates, new_state = opt.update(grad, state, params)
new_params = optax.apply_updates(params, updates)
return new_params, new_state, l
def save_state(path, state):
filesystem.make_dirs(os.path.dirname(path))
with filesystem.file_open(path, "wb") as fp:
fp.write(serialization.to_bytes(state))
def load_state(path, state):
logging.info("Restoring state %s:", path)
with filesystem.file_open(path, "rb") as fp:
state_new = serialization.from_bytes(state, fp.read())
tree = jax.tree_structure(state)
leaves_new = jax.tree_leaves(state_new)
return jax.tree_unflatten(tree, leaves_new)
def get_data_iterators(fake_data=False):
"""Get training and test data iterators."""
batch_size = 128
if not fake_data:
remap_label = lambda x: {"image": x["image"], "label": x["label"]}
def data(split):
dataset = tfds.load("cifar10", split=split)
iterator = iter(
tfds.as_numpy(
dataset.repeat(-1).shuffle(
batch_size * 10).batch(batch_size).map(remap_label)))
return iterator
return data("train"), data("test")
else:
def data():
while True:
yield {
"image": onp.zeros([batch_size, 32, 32, 3]),
"label": onp.zeros([batch_size], dtype=onp.int32)
}
return data(), data()
| 2.109375 | 2 |
performance/zero_copy_prc1.py | lukasdean/robust_python | 0 | 12795462 | <gh_stars>0
#!/usr/bin/env python
# -*-coding:utf-8 -*-
# @CreateTime : 2021/10/25 22:52
# @Author : xujiahui
# @Project : robust_python
# @File : zero_copy_prc1.py
# @Version : V0.0.1
# @Desc : ?
# Use memoryview to achieve zero-copy slicing
import timeit
data = b"shave and a haircut, two bits"
view = memoryview(data)
chunk = view[12:19]
print(chunk)
print("Size: ", chunk.nbytes)
print("Data in view: ", chunk.tobytes())
print("Underlying data: ", chunk.obj)
# bytes has a limitation: it is read-only, so we cannot update an individual byte in place.
# bytearray is essentially a mutable bytes: it lets us modify the content at any position, and it
# represents its contents as integers rather than as a b-prefixed literal the way bytes does.
my_array = bytearray(b'hello')
my_array[0] = 0x79
print(my_array)
"""
bytearray 与 bytes 一样,也可以用 memoryview 封装,在这种 memoryview 上面切割出来的对象,其内容可以用另一份数据替换,
这样做,替换的实际上是 memoryview 背后那个底层缓冲区里面的相应部分。这使得我们可以通过 memoryview 来修改它所封装的 bytearray,
而不像 bytes 那样,必须先将 bytes 拆散再拼起来
"""
my_array = bytearray(b'row, row, row your boat')
my_view = memoryview(my_array)
write_view = my_view[3:13]
write_view[:] = b'-10 bytes-'
print(my_array)
"""
Python 里面很多库之中的方法,例如 socket.recv_into 与 RawIOBase.readinto,都使用缓冲协议来迅速接受或读取数据。
这种方法的好处是不用分配内存,也不用给原数据制作副本,它们会把收到的内容直接写入现有的缓冲区。
"""
| 2.59375 | 3 |
test/ursina/test.py | rahul38888/opworld | 0 | 12795463 | from ursina import *
class Inventory(Entity):
def __init__(self, capacity):
super(Inventory, self).__init__(
parent=camera.ui, model='quad',
origin=(-0.5, 0.5), texture="white_cube",
color=color.dark_gray
)
self.capacity = capacity
self.scale=(capacity/10, 0.1)
self.position=(-capacity/20, -0.35)
self.texture_scale= (capacity, 1)
self.item_parent = Entity(parent=self, scale=(1/capacity, 1))
self.spots = [None for i in range(capacity)]
def next_slot(self) -> int:
for i in range(self.capacity):
if self.spots[i] is None:
return i
return -1
def append(self, index: int=-1):
spot = index if index>=0 else self.next_slot()
if spot >= 0:
item = Button(parent=self.item_parent, model = "quad", color=color.random_color(),
position=(spot, 0), origin=(-0.5, 0.5))
self.spots[spot] = item
def input(key):
if key == input_handler.Keys.escape:
application.quit()
if __name__ == '__main__':
app = Ursina()
window.fullscreen = True
inventory = Inventory(8)
inventory.append(0)
inventory.append(1)
app.run() | 2.46875 | 2 |
p575e/distribute_candies.py | l33tdaima/l33tdaima | 1 | 12795464 | <reponame>l33tdaima/l33tdaima<gh_stars>1-10
from typing import List
class Solution:
def distributeCandies(self, candyType: List[int]) -> int:
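# At most half of the candies can be eaten, so the number of distinct types eaten is capped
# both by len(candyType) // 2 and by the number of unique types available.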
return min(len(candyType) // 2, len(set(candyType)))
# TESTS
for candyType, expected in [
([1, 1, 2, 2, 3, 3], 3),
([1, 1, 2, 3], 2),
([6, 6, 6, 6], 1),
]:
sol = Solution()
actual = sol.distributeCandies(candyType)
print("The maximum number of different types in", candyType, "->", actual)
assert actual == expected
| 3.65625 | 4 |
04Cuarto/Redes_MultiServicio_RMS/01_Lector_de_etiquetas_MPLS/src/controller/controllers/Main.py | elsudano/Facultad | 2 | 12795465 | <gh_stars>1-10
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""Lista de controladores del programa.
En este fichero podemos encontrarnos todos los controladores,
de todas las vistas de nuestro programa.
"""
from src.controller.Controller import Controller
from src.model import models
from src.controller import controllers
from src.view_app import views
class MainController(Controller):
def back(self, event):
# FIXME: implement this function to go back to the previous menu
pass
def open_pcapng(self, event):
pass
def captura_mpls(self, event):
"""Cambia la vista de la ventana.
Creamos todos los componentes necesarios para realizar
la captura de los paquetes MPLS y mostrarlos en pantalla
"""
model = models.MplsModel()
controller = controllers.MplsController(self._window, model)
view = views.MplsView(self._window, controller)
# Set the title of the new view
self._window.set_title("Lector cabecera MPLS")
controller.set_view(view)
view.init_view()
def captura_rtp(self, event):
"""Cambia la vista de la ventana.
pasamos a crear todos los componentes para la primera practica
"""
model = models.RtpModel()
controller = controllers.RtpController(self._window, model)
view = views.RtpView(self._window, controller)
# Set the title of the new view
self._window.set_title("Lector de RTP")
controller.set_view(view)
view.init_view() | 2.703125 | 3 |
examples/petting_zoo/waterworld.py | LuisFMCuriel/ai-traineree | 22 | 12795466 | """
This example is for demonstration purposes only.
No agent learns anything useful here, yet.
Well, maybe they do but it might take a long time to check.
Ain't nobody got time for that.
"""
from pettingzoo.sisl import waterworld_v3
from ai_traineree.agents.ppo import PPOAgent
from ai_traineree.multi_agents.independent import IndependentAgents
from ai_traineree.runners.multiagent_env_runner import MultiAgentCycleEnvRunner
from ai_traineree.tasks import PettingZooTask
env = waterworld_v3.env()
task = PettingZooTask(env=env)
task.reset() # Needs to be reset to access env.agents()
agents = []
for actor_name in task.agents:
obs_space = task.observation_spaces[actor_name]
action_space = task.action_spaces[actor_name]
agents.append(PPOAgent(obs_space, action_space))
multi_agent = IndependentAgents(agents, agent_names=task.agents)
runner = MultiAgentCycleEnvRunner(task, multi_agent=multi_agent)
runner.run(max_episodes=3)
| 2.5 | 2 |
queue/queue.py | Sherlock-dev/algos | 1,126 | 12795467 | class Queue(object):
def __init__(self):
self._list = []
def count(self):
return len(self._list)
def is_empty(self):
return self.count() == 0
def enqueue(self, item):
self._list.append(item)
def dequeue(self):
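# FIFO behaviour: remove from the front of the backing list (an O(n) operation on a Python list).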
try:
return self._list.pop(0)
except IndexError:
raise IndexError('dequeue from empty queue')
def main():
queue = Queue()
n = 100
print('Empty queue: {0}'.format(queue.is_empty()))
while queue.count() < 5:
print('pushing elements: {0}'.format(n))
queue.enqueue(n)
n = n + 100
print('Number of items: {0}'.format(queue.count()))
print('Empty queue: {0}'.format(queue.is_empty()))
while True:
try:
print('Removing element: {0}'.format(queue.dequeue()))
except Exception as e:
print('Exception: {0}'.format(e))
break
print('Number of items: {0}'.format(queue.count()))
print('Empty queue: {0}'.format(queue.is_empty()))
if __name__ == '__main__':
main()
| 4.03125 | 4 |
anagrams/anagrams.py | learnalgorithms/problems | 0 | 12795468 | <gh_stars>0
def isAnagram(s: str, t: str) -> bool:
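# Two strings are anagrams iff they have equal length and identical per-character counts;
# both maps below start each count at 0 on first occurrence, which is consistent across s and t.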
if len(s) == len(t):
s_map = {}
t_map = {}
for l in s:
if l in s_map:
s_map[l] += 1
else:
s_map[l] = 0
for l in t:
if l in t_map:
t_map[l] += 1
else:
t_map[l] = 0
for l in s_map:
if l not in t_map or s_map[l] != t_map[l]:
return False
else:
return False
return True
| 3.25 | 3 |
imgtoxl.py | findsarfaraz/ImageToXL | 0 | 12795469 | <filename>imgtoxl.py
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'imgtoxl.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(590, 96)
MainWindow.setMaximumSize(QtCore.QSize(590, 96))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Calibri"))
font.setPointSize(11)
MainWindow.setFont(font)
MainWindow.setTabShape(QtGui.QTabWidget.Rounded)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.lineEdit = QtGui.QLineEdit(self.centralwidget)
self.lineEdit.setGeometry(QtCore.QRect(90, 10, 411, 21))
self.lineEdit.setObjectName(_fromUtf8("lineEdit"))
self.label = QtGui.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(10, 10, 71, 16))
self.label.setObjectName(_fromUtf8("label"))
self.progressBar = QtGui.QProgressBar(self.centralwidget)
self.progressBar.setGeometry(QtCore.QRect(2, 70, 583, 23))
self.progressBar.setProperty("value", 24)
self.progressBar.setObjectName(_fromUtf8("progressBar"))
self.btn_Open_File = QtGui.QPushButton(self.centralwidget)
self.btn_Open_File.setGeometry(QtCore.QRect(500, 10, 75, 23))
self.btn_Open_File.setObjectName(_fromUtf8("btn_Open_File"))
self.btn_Save_File = QtGui.QPushButton(self.centralwidget)
self.btn_Save_File.setGeometry(QtCore.QRect(500, 40, 75, 23))
self.btn_Save_File.setObjectName(_fromUtf8("btn_Save_File"))
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
self.progressBar.setVisible(0)
QtGui.QApplication.setStyle(QtGui.QStyleFactory.create("Plastique"))
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "ImgToXL", None))
self.label.setText(_translate("MainWindow", "Image File", None))
self.btn_Open_File.setText(_translate("MainWindow", "Select File", None))
self.btn_Save_File.setText(_translate("MainWindow", "Save File", None))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
MainWindow = QtGui.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| 1.84375 | 2 |
tests/SharedDataFrameTest.py | pywash/pywash | 7 | 12795470 | from unittest import TestCase
from src.PyWash import SharedDataFrame
from src.Exceptions import *
import pandas as pd
verbose = False
class TestDecorators(TestCase):
""" TestClass for SharedDataFrame methods """
def test_is_mergeable_column_names(self):
if verbose:
print("Testing: is_mergeable_columns")
df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'],
'group': ['Accounting', 'Engineering', 'Engineering', 'HR']})
df2 = pd.DataFrame({'employee': ['Lisa', 'Bob', 'Jake', 'Sue'],
'hire_date': [2004, 2008, 2012, 2014]})
test1 = SharedDataFrame(df=df1, verbose=verbose)
test2 = SharedDataFrame(df=df2, verbose=verbose)
self.assertTrue(test1.is_mergeable(test2))
def test_is_mergeable_common_values(self):
if verbose:
print("Testing: is_mergeable_values")
df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'],
'group': ['Accounting', 'Engineering', 'Engineering', 'HR']})
df2 = pd.DataFrame({'names': ['Lisa', 'Bob', 'Jake', 'Sue'],
'hire_date': [2004, 2008, 2012, 2014]})
test1 = SharedDataFrame(df=df1, verbose=verbose)
test2 = SharedDataFrame(df=df2, verbose=verbose)
self.assertTrue(test1.is_mergeable(test2))
def test_is_mergeable_false(self):
if verbose:
print("Testing: is_mergeable_false")
df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'],
'group': ['Accounting', 'Engineering', 'Engineering', 'HR']})
df2 = pd.DataFrame({'names': ['Lisa', 'Bob', 'Jake', 'Sue', 'Bobby'],
'hire_date': [2004, 2008, 2012, 2014, 2019]})
test1 = SharedDataFrame(df=df1, verbose=verbose)
test2 = SharedDataFrame(df=df2, verbose=verbose)
self.assertFalse(test1.is_mergeable(test2))
def test_merge_on_column_names(self):
if verbose:
print("Testing: merge_on_columns")
df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'],
'group': ['Accounting', 'Engineering', 'Engineering', 'HR']})
df2 = pd.DataFrame({'employee': ['Lisa', 'Bob', 'Jake', 'Sue'],
'hire_date': [2004, 2008, 2012, 2014]})
target = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'],
'group': ['Accounting', 'Engineering', 'Engineering', 'HR'],
'hire_date': [2008, 2012, 2004, 2014]})
test1 = SharedDataFrame(df=df1, verbose=verbose)
test2 = SharedDataFrame(df=df2, verbose=verbose)
test1.merge_into(test2)
self.assertTrue(test1.get_dataframe().equals(target), "Successfully merged the 2 DataFrames")
def test_merge_on_common_values(self):
if verbose:
print("Testing: merge_on_values")
df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'],
'group': ['Accounting', 'Engineering', 'Engineering', 'HR']})
df2 = pd.DataFrame({'names': ['Lisa', 'Bob', 'Jake', 'Sue'],
'hire_date': [2004, 2008, 2012, 2014]})
target = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'],
'group': ['Accounting', 'Engineering', 'Engineering', 'HR'],
'names': ['Bob', 'Jake', 'Lisa', 'Sue'],
'hire_date': [2008, 2012, 2004, 2014]})
test1 = SharedDataFrame(df=df1, verbose=verbose)
test2 = SharedDataFrame(df=df2, verbose=verbose)
test1.merge_into(test2)
if verbose:
print(test1.get_dataframe())
print(target)
self.assertTrue(test1.get_dataframe().equals(target), "Successfully merged the 2 DataFrames")
def test_merge_on_false(self):
if verbose:
print("Testing: merge_false")
df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'],
'group': ['Accounting', 'Engineering', 'Engineering', 'HR']})
df2 = pd.DataFrame({'names': ['Lisa', 'Bob', 'Jake', 'Sue', 'Bobby'],
'hire_date': [2004, 2008, 2012, 2014, 2019]})
test1 = SharedDataFrame(df=df1, verbose=verbose)
test2 = SharedDataFrame(df=df2, verbose=verbose)
if verbose:
print(test1.get_dataframe())
with self.assertRaises(NotMergableError):
test1.merge_into(test2)
if verbose:
print(test1.get_dataframe())
| 2.953125 | 3 |
assessment_03/gameplay/PlayerAction.py | DominicSchiller/osmi-module-datascience | 0 | 12795471 | from enum import Enum
from . import PlayerAction
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__version__ = '1.0'
__license__ = 'MIT'
class PlayerAction(Enum):
"""
Enumeration of possible player actions.
"""
HIT = 0
STAND = 1
YES = 2
NO = 3
@staticmethod
def get_action(action_name: str) -> PlayerAction:
"""
Get player action for it's string representation.
:param action_name: The action's string representation
:return: The corresponding player action.
"""
action_name = action_name.lower()
if action_name == "hit":
return PlayerAction.HIT
elif action_name == "stand":
return PlayerAction.STAND
elif action_name == "yes":
return PlayerAction.YES
elif action_name == "no":
return PlayerAction.NO
else:
return None
| 3.4375 | 3 |
src/config/settings.py | kents00/Django | 1 | 12795472 | import os
import logging
import environ
from pathlib import Path
SUPPORTED_NONLOCALES = ['media', 'admin', 'static']
# Build paths inside the project like this: BASE_DIR / 'subdir'.
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__) + "../../../")
env = environ.Env()
# reading .env file
environ.Env.read_env()
env = environ.Env(
# set casting, default value
DEBUG=(bool, True)
)
# Build paths inside the project like this: BASE_DIR / 'subdir'.
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__) + "../../../")
# Take environment variables from .env file
environ.Env.read_env(os.path.join(PROJECT_ROOT, '.env'))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env("SECRET_KEY", default="unsafe-secret-key")
# SECURITY WARNING: don't run with debug turned on in production!
# Debugging displays nice error messages, but leaks memory. Set this to False
# on all server instances and True only for development.
DEBUG = env('DEBUG')
ALLOWED_HOSTS = ["localhost", "127.0.0.1"]
# Application definition
INSTALLED_APPS = [
#Django default apps
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#Third party apps
'compressor',
'django_nose',
'django_extensions',
'debug_toolbar',
#Local apps
#Application base
'Application',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# third party middleware
'whitenoise.middleware.WhiteNoiseMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
DEBUG_TOOLBAR_PANELS = [
'debug_toolbar.panels.history.HistoryPanel',
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.staticfiles.StaticFilesPanel',
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.cache.CachePanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar.panels.redirects.RedirectsPanel',
'debug_toolbar.panels.profiling.ProfilingPanel',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
# insert your TEMPLATE_DIRS here
os.path.join(PROJECT_ROOT, 'templates'),
os.path.join(PROJECT_ROOT, 'templates/.base'),
os.path.join(PROJECT_ROOT, 'templates/layout'),
],
'APP_DIRS': True,
'OPTIONS': {
'debug': DEBUG,
'context_processors': [
# Default context processors
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
# Custom context processors here
#'config.context_processors.custom_context_processor',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# By default, be at least somewhat secure with our session cookies.
SESSION_COOKIE_HTTPONLY = True
# Set this to true if you are using https
SESSION_COOKIE_SECURE = False
# Absolute filesystem path to the directory that will hold user-uploaded files.
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
STATIC_ROOT = 'static/'
# URL prefix for static files.
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = [
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
]
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Memcached-backed cache and sessions
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
}
}
# Argon2 password hashing
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
# Custom hasher
'Application.hashers.PBKDF2WrappedSHA1PasswordHasher',
]
# cacheable files and compression support
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATICFILES_FINDERS = (
# django contrib default finders
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# third party finders
'compressor.finders.CompressorFinder',
)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
INTERNAL_IPS = ['127.0.0.1'] | 2 | 2 |
opencv_project_python-master/opencv_project_python-master/05.geometric_transform/remap_barrel.py | dongrami0425/Python_OpenCV-Study | 0 | 12795473 | <reponame>dongrami0425/Python_OpenCV-Study
import cv2
import numpy as np
# Set the distortion coefficients ---①
k1, k2, k3 = 0.5, 0.2, 0.0 # barrel distortion
#k1, k2, k3 = -0.3, 0, 0 # pincushion distortion
img = cv2.imread('../img/girl.jpg')
rows, cols = img.shape[:2]
# Create the mapping arrays ---②
mapy, mapx = np.indices((rows, cols),dtype=np.float32)
# Normalize to -1~1 around the image center and convert to polar coordinates ---③
mapx = 2*mapx/(cols-1)-1
mapy = 2*mapy/(rows-1)-1
r, theta = cv2.cartToPolar(mapx, mapy)
# Apply the radial distortion transform ---④
ru = r*(1+k1*(r**2) + k2*(r**4) + k3*(r**6))
# Convert back to Cartesian coordinates relative to the top-left origin ---⑤
mapx, mapy = cv2.polarToCart(ru, theta)
mapx = ((mapx + 1)*cols-1)/2
mapy = ((mapy + 1)*rows-1)/2
# Remap ---⑥
distored = cv2.remap(img,mapx,mapy,cv2.INTER_LINEAR)
cv2.imshow('original', img)
cv2.imshow('distorted', distored)
cv2.waitKey()
cv2.destroyAllWindows() | 2.75 | 3 |
computer_network/UDPclient.py | WhiteHyun/Network | 1 | 12795474 | from socket import *
serverName = 'localhost'
serverPort = 12000
clientSocket = socket(AF_INET, SOCK_DGRAM)
message = input('Input lowercase sentence:')
clientSocket.sendto(message.encode(), (serverName, serverPort))
modifiedMessage, serverAddress = clientSocket.recvfrom(2048)
print(modifiedMessage.decode())
clientSocket.close()
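# The matching server is not part of this file; a minimal sketch, assuming the
# classic textbook behaviour where the server upper-cases the sentence and
# echoes it back on port 12000 (run it as a separate process):
#
#     from socket import *
#     serverSocket = socket(AF_INET, SOCK_DGRAM)
#     serverSocket.bind(('', 12000))
#     while True:
#         message, clientAddress = serverSocket.recvfrom(2048)
#         serverSocket.sendto(message.decode().upper().encode(), clientAddress)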
| 2.84375 | 3 |
test_proj/data/data_tcA009.py | leeltib/vizsgamunka_ref | 0 | 12795475 | # A009 - enter users for scrolling function test
# test with randomly generated users (arbitrary number of users, default is 2)
import random
import string
title = 'Hello, én egy A009 test User vagyok!.'
class MyRND():
chars_lo = string.ascii_lowercase
chars_int = string.digits
chars_up = string.ascii_uppercase
chars = string.punctuation # *'[{&| etc
@classmethod
def uname(cls):
return "".join([random.choice(cls.chars_lo) for _ in range(8)])
@classmethod
def ppass(cls):
pp_lo = "".join([random.choice(cls.chars_lo) for _ in range(8)])
pp_int = "".join([random.choice(cls.chars_int) for _ in range(8)])
pp_up = "".join([random.choice(cls.chars_up) for _ in range(8)])
pchars = pp_lo[4] + pp_int[0] + pp_up[7] + pp_lo[1:3] + pp_int[3] + pp_up[4] + pp_lo[6]
return pchars
@classmethod
def email(cls):
mail_lo = "".join([random.choice(cls.chars_lo) for _ in range(7)])
mail_fix = "@gmail.com"
email = mail_lo + mail_fix
return email
class TestData:
def __init__(self, rn):
self.data = []
for i in range(rn):
d = {}
d["username"] = MyRND.uname()
d["email"] = MyRND.email()
d["password"] = <PASSWORD>()
self.data.append(d)
# set number of randomly generated users
td = TestData(2)
td_list = td.data
print(td_list)
users = []
for user in td_list:
user_data = []
for value in user.values():
user_data.append(value)
users.append(user_data)
print(users)
| 3.5 | 4 |
notebooks/MS_Functions.py | malwinasanjose/anomaly_detection | 0 | 12795476 | <filename>notebooks/MS_Functions.py
import pandas as pd
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
def clean_df(df):
# drop columns containing only NAs
df_clean = df.dropna(how='all', axis=1)
print(f'dropped {df.shape[1] - df_clean.shape[1]} columns')
# drop rows with NA values
df_clean = df_clean.dropna(how='any', axis=0)
print(f'dropped {df.shape[0] - df_clean.shape[0]} rows')
return df_clean
def plot_time_series(df, columns, index_is_timestamp=False, start_date=None, end_date=None, ma_nr=[], groupby=[], width=10, height=5, anomaly_columns=[], anomaly_values=[]):
assert isinstance(ma_nr, list) , 'ma_nr should be a list'
    assert isinstance(groupby, list), 'groupby should be a list'
assert isinstance(columns, list), 'columns should be a list'
if index_is_timestamp:
plot_df = df.loc[start_date:end_date]
else:
plot_df = df.set_index('INSDATE').loc[start_date:end_date]
# filter specific machine number
if ma_nr:
plot_df = plot_df.loc[plot_df['MA_NR'].isin(ma_nr)]
else:
pass
# group by columns
if groupby:
plot_df = plot_df.groupby(groupby)
else:
pass
n = len(columns)
if anomaly_columns:
assert len(anomaly_values) == len(anomaly_columns), 'please provide anomaly value for each anomaly column indicator'
m = len(anomaly_columns)
fig, axs = plt.subplots(n, m, figsize=(width*m, height*n))
# reformat axs so it can be subset in the event that there's only one row or only one column
if n==1:
axs=[axs]
if m==1:
axs=[[i] for i in axs]
for col, anomaly in enumerate(anomaly_columns):
for row, column in enumerate(columns):
plot_df[column].plot(legend=True, ax=axs[row][col], xlabel='', ylabel=column, alpha=0.5)
sns.scatterplot(x=plot_df.index[plot_df[anomaly]==anomaly_values[col]],
y=plot_df[column].loc[plot_df[anomaly]==anomaly_values[col]],
color="red", s=10, ax=axs[row][col], label=f'anomaly: {anomaly}', alpha=1)
else:
fig, axs = plt.subplots(n, 1, figsize=(width, height*n))
if n == 1:
axs = [axs]
for row, column in enumerate(columns):
plot_df[column].plot(legend=True, ax=axs[row], xlabel='', ylabel=column, alpha=0.5)
axs[n-1].set_xlabel(df.index.name)
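# Minimal usage sketch with synthetic data (illustrative only -- the real
# dataset and its column names are not part of this module; INSDATE and MA_NR
# are the two names plot_time_series itself relies on).
if __name__ == '__main__':
    example_df = pd.DataFrame({
        'INSDATE': pd.date_range('2021-01-01', periods=100, freq='D'),
        'MA_NR': np.random.choice([1, 2], size=100),
        'SENSOR_VALUE': np.random.randn(100),
    })
    plot_time_series(example_df, ['SENSOR_VALUE'], ma_nr=[1])
    plt.show()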
def nio_labels(nio_series):
nio_df = pd.DataFrame(nio_series.astype(str).str.rjust(10,'0').apply(lambda x: [i for i in x] if len(x)==10 else None).apply(pd.Series))
nio_df.columns = ['1,000,000,000', '100,000,000', '10,000,000', '1,000,000', '100,000', '10,000', '1,000', '100', '10', '1']
return nio_df | 2.890625 | 3 |
src/steampunk_scanner/cli.py | xlab-steampunk/steampunk-scanner-cli | 3 | 12795477 | <gh_stars>1-10
import argparse
import inspect
import sys
from steampunk_scanner import commands
class ArgParser(argparse.ArgumentParser):
"""An argument parser that displays help on error"""
def error(self, message: str):
"""
        Overrides the original error method to print help before exiting
:param message: Error message
"""
sys.stderr.write("error: {}\n".format(message))
self.print_help()
sys.exit(2)
def add_subparsers(self, **kwargs) -> argparse._SubParsersAction:
"""
        Overrides the original add_subparsers method (workaround for http://bugs.python.org/issue9253)
"""
subparsers = super(ArgParser, self).add_subparsers()
subparsers.required = True
subparsers.dest = "command"
return subparsers
def create_parser() -> ArgParser:
"""
Create argument parser for CLI
:return: Parser as argparse.ArgumentParser object
"""
parser = ArgParser(description="Steampunk Scanner - a quality scanner for Ansible Playbooks")
subparsers = parser.add_subparsers()
cmds = inspect.getmembers(commands, inspect.ismodule)
for _, module in sorted(cmds, key=lambda x: x[0]):
module.add_parser(subparsers)
return parser
def main() -> ArgParser:
"""
Main CLI method to be called
"""
parser = create_parser()
args = parser.parse_args()
return args.func(args)
| 2.96875 | 3 |
lambda_function.py | cesarbruschetta/ses-receive-email-forward | 0 | 12795478 | <gh_stars>0
import sys
import os
import json
import logging
from typing import Dict
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from forward_recieved_email.utils import logger as c_logger
from forward_recieved_email.config import settings
from forward_recieved_email import processing
# SET LOGGER
c_logger.configure_logger()
logger = logging.getLogger(__name__)
def lambda_handler(event: Dict, context: Dict) -> None:
""" AWS lambda start """
# CHANGE LOGGER
logger = logging.getLogger()
logger.setLevel(settings.LOGGER_LEVEL)
logger.debug(json.dumps(event, indent=4))
result = processing.main_handler(event)
return result
| 1.953125 | 2 |
orm/db/migrations/0001_initial.py | madelinkind/twitter_crawler | 0 | 12795479 | # Generated by Django 3.1.2 on 2021-01-25 08:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='TwitterUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('screen_name', models.CharField(max_length=15)),
],
options={
'db_table': 'twitter_users',
},
),
migrations.CreateModel(
name='Tweet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tweet_text', models.CharField(max_length=280)),
('tweet_date', models.DateTimeField()),
('tweet_lang', models.CharField(max_length=3, null=True)),
('tweet_id', models.CharField(db_index=True, max_length=20, null=True)),
('tweet_info', models.JSONField()),
('is_retweet', models.BooleanField(default=True)),
('retweet_count', models.IntegerField(null=True)),
('twitter_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='db.twitteruser')),
],
options={
'db_table': 'tweets',
},
),
]
| 1.929688 | 2 |
sagas/api/info_mod.py | samlet/stack | 3 | 12795480 | from sanic import Blueprint
from sanic.response import json
info = Blueprint('info', url_prefix='/info')
@info.route("/ping")
async def ping(request):
"""
$ curl localhost:1700/info/ping
:param request:
:return:
"""
return json({ "hello": "world" })
@info.route('/env/<tag>')
async def env_handler(request, tag):
"""
$ curl localhost:1700/info/env/PATH
:param request:
:param tag:
:return:
"""
import os
return json({tag: os.environ.get(tag)})
@info.post('/echo/<tag>')
async def echo(request, tag):
"""
$ curl -d '{"key1":"value1", "key2":"value2"}' \
-H "Content-Type: application/json" -X POST \
localhost:1700/info/echo/hi | json
:param request:
:param tag:
:return:
"""
data=request.json
print("..", data)
return json({tag:'POST request - {}'.format(request.json),
'keys': list(data.keys()),
})
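# A minimal sketch of serving this blueprint on its own, assuming the parent
# project normally mounts it on a larger Sanic app (the port matches the curl
# examples in the docstrings above):
if __name__ == '__main__':
    from sanic import Sanic
    app = Sanic("info_demo")
    app.blueprint(info)
    app.run(host="0.0.0.0", port=1700)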
| 2.4375 | 2 |
chatbot/views.py | G-sharp/ChatBot | 3 | 12795481 | from django.shortcuts import render
# Create your views here.
# -*- coding: utf-8 -*-
import json
import datetime
from DataBase import DBOPs
from django.http import HttpResponse
from chatbot import aimlKernel
def chat(request):
"""
Function:
        Chat API endpoint
    Args:
        request: the HTTP request
    Returns:
        the HTTP response as JSON
"""
dic = {}
if request.method == 'GET':
dic['botResponse'] = aimlKernel.k.respond(request.GET.get('ask', '无语'), request.GET.get('sessionid','test')).\
replace(' ', '')
DBOPs.InsertDB(request.GET.get('sessionid', 'test'), request.GET.get('ask', '无语'), dic['botResponse'])
dic['time'] = datetime.datetime.now().strftime(('%Y-%m-%d %H:%M:%S'))
dic['sessionid'] = request.GET.get('sessionid','test')
return HttpResponse(json.dumps(dic, ensure_ascii=False))
else:
dic['message'] = u'方法错误'
return HttpResponse(json.dumps(dic, ensure_ascii=False))
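# Illustrative exchange (the URL prefix depends on the project's urls.py,
# which is not shown here):
#
#   GET /chat/?ask=hello&sessionid=abc123
#   -> {"botResponse": "...", "time": "2021-01-25 08:11:00", "sessionid": "abc123"}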
| 2.109375 | 2 |
test_palindrome.py | zhonchik/python_examples | 0 | 12795482 | <gh_stars>0
from palindrome import is_palindrome
def test_is_palindrome_1():
assert is_palindrome('a')
def test_is_palindrome_2():
assert is_palindrome('aa')
assert not is_palindrome('ab')
def test_is_palindrome_3():
assert is_palindrome('aba')
assert not is_palindrome('abc')
def test_is_palindrome_4():
assert is_palindrome('abba')
assert not is_palindrome('abcd')
def test_is_palindrome_5():
assert is_palindrome('abcba')
assert not is_palindrome('abcde')
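# The module under test is not included here; a minimal is_palindrome
# consistent with all of the assertions above would be (assumption, not the
# actual palindrome.py):
#
#     def is_palindrome(s):
#         return s == s[::-1]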
| 2.453125 | 2 |
The_Codes/Problem_1/XML.py | MertYILDIZ19/ADM-HW1 | 0 | 12795483 | ############### XML 1 - Find the Score ################
import sys
import xml.etree.ElementTree as etree
def get_attr_number(node):
# your code goes here
count = 0
    for i in node.iter():
count += len(i.attrib)
return count
if __name__ == '__main__':
sys.stdin.readline()
xml = sys.stdin.read()
tree = etree.ElementTree(etree.fromstring(xml))
root = tree.getroot()
print(get_attr_number(root))
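# Illustrative scoring example (not part of the original solution): for
#   <a attr1="x"><b attr2="y" attr3="z"></b></a>
# get_attr_number returns 3 -- one attribute on <a> plus two on <b>.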
############### XML2 - Find the Maximum Depth ################
import xml.etree.ElementTree as etree
maxdepth = 0
def depth(elem, level):
global maxdepth
# your code goes here
level += 1
if level >= maxdepth:
maxdepth = level
for i in elem:
depth(i, level)
if __name__ == '__main__':
n = int(input())
xml = ""
for i in range(n):
xml = xml + input() + "\n"
tree = etree.ElementTree(etree.fromstring(xml))
depth(tree.getroot(), -1)
print(maxdepth) | 3.9375 | 4 |
pipelines/voc-2007/sched.py | fenwuyaoji/approx-vision | 61 | 12795484 | #! /usr/bin/env python
from __future__ import unicode_literals
from PIL import Image
from subprocess import check_call
from concurrent import futures
import subprocess
import os
import io
import sys
from os import listdir
from os.path import isfile, join
import psutil
import time
vers_to_run = [ 3, 4, 5, 7, 8, 9,10,11,12,58,59,60,61,62,63,64]
in_vers = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 6, 6, 6, 6, 6, 6]
num_threads = 14
# The directory to convert
datasetpath = '/datasets/voc-2007/'
def convert_img(file_name,in_img_dir,out_img_dir):
# Make temp directory
temp_dir = 'temp_'+str(os.getpid())
subprocess.call('mkdir -p '+temp_dir,shell=True)
# Convert to png #
im = Image.open(in_img_dir+file_name)
im.save(temp_dir+'/'+file_name+'_temp.png')
# Run the given pipeline on the png
subprocess.call('../common/pipeline_V'+str(version) + '.o ' +
temp_dir + '/' + file_name + '_temp.png ' +
temp_dir + '/', shell=True)
# Convert back to jpeg and save
im = Image.open(temp_dir+'/'+'output.png')
im.save(out_img_dir+'/'+file_name)
# Delete temp directory
subprocess.call('rm -rf '+temp_dir,shell=True)
for i, version in enumerate(vers_to_run):
in_version = in_vers[i]
subprocess.call('make --directory ../common/ version='+str(version),shell=True)
# Copy all but the JPEG images
subprocess.call('rsync -av '+
datasetpath+'/v'+str(in_version)+'/ '+
datasetpath+'/v'+str(version)+' '+
'--exclude VOC2007/JPEGImages',
shell=True)
in_img_dir = datasetpath+'v'+str(in_version)+'/VOC2007/JPEGImages/'
out_img_dir = datasetpath+'v'+str(version)+'/VOC2007/JPEGImages/'
# Make the directory for this section
subprocess.call('mkdir -p '+out_img_dir,shell=True)
# Get list of files in directory
file_list = [f for f in listdir(in_img_dir) if
isfile(join(in_img_dir, f))]
file_list.sort()
with futures.ProcessPoolExecutor(max_workers=num_threads) as executor:
fs = [executor.submit(convert_img,file_name,in_img_dir,out_img_dir) for file_name in file_list]
for i, f in enumerate(futures.as_completed(fs)):
# Write progress to error so that it can be seen
sys.stderr.write( \
"Converted Image: {} / {} \r".format(i, len(file_list)))
| 2.375 | 2 |
parser.py | rougier/shadergraph | 3 | 12795485 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2015, <NAME>
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
from pyparsing import *
keywords = ("attribute const uniform varying break continue do for while"
"if else"
"in out inout"
"float int void bool true false"
"lowp mediump highp precision invariant"
"discard return"
"mat2 mat3 mat4"
"vec2 vec3 vec4 ivec2 ivec3 ivec4 bvec2 bvec3 bvec4 sampler2D samplerCube"
"struct")
reserved = ("asm"
"class union enum typedef template this packed"
"goto switch default"
"inline noinline volatile public static extern external"
"interface flat long short double half fixed unsigned superp"
"input output"
"hvec2 hvec3 hvec4 dvec2 dvec3 dvec4 fvec2 fvec3 fvec4 sampler1D sampler3D"
"sampler1DShadow sampler2DShadow"
"sampler2DRect sampler3DRect sampler2DRectShadow"
"sizeof cast"
"namespace using")
IDENTIFIER = Regex('[a-zA-Z_][a-zA-Z_0-9]*')
INT_DECIMAL = Regex('([+-]?(([1-9][0-9]*)|0+))')
INT_OCTAL = Regex('(0[0-7]*)')
INT_HEXADECIMAL = Regex('(0[xX][0-9a-fA-F]*)')
INTEGER = INT_HEXADECIMAL | INT_OCTAL | INT_DECIMAL
FLOAT = Regex('[+-]?(((\d+\.\d*)|(\d*\.\d+))([eE][-+]?\d+)?)|(\d*[eE][+-]?\d+)')
LPAREN, RPAREN = Literal("(").suppress(), Literal(")").suppress()
LBRACK, RBRACK = Literal("[").suppress(), Literal("]").suppress()
LBRACE, RBRACE = Literal("{").suppress(), Literal("}").suppress()
SEMICOLON, COMMA = Literal(";").suppress(), Literal(",").suppress()
EQUAL = Literal("=").suppress()
SIZE = INTEGER | IDENTIFIER
OPERATOR = oneOf("+ - * / [ ] . & ^ ! { }")
STORAGE_QUALIFIER = Regex("const|varying|uniform|attribute")
CONST_QUALIFIER = Literal("const")
INVARIANT_QUALIFIER = Literal("invariant")
PRECISION_QUALIFIER = Regex("lowp|mediump|highp")
PARAMETER_QUALIFIER = Regex("(in|out|inout)[ \t\n]")
# Variable declarations
# ---------------------
PART = nestedExpr() | nestedExpr('{','}') | IDENTIFIER | INTEGER | FLOAT | OPERATOR
EXPR = delimitedList(PART, delim=Empty()).setParseAction(keepOriginalText)
VARIABLE = (IDENTIFIER("name") + Optional(LBRACK + SIZE + RBRACK)("size")
+ Optional(EQUAL + EXPR)("value"))
VARIABLES = delimitedList(VARIABLE.setResultsName("variables",listAllMatches=True))
DECLARATION = (STORAGE_QUALIFIER("storage") + Optional(PRECISION_QUALIFIER)("precision") +
IDENTIFIER("type") + VARIABLES + SEMICOLON)
DECLARATION.ignore(cStyleComment)
# Function parameter
# ------------------
PARAMETER = Group(Optional(STORAGE_QUALIFIER)("storage") +
Optional(PRECISION_QUALIFIER)("precision") +
Optional(PARAMETER_QUALIFIER)("inout") +
IDENTIFIER("type") + Optional(IDENTIFIER("name")) +
Optional(LBRACK + SIZE + RBRACK)("size"))
# Function prototypes
# -------------------
FUNCTION = (Optional(STORAGE_QUALIFIER)("storage") +
Optional(PRECISION_QUALIFIER)("precision") +
IDENTIFIER("type") + IDENTIFIER("name") +
LPAREN + Optional(delimitedList(PARAMETER))("parameters") + RPAREN +
((nestedExpr("{", "}").setParseAction(keepOriginalText)("code")) | SEMICOLON))
FUNCTION.ignore(cStyleComment)
# Struct definitions & declarations
# ---------------------------------
STRUCT = ( Literal("struct").suppress() + IDENTIFIER("type") +
nestedExpr("{", "}").setParseAction(keepOriginalText)("content") +
Optional(VARIABLES) + SEMICOLON)
STRUCT.ignore(cStyleComment)
# Constants
# ---------
CONSTANT = (Literal("#").suppress() + Literal("define").suppress() +
IDENTIFIER("name") + restOfLine("value"))
class Type(object):
def __init__(self, base=None, storage=None, precision=None, size=None):
if isinstance(base, Type):
other = base
self.base = other.base
self.size = other.size
self.storage = other.storage
self.precision = other.precision
else:
self.base = base.strip()
self.size = size.strip()
self.storage = storage.strip()
self.precision = precision.strip()
def __str__(self):
s = ""
if self.storage:
s += "%s " % self.storage
if self.precision:
s += "%s " % self.precision
s += "%s" % self.base
return s
def __eq__(self, other):
return (self.base == other.base and
self.size == other.size and
self.precision == other.precision)
class Parameter(object):
def __init__(self, type, name=None, inout="in"):
self.type = Type(type)
self.name = name.strip()
self.alias = name.strip()
self.inout = inout.strip()
def __str__(self):
s = ""
if self.inout: s += "%s " % self.inout
s += str(self.type) + " "
if self.name: s += "%s" % self.name
if self.type.size: s += "[%s]" % self.size
return s
class Variable(object):
def __init__(self, type, name, value=None):
self.type = Type(type)
self.name = name.strip()
self.alias = name.strip()
self.value = value.strip()
def __str__(self):
s = str(self.type) + " " + self.alias
if self.type.size:
s += "[%s]" % self.type.size
if self.value:
s += " = %s" % self.value
s += ";"
return s
class Prototype(object):
def __init__(self, type, name, parameters):
self.type = Type(type)
self.name = name.strip()
self.alias = name.strip()
self.parameters = parameters
def __str__(self):
s = str(self.type) + " %s (" % self.alias
for i, parameter in enumerate(self.parameters):
s += str(parameter)
if i < len(self.parameters)-1:
s+= ", "
s += ");"
return s
class Function(object):
def __init__(self, type, name, parameters, code):
self.type = Type(type)
self.name = name.strip()
self.alias = name.strip()
self.parameters = parameters
self.code = code.strip()
def __str__(self):
s = str(self.type) + " %s (" % self.alias
for i, parameter in enumerate(self.parameters):
s += str(parameter)
if i < len(self.parameters)-1:
s+= ", "
s += ") "
s += self.code
return s
class Constant(object):
def __init__(self, name, value):
self.name = name.strip()
self.alias = name.strip()
self.value = value.strip()
def __str__(self):
s = "#define %s %s" % (self.alias, self.value)
return s
def __eq__(self, other):
return self.value == other.value
class Struct(object):
def __init__(self, name, content):
self.name = name.strip()
self.content = content.strip()
def __str__(self):
s = "struct %s %s;" % (self.name, self.content)
return s
def parse(code):
""" Parse a GLSL source code into an abstract syntax list """
constants = []
structs = []
variables = []
prototypes= []
functions = []
# Constants
for (token, start, end) in CONSTANT.scanString(code):
C = Constant(name = token.name,
value = token.value)
constants.append(C)
# Variables
for (token, start, end) in DECLARATION.scanString(code):
for variable in token.variables:
size = '' if not variable.size else variable.size[0]
value = '' if not variable.value else variable.value[0]
V = Variable(Type(base = token.type,
storage = token.storage,
precision = token.precision,
size = size),
name = variable.name,
value = value)
variables.append(V)
# Struct definitions & declarations
for (token, start, end) in STRUCT.scanString(code):
S = Struct(name = token.type,
content = token.content[0])
structs.append(S)
for variable in token.variables:
size = '' if not variable.size else variable.size[0]
value = '' if not variable.value else variable.value[0]
V = Variable(Type(base = token.type,
size = size),
name = variable.name,
value = value)
variables.append(V)
# Functions prototype and definitions
for (token, start, end) in FUNCTION.scanString(code):
parameters = []
for parameter in token.parameters:
size = '' if not parameter.size else parameter.size[0]
P = Parameter(type = Type(base = parameter.type,
storage = parameter.storage,
precision = parameter.precision,
size = parameter.size),
name = parameter.name,
inout = parameter.inout)
parameters.append(P)
T = Type(base = token.type,
storage = token.storage,
precision = token.precision,
size = token.size)
if token.code:
F = Function( type = T,
name = token.name,
parameters = parameters,
code = token.code[0])
functions.append(F)
for parameter in parameters:
parameter.function = F
else:
P = Prototype(type = T,
name = token.name,
parameters = parameters)
prototypes.append(P)
for parameter in parameters:
parameter.function = None
return constants, structs, variables, prototypes, functions
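if __name__ == '__main__':
    # Minimal usage sketch on a tiny hand-written GLSL fragment (illustrative
    # only; not part of the original module).
    demo_source = """
    #define PI 3.14159
    uniform vec4 u_color;
    float brightness(vec3 c) { return dot(c, vec3(0.299, 0.587, 0.114)); }
    """
    constants, structs, variables, prototypes, functions = parse(demo_source)
    print('constants :', [c.name for c in constants])
    print('variables :', [v.name for v in variables])
    print('functions :', [f.name for f in functions])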
| 2.015625 | 2 |
src/AtmNu_Recoildistribution.py | cajohare/AtmNuFloor | 1 | 12795486 | <gh_stars>1-10
import os
import sys
sys.path.append('../src')
from numpy import *
from numpy import random
from Params import *
from NeutrinoFuncs import *
from LabFuncs import *
# This file doesn't save all its recoils because we need a large number to
# make a nice plot of energy/phi/costh distribution. So everytime this file
# is run the new distribution is merged with a previous one to make it smoother
# each time.
# I needed to run this file (both Xe131 and Ar40) around 10 times to get
# a nice distribution, but the recoil file ends up being huge (which is why
# it's not in the git repository)
#==============================================================================#
# Input
Nuc = eval(sys.argv[1])
print('Nucleus = ',Nuc.Name)
if Nuc.Name=='Xe':
E_min = 2.0
E_max = 200.0
elif Nuc.Name=='Ar':
E_min = 20.0
E_max = 400.0
#==============================================================================#
ngen = 1000000
fname = 'AtmNu_GranSasso_SolarMin.d'
ne = 20
#### Load high energy data
Phi_tot,E_high,cosZ,phi_Az = GetAtmNuFluxes(fname)
Phi_high = squeeze(sum(sum(Phi_tot,0),0))
###### Load low energy FLUKA data
dat1 = loadtxt(nufile_dir+'/atmospheric/FLUKA/AtmNu_mubar.txt',delimiter=',')
dat2 = loadtxt(nufile_dir+'/atmospheric/FLUKA/AtmNu_mu.txt',delimiter=',')
dat3 = loadtxt(nufile_dir+'/atmospheric/FLUKA/AtmNu_e.txt',delimiter=',')
dat4 = loadtxt(nufile_dir+'/atmospheric/FLUKA/AtmNu_ebar.txt',delimiter=',')
E_low = dat1[:,0]
Phi_low = dat1[:,1]+dat2[:,1]+dat3[:,1]+dat4[:,1]
###### Join the two
E_join = append(E_low[0:260],E_high[9:])
Phi_join = append(Phi_low[0:260],Phi_high[9:])
##### Interpolate to create new array
nfine = 1000
E_nu_max = 1.0e4
E_fine = linspace(E_join[0],E_nu_max,nfine)
Phi_fine = interp(E_fine,E_join,Phi_join)
# Generate ngen initial energies and directions
E_gen,phi_nu_gen,costh_nu_gen,E_r_gen =\
GenerateAtmNuDirections(ngen,E_fine,Phi_fine,E_high,Phi_tot,cosZ,phi_Az,Nuc)
# Scatter each neutrino
E_r_gen,phi_r_gen,costh_r_gen =\
ScatterNeutrinos(Nuc,E_gen,phi_nu_gen,costh_nu_gen,E_r_gen)
# Window and get angles
mask_window = (E_r_gen<=E_max)*(E_r_gen>=E_min)
E_r_gen = E_r_gen[mask_window]
phi_r_gen = phi_r_gen[mask_window]
costh_r_gen = costh_r_gen[mask_window]
nleft = size(costh_r_gen)
print('nleft=',size(costh_r_gen))
print('Generating Cygnus angles')
costh_r_gen_2 = zeros(shape=nleft)
t_gen = random.uniform(size=nleft)
for i in range(0,nleft):
v_lab = LabVelocity(Jan1+67+t_gen[i])
v_lab = v_lab/sqrt(sum(v_lab**2.0))
x_rec = array([cos(phi_r_gen[i])*sqrt(1-costh_r_gen[i]**2.0),
sin(phi_r_gen[i])*sqrt(1-costh_r_gen[i]**2.0),
costh_r_gen[i]])
costh_r_gen_2[i] = sum(v_lab*x_rec)
# Binning
costhmin = 0.0
costh_edges = sqrt(linspace(0.0,1.0,ne+1))
costh_centers = (costh_edges[1:]+costh_edges[0:-1])/2.0
E_r_edges = logspace(log10(E_min),log10(E_max),ne+1)
E_r_centers = (E_r_edges[1:]+E_r_edges[0:-1])/2.0
[E,C] = meshgrid(E_r_centers,costh_centers)
eff2 = efficiency(Nuc,E)
# Atmospheric neutrino rate
R_Atm = R_AtmNu(E_min,E_max,Nuc=Nuc,eff_on=False)
R1,ce,ee = histogram2d(abs(costh_r_gen),log10(E_r_gen),bins=(ne,ne),\
range=[[0.0,1.0],[log10(E_min),log10(E_max)]])
R1 = R_Atm*R1/sum(sum(R1))
R2,ce,ee = histogram2d(abs(costh_r_gen_2),log10(E_r_gen),bins=(ne,ne),\
range=[[0.0,1.0],[log10(E_min),log10(E_max)]])
R2 = R_Atm*R2/sum(sum(R2))
DAT1 = vstack((costh_centers,E_r_centers,R1))
DAT2 = vstack((costh_centers,E_r_centers,R2))
recoildat_fname1 = recoil_dir+'AtmNu_Ecosth_'+Nuc.Name+'_Stationary.txt'
recoildat_fname2 = recoil_dir+'AtmNu_Ecosth_'+Nuc.Name+'_CygnusTracking.txt'
file_exists = os.path.exists(recoildat_fname1)
if file_exists:
DAT_prev1 = loadtxt(recoildat_fname1)
DAT_prev2 = loadtxt(recoildat_fname2)
if (shape(DAT_prev1)[0]==shape(DAT1)[0])&(shape(DAT_prev1)[1]==shape(DAT1)[1]):
DAT1[2:,:] = (DAT_prev1[2:,:]+DAT1[2:,:])/2.0
DAT2[2:,:] = (DAT_prev2[2:,:]+DAT2[2:,:])/2.0
savetxt(recoildat_fname1,DAT1)
savetxt(recoildat_fname2,DAT2)
print('merged')
else:
savetxt(recoildat_fname1,DAT1)
savetxt(recoildat_fname2,DAT2)
print('overwritten')
else:
savetxt(recoildat_fname1,DAT1)
savetxt(recoildat_fname2,DAT2)
print('first write')
| 2.234375 | 2 |
coinnews/pipelines.py | jlparadox/coinnews | 0 | 12795487 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
from scrapy.conf import settings
from scrapy.exceptions import DropItem
from scrapy import log
class CoinnewsPipeline(object):
collection_name = 'coin_articles'
def __init__(self, mongo_uri, mongo_db):
self.mongo_uri = mongo_uri
self.mongo_db = mongo_db
@classmethod
def from_crawler(cls, crawler):
return cls(
mongo_uri=crawler.settings.get('MONGO_URI'),
mongo_db=crawler.settings.get('MONGO_DATABASE', 'items')
)
def open_spider(self, spider):
self.client = pymongo.MongoClient(self.mongo_uri)
self.db = self.client[self.mongo_db]
def close_spider(self, spider):
self.client.close()
def process_item(self, item, spider):
for data in item:
if not data:
raise DropItem("Missing data!")
self.db[self.collection_name].insert_one(dict(item))
log.msg("Question added to MongoDB database!",
level=log.DEBUG, spider=spider)
return item
| 2.578125 | 3 |
tests/shunit/data/bad_i18n_newline_4.py | saloniig/TWLight | 67 | 12795488 | <filename>tests/shunit/data/bad_i18n_newline_4.py
# Single-quoted string is preceded by newline.
# Translators: This is a helpful comment.
_(
'4')
| 0.863281 | 1 |
ticketNumber.py | brocchirodrigo/ValidadorBoletosSantanderPython | 0 | 12795489 | def verifyTicket(ticket):
ticket = ticket[::-1]
t01 = int(ticket[0]) * 2
t02 = int(ticket[1]) * 3
t03 = int(ticket[2]) * 4
t04 = int(ticket[3]) * 5
t05 = int(ticket[4]) * 6
t06 = int(ticket[5]) * 7
t07 = int(ticket[6]) * 8
t08 = int(ticket[7]) * 9
t09 = int(ticket[8]) * 2
t10 = int(ticket[9]) * 3
t11 = int(ticket[10]) * 4
t12 = int(ticket[11]) * 5
sumTicket = (t01 + t02 + t03 + t04 + t05 + t06 + t07 + t08 + t09 + t10 + t11 + t12)
resTicket = sumTicket % 11
verifyDig = 0
if resTicket == 10:
verifyDig = 1
elif resTicket == 1:
verifyDig = 0
elif resTicket == 0:
verifyDig = 0
else:
verifyDig = (11 - resTicket)
return verifyDig
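if __name__ == '__main__':
    # Illustrative call with a made-up 12-digit block (not a real boleto
    # number); the function returns the modulo-11 check digit for it.
    print(verifyTicket('033991234567'))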
| 3.265625 | 3 |
matchengine/internals/utilities/update_match_utils.py | AveraSD/matchengine-V2 | 18 | 12795490 | from __future__ import annotations
import asyncio
import datetime
import logging
from typing import TYPE_CHECKING
from pymongo import UpdateMany, InsertOne
from matchengine.internals.typing.matchengine_types import RunLogUpdateTask, UpdateTask, MongoQuery
from matchengine.internals.utilities.list_utils import chunk_list
from matchengine.internals.utilities.utilities import perform_db_call
logging.basicConfig(level=logging.INFO)
log = logging.getLogger('matchengine')
if TYPE_CHECKING:
from matchengine.internals.engine import MatchEngine
async def async_update_matches_by_protocol_no(matchengine: MatchEngine, protocol_no: str):
"""
Update trial matches by diff'ing the newly created trial matches against existing matches in
the db. Delete matches by adding {is_disabled: true} and insert all new matches.
"""
matches_by_sample_id = matchengine.matches.get(protocol_no, dict())
updated_time = datetime.datetime.now()
for matches in matches_by_sample_id.values():
for match in matches:
match['_updated'] = updated_time
if protocol_no not in matchengine.matches or protocol_no not in matchengine._trials_to_match_on:
log.info(f"{matchengine.match_criteria_transform.trial_collection} {protocol_no} was not matched on, not updating {matchengine.match_criteria_transform.trial_collection} matches")
if not matchengine.skip_run_log_entry:
matchengine.task_q.put_nowait(RunLogUpdateTask(protocol_no))
await matchengine.task_q.join()
return
log.info(f"Updating matches for {protocol_no}")
if not matchengine.drop:
# If no matches are found, disable all match records by sample id
if not matchengine.matches[protocol_no]:
for chunk in chunk_list(list(matchengine.clinical_ids_for_protocol_cache[protocol_no]),
matchengine.chunk_size):
matchengine.task_q.put_nowait(
UpdateTask(
[UpdateMany(filter={matchengine.match_criteria_transform.match_trial_link_id: protocol_no,
'clinical_id': {'$in': chunk}},
update={'$set': {"is_disabled": True,
'_updated': updated_time}})],
protocol_no
)
)
else:
# Get matches to disable and issue queries
matches_to_disable = await get_all_except(matchengine, protocol_no, matches_by_sample_id)
delete_ops = await get_delete_ops(matches_to_disable, matchengine)
matchengine.task_q.put_nowait(UpdateTask(delete_ops, protocol_no))
for sample_id in matches_by_sample_id.keys():
if not matchengine.drop:
new_matches_hashes = [match['hash'] for match in matches_by_sample_id[sample_id]]
# get existing matches in db with identical hashes to newly found matches
existing = await get_existing_matches(matchengine, new_matches_hashes)
existing_hashes = {result['hash'] for result in existing}
disabled = {result['hash'] for result in existing if result['is_disabled']}
# insert new matches if they don't already exist. disable everything else
matches_to_insert = get_matches_to_insert(matches_by_sample_id,
existing_hashes,
sample_id)
matches_to_disable = await get_matches_to_disable(matchengine,
new_matches_hashes,
protocol_no,
sample_id)
# flip is_disabled flag if a new match generated during run matches hash of an existing
matches_to_mark_available = [m for m in matches_by_sample_id[sample_id] if
m['hash'] in disabled]
ops = get_update_operations(matches_to_disable,
matches_to_insert,
matches_to_mark_available,
matchengine)
else:
ops = [InsertOne(document=trial_match) for trial_match in
matches_by_sample_id[sample_id]]
matchengine.task_q.put_nowait(UpdateTask(ops, protocol_no))
if not matchengine.skip_run_log_entry:
matchengine.task_q.put_nowait(RunLogUpdateTask(protocol_no))
await matchengine.task_q.join()
async def get_all_except(matchengine: MatchEngine,
protocol_no: str,
trial_matches_by_sample_id: dict) -> list:
"""Return all matches except ones matching current protocol_no"""
# get clinical ids with matches
clinical_ids = {matchengine.sample_mapping[sample_id] for sample_id in trial_matches_by_sample_id.keys()}
# if protocol has been run previously, subtract clinical ids from current run from
# previously run clinical ids for a specific protocol. The remainder are ids
# which were run previously, but not in the current run.
if protocol_no in matchengine.clinical_run_log_entries:
clinical_ids = matchengine.clinical_run_log_entries[protocol_no] - clinical_ids
query = {
matchengine.match_criteria_transform.match_trial_link_id: protocol_no,
"clinical_id": {
'$in': [clinical_id for clinical_id in clinical_ids]
}
}
projection = {
'_id': 1,
'hash': 1,
'clinical_id': 1
}
results = await perform_db_call(matchengine,
collection=matchengine.trial_match_collection,
query=MongoQuery(query),
projection=projection)
return [result for result in results]
async def get_delete_ops(matches_to_disable: list, matchengine: MatchEngine) -> list:
updated_time = datetime.datetime.now()
hashes = [result['hash'] for result in matches_to_disable]
ops = list()
for chunk in chunk_list(hashes, matchengine.chunk_size):
ops.append(UpdateMany(filter={'hash': {'$in': chunk}},
update={'$set': {"is_disabled": True, '_updated': updated_time}}))
return ops
async def get_existing_matches(matchengine: MatchEngine, new_matches_hashes: list) -> list:
"""
Get matches in db which have the same hashes as newly found matches.
:param matchengine:
:param new_matches_hashes:
:return:
"""
matches_to_not_change_query = MongoQuery({'hash': {'$in': new_matches_hashes}})
projection = {"hash": 1, "is_disabled": 1}
matches = await asyncio.gather(
perform_db_call(matchengine,
matchengine.trial_match_collection,
matches_to_not_change_query,
projection)
)
return matches[0]
async def get_matches_to_disable(matchengine: MatchEngine,
new_matches_hashes: list,
protocol_no: str,
sample_id: str) -> list:
"""
Get matches to disable by looking for existing, enabled matches whose
hashes are not present in newly generated matches during current run.
Done for every sample_id
:param matchengine:
:param new_matches_hashes:
:param protocol_no:
:param sample_id:
:return:
"""
query = {
matchengine.match_criteria_transform.match_trial_link_id: protocol_no,
'sample_id': sample_id,
'is_disabled': False,
'hash': {
'$nin': new_matches_hashes
}
}
matches_to_disable_query = MongoQuery(query)
projection = {"hash": 1, "is_disabled": 1}
matches = await asyncio.gather(
perform_db_call(matchengine,
matchengine.trial_match_collection,
matches_to_disable_query,
projection)
)
return matches[0]
def get_update_operations(matches_to_disable: list,
matches_to_insert: list,
matches_to_mark_available: list,
matchengine: MatchEngine) -> list:
ops = list()
updated_time = datetime.datetime.now()
disable_hashes = [trial_match['hash'] for trial_match in matches_to_disable]
for chunk in chunk_list(disable_hashes, matchengine.chunk_size):
ops.append(UpdateMany(filter={'hash': {'$in': chunk}},
update={'$set': {'is_disabled': True,
'_updated': updated_time}}))
for to_insert in matches_to_insert:
ops.append(InsertOne(document=to_insert))
available_hashes = [trial_match['hash'] for trial_match in matches_to_mark_available]
for chunk in chunk_list(available_hashes, matchengine.chunk_size):
ops.append(UpdateMany(filter={'hash': {'$in': chunk}},
update={'$set': {'is_disabled': False,
'_updated': updated_time}}))
return ops
def get_matches_to_insert(matches_by_sample_id: list, existing_hashes: set,
sample_id: str) -> list:
return [m for m in matches_by_sample_id[sample_id] if m['hash'] not in existing_hashes]
| 2.09375 | 2 |
tests/test_makeload.py | hansroh/aquests | 8 | 12795491 | import aquests
CONCURRENT = 50
MAX_REQ = 1000
_ID = 0
def makeload (response):
global _ID
print (response.meta ['_id'], response.code, response.msg, response.version)
if aquests.countreq () < MAX_REQ:
aquests.get ("http://127.0.0.1:5000/", meta = {'_id': _ID})
_ID += 1
def test_makeload ():
    aquests.configure(CONCURRENT, callback=makeload)  # concurrent requests
for i in range (CONCURRENT):
aquests.get ("http://127.0.0.1:5000/", meta = {'_id': _ID})
_ID += 1
aquests.fetchall ()
| 2.453125 | 2 |
aids/strings/reverse_string.py | ueg1990/aids | 0 | 12795492 | '''
Reverse a string
'''
def reverse_string_iterative(string):
result = ''
    for index in range(len(string) - 1, -1, -1):
        result += string[index]
return result
def reverse_string_recursive(string):
if string:
return reverse_string_recursive(string[1:]) + string[0]
return ''
def reverse_string_pythonic(string):
return string[::-1]
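if __name__ == '__main__':
    # Quick sanity check that the three variants agree (illustrative only).
    for reverse in (reverse_string_iterative,
                    reverse_string_recursive,
                    reverse_string_pythonic):
        assert reverse('hello') == 'olleh'
    print('all three reversal functions agree')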
| 4.34375 | 4 |
nuke_stubs/nuke/nuke_classes/Undo.py | sisoe24/Nuke-Python-Stubs | 1 | 12795493 | <gh_stars>1-10
from numbers import Number
from typing import *
import nuke
from . import *
class Undo(object):
"""
Undo
"""
def __hash__(self, ):
"""
Return hash(self).
"""
return None
def __new__(self,*args, **kwargs):
"""
Create and return a new object. See help(type) for accurate signature.
"""
return None
def begin(self,*args, **kwargs):
"""
Begin a new user-visible group of undo actions.
"""
return None
def name(self,*args, **kwargs):
"""
Name current undo set.
"""
return None
def end(self,*args, **kwargs):
"""
Complete current undo set and add it to the undo list.
"""
return None
def new(self,*args, **kwargs):
"""
Same as end();begin().
"""
return None
def cancel(self,*args, **kwargs):
"""
Undoes any actions recorded in the current set and throws it away.
"""
return None
def undoSize(self,*args, **kwargs):
"""
Number of undo's that can be done.
"""
return None
def redoSize(self,*args, **kwargs):
"""
Number of redo's that can be done.
"""
return None
def undoTruncate(self,*args, **kwargs):
"""
Destroy any undo's greater or equal to n.
"""
return None
def redoTruncate(self,*args, **kwargs):
"""
Destroy any redo's greater or equal to n.
"""
return None
def undoDescribe(self,*args, **kwargs):
"""
Return short description of undo n.
"""
return None
def redoDescribe(self,*args, **kwargs):
"""
Return short description of redo n.
"""
return None
def undoDescribeFully(self,*args, **kwargs):
"""
Return long description of undo n.
"""
return None
def redoDescribeFully(self,*args, **kwargs):
"""
Return long description of redo n.
"""
return None
def undo(self,*args, **kwargs):
"""
Undoes 0'th undo.
"""
return None
def redo(self,*args, **kwargs):
"""
Redoes 0'th redo.
"""
return None
def disable(self,*args, **kwargs):
"""
Prevent recording undos until matching enable()
"""
return None
def enable(self,*args, **kwargs):
"""
Undoes the previous disable()
"""
return None
def disabled(self,*args, **kwargs):
"""
True if disable() has been called
"""
return None
def __enter__(self,*args, **kwargs):
"""
"""
return None
def __exit__(self,*args, **kwargs):
"""
"""
return None
def __init__(self, *args, **kwargs):
"""
Initialize self. See help(type(self)) for accurate signature.
"""
return None | 3.109375 | 3 |
get_events.py | malcolmvr/o365_linux_meeting_alerts | 0 | 12795494 | <filename>get_events.py
from datetime import datetime
from os import getenv
from os.path import isfile
from dotenv import load_dotenv
from O365 import Account
load_dotenv()
credentials = (getenv('APPLICATION_ID'), getenv('CLIENT_SECRET'))
print(credentials)
account = Account(credentials, auth_flow_type='authorization', tenant_id=getenv('TENANT_ID'))
if not isfile("./o365_token.txt"):
if account.authenticate(scopes=['https://graph.microsoft.com/.default', 'offline_access']):
print('Authenticated!')
else:
account.connection.refresh_token()
schedule = account.schedule()
calendar = schedule.get_default_calendar()
now = datetime.now()
date_query = calendar \
.new_query('start') \
.greater_equal(datetime(now.year, now.month, now.day, 0, 0, 0)) \
.chain('and') \
.on_attribute('end') \
.less_equal(datetime(now.year, now.month, now.day, 23, 59, 59))
events = list(calendar.get_events(query=date_query, include_recurring=True))
with open("events.txt", "w") as f:
f.write("\n".join([event.start.isoformat() for event in events]))
| 2.59375 | 3 |
python_basics/5.data_conversion/string_to_integer.py | edilsonmatola/Python_Master | 2 | 12795495 | string_number = '15'
# converting to integer
# integer_number will contain 15
integer_number = int(string_number)
print(integer_number) # Output: 15
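# int() only parses strings that already look like whole numbers; a decimal
# string raises ValueError and needs an explicit float() step first.
try:
    int('15.5')
except ValueError:
    print(int(float('15.5')))  # Output: 15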
| 3.703125 | 4 |
get_next_person.py | jbarry1506/python_unittest | 0 | 12795496 | def get_next_person(user):
person = get_random_person()
while person in user['people_seen']:
person = get_random_person()
return person
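# A hypothetical test sketch (not part of the original file) showing how the
# loop could be exercised deterministically by patching get_random_person;
# the patch target and helper names are assumptions about the wider project.
#
#     from unittest import mock
#
#     @mock.patch('get_next_person.get_random_person',
#                 side_effect=['alice', 'bob'], create=True)
#     def test_skips_people_already_seen(mock_random):
#         user = {'people_seen': {'alice'}}
#         assert get_next_person(user) == 'bob'
#         assert mock_random.call_count == 2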
| 2.890625 | 3 |
copycat/codelet_methods.py | jalanb/co.py.cat | 27 | 12795497 | import logging
import random
from . import formulas
from . import temperature
from .bond import Bond
from .bond import possible_group_bonds
from .coderack import coderack
from .correspondence import Correspondence
from .group import Group
from .letter import Letter
from .replacement import Replacement
from .slipnet import slipnet
from .workspace_formulas import choose_bond_facet
from .workspace_formulas import choose_directed_neighbor
from .workspace_formulas import choose_neighbour
from .workspace_formulas import choose_unmodified_object
from .workspace_formulas import workspace
from .workspace_object import WorkspaceObject
# some methods common to the codelets
def __show_which_string_object_is_from(structure):
if not structure:
return "unstructured"
if isinstance(structure, WorkspaceObject):
return "target"
if structure.string == workspace.initial:
return "initial"
return "other"
def __get_scout_source(slipnode, relevance_method, type_name):
initial_relevance = relevance_method(workspace.initial, slipnode)
target_relevance = relevance_method(workspace.target, slipnode)
initial_unhappiness = workspace.initial.intra_string_unhappiness
target_unhappiness = workspace.target.intra_string_unhappiness
logging.info(
f"initial : relevance = {initial_relevance}, "
f"unhappiness = {int(initial_unhappiness)}"
)
logging.info(
f"target : relevance = {target_relevance}, "
f"unhappiness = {int(target_unhappiness)}"
)
string = workspace.initial
relevances = initial_relevance + target_relevance
unhappinesses = initial_unhappiness + target_unhappiness
randomized = random.random() * (relevances + unhappinesses)
initials = initial_relevance + initial_unhappiness
if randomized > initials:
string = workspace.target
logging.info(f"target string selected: {workspace.target} for {type_name}")
else:
logging.info(f"initial string selected: {workspace.initial} for {type_name}")
source = choose_unmodified_object("intra_string_salience", string.objects)
return source
def __get_bond_facet(source, destination):
bond_facet = choose_bond_facet(source, destination)
assert bond_facet
return bond_facet
def __get_descriptors(bond_facet, source, destination):
source_descriptor = source.get_descriptor(bond_facet)
destination_descriptor = destination.get_descriptor(bond_facet)
assert source_descriptor
assert destination_descriptor
return source_descriptor, destination_descriptor
def __all_opposite_mappings(mappings):
return len([m for m in mappings if m.label != slipnet.opposite]) == 0
def __structure_versus_structure(structure1, weight1, structure2, weight2):
structure1.update_strength()
structure2.update_strength()
weighted_strength1 = formulas.temperature_adjusted_value(
structure1.total_strength * weight1
)
weighted_strength2 = formulas.temperature_adjusted_value(
structure2.total_strength * weight2
)
rhs = (weighted_strength1 + weighted_strength2) * random.random()
logging.info(f"{weighted_strength1} > {rhs}: {weighted_strength1 > rhs}")
return weighted_strength1 > rhs
def __fight(structure, structure_weight, incompatibles, incompatible_weight):
if not (incompatibles and len(incompatibles)):
return True
for incompatible in incompatibles:
if not __structure_versus_structure(
structure, structure_weight, incompatible, incompatible_weight
):
logging.info(f"lost fight with {incompatible}")
return False
logging.info(f"won fight with {incompatible}")
return True
def __fight_incompatibles(
incompatibles, structure, name, structure_weight, incompatible_weight
):
if len(incompatibles):
if __fight(structure, structure_weight, incompatibles, incompatible_weight):
logging.info(f"broke the {name}")
return True
logging.info(f"failed to break {name}: Fizzle")
return False
logging.info(f"no incompatible {name}")
return True
def __slippability(concept_mappings):
for mapping in concept_mappings:
slippiness = mapping.slippability() / 100.0
probability_of_slippage = formulas.temperature_adjusted_probability(slippiness)
if formulas.coin_flip(probability_of_slippage):
return True
return False
# start the actual codelets
def breaker():
probability_of_fizzle = (100.0 - formulas.Temperature) / 100.0
assert not formulas.coin_flip(probability_of_fizzle)
# choose a structure at random
structures = [
s for s in workspace.structures if isinstance(s, (Group, Bond, Correspondence))
]
assert structures
structure = random.choice(structures)
__show_which_string_object_is_from(structure)
break_objects = [structure]
if isinstance(structure, Bond):
if structure.source.group:
if structure.source.group == structure.destination.group:
break_objects += [structure.source.group]
# try to break all objects
for structure in break_objects:
break_probability = formulas.temperature_adjusted_probability(
structure.total_strength / 100.0
)
if formulas.coin_flip(break_probability):
return
for structure in break_objects:
structure.break_the_structure()
def bottom_up_description_scout(codelet):
chosen_object = choose_unmodified_object("total_salience", workspace.objects)
assert chosen_object
__show_which_string_object_is_from(chosen_object)
description = formulas.choose_relevant_description_by_activation(chosen_object)
assert description
sliplinks = formulas.similar_property_links(description.descriptor)
assert sliplinks
values = [
sliplink.degree_of_association() * sliplink.destination.activation
for sliplink in sliplinks
]
i = formulas.select_list_position(values)
chosen = sliplinks[i]
chosen_property = chosen.destination
coderack.propose_description(
chosen_object, chosen_property.category(), chosen_property, codelet
)
def top_down_description_scout(codelet):
description_type = codelet.arguments[0]
chosen_object = choose_unmodified_object("total_salience", workspace.objects)
assert chosen_object
__show_which_string_object_is_from(chosen_object)
descriptions = chosen_object.get_possible_descriptions(description_type)
assert descriptions
values = [n.activation for n in descriptions]
i = formulas.select_list_position(values)
chosen_property = descriptions[i]
coderack.propose_description(
chosen_object, chosen_property.category(), chosen_property, codelet
)
def description_strength_tester(codelet):
description = codelet.arguments[0]
description.descriptor.buffer = 100.0
description.update_strength()
strength = description.total_strength
probability = formulas.temperature_adjusted_probability(strength / 100.0)
assert formulas.coin_flip(probability)
coderack.new_codelet("description-builder", codelet, strength)
def description_builder(codelet):
description = codelet.arguments[0]
assert description.object in workspace.objects
if description.object.described(description.descriptor):
description.description_type.buffer = 100.0
description.descriptor.buffer = 100.0
else:
description.build()
def bottom_up_bond_scout(codelet):
source = choose_unmodified_object("intra_string_salience", workspace.objects)
__show_which_string_object_is_from(source)
destination = choose_neighbour(source)
assert destination
logging.info(f"destination: {destination}")
bond_facet = __get_bond_facet(source, destination)
logging.info(f"chosen bond facet: {bond_facet.get_name()}")
logging.info(f"Source: {source}, destination: {destination}")
bond_descriptors = __get_descriptors(bond_facet, source, destination)
source_descriptor, destination_descriptor = bond_descriptors
logging.info(f"source descriptor: {source_descriptor.name.upper()}")
logging.info(f"destination descriptor: {destination_descriptor.name.upper()}")
category = source_descriptor.get_bond_category(destination_descriptor)
assert category
if category == slipnet.identity:
category = slipnet.sameness
logging.info(f"proposing {category.name} bond ")
coderack.propose_bond(
source,
destination,
category,
bond_facet,
source_descriptor,
destination_descriptor,
codelet,
)
def rule_scout(codelet):
assert workspace.number_of_unreplaced_objects() == 0
changed_objects = [o for o in workspace.initial.objects if o.changed]
# assert len(changed_objects) < 2
# if there are no changed objects, propose a rule with no changes
if not changed_objects:
return coderack.propose_rule(None, None, None, None, codelet)
changed = changed_objects[-1]
# generate a list of distinguishing descriptions for the first object
    # i.e. string position (leftmost, rightmost, middle or whole), or letter category
# if it is the only one of its type in the string
object_list = []
position = changed.get_descriptor(slipnet.string_position_category)
if position:
object_list += [position]
letter = changed.get_descriptor(slipnet.letter_category)
other_objects_of_same_letter = [
o
for o in workspace.initial.objects
        if o != changed and o.get_description_type(letter)
]
if not len(other_objects_of_same_letter):
object_list += [letter]
# if this object corresponds to another object in the workspace
    # object_list = the union of this and the distinguishing descriptors
if changed.correspondence:
target_object = changed.correspondence.object_from_target
new_list = []
slippages = workspace.slippages()
for node in object_list:
node = node.apply_slippages(slippages)
if target_object.described(node):
if target_object.distinguishing_descriptor(node):
new_list += [node]
object_list = new_list # should this be += ??
assert object_list
# use conceptual depth to choose a description
value_list = []
for node in object_list:
depth = node.conceptual_depth
value = formulas.temperature_adjusted_value(depth)
value_list += [value]
i = formulas.select_list_position(value_list)
descriptor = object_list[i]
    # choose the relation (e.g. change the leftmost object to "successor" or to "d")
object_list = []
if changed.replacement.relation:
object_list += [changed.replacement.relation]
object_list += [
changed.replacement.object_from_modified.get_descriptor(slipnet.letter_category)
]
# use conceptual depth to choose a relation
value_list = []
for node in object_list:
depth = node.conceptual_depth
value = formulas.temperature_adjusted_value(depth)
value_list += [value]
i = formulas.select_list_position(value_list)
relation = object_list[i]
coderack.propose_rule(
slipnet.letter_category, descriptor, slipnet.letter, relation, codelet
)
def rule_strength_tester(codelet):
rule = codelet.arguments[0]
rule.update_strength()
probability = formulas.temperature_adjusted_probability(rule.total_strength / 100.0)
assert random.random() <= probability
coderack.new_codelet("rule-builder", codelet, rule.total_strength, rule)
def replacement_finder():
# choose random letter in initial string
letters = [o for o in workspace.initial.objects if isinstance(o, Letter)]
letter_of_initial_string = random.choice(letters)
logging.info(f"selected letter in initial string = {letter_of_initial_string}")
if letter_of_initial_string.replacement:
logging.info(
f"Replacement already found for {letter_of_initial_string}, so fizzling"
)
return
position = letter_of_initial_string.left_index
more_letters = [
o
for o in workspace.modified.objects
if isinstance(o, Letter) and o.left_index == position
]
letter_of_modified_string = more_letters and more_letters[0] or None
assert letter_of_modified_string
position -= 1
initial_ascii = ord(workspace.initial_string[position])
modified_ascii = ord(workspace.modified_string[position])
diff = initial_ascii - modified_ascii
if abs(diff) < 2:
relations = {0: slipnet.sameness, -1: slipnet.successor, 1: slipnet.predecessor}
relation = relations[diff]
logging.info(f"Relation found: {relation.name}")
else:
relation = None
logging.info("no relation found")
letter_of_initial_string.replacement = Replacement(
letter_of_initial_string, letter_of_modified_string, relation
)
if relation != slipnet.sameness:
letter_of_initial_string.changed = True
workspace.changed_object = letter_of_initial_string
logging.info("building replacement")
def top_down_bond_scout__category(codelet):
logging.info("top_down_bond_scout__category")
category = codelet.arguments[0]
source = __get_scout_source(
category, formulas.local_bond_category_relevance, "bond"
)
destination = choose_neighbour(source)
logging.info(f"source: {source}, destination: {destination}")
assert destination
bond_facet = __get_bond_facet(source, destination)
source_descriptor, destination_descriptor = __get_descriptors(
bond_facet, source, destination
)
forward_bond = source_descriptor.get_bond_category(destination_descriptor)
if forward_bond == slipnet.identity:
forward_bond = slipnet.sameness
backward_bond = slipnet.sameness
else:
backward_bond = destination_descriptor.get_bond_category(source_descriptor)
assert category in [forward_bond, backward_bond]
if category == forward_bond:
coderack.propose_bond(
source,
destination,
category,
bond_facet,
source_descriptor,
destination_descriptor,
codelet,
)
else:
coderack.propose_bond(
destination,
source,
category,
bond_facet,
destination_descriptor,
source_descriptor,
codelet,
)
def top_down_bond_scout__direction(codelet):
direction = codelet.arguments[0]
source = __get_scout_source(
direction, formulas.local_direction_category_relevance, "bond"
)
destination = choose_directed_neighbor(source, direction)
assert destination
logging.info(f"to object: {destination}")
bond_facet = __get_bond_facet(source, destination)
source_descriptor, destination_descriptor = __get_descriptors(
bond_facet, source, destination
)
category = source_descriptor.get_bond_category(destination_descriptor)
assert category
if category == slipnet.identity:
category = slipnet.sameness
coderack.propose_bond(
source,
destination,
category,
bond_facet,
source_descriptor,
destination_descriptor,
codelet,
)
def bond_strength_tester(codelet):
bond = codelet.arguments[0]
__show_which_string_object_is_from(bond)
bond.update_strength()
strength = bond.total_strength
probability = formulas.temperature_adjusted_probability(strength / 100.0)
logging.info(f"bond strength = {strength} for {bond}")
assert formulas.coin_flip(probability)
bond.facet.buffer = 100.0
bond.source_descriptor.buffer = 100.0
bond.destination_descriptor.buffer = 100.0
logging.info("succeeded: posting bond-builder")
coderack.new_codelet("bond-builder", codelet, strength)
def bond_builder(codelet):
bond = codelet.arguments[0]
__show_which_string_object_is_from(bond)
bond.update_strength()
assert bond.source in workspace.objects or bond.destination in workspace.objects
for string_bond in bond.string.bonds:
if bond.same_neighbours(string_bond) and bond.same_categories(string_bond):
if bond.direction_category:
bond.direction_category.buffer = 100.0
bond.category.buffer = 100.0
logging.info("already exists: activate descriptors & Fizzle")
return
incompatible_bonds = bond.get_incompatible_bonds()
logging.info(f"number of incompatible_bonds: {len(incompatible_bonds)}")
if len(incompatible_bonds):
logging.info(str(incompatible_bonds[0]))
assert __fight_incompatibles(incompatible_bonds, bond, "bonds", 1.0, 1.0)
incompatible_groups = bond.source.get_common_groups(bond.destination)
assert __fight_incompatibles(incompatible_groups, bond, "groups", 1.0, 1.0)
# fight all incompatible correspondences
incompatible_correspondences = []
if bond.left_object.leftmost or bond.right_object.rightmost:
if bond.direction_category:
incompatible_correspondences = bond.get_incompatible_correspondences()
if incompatible_correspondences:
logging.info("trying to break incompatible correspondences")
assert __fight(bond, 2.0, incompatible_correspondences, 3.0)
for incompatible in incompatible_bonds:
incompatible.break_the_structure()
for incompatible in incompatible_groups:
incompatible.break_the_structure()
for incompatible in incompatible_correspondences:
incompatible.break_the_structure()
logging.info(f"building bond {bond}")
bond.build_bond()
# pylint: disable=too-many-branches
# pylint: disable=too-many-statements
def top_down_group_scout__category(codelet):
group_category = codelet.arguments[0]
category = group_category.get_related_node(slipnet.bond_category)
assert category
source = __get_scout_source(
category, formulas.local_bond_category_relevance, "group"
)
assert source
assert not source.spans_string()
if source.leftmost:
direction = slipnet.right
elif source.rightmost:
direction = slipnet.left
else:
activations = [slipnet.left.activation]
activations += [slipnet.right.activation]
if not formulas.select_list_position(activations):
direction = slipnet.left
else:
direction = slipnet.right
if direction == slipnet.left:
first_bond = source.left_bond
else:
first_bond = source.right_bond
if not first_bond or first_bond.category != category:
# check the other side of object
if direction == slipnet.right:
first_bond = source.left_bond
else:
first_bond = source.right_bond
if not first_bond or first_bond.category != category:
if category == slipnet.sameness and isinstance(source, Letter):
group = Group(
source.string,
slipnet.sameness_group,
None,
slipnet.letter_category,
[source],
[],
)
probability = group.single_letter_group_probability()
assert random.random() >= probability
coderack.propose_single_letter_group(source, codelet)
return
direction = first_bond.direction_category
search = True
bond_facet = None
# find leftmost object in group with these bonds
while search:
search = False
if not source.left_bond:
continue
if source.left_bond.category != category:
continue
if source.left_bond.direction_category != direction:
if source.left_bond.direction_category:
continue
if not bond_facet or bond_facet == source.left_bond.facet:
bond_facet = source.left_bond.facet
direction = source.left_bond.direction_category
source = source.left_bond.left_object
search = True
# find rightmost object in group with these bonds
search = True
destination = source
while search:
search = False
if not destination.right_bond:
continue
if destination.right_bond.category != category:
continue
if destination.right_bond.direction_category != direction:
if destination.right_bond.direction_category:
continue
if not bond_facet or bond_facet == destination.right_bond.facet:
bond_facet = destination.right_bond.facet
direction = source.right_bond.direction_category
destination = destination.right_bond.right_object
search = True
assert destination != source
objects = [source]
bonds = []
while source != destination:
bonds += [source.right_bond]
objects += [source.right_bond.right_object]
source = source.right_bond.right_object
coderack.propose_group(
objects, bonds, group_category, direction, bond_facet, codelet
)
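# Codelet: top-down scout driven by a direction category. Finds a run of bonds
# pointing in that direction and proposes it as a group.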
def top_down_group_scout__direction(codelet):
direction = codelet.arguments[0]
source = __get_scout_source(
direction, formulas.local_direction_category_relevance, "direction"
)
logging.info(f"source chosen = {source}")
assert not source.spans_string()
if source.leftmost:
mydirection = slipnet.right
elif source.rightmost:
mydirection = slipnet.left
else:
activations = [slipnet.left.activation]
activations += [slipnet.right.activation]
if not formulas.select_list_position(activations):
mydirection = slipnet.left
else:
mydirection = slipnet.right
if mydirection == slipnet.left:
first_bond = source.left_bond
else:
first_bond = source.right_bond
if not first_bond:
logging.info("no first_bond")
else:
logging.info(f"first_bond: {first_bond}")
if first_bond and not first_bond.direction_category:
direction = None
if not first_bond or first_bond.direction_category != direction:
if mydirection == slipnet.right:
first_bond = source.left_bond
else:
first_bond = source.right_bond
if not first_bond:
logging.info("no first_bond2")
else:
logging.info(f"first_bond2: {first_bond}")
if first_bond and not first_bond.direction_category:
direction = None
assert first_bond
assert first_bond.direction_category == direction
logging.info(f"possible group: {first_bond}")
category = first_bond.category
assert category
group_category = category.get_related_node(slipnet.group_category)
logging.info(f"trying from {source} to {category.name}")
bond_facet = None
# find leftmost object in group with these bonds
search = True
while search:
search = False
if not source.left_bond:
continue
if source.left_bond.category != category:
continue
if source.left_bond.direction_category != direction:
if source.left_bond.direction_category:
continue
if not bond_facet or bond_facet == source.left_bond.facet:
bond_facet = source.left_bond.facet
direction = source.left_bond.direction_category
source = source.left_bond.left_object
search = True
destination = source
search = True
while search:
search = False
if not destination.right_bond:
continue
if destination.right_bond.category != category:
continue
if destination.right_bond.direction_category != direction:
if destination.right_bond.direction_category:
continue
if not bond_facet or bond_facet == destination.right_bond.facet:
bond_facet = destination.right_bond.facet
direction = source.right_bond.direction_category
destination = destination.right_bond.right_object
search = True
assert destination != source
logging.info(f"proposing group from {source} to {destination}")
objects = [source]
bonds = []
while source != destination:
bonds += [source.right_bond]
objects += [source.right_bond.right_object]
source = source.right_bond.right_object
coderack.propose_group(
objects, bonds, group_category, direction, bond_facet, codelet
)
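# Codelet: tries to propose a single group spanning an entire, randomly chosen string.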
# noinspection PyStringFormat
def group_scout__whole_string(codelet):
string = workspace.initial
if random.random() > 0.5:
string = workspace.target
logging.info(f"target string selected: {workspace.target}")
else:
logging.info(f"initial string selected: {workspace.initial}")
# find leftmost object & the highest group to which it belongs
leftmost = None
for objekt in string.objects:
if objekt.leftmost:
leftmost = objekt
while leftmost.group and leftmost.group.bond_category == slipnet.sameness:
leftmost = leftmost.group
if leftmost.spans_string():
# the object already spans the string - propose this object
group = leftmost
coderack.propose_group(
group.object_list,
group.bond_list,
group.group_category,
group.direction_category,
group.facet,
codelet,
)
return
bonds = []
objects = [leftmost]
while leftmost.right_bond:
bonds += [leftmost.right_bond]
leftmost = leftmost.right_bond.right_object
objects += [leftmost]
assert leftmost.rightmost
# choose a random bond from list
chosen_bond = random.choice(bonds)
category = chosen_bond.category
direction_category = chosen_bond.direction_category
bond_facet = chosen_bond.facet
bonds = possible_group_bonds(category, direction_category, bond_facet, bonds)
assert bonds
group_category = category.get_related_node(slipnet.group_category)
coderack.propose_group(
objects, bonds, group_category, direction_category, bond_facet, codelet
)
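# Codelet: probabilistically keeps a strong proposed group and posts a group-builder.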
def group_strength_tester(codelet):
# update strength value of the group
group = codelet.arguments[0]
__show_which_string_object_is_from(group)
group.update_strength()
strength = group.total_strength
probability = formulas.temperature_adjusted_probability(strength / 100.0)
assert random.random() <= probability
# it is strong enough - post builder & activate nodes
group.group_category.get_related_node(slipnet.bond_category).buffer = 100.0
if group.direction_category:
group.direction_category.buffer = 100.0
coderack.new_codelet("group-builder", codelet, strength)
def group_builder(codelet):
# update strength value of the group
group = codelet.arguments[0]
__show_which_string_object_is_from(group)
equivalent = group.string.equivalent_group(group)
if equivalent:
logging.info("already exists...activate descriptors & fizzle")
group.activate_descriptions()
equivalent.add_descriptions(group.descriptions)
return
# check to see if all objects are still there
for o in group.object_list:
assert o in workspace.objects
# check to see if bonds are there of the same direction
incompatible_bonds = [] # incompatible bond list
if len(group.object_list) > 1:
previous = group.object_list[0]
for objekt in group.object_list[1:]:
left_bond = objekt.left_bond
if left_bond:
if left_bond.left_object == previous:
continue
if left_bond.direction_category == group.direction_category:
continue
incompatible_bonds += [left_bond]
previous = objekt
next_object = group.object_list[-1]
for objekt in reversed(group.object_list[:-1]):
right_bond = objekt.right_bond
if right_bond:
if right_bond.right_object == next_object:
continue
if right_bond.direction_category == group.direction_category:
continue
incompatible_bonds += [right_bond]
next_object = objekt
# if incompatible bonds exist - fight
group.update_strength()
assert __fight_incompatibles(incompatible_bonds, group, "bonds", 1.0, 1.0)
# fight incompatible groups
# fight all groups containing these objects
incompatible_groups = group.get_incompatible_groups()
assert __fight_incompatibles(incompatible_groups, group, "Groups", 1.0, 1.0)
for incompatible in incompatible_bonds:
incompatible.break_the_structure()
# create new bonds
group.bond_list = []
for i in range(1, len(group.object_list)):
object1 = group.object_list[i - 1]
object2 = group.object_list[i]
if not object1.right_bond:
if group.direction_category == slipnet.right:
source = object1
destination = object2
else:
source = object2
destination = object1
category = group.group_category.get_related_node(slipnet.bond_category)
facet = group.facet
new_bond = Bond(
source,
destination,
category,
facet,
source.get_descriptor(facet),
destination.get_descriptor(facet),
)
new_bond.build_bond()
group.bond_list += [object1.right_bond]
for incompatible in incompatible_groups:
incompatible.break_the_structure()
group.build_group()
group.activate_descriptions()
logging.info("building group")
def rule_builder(codelet):
rule = codelet.arguments[0]
if rule.rule_equal(workspace.rule):
rule.activate_rule_descriptions()
return
rule.update_strength()
assert rule.total_strength
# fight against other rules
if workspace.rule:
assert __structure_versus_structure(rule, 1.0, workspace.rule, 1.0)
workspace.build_rule(rule)
def __get_cut_off(density):
if density > 0.8:
distribution = [5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
elif density > 0.6:
distribution = [2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0]
elif density > 0.4:
distribution = [1.0, 2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0, 1.0]
elif density > 0.2:
distribution = [1.0, 1.0, 2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0]
else:
distribution = [1.0, 1.0, 1.0, 2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0]
stop = sum(distribution) * random.random()
total = 0.0
for i in range(0, len(distribution)):
total += distribution[i]
if total >= stop:
return i + 1
return len(distribution)
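# Codelet: tries to translate the built rule into an answer once the temperature is
# below a cutoff derived from the bond density; a failed translation clamps the
# temperature instead.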
def rule_translator():
assert workspace.rule
if len(workspace.initial) == 1 and len(workspace.target) == 1:
bond_density = 1.0
else:
number_of_bonds = len(workspace.initial.bonds) + len(workspace.target.bonds)
nearly_total_length = len(workspace.initial) + len(workspace.target) - 2
bond_density = number_of_bonds / nearly_total_length
if bond_density > 1.0:
bond_density = 1.0
cutoff = __get_cut_off(bond_density) * 10.0
assert cutoff >= formulas.actual_temperature
if workspace.rule.build_translated_rule():
workspace.found_answer = True
else:
temperature.clamp_time = coderack.codelets_run + 100
temperature.clamped = True
formulas.Temperature = 100.0
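# Codelet: bottom-up scout that picks one salient object from each string and
# proposes a correspondence between them when distinguishing concept mappings exist.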
def bottom_up_correspondence_scout(codelet):
object_from_initial = choose_unmodified_object(
"inter_string_salience", workspace.initial.objects
)
object_from_target = choose_unmodified_object(
"inter_string_salience", workspace.target.objects
)
assert object_from_initial.spans_string() == object_from_target.spans_string()
    # get the possible concept mappings
concept_mappings = formulas.get_mappings(
object_from_initial,
object_from_target,
object_from_initial.relevant_descriptions(),
object_from_target.relevant_descriptions(),
)
assert concept_mappings
assert __slippability(concept_mappings)
# find out if any are distinguishing
distinguishing_mappings = [m for m in concept_mappings if m.distinguishing()]
assert distinguishing_mappings
# if both objects span the strings, check to see if the
# string description needs to be flipped
opposites = [
m
for m in distinguishing_mappings
if m.initial_description_type == slipnet.string_position_category
and m.initial_description_type != slipnet.bond_facet
]
initial_description_types = [m.initial_description_type for m in opposites]
flip_target_object = False
if (
object_from_initial.spans_string()
and object_from_target.spans_string()
and slipnet.direction_category in initial_description_types
and __all_opposite_mappings(formulas.opposite_mappings)
and slipnet.opposite.activation != 100.0
):
object_from_target = object_from_target.flipped_version()
concept_mappings = formulas.get_mappings(
object_from_initial,
object_from_target,
object_from_initial.relevant_descriptions(),
object_from_target.relevant_descriptions(),
)
flip_target_object = True
coderack.propose_correspondence(
object_from_initial,
object_from_target,
concept_mappings,
flip_target_object,
codelet,
)
def important_object_correspondence_scout(codelet):
object_from_initial = choose_unmodified_object(
"relative_importance", workspace.initial.objects
)
descriptors = object_from_initial.relevant_distinguishing_descriptors()
slipnode = formulas.choose_slipnode_by_conceptual_depth(descriptors)
assert slipnode
initial_descriptor = slipnode
for mapping in workspace.slippages():
if mapping.initial_descriptor == slipnode:
initial_descriptor = mapping.target_descriptor
target_candidates = []
for objekt in workspace.target.objects:
for description in objekt.relevant_descriptions():
if description.descriptor == initial_descriptor:
target_candidates += [objekt]
assert target_candidates
object_from_target = choose_unmodified_object(
"inter_string_salience", target_candidates
)
assert object_from_initial.spans_string() == object_from_target.spans_string()
    # get the possible concept mappings
concept_mappings = formulas.get_mappings(
object_from_initial,
object_from_target,
object_from_initial.relevant_descriptions(),
object_from_target.relevant_descriptions(),
)
assert concept_mappings
assert __slippability(concept_mappings)
# find out if any are distinguishing
distinguishing_mappings = [m for m in concept_mappings if m.distinguishing()]
assert distinguishing_mappings
# if both objects span the strings, check to see if the
# string description needs to be flipped
opposites = [
m
for m in distinguishing_mappings
if m.initial_description_type == slipnet.string_position_category
and m.initial_description_type != slipnet.bond_facet
]
initial_description_types = [m.initial_description_type for m in opposites]
flip_target_object = False
if (
object_from_initial.spans_string()
and object_from_target.spans_string()
and slipnet.direction_category in initial_description_types
and __all_opposite_mappings(formulas.opposite_mappings)
and slipnet.opposite.activation != 100.0
):
object_from_target = object_from_target.flipped_version()
concept_mappings = formulas.get_mappings(
object_from_initial,
object_from_target,
object_from_initial.relevant_descriptions(),
object_from_target.relevant_descriptions(),
)
flip_target_object = True
coderack.propose_correspondence(
object_from_initial,
object_from_target,
concept_mappings,
flip_target_object,
codelet,
)
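# Codelet: probabilistically keeps a strong proposed correspondence, activates its
# concept-mapping nodes and posts a correspondence-builder.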
def correspondence_strength_tester(codelet):
correspondence = codelet.arguments[0]
object_from_initial = correspondence.object_from_initial
object_from_target = correspondence.object_from_target
assert object_from_initial in workspace.objects
assert (
object_from_target in workspace.objects
or correspondence.flip_target_object
and not workspace.target.equivalent_group(object_from_target.flipped_version())
)
correspondence.update_strength()
strength = correspondence.total_strength
probability = formulas.temperature_adjusted_probability(strength / 100.0)
assert random.random() <= probability
# activate some concepts
for mapping in correspondence.concept_mappings:
mapping.initial_description_type.buffer = 100.0
mapping.initial_descriptor.buffer = 100.0
mapping.target_description_type.buffer = 100.0
mapping.target_descriptor.buffer = 100.0
coderack.new_codelet("correspondence-builder", codelet, strength, correspondence)
def correspondence_builder(codelet):
correspondence = codelet.arguments[0]
object_from_initial = correspondence.object_from_initial
object_from_target = correspondence.object_from_target
want_flip = correspondence.flip_target_object
if want_flip:
flipper = object_from_target.flipped_version()
target_not_flipped = not workspace.target.equivalent_group(flipper)
else:
target_not_flipped = False
initial_in_objects = object_from_initial in workspace.objects
target_in_objects = object_from_target in workspace.objects
assert initial_in_objects or (
not target_in_objects and (not (want_flip and target_not_flipped))
)
if correspondence.reflexive():
# if the correspondence exists, activate concept mappings
# and add new ones to the existing corr.
existing = correspondence.object_from_initial.correspondence
for mapping in correspondence.concept_mappings:
if mapping.label:
mapping.label.buffer = 100.0
if not mapping.is_contained_by(existing.concept_mappings):
existing.concept_mappings += [mapping]
return
incompatibles = correspondence.get_incompatible_correspondences()
# fight against all correspondences
if incompatibles:
correspondence_spans = (
correspondence.object_from_initial.letter_span()
+ correspondence.object_from_target.letter_span()
)
for incompatible in incompatibles:
incompatible_spans = (
incompatible.object_from_initial.letter_span()
+ incompatible.object_from_target.letter_span()
)
assert __structure_versus_structure(
correspondence, correspondence_spans, incompatible, incompatible_spans
)
incompatible_bond = None
incompatible_group = None
# if there is an incompatible bond then fight against it
initial = correspondence.object_from_initial
target = correspondence.object_from_target
if initial.leftmost or initial.rightmost and target.leftmost or target.rightmost:
# search for the incompatible bond
incompatible_bond = correspondence.get_incompatible_bond()
if incompatible_bond:
# bond found - fight against it
assert __structure_versus_structure(
correspondence, 3.0, incompatible_bond, 2.0
)
# won against incompatible bond
incompatible_group = target.group
if incompatible_group:
assert __structure_versus_structure(
correspondence, 1.0, incompatible_group, 1.0
)
# if there is an incompatible rule, fight against it
incompatible_rule = None
if workspace.rule:
if workspace.rule.incompatible_rule_correspondence(correspondence):
incompatible_rule = workspace.rule
assert __structure_versus_structure(
correspondence, 1.0, incompatible_rule, 1.0
)
for incompatible in incompatibles:
incompatible.break_the_structure()
# break incompatible group and bond if they exist
if incompatible_bond:
incompatible_bond.break_the_structure()
if incompatible_group:
incompatible_group.break_the_structure()
if incompatible_rule:
workspace.break_rule()
correspondence.build_correspondence()
| 2.34375 | 2 |
src/grpc_infer.py | huyhoang17/kuzushiji_recognition | 16 | 12795498 | import os
import math
import time
import functools
import random
from tqdm import tqdm
import cv2
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw
from pylab import rcParams
rcParams['figure.figsize'] = 20, 20 # noqa
from consts import FONT_SIZE
from utils import (
make_contours,
get_centers,
get_labels,
vis_pred_bbox,
filter_polygons_points_intersection,
vis_pred_bbox_polygon,
vis_pred_center,
font
)
from grpc_utils import (
KuzuSegment,
KuzuClassify
)
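# Inference pipeline: segment the page to get a character-region mask and character
# centers, then crop each detected character, classify it, and draw the predicted
# label next to it on the original image.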
if __name__ == '__main__':
img_dir = "./images"
img_fp = os.path.join(img_dir, random.choice(os.listdir(img_dir)))
print(img_fp)
filter_polygon = True
kuzu_seg = KuzuSegment()
kuzu_cls = KuzuClassify()
img, origin_image, origin_h, origin_w = kuzu_seg.load_image(img_fp)
pred_bbox, pred_center = kuzu_seg.predict(img)
# get all polygon area in image
polygon_contours = make_contours(pred_bbox)
# get all center points by contour method
center_coords = get_centers(pred_center.astype(np.uint8))
no_center_points = len(center_coords)
final_center = vis_pred_center(center_coords, rad=2)
# filter polygon
if filter_polygon:
filtered_contours = filter_polygons_points_intersection(polygon_contours, center_coords) # noqa
pred_bbox = vis_pred_bbox_polygon(pred_bbox, filtered_contours)
final_bbox = vis_pred_bbox(pred_bbox, center_coords, width=2)
y_ratio = origin_h / 512
x_ratio = origin_w / 512
pil_img = Image.fromarray(origin_image).convert('RGBA')
char_canvas = Image.new('RGBA', pil_img.size)
char_draw = ImageDraw.Draw(char_canvas)
print(">>> {}".format(no_center_points))
if no_center_points > 0:
bbox_cluster = get_labels(center_coords, pred_bbox)
# ignore background hex color (=0)
for cluster_index in tqdm(range(len(center_coords))[1:]):
char_pixel = (bbox_cluster == cluster_index).astype(np.float32)
try:
horizontal_indicies = np.where(np.any(char_pixel, axis=0))[0]
vertical_indicies = np.where(np.any(char_pixel, axis=1))[0]
x_min, x_max = horizontal_indicies[[0, -1]]
y_min, y_max = vertical_indicies[[0, -1]]
except IndexError:
continue
x = x_min
y = y_min
w = x_max - x_min
h = y_max - y_min
# convert to original coordinates
x = int(x * x_ratio)
w = int(w * x_ratio)
y = int(y * y_ratio)
h = int(h * y_ratio)
# set offset to crop character
offset = 5 # percentage
y_diff = math.ceil(h * offset / 100)
x_diff = math.ceil(w * offset / 100)
# expand area
y_from = y - y_diff
y_to = y + h + y_diff
x_from = x - x_diff
x_to = x + w + x_diff
# tune
y_from, y_to, x_from, x_to = \
list(map(functools.partial(np.maximum, 0),
[y_from, y_to, x_from, x_to]))
try:
char_img = origin_image[y_from:y_to, x_from:x_to]
char_img = kuzu_cls.load_image(char_img)
pred_label = kuzu_cls.predict(char_img)
# print(pred_label)
char_draw.text(
(x + w + FONT_SIZE / 4, y + h / 2 - FONT_SIZE),
pred_label, fill=(0, 0, 255, 255),
font=font
)
except Exception as e:
print(e)
continue
char_img = Image.alpha_composite(pil_img, char_canvas)
char_img = char_img.convert("RGB")
char_img = np.asarray(char_img)
final_bbox = cv2.resize(final_bbox, (origin_w, origin_h))
final_center = cv2.resize(final_center, (origin_w, origin_h))
plt.imshow(char_img)
plt.imshow(final_bbox, cmap="jet", alpha=0.50)
plt.savefig("./assets/{}.jpg".format(time.time()), bbox_inches='tight')
| 2.0625 | 2 |
mpire/signal.py | synapticarbors/mpire | 505 | 12795499 | from inspect import Traceback
from signal import getsignal, SIG_IGN, SIGINT, signal as signal_, Signals
from types import FrameType
from typing import Type
class DelayedKeyboardInterrupt:
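    """
    Context manager that postpones handling of a SIGINT received inside the block
    until the block exits (signal handling is skipped when running inside a thread).
    """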
def __init__(self, in_thread: bool = False) -> None:
"""
        :param in_thread: Whether or not we're living in a thread
"""
self.in_thread = in_thread
self.signal_received = None
def __enter__(self) -> None:
# When we're in a thread we can't use signal handling
if not self.in_thread:
self.signal_received = False
self.old_handler = signal_(SIGINT, self.handler)
def handler(self, sig: Signals, frame: FrameType) -> None:
self.signal_received = (sig, frame)
def __exit__(self, exc_type: Type, exc_val: Exception, exc_tb: Traceback) -> None:
if not self.in_thread:
signal_(SIGINT, self.old_handler)
if self.signal_received:
self.old_handler(*self.signal_received)
class DisableKeyboardInterruptSignal:
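    """
    Context manager that ignores SIGINT for the duration of the block and restores
    the previous handler on exit.
    """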
def __enter__(self) -> None:
# Prevent signal from propagating to child process
self._handler = getsignal(SIGINT)
ignore_keyboard_interrupt()
def __exit__(self, exc_type: Type, exc_val: Exception, exc_tb: Traceback) -> None:
# Restore signal
signal_(SIGINT, self._handler)
def ignore_keyboard_interrupt():
signal_(SIGINT, SIG_IGN)
| 2.375 | 2 |
scripts/create_shell_link.py | DatGuy1/Windows-Toasts | 1 | 12795500 | #!/usr/bin/env python
import argparse
import os
import sys
from pathlib import Path
from typing import Any, Optional
try:
import pythoncom
from win32com.propsys import propsys
from win32com.shell import shell
except ImportError:
raise ImportError(
"pywin32 is required to run create_shell_link.py.To install, execute 'pip install pywin32' in a terminal"
)
class IconFileAction(argparse.Action): # pragma: no cover
def __call__(self, parser_container, namespace, values: Any, option_string=None):
if values.suffix != ".ico":
raise ValueError("The supplied icon file is not of type .ico.")
setattr(namespace, self.dest, values)
# noinspection PyUnresolvedReferences
def create_shell_link(
appId: str,
appName: str,
iconPath: Optional[Path] = None,
overwrite: bool = False,
appDataPath: str = os.getenv("APPDATA"),
):
# See https://github.com/mohabouje/WinToast/blob/master/src/wintoastlib.cpp#L594
if appDataPath is None: # pragma: no cover
raise RuntimeError("Couldn't find APPDATA path. Please rerun this script with the --appdata argument")
programsPath = Path(appDataPath) / "Microsoft" / "Windows" / "Start Menu" / "Programs"
shellLinkPath = programsPath / f"{appName}.lnk"
linkExists = shellLinkPath.exists()
if linkExists: # pragma: no cover
if overwrite:
print("Script run with --overwrite, overwriting existing link...")
else:
sys.exit(
f"Link '{shellLinkPath}' already exists. To overwrite, rerun this script with the --overwrite argument"
)
# Adapted from https://github.com/mhammond/pywin32/blob/main/com/win32comext/shell/demos/create_link.py
# noinspection PyTypeChecker
shellLink = pythoncom.CoCreateInstance(
shell.CLSID_ShellLink, None, pythoncom.CLSCTX_INPROC_SERVER, shell.IID_IShellLink
)
# Set shell link arguments
shellLink.SetPath("")
shellLink.SetArguments("")
shellLink.SetWorkingDirectory("")
if iconPath is not None:
shellLink.SetIconLocation(str(iconPath.resolve()), 0)
# Set AUMI to supplied argument
propertyStore = shellLink.QueryInterface(propsys.IID_IPropertyStore)
propertyKey = propsys.PSGetPropertyKeyFromName("System.AppUserModel.ID")
propertyStore.SetValue(propertyKey, propsys.PROPVARIANTType(appId))
propertyStore.Commit()
# Save file
# noinspection PyUnresolvedReferences
propertyStore.QueryInterface(pythoncom.IID_IPersistFile).Save(str(shellLinkPath), True)
print(f"Successfully {'modified' if linkExists else 'created'} shell link with the AUMI '{appId}'")
if __name__ == "__main__": # pragma: no cover
parser = argparse.ArgumentParser(description="Create shell link for use in toast notifications")
parser.add_argument("--appdata", "-ad", type=str, required=False, help="AppData path if script fails to find it")
parser.add_argument("--app_id", "-a", type=str, required=True, help="Application User Model ID for identification")
parser.add_argument("--name", "-n", type=str, required=True, help="Display name on notification")
parser.add_argument(
"--icon", "-i", type=Path, required=False, action=IconFileAction, help="Path to image file for desired icon"
)
if sys.version_info >= (3, 9):
parser.add_argument(
"--overwrite", "-o", action=argparse.BooleanOptionalAction, help="Overwrite if a link already exists"
)
else:
parser.add_argument(
"--overwrite", "-o", default=False, action="store_true", help="Overwrite if a link already exists"
)
args = parser.parse_args()
create_shell_link(
appId=args.app_id, appName=args.name, iconPath=args.icon, overwrite=args.overwrite, appDataPath=args.appdata
)
| 2.40625 | 2 |
oteltrace/contrib/mysql/__init__.py | ocelotl/opentelemetry-auto-instr-python-1 | 2 | 12795501 | # Copyright 2019, OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Instrument mysql to report MySQL queries.
``patch_all`` will automatically patch your mysql connection to make it work.
::
# Make sure to import mysql.connector and not the 'connect' function,
# otherwise you won't have access to the patched version
from oteltrace import Pin, patch
import mysql.connector
# If not patched yet, you can patch mysql specifically
patch(mysql=True)
# This will report a span with the default settings
conn = mysql.connector.connect(user="alice", password="<PASSWORD>", host="localhost", port=3306, database="test")
cursor = conn.cursor()
cursor.execute("SELECT 6*7 AS the_answer;")
# Use a pin to specify metadata related to this connection
Pin.override(conn, service='mysql-users')
Only the default full-Python integration works. The binary C connector,
provided by _mysql_connector, is not supported yet.
Help on mysql.connector can be found on:
https://dev.mysql.com/doc/connector-python/en/
"""
from ...utils.importlib import require_modules
# check `mysql-connector` availability
required_modules = ['mysql.connector']
with require_modules(required_modules) as missing_modules:
if not missing_modules:
from .patch import patch
from .tracers import get_traced_mysql_connection
__all__ = ['get_traced_mysql_connection', 'patch']
| 1.648438 | 2 |
src/smartystreets/__init__.py | bennylope/smartystreets.py | 12 | 12795502 | # -*- coding: utf-8 -*-
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__version__ = "1.0.0"
from smartystreets.client import Client # noqa
__all__ = ["Client"]
| 1.304688 | 1 |
api/system/node/events.py | klebed/esdc-ce | 97 | 12795503 | from api.event import Event
from que import TT_DUMMY, TG_DC_UNBOUND
from que.utils import DEFAULT_DC, task_id_from_string
class NodeSystemRestarted(Event):
"""
Called from node_sysinfo_cb after erigonesd:fast is restarted on a compute node.
"""
_name_ = 'node_system_restarted'
def __init__(self, node, **kwargs):
        # Create such a task_id that info is sent to SuperAdmins and the node owner
task_id = task_id_from_string(node.owner.id, dummy=True, dc_id=DEFAULT_DC, tt=TT_DUMMY, tg=TG_DC_UNBOUND)
kwargs['node_hostname'] = node.hostname
super(NodeSystemRestarted, self).__init__(task_id, **kwargs)
| 2.0625 | 2 |
lib/proxies/aws_long.py | jhong93/aws-lambda-proxy | 7 | 12795504 | <reponame>jhong93/aws-lambda-proxy
import boto3
import json
import logging
from base64 import b64decode
from random import SystemRandom
from concurrent.futures import ThreadPoolExecutor
from lib.proxy import AbstractRequestProxy, ProxyResponse
from lib.stats import LambdaStatsModel, S3StatsModel
from lib.workers import LambdaSqsTaskConfig, LambdaSqsTask, WorkerManager
logger = logging.getLogger(__name__)
random = SystemRandom()
class LongLivedLambdaProxy(AbstractRequestProxy):
"""Return a function that queues requests in SQS"""
def __init__(self, functions, maxLambdas, s3Bucket, stats, verbose):
# Supporting this across regions is not a priority since that would
# incur costs for SQS and S3, and be error prone.
if len(functions) > 1 and 'arn:' in functions[0]:
raise NotImplementedError(
'Only a single function may be specified by name for a '
'long lived proxy')
self.__verbose = verbose
self.__s3Bucket = s3Bucket
if 'lambda' not in stats.models:
stats.register_model('lambda', LambdaStatsModel())
self.__lambdaStats = stats.get_model('lambda')
if s3Bucket is not None:
if 's3' not in stats.models:
stats.register_model('s3', S3StatsModel())
self.__s3Stats = stats.get_model('s3')
self.__s3 = boto3.client('s3')
self.__s3DeletePool = ThreadPoolExecutor(1)
class ProxyTask(LambdaSqsTaskConfig):
@property
def queue_prefix(self):
return 'lambda-proxy'
@property
def lambda_function(self):
return random.choice(functions)
@property
def max_workers(self):
return maxLambdas
@property
def load_factor(self):
return 4
def pre_invoke_callback(self, workerId, workerArgs):
logger.info('Starting worker: %d', workerId)
workerArgs['longLived'] = True
if s3Bucket:
workerArgs['s3Bucket'] = s3Bucket
def post_return_callback(self, workerId, workerResponse):
if workerResponse is not None:
logger.info('Worker %d ran for %dms and proxied %d '
'requests: Exit reason: %s',
workerId,
workerResponse['workerLifetime'],
workerResponse['numRequestsProxied'],
workerResponse['exitReason'])
self.workerManager = WorkerManager(ProxyTask(), stats)
def __delete_object_from_s3(self, key):
self.__s3.delete_object(Bucket=self.__s3Bucket, Key=key)
def __load_object_from_s3(self, key):
result = self.__s3.get_object(Bucket=self.__s3Bucket, Key=key)
ret = result['Body'].read()
self.__s3DeletePool.submit(self.__delete_object_from_s3, key)
self.__s3Stats.record_get(len(ret))
return ret
def request(self, method, url, headers, data):
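        # Package the HTTP request as an SQS task (binary body as a message attribute)
        # and hand it to a Lambda worker; the response may come back fragmented or via S3.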
task = LambdaSqsTask()
if data:
task.add_binary_attribute('data', data)
task.set_body(json.dumps({
'method': method,
'url': url,
'headers': headers
}))
result = self.workerManager.execute(task, timeout=10)
if result is None:
return ProxyResponse(statusCode=500, headers={}, content='')
if type(result) is list:
# Fragmented response
payload = {}
dataChunks = []
for part in result:
if part.has_attribute('data'):
dataChunks.append(part.get_binary_attribute('data'))
if len(part.body) > 1:
# We use a hack to send practically empty bodies
decodedBody = b64decode(part.body).decode('zlib')
payload.update(json.loads(decodedBody))
content = b''.join(dataChunks)
else:
# Single message
payload = json.loads(b64decode(result.body).decode('zlib'))
if result.has_attribute('s3'):
key = result.get_string_attribute('s3')
content = self.__load_object_from_s3(key)
elif result.has_attribute('data'):
content = result.get_binary_attribute('data')
else:
content = b''
statusCode = payload['statusCode']
responseHeaders = payload['headers']
return ProxyResponse(statusCode=statusCode, headers=responseHeaders,
content=content) | 2.09375 | 2 |
TS3GatherBot.py | ikinz/TS3GatherBot | 0 | 12795505 | <reponame>ikinz/TS3GatherBot
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created by <NAME>
This bot will make it easier to set up gathers, all
from your teamspeak 3 server.
The bot requires access to the Teamspeak 3 server
query!
"""
__author__ = '<NAME>'
__copyright__ = 'Copyright 2015, TS3GatherBot'
__credits__ = ['<NAME>']
__licence__ = 'MIT'
__version__ = '1.0.0'
__maintainer__ = '<NAME>'
__status__ = 'Production'
import threading
import telnetlib
from Config import config, maps, admins, vetoprocesses
from queue import Queue
from Player import Player
# Amount of players needed to start gather (even number please :))
PLAYERS_NEEDED = config["players"]
"""
Bot Thread
"""
class BotThread(threading.Thread):
def __init__(self, name, password, channel, index):
super(BotThread, self).__init__()
self.commands = {
# User commands
"!start": self.cmd_start,
"!stop": self.cmd_stop,
"!maps": self.cmd_maps,
"!ready": self.cmd_ready,
"!r": self.cmd_ready,
"!gaben": self.cmd_ready,
"!unready": self.cmd_unready,
"!notready": self.cmd_unready,
"!nr": self.cmd_unready,
"!ur": self.cmd_unready,
"!help": self.cmd_help,
"!h": self.cmd_help,
# Admin commands
"!activate": self.cmd_activate
}
self.name = name
self.password = password
self.telnet = None
self.botId = None
self.channel = channel
self.ti = index
def run(self):
self.telnet = self.initBot()
self.botId = self.getPlayerId(self.name)
self.channel = self.moveToChannel(self.getChannelId(self.channel))
# Print Welcome message
self.sendChannelMessage(
"\\n[b]The GatherBot is currently running[/b]\\n\\n"
"[color=green]!start[/color] : [i]Starts a gather[/i]\\n"
"[color=green]!stop[/color] : [i]Stops the gather[/i]\\n\\n"
"[color=green]!maps[/color] : [i]Set the amount of maps to play (default=bo3)[/i]\\n"
"[color=green]!ready[/color] : [i]Sets you as ready[/i]\\n"
"[color=green]!unready[/color] : [i]Sets you as unready[/i]\\n\\n"
"[color=red]Please type !help for a full list of commands[/color]"
)
# While an exit command has not been issued
ex = False
while not ex:
# Print queue'd messages
while not cmdToThread[self.ti].empty():
self.sendChannelMessage(cmdToThread[self.ti].get())
# Read commands from user and execute them
self.telnet.write(self.getenc("servernotifyregister event=textchannel id=%s\n" % self.channel))
msg = str(self.telnet.read_until(self.getenc("msg=ok")))
if msg.__contains__("notifytextmessage"):
self.execCommand(msg)
# When the bot is closed, close all connections
# before exiting thread
self.closeBot()
"""
Connects to the teamspeak server via telnet
and returns the telnet client
"""
def initBot(self):
# Connect and log in to server
telnet = telnetlib.Telnet(config["host"], config["port"])
telnet.open(telnet.host, telnet.port)
telnet.write(self.getenc("login %s %s\n" % (self.name, self.password)))
telnet.read_until(self.getenc("msg=ok"))
# Select virtual server id
telnet.write(self.getenc("use sid=%s\n" % (config["sid"])))
telnet.read_until(self.getenc("msg=ok"))
# Set bot nickname
telnet.write(self.getenc("clientupdate client_nickname=%s\n" % (self.name)))
telnet.read_until(self.getenc("msg=ok"))
return telnet
"""
Log out from telnet and close the client
"""
def closeBot(self):
self.telnet.write(self.getenc("logout\n"))
self.telnet.close()
print("Bot closed")
"""
Get the client ID for this bot
"""
def getPlayerId(self, name):
self.telnet.write(self.getenc("clientfind pattern=%s\n" % name))
botstr = str(self.telnet.read_until(self.getenc("msg=ok")))
botstr = botstr.split()[0]
return int(botstr.split("=")[1])
"""
Get the channel ID from the name of the channel
"""
def getChannelId(self, channel):
channelLobby = channel.replace(" ", "\s")
self.telnet.write(self.getenc("channelfind pattern=%s\n" % (channelLobby)))
channelLobby = str(self.telnet.read_until(self.getenc("msg=ok")))
channelLobby = channelLobby.split("\\n")[1]
channelLobby = channelLobby.split()[0]
return int(channelLobby.split("=")[1])
"""
Move user to channel
"""
def moveToChannel(self, channel):
self.telnet.write(self.getenc("clientmove clid=%s cid=%s\n" % (self.botId, channel)))
self.telnet.read_until(self.getenc("msg=ok"))
return channel
"""
Print out a server message
"""
def sendServerMessage(self, msg):
msg = msg.replace(" ", "\s")
self.telnet.write(self.getenc("sendtextmessage targetmode=3 target=1 msg=%s\n" % (msg)))
self.telnet.read_until(self.getenc("msg=ok"))
"""
Print a message to a specific channel
"""
def sendChannelMessage(self, msg):
msg = msg.replace(" ", "\s")
self.telnet.write(self.getenc("sendtextmessage targetmode=2 msg=%s\n" % (msg)))
self.telnet.read_until(self.getenc("msg=ok"))
"""
Parse and execute commands sent by users
"""
def execCommand(self, cmd):
i1 = cmd.index("invokeruid")
i1 = cmd.index("=", i1)
i2 = cmd.index("\\n", i1)
userid = cmd[i1 + 1:i2]
        cmd = [x.split("=") for x in cmd.split() if len(x.split("=")) > 1 and "msg=ok" not in x]
d = {}
for it in cmd:
d[it[0]] = it[1]
global active
cmdsp = d['msg'].split("\\\\s")
if (cmdsp[0] in self.commands and active) or d['msg'] == '!activate':
self.commands[cmdsp[0]](userid, d['invokername'], d['msg'])
"""
Start gather and set mod (the one who started the gather)
"""
def cmd_start(self, userid, user, data):
global gatherRunning
if not gatherRunning:
gatherRunning = True
global players
players.append(Player(user, userid, True))
broadcastMessage("[color=green]A gather has been started by %s![/color]" % user)
else:
self.sendChannelMessage("[color=red]A gather is already running![/color]")
"""
Stop the gather and move all players to lobby
"""
def cmd_stop(self, userid, user, data):
global gatherRunning
global players
p = None
for x in players:
if x.uid == userid:
p = x
if gatherRunning and p.isMod:
gatherRunning = False
global vetoSystem
vetoSystem = "bo3"
# Move all players to Lobby
plrs = ["clid=" + str(self.getPlayerId(x.name)) for x in players]
plrs = "|".join(plrs)
self.telnet.write(self.getenc("clientmove %s cid=%s\n" % (plrs, self.getChannelId(config['gl']))))
self.telnet.read_until(self.getenc("msg=ok"), 3)
players = []
broadcastMessage("[color=red]Gather has been stopped![/color]")
else:
self.sendChannelMessage("[color=red]No gather currently running![/color]")
"""
Change the amount of maps that will be played
Only available to game mods!
"""
def cmd_maps(self, userid, user, data):
global gatherRunning
if gatherRunning:
data = data.split("\\\\s")
global players
p = None
for x in players:
if x.uid == userid:
p = x
if len(data) > 1 and p.isMod:
data = data[1].lower()
if data in vetoprocesses:
global vetoSystem
vetoSystem = data
broadcastMessage("[color=green]Game changed to %s![/color]" % data)
else:
self.sendChannelMessage("[color=red]%s not supported![/color]" % data)
else:
self.sendChannelMessage("[color=red]You didn't enter a value or you're not the game mod![/color]" % data)
else:
self.sendChannelMessage("[color=red]No gather currently running![/color]")
"""
Sets a user as ready
"""
def cmd_ready(self, userid, user, data):
global gatherRunning
if gatherRunning:
global players
alreadyReady = False
for p in players:
if p.uid == userid:
alreadyReady = True
if not alreadyReady:
players.append(Player(user, userid))
broadcastMessage("[color=green]%s is ready![/color]" % user)
self.start_gather()
else:
self.sendChannelMessage("[color=red]You're already ready![/color]")
else:
self.sendChannelMessage("[color=red]No gather currently running![/color]")
"""
Set up teams, move players to correct channel and start veto process
"""
def start_gather(self):
global players
if len(players) == PLAYERS_NEEDED:
broadcastMessage("[color=green]%s players are ready! Setting up teams![/color]" % PLAYERS_NEEDED)
l = players[:]
import random
random.shuffle(l)
team1 = l[:int(PLAYERS_NEEDED/2)]
team2 = l[int(PLAYERS_NEEDED/2):]
plrs = ["clid=" + str(self.getPlayerId(x.name)) for x in team1]
plrs = "|".join(plrs)
self.telnet.write(self.getenc("clientmove %s cid=%s\n" % (plrs, self.getChannelId(config['g1']))))
self.telnet.read_until(self.getenc("msg=ok"))
plrs = ["clid=" + str(self.getPlayerId(x.name)) for x in team2]
plrs = "|".join(plrs)
self.telnet.write(self.getenc("clientmove %s cid=%s\n" % (plrs, self.getChannelId(config['g2']))))
self.telnet.read_until(self.getenc("msg=ok"))
"""
Sets a player as not ready
"""
def cmd_unready(self, userid, user, data):
global gatherRunning
if gatherRunning:
global players
for p in players:
if p.uid == userid:
if p.isMod:
self.sendChannelMessage("[color=red]You can't leave your own gather. Use !stop to cancel it instead![/color]")
else:
players.remove(p)
else:
self.sendChannelMessage("[color=red]No gather currently running![/color]")
"""
Print help text to channel
"""
def cmd_help(self, userid, user, data):
string = "\\n[b]Available commands are:[/b]\\n" \
"[color=grey]!<cmd> (<aliases>) : [i]<Description>[/i][/color]\\n\\n" \
"[color=green]!start[/color] : [i]Starts a gather[/i]\\n" \
"[color=green]!stop[/color] : [i]Stops the gather[/i]\\n\\n" \
"[color=green]!maps[/color] : [i]Set the amount of maps to play (default=bo3)[/i]\\n" \
"[color=green]!ready (!r, !gaben)[/color] : [i]Sets you as ready[/i]\\n" \
"[color=green]!unready (!notready, !nr, !ur)[/color] : [i]Sets you as unready[/i]\\n\\n" \
"[color=green]!help (!h)[/color] : [i]List all available commands[/i]\\n"
if userid in admins.keys():
string += "\\n\\n" \
"[b]Admin Commands:[/b]\\n" \
"[color=grey]!<cmd> (<aliases>) : [i]<Description>[/i][/color]\\n\\n" \
"[color=green]!activate[/color] : [i]Toggle this bot[/i]\\n"
self.sendChannelMessage(string)
"""
Toggle whether the bot is activated
Only available to admins!
"""
def cmd_activate(self, userid, user, data):
if userid in admins:
global active
active = not active
if active:
broadcastMessage("[color=green]GatherBot has been activated[/color]")
else:
broadcastMessage("[color=red]GatherBot has been deactivated[/color]")
else:
self.sendChannelMessage("[color=red]You're not an admin, GTFO![/color]")
"""
Fix encoding of strings
"""
def getenc(self, str):
return str.encode('ascii')
"""
Broadcast message to all bots
"""
def broadcastMessage(msg):
for q in cmdToThread:
q.put(msg)
"""
Init the app
"""
active = True
players = []
gatherRunning = False
vetoSystem = "bo3"
# Create lists with all the bots and their Queues
cmdToThread = [
Queue(), Queue(), Queue()
]
bots = [
BotThread(config["user"], config["pass"], config["gl"], 0),
BotThread(config["user1"], config["pass1"], config["g1"], 1),
BotThread(config["user2"], config["pass2"], config["g2"], 2)
]
for b in bots:
b.start()
| 2.203125 | 2 |
learningPygame/McKinley/00-MovingSmile/04-Drawing.py | Rosebotics/catapult2019 | 0 | 12795506 | # TODO: In this module we'll start drawing a simple smiley face
# Yellow circle for the head
# Two black circle eyes
# Red rectangle (rect) mouth
# Red circle nose.
import pygame
import sys
pygame.init()
screen = pygame.display.set_mode((600, 600))
while True:
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONDOWN:
print(event.pos)
if event.type == pygame.QUIT:
sys.exit()
screen.fill((0, 200, 200))
# Draws the yellow head
    pygame.draw.circle(screen, (255, 255, 0), (300, 300), 250)
pygame.draw.circle(screen, (0, 0, 0), (300, 300), 250, 5)
# draws the eyes
pygame.draw.circle(screen, (0, 0, 0), (205, 200), 20)
pygame.draw.circle(screen, (0, 0, 0), (400, 200), 20)
# draws the nose
pygame.draw.circle(screen, (255, 0, 0), (300, 300), 35)
pygame.draw.circle(screen, (0, 0, 0), (300, 300), 35, 2)
# draws the mouth
pygame.draw.rect(screen, (127, 0, 0), (200, 400, 200, 25))
# pygame.draw.rect(screen, color, (x, y, width, height), thickness)
# pygame.draw.rect(screen, (100, 0, 0), (240, 350, 160, 30))
pygame.display.update()
| 3.921875 | 4 |
airbyte-integrations/bases/source-acceptance-test/source_acceptance_test/tests/test_full_refresh.py | koji-m/airbyte | 6,215 | 12795507 | <filename>airbyte-integrations/bases/source-acceptance-test/source_acceptance_test/tests/test_full_refresh.py
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import pytest
from airbyte_cdk.models import Type
from source_acceptance_test.base import BaseTest
from source_acceptance_test.utils import ConnectorRunner, full_refresh_only_catalog, serialize
@pytest.mark.default_timeout(20 * 60)
class TestFullRefresh(BaseTest):
def test_sequential_reads(self, connector_config, configured_catalog, docker_runner: ConnectorRunner, detailed_logger):
configured_catalog = full_refresh_only_catalog(configured_catalog)
output = docker_runner.call_read(connector_config, configured_catalog)
records_1 = [message.record.data for message in output if message.type == Type.RECORD]
output = docker_runner.call_read(connector_config, configured_catalog)
records_2 = [message.record.data for message in output if message.type == Type.RECORD]
output_diff = set(map(serialize, records_1)) - set(map(serialize, records_2))
if output_diff:
msg = "The two sequential reads should produce either equal set of records or one of them is a strict subset of the other"
detailed_logger.info(msg)
detailed_logger.log_json_list(output_diff)
pytest.fail(msg)
| 1.976563 | 2 |
medical_prescription/user/test/test_forms_patient.py | thiagonf/Sprint-3-GPP | 0 | 12795508 | from django.test import TestCase
from user.forms import PatientForm
from user.models import User
class TestPatientForm(TestCase):
def setUp(self):
self.name_valid = '<NAME>'
self.name_invalid = 'a12'
self.name_invalid_TYPE = 'a@hjasgdjasd1al'
self.name_invalid_MAX = 'aasdkgasghdhjadjasvdashdjavcdbnmhasdvbdmmasbdnmhamsjdhgegdhjgsavdhabvdbnasd'
self.name_invalid_MIN = 'a'
self.phone_valid = '1234567890'
self.phone_invalid = '456'
self.phone_invalid_MIN = '456'
self.phone_invalid_TYPE = 'asdaaaaaads'
self.phone_invalid_MAX = '456134564898761'
self.email_valid = '<EMAIL>'
self.email_invalid = 'admin.com'
self.email_invalid_TYPE = 'admin.com'
self.email_invalid_MIN = '[email protected]'
self.email_invalid_MAX = '<EMAIL>'
self.email_invalid_BASE = '<EMAIL>'
self.password_valid = '<PASSWORD>'
self.password_invalid = '<PASSWORD>'
self.password_invalid_MAX = '<PASSWORD>'
self.password_invalid_MIN = '<PASSWORD>'
self.password_invalid_TYPE = '<PASSWORD>!'
self.date_of_birth_valid = '10/12/1990'
self.date_of_birth_invalid = '18'
self.date_of_birth_invalid_FORMAT = '18'
self.date_of_birth_invalid_MIN = '10/12/2020'
self.sex_valid = 'M'
self.sex_invalid = 'A'
self.CPF_document_valid = '61367541000'
self.CPF_document_invalid = '11111111111'
self.CPF_document_invalid_MIN = '111111111'
self.CPF_document_invalid_MAX = '11111111111'
self.CPF_document_invalid_TYPE = '252627282930asdf'
self.CEP_valid = 72850735
self.CEP_invalid = '7285073A'
self.CEP_invalid_MIN = 42
self.CEP_invalid_MAX = 728507351
self.UF_valid = 'DF'
self.UF_invalid = ''
self.UF_invalid_MIN = 'A'
self.UF_invalid_MAX = 'AAA'
        self.city_valid = 'Brasília'
self.city_invalid = ''
self.city_invalid_MAX = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'
self.neighborhood_valid = 'Setor Leste'
self.neighborhood_invalid = ''
self.neighborhood_invalid_MAX = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'
self.complement_valid = 'Rua 01, Quadra 10, Lote 15'
self.complement_invalid = ''
self.complement_invalid_MAX = '''aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'''
user = User()
user.email = "<EMAIL>"
user.save()
def test_forms_patient_is_valid(self):
form_data = {'name': self.name_valid,
'phone': self.phone_valid,
'email': self.email_valid,
'password': <PASSWORD>,
'confirm_password': <PASSWORD>,
'CPF_document': self.CPF_document_valid,
'sex': self.sex_valid,
'date_of_birth': self.date_of_birth_valid,
'CEP': self.CEP_valid,
'UF': self.UF_valid,
'city': self.city_valid,
'neighborhood': self.neighborhood_valid,
'complement': self.complement_valid}
form = PatientForm(data=form_data)
self.assertTrue(form.is_valid())
def test_forms_patient_name_is_not_valid_TYPE(self):
form_data = {'name': self.name_invalid_TYPE,
'phone': self.phone_valid,
'email': self.email_valid,
'password': <PASSWORD>,
'confirm_password': <PASSWORD>,
'CPF_document': self.CPF_document_valid,
'sex': self.sex_valid,
'date_of_birth': self.date_of_birth_valid,
'CEP': self.CEP_valid,
'UF': self.UF_valid,
'city': self.city_valid,
'neighborhood': self.neighborhood_valid,
'complement': self.complement_valid}
form = PatientForm(data=form_data)
self.assertFalse(form.is_valid())
def test_forms_patient_name_is_not_valid_MAX(self):
form_data = {'name': self.name_invalid_MAX,
'phone': self.phone_valid,
'email': self.email_valid,
'password': <PASSWORD>,
'confirm_password': <PASSWORD>,
'CPF_document': self.CPF_document_valid,
'sex': self.sex_valid,
'date_of_birth': self.date_of_birth_valid,
'CEP': self.CEP_valid,
'UF': self.UF_valid,
'city': self.city_valid,
'neighborhood': self.neighborhood_valid,
'complement': self.complement_valid}
form = PatientForm(data=form_data)
self.assertFalse(form.is_valid())
def test_forms_patient_name_is_not_valid_MIN(self):
form_data = {'name': self.name_invalid_MIN,
'phone': self.phone_valid,
'email': self.email_valid,
'password': <PASSWORD>,
'confirm_password': self.<PASSWORD>,
'CPF_document': self.CPF_document_valid,
'sex': self.sex_valid,
'date_of_birth': self.date_of_birth_valid,
'CEP': self.CEP_valid,
'UF': self.UF_valid,
'city': self.city_valid,
'neighborhood': self.neighborhood_valid,
'complement': self.complement_valid}
form = PatientForm(data=form_data)
self.assertFalse(form.is_valid())
def test_forms_patient_phone_is_not_valid_TYPE(self):
form_data = {'name': self.name_valid,
'phone': self.phone_invalid_TYPE,
'email': self.email_valid,
'password': <PASSWORD>,
'password_confirmation': <PASSWORD>,
'CPF_document': self.CPF_document_valid,
'sex': self.sex_valid,
'date_of_birth': self.date_of_birth_valid,
'CEP': self.CEP_valid,
'UF': self.UF_valid,
'city': self.city_valid,
'neighborhood': self.neighborhood_valid,
'complement': self.complement_valid}
form = PatientForm(data=form_data)
self.assertFalse(form.is_valid())
def test_forms_patient_phone_is_not_valid_MAX(self):
form_data = {'name': self.name_valid,
'phone': self.phone_invalid_MAX,
'email': self.email_valid,
'password': <PASSWORD>,
'password_confirmation': <PASSWORD>,
'CPF_document': self.CPF_document_valid,
'sex': self.sex_valid,
'date_of_birth': self.date_of_birth_valid,
'CEP': self.CEP_valid,
'UF': self.UF_valid,
'city': self.city_valid,
'neighborhood': self.neighborhood_valid,
'complement': self.complement_valid}
form = PatientForm(data=form_data)
self.assertFalse(form.is_valid())
def test_forms_patient_phone_is_not_valid_MIN(self):
form_data = {'name': self.name_valid,
'phone': self.phone_invalid_MIN,
'email': self.email_valid,
'password': <PASSWORD>,
'password_confirmation': <PASSWORD>,
'CPF_document': self.CPF_document_valid,
'sex': self.sex_valid,
'date_of_birth': self.date_of_birth_valid,
'CEP': self.CEP_valid,
'UF': self.UF_valid,
'city': self.city_valid,
'neighborhood': self.neighborhood_valid,
'complement': self.complement_valid}
form = PatientForm(data=form_data)
self.assertFalse(form.is_valid())
def test_forms_patient_email_is_not_valid_TYPE(self):
form_data = {'name': self.name_valid,
'phone': self.phone_valid,
'email': self.email_invalid_TYPE,
'password': <PASSWORD>,
'confirm_password': <PASSWORD>,
'CPF_document': self.CPF_document_valid,
'sex': self.sex_valid,
'date_of_birth': self.date_of_birth_valid,
'CEP': self.CEP_valid,
'UF': self.UF_valid,
'city': self.city_valid,
'neighborhood': self.neighborhood_valid,
'complement': self.complement_valid}
form = PatientForm(data=form_data)
self.assertFalse(form.is_valid())
def test_forms_patient_email_is_not_valid_MAX(self):
form_data = {'name': self.name_valid,
'phone': self.phone_valid,
'email': self.email_invalid_MAX,
'password': <PASSWORD>,
'confirm_password': <PASSWORD>,
'CPF_document': self.CPF_document_valid,
'sex': self.sex_valid,
'date_of_birth': self.date_of_birth_valid,
'CEP': self.CEP_valid,
'UF': self.UF_valid,
'city': self.city_valid,
'neighborhood': self.neighborhood_valid,
'complement': self.complement_valid}
form = PatientForm(data=form_data)
self.assertFalse(form.is_valid())
def test_forms_patient_email_is_not_valid_MIN(self):
form_data = {'name': self.name_valid,
'phone': self.phone_valid,
'email': self.email_invalid_MIN,
'password': <PASSWORD>,
'confirm_password': <PASSWORD>,
'CPF_document': self.CPF_document_valid,
'sex': self.sex_valid,
'date_of_birth': self.date_of_birth_valid,
'CEP': self.CEP_valid,
'UF': self.UF_valid,
'city': self.city_valid,
'neighborhood': self.neighborhood_valid,
'complement': self.complement_valid}
form = PatientForm(data=form_data)
self.assertFalse(form.is_valid())
def test_forms_patient_email_is_not_valid_BASE(self):
form_data = {'name': self.name_valid,
'phone': self.phone_valid,
'email': self.email_invalid_BASE,
'password': <PASSWORD>,
'confirm_password': <PASSWORD>,
'CPF_document': self.CPF_document_valid,
'sex': self.sex_valid,
'date_of_birth': self.date_of_birth_valid,
'CEP': self.CEP_valid,
'UF': self.UF_valid,
'city': self.city_valid,
'neighborhood': self.neighborhood_valid,
'complement': self.complement_valid}
form = PatientForm(data=form_data)
self.assertFalse(form.is_valid())
def test_forms_patient_password_is_not_valid_TYPE(self):
form_data = {'name': self.name_valid,
'phone': self.phone_valid,
'email': self.email_valid,
'password': self.<PASSWORD>,
'confirm_password': <PASSWORD>,
'CPF_document': self.CPF_document_valid,
'sex': self.sex_valid,
'date_of_birth': self.date_of_birth_valid,
'CEP': self.CEP_valid,
'UF': self.UF_valid,
'city': self.city_valid,
'neighborhood': self.neighborhood_valid,
'complement': self.complement_valid}
form = PatientForm(data=form_data)
self.assertFalse(form.is_valid())
def test_forms_patient_password_is_not_valid_MIN(self):
form_data = {'name': self.name_valid,
'phone': self.phone_valid,
'email': self.email_valid,
'password': <PASSWORD>,
'confirm_password': <PASSWORD>,
'CPF_document': self.CPF_document_valid,
'sex': self.sex_valid,
'date_of_birth': self.date_of_birth_valid,
'CEP': self.CEP_valid,
'UF': self.UF_valid,
'city': self.city_valid,
'neighborhood': self.neighborhood_valid,
'complement': self.complement_valid}
form = PatientForm(data=form_data)
self.assertFalse(form.is_valid())
def test_forms_patient_password_is_not_valid_MAX(self):
form_data = {'name': self.name_valid,
'phone': self.phone_valid,
'email': self.email_valid,
'password': <PASSWORD>,
'confirm_password': <PASSWORD>,
'CPF_document': self.CPF_document_valid,
'sex': self.sex_valid,
'date_of_birth': self.date_of_birth_valid,
'CEP': self.CEP_valid,
'UF': self.UF_valid,
'city': self.city_valid,
'neighborhood': self.neighborhood_valid,
'complement': self.complement_valid}
form = PatientForm(data=form_data)
self.assertFalse(form.is_valid())
def test_forms_patient_password_confirmation_is_not_valid(self):
form_data = {'name': self.name_valid,
'phone': self.phone_valid,
'email': self.email_valid,
'password': <PASSWORD>,
'confirm_password': <PASSWORD>,
'CPF_document': self.CPF_document_valid,
'sex': self.sex_valid,
'date_of_birth': self.date_of_birth_valid,
'CEP': self.CEP_valid,
'UF': self.UF_valid,
'city': self.city_valid,
'neighborhood': self.neighborhood_valid,
'complement': self.complement_valid}
form = PatientForm(data=form_data)
self.assertFalse(form.is_valid())
def test_forms_patient_CPF_document_is_not_valid_TYPE(self):
form_data = {'name': self.name_valid,
'phone': self.phone_valid,
'email': self.email_valid,
'password': <PASSWORD>,
'confirm_password': <PASSWORD>,
'CPF_document': self.CPF_document_invalid_TYPE,
'sex': self.sex_valid,
'date_of_birth': self.date_of_birth_valid,
'CEP': self.CEP_valid,
'UF': self.UF_valid,
'city': self.city_valid,
'neighborhood': self.neighborhood_valid,
'complement': self.complement_valid}
form = PatientForm(data=form_data)
self.assertFalse(form.is_valid())
def test_forms_patient_CPF_document_is_not_valid_MIN(self):
form_data = {'name': self.name_valid,
'phone': self.phone_valid,
'email': self.email_valid,
'password': <PASSWORD>,
'confirm_password': <PASSWORD>,
'CPF_document': self.CPF_document_invalid_MIN,
'sex': self.sex_valid,
'date_of_birth': self.date_of_birth_valid,
'CEP': self.CEP_valid,
'UF': self.UF_valid,
'city': self.city_valid,
'neighborhood': self.neighborhood_valid,
'complement': self.complement_valid}
form = PatientForm(data=form_data)
self.assertFalse(form.is_valid())
def test_forms_patient_CPF_document_is_not_valid_MAX(self):
form_data = {'name': self.name_valid,
'phone': self.phone_valid,
'email': self.email_valid,
'password': <PASSWORD>,
'confirm_password': <PASSWORD>,
'CPF_document': self.CPF_document_invalid_MAX,
'sex': self.sex_valid,
'date_of_birth': self.date_of_birth_valid,
'CEP': self.CEP_valid,
'UF': self.UF_valid,
'city': self.city_valid,
'neighborhood': self.neighborhood_valid,
'complement': self.complement_valid}
form = PatientForm(data=form_data)
self.assertFalse(form.is_valid())
def test_forms_patient_date_of_birth_is_not_valid_FORMAT(self):
form_data = {'name': self.name_valid,
'phone': self.phone_valid,
'email': self.email_valid,
'password': <PASSWORD>,
'confirm_password': <PASSWORD>,
'CPF_document': self.CPF_document_valid,
'sex': self.sex_valid,
'date_of_birth': self.date_of_birth_invalid_FORMAT,
'CEP': self.CEP_valid,
'UF': self.UF_valid,
'city': self.city_valid,
'neighborhood': self.neighborhood_valid,
'complement': self.complement_valid}
form = PatientForm(data=form_data)
self.assertFalse(form.is_valid())
def test_forms_patient_date_of_birth_is_not_valid_MIN(self):
form_data = {'name': self.name_valid,
'phone': self.phone_valid,
'email': self.email_valid,
'password': <PASSWORD>,
'confirm_password': <PASSWORD>,
'CPF_document': self.CPF_document_valid,
'sex': self.sex_valid,
'date_of_birth': self.date_of_birth_invalid_MIN,
'CEP': self.CEP_valid,
'UF': self.UF_valid,
'city': self.city_valid,
'neighborhood': self.neighborhood_valid,
'complement': self.complement_valid}
form = PatientForm(data=form_data)
self.assertFalse(form.is_valid())
def test_forms_patient_sex_is_not_valid(self):
form_data = {'name': self.name_valid,
'phone': self.phone_valid,
'email': self.email_valid,
'password': <PASSWORD>,
'confirm_password': <PASSWORD>,
'CPF_document': self.CPF_document_valid,
'sex': self.sex_invalid,
'date_of_birth': self.date_of_birth_valid,
'CEP': self.CEP_valid,
'UF': self.UF_valid,
'city': self.city_valid,
'neighborhood': self.neighborhood_valid,
'complement': self.complement_valid}
form = PatientForm(data=form_data)
self.assertFalse(form.is_valid())
def test_forms_patient_CEP_is_not_valid(self):
form_data = {'name': self.name_valid,
'phone': self.phone_valid,
'email': self.email_valid,
'password': self.password_valid,
'confirm_password': <PASSWORD>,
                     'CPF_document': self.CPF_document_valid,
'sex': self.sex_valid,
'date_of_birth': self.date_of_birth_valid,
'CEP': self.CEP_invalid,
'UF': self.UF_valid,
'city': self.city_valid,
'neighborhood': self.neighborhood_valid,
'complement': self.complement_valid}
form = PatientForm(data=form_data)
self.assertFalse(form.is_valid())
def test_forms_patient_CEP_is_not_valid_MAX(self):
form_data = {'name': self.name_valid,
'phone': self.phone_valid,
'email': self.email_valid,
'password': <PASSWORD>,
'confirm_password': <PASSWORD>,
'CPF_document': self.CPF_document_valid,
'sex': self.sex_valid,
'date_of_birth': self.date_of_birth_valid,
'CEP': self.CEP_invalid_MAX,
'UF': self.UF_valid,
'city': self.city_valid,
'neighborhood': self.neighborhood_valid,
'complement': self.complement_valid}
form = PatientForm(data=form_data)
self.assertFalse(form.is_valid())
def test_forms_patient_UF_is_not_valid(self):
form_data = {'name': self.name_valid,
'phone': self.phone_valid,
'email': self.email_valid,
'password': <PASSWORD>,
'confirm_password': <PASSWORD>,
'CPF_document': self.CPF_document_valid,
'sex': self.sex_valid,
'date_of_birth': self.date_of_birth_valid,
'CEP': self.CEP_valid,
'UF': self.UF_invalid,
'city': self.city_valid,
'neighborhood': self.neighborhood_valid,
'complement': self.complement_valid}
form = PatientForm(data=form_data)
self.assertFalse(form.is_valid())
def test_forms_patient_UF_is_not_valid_MAX(self):
form_data = {'name': self.name_valid,
'phone': self.phone_valid,
'email': self.email_valid,
'password': <PASSWORD>,
'confirm_password': <PASSWORD>,
'CPF_document': self.CPF_document_valid,
'sex': self.sex_valid,
'date_of_birth': self.date_of_birth_valid,
'CEP': self.CEP_valid,
'UF': self.UF_invalid_MAX,
'city': self.city_valid,
'neighborhood': self.neighborhood_valid,
'complement': self.complement_valid}
form = PatientForm(data=form_data)
self.assertFalse(form.is_valid())
def test_forms_patient_city_is_not_valid(self):
form_data = {'name': self.name_valid,
'phone': self.phone_valid,
'email': self.email_valid,
'password': <PASSWORD>,
'confirm_password': <PASSWORD>,
'CPF_document': self.CPF_document_valid,
'sex': self.sex_valid,
'date_of_birth': self.date_of_birth_valid,
'CEP': self.CEP_valid,
'UF': self.UF_valid,
'city': self.city_invalid,
'neighborhood': self.neighborhood_valid,
'complement': self.complement_valid}
form = PatientForm(data=form_data)
self.assertFalse(form.is_valid())
def test_forms_patient_city_is_not_valid_MAX(self):
form_data = {'name': self.name_valid,
'phone': self.phone_valid,
'email': self.email_valid,
'password': <PASSWORD>,
'confirm_password': <PASSWORD>,
'CPF_document': self.CPF_document_valid,
'sex': self.sex_valid,
'date_of_birth': self.date_of_birth_valid,
'CEP': self.CEP_valid,
'UF': self.UF_valid,
'city': self.city_invalid_MAX,
'neighborhood': self.neighborhood_valid,
'complement': self.complement_valid}
form = PatientForm(data=form_data)
self.assertFalse(form.is_valid())
def test_forms_patient_neighborhood_is_not_valid(self):
form_data = {'name': self.name_valid,
'phone': self.phone_valid,
'email': self.email_valid,
'password': <PASSWORD>,
'confirm_password': <PASSWORD>,
'CPF_document': self.CPF_document_valid,
'sex': self.sex_valid,
'date_of_birth': self.date_of_birth_valid,
'CEP': self.CEP_valid,
'UF': self.UF_valid,
'city': self.city_valid,
'neighborhood': self.neighborhood_invalid,
'complement': self.complement_valid}
form = PatientForm(data=form_data)
self.assertFalse(form.is_valid())
def test_forms_patient_neighborhood_is_not_valid_MAX(self):
form_data = {'name': self.name_valid,
'phone': self.phone_valid,
'email': self.email_valid,
'password': <PASSWORD>,
'confirm_password': <PASSWORD>,
'CPF_document': self.CPF_document_valid,
'sex': self.sex_valid,
'date_of_birth': self.date_of_birth_valid,
'CEP': self.CEP_valid,
'UF': self.UF_valid,
'city': self.city_valid,
'neighborhood': self.neighborhood_invalid_MAX,
'complement': self.complement_valid}
form = PatientForm(data=form_data)
self.assertFalse(form.is_valid())
def test_forms_patient_complement_is_not_valid(self):
form_data = {'name': self.name_valid,
'phone': self.phone_valid,
'email': self.email_valid,
'password': <PASSWORD>,
'confirm_password': <PASSWORD>,
'CPF_document': self.CPF_document_valid,
'sex': self.sex_valid,
'date_of_birth': self.date_of_birth_valid,
'CEP': self.CEP_valid,
'UF': self.UF_valid,
'city': self.city_valid,
'neighborhood': self.neighborhood_valid,
'complement': self.complement_invalid}
form = PatientForm(data=form_data)
self.assertFalse(form.is_valid())
def test_forms_patient_complement_is_not_valid_MAX(self):
form_data = {'name': self.name_valid,
'phone': self.phone_valid,
'email': self.email_valid,
'password': <PASSWORD>,
'confirm_password': <PASSWORD>,
'CPF_document': self.CPF_document_valid,
'sex': self.sex_valid,
'date_of_birth': self.date_of_birth_valid,
'CEP': self.CEP_valid,
'UF': self.UF_valid,
'city': self.city_valid,
'neighborhood': self.neighborhood_valid,
'complement': self.complement_invalid_MAX}
form = PatientForm(data=form_data)
self.assertFalse(form.is_valid())
| 2.3125 | 2 |
PassGen.py | CoderMP/PythonPassGen | 0 | 12795509 | <reponame>CoderMP/PythonPassGen<gh_stars>0
####### REQUIRED IMPORTS #######
import os
import sys
import click
import random
import string
from time import sleep
####### FUNCTIONS ######
def displayHeader():
"""
() -> ()
Function that is responsible for printing the application header
and menu
"""
# Clear the console window
os.system('cls')
# Print the application header & menu
print("\033[94m------------------------------\n" +
"|| \033[92mPassword Generator \033[94m||\n" +
"------------------------------\n\n" +
"\033[0mWelcome to Password Generator v1.0\n" +
"\033[92mSource Code By: \033[0m\033[1m<NAME> (CoderMP)\n" +
"\033[91mLicense: \033[0m\033[1mMIT\n\n" +
"\033[0m\033[1m[1] Generate Password(s)\n" +
"[2] Exit Program\n")
def generator(len, num):
"""
(int, int) -> list
    Function that is responsible for generating a random alphanumeric
    password based on the user's requested parameters
"""
# Initialize the list that will hold the generated passwords
passList = []
# Initialize a counter variable to assist with generation
i = 0
while i < num:
# Assemble the password
temp = ''.join(random.choices(string.ascii_lowercase + string.digits, k = len))
# Append the temp variable value to the passList
passList.append(temp)
# Increment the counter
i += 1
# Return the password list
return passList
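# Usage sketch (illustrative only -- output is random, so the exact strings
# below are hypothetical): generator(8, 2) could return something like
# ['k3v9x2q1', 'p0d4m7a8'], i.e. two 8-character lowercase/digit passwords.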
def passParams():
"""
() -> ()
Function that is responsible for retrieving the desired password
    generation parameters of the user.
"""
# Prompt the user for their desired pass length and how many to generate
len = click.prompt('How long would you like your password(s) to be? >>', type=int)
num = click.prompt('How many password(s) would you like to generate? >>', type=int)
print('\n')
# Assemble the password list
passwordList = generator(len, num)
# Print the password list to the console
print(*passwordList, sep='\n')
def genLogic():
"""
() -> ()
Function that is responsible for executing the application logic based on the user's choice
"""
# Prompt the user for input
op = click.prompt('Enter choice >>', type=int)
if (op == 1):
print('\n')
# Call method that retrieves the password generation parameters
passParams()
while(True):
# Prompt the user as to whether or not they'd like to generate another set
choice = click.prompt('\n\nWould you like to generate another set? (y/n) >>', type=str)
# Execute accordingly
if (choice == 'Y' or choice == 'y'):
print('\n')
# Call the function that retrieves the password generation parameters
passParams()
if (choice == 'N' or choice == 'n'):
# Notify the user of navigation back to the main menu
print('Returning you to the main menu....')
sleep(1.3)
os.system('cls')
break
# Display the main menu and prompt the user for input
displayHeader()
genLogic()
if (op == 2):
# Notify the user of the termination sequence
print('\nTerminating program...')
sleep(2)
# Terminate
sys.exit()
else:
# Notify the user of their command error and re-prompt them for input
print('\033[91mInvalid command, please try again!\033[0m')
genLogic()
####### MAIN PROGRAM #######
if __name__ == '__main__':
displayHeader()
genLogic() | 3.109375 | 3 |
device.py | JCLemme/letterbox | 0 | 12795510 | <filename>device.py
#!/usr/bin/env python3
import os
import sys
import time
import zmq
def main():
frontend = None
backend = None
context = None
try:
context = zmq.Context(1)
# Socket facing clients
frontend = context.socket(zmq.SUB)
frontend.bind("tcp://*:5559")
frontend.setsockopt(zmq.SUBSCRIBE, b"")
# Socket facing services
backend = context.socket(zmq.PUB)
backend.bind("tcp://*:5560")
zmq.device(zmq.FORWARDER, frontend, backend)
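        # Client-side sketch (assumption: clients run on the same host):
        # a publisher connects a PUB socket to tcp://localhost:5559 and a
        # subscriber connects a SUB socket to tcp://localhost:5560; the
        # forwarder above relays every message between them and blocks here
        # until the context is terminated.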
except Exception as e:
print(e)
print("bringing down zmq device")
finally:
pass
if frontend != None: frontend.close()
if backend != None: backend.close()
if context != None: context.term()
if __name__ == "__main__":
main()
| 2.40625 | 2 |
src/__init__.py | Nardri/rbac-service | 0 | 12795511 | <reponame>Nardri/rbac-service
"""Blueprint module"""
from flask import Blueprint
# instantiating the blue print
rbac_blueprint = Blueprint('rbac-service', __name__, url_prefix='/v1')
| 1.578125 | 2 |
myGym/generate_dataset.py | incognite-lab/myGym | 18 | 12795512 | <filename>myGym/generate_dataset.py<gh_stars>10-100
## script to generate train/test sets from the simulator in COCO or DOPE format. Used for vision training.
import gym
from myGym import envs
from matplotlib.pyplot import imshow, show
import cv2
import numpy as np
import os
import glob
import json
import commentjson
import sys
import random
from pycocotools.cocostuffhelper import segmentationToCocoMask, segmentationToCocoResult
from pycocotools import mask
import pybullet as p
from bbox import BBox3D
from myGym.envs.wrappers import RandomizedEnvWrapper
import pkg_resources
# config, specify here or pass as an input argument
CONFIG_DEFAULT = pkg_resources.resource_filename("myGym", 'configs/dataset_coco.json')
# helper functions:
def color_names_to_rgb():
"""
Assign RGB colors to objects by name as specified in the training config file
"""
with open(pkg_resources.resource_filename("myGym", 'configs/rgbcolors.json'), "r") as read_file:
clr = json.load(read_file) #json file with suggested colors
new_dict = {}
for key, value in config['object_colors'].items():
new_value = []
for item in value:
new_value.append(clr[item])
new_dict[key] = new_value
config['color_dict'] = new_dict
def _category_coco_format(): #COCO
"""
Create list of dictionaries with category id-name pairs in MSCOCO format
Returns:
:return categories: (list) Categories in MSCOCO format
"""
categories = []
for value, key in config['used_class_names'].items():
categories.append({"id": int(key), "name": str(value)})
return categories
def _segmentationToPoly(mask):
"""
    Convert segmentation from RLE to polygons ([[x1 y1 x2 y2 ...]]). Code from https://github.com/facebookresearch/Detectron/issues/100#issuecomment-362882830.
Parameters:
:param mask: (array) Bitmap mask
    :return segmentationPoly: (list) Segmentation converted to polygons
"""
contours, _ = cv2.findContours((mask).astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
segmentationPoly = []
for contour in contours:
contour = contour.flatten().tolist()
if len(contour) > 4:
segmentationPoly.append(contour)
return segmentationPoly
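# Output-format sketch (values are illustrative, not computed): a single filled
# rectangle in the mask yields one flat [x1, y1, x2, y2, ...] vertex list per
# contour, e.g. [[10, 10, 10, 20, 30, 20, 30, 10]]; flattened contours with
# four or fewer coordinates are skipped.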
def create_coco_json(): #COCO
"""
Create COCO json data structure
Returns:
:return data_train: (dict) Data structure for training data
:return data_test: (dist) Data structure for testing data
"""
data_train = dict(
images=[# file_name, height, width, id
],
type='instances',
annotations=[# segmentation, area, iscrowd, image_id, bbox, category_id, id
],
categories = _category_coco_format(),
)
data_test = dict(
images=[# file_name, height, width, id
],
type='instances',
annotations=[# segmentation, area, iscrowd, image_id, bbox, category_id, id
],
categories = _category_coco_format(),
)
return data_train, data_test
def create_3D_box(env_object,objdim): #DOPE
objpos = env_object.get_position()
objorient = env_object.get_orientation()
#objdim = env_object.get_cuboid_dimensions()
box= BBox3D(objpos[0],objpos[1],objpos[2],objdim[0],objdim[1],objdim[2],objorient[3],objorient[0],objorient[1],objorient[2])
return box.p,box.center
class GeneratorCoco: #COCO
"""
Generator class for COCO image dataset for YOLACT vision model training
"""
def __init__(self):
pass
def get_env(self): #COCO
"""
Create environment for COCO dataset generation according to dataset config file
Returns:
:return env: (object) Environment for dataset generation
"""
env = RandomizedEnvWrapper(env=gym.make(config['env_name'],
robot = config['robot'],
render_on = True,
gui_on = config['gui_on'],
show_bounding_boxes_gui = config['show_bounding_boxes_gui'],
changing_light_gui = config['changing_light_gui'],
shadows_on = config['shadows_on'],
color_dict = config['color_dict'],
object_sampling_area = config['object_sampling_area'],
num_objects_range = config['num_objects_range'],
used_objects = used_objects,
active_cameras = config['active_cameras'],
camera_resolution = config['camera_resolution'],
renderer=p.ER_BULLET_HARDWARE_OPENGL,
dataset = True,
),
config_path = config['output_folder']+'/config_dataset.json')
p.setGravity(0, 0, -9.81)
return env
def episode_zero(self):
"""
        Initial episode set-up
"""
        self.id_unique = 0 #image_id*x for parallel dataset generation, otherwise 0
def init_data(self): #COCO
"""
Initialize data structures for COCO dataset annotations
Returns:
:return data_train: (dict) Data structure for training data
:return data_test: (dist) Data structure for testing data
"""
data_train, data_test = create_coco_json()
return data_train, data_test
def resume(self): #COCO
"""
Resume COCO dataset generation
Returns:
:return data_train: (dict) Training data from preceding dataset generation in COCO data structure
:return data_test: (dist) Testing data from preceding dataset generation in COCO data structure
:return image_id: (int) ID of last generated image in preceding dataset generation
"""
try:
with open(os.path.join(config['output_test_folder'],'annotations.json'), 'r') as f:
data_test = json.load(f)
except:
pass #happens when test JSON is empty, which can happen for small numbers
try:
with open(os.path.join(config['output_train_folder'],'annotations.json'), 'r') as f:
data_train = json.load(f)
# get the ID of last image
img_ids = [img["id"] for img in data_train['images']]
            image_id = max(img_ids) +1 # +1 if the last sample was a test sample (thus not in here). It's safe to have holes in the ids, they just need to increase monotonically
self.id_unique = len(data_test['annotations']) + len(data_train['annotations'])
print("Resuming from image_id {} for episodes: {}".format(image_id, config['num_episodes']))
except FileNotFoundError:
image_id = 0
return data_test, data_train, image_id
def data_struct_image(self): #COCO
"""
        Assign name to COCO dataset image and its train or test status
Returns:
            :return data: (dict) Corresponding data dictionary
            :return name: (string) Name of image file for saving
"""
data = data_test if isTestSample == True else data_train
name = 'img_{}_cam{}.jpg'.format(image_id,camera_id)
return data, name
def store_image_info(self): #COCO
"""
Append COCO dataset image info to corresponding data dict
"""
data['images'].append(dict(
file_name=name,
height=im.shape[0],
width=im.shape[1],
id=image_id,))
def get_append_annotations(self): #COCO
"""
Make and append COCO annotations for each object in the scene
"""
seg = segmentationToCocoMask(img_mask,object_uid)
area = float(mask.area(seg))
bbox = mask.toBbox(seg).flatten().tolist()
#1 run length encoding RLE segmentation format
seg['counts'] = str(seg['counts'], "utf-8") #utf-8 format in str
#2 or poly segmentation format
bitmap = mask.decode(seg)
seg = _segmentationToPoly(bitmap)
self.too_small_obj = False
try:
#notify and skip the object with too small visible representation
assert(area > config['min_obj_area'])
assert(len(seg)>0 and len(seg[0])>0)
except:
#make inverse map id->name (just to pretty print)
inv_map = dict(zip(config['used_class_names'].values(), config['used_class_names'].keys()))
self.too_small_obj = inv_map[class_id]
self.data_dict = dict(
id=self.id_unique,
image_id=image_id,
category_id=class_id,
segmentation=seg,
area=area,
bbox=bbox,
iscrowd=0,
)
if self.too_small_obj == False: #area ok
data['annotations'].append(self.data_dict) #append annotations
self.id_unique +=1
else: #area too small to be realistically seen
print('Too small object of class {} with area={} in img {}'.format(self.too_small_obj, self.data_dict['area'], name))
def visualize(self): #COCO
"""
Visualize mask and bounding box coordinates for COCO annotated object
"""
mask = img_mask==object_uid
mask = np.expand_dims(mask, axis=2)
mask = 255*mask.astype('uint8')
cv2.imshow('image',im)
cv2.waitKey(1)
print(class_name)
if self.too_small_obj:
cv2.imshow('Too small object', mask)
else:
cv2.imshow('Labeled object', mask)
cv2.waitKey(1000)
print(self.data_dict['bbox'])
def write_json_end(self): #COCO
"""
Write json file with COCO annotations to output directory
"""
if config['make_dataset'] in ["new", "resume"]:
print("Storing annotations.json at episode {} of {}.".format(episode, config['num_episodes']))
for flag in ['test','train']:
if flag == 'train':
folder = config['output_train_folder']
data = data_train
else:
folder = config['output_test_folder']
data = data_test
json_name = 'img_{}_cam{}.json'.format(image_id, camera_id)
json_dict = {"images": data["images"], "type":'instances',"annotations": data["annotations"], "categories":_category_coco_format()}
if len(data["images"]) > 0:
with open(os.path.join(folder,json_name), 'w') as f:
json.dump(json_dict, f, indent=4)
# clear data and continue
data["images"].clear()
data["annotations"].clear()
class GeneratorDope: #DOPE
def __init__(self):
self.object_settings = {"exported_object_classes": [], "exported_objects": []}
def get_env(self): #DOPE
env = RandomizedEnvWrapper(env=gym.make(config['env_name'],
robot = config['robot'],
render_on = True,
gui_on = config['gui_on'],
show_bounding_boxes_gui = config['show_bounding_boxes_gui'],
changing_light_gui = config['changing_light_gui'],
shadows_on = config['shadows_on'],
color_dict = config['color_dict'],
object_sampling_area = config['object_sampling_area'],
num_objects_range = config['num_objects_range'],
used_objects = used_objects,
active_cameras = config['active_cameras'],
camera_resolution = config['camera_resolution'],
dataset = True,
), config_path = config['output_folder']+'/config_dataset.json')
p.setGravity(0, 0, -9.81)
return env
def episode_zero(self):
self.objdim = {}
while len(self.objdim.keys()) < len(config['used_class_names'].keys()):
env.reset(random_robot=config['random_arm_movement'], random_pos=False)
observation = env.get_observation()
env_objects = observation["objects"]
for obj in env_objects:
if obj.name not in self.objdim.keys():
self.objdim[obj.name] = obj.get_cuboid_dimensions()
def init_data(self): #DOPE
data_train = {"objects":[]}
data_test = {"objects":[]}
return data_train, data_test
def resume(self): #DOPE
try:
files_test = [int(x.replace('.jpg','')) for x in os.listdir(config['output_test_folder']) if x.endswith(".jpg")]
files_train = [int(x.replace('.jpg','')) for x in os.listdir(config['output_train_folder']) if x.endswith(".jpg")]
image_id = max(max(files_test),max(files_train))
print("Resuming from image_id {} for episodes: {}".format(image_id, config['num_episodes']))
self.episode_zero()
except FileNotFoundError:
image_id = 0
return self.init_data()[0],self.init_data()[1],image_id
def data_struct_image(self): #DOPE
data_train, data_test = self.init_data()
data = data_test if isTestSample == True else data_train
name = '{}.jpg'.format(image_id)
return data, name
def store_image_info(self): #DOPE
#write dataset image info
filename = str(image_id) + '.json'
if config['make_dataset'] in ["new", "resume"]:
print("Storing {} and {} at episode {} of {}.".format(filename, name, episode, config['num_episodes']))
with open(os.path.join(path, filename), 'w') as f:
json.dump(data, f, indent=4)
def get_append_annotations(self): #DOPE
cuboid_with_centroid = env_object.get_bounding_box()
cuboid_centroid=cuboid_with_centroid[8]
cuboid=cuboid_with_centroid[:8]
seg = segmentationToCocoMask(img_mask, object_uid)
seg['counts'] = str(seg['counts'], "utf-8") #utf-8 format in str
bbox = mask.toBbox(seg).flatten().tolist()
bounding_box = {'top_left': bbox[:2], 'bottom_right': [bbox[0]+bbox[2], bbox[1]+bbox[3]]}
boxp,boxc = create_3D_box(env_object,self.objdim[class_name])
box3D = []
for x in range(boxp.shape[0]):
box3D.append(tuple(boxp[x]))
boxc = list(boxc)
projected_cuboid_centroid = list(env.project_point_to_camera_image(cuboid_centroid, camera_id))
projected_cuboid = [list(env.project_point_to_camera_image(point, camera_id)) for point in cuboid]
projected_3DBB_centroid = list(env.project_point_to_camera_image(boxc, camera_id))
projected_3DBB = [list(env.project_point_to_camera_image(point, camera_id)) for point in box3D]
if class_name not in self.object_settings["exported_object_classes"]:
self.object_settings["exported_object_classes"].append(class_name)
self.object_settings['exported_objects'].append({
"class": class_name,
"segmentation_class_id": class_id,
"cuboid_dimensions": self.objdim[class_name]
})
self.data_dict = {
"class": class_name,
"class_id": class_id,
"location":env_object.get_position(),
"quaternion_xyzw": env_object.get_orientation(),
"cuboid_centroid": cuboid_centroid,
"projected_cuboid_centroid": projected_cuboid_centroid,
"bounding_box": bounding_box,
"cuboid": cuboid,
"projected_cuboid": projected_cuboid,
"box3D": box3D,
"projected_3DBB": projected_3DBB,
"projected_3DBB_centroid": projected_3DBB_centroid,
}
data["objects"].append(self.data_dict)
def visualize(self): #DOPE
image = im
for projected_cuboid_point in data["objects"][-1]["projected_cuboid"]:
image = cv2.circle(cv2.UMat(image), tuple(map(int, projected_cuboid_point)), 4, [0,255,0], -1)
for projected_cuboid_point in data["objects"][-1]["projected_3DBB"]:
image = cv2.circle(cv2.UMat(image), tuple(map(int, projected_cuboid_point)), 4, [255,0,0], -1)
image = cv2.circle(cv2.UMat(image), tuple(map(int, [data["objects"][-1]["projected_cuboid_centroid"][0],data["objects"][-1]["projected_cuboid_centroid"][1]])), 4, [0,255,0], -1)
image = cv2.circle(cv2.UMat(image), tuple(map(int, [data["objects"][-1]["projected_3DBB_centroid"][0],data["objects"][-1]["projected_3DBB_centroid"][1]])), 4, [255,0,0], -1)
image = cv2.circle(cv2.UMat(image), tuple(map(int, [data["objects"][-1]["bounding_box"]["top_left"][0],data["objects"][-1]["bounding_box"]["top_left"][1]])), 4, [255,255,0], -1)
image = cv2.circle(cv2.UMat(image), tuple(map(int, [data["objects"][-1]["bounding_box"]["bottom_right"][0],data["objects"][-1]["bounding_box"]["bottom_right"][1]])), 4, [255,255,0], -1)
print(class_name)
cv2.imshow('image',image)
cv2.waitKey(1000)
self.draw_bounding_box_3D()
def draw_bounding_box_3D(self): #DOPE
for points in range(7):
#p.addUserDebugLine([0,0,0], [1,2,3], lineColorRGB=(0.31, 0.78, 0.47), lineWidth = 10)
#points2=(points[0]+0.001,points[1]+0.001,points[2]+0.001)
p.addUserDebugLine(data["objects"][-1]["box3D"][points],data["objects"][-1]["box3D"][points+1], lineColorRGB=(0.0, 0.0, 0.99), lineWidth = 4, lifeTime = 1)
def write_json_end(self): #DOPE
self.camera = {"camera_settings": []}
for c in range(np.count_nonzero(config['active_cameras'])): #loop through active cameras
camera_id=np.nonzero(config['active_cameras'])[0][c]
intrinsic_settings = env.get_camera_opencv_matrix_values(camera_id)
captured_image_size = {"width": config['camera_resolution'][0], "height": config['camera_resolution'][1]}
self.camera["camera_settings"].append(dict(
name="camera" + str(camera_id),
intrinsic_settings=intrinsic_settings,
captured_image_size=captured_image_size,
))
if config['make_dataset'] in ["new", "resume"]:
filename = "_camera_settings" + '.json'
print("Storing {}.".format(filename))
with open(os.path.join(config['output_test_folder'], filename), 'w') as f:
json.dump(self.camera, f, indent=4)
with open(os.path.join(config['output_train_folder'], filename), 'w') as f:
json.dump(self.camera, f, indent=4)
filename = "_object_settings" + '.json'
with open(os.path.join(config['output_test_folder'], filename), 'w') as f:
json.dump(self.object_settings, f, indent=4)
with open(os.path.join(config['output_train_folder'], filename), 'w') as f:
json.dump(self.object_settings, f, indent=4)
class GeneratorVae:
"""
Generator class for image dataset for VAE vision model training
"""
def __init__(self):
self.object_settings = {"exported_object_classes": [], "exported_objects": []}
self.env = None
self.imsize = config["imsize"] # only supported format at the moment
def get_env(self):
"""
Create environment for VAE dataset generation according to dataset config file
"""
self.env = RandomizedEnvWrapper(env=gym.make(config['env_name'],
robot = config['robot'],
render_on = True,
gui_on = config['gui_on'],
show_bounding_boxes_gui = config['show_bounding_boxes_gui'],
changing_light_gui = config['changing_light_gui'],
shadows_on = config['shadows_on'],
color_dict = config['color_dict'],
object_sampling_area = config['object_sampling_area'],
num_objects_range = config['num_objects_range'],
used_objects = used_objects,
active_cameras = config['active_cameras'],
camera_resolution = config['camera_resolution'],
dataset = True,
), config_path = config['output_folder']+'/config_dataset.json')
p.setGravity(0, 0, -9.81)
def collect_data(self, steps):
"""
Collect data for VAE dataset
Parameters:
:param steps: (int) Number of episodes initiated during dataset generation
"""
data = np.zeros((steps, self.imsize, self.imsize, 3), dtype='f')
for t in range(steps):
self.env.reset(random_pos=True)
self.env.render()
action = [random.uniform(1,2) for x in range(6)]
#action = [2,2,2,2,2,2]
self.env.robot.reset_random(action)
# send the Kuka arms up
observation, reward, done, info = self.env.step(action)
img = observation['camera_data'][6]['image']
imgs = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
img = cv2.resize(imgs[0:450,100:500], (self.imsize, self.imsize))
cv2.imshow("image", img)
cv2.waitKey(1)
padding = 6 - len(str(t+7999))
name = padding * "0" + str(t+7999)
cv2.imwrite(os.path.join(dataset_pth, "img_{}.png".format(name)), img)
data[t] = img
print("Image {}/{}".format(t, steps))
self.env.close()
# main
if __name__ == "__main__":
if len(sys.argv) <= 1:
config_path = CONFIG_DEFAULT
print('No config.json passed in argument. Loading default config: {}'.format(CONFIG_DEFAULT))
else:
config_path = pkg_resources.resource_filename("myGym", sys.argv[1])
with open(config_path) as file:
config = commentjson.load(file)
# initialize dataset generator
if config['dataset_type'] == 'coco':
generator = GeneratorCoco()
elif config['dataset_type'] == 'dope':
generator = GeneratorDope()
elif config['dataset_type'] == 'vae':
generator = GeneratorVae()
else:
raise Exception("dataset_type in config: use one of 'coco', 'dope', 'vae'!")
#prepare directories
config['output_test_folder'] = config['output_folder'] + '/test'
config['output_train_folder'] = config['output_folder'] + '/train'
os.makedirs(config["output_test_folder"], exist_ok=True)
os.makedirs(config["output_train_folder"], exist_ok=True)
#define objects to appear in the env, add colors
config['used_class_names'] = dict([x[1:3] for x in config['used_class_names_quantity']])
used_objects = []
for x in config['used_class_names_quantity']:
for _ in range(x[0]):
used_objects.append(x[1])
if config['color_dict'] == 'object_colors':
color_names_to_rgb()
config['texture_randomizer']['exclude'].append("objects")
config['color_randomizer']['exclude'].append("objects")
#write config.json to output_folder
with open(config['output_folder']+'/config_dataset.json', 'w') as f:
commentjson.dump(config, f)
if config['dataset_type'] == 'vae':
generator.get_env()
dataset_pth = config['output_folder'] + '/train'
generator.collect_data(config['num_episodes'])
print("Dataset finished. Ready to train!")
raise SystemExit(0)
# initialize pybullet env
env = generator.get_env()
first_link_uid = env.robot.robot_uid
robot_uids = np.array([((x + 1) << 24) + first_link_uid for x in range(-1, env.robot.gripper_index)],dtype=np.int32)
gripper_uids = np.array([((x + 1) << 24) + first_link_uid for x in range(env.robot.gripper_index, env.robot.num_joints + 1)])
# check mode of writing files
    image_id = 0 #for parallel dataset generation >0, otherwise 0
if config['make_dataset'] == "new": #cleanup files
files = glob.glob(os.path.join(config['output_test_folder'],'./*'))
for f in files:
os.remove(f)
files = glob.glob(os.path.join(config['output_train_folder'],'./*'))
for f in files:
os.remove(f)
data_train, data_test = generator.init_data()
generator.episode_zero()
elif config['make_dataset'] == 'resume': #resume
print("Restoring dataset generation")
data_test, data_train, image_id = generator.resume()
elif (config['make_dataset'] == "display"):
data_train, data_test = generator.init_data()
generator.episode_zero()
# the main loop
for episode in range(int(image_id/(config['num_steps']*np.count_nonzero(config['active_cameras']))), config['num_episodes']): #loop through episodes
print("episode: {}/{}".format(episode, config['num_episodes']))
#env reset
#random_robot randomizes the init position of robots
#random_pos randomizes the init positions of objects
# if episode == 0:
# generator.episode_zero()
if episode % config['num_episodes_hard_reset'] == 0: #to prevent objects vanishing when GUI is on
print("Hard reset!!!")
env.reset(hard=True)
env.reset(random_robot=config['random_arm_movement'], random_pos=True, hard=False)
observation = env.get_observation()
env_objects = observation["objects"]
for t in range(config['num_steps']): #loop through steps
# randomize the movements of robots (using joint control)
action = env.action_space.sample()
observation, reward, done, info = env.step(action)
if t == config['num_steps']-1 or t%config['make_shot_every_frame'] == 0: # we only use frames from some steps
env.render() #only render at the steps/frames we use for dataset
for c in range(np.count_nonzero(config['active_cameras'])): #loop through active cameras
camera_id=np.nonzero(config['active_cameras'])[0][c]
image_id = image_id + 1 #unique
isTestSample = np.random.random_sample() < config['train_test_split_pct'] # bool, test/train data?
path = config['output_test_folder'] if isTestSample == True else config['output_train_folder']
#get dataset image and its mask
im = observation["camera_data"][camera_id]["image"]
im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR) #fix colors
img_mask = observation["camera_data"][camera_id]["segmentation_mask"]
obj_ids = [x for x in np.unique(img_mask)] #identify objects(links) in the camera view (in the image)
img_mask = np.where(np.isin(img_mask,gripper_uids), gripper_uids[0], img_mask) #merge gripper links
img_mask = np.where(np.isin(img_mask,robot_uids), robot_uids[0], img_mask) #merge robot links
obj_ids = [x for x in np.unique(img_mask)] #identify merged objects in the camera view (in the image)
                    #prepare data structure
data, name = generator.data_struct_image()
for object_uid in obj_ids: #loop through kuka and used objects in the image (in the camera view)
if object_uid == robot_uids[0]:
class_name = config['robot']
elif object_uid == gripper_uids[0]:
class_name = env.robot.get_name()
else:
env_object_list = list(filter(lambda object: object.uid == object_uid, env_objects))
if len(env_object_list) > 0:
env_object = env_object_list[0]
class_name = env_object.get_name()
else:
continue
if class_name in config['used_class_names']:
class_id = config['used_class_names'][class_name]
generator.get_append_annotations() #annotate and append annotations
if config['visualize']: #visualize
generator.visualize()
#store dataset image and info
generator.store_image_info()
if config['make_dataset'] in ["new", "resume"]:
cv2.imwrite(os.path.join(path, name), im, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
if done:
print("Episode finished after {} timesteps".format(t+1))
break
# write JSON annotations every n periods or at the end
if episode % config['autosafe_episode'] == 0 or episode == config['num_episodes']-1:
generator.write_json_end()
data_train, data_test = generator.init_data()
# end
print('DATASET FINISHED')
| 2.75 | 3 |
assetoolz/assets.py | aspyatkin/assetoolz | 0 | 12795513 | import os
from .cache import Cache
from .models import CacheEntry
from .utils import get_file_hash, save_file, load_file
import shutil
import io
from .compiler import ExpressionProcessor
from .expressions import stylesheets, scripts, html
import subprocess
import tempfile
class AssetCollection(object):
def __init__(self, file_list, settings):
self._assets = []
self._settings = settings
for path in file_list:
res = get_asset_objects(path, settings)
if type(res) is list:
for asset in res:
self._assets.append(asset)
self._assets[-1]._collection = self
self._assets[-1]._settings = settings
else:
if res is None:
continue
self._assets.append(res)
self._assets[-1]._collection = self
self._assets[-1]._settings = settings
def find_asset(self, path, lang):
for asset in self._assets:
if asset._path == path and asset._lang == lang:
return asset
return None
def pick_dependencies(self):
print('Found {count:d} assets'.format(count=len(self._assets)))
if self._settings.verbose:
print("Picking dependencies...")
for asset in self._assets:
asset.parse()
if self._settings.verbose:
print(asset)
print('Dependencies {dependencies}\n'.format(
dependencies=asset._dependencies))
self._assets = DependencyResolver.topological_sort(self._assets)
if self._settings.verbose:
print('Build order:\n{collection}\n'.format(
collection=self._assets))
def build(self):
print('Building assets...')
for asset in self._assets:
asset.compile(force=self._settings.force)
print('Build done.')
class DependencyResolver(object):
@staticmethod
def topological_sort(assets_unsorted):
assets_sorted = []
while len(assets_unsorted) > 0:
acyclic = False
for asset in assets_unsorted:
for dependency in asset._dependencies:
if dependency in assets_unsorted:
break
else:
acyclic = True
assets_unsorted.remove(asset)
assets_sorted.append(asset)
if not acyclic:
raise RuntimeError('A cyclic dependency occurred')
return assets_sorted
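# Ordering sketch for DependencyResolver.topological_sort (hypothetical assets):
# if page.html depends on _header.html and _header.html depends on nothing, the
# sorted build order is [_header.html, page.html], so every asset is compiled
# only after all of its dependencies have been compiled.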
class Asset(object):
FILE = 0
STRING = 1
def __init__(self, resource_type, path, lang):
self._resource_type = resource_type
self._path = path
self._lang = lang
self._collection = None
self._settings = None
self._dependencies = []
self._tool_cache = Cache()
self._flag_modified = False
def is_partial(self, path):
return os.path.basename(path).startswith("_")
def get_target_path(self, **opts):
common_prefix = os.path.commonprefix([
self._path,
self._get_source_dir()])
path_part = self._path[len(common_prefix)+1:]
if 'hash' in opts:
parts = os.path.splitext(path_part)
new_filename = '%s-%s' % (parts[0], opts['hash'])
path_part = '%s%s' % (new_filename, parts[1])
if 'change_extension' in opts:
new_ext = opts['change_extension']
parts = os.path.splitext(path_part)
path_part = '%s%s' % (parts[0], new_ext)
if 'lang' in opts and not(opts['lang'] is None):
lang = opts['lang']
parts = os.path.splitext(path_part)
path_part = '%s-%s%s' % (parts[0], lang, parts[1])
if self.is_partial(path_part):
target_path = os.path.join(self._get_partials_dir(), path_part)
else:
target_path = os.path.join(self._get_target_dir(), path_part)
return target_path
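    # Naming sketch for get_target_path above (hypothetical inputs): with
    # hash='abc123', a source file 'app.coffee' becomes 'app-abc123.coffee';
    # adding change_extension='.js' and lang='en' would yield 'app-abc123-en.js'.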
def __repr__(self):
if self._lang is None:
t = '{path}'
else:
t = '{path} ({lang})'
common_prefix = os.path.commonprefix([
self._path,
self._get_source_dir()])
return t.format(path=self._path[len(common_prefix) + 1:],
lang=self._lang)
def add_dependency(self, path, lang=None):
dependency = self._collection.find_asset(path, lang)
if dependency:
if dependency not in self._dependencies:
self._dependencies.append(dependency)
else:
print("Couldn't find dependency with path %s" % path)
def __eq__(self, other):
return self._path == other._path and self._lang == other._lang
def __ne__(self, other):
        return self._path != other._path or self._lang != other._lang
def parse(self):
self._parse()
def dependencies_modified(self):
for dep_asset in self._dependencies:
if dep_asset._flag_modified:
return True
return False
def compile(self, force=False):
if self._resource_type == Asset.FILE:
cache_entry = self._tool_cache.find_entry(self._path, self._lang)
file_modified = True if cache_entry is None\
else cache_entry.file_modified() or self.dependencies_modified()
if file_modified or force:
if cache_entry:
if os.path.exists(cache_entry.target):
os.remove(cache_entry.target)
target_path = self._get_target_path()
self._compile(target_path)
if cache_entry:
cache_entry.target = target_path
self._tool_cache.update(cache_entry)
print('Updated {asset}'.format(asset=self))
else:
cache_entry = CacheEntry(self._path, target_path, self._lang)
self._tool_cache.add(cache_entry)
print('Created {asset}'.format(asset=self))
self._flag_modified = True
else:
if self._settings.verbose:
print('Cached {asset}'.format(asset=self))
else:
print("String asset")
class TextAsset(Asset):
def __init__(self, path, lang=None):
super(TextAsset, self).__init__(Asset.FILE, path, lang)
self._data = None
split = os.path.splitext(path)
self._basename = split[0]
self._extension = split[1]
def load(self):
with io.open(self._path, 'r', encoding='utf-8') as f:
self._data = f.read()
def save(self, path):
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
save_file(path, self._data)
class StylesheetAsset(TextAsset):
@staticmethod
def supported_extensions():
return ['.css', '.scss']
@staticmethod
def get_languages(settings):
return settings.stylesheets.languages
def _get_partials_dir(self):
return os.path.join(self._settings.partials, 'stylesheets')
def _get_source_dir(self):
return self._settings.stylesheets.source
def _get_target_dir(self):
return self._settings.stylesheets.target
def _get_target_path(self):
return self.get_target_path(hash=get_file_hash(self._path, unique=True))
def _parse(self):
self.load()
self._processor = ExpressionProcessor(self, [
stylesheets.ImageUrlExpression,
stylesheets.IncludeExpression,
stylesheets.FontUrlExpression
])
self._processor.parse()
def minify(self):
temp_path = tempfile.mkdtemp()
source_file = os.path.join(temp_path, "source.css")
save_file(source_file, self._data)
target_file = os.path.join(temp_path, "target.css")
proc = subprocess.Popen(
[
"java",
"-Xss100m",
"-jar",
self._settings.yuicompressor_file,
"--type",
"css",
"-o",
target_file,
source_file
],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
)
out, err = proc.communicate()
self._data = load_file(target_file)
shutil.rmtree(temp_path)
def _compile(self, target_path):
self._processor.compile(self._settings, target_path)
if self._settings.minify and not self.is_partial(target_path):
if self._settings.verbose:
print('Minifying {asset}'.format(asset=self))
self.minify()
self.save(target_path)
class ScriptAsset(TextAsset):
@staticmethod
def supported_extensions():
return ['.js', '.coffee']
@staticmethod
def get_languages(settings):
return settings.scripts.languages
def _get_partials_dir(self):
return os.path.join(self._settings.partials, 'scripts')
def _get_source_dir(self):
return self._settings.scripts.source
def _get_target_dir(self):
return self._settings.scripts.target
def _get_target_path(self):
return self.get_target_path(
hash=get_file_hash(self._path, unique=True),
change_extension='.js'
)
def _parse(self):
self.load()
self._processor = ExpressionProcessor(self, [
scripts.IncludeExpression,
scripts.ScriptUrlExpression,
scripts.AppConfExpression,
scripts.ResourceUrlExpression
])
self._processor.parse()
def minify(self):
temp_path = tempfile.mkdtemp()
source_file = os.path.join(temp_path, "source.js")
save_file(source_file, self._data)
target_file = os.path.join(temp_path, "target.js")
proc = subprocess.Popen(
[
"java",
"-jar",
self._settings.yuicompressor_file,
"--type",
"js",
"-o",
target_file,
source_file
],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
)
out, err = proc.communicate()
self._data = load_file(target_file)
shutil.rmtree(temp_path)
def compile_coffee(self):
temp_path = tempfile.mkdtemp()
source_file = os.path.join(temp_path, "source.coffee")
save_file(source_file, self._data)
target_file = os.path.join(temp_path, "source.js")
proc = subprocess.Popen(
[
self._settings.coffee_bin,
"-c",
source_file
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
self._data = load_file(target_file)
shutil.rmtree(temp_path)
def _compile(self, target_path):
self._processor.compile(self._settings, target_path)
if self._extension == '.coffee':
if self._settings.verbose:
print('Using CoffeeScript Compiler for {asset}'.format(asset=self))
self.compile_coffee()
if self._settings.minify and not self.is_partial(target_path):
if self._settings.verbose:
print('Minifying {asset}'.format(asset=self))
self.minify()
self.save(target_path)
class HtmlAsset(TextAsset):
@staticmethod
def supported_extensions():
return ['.html']
@staticmethod
def get_languages(settings):
return settings.html.languages
def _get_partials_dir(self):
return os.path.join(self._settings.partials, 'html')
def _get_source_dir(self):
return self._settings.html.source
def _get_target_dir(self):
return self._settings.html.target
def _get_target_path(self):
return self.get_target_path(lang=self._lang)
def _parse(self):
self.load()
self._processor = ExpressionProcessor(self, [
html.IncludeExpression,
html.StylesheetUrlExpression,
html.ScriptUrlExpression,
html.ImageUrlExpression,
html.AppConfExpression,
html.I18nExpression,
html.I18nTemplateExpression,
html.ResourceUrlExpression
])
self._processor.parse()
def minify(self):
temp_path = tempfile.mkdtemp()
source_file = os.path.join(temp_path, "source.html")
save_file(source_file, self._data)
target_file = os.path.join(temp_path, "target.html")
proc = subprocess.Popen(
[
"java",
"-jar",
self._settings.htmlcompressor_file,
"--type",
"html",
"--mask",
"*.html",
"-o",
target_file,
source_file,
"--remove-intertag-spaces"
],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE
)
out, err = proc.communicate()
self._data = load_file(target_file)
shutil.rmtree(temp_path)
def _compile(self, target_path):
self._processor.compile(self._settings, target_path)
if self._settings.minify and not self.is_partial(target_path):
if self._settings.verbose:
print('Minifying {asset}'.format(asset=self))
self.minify()
self.save(target_path)
class BinaryAsset(Asset):
def __init__(self, path, lang=None):
super(BinaryAsset, self).__init__(Asset.FILE, path, lang)
def _get_target_path(self):
return self.get_target_path(hash=get_file_hash(self._path, unique=True))
def _parse(self):
pass
def _compile(self, target_path):
if not os.path.exists(os.path.dirname(target_path)):
os.makedirs(os.path.dirname(target_path))
shutil.copy(self._path, target_path)
class ImageAsset(BinaryAsset):
def __init__(self, path, lang=None):
super(ImageAsset, self).__init__(path, lang)
@staticmethod
def supported_extensions():
return ['.png', '.jpg', '.gif']
@staticmethod
def get_languages(settings):
return settings.images.languages
def _get_partials_dir(self):
return os.path.join(self._settings.partials, 'images')
def _get_source_dir(self):
return self._settings.images.source
def _get_target_dir(self):
return self._settings.images.target
class FontAsset(BinaryAsset):
def __init__(self, path, lang=None):
super(FontAsset, self).__init__(path, lang)
@staticmethod
def supported_extensions():
return ['.eot', '.svg', '.ttf', '.woff']
@staticmethod
def get_languages(settings):
return settings.fonts.languages
def _get_partials_dir(self):
return os.path.join(self._settings.partials, 'fonts')
def _get_source_dir(self):
return self._settings.fonts.source
def _get_target_dir(self):
return self._settings.fonts.target
def get_asset_objects(path, settings):
asset_classes = [
ImageAsset,
FontAsset,
StylesheetAsset,
HtmlAsset,
ScriptAsset
]
file_ext = os.path.splitext(path)[1]
for asset_class in asset_classes:
if file_ext in asset_class.supported_extensions():
langs = asset_class.get_languages(settings)
if langs is None:
return asset_class(path, None)
else:
return [asset_class(path, lang) for lang in langs]
return None
| 2.265625 | 2 |
PIP/Class Program/ClassQuestion1.py | ankitrajbiswal/SEM_5 | 10 | 12795514 | def rectangle(breadth,length):
return breadth*length,2*(length+breadth)
x,y=rectangle(float(input("Enter breatdh ")),float(input("Enter length ")))
print("Area ",x,"Perimeter ",y)
| 4.09375 | 4 |
data_mgmt/data_prep/gen_motif.py | theandyb/smaug-redux | 0 | 12795515 | from itertools import product
alpha = ['A', 'T', 'C', 'G']
motifs = [a+b+c+d+e+f+g for a,b,c,d,e,f,g in product(alpha, repeat=7)]
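# This enumerates every possible 7-mer over {A, T, C, G}: 4**7 = 16384 motifs,
# written one per line to motifs7.txt below.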
with open('motifs7.txt', 'w') as f:
for item in motifs:
f.write("%s\n" % item)
| 2.875 | 3 |
bak/extract/misc.py | ashlinrichardson/bcstats_ohcs_craigslist | 2 | 12795516 | <filename>bak/extract/misc.py
import os
import sys
import csv
import time
args = sys.argv
def err(msg):
print("Error:", msg)
sys.exit(1)
# parallel for loop
def parfor(my_function, my_inputs):
# evaluate function in parallel, and collect the results
import multiprocessing as mp
pool = mp.Pool(mp.cpu_count())
result = pool.map(my_function, my_inputs)
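    # Note: pool.map preserves input order, so result[i] corresponds to
    # my_inputs[i]; the pool is not explicitly close()d/join()ed here and is
    # left for garbage collection.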
return(result) | 2.59375 | 3 |
TPP_RL.py | SudoMishra/mtech_thesis | 0 | 12795517 | import networkx as nx
graph = nx.DiGraph()
nodes = [f"{i}" for i in range(1, 13)]
nodes.extend([chr(i) for i in range(1, 13)])
graph.add_nodes_from(nodes)
class Node:
def __init__(self, name, direct, sig):
self.name = name
self.direct = direct
self.sig = sig
class Edge:
def __init__(self, prev, to, tp, direct):
self.prev = prev
self.to = to
self.tp = tp
self.direct = direct
| 3.28125 | 3 |
djangoflutterwave/tests/test_template_tags.py | bdelate/django-flutterwave | 9 | 12795518 | <reponame>bdelate/django-flutterwave
# stdlib imports
from unittest.mock import patch
# django imports
from django.test import TestCase
# 3rd party imports
# project imports
from djangoflutterwave.tests.factories import FlwPlanModelFactory, UserFactory
from djangoflutterwave.templatetags.djangoflutterwave_tags import pay_button_params
class TestTemplateTags(TestCase):
"""Test suite for template tags"""
@patch(
"djangoflutterwave.templatetags.djangoflutterwave_tags.create_transaction_ref"
)
@patch("djangoflutterwave.templatetags.djangoflutterwave_tags.settings")
@patch("djangoflutterwave.templatetags.djangoflutterwave_tags.reverse")
def test_pay_button_params(
self, mock_reverse, mock_settings, mock_create_transaction_ref
):
"""Ensure a json string is returned containing the correct tx_ref,
public_key and redirect_url"""
mock_reverse.return_value = "test"
mock_settings.FLW_PUBLIC_KEY = "test"
mock_create_transaction_ref.return_value = "txref"
plan = FlwPlanModelFactory()
user = UserFactory()
expected_response = (
'{"tx_ref": "txref"' ', "redirect_url": "test", "public_key": "test"}'
)
actual_response = pay_button_params(user_pk=user.pk, plan_pk=plan.pk)
mock_reverse.assert_called()
self.assertEqual(expected_response, actual_response)
| 2.515625 | 3 |
torchnet/containers/models/__init__.py | a5chin/torchnet | 0 | 12795519 | <reponame>a5chin/torchnet
from .classification import Classifier
| 0.976563 | 1 |
datastructure/practice/c7/r_7_1.py | stoneyangxu/python-kata | 0 | 12795520 | import unittest
from datastructure.links.Node import Node
def from_second(head):
if head is None:
raise ValueError("Linked list is empty")
return head._next
class MyTestCase(unittest.TestCase):
def test_something(self):
head = Node(0, None)
current = head
for i in range(1, 6):
new_node = Node(i, None)
current._next = new_node
current = new_node
second = from_second(head)
result = []
node = second
while node is not None:
result.append(node._element)
node = node._next
self.assertEqual([1, 2, 3, 4, 5], result)
if __name__ == '__main__':
unittest.main()
| 3.515625 | 4 |
backend/project/dica/migrations/0001_initial.py | rafaelsanches123/megahack-segunda-edicao-2020 | 1 | 12795521 | <gh_stars>1-10
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='DicaModel',
fields=[
('descricao', models.CharField(max_length=200, primary_key=True)),
],
options={
'db_table': 'dica',
},
),
]
| 1.828125 | 2 |
Project1-MinNE-python/src/layer/net.py | MrCaiDev/uestc-CNTProject | 1 | 12795522 | <gh_stars>1-10
from time import sleep
from utils.coding import *
from utils.frame import *
from utils.params import *
from layer._abstract import AbstractLayer
class NetLayer(AbstractLayer):
"""主机网络层。
实现了主机应用层 <-> 主机网络层 <-> 主机物理层的消息收发。
"""
def __init__(self, device_id: str) -> None:
"""初始化主机网络层。
Args:
device_id: 该主机的设备号。
"""
self.__device_id = device_id
self.__app = f"1{device_id}300"
self.__port = f"1{device_id}200"
self.__phy = f"1{device_id}100"
super().__init__(self.__port)
self.__normal_builder = FrameBuilder()
self.__normal_builder.build(
src=self.__app,
reply_state=ReplyState.ACK,
)
self.__reply_builder = FrameBuilder()
self.__reply_builder.build(
src=self.__app,
session_state=SessionState.NORMAL,
data="",
)
self.__parser = FrameParser()
def __str__(self) -> str:
"""打印设备号与端口号。"""
return f"[Device {self.__device_id}] <Net Layer @{self.__port}>\n{'-'*30}"
def receive_all(self) -> tuple[str, bool]:
"""接收来自本机应用层与本机物理层的消息。
Returns:
- [0] 接收到的消息。
- [1] 本机应用层发来为 `True`,本机物理层发来为 `False`。
"""
while True:
message, port, _ = self._receive(bufsize=Network.IN_NE_BUFSIZE)
if port == self.__app:
return message, True
elif port == self.__phy:
return bits_to_string(message), False
else:
continue
def receive_from_app(self) -> str:
"""接收来自本机应用层的消息。
Returns:
接收到的消息。
"""
port = ""
while port != self.__app:
message, port, _ = self._receive(bufsize=Network.IN_NE_BUFSIZE)
return message
def receive_from_phy(self, timeout: int = Network.RECV_TIMEOUT) -> tuple[str, bool]:
"""接收来自本机物理层的消息。
Args:
timeout: 可选,接收超时时间,单位为秒,默认为 `RECV_TIMEOUT`。
Returns:
- [0] 接收到的消息。
- [1] 接收成功为 `True`,接收超时为 `False`。
"""
binary, _, success = self._receive(timeout=timeout)
binary = bits_to_string(binary) if success else binary
return binary, success
def send_to_app(self, message: str) -> int:
"""向本机应用层发送消息。
Args:
message: 要发送的消息。
Returns:
总共发送的字节数。
"""
return self._send(message, self.__app)
def send_to_phy(self, binary: str) -> int:
"""向本机物理层发送消息。
Args:
binary: 要发送的消息。
Returns:
总共发送的字节数。
"""
        # Flow control.
sleep(Network.FLOW_INTERVAL)
return self._send(string_to_bits(binary), self.__phy)
def should_receive(self, port: str) -> bool:
"""判断本层是否应该接收某帧。
Args:
发来的帧的目的端口号。
Returns:
应该接收为 `True`,不应该接收为 `False`。
"""
return port in (self.__app, Topology.BROADCAST_PORT)
def build_pool(self, app_data: dict) -> list[Frame]:
"""将消息打包为帧。
Args:
app_data: 本机应用层传来的消息数据。
Returns:
打包的帧列表。
"""
message = app_data["message"]
frame_num = Frame.calc_num(message)
        # The first frame is the request frame.
request_frame = self.__normal_builder.build(
session_state=SessionState.REQ_TXT
if app_data["msgtype"] == MessageType.TEXT
else SessionState.REQ_IMG,
dst=app_data["dst"],
)
frame_pool = [request_frame]
        # The middle frames are normal data frames.
frame_pool.extend(
[
self.__normal_builder.build(
session_state=SessionState.NORMAL,
data=message[
i * FrameParam.DATA_LEN : (i + 1) * FrameParam.DATA_LEN
],
)
for i in range(frame_num - 1)
]
)
        # The last frame is the final (FIN) frame.
final_frame = self.__normal_builder.build(
session_state=SessionState.FIN,
data=message[(frame_num - 1) * FrameParam.DATA_LEN :],
)
frame_pool.append(final_frame)
return frame_pool
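    # Pool-shape sketch for build_pool above (illustrative): a text message that
    # splits into three data chunks is packed as
    # [REQ_TXT frame, NORMAL frame, NORMAL frame, FIN frame] -- one request
    # frame, frame_num - 1 normal frames, then the final frame with the remainder.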
def build_ack(self, dst: str) -> Frame:
"""生成 ACK 帧。
Args:
dst: ACK 的目的地,即原消息的源。
Returns:
生成的 ACK 帧。
"""
return self.__reply_builder.build(reply_state=ReplyState.ACK, dst=dst)
def build_nak(self, dst: str) -> Frame:
"""生成 NAK 帧。
Args:
dst: NAK 的目的地,即原消息的源。
Returns:
生成的 NAK 帧。
"""
return self.__reply_builder.build(reply_state=ReplyState.NAK, dst=dst)
def parse_reply(self, binary: str) -> bool:
"""解析回复。
Args:
binary: 含有回复的 01 字符串。
Returns:
ACK 为 `True`,NAK 为 `False`。
"""
response = self.__parser.parse(binary)
return True if response.reply_state == ReplyState.ACK else False
def parse_message(self, binary: str) -> Frame:
"""解析消息。
Args:
binary: 含有消息的 01 字符串。
Returns:
收到的消息帧。
"""
return self.__parser.parse(binary)
| 2.4375 | 2 |
gammapy/astro/population/tests/test_simulate.py | Rishank2610/gammapy | 155 | 12795523 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy.testing import assert_allclose, assert_equal
import astropy.units as u
from astropy.table import Table
from gammapy.astro.population import (
add_observed_parameters,
add_pulsar_parameters,
add_pwn_parameters,
add_snr_parameters,
make_base_catalog_galactic,
make_catalog_random_positions_cube,
make_catalog_random_positions_sphere,
)
def test_make_catalog_random_positions_cube():
table = make_catalog_random_positions_cube(random_state=0)
d = table[0]
assert len(table) == 100
assert len(table.colnames) == 3
assert table["x"].unit == "pc"
assert_allclose(d["x"], 0.0976270078546495)
assert table["y"].unit == "pc"
assert_allclose(d["y"], 0.3556330735924602)
assert table["z"].unit == "pc"
assert_allclose(d["z"], -0.37640823601179485)
table = make_catalog_random_positions_cube(dimension=2, random_state=0)
assert_equal(table["z"], 0)
table = make_catalog_random_positions_cube(dimension=1, random_state=0)
assert_equal(table["y"], 0)
assert_equal(table["z"], 0)
def test_make_catalog_random_positions_sphere():
table = make_catalog_random_positions_sphere(random_state=0)
d = table[0]
assert len(table) == 100
assert len(table.colnames) == 3
assert table["lon"].unit == "rad"
assert_allclose(d["lon"], 3.4482969442579128)
assert table["lat"].unit == "rad"
assert_allclose(d["lat"], 0.36359133530192267)
assert table["distance"].unit == "pc"
assert_allclose(d["distance"], 0.6780943487897606)
def test_make_base_catalog_galactic():
table = make_base_catalog_galactic(n_sources=10, random_state=0)
d = table[0]
assert len(table) == 10
assert len(table.colnames) == 13
assert table["age"].unit == "yr"
assert_allclose(d["age"], 548813.50392732478)
assert table["n_ISM"].unit == "cm-3"
assert_allclose(d["n_ISM"], 1.0)
assert table["spiralarm"].unit is None
assert d["spiralarm"] == "Crux Scutum"
assert table["x_birth"].unit == "kpc"
assert_allclose(d["x_birth"], -5.856461, atol=1e-5)
assert table["y_birth"].unit == "kpc"
assert_allclose(d["y_birth"], 3.017292, atol=1e-5)
assert table["z_birth"].unit == "kpc"
assert_allclose(d["z_birth"], 0.049088, atol=1e-5)
assert table["x"].unit == "kpc"
assert_allclose(d["x"], -5.941061, atol=1e-5)
assert table["y"].unit == "kpc"
assert_allclose(d["y"], 3.081642, atol=1e-5)
assert table["z"].unit == "kpc"
assert_allclose(d["z"], 0.023161, atol=1e-5)
assert table["vx"].unit == "km/s"
assert_allclose(d["vx"], -150.727104, atol=1e-5)
assert table["vy"].unit == "km/s"
assert_allclose(d["vy"], 114.648494, atol=1e-5)
assert table["vz"].unit == "km/s"
assert_allclose(d["vz"], -46.193814, atol=1e-5)
assert table["v_abs"].unit == "km/s"
assert_allclose(d["v_abs"], 194.927693, atol=1e-5)
def test_add_snr_parameters():
table = Table()
table["age"] = [100, 1000] * u.yr
table["n_ISM"] = u.Quantity(1, "cm-3")
table = add_snr_parameters(table)
assert len(table) == 2
assert table.colnames == ["age", "n_ISM", "E_SN", "r_out", "r_in", "L_SNR"]
assert table["E_SN"].unit == "erg"
assert_allclose(table["E_SN"], 1e51)
assert table["r_out"].unit == "pc"
assert_allclose(table["r_out"], [1, 3.80730787743])
assert table["r_in"].unit == "pc"
assert_allclose(table["r_in"], [0.9086, 3.45931993743])
assert table["L_SNR"].unit == "1 / s"
assert_allclose(table["L_SNR"], [0, 1.0768e33])
def test_add_pulsar_parameters():
table = Table()
table["age"] = [100, 1000] * u.yr
table = add_pulsar_parameters(table, random_state=0)
assert len(table) == 2
assert len(table.colnames) == 10
assert table["age"].unit == "yr"
assert_allclose(table["age"], [100, 1000])
assert table["P0"].unit == "s"
assert_allclose(table["P0"], [0.214478, 0.246349], atol=1e-5)
assert table["P1"].unit == ""
assert_allclose(table["P1"], [6.310423e-13, 4.198294e-16], atol=1e-5)
assert table["P0_birth"].unit == "s"
assert_allclose(table["P0_birth"], [0.212418, 0.246336], atol=1e-5)
assert table["P1_birth"].unit == ""
assert_allclose(table["P1_birth"], [6.558773e-13, 4.199198e-16], atol=1e-5)
assert table["CharAge"].unit == "yr"
assert_allclose(table["CharAge"], [2.207394e-21, 1.638930e-24], atol=1e-5)
assert table["Tau0"].unit == "yr"
assert_allclose(table["Tau0"], [5.131385e03, 9.294538e06], atol=1e-5)
assert table["L_PSR"].unit == "erg / s"
assert_allclose(table["L_PSR"], [2.599229e36, 1.108788e33], rtol=1e-5)
assert table["L0_PSR"].unit == "erg / s"
assert_allclose(table["L0_PSR"], [2.701524e36, 1.109026e33], rtol=1e-5)
assert table["B_PSR"].unit == "G"
assert_allclose(table["B_PSR"], [1.194420e13, 3.254597e11], rtol=1e-5)
def test_add_pwn_parameters():
table = make_base_catalog_galactic(n_sources=10, random_state=0)
# To compute PWN parameters we need PSR and SNR parameters first
table = add_snr_parameters(table)
table = add_pulsar_parameters(table, random_state=0)
table = add_pwn_parameters(table)
d = table[0]
assert len(table) == 10
assert len(table.colnames) == 27
assert table["r_out_PWN"].unit == "pc"
assert_allclose(d["r_out_PWN"], 1.378224, atol=1e-4)
def test_add_observed_parameters():
table = make_base_catalog_galactic(n_sources=10, random_state=0)
table = add_observed_parameters(table)
d = table[0]
assert len(table) == 10
assert len(table.colnames) == 20
assert table["distance"].unit == "pc"
assert_allclose(d["distance"], 13016.572756, atol=1e-5)
assert table["GLON"].unit == "deg"
assert_allclose(d["GLON"], -27.156565, atol=1e-5)
assert table["GLAT"].unit == "deg"
assert_allclose(d["GLAT"], 0.101948, atol=1e-5)
assert table["VGLON"].unit == "deg / Myr"
assert_allclose(d["VGLON"], 0.368166, atol=1e-5)
assert table["VGLAT"].unit == "deg / Myr"
assert_allclose(d["VGLAT"], -0.209514, atol=1e-5)
assert table["RA"].unit == "deg"
assert_allclose(d["RA"], 244.347149, atol=1e-5)
assert table["DEC"].unit == "deg"
assert_allclose(d["DEC"], -50.410142, atol=1e-5)
def test_chain_all():
# Test that running the simulation functions in chain works
table = make_base_catalog_galactic(n_sources=10, random_state=0)
table = add_snr_parameters(table)
table = add_pulsar_parameters(table, random_state=0)
table = add_pwn_parameters(table)
table = add_observed_parameters(table)
d = table[0]
# Note: the individual functions are tested above.
# Here we just run them in a chain and do very basic asserts
# on the output so that we make sure we notice changes.
assert len(table) == 10
assert len(table.colnames) == 34
assert table["r_out_PWN"].unit == "pc"
assert_allclose(d["r_out_PWN"], 1.378224, atol=1e-4)
assert table["RA"].unit == "deg"
assert_allclose(d["RA"], 244.347149, atol=1e-5)
| 1.953125 | 2 |
SRD-onef.py | sankeyad/strongly-regular-designs | 0 | 12795524 | <reponame>sankeyad/strongly-regular-designs<gh_stars>0
# SRD-onef.py
# Reads a single text file of strongly regular graph parameters.
# Output is feasible parameter sets for strongly regular designs admitting
# strongly regular decomposition, in which two graphs from the input file are
# the SRGs on the two fibres of the SRD.
# Usage: python3.7 SRD-onef.py <infile> <outfile>
# The infile should be a tab-separated text file with one SRG parameter set in the form 'n k lambda mu' per line, e.g.:
# 204 63 22 18
# 204 28 2 4
# 205 96 50 40
# 205 68 15 26
# 208 75 30 25
# 208 81 24 36
#import time
#begin_time = time.perf_counter()
from sys import argv
script, infile, outfile = argv
from tabulate import tabulate
import math
def is_integer_num(n):
if isinstance(n, int):
return True
if isinstance(n, float):
return n.is_integer()
return False
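# Hedged examples: is_integer_num(4) and is_integer_num(4.0) are True, while
# is_integer_num(2.5) is False; the checks below use it to keep only parameter
# sets with integral eigenvalue multiplicities and counts.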
def check_4_srd(graph1, graph2): # checks a given pair of srgs
n1 = graph1[0]
k1 = graph1[1]
lam1 = graph1[2]
mu1 = graph1[3]
# compute r1, s1
c = lam1 - mu1
delta = c**2 + 4*(k1 - mu1)
d = math.sqrt(delta)
r1 = ( c + d )/2
s1 = ( c - d )/2
n2 = graph2[0]
k2 = graph2[1]
lam2 = graph2[2]
mu2 = graph2[3]
# compute r2, s2
c = lam2 - mu2
delta = c**2 + 4*(k2 - mu2)
d = math.sqrt(delta)
r2 = ( c + d )/2
s2 = ( c - d )/2
table = []
if k2 < k1:
S1 = 2 + k1 - k2
else:
S1 = 2
n = n1 + n2
while S1 < n1:
S2 = k2 + S1 - k1
k0 = k1 + S2
for a1 in range(0, S1):
a2 = a1 + lam2 - lam1
lam0 = a1 + lam2
if a2 >= 0 and lam0 < k0: # then carry on
if a1 < S1*S2/n2:
bot = a1+1
top = S1
else:
bot = 0
top = a1
for b1 in range(bot, top): # b1 < a1 in DGH but need to allow b1 < S1.
b2 = b1 + mu2 - mu1
mu0 = b1 + mu2
c = lam0 - mu0
delta = c**2 + 4*(k0 - mu0)
d = math.sqrt(delta)
r = ( c + d )/2
s = ( c - d )/2
f = (( n-1 )*(-s) - k0)/(r-s)
# check these parameters first
if b2 == a2 or b2 < 0 or n-2*k0+mu0-2 < 0 or n-2*k0+lam0 < 0:
continue #any of those would be invalid
elif (n-k0-1)*mu0 != k0*(k0-lam0-1):
continue
elif r*s*s - 2*r*r*s - r*r - k0*r + k0*s*s + 2*k0*s < 0 or r*r*s - 2*r*s*s - s*s - k0*s + k0*r*r + 2*k0*r < 0:
# above checks Krein parameters on gamma0
continue
elif not is_integer_num(f): # checks integrality of eigenvalue multiplicities
continue
else: # carry on
N1 = a2*k1/S2
N2 = a1*k2/S1
if is_integer_num(N1) and is_integer_num(N2) and n1 != S1 and n2 !=S2: # then good
P1 = ( (k1-N1)*S1 )/(n1-S1) # check it's an integer
P2 = ( (k2-N2)*S2 )/(n2-S2)
if is_integer_num(P1) and is_integer_num(P2):
rho1 = N1-P1
rho2 = N2-P2
sig1 = -(S2-b2)/(a2-b2)
sig2 = -(S1-b1)/(a1-b1)
if (rho1 == r1 and sig1 == s1) or (rho1 == s1 and sig1 == r1):
# then all is good, keep going
if (rho2 == r2 and sig2 == s2) or (rho2 == s2 and sig2 == r2):
# print out parameters
table.append([n1,k1,lam1,mu1,rho1,sig1,n,k0,lam0,mu0,r,s,S1,a1, b1, N1, P1])
table.append([n2,k2,lam2,mu2,rho2,sig2," "," "," "," "," "," ",S2,a2, b2, N2, P2])
else:
continue
# print(S1, "failed at eigenvalue stage")
else:
continue
# print(S1, "failed at eigenvalue stage")
else:
continue
#print("P1 or P2 not integer")
else:
continue
#print("N1 or N2 not integer")
else:
continue
#print("a2 or lam0 problem")
S1 += 1
return(table)
# end of function check_4_srd
#___________________________________
# start of main
graphs = open(infile)
params = open(outfile, 'w')
results = [] # will be table of feasible parameters for srds arising from a pair of srgs
g1 = [0,0,0,0]
g2 = [0,0,0,0]
lines = []
# first, extract a pair of lines from the text file of srgs
i = 1
j = 1
count = 0
lines.append(graphs.readline().split('\t'))
while lines[ len(lines)-1 ] != ['']:
lines.append(graphs.readline().split('\t') )
count = len(lines)-1 # this is because last item in list is now ''
for i in range(0,count):
for j in range(i, count):
line1 = lines[i] # reads the text, separated by spaces, into list
line2 = lines[j]
for entry in range(0,4): # converting text to integer
g1[entry] = int( line1[entry] )
g2[entry] = int( line2[entry] )
newones = check_4_srd(g1,g2)
# new sets are then appended to the results table
results += newones
# Loops through the input file to compare all pairs, including g2=g1.
# Uses tabulate to write results with a header row.
params.write(tabulate(results, headers =
["n_i", "k_i", "lam_i", "mu_i","rho_i", "sig_i", "n", "k", "lam", "mu", "r", "s", "S_i", "a_i", "b_i", "N_i", "P_i"]))
params.close()
graphs.close()
# print( time.perf_counter() - begin_time ) | 2.90625 | 3 |
foreshadow/smart/intent_resolving/core/data_set_parsers/base_data_set_parser.py | adithyabsk/foreshadow | 25 | 12795525 | """Class definition for the DataSetParser ABC and FeaturizerMixin."""
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Callable, Generator, List, Tuple, Type
import numpy as np
import pandas as pd
from sklearn.preprocessing import RobustScaler
class FeaturizerMixin:
"""Mixin to provide secondary featurization functionality."""
def featurize_secondary(self):
"""
Perform secondary featurization.
Sequentially trigger each featurizer to extract secondary features.
The extracted secondary metafeatures are stored in each featurizer's
`sec_metafeatures` and `sec_test_metafeatures` attributes.
These extracted metafeatures will then be collected and appended column-wise
to the `metafeature` and `test_metafeature` attributes of the DataSetParser
subclass instance.
"""
for featurizer in self.featurizers:
if type(featurizer).__name__ == "RawDataSetFeaturizerViaLambda":
featurizer.featurize(
self._create_raw_generator(),
keys=self.metafeatures,
test_keys=self.test_metafeatures,
multiprocess=self._multiprocess_raw_secondary,
)
else:
featurizer.featurize(
meta_df=self.metafeatures,
test_meta_df=self.test_metafeatures,
)
self.__add_secondary_metafeatures()
def __add_secondary_metafeatures(self):
"""Add secondary features to the training and test metafeature attributes."""
# Get secondary feature names
if self.metafeatures is not None:
sec_feature_names = list(self.metafeatures) + [
name
for featurizer in self.featurizers
for name in featurizer.sec_feature_names
]
elif self.test_metafeatures is not None:
sec_feature_names = list(self.test_metafeatures) + [
name
for featurizer in self.featurizers
for name in featurizer.sec_feature_names
]
if self.metafeatures is not None:
sec_metafeatures = [x.sec_metafeatures for x in self.featurizers]
self.metafeatures = pd.concat(
[self.metafeatures, *sec_metafeatures],
axis=1,
ignore_index=True,
)
self.metafeatures.columns = sec_feature_names
if self.test_metafeatures is not None:
sec_test_metafeatures = [
x.sec_test_metafeatures for x in self.featurizers
]
self.test_metafeatures = pd.concat(
[self.test_metafeatures, *sec_test_metafeatures],
axis=1,
ignore_index=True,
)
self.test_metafeatures.columns = sec_feature_names
class DataSetParser(ABC, FeaturizerMixin):
"""
Abstract base class to load and extract metafeatures from raw data sets.
FeaturizerMixin provides the `.featurize` method.
Instance attributes:
src {Path}
-- Path to data set file on disk.
metafeatures {pd.DataFrame}
-- Metafeatures extracted from the raw data set. Each metafeature
row corresponds to a feature column in the raw data set.
labels {pd.Series}
-- Label corresponding to each metafeature.
test_src {Path}
-- Optional path to test raw data set file on disk. This attribute
applies more to the subclasses of MetaDataSetParser.
test_metafeatures {pd.DataFrame}
-- Optional metafeatures extracted from the test raw data set.
test_labels {pd.Series}
-- Optional labels corresponding to each test metafeature row.
scaler {RobustScaler}
            -- A scaler used to normalize metafeatures before serving them
for training.
featurizers: {List}
-- A list of featurizers that performs secondary metafeaturizations.
Class attributes:
NUM_BASE_METAFEATURES {int}
-- Number of base metafeatures.
Used to separate base and secondary metafeatures.
Abstract methods:
load_data_set
            -- Load the data set and perform necessary cleaning and parsing.
featurize_base
-- Featurize base metafeatures.
normalize_features
-- Performs normalization on the metafeatures and test metafeatures
(if provided).
_create_raw_generator
-- Returns a generator of raw data sets. This supports the
MetaDataSetFeaturizerViaLambda class functionality.
"""
NUM_BASE_METAFEATURES = (
7
) # Includes (total_val, min, max, mean, std, num_nans, num_distincts)
def __init__(self):
"""Init function."""
self.src: Path = None
self.labels: pd.Series = None
self.metafeatures: pd.DataFrame = None
self.test_src: Path = None
self.test_labels: pd.Series = None
self.test_metafeatures: pd.DataFrame = None
self.scaler: Type[RobustScaler] = None
self.featurizers: List = []
self._multiprocess_raw_secondary: bool = False # Multiprocessing of raw dataframe(s)
@abstractmethod
def load_data_set(self):
"""Load data set from source."""
raise NotImplementedError
@abstractmethod
def featurize_base(self):
"""Featurize base metafeatures."""
raise NotImplementedError
@abstractmethod
def normalize_features(self):
"""Normalize metafeatures for training."""
raise NotImplementedError
@abstractmethod
def _create_raw_generator(
self
) -> Generator[Tuple[str, Callable[[], pd.DataFrame]], None, None]:
raise NotImplementedError
def _select_metafeatures(
self, df: pd.DataFrame, mark: str = "*"
) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""
Select metafeatures to normalize and to retain for training.
        The following criteria are used.
Metafeatures to:
- normalize: Numerical columns
- not normalize but retain for training: Features whose title ends with `mark`.
Remainder metafeatures are dropped.
Note:
Columns are tracked by indices instead of names to avoid problems when
            there are duplicated column names.
Arguments:
df {pd.DataFrame}
-- Metafeatures dataframe.
mark {str}
-- Character to append to names of columns that should not be
                normalized but retained for training.
Returns:
Tuple[pd.DataFrame, pd.DataFrame]
-- (metafeatures_to_normalize, metafeatures_to_retain)
"""
idx_to_normalize: List[int] = []
idx_to_retain: List[int] = []
IGNORE_COLS = (
"attribute_name", # Already represented as ngrams
"sample", # Ignore sample columns which may be of type int
"total_val", # Intent prediction should not be based on # data points
"num_distincts", # Use `normalized_distinct_rate` instead
"num_nans", # Captured in `nan_rate`
)
for i, col in enumerate(df.columns):
if col in IGNORE_COLS:
continue
# Save columns that are either numeric or that have been marked
# into appropriate groups
if col[-1] == "*":
idx_to_retain.append(i)
elif self._is_numeric(df.iloc[:, i]):
idx_to_normalize.append(i)
features_to_normalize = df.iloc[:, idx_to_normalize]
features_to_retain = df.iloc[:, idx_to_retain]
return features_to_normalize, features_to_retain
def _is_numeric(self, series: pd.Series) -> bool:
return pd.api.types.is_numeric_dtype(series)
@staticmethod
def _split_features_and_labels(
mds: pd.DataFrame, label_col: str
) -> Tuple[pd.DataFrame, pd.Series]:
"""
Split features and labels.
Arguments:
mds {pd.DataFrame} -- MetaDataSet.
label_col {str} -- Column containing labels in the MetaDataSet.
Returns:
Tuple[pd.DataFrame, pd.Series] -- (features, labels) tuple.
"""
return mds.drop(label_col, axis=1), mds[label_col]
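    # Illustrative use (hedged sketch): for a MetaDataSet frame `mds` with a "label"
    # column, `X, y = DataSetParser._split_features_and_labels(mds, "label")` returns
    # the feature DataFrame and the label Series.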
| 2.984375 | 3 |
DIN/model.py | daxixi/Context-Aware-Compilation-of-DNN-Training-Pipelines-across-Edge-and-Cloud | 0 | 12795526 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class MLP(nn.Module):
def __init__(self, MLPInfo, activation='PReLU', PReLuInit=0.25, isUseBN=True, dropoutRate=0.0, initStd=0.0001):
super(MLP, self).__init__()
self.multiLayerPerceptron = nn.ModuleList() # MLP
for i in range(len(MLPInfo)-1):
self.multiLayerPerceptron.append(nn.Linear(MLPInfo[i], MLPInfo[i + 1]))
if isUseBN:
self.multiLayerPerceptron.append(nn.BatchNorm1d(MLPInfo[i + 1]))
actiFun = nn.PReLU(1, init=PReLuInit) if activation == 'PReLU' else Dice()
self.multiLayerPerceptron.append(actiFun)
self.multiLayerPerceptron.append(nn.Dropout(dropoutRate))
def forward(self, x):
for layer in self.multiLayerPerceptron:
x = layer(x)
return x
class Bottom(nn.Module):
def __init__(self, embeddingGroupInfo, MLPInfo, attMLPInfo, activation='PReLU',
PReLuInit=0.25, isUseBN=True, l2RegEmbedding=1e-6,
dropoutRate=0.0, initStd=0.0001, device=torch.device('cpu')):
super(Bottom, self).__init__()
self.dev = device
self.embeddingGroups = nn.ModuleDict() # embedding group
for key, value in embeddingGroupInfo.items():
if key == 'MovieId' or key == 'Genre':
self.embeddingGroups[key] = nn.Embedding(value[0], value[1], padding_idx=0)
else:
self.embeddingGroups[key] = nn.Embedding(value[0], value[1])
self.sequenceMeanPooling = SequencePoolingLayer(mod='mean', device=self.dev) # sequence pooling layer
self.attentionActivationUnit = AttentionActivationUnit(attMLPInfo, activation,
PReLuInit, initStd) # attention activation unit
self.sequenceAttentionPooling = SequencePoolingLayer(mod='attention', device=self.dev) # sequence pooling layer
self.to(self.dev)
def forward(self, movieIdSequence,ads, movieFeature):
movieFeatSequence = movieFeature[movieIdSequence]
adsFeat = movieFeature[ads]
movieIdFeat = self.embeddingGroups['MovieId'](movieFeatSequence[:, :, 0]) # (B, SeqLen, 16)
movieGenreFeat = self.embeddingGroups['Genre'](movieFeatSequence[:, :, 1:]) # (B, SeqLen, 6, 8)
movieGenreFeat = self.sequenceMeanPooling(movieGenreFeat, movieFeatSequence[:, :, 1:] > 0) # (B, SeqLen, 8)
#print(movieGenreFeat)
#input()
adsIdFeat = self.embeddingGroups['MovieId'](adsFeat[:, 0]) # (B, 16)
adsGenreFeat = self.embeddingGroups['Genre'](adsFeat[:, 1:]) # (B, 6, 8)
adsGenreFeat = self.sequenceMeanPooling(adsGenreFeat, adsFeat[:, 1:] > 0) # (B, 8)
adsEmbedding = torch.cat((adsIdFeat, adsGenreFeat), dim=-1) # (B, 24)
movieEmbedding = torch.cat((movieIdFeat, movieGenreFeat), dim=-1) # (B, SeqLen, 24)
attentionWeights = self.attentionActivationUnit(movieEmbedding, adsEmbedding) # (B, SeqLen, 1)
movieSequenceEmbedding = self.sequenceAttentionPooling(movieEmbedding, attentionWeights) # (B, 24)
return movieSequenceEmbedding,adsEmbedding
def forward_FR(self, movieIdSequence,ads, movieFeature):
movieSequenceEmbedding,adsEmbedding=self.forward(movieIdSequence,ads, movieFeature)
out=torch.cat((movieSequenceEmbedding,adsEmbedding),dim=0)
return out
class DIN(nn.Module):
def __init__(self, embeddingGroupInfo, MLPInfo, attMLPInfo, activation='PReLU',
PReLuInit=0.25, isUseBN=True, l2RegEmbedding=1e-6,
dropoutRate=0.0, initStd=0.0001, device=torch.device('cpu')):
super(DIN, self).__init__()
self.l2RegEmbeddding = l2RegEmbedding
self.dev = device
self.MLP = MLP(MLPInfo, activation, PReLuInit, isUseBN, dropoutRate) # MLP
self.output = nn.Linear(MLPInfo[-1], 2) # output layer
self.to(self.dev)
def forward(self, m1,m2,a1,a2):
#interactive
movieSequenceEmbedding=m1+m2
adsEmbedding=a1+a2
# MLP inputs
x = torch.cat((movieSequenceEmbedding, adsEmbedding), dim=-1)
x = self.MLP(x)
x = F.softmax(self.output(x), dim=1)
return x # (B, 2)
def regLoss(self):
totalRegLoss = torch.zeros(size=(1,), device=self.dev)
for name, param in self.named_parameters():
if 'embedding' in name and 'MovieId' in name and 'weight' in name:
totalRegLoss += torch.sum(self.l2RegEmbeddding * param*param)
return totalRegLoss
def loss(self, m1,m2,a1,a2,label, lossFunc):
preds = self.forward(m1,m2,a1,a2)
loss = lossFunc(preds[:, 1], label.float(), reduction='mean') + self.regLoss()
return loss
def predict(self, m1,m2,a1,a2):
preds = self.forward(m1,m2,a1,a2)[:, 1]
return preds.cpu().detach().numpy()
class SequencePoolingLayer(nn.Module):
def __init__(self, mod='mean', device=torch.device('cpu')):
super(SequencePoolingLayer, self).__init__()
self.mod = mod
self.dev = device
self.eps = torch.FloatTensor([1e-8]).to(self.dev)
def forward(self, x, mask):
if self.mod == 'mean':
length = torch.sum(mask.type(torch.float32), dim=-1, keepdim=True) # (..., dim, 6) -> (...,dim, 1)
x = torch.sum(x, dim=-2, keepdim=False) # (..., dim, 6, 8) -> (..., dim, 8)
x = torch.div(x, length.type(torch.float32) + self.eps) # (..., dim, 8)
elif self.mod == 'attention':
attentionWeights = torch.repeat_interleave(mask, x.shape[-1], dim=-1) # (..., dim, 1) -> (.... dim, E)
x = torch.mul(x, attentionWeights) # (..., dim, E)
x = torch.sum(x, dim=-2, keepdim=False) # (..., dim, E) -> (..., E)
else:
pass
return x
class AttentionActivationUnit(nn.Module):
def __init__(self, attMLPInfo, activation='PReLu', PReLuInit=0.25, initStd=0.0001):
super(AttentionActivationUnit, self).__init__()
self.MLP = MLP(attMLPInfo, activation, PReLuInit, isUseBN=False, dropoutRate=0.0, initStd=initStd)
self.output = nn.Linear(attMLPInfo[-1], 1)
def forward(self, x, target):
target = torch.unsqueeze(target, dim=1) # (B, 1, 24)
target = torch.repeat_interleave(target, x.shape[-2], dim=1) # (B, SeqLen, 24)
product = torch.mul(x, target) # (B, SeqLen, 24)
# product = torch.sum(product, dim=-1, keepdim=True) # (B, SeqLen, 1)
x = torch.cat((x, target, product), dim=2) # (B, SeqLen, 72)
x = self.MLP(x)
x = self.output(x)
# product = torch.sum(product, dim=-1, keepdim=True)
# product = F.softmax(product, dim=1)
return x # (B, SeqLen, 1)
class Dice(nn.Module):
    # Minimal Dice activation sketch (the original file left this class as an empty stub).
    def __init__(self, eps=1e-9):
        super(Dice, self).__init__()
        self.alpha = nn.Parameter(torch.zeros(1))  # learnable slope for the gated branch
        self.eps = eps
    def forward(self, x):
        p = torch.sigmoid((x - x.mean(dim=0)) / torch.sqrt(x.var(dim=0) + self.eps))
        return p * x + (1.0 - p) * self.alpha * x
| 2.65625 | 3 |
mmflow/__init__.py | gaotongxiao/mmflow | 1 | 12795527 | <reponame>gaotongxiao/mmflow
from .version import __version__, parse_version_info, version_info
__all__ = ['__version__', 'version_info', 'parse_version_info']
| 0.976563 | 1 |
py/probe/runtime_probe/probe_config_types_unittest.py | arccode/factory | 3 | 12795528 | #!/usr/bin/env python3
# Copyright 2020 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import unittest
from cros.factory.probe.runtime_probe import probe_config_types
from cros.factory.utils import json_utils
class ProbeStatementDefinitionBuilderTest(unittest.TestCase):
def testBuildProbeStatementDefinition(self):
builder = probe_config_types.ProbeStatementDefinitionBuilder('category_x')
builder.AddProbeFunction('func_1', 'This is func 1.')
builder.AddProbeFunction('func2', 'This is func 2.')
builder.AddIntOutputField('field1', 'This is field1')
builder.AddStrOutputField('field2', 'This is field2')
builder.AddHexOutputField('field3', 'This is field3')
builder.AddIntOutputField('field_only_func1',
'This is field ?',
probe_function_names=['func_1'])
d = builder.Build()
self.assertEqual(d.category_name, 'category_x')
self.assertCountEqual(list(d.expected_fields.keys()),
['field1', 'field2', 'field3', 'field_only_func1'])
self.assertCountEqual(list(d.probe_functions.keys()), ['func_1', 'func2'])
self.assertCountEqual(
[f.name for f in d.probe_functions['func_1'].output_fields],
['field1', 'field2', 'field3', 'field_only_func1'])
self.assertCountEqual(
[f.name for f in d.probe_functions['func2'].output_fields],
['field1', 'field2', 'field3'])
class ConcreteProbeStatementDefinitionTestBase(unittest.TestCase):
def setUp(self):
builder = probe_config_types.ProbeStatementDefinitionBuilder('category_x')
builder.AddProbeFunction('func_1', 'This is func 1.')
builder.AddProbeFunction('func2', 'This is func 2.')
builder.AddIntOutputField('int_field', '')
builder.AddStrOutputField('str_field', '')
builder.AddStrOutputField('str_field_started_with_a',
'',
value_pattern=re.compile('a.*'))
builder.AddHexOutputField('hex_field', '')
builder.AddHexOutputField('hex_field_three_digits', '', num_value_digits=3)
self.probe_statement_definition = builder.Build()
class ProbeStatementDefinitionTest(ConcreteProbeStatementDefinitionTestBase):
def _GenerateExpectResult(self, comp_name, func_name, expect_field,
func_arg=None, information=None):
statement = {
'eval': {
func_name: func_arg or {}
},
'expect': expect_field
}
if information is not None:
statement['information'] = information
return probe_config_types.ComponentProbeStatement('category_x', comp_name,
statement)
def testGenerateProbeStatementNoField(self):
result = self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1', {})
self.assertEqual(result,
self._GenerateExpectResult('comp_1', 'func_1', {}))
def testGenerateProbeStatementIntField(self):
result = self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1', {'int_field': None})
self.assertEqual(
result,
self._GenerateExpectResult('comp_1', 'func_1',
{'int_field': [False, 'int']}))
result = self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1', {'int_field': 3})
self.assertEqual(
result,
self._GenerateExpectResult('comp_1', 'func_1',
{'int_field': [True, 'int', '!eq 3']}))
def testGenerateProbeStatementStrField(self):
result = self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1', {'str_field': None})
self.assertEqual(
result,
self._GenerateExpectResult('comp_1', 'func_1',
{'str_field': [False, 'str']}))
result = self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1', {'str_field': 'sss'})
self.assertEqual(
result,
self._GenerateExpectResult('comp_1', 'func_1',
{'str_field': [True, 'str', '!eq sss']}))
result = self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1', {'str_field_started_with_a': 'a_value'})
self.assertEqual(
result,
self._GenerateExpectResult(
'comp_1', 'func_1',
{'str_field_started_with_a': [True, 'str', '!eq a_value']}))
with self.assertRaises(ValueError): # format error
self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1', {'str_field_started_with_a': 'b_value'})
# Ignore the regular expression check if the given expected value is also
# an regular expression pattern.
result = self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1',
{'str_field_started_with_a': re.compile('x.*')})
self.assertEqual(
result,
self._GenerateExpectResult('comp_1', 'func_1', {
'str_field_started_with_a': [True, 'str', '!re x.*']
}))
def testGenerateProbeStatementHexField(self):
result = self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1', {'hex_field': '0AAAA'})
self.assertEqual(
result,
self._GenerateExpectResult(
'comp_1', 'func_1', {'hex_field': [True, 'hex', '!eq 0x0AAAA']}))
with self.assertRaises(ValueError):
self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1', {'hex_field': 'xyz'})
result = self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1', {'hex_field_three_digits': None})
self.assertEqual(
result,
self._GenerateExpectResult('comp_1', 'func_1',
{'hex_field_three_digits': [False, 'hex']}))
result = self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1', {'hex_field_three_digits': 'B3F'})
self.assertEqual(
result,
self._GenerateExpectResult(
'comp_1', 'func_1',
{'hex_field_three_digits': [True, 'hex', '!eq 0xB3F']}))
with self.assertRaises(ValueError):
self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1', {'hex_field_three_digits': 'B3FF'})
def testGenerateProbeStatementList(self):
result = self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1', [{
'hex_field': '0AAAA'
}])
self.assertEqual(
result,
self._GenerateExpectResult('comp_1', 'func_1',
{'hex_field': [True, 'hex', '!eq 0x0AAAA']}))
result = self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1', [{
'hex_field': '0AAAA'
}, {
'str_field': 'sss'
}])
self.assertEqual(
result,
self._GenerateExpectResult('comp_1', 'func_1', [{
'hex_field': [True, 'hex', '!eq 0x0AAAA']
}, {
'str_field': [True, 'str', '!eq sss']
}]))
def testGenerateProbeStatementExtraInformation(self):
result = self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1', {
'str_field': 'sss',
'int_field': 3,
'hex_field': '0BAD'}, information={'comp_group': 'other_name'})
self.assertEqual(
result,
self._GenerateExpectResult(
'comp_1', 'func_1', {
'str_field': [True, 'str', '!eq sss'],
'int_field': [True, 'int', '!eq 3'],
'hex_field': [True, 'hex', '!eq 0x0BAD']}, information={
'comp_group': 'other_name'}))
def testGenerateProbeStatementWithArgument(self):
result = self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1', {}, probe_function_argument={'arg_1': 'aaa'})
self.assertEqual(result,
self._GenerateExpectResult('comp_1', 'func_1', {},
func_arg={'arg_1': 'aaa'}))
class ProbeConfigPayloadTest(ConcreteProbeStatementDefinitionTestBase):
def testAll(self):
p = probe_config_types.ProbeConfigPayload()
p.AddComponentProbeStatement(
self.probe_statement_definition.GenerateProbeStatement(
'comp_1', 'func_1', {'int_field': 1}))
p.AddComponentProbeStatement(
self.probe_statement_definition.GenerateProbeStatement(
'comp_2', 'func_1', {'int_field': 2}))
p.AddComponentProbeStatement(
self.probe_statement_definition.GenerateProbeStatement(
'comp_3', 'func_2', {'int_field': 3}))
with self.assertRaises(ValueError): # component name confliction
p.AddComponentProbeStatement(
self.probe_statement_definition.GenerateProbeStatement(
'comp_2', 'func_1', {'int_field': 4}))
with self.assertRaises(ValueError): # probe statement confliction.
p.AddComponentProbeStatement(
self.probe_statement_definition.GenerateProbeStatement(
'comp_4', 'func_1', {'int_field': 2}))
result = p.DumpToString()
self.assertEqual(
json_utils.LoadStr(result),
{
'category_x': {
'comp_1': {
'eval': {'func_1': {}},
'expect': {'int_field': [True, 'int', '!eq 1']}
},
'comp_2': {
'eval': {'func_1': {}},
'expect': {'int_field': [True, 'int', '!eq 2']}
},
'comp_3': {
'eval': {'func_2': {}},
'expect': {'int_field': [True, 'int', '!eq 3']}
},
}
})
class ComponentProbeStatementTest(unittest.TestCase):
def testIdenticalStatements(self):
cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', {
'eval': {
'func_1': {}
},
'expect': {
'int_field': [True, 'int', '!eq 1']
}
})
cps2 = probe_config_types.ComponentProbeStatement('category1', 'comp1', {
'eval': {
'func_1': {}
},
'expect': {
'int_field': [True, 'int', '!eq 1']
}
})
self.assertEqual(cps1.statement_hash, cps2.statement_hash)
self.assertEqual(cps1, cps2)
def testHashCompNamesDiffer(self):
cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', {
'eval': {
'func_1': {}
},
'expect': {
'int_field': [True, 'int', '!eq 1']
}
})
cps2 = probe_config_types.ComponentProbeStatement('category1', 'comp2', {
'eval': {
'func_1': {}
},
'expect': {
'int_field': [True, 'int', '!eq 1']
}
})
self.assertEqual(cps1.statement_hash, cps2.statement_hash)
self.assertNotEqual(cps1, cps2)
def testHashCategoryNamesDiffer(self):
cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', {
'eval': {
'func_1': {}
},
'expect': {
'int_field': [True, 'int', '!eq 1']
}
})
cps2 = probe_config_types.ComponentProbeStatement('category2', 'comp1', {
'eval': {
'func_1': {}
},
'expect': {
'int_field': [True, 'int', '!eq 1']
}
})
self.assertNotEqual(cps1.statement_hash, cps2.statement_hash)
self.assertNotEqual(cps1, cps2)
def testHashFunctionNamesDiffer(self):
cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', {
'eval': {
'func_1': {}
},
'expect': {
'int_field': [True, 'int', '!eq 1']
}
})
cps2 = probe_config_types.ComponentProbeStatement('category1', 'comp1', {
'eval': {
'func_2': {}
},
'expect': {
'int_field': [True, 'int', '!eq 1']
}
})
self.assertNotEqual(cps1.statement_hash, cps2.statement_hash)
self.assertNotEqual(cps1, cps2)
def testFromDictSucceed(self):
self.assertEqual(
probe_config_types.ComponentProbeStatement('category1', 'comp1', {
'eval': {
'func_1': {}
},
'expect': {
'int_field': [True, 'int', '!eq 1']
}
}),
probe_config_types.ComponentProbeStatement.FromDict({
'category1': {
'comp1': {
'eval': {
'func_1': {}
},
'expect': {
'int_field': [True, 'int', '!eq 1']
}
}
}
}))
def testFromDictValueHashMultipleCategories(self):
self.assertRaises(
ValueError, probe_config_types.ComponentProbeStatement.FromDict, {
'category1': {
'comp_name1': {
'eval': {
'func_1': {}
},
'expect': {
'int_field': [True, 'int', '!eq 1']
}
}
},
'category2': {
'comp_name1': {
'eval': {
'func_1': {}
},
'expect': {
'int_field': [True, 'int', '!eq 1']
}
}
},
})
def testFromDictCategoryNotString(self):
self.assertRaises(
ValueError, probe_config_types.ComponentProbeStatement.FromDict, {
123: {
'comp_name1': {
'eval': {
'func_1': {}
},
'expect': {
'int_field': [True, 'int', '!eq 1']
}
}
}
})
def testFromDictMultipleComponents(self):
self.assertRaises(
ValueError, probe_config_types.ComponentProbeStatement.FromDict, {
'category1': {
'comp_name1': {
'eval': {
'func_1': {}
},
'expect': {
'int_field': [True, 'int', '!eq 1']
}
},
'comp_name2': {
'eval': {
'func_1': {}
},
'expect': {
'int_field': [True, 'int', '!eq 1']
}
}
}
})
def testFromDictComponentNameNotString(self):
self.assertRaises(
ValueError, probe_config_types.ComponentProbeStatement.FromDict, {
'category1': {
3.1415926: {
'eval': {
'func_1': {}
},
'expect': {
'int_field': [True, 'int', '!eq 1']
}
}
}
})
def testFromDictMiscErrors(self):
self.assertRaises(ValueError,
probe_config_types.ComponentProbeStatement.FromDict,
{'category1': 100})
if __name__ == '__main__':
unittest.main()
| 2.09375 | 2 |
backend/submissions/migrations/0007_auto_20191119_2132.py | marcoacierno/pycon | 56 | 12795529 | # Generated by Django 2.2.7 on 2019-11-19 21:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('submissions', '0006_merge_20191113_0542'),
]
operations = [
migrations.CreateModel(
name='SubmissionTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, unique=True)),
],
),
migrations.AddField(
model_name='submission',
name='tags',
field=models.ManyToManyField(to='submissions.SubmissionTag', verbose_name='tags'),
),
]
| 1.5625 | 2 |
_static/src/python/SignalProcessing/Digital/Basic/Interpolation/demo_SincInterpolation0.py | metai/aitrace | 1 | 12795530 | import numpy as np
import matplotlib.pyplot as plt
PI = np.pi
# =========================define sinc
# ---------------normalized
def sinc1(x):
PI = np.pi
x = np.array(x)
y = np.where(np.abs(PI * x) < 1e-38, 1.0, np.sin(PI * x) / (PI * x))
return y
def sinc_interpolation(x, t, T):
ns = np.arange(x.size)
print(ns, "============")
y = []
for tt in t:
y.append(np.sum(x * sinc1((tt - ns * T) / T)))
return np.array(y)
# =========================test sinc definition
f0 = 100
Ns = 2000
Tp = 20.0 / Ns
t = np.linspace(-10, 10, Ns)
t2 = np.linspace(-10, 10, Ns * 2)
y1 = sinc1(t / Tp)
x = np.sin(2 * PI * f0 * t)
print(x.shape)
y = sinc_interpolation(x, t2, Tp)
print(y.shape, "===")
yfft = np.fft.fftshift(np.fft.fft(y))
plt.figure()
plt.subplot(131)
plt.plot(t, x, '^b')
plt.plot(t2, y, '+r')
plt.legend(['original', 'sinc interpolated'])
plt.title('sinc(t/Tp), ' + "Tp=" + str(Tp))
plt.xlabel('Time/s')
plt.ylabel('Amplitude')
plt.grid()
plt.show()
| 3.0625 | 3 |
parsec/commands/workflows/import_workflow_dict.py | abretaud/parsec | 0 | 12795531 | <gh_stars>0
import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, dict_output, _arg_split
@click.command('import_workflow_dict')
@click.argument("workflow_dict", type=str)
@pass_context
@custom_exception
@dict_output
def cli(ctx, workflow_dict):
"""Imports a new workflow given a dictionary representing a previously exported workflow.
Output:
"""
return ctx.gi.workflows.import_workflow_dict(json_loads(workflow_dict))
| 2.203125 | 2 |
RWL.py | MarcinAman/Python-AGH | 0 | 12795532 | from string import ascii_lowercase
import functools
from itertools import combinations
def generate_binary(n):
"""
    Returns a generator that yields all binary sequences of a given length
:param n: length of a binary sequence
:return: generator with binary sequence
"""
if n == 0:
yield ""
else:
for c in generate_binary(n - 1):
yield "0" + c
yield "1" + c
def find_value(zipped_list, x):
for a, b in zipped_list:
if a == x:
return b
return -1
def replace_mapping(zipped_list, x):
if x == 'T':
return 1
elif x == 'F':
return 0
elif x in (ascii_lowercase + 'TF'):
return find_value(zipped_list, x)
else:
return x
def get_variables(expression):
"""
    Filters the expression for variables and returns them.
    A variable is any lower-case character.
:param expression: expression to search in
:return: list with variables from expression
"""
variables = []
for variable in expression:
if variable in ascii_lowercase and variable not in variables:
variables.append(variable)
return variables
def calculate_onp(expression, values):
"""
Function calculates a value of an expression in reverse polish notation
:param expression: Expression in RPN given as a string.
    :param values: binary sequence with values to be put in corresponding positions. Also a string
:return: Bool value of an expression
    Warning: function will only work on a correct RPN expression and will not return any warnings in case of errors
"""
zipped_list = list(zip(get_variables(expression), list(values)))
expression = list(map(lambda x: replace_mapping(zipped_list, x), expression))
operators = {'^': lambda x, y: bool(x) ^ bool(y), '&': lambda x, y: bool(x) and bool(y),
'|': lambda x, y: bool(x) or bool(y), '/': lambda x, y: not (bool(x) and bool(y)),
'>': lambda x, y: not bool(x) or bool(y)}
stack = []
while len(expression) > 0:
if expression[0] in ['0', '1']:
stack.append(int(expression[0]))
else:
if expression[0] == '~':
top = not bool(stack.pop())
stack.append(top)
else:
e1 = int(stack.pop())
e2 = int(stack.pop())
stack.append(operators[expression[0]](e2, e1))
del expression[0]
return stack[0]
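# Illustrative check (added comment): 'ab&' is the RPN form of "a&b"; values are
# assigned to variables positionally, so calculate_onp('ab&', '11') -> True and
# calculate_onp('ab&', '10') -> False.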
def is_associative(tkn, associativity_type):
if tkn == '>' and associativity_type == 'r': # because only in case of > it matters.
return False
return True
def concat(s1, s2):
"""
Helper function to reduce expressions
    :param s1: An iterable of binary digits and '_' (e.g. a minterm string)
    :param s2: An iterable of binary digits and '_' (e.g. a minterm string)
    :return: Merged string with the single differing position replaced by '_', or False if the inputs do not differ in exactly one position
"""
w = ""
lz = 0
for z1, z2 in zip(s1, s2):
if z1 == z2:
w += z1
else:
lz += 1
w += "_"
if lz == 1:
return w
return False
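# Hedged examples: concat('10', '11') -> '1_' (the single differing bit is blanked),
# while concat('00', '11') -> False because the strings differ in two positions.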
def reduce_(s):
"""
Main reduce function
:param s: Set with values
:return: reduced set
"""
result = set()
b2 = False
for e1 in s:
b1 = False
for e2 in s:
v = concat(e1, e2)
if v:
result.add(v)
b1 = b2 = True
if not b1:
result.add(e1)
if b2:
return reduce_(result)
return result
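# Illustrative example (added comment): reduce_({'10', '11'}) -> {'1_'}, i.e. the
# minterms a&~b and a&b collapse to the single implicant "a".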
def expression_to_string(s):
"""
Helper function to change a reduced set to human-readable form
:param s: Set with values
:return: String made from input in pattern: (expression)|(expression)|(expression) or T (if expression is tautology)
"""
result2 = ""
for e1 in s:
result = ""
for i in range(0, len(e1)):
if e1[i] == '_':
continue
if e1[i] == '0':
result += '~'
result += ascii_lowercase[i] + "&"
result2 += '(' + result[:-1] + ')|'
if result2 == '()|':
return 'T'
return result2[:-1]
def trim_expression(expression):
"""
Basic expression trimming
:param expression: takes an expression which in most cases matches a pattern: (expression) and trims brackets
:return: expression with trimmed brackets
"""
e = Expression('')
while len(expression) > 2 and expression[0] == '(' and expression[-1] == ')' and e.check_expression(expression):
expression = expression[1:-1]
return expression
def reduce_tuple(expression):
"""
Function reduces a tuple of string expressions
:param expression: tuple containing expressions. We assume that they do not contain '|'
since in this case they are a product of QuineMcCluskey algorithm
:return: String containing reduced expression or the input one if further reduction was not successful
"""
expression_list = list(expression)
variables = get_variables(str.join('|', expression_list))
binary_generator = generate_binary(len(variables))
incorrect_binaries = []
some_expression = Expression('')
onp_expression = some_expression.convert_to_onp(str.join('|', expression_list))
onp_xor = some_expression.convert_to_onp(functools.reduce(lambda x, y: x + '^' + y, variables))
while True:
try:
x = binary_generator.__next__()
if calculate_onp(onp_expression, x) != calculate_onp(onp_xor, x):
incorrect_binaries.append(x)
except:
break
if len(incorrect_binaries) > 0:
return str.join('|', expression_list)
return '(' + functools.reduce(lambda x, y: x + '^' + y, variables) + ')'
def reduce_xor(expression):
"""
    Specific function to reduce xor expressions. It generates combinations of k of the sub-expressions,
    where k ranges from 2 to len(variables), and checks whether each combination is equivalent to var1 ^ var2 ^ var3 ...
:param expression: String expression to be reduced. We assume that it matches a pattern: (expr1)|(expr2)|(expr3) ...
:return: reduced expression in string form or input one if further reduction was not possible
"""
expressions_list = expression.split('|')
n = len(expressions_list)
for a in range(2, n + 1):
for expr in combinations(expressions_list, a): # i feel really bad for this
reduced_sub_expression = reduce_tuple(expr)
prev_expression = str.join('|', expr)
if len(reduced_sub_expression) < len(prev_expression):
for var in list(expr):
del expressions_list[expressions_list.index(var)]
expressions_list.append(reduced_sub_expression)
return reduce_xor(functools.reduce(lambda x, y: '|' + x + y + '|', expressions_list))
return expression
def reduce_brackets(expression):
"""
    Function that reduces unnecessary brackets. It eliminates situations where, between two '|', there is an expression that doesn't need them
example:
(expr1)|(a)|(expr2) will be evaluated to: (expr1)|a|(expr2)
:param expression: string expression in form (expr1)|(expr2)|(expr3)
:return: reduced expression
"""
expression_list = expression.split('|')
if len(expression_list) == 1:
return trim_expression(expression_list[0])
reduced_expressions = []
for some in expression_list:
if len(some) <= 4:
# we are sure that there will be 2 brackets + we want 1 variable (or variable + negation)
reduced_expressions.append(trim_expression(some))
else:
reduced_expressions.append(some)
return str.join('|', reduced_expressions)
def reduce_logical_expression(expression):
"""
    Main function responsible for driving the program.
    It checks that the expression is correct and then reduces it
:param expression: String expression to be reduced
:return: reduced expression or ERROR if it is not correct
"""
expression_object = Expression(expression)
if not expression_object.check_expression():
return 'ERROR'
expression_in_general_form = expression_object.generate_general_form()
expression_with_xor = reduce_brackets(reduce_xor(expression_in_general_form))
if len(expression_with_xor) < len(expression):
return expression_with_xor
e = reduce_brackets(expression_in_general_form)
if len(e) < len(expression):
return e
return reduce_brackets(expression)
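# Hedged usage example: reduce_logical_expression('a&b|a&~b') returns the shorter
# equivalent form 'a', while malformed input such as 'a&' returns 'ERROR'.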
class Expression:
"""
Class designed to handle most of expression operations.
It contains map with bindings:
<operator> -> (priority,arguments_number)
Also string with correct signs and expression itself
"""
def __init__(self, expression):
self.general_form = ''
self.correctSigns = '~^&|/>()TF' + ascii_lowercase
self.expression = expression.replace(' ', '')
self.operators = {'~': (4, 1), '^': (3, 2), '&': (2, 2), '|': (2, 2), '/': (2, 2),
'>': (1, 2)} # <operator> -> (priority,arguments_number)
def check_if_brackets_are_correct(self, expression=''):
"""
Helper function to determine whether brackets are placed correctly
:param expression: expression in String form
:return: Bool result of brackets checking
"""
if not expression:
expression = self.expression
brackets = 0
for a in expression:
if a == '(':
brackets += 1
elif a == ')':
brackets -= 1
if brackets < 0:
return False
if brackets == 0:
return True
return False
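        # Added note: '(a&b)' keeps a non-negative running bracket count that ends at
        # zero (True), while 'a)(' dips below zero and is rejected (False).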
def check_if_signs_are_correct(self, expression=''):
"""
Simple filter function that checks if expression contains correct signs and is semantically correct
:param expression: String expression to be checked
:return: Bool result
"""
if not expression:
expression = self.expression
if not expression:
return True
if [x for x in expression if x not in self.correctSigns]:
return False
state = True
for single in expression:
if state:
if single in self.operators and self.operators[single][1] == 1 or single in ['(', ')']: # we want ~
# we ignore brackets since they are already checked
continue
elif single in (ascii_lowercase + 'TF'):
state = False
else:
return False
else:
if single in self.operators and self.operators[single][1] == 2: # everything else than ~
state = True
elif single in ['(', ')']:
continue
else:
return False
return not state
def check_expression(self, expression=''):
"""
Higher level interface for checking expression
It calls methods to determine whether expression is correct semantically, in terms of brackets and signs
:param expression: String expression to check
:return: Bool result
"""
if not expression:
expression = self.expression
return self.check_if_signs_are_correct(expression) and self.check_if_brackets_are_correct(expression)
def convert_to_onp(self, expression=''):
"""
Function converts an infix expression to RPN
        Warning: it doesn't check whether the expression is correct
:param expression: Infix expression
:return: RPN expression
"""
if not expression:
expression = self.expression
stack = []
onp = []
for tkn in expression:
if tkn in self.operators:
while len(stack) > 0 and stack[-1] in self.operators:
if (is_associative(tkn, 'l') and (self.operators[tkn][0] - self.operators[stack[-1]][0]) <= 0) \
or (
is_associative(tkn, 'r') and (self.operators[tkn][0] - self.operators[stack[-1]][0]) < 0):
onp.append(stack.pop())
continue
break
stack.append(tkn)
elif tkn == '(':
stack.append(tkn)
elif tkn == ')':
while len(stack) > 0 and stack[-1] != '(':
onp.append(stack.pop())
stack.pop()
else:
onp.append(tkn)
while len(stack) > 0:
onp.append(stack.pop())
return functools.reduce(lambda x, y: x + y, onp)
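        # Hedged example: Expression('a&b|c').convert_to_onp() yields 'ab&c|'
        # (& and | share priority 2, so operators pop left-to-right).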
def generate_general_form(self, expression=''):
"""
Function generates general form from infix expression
It uses QuineMcCluskey algorithm
Result matches a pattern: (expression1)|(expression2)|(expression3)...
:param expression: Infix expression as a String
:return: String infix expression evaluated using QuineMcCluskey
"""
if not expression:
expression = self.expression
n = len(get_variables(expression))
correct_binaries = []
generator = generate_binary(n)
current_expression = self.convert_to_onp(expression)
while True:
try:
x = generator.__next__()
if calculate_onp(current_expression, x):
correct_binaries.append(x)
except:
break
set2 = reduce_(correct_binaries)
self.general_form = expression_to_string(set2)
return self.general_form
if __name__ == '__main__':
x = None
while not x:
x = input('')
if x:
print(reduce_logical_expression(x))
else:
break
| 4.21875 | 4 |
python-ca/bjorn/migrations/0010_alter_certificate_revocation_reason.py | AS207960/bjorn | 4 | 12795533 | # Generated by Django 3.2.6 on 2021-09-08 14:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bjorn', '0009_auto_20210908_1427'),
]
operations = [
migrations.AlterField(
model_name='certificate',
name='revocation_reason',
field=models.PositiveSmallIntegerField(blank=True, choices=[(1, 'Unspecified'), (2, 'Key compromise'), (3, 'CA compromise'), (4, 'Affiliation changed'), (5, 'Superseded'), (6, 'Cessation of operation'), (7, 'Certificate hold'), (8, 'Remove from CRL'), (9, 'Privilege withdrawn'), (10, 'AA compromise')], null=True),
),
]
| 1.53125 | 2 |
codershq/users/migrations/0004_auto_20210805_1832.py | Buhannad/CodersHQ | 45 | 12795534 | <filename>codershq/users/migrations/0004_auto_20210805_1832.py<gh_stars>10-100
# Generated by Django 3.0.11 on 2021-08-05 14:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0003_auto_20210805_1818'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='cv',
),
migrations.AlterField(
model_name='user',
name='bio',
field=models.TextField(blank=True, max_length=500, verbose_name='Bio'),
),
migrations.AlterField(
model_name='user',
name='name',
field=models.CharField(blank=True, max_length=255, verbose_name='Enter your name'),
),
]
| 1.679688 | 2 |
pypkgcreator/__init__.py | somtud/create-python-package | 1 | 12795535 | <reponame>somtud/create-python-package<filename>pypkgcreator/__init__.py<gh_stars>1-10
#!/usr/bin/env python
__version__ = 'v0.0.1'
| 1.273438 | 1 |
tables/wikipedia-scripts/weblib/web.py | yash-srivastava19/sempre | 812 | 12795536 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib, urllib2, urlparse, socket
import json, sys, os, hashlib, subprocess, time
from blacklist import BLACKLIST
BASEDIR = os.path.dirname(os.path.realpath(os.path.join(__file__, '..')))
class WebpageCache(object):
def __init__(self, basedir=BASEDIR, dirname='web.cache', log=True, timeout=15):
self.cachePath = os.path.join(basedir, dirname)
if not os.path.exists(self.cachePath):
os.mkdir(self.cachePath)
self.log = log
self.cache_miss = False
self.timeout = timeout
def get_hashcode(self, url):
return hashlib.sha1(url).hexdigest()
def get_path(self, url, already_hashed=False):
if not already_hashed:
url = self.get_hashcode(url)
return os.path.join(self.cachePath, url)
def get_current_datetime(self):
return time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
def open_in_browser(self, hashcode, browser="firefox"):
path = os.path.join(self.cachePath, hashcode)
subprocess.call([browser, path])
def comment(self, url):
return ' '.join(('<!--', urllib.quote(url),
self.get_current_datetime(), '-->\n'))
def read(self, url, already_hashed=False):
path = self.get_path(url, already_hashed)
if os.path.exists(path):
with open(path) as fin:
error = False
check_url = fin.readline().strip()
if check_url == 'ERROR':
error = True
error_message = fin.readline().strip()
check_url = fin.readline()
if not already_hashed:
tokens = check_url.split()
assert len(tokens) > 2 and tokens[1] == urllib.quote(url), path
if error:
return WebLoadingError(error_message)
else:
return fin.read()
def write(self, url, content, already_hashed=False):
path = self.get_path(url, already_hashed)
with open(path, 'w') as fout:
fout.write(self.comment(url))
fout.write(content)
def write_error(self, url, error, already_hashed=False):
path = self.get_path(url, already_hashed)
with open(path, 'w') as fout:
fout.write('ERROR\n')
fout.write(error.replace('\n', ' ') + '\n')
fout.write(self.comment(url))
def get_page(self, url, force=False, check_html=True):
result = self.read(url)
if result and not force:
self.cache_miss = False
if isinstance(result, WebLoadingError):
if self.log:
print >> sys.stderr, '[ERROR]', result
result = None
else:
self.cache_miss = True
try:
if self.log:
print >> sys.stderr, 'Downloading from', url, '...'
# Check blacklist
parsed_url = urlparse.urlparse(url)
if parsed_url.netloc in BLACKLIST:
raise WebLoadingError('URL %s in blacklist' % url)
# Open web page
opener = urllib2.build_opener()
opener.addheaders = [
('User-agent',
'Mozilla/5.0 (compatible; MSIE 7.0; Windows NT 6.0)')]
response = opener.open(url, timeout=self.timeout)
# Check content type to prevent non-HTML
content_type = response.info().type
if check_html and content_type != 'text/html':
raise WebLoadingError("Non-HTML response: %s" %
content_type)
result = response.read()
self.write(url, result)
except Exception, e:
if self.log:
print >> sys.stderr, '[ERROR] ', e
if isinstance(e, (WebLoadingError, urllib2.URLError, socket.error)):
self.write_error(url, str(e.message))
result = None
if self.log:
if self.cache_miss:
print >> sys.stderr, 'Retrieved "%s"' % url
else:
print >> sys.stderr, ('Loaded "%s" from cache (%s)' %
(url, self.get_path(url)))
return result
################################################################
# GOOGLE SUGGEST
GOOGLE_SUGGEST_URL = 'http://suggestqueries.google.com/complete/search?client=firefox&q='
def get_google_suggest_url(self, before, after=''):
answer = self.GOOGLE_SUGGEST_URL + urllib.quote(before) + urllib.quote(after)
if after:
answer += '&cp=' + str(len(before))
return answer
def get_from_google_suggest(self, before, after=''):
url = self.get_google_suggest_url(before, after)
return json.loads(self.get_page(url, check_html=False))[1]
################################################################
# GOOGLE SEARCH -- old API
# The important fields of each result are
# - url (+ unescapedUrl, visibleUrl, cacheUrl)
# - titleNoFormatting (+ title)
# - content
GOOGLE_SEARCH_URL = 'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&rsz=large&q='
def get_google_search_url(self, keyword):
answer = self.GOOGLE_SEARCH_URL + urllib.quote(keyword)
return answer
def get_from_google_search(self, keyword, raw=False):
url = self.get_google_search_url(keyword)
result = self.get_page(url, check_html=False)
if raw:
return result
return json.loads(result)
def get_urls_from_google_search(self, keyword):
results = self.get_from_google_search(keyword)['responseData']['results']
return [(x['unescapedUrl'], x['titleNoFormatting']) for x in results]
GOOGLE_PAUSE = 30
def get_from_google_search_with_backoff(self, keyword):
url = self.get_google_search_url(keyword)
result = self.get_page(url, check_html=False)
while True:
try:
return json.loads(result)['responseData']['results']
except:
# Google nailed me! Exponential backoff!
print >> sys.stderr, ('Hide from Google for %d seconds ...' %
WebpageCache.GOOGLE_PAUSE)
time.sleep(WebpageCache.GOOGLE_PAUSE)
WebpageCache.GOOGLE_PAUSE *= 2
result = self.get_page(url, check_html=False, force=True)
def get_urls_from_google_search_with_backoff(self, keyword):
results = self.get_from_google_search_with_backoff(keyword)
return [(x['unescapedUrl'], x['titleNoFormatting']) for x in results]
################################################################
# GOOGLE SEARCH -- Custom Search
CUSTOM_GOOGLE_SEARCH_URL = 'https://www.googleapis.com/customsearch/'\
'v1?key=%s&cx=%s&alt=json&safe=high&q=%s'
def set_google_custom_search_keys(self, api_key, cx):
self.api_key = api_key
self.cx = cx
def get_google_custom_search_url(self, keyword):
answer = self.CUSTOM_GOOGLE_SEARCH_URL % \
(self.api_key, self.cx, urllib.quote(keyword))
return answer
def get_from_google_custom_search(self, keyword, raw=False):
url = self.get_google_custom_search_url(keyword)
answer = self.get_page(url, check_html=False)
if raw:
return answer
return json.loads(answer)
def get_urls_from_google_custom_search(self, keyword):
results = self.get_from_google_custom_search(keyword)['items']
return [(x['link'], x.get('title', '')) for x in results]
def get_urls_from_google_hybrid_search(self, keyword):
'''Return (cache_path, results)'''
old_url = self.get_google_search_url(keyword)
result = self.read(old_url)
if result and not isinstance(result, WebLoadingError):
# Found result in cache
try:
results = json.loads(result)['responseData']['results']
return (self.get_path(old_url),
[(x['unescapedUrl'], x['titleNoFormatting'])
for x in results])
except:
# Stale bad cache ...
pass
# Use Custom search
return (self.get_path(self.get_google_custom_search_url(keyword)),
self.get_urls_from_google_custom_search(keyword))
class WebLoadingError(Exception):
def __init__(self, msg):
self.args = (msg,)
self.msg = msg
self.message = msg
| 2.359375 | 2 |
Toonland-2013-master/toonland/security/HackerCrypt.py | BarcodeALT/Toonland-2003 | 0 | 12795537 | ########################## THE TOON LAND PROJECT ##########################
# Filename: HackerCrypt.py
# Created by: Cody/Fd Green Cat Fd (January 31st, 2013)
####
# Description:
#
# Encryption method written by Team FD in 2011 for their personal releases.
# The script has been modified to meet Toon Land's coding standards.
####
from base64 import b64encode, b64decode
from binascii import hexlify, unhexlify
from random import randrange
from __main__ import __dict__ as __main__
from bz2 import compress as c_bz2
from bz2 import decompress as d_bz2
from zlib import compress as c_zlib
from zlib import decompress as d_zlib
from sha import sha as sha1
class HackerCrypt:
__version__ = 'v1.2.0.2'
def __init__(self):
self.MAGIC = sha1('[TL]').digest()
self.KEY = sha1('TL-Cookies').digest()
def makeIV(self):
iv = ''
for i in range(4):
iv += chr(randrange(256))
return iv
def rc4(self, data, key):
j = 0
s = range(256)
for i in range(256):
j = (j + s[i] + ord(key[i % len(key)])) % 256
s[i], s[j] = s[j], s[i]
j = i = 0
results = []
for c in data:
j = (j + 1) % 256
i = (i + s[j]) % 256
s[j], s[i] = s[i], s[j]
results.append(chr(ord(c) ^ s[(s[j] + s[i]) % 256]))
return ''.join(results)
def encode(self, data):
b64 = b64encode(data)
hex = hexlify(b64)
encoded = list(hexlify(hex))
for x in range(len(encoded)):
alpha = int(encoded[x]) + 2
encoded[x] = chr(alpha)
return ''.join(encoded)
def decode(self, encoded):
encoded = list(encoded)
for x in range(len(encoded)):
alpha = str(encoded[x])
encoded[x] = str(ord(alpha) - 2)
encoded = unhexlify(''.join(encoded))
unhexed = unhexlify(encoded)
return b64decode(unhexed)
def compress(self, data):
bz2 = b64encode(c_bz2(data))
return c_zlib(hexlify(bz2))
def decompress(self, compressed):
unhexed = unhexlify(d_zlib(compressed))
return d_bz2(b64decode(unhexed))
def encrypt(self, data):
compressed = self.compress(data)
encoded = self.encode(compressed)
data = self.MAGIC + encoded
iv = self.makeIV()
key = self.KEY + iv
return iv + self.rc4(data, key)
def decrypt(self, encrypted):
if len(encrypted) < 4:
return None
iv = encrypted[:4]
data = encrypted[4:]
key = self.KEY + iv
data = self.rc4(data, key)
if not data.startswith(self.MAGIC):
return None
decoded = self.decode(data[len(self.MAGIC):])
return self.decompress(decoded) | 2.546875 | 3 |
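# --- Illustrative round-trip sketch (added commentary, not part of the original file).
# encrypt() compresses, encodes and RC4-encrypts a payload behind a random 4-byte IV;
# decrypt() reverses the process and returns None when the MAGIC prefix does not match.
#
# crypt = HackerCrypt()
# blob = crypt.encrypt('some cookie payload')
# assert crypt.decrypt(blob) == 'some cookie payload'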
core/management/commands/advance_turn.py | johnpooch/diplomacy | 1 | 12795538 | <filename>core/management/commands/advance_turn.py
import json
from django.core.management.base import BaseCommand, CommandError
from core import models
from core.game import process_turn
from core.models.base import GameStatus
from . import DiplomacyManagementCommandMixin
class Command(BaseCommand, DiplomacyManagementCommandMixin):
@property
def help(self):
        return 'Advance the given game to its next turn'
def add_arguments(self, parser):
parser.add_argument(
'game',
type=str,
help='Slug of the game to advance',
)
parser.add_argument(
'--no_input',
action='store_true',
help='Skip prompt.',
)
parser.add_argument(
'--dry_run',
action='store_true',
help='Do not advance turn - show outcome of adjudicator.',
)
def handle(self, *args, **options):
slug = options['game']
self.noinput = options['no_input']
dry_run = options['dry_run']
try:
game = models.Game.objects.get(slug=slug)
except models.Game.DoesNotExist:
raise CommandError(
'Could not find a game with the slug "{}"'.format(slug)
)
if game.status != GameStatus.ACTIVE:
raise CommandError(
'Cannot advance turn on an inactive game'
)
turn = game.get_current_turn()
if turn.game.status != GameStatus.ACTIVE:
            raise CommandError('Cannot advance turn if game is not active.')
if not turn.ready_to_process:
self.stdout.write('Not all nations have finalized their orders\n')
self.prompt()
result = process_turn(turn, dry_run)
if dry_run:
pretty_output = json.dumps(result, sort_keys=True, indent=4)
self.stdout.write(pretty_output)
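# --- Illustrative usage sketch (added; not part of the original command). The command
# is normally run from the shell ("python manage.py advance_turn <slug> [--dry_run]
# [--no_input]"), but it can also be driven from code via Django's call_command.
# The game slug below is a placeholder.
#
# from django.core.management import call_command
# call_command('advance_turn', 'example-game', dry_run=True, no_input=True)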
| 2.140625 | 2 |
dominant_colour.py | tawilkinson/dominant-colour | 0 | 12795539 | <reponame>tawilkinson/dominant-colour
import numpy as np
import time
from cv2 import cv2
from sklearn.cluster import KMeans
from skimage import io
from skimage.transform import rescale
def cv2_dominant_colour(img_url, colours=10, timing=False):
'''
Dominant Colour method using open cv, based on
https://stackoverflow.com/a/43111221/2523885
'''
if timing:
start = time.perf_counter()
tic = time.perf_counter()
img = io.imread(img_url)
pixels = np.float32(img.reshape(-1, 3))
if timing:
toc = time.perf_counter()
print(f"Loaded the image in {toc - tic:0.2f}s")
if timing:
tic = time.perf_counter()
n_colours = colours
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 200, .1)
flags = cv2.KMEANS_RANDOM_CENTERS
_, labels, centroid = cv2.kmeans(
pixels, n_colours, None, criteria, 10, flags)
labels = labels.flatten().tolist()
_, counts = np.unique(labels, return_counts=True)
if timing:
toc = time.perf_counter()
print(f"KMeans calculation in {toc - tic:0.2f}s")
if timing:
tic = time.perf_counter()
dominant = centroid[np.argmax(counts)]
if timing:
toc = time.perf_counter()
print(f"Dominant selection in {toc - tic:0.2f}s")
    # total_time is only measured when timing is enabled; default it so the
    # return statement never references an undefined name
    total_time = None
    if timing:
        end = time.perf_counter()
        total_time = end - start
        print(f"cv2_dominant_colour execution in {total_time:0.2f}s")
    return dominant, labels, centroid, total_time
def sklearn_dominant_colour(img_url, colours=10, timing=False):
'''
Dominant Colour method using sklearn, based on:
https://medium.com/analytics-vidhya/colour-separation-in-an-image-using-kmeans-clustering-using-python-f994fa398454
'''
if timing:
start = time.perf_counter()
tic = time.perf_counter()
img = io.imread(img_url)
img = img.reshape((-1, 3))
if timing:
toc = time.perf_counter()
print(f"Loaded the image in {toc - tic:0.2f}s")
if timing:
tic = time.perf_counter()
cluster = KMeans(n_clusters=colours)
cluster.fit(img)
if timing:
toc = time.perf_counter()
print(f"KMeans calculation in {toc - tic:0.2f}s")
labels = cluster.labels_
labels = list(labels)
centroid = cluster.cluster_centers_
if timing:
tic = time.perf_counter()
percent = []
for i in range(len(centroid)):
j = labels.count(i)
j = j/(len(labels))
percent.append(j)
if timing:
toc = time.perf_counter()
print(f"Percentage calculation in {toc - tic:0.2f}s")
indices = np.argsort(percent)[::-1]
dominant = centroid[indices[0]]
    # As above, default total_time so the return works when timing is disabled
    total_time = None
    if timing:
        end = time.perf_counter()
        total_time = end - start
        print(
            f"sklearn_dominant_colour execution in {total_time:0.2f}s")
    return dominant, labels, centroid, total_time
def fast_dominant_colour(img_url, colours=10, timing=False, scale=1.0):
'''
Faster method for web use that speeds up the sklearn variant.
Also can use a scaling factor to improve the speed at cost of
accuracy
'''
if timing:
start = time.perf_counter()
tic = time.perf_counter()
img = io.imread(img_url)
if scale != 1.0:
img = rescale(img, scale, multichannel=True)
img = img * 255
img = img.reshape((-1, 3))
if timing:
toc = time.perf_counter()
print(f"Loaded the image in {toc - tic:0.2f}s")
if timing:
tic = time.perf_counter()
cluster = KMeans(n_clusters=colours, n_init=3, max_iter=10, tol=0.001)
cluster.fit(img)
if timing:
toc = time.perf_counter()
print(f"KMeans calculation in {toc - tic:0.2f}s")
labels = cluster.labels_
centroid = cluster.cluster_centers_
if timing:
tic = time.perf_counter()
percent = []
_, counts = np.unique(labels, return_counts=True)
for i in range(len(centroid)):
j = counts[i]
j = j/(len(labels))
percent.append(j)
if timing:
toc = time.perf_counter()
print(f"Percentage calculation in {toc - tic:0.2f}s")
indices = np.argsort(percent)[::-1]
dominant = centroid[indices[0]]
    # As above, default total_time so the return works when timing is disabled
    total_time = None
    if timing:
        end = time.perf_counter()
        total_time = end - start
        print(f"fast_dominant_colour execution in {total_time:0.2f}s")
    return dominant, labels, centroid, total_time
def visualise_colours(labels, centroids):
'''
Generate a visualisation of the colours in an image
'''
# Get the number of different clusters, create histogram, and normalise
sorted_labels = np.arange(0, len(np.unique(labels)) + 1)
(hist, _) = np.histogram(labels, bins=sorted_labels)
hist = hist.astype("float")
hist /= hist.sum()
# Create frequency rect and iterate through each cluster's colour
# and percentage
rect = np.zeros((50, 300, 3), dtype=np.uint8)
colours = sorted(zip(hist, centroids))
start = 0
for (percent, colour) in colours:
print(f"[{clamp(colour[0])}, {clamp(colour[0])}, {clamp(colour[0])}] ",
"{:0.2f}%".format(percent * 100))
end = start + (percent * 300)
cv2.rectangle(rect, (int(start), 0), (int(end), 50),
colour.astype("uint8").tolist(), -1)
start = end
return rect
def clamp(x):
'''
Utility function to return ints from 0-255
'''
return int(max(0, min(x, 255)))
def get_rgb_colour(img_url, debug=False):
'''
    Method to print a hex string and return an rgb tuple of the
    dominant colour in an image
    '''
    # fast_dominant_colour returns (dominant, labels, centroid, total_time);
    # only the dominant colour triple is needed here
    dominant_colour, _, _, _ = fast_dominant_colour(img_url, scale=0.1)
    r = dominant_colour[0]
    g = dominant_colour[1]
    b = dominant_colour[2]
if debug:
hex_str = "#{0:02x}{1:02x}{2:02x}".format(clamp(r), clamp(g), clamp(b))
print(f'{hex_str}')
rgb_colour = (clamp(r), clamp(g), clamp(b))
return rgb_colour
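# --- Illustrative usage sketch (added; not part of the original script). The image
# path is a placeholder; any local file or URL readable by skimage.io should work.
# Cluster counts and the scale factor are just example values.
if __name__ == '__main__':
    example_image = 'example.jpg'  # placeholder path
    # Fast, scaled-down estimate of the dominant colour as an (r, g, b) tuple
    print(get_rgb_colour(example_image, debug=True))
    # Compare the three implementations with timing output
    cv2_dominant_colour(example_image, colours=5, timing=True)
    sklearn_dominant_colour(example_image, colours=5, timing=True)
    fast_dominant_colour(example_image, colours=5, timing=True, scale=0.5)
    # Visualise the cluster palette from the fast method and save it to disk
    _, labels, centroids, _ = fast_dominant_colour(example_image)
    rect = visualise_colours(labels, centroids)
    cv2.imwrite('palette.png', rect)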
| 3 | 3 |
ingest/tools/cb.py | ivmfnal/striped | 1 | 12795540 | <filename>ingest/tools/cb.py
import json, getopt, sys
import numpy as np
from striped.client import CouchBaseBackend
Usage = """
python cb.py get [-j] [-o <file>|-d <dtype>] <bucket> <key>
-n means show data as numpy array of given dtype and shape
python cb.py put [-j] [-f <file>|-d <data>] <bucket> <key>
"""
if not sys.argv[1:]:
print(Usage)
sys.exit(1)
cmd = sys.argv[1]
args = sys.argv[2:]
if cmd == "get":
show_as_np = False
opts, args = getopt.getopt(args, "d:jo:")
opts = dict(opts)
dtype = opts.get("-d")
out_file = opts.get("-o")
json_data = "-j" in opts
Bucket, Key = args
cb = CouchBaseBackend(Bucket)
if json_data:
data = cb[Key].json
out = json.dumps(data, indent=4, sort_keys=True, separators=(',', ': '))
out_file = open(out_file, "w") if out_file else sys.stdout
out_file.write(out)
else:
data = cb[Key].data
if out_file:
open(out_file, "wb").write(data)
elif dtype:
data = np.frombuffer(data, dtype=dtype)
print(data.shape, data.dtype, data)
else:
print(len(data), repr(data[:100]))
elif cmd == "put":
opts, args = getopt.getopt(args, "jf:d:")
opts = dict(opts)
Bucket, Key = args
json_in = "-j" in opts
data = None
if "-d" in opts:
data = opts["-d"]
else:
data = open(opts["-f"], "rb").read()
if json_in:
data = json.loads(data)
cb = CouchBaseBackend(Bucket)
if json_in:
cb[Key].json = data
else:
cb[Key].data = data
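# --- Illustrative invocations (added; bucket and key names are placeholders).
# Options must precede the positional <bucket> <key> arguments because getopt
# stops at the first non-option token.
#
# python cb.py get -j mybucket some/key            # pretty-print a JSON document
# python cb.py get -d float64 mybucket some/key    # interpret binary data as float64
# python cb.py put -f payload.bin mybucket some/key  # upload raw bytes from a file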
| 2.4375 | 2 |
rules/private/phases/phase_coda.bzl | hmemcpy/rules_scala-1 | 0 | 12795541 | #
# PHASE: coda
#
# Creates the final rule return structure
#
def phase_coda(ctx, g):
return struct(
java = g.ijinfo.intellij_info,
providers = g.out.providers,
)
| 1.804688 | 2 |
scrapli_cfg/platform/core/arista_eos/base_platform.py | m1009d/scrapli_cfg | 15 | 12795542 | """scrapli_cfg.platform.core.arista_eos.base"""
import json
import re
from datetime import datetime
from logging import LoggerAdapter
from typing import Iterable, List, Tuple, Union
from scrapli.driver import AsyncNetworkDriver, NetworkDriver
from scrapli.response import Response
from scrapli_cfg.exceptions import ScrapliCfgException
from scrapli_cfg.platform.core.arista_eos.patterns import (
BANNER_PATTERN,
END_PATTERN,
GLOBAL_COMMENT_LINE_PATTERN,
VERSION_PATTERN,
)
from scrapli_cfg.response import ScrapliCfgResponse
CONFIG_SOURCES = [
"running",
"startup",
]
class ScrapliCfgEOSBase:
conn: Union[NetworkDriver, AsyncNetworkDriver]
logger: LoggerAdapter
config_sources: List[str]
config_session_name: str
candidate_config: str
@staticmethod
def _parse_version(device_output: str) -> str:
"""
Parse version string out of device output
Args:
device_output: output from show version command
Returns:
str: device version string
Raises:
N/A
"""
version_string_search = re.search(pattern=VERSION_PATTERN, string=device_output)
if not version_string_search:
return ""
version_string = version_string_search.group(0) or ""
return version_string
@staticmethod
def _parse_config_sessions(device_output: str) -> List[str]:
"""
Parse config session names out of device output
Args:
device_output: output from show version command
Returns:
list[str]: config session names
Raises:
N/A
"""
try:
config_session_dict = json.loads(device_output)
except json.JSONDecodeError:
return []
sessions = list(config_session_dict.get("sessions", {}))
return sessions
@staticmethod
def _get_config_command(source: str) -> str:
"""
Return command to use to get config based on the provided source
Args:
source: name of the config source, generally running|startup
Returns:
str: command to use to fetch the requested config
Raises:
N/A
"""
if source == "running":
return "show running-config"
return "show startup-config"
@staticmethod
def _prepare_config_payloads(config: str) -> Tuple[str, str]:
"""
Prepare a configuration so it can be nicely sent to the device via scrapli
Args:
config: configuration to prep
Returns:
tuple: tuple of "normal" config lines and "eager" config lines
Raises:
N/A
"""
# remove comment lines
config = re.sub(pattern=GLOBAL_COMMENT_LINE_PATTERN, repl="!", string=config)
# remove "end" at the end of config if present - if its present it will drop scrapli out
# of the config session which we do not want
config = re.sub(pattern=END_PATTERN, repl="!", string=config)
# find all sections that need to be "eagerly" sent
eager_config = re.findall(pattern=BANNER_PATTERN, string=config)
for eager_section in eager_config:
config = config.replace(eager_section, "!")
joined_eager_config = "\n".join(captured_section for captured_section in eager_config)
return config, joined_eager_config
def _prepare_load_config_session_and_payload(self, config: str) -> Tuple[str, str, bool]:
"""
Prepare the normal and eager payloads and decide if we need to register a config session
Args:
config: candidate config to load
Returns:
tuple: tuple containing "normal" config elements to send to the device and "eager" mode
config elements to send to the device (things like banners/macro that require
scrapli "eager=True"), and lastly a bool indicating if the config session needs to
be registered on the device
Raises:
N/A
"""
config, eager_config = self._prepare_config_payloads(config=config)
register_config_session = False
if not self.config_session_name:
self.config_session_name = f"scrapli_cfg_{round(datetime.now().timestamp())}"
self.logger.debug(f"configuration session name will be '{self.config_session_name}'")
register_config_session = True
return config, eager_config, register_config_session
def _reset_config_session(self) -> None:
"""
Reset config session info
Resets the candidate config and config session name attributes -- when these are "empty" we
know there is no current config session
Args:
N/A
Returns:
None
Raises:
N/A
"""
self.logger.debug("resetting candidate config and config session name")
self.candidate_config = ""
self.config_session_name = ""
def _normalize_source_candidate_configs(self, source_config: str) -> Tuple[str, str]:
"""
Normalize candidate config and source config so that we can easily diff them
Args:
source_config: current config of the source config store
Returns:
            tuple: tuple of the normalized source config and candidate config strings
Raises:
N/A
"""
self.logger.debug("normalizing source and candidate configs for diff object")
        # Remove all comment lines from both the source and candidate configs -- this is only done
        # here, pre-diff, so we don't modify the user-provided candidate config, which may contain
        # comment lines. Only "global" (top-level) comments are removed; user comments attached to
        # interfaces and similar sections remain.
source_config = re.sub(pattern=GLOBAL_COMMENT_LINE_PATTERN, string=source_config, repl="")
source_config = "\n".join(line for line in source_config.splitlines() if line)
candidate_config = re.sub(
pattern=GLOBAL_COMMENT_LINE_PATTERN, string=self.candidate_config, repl=""
)
candidate_config = "\n".join(line for line in candidate_config.splitlines() if line)
return source_config, candidate_config
def _pre_clear_config_sessions(self) -> ScrapliCfgResponse:
"""
Handle pre "clear_config_sessions" operations for parity between sync and async
Args:
N/A
Returns:
            ScrapliCfgResponse: new response object to update with the clear results
Raises:
N/A
"""
self.logger.info("clear_config_sessions requested")
response = ScrapliCfgResponse(
host=self.conn.host, raise_for_status_exception=ScrapliCfgException
)
return response
def _post_clear_config_sessions(
self,
response: ScrapliCfgResponse,
scrapli_responses: Iterable[Response],
) -> ScrapliCfgResponse:
"""
Handle post "clear_config_sessions" operations for parity between sync and async
Args:
response: response object to update
            scrapli_responses: list of scrapli response objects from clearing the config session(s)
Returns:
            ScrapliCfgResponse: response object with the outcome of the clear operation recorded
                as the `result` attribute
Raises:
N/A
"""
response.record_response(scrapli_responses=scrapli_responses)
if response.failed:
msg = "failed to clear device configuration session(s)"
self.logger.critical(msg)
response.result = msg
else:
response.result = "configuration session(s) cleared"
return response
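# --- Illustrative sketch (added commentary, not part of the library source).
# Because _prepare_config_payloads is a staticmethod, the banner/"end" handling it
# performs can be exercised on its own. Assuming the sample banner below matches
# BANNER_PATTERN, the "normal" result has the banner and trailing "end" replaced
# with "!", while "eager" carries the banner block that must be sent with eager=True.
#
# sample = "hostname demo\nbanner motd\nAuthorized access only!\nEOF\nend\n"
# normal, eager = ScrapliCfgEOSBase._prepare_config_payloads(sample)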
| 1.859375 | 2 |
backend/lib/google/cloud/grpc/datastore/v1/datastore_pb2_grpc.py | isaiah-solo/Droptalk | 0 | 12795543 | <reponame>isaiah-solo/Droptalk<gh_stars>0
import grpc
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2
import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2
import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2
import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2
import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2
import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2
import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2
import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2
import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2
import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2
import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2
import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2
class DatastoreStub(object):
"""Each RPC normalizes the partition IDs of the keys in its input entities,
and always returns entities with keys with normalized partition IDs.
This applies to all keys and entities, including those in values, except keys
with both an empty path and an empty or unset partition ID. Normalization of
input keys sets the project ID (if not already set) to the project ID from
the request.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Lookup = channel.unary_unary(
'/google.datastore.v1.Datastore/Lookup',
request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupResponse.FromString,
)
self.RunQuery = channel.unary_unary(
'/google.datastore.v1.Datastore/RunQuery',
request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryResponse.FromString,
)
self.BeginTransaction = channel.unary_unary(
'/google.datastore.v1.Datastore/BeginTransaction',
request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionResponse.FromString,
)
self.Commit = channel.unary_unary(
'/google.datastore.v1.Datastore/Commit',
request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitResponse.FromString,
)
self.Rollback = channel.unary_unary(
'/google.datastore.v1.Datastore/Rollback',
request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackResponse.FromString,
)
self.AllocateIds = channel.unary_unary(
'/google.datastore.v1.Datastore/AllocateIds',
request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsResponse.FromString,
)
class DatastoreServicer(object):
"""Each RPC normalizes the partition IDs of the keys in its input entities,
and always returns entities with keys with normalized partition IDs.
This applies to all keys and entities, including those in values, except keys
with both an empty path and an empty or unset partition ID. Normalization of
input keys sets the project ID (if not already set) to the project ID from
the request.
"""
def Lookup(self, request, context):
"""Looks up entities by key.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RunQuery(self, request, context):
"""Queries for entities.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def BeginTransaction(self, request, context):
"""Begins a new transaction.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Commit(self, request, context):
"""Commits a transaction, optionally creating, deleting or modifying some
entities.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Rollback(self, request, context):
"""Rolls back a transaction.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AllocateIds(self, request, context):
"""Allocates IDs for the given keys, which is useful for referencing an entity
before it is inserted.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_DatastoreServicer_to_server(servicer, server):
rpc_method_handlers = {
'Lookup': grpc.unary_unary_rpc_method_handler(
servicer.Lookup,
request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupRequest.FromString,
response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupResponse.SerializeToString,
),
'RunQuery': grpc.unary_unary_rpc_method_handler(
servicer.RunQuery,
request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryRequest.FromString,
response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryResponse.SerializeToString,
),
'BeginTransaction': grpc.unary_unary_rpc_method_handler(
servicer.BeginTransaction,
request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionRequest.FromString,
response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionResponse.SerializeToString,
),
'Commit': grpc.unary_unary_rpc_method_handler(
servicer.Commit,
request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitRequest.FromString,
response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitResponse.SerializeToString,
),
'Rollback': grpc.unary_unary_rpc_method_handler(
servicer.Rollback,
request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackRequest.FromString,
response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackResponse.SerializeToString,
),
'AllocateIds': grpc.unary_unary_rpc_method_handler(
servicer.AllocateIds,
request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsRequest.FromString,
response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.datastore.v1.Datastore', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
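# --- Illustrative usage sketch (added; not part of the generated module). The endpoint
# is a placeholder (for example a local Datastore emulator); project_id is assumed to be
# a valid field of BeginTransactionRequest in this proto version.
#
# import grpc
# import google.cloud.grpc.datastore.v1.datastore_pb2 as datastore_pb2
# channel = grpc.insecure_channel('localhost:8081')
# stub = DatastoreStub(channel)
# response = stub.BeginTransaction(
#     datastore_pb2.BeginTransactionRequest(project_id='my-project'))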
| 1.203125 | 1 |
nikolaBase/conf_base.py | diSimplex/nikolaBase | 0 | 12795544 | # -*- coding: utf-8 -*-
import time
# !! This is the configuration of Nikola. !! #
# !! You should edit it to your liking. !! #
# Data about this site
BLOG_AUTHOR = "<NAME>" # (translatable)
BLOG_TITLE = "My Nikola Site" # (translatable)
# This is the main URL for your site. It will be used
# in a prominent link. Don't forget the protocol (http/https)!
SITE_URL = "https://example.com/"
# This is the URL where Nikola's output will be deployed.
# If not set, defaults to SITE_URL
# BASE_URL = "https://example.com/"
BLOG_EMAIL = "<EMAIL>"
BLOG_DESCRIPTION = "This is a demo site for Nikola." # (translatable)
# What is the default language?
DEFAULT_LANG = "en"
# What other languages do you have?
# The format is {"translationcode" : "path/to/translation" }
# the path will be used as a prefix for the generated pages location
TRANSLATIONS = {
DEFAULT_LANG: "",
# Example for another language:
# "es": "./es",
}
# What will translated input files be named like?
TRANSLATIONS_PATTERN = '{path}.{lang}.{ext}'
# Links for the sidebar / navigation bar. (translatable)
# This is a dict. The keys are languages, and values are tuples.
NAVIGATION_LINKS = {
DEFAULT_LANG: (
("/archive.html", "Archive"),
("/categories/", "Tags"),
("/rss.xml", "RSS feed"),
),
}
# Alternative navigation links. Works the same way NAVIGATION_LINKS does,
# although themes may not always support them. (translatable)
# (Bootstrap 4: right-side of navbar, Bootblog 4: right side of title)
NAVIGATION_ALT_LINKS = {
DEFAULT_LANG: ()
}
# Name of the theme to use.
#THEME = "bootblog4"
THEME = "disimplex"
# A theme color. In default themes, it might be displayed by some browsers as
# the browser UI color (eg. Chrome on Android). Other themes might also use it
# as an accent color (the default ones don’t). Must be a HEX value.
THEME_COLOR = '#5670d4'
# Theme configuration. Fully theme-dependent. (translatable)
# Samples for bootblog4 (enabled) and bootstrap4 (commented) follow.
# bootblog4 supports: featured_large featured_small featured_on_mobile
# featured_large_image_on_mobile featured_strip_html sidebar
# bootstrap4 supports: navbar_light (defaults to False)
# navbar_custom_bg (defaults to '')
# Config for bootblog4:
THEME_CONFIG = {
DEFAULT_LANG: {
# Show the latest featured post in a large box, with the previewimage as its background.
'featured_large': False,
# Show the first (remaining) two featured posts in small boxes.
'featured_small': False,
# Show featured posts on mobile.
'featured_on_mobile': True,
# Show image in `featured_large` on mobile.
# `featured_small` displays them only on desktop.
'featured_large_image_on_mobile': True,
# Strip HTML from featured post text.
'featured_strip_html': False,
# Contents of the sidebar, If empty, the sidebar is not displayed.
'sidebar': ''
}
}
# POSTS and PAGES contains (wildcard, destination, template) tuples.
# (translatable)
#
POSTS = (
("posts/*.rst", "posts", "post.tmpl"),
("posts/*.md", "posts", "post.tmpl"),
("posts/*.txt", "posts", "post.tmpl"),
("posts/*.html", "posts", "post.tmpl"),
)
PAGES = (
("pages/*.rst", "", "page.tmpl"),
("pages/*.md", "", "page.tmpl"),
("pages/*.txt", "", "page.tmpl"),
("pages/*.html", "", "page.tmpl"),
)
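# An illustrative extra entry (not part of the defaults): a "notes" folder rendered with
# the page template would follow the same (wildcard, destination, template) shape,
# e.g. ("notes/*.md", "notes", "page.tmpl").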
# Below this point, everything is optional
# Post's dates are considered in UTC by default, if you want to use
# another time zone, please set TIMEZONE to match. Check the available
# list from Wikipedia:
TIMEZONE = "Europe/London"
# Date format used to display post dates. (translatable)
# Used by babel.dates, CLDR style: http://cldr.unicode.org/translation/date-time-1/date-time
# You can also use 'full', 'long', 'medium', or 'short'
# DATE_FORMAT = 'yyyy-MM-dd HH:mm'
# Date format used to display post dates, if local dates are used. (translatable)
# Used by Luxon: https://moment.github.io/luxon/docs/manual/formatting
# Example for presets: {'preset': True, 'format': 'DATE_FULL'}
# LUXON_DATE_FORMAT = {
# DEFAULT_LANG: {'preset': False, 'format': 'yyyy-MM-dd HH:mm'},
# }
# Date fanciness.
#
# 0 = using DATE_FORMAT and TIMEZONE (without JS)
# 1 = using LUXON_DATE_FORMAT and local user time (JS, using Luxon)
# 2 = using a string like “2 days ago” (JS, using Luxon)
#
# Your theme must support it, Bootstrap already does.
# DATE_FANCINESS = 0
# Customize the locale/region used for a language.
# For example, to use British instead of US English: LOCALES = {'en': 'en_GB'}
# LOCALES = {}
# One or more folders containing files to be copied as-is into the output.
# The format is a dictionary of {source: relative destination}.
# Default is:
# FILES_FOLDERS = {'files': ''}
# Which means copy 'files' into 'output'
# One or more folders containing code listings to be processed and published on
# the site. The format is a dictionary of {source: relative destination}.
# Default is:
# LISTINGS_FOLDERS = {'listings': 'listings'}
# Which means process listings from 'listings' into 'output/listings'
# A mapping of languages to file-extensions that represent that language.
# Feel free to add or delete extensions to any list, but don't add any new
# compilers unless you write the interface for it yourself.
#
# The default compiler for `new_post` is the first entry in the POSTS tuple.
#
# 'rest' is reStructuredText
# 'markdown' is Markdown
# 'html' assumes the file is HTML and just copies it
COMPILERS = {
"rest": ['.rst', '.txt'],
"markdown": ['.md', '.mdown', '.markdown'],
"textile": ['.textile'],
"txt2tags": ['.t2t'],
"bbcode": ['.bb'],
"wiki": ['.wiki'],
"ipynb": ['.ipynb'],
"html": ['.html', '.htm'],
# PHP files are rendered the usual way (i.e. with the full templates).
# The resulting files have .php extensions, making it possible to run
# them without reconfiguring your server to recognize them.
"php": ['.php'],
# Pandoc detects the input from the source filename
# but is disabled by default as it would conflict
# with many of the others.
# "pandoc": ['.rst', '.md', '.txt'],
}
# Preferred metadata format for new posts
# "YAML": YAML wrapped in "---"
METADATA_FORMAT = "YAML"
# If you do not want to display a tag publicly, you can mark it as hidden.
# The tag will not be displayed on the tag list page and posts.
# Tag pages will still be generated.
HIDDEN_TAGS = ['mathjax']
# If CATEGORY_ALLOW_HIERARCHIES is set to True, categories can be organized in
# hierarchies. For a post, the whole path in the hierarchy must be specified,
# using a forward slash ('/') to separate paths. Use a backslash ('\') to escape
# a forward slash or a backslash (i.e. '\//\\' is a path specifying the
# subcategory called '\' of the top-level category called '/').
CATEGORY_ALLOW_HIERARCHIES = False
# If CATEGORY_OUTPUT_FLAT_HIERARCHY is set to True, the output written to output
# contains only the name of the leaf category and not the whole path.
CATEGORY_OUTPUT_FLAT_HIERARCHY = False
# If you do not want to display a category publicly, you can mark it as hidden.
# The category will not be displayed on the category list page.
# Category pages will still be generated.
HIDDEN_CATEGORIES = []
# If ENABLE_AUTHOR_PAGES is set to True and there is more than one
# author, author pages are generated.
ENABLE_AUTHOR_PAGES = False
# If you do not want to display an author publicly, you can mark it as hidden.
# The author will not be displayed on the author list page and posts.
# Tag pages will still be generated.
HIDDEN_AUTHORS = ['Guest']
# Optional HTML that displayed on “main” blog index.html files.
# May be used for a greeting. (translatable)
FRONT_INDEX_HEADER = {
DEFAULT_LANG: ''
}
# URLs to other posts/pages can take 3 forms:
# rel_path: a relative URL to the current page/post (default)
# full_path: a URL with the full path from the root
# absolute: a complete URL (that includes the SITE_URL)
# URL_TYPE = 'rel_path'
#
# Note that our use of "server side includes" / partials
# REQUIRES the use of 'full_path'
#
URL_TYPE = 'full_path'
# Extension for RSS feed files
# RSS_EXTENSION = ".xml"
# RSS filename base (without extension); used for indexes and galleries.
# (translatable)
# RSS_FILENAME_BASE = "rss"
# Atom filename base (without extension); used for indexes.
# (translatable)
ATOM_FILENAME_BASE = "feed"
# Extension for Atom feed files
# ATOM_EXTENSION = ".atom"
# A list of redirection tuples, [("foo/from.html", "/bar/to.html")].
#
# A HTML file will be created in output/foo/from.html that redirects
# to the "/bar/to.html" URL. notice that the "from" side MUST be a
# relative URL.
#
# If you don't need any of these, just set to []
REDIRECTIONS = []
# Presets of commands to execute to deploy. Can be anything, for
# example, you may use rsync:
# "rsync -rav --delete output/ [email protected]:/srv/www/site"
# And then do a backup, or run `nikola ping` from the `ping`
# plugin (`nikola plugin -i ping`). Or run `nikola check -l`.
# You may also want to use github_deploy (see below).
# You can define multiple presets and specify them as arguments
# to `nikola deploy`. If no arguments are specified, a preset
# named `default` will be executed. You can use as many presets
# in a `nikola deploy` command as you like.
# DEPLOY_COMMANDS = {
# 'default': [
# "rsync -rav --delete output/ [email protected]:/srv/www/site",
# ]
# }
# github_deploy configuration
# For more details, read the manual:
# https://getnikola.com/handbook.html#deploying-to-github
# You will need to configure the deployment branch on GitHub.
GITHUB_SOURCE_BRANCH = 'src'
GITHUB_DEPLOY_BRANCH = 'master'
# The name of the remote where you wish to push to, using github_deploy.
GITHUB_REMOTE_NAME = 'origin'
# Whether or not github_deploy should commit to the source branch automatically
# before deploying.
GITHUB_COMMIT_SOURCE = True
# Where the output site should be located
# If you don't use an absolute path, it will be considered as relative
# to the location of conf.py
# OUTPUT_FOLDER = 'output'
# where the "cache" of partial generated content should be located
# default: 'cache'
# CACHE_FOLDER = 'cache'
# #############################################################################
# Image Gallery Options
# #############################################################################
# Use a thumbnail (defined by ".. previewimage:" in the gallery's index) in
# list of galleries for each gallery
GALLERIES_USE_THUMBNAIL = False
# Image to use as thumbnail for those galleries that don't have one
# None: show a grey square
# '/url/to/file': show the image in that url
GALLERIES_DEFAULT_THUMBNAIL = None
# Images will be scaled down according to IMAGE_THUMBNAIL_SIZE and MAX_IMAGE_SIZE
# options, but will have to be referenced manually to be visible on the site
# (the thumbnail has ``.thumbnail`` added before the file extension by default,
# but a different naming template can be configured with IMAGE_THUMBNAIL_FORMAT).
IMAGE_FOLDERS = {'images': 'images'}
# IMAGE_THUMBNAIL_SIZE = 400
# IMAGE_THUMBNAIL_FORMAT = '{name}.thumbnail{ext}'
# #############################################################################
# HTML fragments and diverse things that are used by the templates
# #############################################################################
# 'Read more...' for the index page, if INDEX_TEASERS is True (translatable)
INDEX_READ_MORE_LINK = '<p class="more"><a href="{link}">{read_more}…</a></p>'
# 'Read more...' for the feeds, if FEED_TEASERS is True (translatable)
FEED_READ_MORE_LINK = '<p><a href="{link}">{read_more}…</a> ({min_remaining_read})</p>'
# Append a URL query to the FEED_READ_MORE_LINK in Atom and RSS feeds. Advanced
# option used for traffic source tracking.
FEED_LINKS_APPEND_QUERY = False
# A HTML fragment describing the license, for the sidebar.
# (translatable)
LICENSE = ""
# I recommend using the Creative Commons' wizard:
# https://creativecommons.org/choose/
# LICENSE = """
# <a rel="license" href="https://creativecommons.org/licenses/by-nc-sa/4.0/">
# <img alt="Creative Commons License BY-NC-SA"
# style="border-width:0; margin-bottom:12px;"
# src="https://i.creativecommons.org/l/by-nc-sa/4.0/88x31.png"></a>"""
# A small copyright notice for the page footer (in HTML).
# (translatable)
CONTENT_FOOTER = 'Contents © {date} <a href="mailto:{email}">{author}</a> - Powered by <a href="https://getnikola.com" rel="nofollow">Nikola</a> {license}'
# Things that will be passed to CONTENT_FOOTER.format(). This is done
# for translatability, as dicts are not ordered nor easy to add things to.
CONTENT_FOOTER_FORMATS = {
DEFAULT_LANG: (
(),
{
"email": BLOG_EMAIL,
"author": BLOG_AUTHOR,
"date": time.gmtime().tm_year,
"license": LICENSE
}
)
}
# A simple copyright tag for inclusion in RSS feeds that works just
# like CONTENT_FOOTER and CONTENT_FOOTER_FORMATS
RSS_COPYRIGHT = 'Contents © {date} <a href="mailto:{email}">{author}</a> {license}'
RSS_COPYRIGHT_PLAIN = 'Contents © {date} {author} {license}'
RSS_COPYRIGHT_FORMATS = CONTENT_FOOTER_FORMATS
# To use comments, you can choose between different third party comment
# systems. The following comment systems are supported by Nikola:
# disqus, facebook, intensedebate, isso, muut, commento, utterances
# You can leave this option blank to disable comments.
COMMENT_SYSTEM = ""
# And you also need to add your COMMENT_SYSTEM_ID which
# depends on what comment system you use. The default is
# "nikolademo" which is a test account for Disqus. More information
# is in the manual.
COMMENT_SYSTEM_ID = ""
# Create index.html for page folders?
# WARNING: if a page would conflict with the index file (usually
# caused by setting slug to `index`), the PAGE_INDEX
# will not be generated for that directory.
# PAGE_INDEX = False
# Enable comments on pages (i.e. not posts)?
# COMMENTS_IN_PAGES = False
# Enable comments on picture gallery pages?
# COMMENTS_IN_GALLERIES = False
# What file should be used for directory indexes?
# Defaults to index.html
# Common other alternatives: default.html for IIS, index.php
# INDEX_FILE = "index.html"
# If a link ends in /index.html, drop the index.html part.
# http://mysite/foo/bar/index.html => http://mysite/foo/bar/
# (Uses the INDEX_FILE setting, so if that is, say, default.html,
# it will instead /foo/default.html => /foo)
STRIP_INDEXES = False
# List of files relative to the server root (!) that will be asked to be excluded
# from indexing and other robotic spidering. * is supported. Will only be effective
# if SITE_URL points to server root. The list is used to exclude resources from
# /robots.txt and /sitemap.xml, and to inform search engines about /sitemapindex.xml.
# ROBOTS_EXCLUSIONS = ["/archive.html", "/category/*.html"]
# Instead of putting files in <slug>.html, put them in <slug>/index.html.
# No web server configuration is required. Also enables STRIP_INDEXES.
# This can be disabled on a per-page/post basis by adding
# .. pretty_url: False
# to the metadata.
PRETTY_URLS = False
# If True, publish future dated posts right away instead of scheduling them.
# Defaults to False.
# FUTURE_IS_NOW = False
# If True, future dated posts are allowed in deployed output
# Only the individual posts are published/deployed; not in indexes/sitemap
# Generally, you want FUTURE_IS_NOW and DEPLOY_FUTURE to be the same value.
# DEPLOY_FUTURE = False
# If False, draft posts will not be deployed
# DEPLOY_DRAFTS = True
# Allows scheduling of posts using the rule specified here (new_post -s)
# Specify an iCal Recurrence Rule: https://www.kanzaki.com/docs/ical/rrule.html
# SCHEDULE_RULE = ''
# If True, use the scheduling rule to all posts (not pages!) by default
# SCHEDULE_ALL = False
# Do you want to add a Mathjax config file?
# MATHJAX_CONFIG = ""
# If you want support for the $.$ syntax (which may conflict with running
# text!), just use this config:
# MATHJAX_CONFIG = """
# <script type="text/x-mathjax-config">
# MathJax.Hub.Config({
# tex2jax: {
# inlineMath: [ ['$','$'], ["\\\(","\\\)"] ],
# displayMath: [ ['$$','$$'], ["\\\[","\\\]"] ],
# processEscapes: true
# },
# displayAlign: 'center', // Change this to 'left' if you want left-aligned equations.
# "HTML-CSS": {
# styles: {'.MathJax_Display': {"margin": 0}}
# }
# });
# </script>
# """
# Want to use KaTeX instead of MathJax? While KaTeX may not support every
# feature yet, it's faster and the output looks better.
# USE_KATEX = False
# KaTeX auto-render settings. If you want support for the $.$ syntax (which may
# conflict with running text!), just use this config:
# KATEX_AUTO_RENDER = """
# delimiters: [
# {left: "$$", right: "$$", display: true},
# {left: "\\\\[", right: "\\\\]", display: true},
# {left: "\\\\begin{equation*}", right: "\\\\end{equation*}", display: true},
# {left: "$", right: "$", display: false},
# {left: "\\\\(", right: "\\\\)", display: false}
# ]
# """
# What Markdown extensions to enable?
# You will also get gist, nikola and podcast because those are
# done in the code, hope you don't mind ;-)
# Note: most Nikola-specific extensions are done via the Nikola plugin system,
# with the MarkdownExtension class and should not be added here.
# Defaults are markdown.extensions.(fenced_code|codehilite|extra)
# markdown.extensions.meta is required for Markdown metadata.
MARKDOWN_EXTENSIONS = ['markdown.extensions.fenced_code', 'markdown.extensions.codehilite', 'markdown.extensions.extra', 'markdown.extensions.toc']
# Options to be passed to markdown extensions (See https://python-markdown.github.io/reference/)
# Default is {} (no config at all)
# MARKDOWN_EXTENSION_CONFIGS = {}
# Social buttons. This is sample code for AddThis (which was the default for a
# long time). Insert anything you want here, or even make it empty (which is
# the default right now)
# (translatable)
# SOCIAL_BUTTONS_CODE = """
# <!-- Social buttons -->
# <div id="addthisbox" class="addthis_toolbox addthis_peekaboo_style addthis_default_style addthis_label_style addthis_32x32_style">
# <a class="addthis_button_more">Share</a>
# <ul><li><a class="addthis_button_facebook"></a>
# <li><a class="addthis_button_google_plusone_share"></a>
# <li><a class="addthis_button_linkedin"></a>
# <li><a class="addthis_button_twitter"></a>
# </ul>
# </div>
# <script src="https://s7.addthis.com/js/300/addthis_widget.js#pubid=ra-4f7088a56bb93798"></script>
# <!-- End of social buttons -->
# """
# Show link to source for the posts?
SHOW_SOURCELINK = False
# Copy the source files for your pages?
# Setting it to False implies SHOW_SOURCELINK = False
COPY_SOURCES = False
# Modify the number of Post per Index Page
# Defaults to 10
# INDEX_DISPLAY_POST_COUNT = 10
# Extra things you want in the pages HEAD tag. This will be added right
# before </head>
# (translatable)
# EXTRA_HEAD_DATA = ""
# Google Analytics or whatever else you use. Added to the bottom of <body>
# in the default template (base.tmpl).
# (translatable)
# BODY_END = ""
# Bundle JS and CSS into single files to make site loading faster in a HTTP/1.1
# environment but is not recommended for HTTP/2.0 when caching is used.
# Defaults to True.
# USE_BUNDLES = True
USE_BUNDLES = False
# Plugins you don't want to use. Be careful :-)
# DISABLED_PLUGINS = ["render_galleries"]
# Special settings to disable only parts of the indexes plugin.
# Use with care.
# DISABLE_INDEXES = False
# DISABLE_MAIN_ATOM_FEED = False
# DISABLE_MAIN_RSS_FEED = False
# Add the absolute paths to directories containing plugins to use them.
# For example, the `plugins` directory of your clone of the Nikola plugins
# repository.
# EXTRA_PLUGINS_DIRS = []
# Add the absolute paths to directories containing themes to use them.
# For example, the `v7` directory of your clone of the Nikola themes
# repository.
# EXTRA_THEMES_DIRS = []
# List of regular expressions, links matching them will always be considered
# valid by "nikola check -l"
# LINK_CHECK_WHITELIST = []
# The <hN> tags in HTML generated by certain compilers (reST/Markdown)
# will be demoted by that much (1 → h1 will become h2 and so on)
# This was a hidden feature of the Markdown and reST compilers in the
# past. Useful especially if your post titles are in <h1> tags too, for
# example.
# (defaults to 1.)
# DEMOTE_HEADERS = 1
# If set to True, the tags 'draft', 'mathjax' and 'private' have special
# meaning. If set to False, these tags are handled like regular tags.
USE_TAG_METADATA = False
# If set to True, a warning is issued if one of the 'draft', 'mathjax'
# and 'private' tags are found in a post. Useful for checking that
# migration was successful.
WARN_ABOUT_TAG_METADATA = False
# Templates will use those filters, along with the defaults.
# Consult your engine's documentation on filters if you need help defining
# those.
# TEMPLATE_FILTERS = {}
# Put in global_context things you want available on all your templates.
# It can be anything, data, functions, modules, etc.
GLOBAL_CONTEXT = {}
# Add functions here and they will be called with template
# GLOBAL_CONTEXT as parameter when the template is about to be
# rendered
GLOBAL_CONTEXT_FILLER = []
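# An illustrative (commented-out) filler, not part of the stock configuration. Nikola
# calls each filler when a template is about to be rendered; the exact call signature
# should be checked against your Nikola version.
# def _add_build_year(context, template_name):
#     context['build_year'] = time.gmtime().tm_year
# GLOBAL_CONTEXT_FILLER = [_add_build_year]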
# Settings for the (boot)Reveal theme must be added to the global context.
# subtheme selection: beige/serif/simple/sky/night/default
# transition selection: cube/page/concave/linear/none/default
GLOBAL_CONTEXT.update({
'subtheme': 'simple',
'transition': 'none'
})
| 1.9375 | 2 |
Programinhas_python/busca/Fetch_words.py | xDanielz/python_exercicios_pessoais | 0 | 12795545 | while True:
    search = str(input('Enter the word you would like to search for: '))
try:
        with open('acounts.txt', 'r') as file:
for lines in file.readlines():
if search in lines:
print(lines)
break
else:
                print('Not found')
except FileNotFoundError:
        print('File not found')
    stop = str(input('Would you like to search for another word? [Y/N]: '))[0].upper()
if stop == 'N':
break
| 3.828125 | 4 |
tests/data_sources/gsp/test_gsp_data_source.py | openclimatefix/nowcasting_dataset | 15 | 12795546 | """ Tests for GSPDataSource """
import os
from datetime import datetime
import pandas as pd
import nowcasting_dataset
from nowcasting_dataset.data_sources.gsp.gsp_data_source import (
GSPDataSource,
drop_gsp_north_of_boundary,
)
from nowcasting_dataset.geospatial import osgb_to_lat_lon
def test_gsp_pv_data_source_init():
"""Test GSP init"""
local_path = os.path.dirname(nowcasting_dataset.__file__) + "/.."
_ = GSPDataSource(
zarr_path=f"{local_path}/tests/data/gsp/test.zarr",
start_datetime=datetime(2020, 4, 1),
end_datetime=datetime(2020, 4, 2),
history_minutes=30,
forecast_minutes=60,
image_size_pixels=64,
meters_per_pixel=2000,
)
def test_gsp_pv_data_source_get_locations():
"""Test GSP locations"""
local_path = os.path.dirname(nowcasting_dataset.__file__) + "/.."
gsp = GSPDataSource(
zarr_path=f"{local_path}/tests/data/gsp/test.zarr",
start_datetime=datetime(2020, 4, 1),
end_datetime=datetime(2020, 4, 2),
history_minutes=30,
forecast_minutes=60,
image_size_pixels=64,
meters_per_pixel=2000,
)
locations_x, locations_y = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:10])
assert len(locations_x) == len(locations_y)
# This makes sure it is not in lat/lon.
# Note that OSGB could be <= than 90, but that would mean a location in the middle of the sea,
# which is impossible for GSP data
assert locations_x[0] > 90
assert locations_y[0] > 90
lat, lon = osgb_to_lat_lon(locations_x, locations_y)
assert 0 < lat[0] < 90 # this makes sure it is in lat/lon
assert -90 < lon[0] < 90 # this makes sure it is in lat/lon
def test_gsp_pv_data_source_get_all_locations():
"""Test GSP example"""
local_path = os.path.dirname(nowcasting_dataset.__file__) + "/.."
gsp = GSPDataSource(
zarr_path=f"{local_path}/tests/data/gsp/test.zarr",
start_datetime=datetime(2020, 4, 1),
end_datetime=datetime(2020, 4, 2),
history_minutes=30,
forecast_minutes=60,
image_size_pixels=64,
meters_per_pixel=2000,
)
N_gsps = len(gsp.metadata)
t0_datetimes_utc = gsp.gsp_power.index[0:10]
x_locations = gsp.metadata.location_x
(
t0_datetimes_utc_all_gsps,
x_centers_osgb_all_gsps,
y_centers_osgb_all_gsps,
) = gsp.get_all_locations(t0_datetimes_utc=t0_datetimes_utc)
assert len(t0_datetimes_utc_all_gsps) == len(x_centers_osgb_all_gsps)
assert len(t0_datetimes_utc_all_gsps) == len(y_centers_osgb_all_gsps)
assert len(t0_datetimes_utc_all_gsps) == len(x_locations) * len(t0_datetimes_utc)
# check first few are the same datetime
assert (x_centers_osgb_all_gsps[0:N_gsps] == x_locations.values).all()
assert (t0_datetimes_utc_all_gsps[0:N_gsps] == t0_datetimes_utc[0]).all()
# check second set of datetimes
assert (x_centers_osgb_all_gsps[N_gsps : 2 * N_gsps] == x_locations.values).all()
assert (t0_datetimes_utc_all_gsps[N_gsps : 2 * N_gsps] == t0_datetimes_utc[1]).all()
# check all datetimes
t0_datetimes_utc_all_gsps_overlap = t0_datetimes_utc_all_gsps.union(t0_datetimes_utc)
assert len(t0_datetimes_utc_all_gsps_overlap) == len(t0_datetimes_utc_all_gsps)
def test_gsp_pv_data_source_get_example():
"""Test GSP example"""
local_path = os.path.dirname(nowcasting_dataset.__file__) + "/.."
start_dt = datetime(2020, 4, 1)
end_dt = datetime(2020, 4, 1)
gsp = GSPDataSource(
zarr_path=f"{local_path}/tests/data/gsp/test.zarr",
start_datetime=datetime(2020, 4, 1),
end_datetime=datetime(2020, 4, 2),
history_minutes=30,
forecast_minutes=60,
image_size_pixels=64,
meters_per_pixel=2000,
)
x_locations, y_locations = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:10])
example = gsp.get_example(
t0_datetime_utc=gsp.gsp_power.index[0],
x_center_osgb=x_locations[0],
y_center_osgb=y_locations[0],
)
assert len(example.id) == len(example.power_mw[0])
assert len(example.x_osgb) == len(example.y_osgb)
assert len(example.x_osgb) > 0
assert pd.Timestamp(example.time[0].values) <= end_dt
assert pd.Timestamp(example.time[0].values) >= start_dt
def test_gsp_pv_data_source_get_batch():
"""Test GSP batch"""
local_path = os.path.dirname(nowcasting_dataset.__file__) + "/.."
gsp = GSPDataSource(
zarr_path=f"{local_path}/tests/data/gsp/test.zarr",
start_datetime=datetime(2020, 4, 1),
end_datetime=datetime(2020, 4, 2),
history_minutes=30,
forecast_minutes=60,
image_size_pixels=64,
meters_per_pixel=2000,
)
batch_size = 10
x_locations, y_locations = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:batch_size])
batch = gsp.get_batch(
t0_datetimes_utc=gsp.gsp_power.index[batch_size : 2 * batch_size],
x_centers_osgb=x_locations[0:batch_size],
y_centers_osgb=y_locations[0:batch_size],
)
assert len(batch.power_mw[0]) == 4
assert len(batch.id[0]) == len(batch.x_osgb[0])
assert len(batch.x_osgb[1]) == len(batch.y_osgb[1])
assert len(batch.x_osgb[2]) > 0
# assert T0_DT in batch[3].keys()
def test_drop_gsp_north_of_boundary(test_data_folder):
"""Test that dropping GSP north of a boundary works"""
gsp = GSPDataSource(
zarr_path=f"{test_data_folder}/gsp/test.zarr",
start_datetime=datetime(2020, 4, 1),
end_datetime=datetime(2020, 4, 2),
history_minutes=30,
forecast_minutes=60,
image_size_pixels=64,
meters_per_pixel=2000,
northern_boundary_osgb=None,
)
# remove all gsp systems
gsp_power, metadata = drop_gsp_north_of_boundary(
gsp.gsp_power, gsp.metadata, northern_boundary_osgb=0
)
assert len(gsp_power.columns) == 0
assert len(metadata) == 0
# remove half the systems
north_osgb_median = int(gsp.metadata.location_y.median())
gsp_power, metadata = drop_gsp_north_of_boundary(
gsp.gsp_power, gsp.metadata, northern_boundary_osgb=north_osgb_median
)
assert len(gsp_power.columns) == len(gsp.gsp_power.columns) / 2
assert len(metadata) == len(gsp.metadata) / 2
| 2.34375 | 2 |
S4/S4 Library/simulation/services/__init__.py | NeonOcean/Environment | 1 | 12795547 | <gh_stars>1-10
import argparse
import functools
import gc
import time
from services.tuning_managers import InstanceTuningManagers
from sims4.resources import INSTANCE_TUNING_DEFINITIONS
from sims4.tuning.instance_manager import TuningInstanceManager
from sims4.tuning.tunable import Tunable, TunableReference
import game_services
import paths
import sims4.reload
import sims4.service_manager
try:
import _zone
except ImportError:
class _zone:
@staticmethod
def invite_sims_to_zone(*_, **__):
pass
@staticmethod
def get_house_description_id(*_, **__):
pass
@staticmethod
def get_building_type(*_, **__):
return 0
@staticmethod
def get_eco_footprint_value(*_, **__):
return 0
@staticmethod
def get_rent(*_, **__):
return 0
@staticmethod
def get_lot_description_id(*_, **__):
pass
@staticmethod
def get_world_description_id(*_, **__):
pass
@staticmethod
def get_world_id(*_, **__):
pass
@staticmethod
def get_world_and_lot_description_id_from_zone_id(*_, **__):
pass
@staticmethod
def get_is_eco_footprint_compatible_for_world_description(*_, **__):
return False
@staticmethod
def get_hide_from_lot_picker(*_, **__):
pass
@staticmethod
def is_event_enabled(*_, **__):
pass
invite_sims_to_zone = _zone.invite_sims_to_zone
get_house_description_id = _zone.get_house_description_id
is_event_enabled = _zone.is_event_enabled
get_building_type = _zone.get_building_type
get_eco_footprint_value = _zone.get_eco_footprint_value
get_rent = _zone.get_rent
get_lot_description_id = _zone.get_lot_description_id
get_world_description_id = _zone.get_world_description_id
get_world_id = _zone.get_world_id
get_world_and_lot_description_id_from_zone_id = _zone.get_world_and_lot_description_id_from_zone_id
get_is_eco_footprint_compatible_for_world_description = _zone.get_is_eco_footprint_compatible_for_world_description
get_hide_from_lot_picker = _zone.get_hide_from_lot_picker
with sims4.reload.protected(globals()):
tuning_managers = InstanceTuningManagers()
get_instance_manager = tuning_managers.__getitem__
_account_service = None
_zone_manager = None
_server_clock_service = None
_persistence_service = None
_distributor_service = None
_intern_service = None
_terrain_service = None
definition_manager = None
snippet_manager = None
_terrain_object = None
_object_leak_tracker = None
for definition in INSTANCE_TUNING_DEFINITIONS:
accessor_name = definition.manager_name
accessor = functools.partial(tuning_managers.__getitem__, definition.TYPE_ENUM_VALUE)
globals()[accessor_name] = accessor
production_logger = sims4.log.ProductionLogger('Services')
logger = sims4.log.Logger('Services')
time_delta = None
gc_collection_enable = True
class TimeStampService(sims4.service_manager.Service):
def start(self):
global gc_collection_enable, time_delta
if gc_collection_enable:
gc.disable()
production_logger.info('GC disabled')
gc_collection_enable = False
else:
gc.enable()
production_logger.info('GC enabled')
gc_collection_enable = True
time_stamp = time.time()
production_logger.info('TimeStampService start at {}'.format(time_stamp))
logger.info('TimeStampService start at {}'.format(time_stamp))
if time_delta is None:
time_delta = time_stamp
else:
time_delta = time_stamp - time_delta
production_logger.info('Time delta from loading start is {}'.format(time_delta))
logger.info('Time delta from loading start is {}'.format(time_delta))
return True
def start_global_services(initial_ticks):
global _account_service, _zone_manager, _distributor_service, _intern_service
create_server_clock(initial_ticks)
from distributor.distributor_service import DistributorService
from intern_service import InternService
from server.account_service import AccountService
from services.persistence_service import PersistenceService
from services.terrain_service import TerrainService
from sims4.tuning.serialization import FinalizeTuningService
from zone_manager import ZoneManager
parser = argparse.ArgumentParser()
parser.add_argument('--python_autoleak', default=False, action='store_true')
(args, unused_args) = parser.parse_known_args()
if args.python_autoleak:
create_object_leak_tracker()
_account_service = AccountService()
_zone_manager = ZoneManager()
_distributor_service = DistributorService()
_intern_service = InternService()
init_critical_services = [server_clock_service(), get_persistence_service()]
services = [_distributor_service, _intern_service, _intern_service.get_start_interning(), TimeStampService]
instantiated_tuning_managers = []
for definition in INSTANCE_TUNING_DEFINITIONS:
instantiated_tuning_managers.append(tuning_managers[definition.TYPE_ENUM_VALUE])
services.append(TuningInstanceManager(instantiated_tuning_managers))
services.extend([FinalizeTuningService, TimeStampService, _intern_service.get_stop_interning(), get_terrain_service(), _zone_manager, _account_service])
sims4.core_services.start_services(init_critical_services, services)
def stop_global_services():
global _zone_manager, _account_service, _event_manager, _server_clock_service, _persistence_service, _terrain_service, _distributor_service, _intern_service, _object_leak_tracker
_zone_manager.shutdown()
_zone_manager = None
tuning_managers.clear()
_account_service = None
_event_manager = None
_server_clock_service = None
_persistence_service = None
_terrain_service = None
_distributor_service = None
_intern_service = None
if _object_leak_tracker is not None:
_object_leak_tracker = None
def create_object_leak_tracker(start=False):
global _object_leak_tracker
from performance.object_leak_tracker import ObjectLeakTracker
if _object_leak_tracker is None:
_object_leak_tracker = ObjectLeakTracker()
if start:
_object_leak_tracker.start_tracking()
return True
return False
def get_object_leak_tracker():
return _object_leak_tracker
def get_zone_manager():
return _zone_manager
def current_zone():
if _zone_manager is not None:
return _zone_manager.current_zone
def current_zone_id():
if _zone_manager is not None:
return sims4.zone_utils.zone_id
def current_zone_info():
zone = current_zone()
return zone.get_zone_info()
def current_region():
zone = current_zone()
if zone is not None:
return zone.region
def current_street():
zone = current_zone()
if zone is not None:
return zone.street
def get_zone(zone_id, allow_uninstantiated_zones=False):
if _zone_manager is not None:
return _zone_manager.get(zone_id, allow_uninstantiated_zones=allow_uninstantiated_zones)
def active_lot():
zone = current_zone()
if zone is not None:
return zone.lot
def active_lot_id():
lot = active_lot()
if lot is not None:
return lot.lot_id
def client_object_managers():
if game_services.service_manager is not None:
return game_services.service_manager.client_object_managers
return ()
def sim_info_manager():
return game_services.service_manager.sim_info_manager
def posture_graph_service(zone_id=None):
if zone_id is None:
zone = current_zone()
if zone is not None:
return zone.posture_graph_service
return
return _zone_manager.get(zone_id).posture_graph_service
def sim_spawner_service(zone_id=None):
if zone_id is None:
return current_zone().sim_spawner_service
return _zone_manager.get(zone_id).sim_spawner_service
def locator_manager():
return current_zone().locator_manager
def object_manager(zone_id=None):
if zone_id is None:
zone = current_zone()
else:
zone = _zone_manager.get(zone_id)
if zone is not None:
return zone.object_manager
def inventory_manager(zone_id=None):
if zone_id is None:
zone = current_zone()
if zone is not None:
return zone.inventory_manager
return
return _zone_manager.get(zone_id).inventory_manager
def prop_manager(zone_id=None):
if zone_id is None:
zone = current_zone()
else:
zone = _zone_manager.get(zone_id)
if zone is not None:
return zone.prop_manager
def social_group_manager():
return current_zone().social_group_manager
def client_manager():
return game_services.service_manager.client_manager
def get_first_client():
return client_manager().get_first_client()
def get_selectable_sims():
return get_first_client().selectable_sims
def owning_household_id_of_active_lot():
zone = current_zone()
if zone is not None:
return zone.lot.owner_household_id
def owning_household_of_active_lot():
zone = current_zone()
if zone is not None:
return household_manager().get(zone.lot.owner_household_id)
def object_preference_tracker(require_active_household=False):
zone = current_zone()
if zone is not None:
household = household_manager().get(zone.lot.owner_household_id)
if household is not None:
if require_active_household and not household.is_active_household:
return
return household.object_preference_tracker
travel_group = travel_group_manager().get_travel_group_by_zone_id(zone.id)
if travel_group is not None:
if require_active_household and not travel_group.is_active_sim_in_travel_group:
return
else:
return travel_group.object_preference_tracker
def get_active_sim():
client = client_manager().get_first_client()
if client is not None:
return client.active_sim
def active_sim_info():
client = client_manager().get_first_client()
if client is not None:
return client.active_sim_info
def active_household():
client = client_manager().get_first_client()
if client is not None:
return client.household
def active_household_id():
client = client_manager().get_first_client()
if client is not None:
return client.household_id
def active_household_lot_id():
household = active_household()
if household is not None:
home_zone = get_zone(household.home_zone_id)
if home_zone is not None:
lot = home_zone.lot
if lot is not None:
return lot.lot_id
def privacy_service():
return current_zone().privacy_service
def autonomy_service():
return current_zone().autonomy_service
def get_aging_service():
return game_services.service_manager.aging_service
def get_cheat_service():
return game_services.service_manager.cheat_service
def neighborhood_population_service():
return current_zone().neighborhood_population_service
def get_reset_and_delete_service():
return current_zone().reset_and_delete_service
def venue_service():
return current_zone().venue_service
def venue_game_service():
return getattr(game_services.service_manager, 'venue_game_service', None)
def zone_spin_up_service():
return current_zone().zone_spin_up_service
def household_manager():
return game_services.service_manager.household_manager
def travel_group_manager(zone_id=None):
if zone_id is None:
zone = current_zone()
if zone is not None:
return zone.travel_group_manager
return
return _zone_manager.get(zone_id).travel_group_manager
def utilities_manager(household_id=None):
if household_id:
return get_utilities_manager_by_household_id(household_id)
return get_utilities_manager_by_zone_id(current_zone_id())
def get_utilities_manager_by_household_id(household_id):
return game_services.service_manager.utilities_manager.get_manager_for_household(household_id)
def get_utilities_manager_by_zone_id(zone_id):
return game_services.service_manager.utilities_manager.get_manager_for_zone(zone_id)
def ui_dialog_service():
return current_zone().ui_dialog_service
def config_service():
return game_services.service_manager.config_service
def travel_service():
return current_zone().travel_service
def sim_quadtree():
return current_zone().sim_quadtree
def single_part_condition_list():
return current_zone().single_part_condition_list
def multi_part_condition_list():
return current_zone().multi_part_condition_list
def get_event_manager():
return game_services.service_manager.event_manager_service
def get_current_venue():
service = venue_service()
if service is not None:
return service.active_venue
def get_intern_service():
return _intern_service
def get_zone_situation_manager(zone_id=None):
if zone_id is None:
return current_zone().situation_manager
return _zone_manager.get(zone_id).situation_manager
def npc_hosted_situation_service():
return current_zone().n_p_c_hosted_situation_service
def ensemble_service():
return current_zone().ensemble_service
def sim_filter_service(zone_id=None):
if zone_id is None:
return current_zone().sim_filter_service
return _zone_manager.get(zone_id).sim_filter_service
def get_photography_service():
return current_zone().photography_service
def social_group_cluster_service():
return current_zone().social_group_cluster_service
def on_client_connect(client):
sims4.core_services.service_manager.on_client_connect(client)
game_services.service_manager.on_client_connect(client)
current_zone().service_manager.on_client_connect(client)
def on_client_disconnect(client):
sims4.core_services.service_manager.on_client_disconnect(client)
if game_services.service_manager.allow_shutdown:
game_services.service_manager.on_client_disconnect(client)
current_zone().service_manager.on_client_disconnect(client)
def on_enter_main_menu():
pass
def account_service():
return _account_service
def business_service():
bs = game_services.service_manager.business_service
return bs
def get_terrain_service():
global _terrain_service
if _terrain_service is None:
from services.terrain_service import TerrainService
_terrain_service = TerrainService()
return _terrain_service
def call_to_action_service():
return game_services.service_manager.call_to_action_service
def trend_service():
return game_services.service_manager.trend_service
def time_service():
return game_services.service_manager.time_service
def game_clock_service():
return game_services.service_manager.game_clock
def server_clock_service():
if _server_clock_service is None:
return
return _server_clock_service
def create_server_clock(initial_ticks):
global _server_clock_service
import clock
_server_clock_service = clock.ServerClock(ticks=initial_ticks)
def get_master_controller():
return current_zone().master_controller
def get_persistence_service():
global _persistence_service
if _persistence_service is None:
from services.persistence_service import PersistenceService
_persistence_service = PersistenceService()
return _persistence_service
def get_distributor_service():
return _distributor_service
def get_fire_service():
return current_zone().fire_service
def get_career_service():
return current_zone().career_service
def get_story_progression_service():
return current_zone().story_progression_service
def daycare_service():
zone = current_zone()
if zone is not None:
return zone.daycare_service
def get_adoption_service():
return current_zone().adoption_service
def get_laundry_service():
zone = current_zone()
if zone is not None and hasattr(zone, 'laundry_service'):
return zone.laundry_service
def get_object_routing_service():
zone = current_zone()
if zone is not None and hasattr(zone, 'object_routing_service'):
return zone.object_routing_service
def get_landlord_service():
return getattr(game_services.service_manager, 'landlord_service', None)
def get_roommate_service():
return getattr(game_services.service_manager, 'roommate_service', None)
def get_club_service():
return getattr(game_services.service_manager, 'club_service', None)
def get_culling_service():
return current_zone().culling_service
def get_gardening_service():
return current_zone().gardening_service
def drama_scheduler_service():
return current_zone().drama_schedule_service
def get_plex_service():
return current_zone().plex_service
def get_door_service():
return current_zone().door_service
def get_zone_modifier_service():
return current_zone().zone_modifier_service
def get_demographics_service():
return current_zone().demographics_service
def get_service_npc_service():
return current_zone().service_npc_service
def conditional_layer_service():
return current_zone().conditional_layer_service
def get_sickness_service():
return game_services.service_manager.sickness_service
def get_curfew_service():
return game_services.service_manager.curfew_service
def get_locale():
client = get_first_client()
return client.account.locale
def relationship_service():
return game_services.service_manager.relationship_service
def hidden_sim_service():
return game_services.service_manager.hidden_sim_service
def weather_service():
return getattr(game_services.service_manager, 'weather_service', None)
def season_service():
return getattr(game_services.service_manager, 'season_service', None)
def lot_decoration_service():
return getattr(game_services.service_manager, 'lot_decoration_service', None)
def get_style_service():
return game_services.service_manager.style_service
def get_tutorial_service():
return game_services.service_manager.tutorial_service
def calendar_service():
return current_zone().calendar_service
def get_rabbit_hole_service():
return game_services.service_manager.rabbit_hole_service
def holiday_service():
return getattr(game_services.service_manager, 'holiday_service', None)
def global_policy_service():
return getattr(game_services.service_manager, 'global_policy_service', None)
def narrative_service():
return getattr(game_services.service_manager, 'narrative_service', None)
def organization_service():
return getattr(game_services.service_manager, 'organization_service', None)
def get_object_lost_and_found_service():
return game_services.service_manager.object_lost_and_found_service
def street_service():
return getattr(game_services.service_manager, 'street_service', None)
def c_api_gsi_dump():
import server_commands.developer_commands
server_commands.developer_commands.gsi_dump()
| 1.960938 | 2 |
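A brief caller-side sketch of the accessor pattern defined above (an editorial addition, not part of the original file): game systems are assumed to import this module — conventionally named `services` — and fetch managers through these functions rather than caching them; the specific calls below are illustrative only.

# Hypothetical usage; assumes this module is importable as `services` and that
# a zone and client are already running, mirroring the None guards used above.
import services

zone = services.current_zone()
if zone is not None:
    objects = services.object_manager()       # object manager for that zone
    sim = services.get_active_sim()           # may be None if no client yet
    household = services.active_household()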
src/modules/encoder.py | Neptune1201/ASIM | 3 | 12795548 | # coding=utf-8
# Copyright (C) 2019 Alibaba Group Holding Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# add BiLSTM as encoder
import torch.nn as nn
import torch.nn.functional as f
import torch
from . import Conv1d
class Encoder(nn.Module):
def __init__(self, args, input_size):
super().__init__()
self.dropout = args.dropout
self.encoders = nn.ModuleList([Conv1d(
in_channels=input_size if i == 0 else args.hidden_size,
out_channels=args.hidden_size,
kernel_sizes=args.kernel_sizes) for i in range(args.enc_layers)])
def forward(self, x, mask):
x = x.transpose(1, 2) # B x C x L
mask = mask.transpose(1, 2)
for i, encoder in enumerate(self.encoders):
x.masked_fill_(~mask, 0.)
if i > 0:
x = f.dropout(x, self.dropout, self.training)
x = encoder(x)
x = f.dropout(x, self.dropout, self.training)
return x.transpose(1, 2) # B x L x C
def sort_by_seq_lens(batch, sequences_lengths, descending=True):
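    # Sort a padded batch by sequence length (descending by default) and also
    # return the sorting index plus the index that restores the original order.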
sorted_seq_lens, sorting_index =\
sequences_lengths.sort(0, descending=descending)
sorted_batch = batch.index_select(0, sorting_index)
idx_range = torch.arange(0, len(sequences_lengths)).to(sequences_lengths.device)
_, reverse_mapping = sorting_index.sort(0, descending=False)
restoration_index = idx_range.index_select(0, reverse_mapping)
return sorted_batch, sorted_seq_lens, sorting_index, restoration_index
class Seq2SeqEncoder(nn.Module):
def __init__(self,
rnn_type,
input_size,
hidden_size,
num_layers=1,
bias=True,
dropout=0.2,
bidirectional=False):
assert issubclass(rnn_type, nn.RNNBase),\
"rnn_type must be a class inheriting from torch.nn.RNNBase"
super(Seq2SeqEncoder, self).__init__()
self.rnn_type = rnn_type
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.dropout = dropout
self.bidirectional = bidirectional
self._encoder = rnn_type(input_size,
hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional)
    def forward(self, sequences_batch, sequences_lengths):
        # sequences_lengths is accepted for interface compatibility but is not
        # used here: the batch is fed to the RNN unpacked, with a zero (None)
        # initial hidden state.
        outputs, _ = self._encoder(sequences_batch, None)
return outputs | 2.1875 | 2 |
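A minimal usage sketch for the Seq2SeqEncoder defined above (an editorial addition, not part of the original repository); the LSTM choice, tensor shapes, and lengths are illustrative assumptions.

# Hedged example: any nn.RNNBase subclass works; nn.LSTM and the shapes below
# are assumptions made for illustration only.
import torch
import torch.nn as nn

encoder = Seq2SeqEncoder(nn.LSTM, input_size=300, hidden_size=128,
                         num_layers=1, dropout=0.0, bidirectional=True)
batch = torch.randn(4, 20, 300)           # 4 sequences, 20 steps, 300 features
lengths = torch.tensor([20, 18, 15, 9])   # accepted but unused by forward()
outputs = encoder(batch, lengths)         # -> 4 x 20 x 256 (bidirectional)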
EXERCICIOS/exercicios_curso_linkedin/Cap. 1 - Linguagem/01_codingstyle_resposta.py | raphael-d-cordeiro/Python_Public | 0 | 12795549 | # each import should be on its own line
import sys
import os


# two blank lines separate classes from other functions
class MinhaClasse():
    def __init__(self):
        self.descricao = "Minha Classe"

    # within a class, use one blank line between methods
    def meu_metodo(self, arg1):
        pass


def main():
    # comments that span more than one line should be limited to
    # 72 characters per line
    instancia = MinhaClasse()
    print(instancia.descricao)
    instancia.descricao = "Classe da Jess"
    print(instancia.descricao)


if __name__ == "__main__":
    main()
| 3.78125 | 4 |
tests/unit/custom/test_table.py | matthewgdv/sqlhandler | 0 | 12795550 | <filename>tests/unit/custom/test_table.py
# import pytest
class TestTable:
def test___new__(self): # synced
assert True
| 1.867188 | 2 |