id (string, 1–7 chars) | text (string, 6–1.03M chars) | dataset_id (string, 1 class)
---|---|---|
1672564
|
#!/usr/bin/env python
import unittest
import Factorial
class FactorialTest(unittest.TestCase):
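# Results file: each test appends one CSV row of the form <pass flag (1/0)>,<failure message>,<test number>.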
f=open('Factorial_Python_results.csv','w')
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
FactorialTest.f.close()
def setUp(self):
pass
def test_1(self):
if Factorial.factorial(1)==1:
FactorialTest.f.write("1,,1\n")
else:
FactorialTest.f.write("0,Unsuccessful normal factorial generation,1\n")
self.assertEqual(Factorial.factorial(1),1)
def test_2(self):
if Factorial.factorial(2)==2:
FactorialTest.f.write("1,,2\n")
else:
FactorialTest.f.write("0,Zero factorial generation improper,2\n")
self.assertEqual(Factorial.factorial(2),2)
def test_3(self):
if Factorial.factorial(3)==6:
FactorialTest.f.write("1,,3\n")
else:
FactorialTest.f.write("0,3 factorial failure,3\n")
self.assertEqual(Factorial.factorial(3),6)
def test_4(self):
if Factorial.factorial(4)==24:
FactorialTest.f.write("1,,4\n")
else:
FactorialTest.f.write("0,Unsuccessful normal factorial generation,4\n")
self.assertEqual(Factorial.factorial(4),24)
def test_0(self):
if Factorial.factorial(0)==1:
FactorialTest.f.write("1,,5\n")
else:
FactorialTest.f.write("0,Zero factorial generation improper,5\n")
self.assertEqual(Factorial.factorial(0),1)
def test_7(self):
if Factorial.factorial(7)==5040:
FactorialTest.f.write("1,,6\n")
else:
FactorialTest.f.write("0,Large factorial failue,6\n")
self.assertEqual(Factorial.factorial(7),5040)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
1634586
|
from django.test import TestCase
from dj_emailauth.models import User
class UserModelTest(TestCase):
def setUp(self):
self.user = User.objects.create_user(
password="password", email="<EMAIL>"
)
self.user.full_clean()
self.superuser = User.objects.create_superuser(
password="password", email="<EMAIL>"
)
self.superuser.full_clean()
def test_string_representation(self):
self.assertEqual(str(self.user), "<EMAIL>")
self.assertEqual(str(self.superuser), "<EMAIL>")
def test_superuser(self):
self.assertTrue(self.superuser.is_staff)
self.assertTrue(self.superuser.is_superuser)
self.assertTrue(self.superuser.is_active)
def test_user(self):
self.assertFalse(self.user.is_staff)
self.assertFalse(self.user.is_superuser)
self.assertTrue(self.user.is_active)
def test_email_normalize(self):
user = User.objects.create_user(
password="password", email="<EMAIL>"
)
self.assertEqual(str(user), "<EMAIL>")
|
StarcoderdataPython
|
1607780
|
# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
{
'includes': [
'build/common.gypi',
'audio/webrtc_audio.gypi',
'call/webrtc_call.gypi',
'video/webrtc_video.gypi',
],
'targets': [
{
'target_name': 'webrtc',
'type': 'static_library',
'sources': [
'audio_receive_stream.h',
'audio_send_stream.h',
'audio_state.h',
'call.h',
'config.h',
'transport.h',
'video_receive_stream.h',
'video_send_stream.h',
'<@(webrtc_audio_sources)',
'<@(webrtc_call_sources)',
'<@(webrtc_video_sources)',
],
'dependencies': [
'common.gyp:*',
'<@(webrtc_audio_dependencies)',
'<@(webrtc_call_dependencies)',
'<@(webrtc_video_dependencies)',
'rtc_event_log',
],
'conditions': [
# TODO(andresp): Chromium should link directly with this and no if
# conditions should be needed on webrtc build files.
['build_with_chromium==1', {
'dependencies': [
'<(webrtc_root)/modules/modules.gyp:video_capture',
],
}],
],
},
{
'target_name': 'rtc_event_log',
'type': 'static_library',
'sources': [
'call/rtc_event_log.cc',
'call/rtc_event_log.h',
'call/rtc_event_log_helper_thread.cc',
'call/rtc_event_log_helper_thread.h',
],
'conditions': [
# If enable_protobuf is defined, we want to compile the protobuf
# and add rtc_event_log.pb.h and rtc_event_log.pb.cc to the sources.
['enable_protobuf==1', {
'dependencies': [
'rtc_event_log_proto',
],
'defines': [
'ENABLE_RTC_EVENT_LOG',
],
}],
],
},
], # targets
'conditions': [
['include_tests==1', {
'includes': [
'webrtc_tests.gypi',
],
}],
['enable_protobuf==1', {
'targets': [
{
# This target should only be built if enable_protobuf is defined
'target_name': 'rtc_event_log_proto',
'type': 'static_library',
'sources': ['call/rtc_event_log.proto',],
'variables': {
'proto_in_dir': 'call',
'proto_out_dir': 'webrtc/call',
},
'includes': ['build/protoc.gypi'],
},
{
'target_name': 'rtc_event_log_parser',
'type': 'static_library',
'sources': [
'call/rtc_event_log_parser.cc',
'call/rtc_event_log_parser.h',
],
'dependencies': [
'rtc_event_log_proto',
],
'export_dependent_settings': [
'rtc_event_log_proto',
],
},
],
}],
['include_tests==1 and enable_protobuf==1', {
'targets': [
{
'target_name': 'rtc_event_log2rtp_dump',
'type': 'executable',
'sources': ['call/rtc_event_log2rtp_dump.cc',],
'dependencies': [
'<(DEPTH)/third_party/gflags/gflags.gyp:gflags',
'rtc_event_log_parser',
'rtc_event_log_proto',
'test/test.gyp:rtp_test_utils'
],
},
],
}],
], # conditions
}
|
StarcoderdataPython
|
3374272
|
from typing import List, Tuple, Optional
from enum import Enum
import subprocess
import re
import time
from colorama import Fore
from test_base import TestData
from tests import TestSet
import os.path
class TestResult(Enum):
PASSED = 1
FAILED = 2
IGNORED = 3
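# Console color used for each test outcome.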
TestColors = {
TestResult.PASSED: Fore.GREEN,
TestResult.FAILED: Fore.RED,
TestResult.IGNORED: Fore.BLUE,
}
class Test:
"""
command(s)
┌─────────────┐-----------→┌─────────────────────┐-----------→┌──────────────┐
┌─│rotctl/hamlib│ │pipe/virtual ser. dev│ │ test program │─┐
│ └─────────────┘←-----------└─────────────────────┘←-----------└──────────────┘ │
│ │ │ response │ │ │
│ ↓ ↓ output output ↓ ↓ │
│ ┌──────┐┌──────┐ ┌──────┐┌──────┐ │
│ │stderr││stdout│──────┐ ┌──────│stderr││stdout│ │
│ └──────┘└──────┘ │ │ └──────┘└──────┘ │
│ ↓ ↓ ↓ ↓ │
│ must be empty expected lines expected lines: must be empty │
│ string repr. │
↓ of parsed commands ↓
return code and response return code
A test program receives data from rotctl and feeds the parser library under test.
The library parses the payload to a struct.
The interpreted struct string-representation is echoed to stdout.
If a response is expected, a dummy response is generated, sent back, and printed to stdout.
"""
def __init__(self, project_dir: str):
self.virtual_dev_lockfile = "{}/easycomm-endpoint-lockfile".format(project_dir)
self.pipe_endpoint_a = "{}/easycomm-endpoint-rotctl".format(project_dir)
self.pipe_endpoint_b = "{}/easycomm-endpoint-test-program".format(project_dir)
self.virtual_device_cmd = ["/usr/bin/socat", "-d", "-d", "-W{}".format(self.virtual_dev_lockfile),
"pty,raw,echo=0,link={}".format(self.pipe_endpoint_a),
"pty,raw,echo=0,link={}".format(self.pipe_endpoint_b)]
self.rotctl_cmd = [self._rotctl_file_path,
"--rot-file={}".format(self.pipe_endpoint_a),
"--set-conf=timeout=400,post_write_delay=0,write_delay=0"]
self.test_program_cmd = ["{}/.pio/build/native/program".format(project_dir), self.pipe_endpoint_b]
self.virtual_dev = None
self.test_program = None
self.rotctl = None
self.timestamp_start = None
self.timestamp_end = None
self.rotctl_version = self._get_rotctl_version()
@property
def _rotctl_file_path(self) -> Optional[str]:
candidates = ["/usr/local/bin/rotctl", "/usr/bin/rotctl"]
for f in candidates:
if os.path.exists(f):
return f
return None
def run(self, test_data: TestData) -> Tuple[TestResult, float]:
self.timestamp_start = time.time()
print("test: -------------------- [test {}start{} - \"{}\"] --------------------"
.format(Fore.GREEN, Fore.RESET, test_data.description))
if re.fullmatch(test_data.allowed_rotctl_versions, self.rotctl_version) is None:
print("test: found rotctl version \"{}\" version but applicable is \"{}\""
.format(self.rotctl_version, test_data.allowed_rotctl_versions))
print("test: ignore test: rotctl version not accepted")
test_result = TestResult.IGNORED
else:
print("test: found applicable rotctl version \"{}\" version, applicable is \"{}\""
.format(self.rotctl_version, test_data.allowed_rotctl_versions))
self._set_up()
self.rotctl_cmd.extend(test_data.rotctl_extra_program_cli_args)
print("test: send \"{}\" to \"{}\"".format(test_data.rotctl_commands, " ".join(self.rotctl_cmd)))
self.rotctl = subprocess.Popen(self.rotctl_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, text=True)
test_program_transaction, rotctl_transaction = self._test_transaction(
"{}".format(test_data.rotctl_commands))
test_result = TestResult.PASSED if Test.verify_process_output(
"test program",
test_data.expected_test_program_stdout_lines,
test_data.allowed_test_program_return_codes,
*test_program_transaction) and Test.verify_process_output(
"rotctl",
test_data.expected_rotctl_stdout_lines,
test_data.allowed_rotctl_return_codes,
*rotctl_transaction) else TestResult.FAILED
self._tear_down()
duration = time.time() - self.timestamp_start
print("test: -------------------- [test {}{}{} - \"{}\"] ---------------------"
.format(TestColors[test_result], test_result.name, Fore.RESET, test_data.description))
return test_result, duration
def _set_up(self) -> None:
print("test: setup ...")
print("test: prepare virtual device: \"{}\"".format(" ".join(self.virtual_device_cmd)))
self.virtual_dev = subprocess.Popen(self.virtual_device_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
text=True)
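# socat creates the pty symlinks asynchronously; poll for up to ~2 s until both endpoints exist.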
def wait_until_file_exists(f: str) -> bool:
retry = 16
while not os.path.exists(f) and retry >= 0:
retry -= 1
time.sleep(0.125)
if not os.path.exists(f):
print("test: failed to wait for endpoint: \"{}\"".format(f))
return False
else:
return True
wait_until_file_exists(self.pipe_endpoint_a)
wait_until_file_exists(self.pipe_endpoint_b)
print("test: start test program: \"{}\"".format(" ".join(self.test_program_cmd)))
self.test_program = subprocess.Popen(self.test_program_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
text=True)
print("test: test program PID={}".format(self.test_program.pid))
def _tear_down(self) -> None:
print("test: tear down")
for p in [self.rotctl, self.test_program, self.virtual_dev]:
p.terminate()
@staticmethod
def verify_process_output(process_name: str, expected_lines: List[str], allowed_ret_codes: List[int],
stdout_lines: List[str], stderr_lines: List[str], actual_ret_code) -> bool:
print("test: verify {} output ...".format(process_name))
has_passed = True
if actual_ret_code not in allowed_ret_codes:
has_passed = False
print("test: verify {} failed: allowed return codes {} but found {}"
.format(process_name, allowed_ret_codes, actual_ret_code))
if len(stderr_lines) > 0:
has_passed = False
print("test: verify {} failed: found {} stderr lines but 0 expected"
.format(process_name, len(stderr_lines)))
for line in stderr_lines:
print("test: \"{}\"".format(line))
len_expected, len_actual = len(expected_lines), len(stdout_lines)
if len_expected != len_actual:
has_passed = False
print("test: verify {} failed: expected {} stdout lines but found {}"
.format(process_name, len_expected, len_actual))
else:
for expected_line, actual_line in zip(expected_lines, stdout_lines):
if re.fullmatch(expected_line, actual_line) is None:
has_passed = False
print("test: verify {} failed: expected line does not match: expected=\"{}\" vs. actual=\"{}\""
.format(process_name, expected_line, actual_line))
if not has_passed:
print("test: expected {} stdout lines:".format(process_name))
for line in expected_lines:
print("test: \"{}\"".format(line))
print("test: actual {} stdout lines:".format(process_name))
for line in stdout_lines:
print("test: \"{}\"".format(line))
if has_passed:
print("test: verify {} output: OK".format(process_name))
return has_passed
def _test_transaction(self, rotctl_commands: str) \
-> Tuple[Tuple[List[str], List[str], int], Tuple[List[str], List[str], int]]:
print("test: transaction ...")
try:
rotctl_stdout, rotctl_stderr = self.rotctl.communicate(rotctl_commands, timeout=3)
rotctl_ret_code = self.rotctl.returncode
except subprocess.TimeoutExpired: # OK if process already finished
self.rotctl.kill()
rotctl_stdout, rotctl_stderr = self.rotctl.communicate()
rotctl_ret_code = self.rotctl.returncode
try:
test_program_stdout, test_program_stderr = self.test_program.communicate(timeout=3)
test_program_ret_code = self.test_program.returncode
except subprocess.TimeoutExpired: # OK if process already finished
self.test_program.kill()
test_program_stdout, test_program_stderr = self.test_program.communicate()
test_program_ret_code = self.test_program.returncode
return (self._lines_from_stream(test_program_stdout),
self._lines_from_stream(test_program_stderr),
test_program_ret_code), \
(self._lines_from_stream(rotctl_stdout),
self._lines_from_stream(rotctl_stderr),
rotctl_ret_code)
@staticmethod
def _lines_from_stream(lines_as_byte_array):
if lines_as_byte_array is None:
return []
else:
return str(lines_as_byte_array.strip()).split('\n') if len(lines_as_byte_array) > 0 else []
def _get_rotctl_version(self) -> str:
rotctl = subprocess.Popen([self._rotctl_file_path, "--version"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
try:
stdout, stderr = rotctl.communicate(timeout=3)
except subprocess.TimeoutExpired:
rotctl.kill()
stdout, stderr = rotctl.communicate()
stderr_lines = self._lines_from_stream(stderr)
if len(stderr_lines) > 0:
print("test: failed to get rotctl version")
for line in stderr_lines:
print("test: \"{}\"".format(line))
stdout_lines = self._lines_from_stream(stdout)
if len(stdout_lines) > 0:
return stdout_lines[0]
return ""
class TestRunner:
def __init__(self, project_dir):
self.project_dir = project_dir
self.tests = TestSet.test_set()
def run(self):
results = [(test_data.description, Test(self.project_dir).run(test_data))
for test_data in self.tests]
print("test: test summary:")
passed, failed, ignored = 0, 0, 0
total_duration = 0
for description, (test_result, duration) in results:
total_duration += duration
if test_result is TestResult.PASSED:
print("test: {}PASSED{} in {:.2f}s ... \"{}\" "
.format(TestColors[TestResult.PASSED], Fore.RESET, duration, description))
passed += 1
elif test_result is TestResult.FAILED:
print("test: {}FAILED{} in {:.2f}s ... \"{}\""
.format(TestColors[TestResult.FAILED], Fore.RESET, duration, description))
failed += 1
else:
print("test: {}IGNORED{} in {:.2f}s ... \"{}\""
.format(TestColors[TestResult.IGNORED], Fore.RESET, duration, description))
ignored += 1
color = TestColors[TestResult.FAILED]
if failed <= 0:
color = TestColors[TestResult.IGNORED]
if ignored <= 0:
color = TestColors[TestResult.PASSED]
print("{}test: {} failed, {} passed, {} ignored in {:.2f}s (total {} tests){}"
.format(color, failed, passed, ignored, total_duration, passed + failed + ignored, Fore.RESET))
if failed > 0 or passed <= 0:
return 1
else:
return 0
|
StarcoderdataPython
|
1798493
|
<filename>app/starling/routes.py
import base64
import hashlib
import json
from flask import request
from dateutil import parser
from app import db
from app.helpers import json_response
from app.starling import bp
from app.starling.models import StarlingTransaction
from app.users.models import User
@bp.route('/webhook/<string:uuid>', methods=['POST'])
def webhook(uuid):
user = User.query.filter_by(uuid=uuid).first()
if user is None:
return json_response(404, {
'success': False,
'message': 'User does not exist'
})
body = request.get_data(as_text=True)
signature = str(request.headers.get('X-Hook-Signature'))
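# Verify the webhook: SHA-512 the webhook secret concatenated with the raw body, base64-encode the digest, and compare it with the X-Hook-Signature header.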
hash = hashlib.sha512(str(user.starling_webhook_secret + body).encode('utf-8'))
encoded = base64.b64encode(hash.digest()).decode("utf-8")
print('--- THEIR SIGNATURE ---')
print(signature)
print('--- OUR SIGNATURE ---')
print(encoded)
print('---------------')
# TODO: test this with actual request
if False and signature != encoded:
return json_response(403, {
'success': False,
'message': 'Invalid signature'
})
json_data = json.loads(body)
trans_data = {
'user_id': user.id,
'transaction_uid': json_data['content']['transactionUid'],
'amount': json_data['content']['amount'],
'transaction_type': json_data['content']['type'],
'payee': json_data['content']['counterParty'],
'transaction_date': parser.parse(json_data['timestamp']),
}
trans = StarlingTransaction.query.filter_by(
transaction_uid=trans_data['transaction_uid']
).first()
status = 200
if trans is None:
trans = StarlingTransaction(**trans_data)
db.session.add(trans)
status = 201
else:
trans.update(trans_data)
db.session.merge(trans)
db.session.commit()
return json_response(status, {'success': True})
|
StarcoderdataPython
|
4801188
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Version of WMCore/Services/Rucio intended to be used with mock or unittest.mock
"""
from __future__ import print_function, division
from future.utils import listitems
# from builtins import object # avoid importing this, it breaks things
import json
import logging
import os
import sys
from WMCore.Services.DBS.DBS3Reader import DBS3Reader, DBSReaderError
from WMCore.Services.Rucio.Rucio import WMRucioException, WMRucioDIDNotFoundException
from WMCore.WMBase import getTestBase
from WMQuality.Emulators.DataBlockGenerator.DataBlockGenerator import DataBlockGenerator
PROD_DBS = 'https://cmsweb-prod.cern.ch/dbs/prod/global/DBSReader'
NOT_EXIST_DATASET = 'thisdoesntexist'
# PILEUP_DATASET = '/HighPileUp/Run2011A-v1/RAW'
PILEUP_DATASET = '/GammaGammaToEE_Elastic_Pt15_8TeV-lpair/Summer12-START53_V7C-v1/GEN-SIM'
SITES = ['T2_XX_SiteA', 'T2_XX_SiteB', 'T2_XX_SiteC']
_BLOCK_LOCATIONS = {}
BLOCKS_PER_DATASET = 2
FILES_PER_BLOCK = 5
FILES_PER_DATASET = BLOCKS_PER_DATASET * FILES_PER_BLOCK
mockFile = os.path.join(getTestBase(), '..', 'data', 'Mock', 'RucioMockData.json')
try:
with open(mockFile, 'r') as jsonObj:
MOCK_DATA = json.load(jsonObj)
except IOError:
MOCK_DATA = {}
def _unicode(data):
"""
Temporary workaround for problem with how unicode strings are represented
by unicode (as u"hello worls") and future.types.newstr (as "hello world")
https://github.com/dmwm/WMCore/pull/10299#issuecomment-781600773
"""
if sys.version_info[0] == 2:
return unicode(data)
else:
return str(data)
class MockRucioApi(object):
"""
Version of WMCore/Services/Rucio intended to be used with mock or unittest.mock
"""
def __init__(self, acct, hostUrl=None, authUrl=None, configDict=None):
print("Using MockRucioApi: acct={}, url={}, authUrl={}".format(acct, hostUrl, authUrl))
configDict = configDict or {}
self.dataBlocks = DataBlockGenerator()
self.subRequests = {}
def sitesByBlock(self, block):
"""
Centralize the algorithm to decide where a block is based on the hash name
:param block: the name of the block
:return: a fake list of sites where the data is
"""
logging.info("%s: Calling mock sitesByBlock", self.__class__.__name__)
if hash(block) % 3 == 0:
sites = ['T2_XX_SiteA']
elif hash(block) % 3 == 1:
sites = ['T2_XX_SiteA', 'T2_XX_SiteB']
else:
sites = ['T2_XX_SiteA', 'T2_XX_SiteB', 'T2_XX_SiteC']
return sites
def __getattr__(self, item):
"""
__getattr__ gets called in case lookup of the actual method fails. We use this to return data based on
a lookup table
:param item: The method name the user is trying to call
:return: The generic lookup function
"""
def genericLookup(*args, **kwargs):
"""
This function returns the mocked DBS data
:param args: positional arguments it was called with
:param kwargs: named arguments it was called with
:return: the dictionary that DBS would have returned
"""
logging.info("%s: Calling mock genericLookup", self.__class__.__name__)
for key, value in listitems(kwargs):
# json dumps/loads converts strings to unicode strings, do the same with kwargs
if isinstance(value, str):
kwargs[key] = _unicode(value)
if kwargs:
signature = '%s:%s' % (item, sorted(kwargs.items()))
else:
signature = item
try:
return MOCK_DATA[signature]
except KeyError:
msg = "Rucio mock API failed to find key for signature: {}".format(signature)
raise KeyError(msg)
return genericLookup
def getDataLockedAndAvailable(self, **kwargs):
"""
Mock the method to discover where data is locked and available.
Note that, by default, it will not return any Tape RSEs.
:return: a unique list of RSEs
"""
logging.info("%s: Calling mock getDataLockedAndAvailable", self.__class__.__name__)
if 'name' not in kwargs:
raise WMRucioException("A DID name must be provided to the getBlockLockedAndAvailable API")
if self.isContainer(kwargs['name']):
# then resolve it at container level and all its blocks
return self.getContainerLockedAndAvailable(**kwargs)
if 'grouping' in kwargs:
# long strings seem not to be working, like ALL / DATASET. Make it short!
kwargs['grouping'] = kwargs['grouping'][0]
# TODO: either grouping or returnTape should change this response...
returnTape = kwargs.pop("returnTape", False)
rses = set()
if kwargs['name'].split('#')[0] == PILEUP_DATASET:
# Pileup is at a single site
sites = ['T2_XX_SiteC']
_BLOCK_LOCATIONS[kwargs['name']] = sites
else:
sites = self.sitesByBlock(block=kwargs['name'])
_BLOCK_LOCATIONS[kwargs['name']] = sites
rses.update(sites)
return list(rses)
def getPileupLockedAndAvailable(self, container, account, scope="cms"):
"""
Mock method to resolve where the pileup container (and all its blocks)
is locked and available.
"""
logging.info("%s: calling mock getPileupLockedAndAvailable", self.__class__.__name__)
result = dict()
if not self.isContainer(container):
raise WMRucioException("Pileup location needs to be resolved for a container DID type")
kwargs = dict(name=container, account=account, scope=scope)
try:
DBS3Reader(PROD_DBS).checkDatasetPath(kwargs['name'])
blocks = DBS3Reader(PROD_DBS).listFileBlocks(dataset=kwargs['name'])
for block in blocks:
result[block] = self.sitesByBlock(block)
except DBSReaderError:
logging.error("%s: Failed to fetch blocks from DBS", self.__class__.__name__)
return result
def getContainerLockedAndAvailable(self, **kwargs):
"""
Mock the method to discover where container data is locked and available.
Note that, by default, it will not return any Tape RSEs.
:return: a unique list of RSEs
"""
logging.info("%s: Calling mock getContainerLockedAndAvailable", self.__class__.__name__)
if 'name' not in kwargs:
raise WMRucioException("A DID name must be provided to the getContainerLockedAndAvailable API")
kwargs.setdefault("scope", "cms")
if kwargs['name'] == PILEUP_DATASET:
return ['T2_XX_SiteA', 'T2_XX_SiteB', 'T2_XX_SiteC']
try:
DBS3Reader(PROD_DBS).checkDatasetPath(kwargs['name'])
blocks = DBS3Reader(PROD_DBS).dbs.listBlocks(dataset=kwargs['name'])
singleBlock = blocks[0]['block_name']
return self.sitesByBlock(singleBlock)
except DBSReaderError:
return []
def isContainer(self, didName, scope='cms'):
"""
Mock check for whether a DID name corresponds to a container type or not,
by simply relying on the naming convention
:param didName: string with the DID name
:param scope: string containing the Rucio scope (defaults to 'cms')
:return: True if the DID is a container, else False
"""
# TODO: figure use cases where we need to raise this exception
if didName == "a bad DID name yet to be defined":
msg = "Data identifier not found in MockRucio: {}".format(didName)
raise WMRucioDIDNotFoundException(msg)
return "#" not in didName
|
StarcoderdataPython
|
124676
|
"""
Created on Mon Feb 1 10:08:31 2016
"""
#------------------------------------------------------------------------------
#CHAPTER 6: The Finite-Element Method
#------------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
# Basic parameters
nt = 1000 # number of time steps
vs = 3000 # acoustic velocity
ro0 = 2500 # Density
isnap = 250 # snapshot frequency
nx = 1000 # number of grid points
isx = 500 # source location
xmax = 10000.
eps = 0.5 # stability limit
dx = xmax/(nx-1) # calculate space increment
x = np.arange(0, nx)*dx # initialize space coordinates
x = x.T
h = np.diff(x) # Element sizes
# parameters
ro = x*0 + ro0
mu = x*0 + ro*vs**2
# time step from stability criterion
dt = 0.5*eps*dx/np.max(np.sqrt(mu/ro))
# source time function
pt = 20*dt
t = np.arange(1, nt+1)*dt # initialize time axis
t0 = 3*pt
src = -1/pt**2*(t-t0)*np.exp(-1/pt**2*(t-t0)**2)
# Source vector
f = np.zeros(nx); f[isx:isx+1] = f[isx:isx+1] + 1.
# Stiffness matrix Kij
K = np.zeros((nx,nx))
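# Linear elements on a 1D mesh give a tridiagonal stiffness matrix: diagonal mu[i-1]/h[i-1] + mu[i]/h[i], off-diagonals -mu/h.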
for i in range(1, nx-1):
for j in range(1, nx-1):
if i==j:
K[i,j] = mu[i-1]/h[i-1] + mu[i]/h[i]
elif i==j+1:
K[i,j] = -mu[i-1]/h[i-1]
elif i+1==j:
K[i,j] = -mu[i]/h[i]
else:
K[i,j] = 0
# Corner element
K[0,0] = mu[0]/h[0]
K[nx-1,nx-1] = mu[nx-1]/h[nx-2]
#%% CODE 10: Listing 6.4 Mass matrix with varying element size - Pag 147
# Mass matrix M_ij
M = np.zeros((nx,nx))
for i in range(1, nx-1):
for j in range (1, nx-1):
if j==i:
M[i,j] = (ro[i-1]*h[i-1] + ro[i]*h[i])/3
elif j==i+1:
M[i,j] = ro[i]*h[i]/6
elif j==i-1:
M[i,j] = ro[i-1]*h[i-1]/6
else:
M[i,j] = 0
# Corner element
M[0,0] = ro[0]*h[0]/3
M[nx-1,nx-1] = ro[nx-1]*h[nx-2]/3
# Invert M
Minv = np.linalg.inv(M)
# Initialize FD matrices for comparison in the regular grid case
Mf = np.zeros((nx,nx), dtype=float)
D = np.zeros((nx,nx), dtype=float)
dx = h[1]
for i in range(nx):
Mf[i,i] = 1./ro[i]
if i>0:
if i<nx-1:
D[i+1,i] =1
D[i-1,i] =1
D[i,i] = -2
D = ro0*vs**2*D/dx**2
# Initialize fields
u = np.zeros(nx)
uold = np.zeros(nx)
unew = np.zeros(nx)
U = np.zeros(nx)
Uold = np.zeros(nx)
Unew = np.zeros(nx)
fig = plt.figure(figsize=(14,8), dpi=80)
fig.suptitle("1D Elastic wave solution", fontsize=16)
iplot = 0
#%% CODE 09: Listing 6.3 Time extrapolation - Pag 147
# CODE 11: Listing 6.5 1D elastic case _ Pag 148
# Time extrapolation
for it in range(nt):
# Finite Difference Method
Unew = (dt**2)*Mf @ (D @ U + f/dx*src[it]) + 2*U - Uold
Uold, U = U, Unew
# Finite Element Method
unew = (dt**2)*Minv @ (f*src[it] - K @ u) + 2*u - uold
uold, u = u, unew
# Display both
if np.mod(it+1, isnap) == 0:
# extract window
xc = 500*dx + it*dt*vs - 150
xd = 300
iplot += 1
plt.subplot(4,1,iplot)
L1 = plt.plot(x, u, label='FEM')
L2 = plt.plot(x, U, label='FDM')
plt.legend()
plt.text(xc+1.5*xd, 0.00000002, '%d m' %(xc-500*dx))
plt.savefig('Fig_6.10.png')
plt.show()
|
StarcoderdataPython
|
24856
|
import logging
import sys
import os
from logging.handlers import RotatingFileHandler
from multiprocessing.pool import ThreadPool
from optparse import OptionParser
import requests
from requests.packages import urllib3
urllib3.disable_warnings()
# Workers configurations
ASYNC_WORKERS_COUNT = 100 # How many threads will make http requests.
WORKERS_DECREMENTED_COUNT_ON_ERROR = 10 # Retry the fuzzing with x fewer workers to decrease the load on the server.
STARTED_JOB_LOG_INTERVAL = 100 # Every x started jobs, a log will be written
# IO Configurations
DEFAULT_PATHS_LIST_FILE = 'words_lists/Filenames_or_Directories_Common.wordlist'
VALID_ENDPOINTS_FILE = 'endpoints.txt'
# HTTP Configuration
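# Treat any 2xx status, plus 401/402/403 (resource exists but requires auth/payment), as evidence the endpoint exists.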
RESOURCE_EXISTS_STATUS_CODES = list(range(200, 300)) + [401, 402, 403]
DEFAULT_BASE_URL = 'https://www.example.com'
# Logging configurations
LOGS_DIRECTORY_FULL_NAME = 'logs'
LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
LOGGING_LEVEL = logging.INFO
BACKUP_LOGS_FILES_COUNT = 5
FUZZING_LOGGER_NAME = 'fuzzing'
LOG_FILE_MAX_BYTES = 0.5 * 1000 * 1000 # 500 KB
class FilesFactory(object):
"""
Manage files and directories
"""
files = []
urls = []
def read_files_from_directory(self, user_path):
self.files = [os.path.join(user_path, f) for f in os.listdir(user_path) if os.path.isfile(os.path.join(user_path, f))]
def read_lines_from_files(self):
for l in self.files:
h = open(l, 'r')
self.urls += h.read().splitlines()
def __init__(self,user_path):
if os.path.isdir(user_path):
self.read_files_from_directory(user_path)
self.read_lines_from_files()
elif(os.path.isfile(user_path)):
self.files.append(user_path)
self.read_lines_from_files()
class LoggerFactory(object):
"""
Manages loggers
"""
loggers = {}
logging_level = LOGGING_LEVEL
logging.basicConfig(stream=sys.stdout, level=logging_level,
format=LOG_FORMAT)
# Modifying the logger's level to ERROR to prevent console spam
logging.getLogger('urllib3').setLevel(logging.WARNING)
@staticmethod
def get_logger(logger_name):
"""
Gets a logger by its name. Creates the logger if it doesn't exist yet.
:param logger_name: The name of the logger (identifier).
:return: The logger instance.
:returns: Logger
"""
if logger_name not in LoggerFactory.loggers:
LoggerFactory.loggers[logger_name] = LoggerFactory._get_logger(logger_name)
return LoggerFactory.loggers[logger_name]
@staticmethod
def _get_logger(logger_name, logs_directory_path=LOGS_DIRECTORY_FULL_NAME):
"""
Creates a logger with rolling file handler,
Or returns the logger if it already exists.
:param logger_name: The name of the logger
:param logs_directory_path: The path of the directory that the logs will be written to.
:return: An initialized logger instance.
returns: Logger
"""
# Creating the logs folder if it doesn't exist
if not os.path.exists(logs_directory_path):
os.mkdir(logs_directory_path)
logger = logging.getLogger(logger_name)
formatter = logging.Formatter(LOG_FORMAT)
# Adding a rotating file handler
rotating_file_handler = RotatingFileHandler(
os.path.join(logs_directory_path, '{0}.log'.format(logger_name)), maxBytes=LOG_FILE_MAX_BYTES,
backupCount=BACKUP_LOGS_FILES_COUNT)
rotating_file_handler.setFormatter(formatter)
rotating_file_handler.setLevel(LOGGING_LEVEL)
logger.addHandler(rotating_file_handler)
return logger
class AsyncURLFuzzer(object):
"""
An asynchronous http(s) website endpoint locator.
Discovers active endpoints in websites, based on a list of common URLS.
"""
def __init__(self, base_url=DEFAULT_BASE_URL, list_file=DEFAULT_PATHS_LIST_FILE,
async_workers_count=ASYNC_WORKERS_COUNT,
output_file=VALID_ENDPOINTS_FILE, resource_exists_status_codes=RESOURCE_EXISTS_STATUS_CODES):
"""
Initializes a new member of this class.
:param base_url: The base url of the website.
:type base_url: str
:param list_file: The path of a file, containing the paths to check.
:type list_file: str
:param async_workers_count: How many workers (threads) to use.
:type async_workers_count: int
:param output_file: The name of the active endpoints output file.
:type output_file: str
:param resource_exists_status_codes: A list of HTTP status codes to consider as valid.
:type resource_exists_status_codes: list
"""
self._logger = LoggerFactory.get_logger(FUZZING_LOGGER_NAME)
self._base_url = base_url
self._list_file_path = list_file
self._async_workers_count = async_workers_count
self._output_file_path = output_file
self._resource_exists_status_codes = resource_exists_status_codes
self._active_paths_status_codes = {}
self._checked_endpoints = {}
self._endpoints_total_count = 0
self._session = requests.session()
def start(self):
"""
Starts the fuzzing with the initialized parameters.
"""
self._get_website_endpoints()
def _get_website_endpoints(self, async_workers_count=ASYNC_WORKERS_COUNT):
"""
Requests asynchronously for all the resources with a number of workers (threads).
If it fails for HTTP overload reasons, it retries with fewer workers, because the failure is
probably caused by a DDoS protection mechanism.
:param async_workers_count: How many workers (threads) to use.
:type async_workers_count: int
"""
self._load_paths_list()
self._logger.info(
'Getting the endpoints of the website {0} with list file "{1}" and {2} async workers.'.format(
self._base_url,
self._list_file_path,
async_workers_count))
if 0 >= async_workers_count:
self._logger.error('Seems like the site does not support fuzzing, as it has a DDOS protection engine.')
return
pool = ThreadPool(async_workers_count)
try:
tasks = []
self._logger.debug('Preparing the workers...')
for i, path in enumerate(self._paths):
self._logger.debug('Started a worker for the endpoint {0}'.format(path))
if i > 0 and i % STARTED_JOB_LOG_INTERVAL == 0:
self._logger.info('Started {0} workers'.format(i))
path = path.strip()
full_path = '/'.join([self._base_url, path])
tasks.append(pool.apply_async(self.request_head, (full_path, path)))
for t in tasks:
status_code, full_path, path = t.get()
self._checked_endpoints[path] = path
if self._is_valid_status_code(status_code):
self._active_paths_status_codes[path] = status_code
self._logger.info(
'Fetched {0}/{1}; {2}; {3}'.format(len(self._checked_endpoints), self._endpoints_total_count,
status_code,
full_path))
self._save_output_log()
except requests.ConnectionError as e:
pool.terminate()
self._logger.error(e)
self._logger.warning('An error occurred while fuzzing.'
' Retrying with fewer async workers to reduce the server load.')
retry_workers_count = async_workers_count - WORKERS_DECREMENTED_COUNT_ON_ERROR
self._get_website_endpoints(retry_workers_count)
def _is_valid_status_code(self, status_code):
"""
Checks whether an HTTP status code implies that the resource exists.
:param status_code: The HTTP status code to check.
:return: True if the status code implies that the resource exists, False otherwise.
"""
return status_code in self._resource_exists_status_codes
def _save_output_log(self):
"""
Saves the results to an output file.
"""
full_status_codes = {'/'.join([self._base_url, p]): code for p, code in self._active_paths_status_codes.items()}
output_lines = ['{0} : {1}'.format(path, code) for path, code in full_status_codes.items()]
if not output_lines:
self._logger.warning(
'There were no discovered endpoints. Consider using a different file from the "words_lists" directory')
self._logger.info('The following endpoints are active:{0}{1}'.format(os.linesep, os.linesep.join(output_lines)))
with open(self._output_file_path, 'a+') as output_file:
output_lines.sort()
output_file.write(os.linesep.join(output_lines))
self._logger.info('The endpoints were exported to "{0}"'.format(self._output_file_path))
def _load_paths_list(self):
"""
Loads the list of paths from the configured status.
"""
if not os.path.exists(self._list_file_path):
raise FileNotFoundError('The file "{0}" does not exist.'.format(self._list_file_path))
with open(self._list_file_path) as paths_file:
paths = [p.strip().lstrip('/').rstrip('/') for p in paths_file.readlines()]
paths = [p for p in paths if p not in self._active_paths_status_codes]
if not self._endpoints_total_count:
self._endpoints_total_count = len(paths)
self._paths = paths
def request_head(self, url, path):
"""
Executes a http HEAD request to a url.
:param url: The full url to contact.
:param path: The uri of the request.
:return: A tuple of 3 variables:
the received status code (int),
the url argument (str),
the path argument (str).
"""
if url != '':
res = self._session.head(url, verify=False, allow_redirects=True)
return res.status_code, url, path
if __name__ == '__main__':
# Parsing the parameters.
parser = OptionParser(description=
'An Asynchronous, robust websites endpoint discovery tool with smart error handling. '
'Locates resources in websites based on a list of paths. '
'Check out the "words_list"" directory for lists examples.',
usage='%prog -u https://example.com/', version='%prog 0.1')
parser.add_option('-u', '--url', dest='base_url', help='The target website to scan.', default=DEFAULT_BASE_URL)
parser.add_option('-l', '--list', dest='list_file', help='A file containing the paths to check (separated with lines).',
default=DEFAULT_PATHS_LIST_FILE)
(options, args) = parser.parse_args()
list_file = options.list_file
base_url = options.base_url
if base_url is None:
parser.print_help()
sys.exit()
# Suspending warning logs from requests and urllib3
logging.getLogger("urllib3").setLevel(logging.ERROR)
logging.getLogger("requests").setLevel(logging.ERROR)
if (os.path.isdir(base_url) or os.path.isfile(base_url)):
FilesFactory(base_url)
for u in FilesFactory.urls:
fuzzer = AsyncURLFuzzer(u, list_file)
fuzzer.start()
else:
fuzzer = AsyncURLFuzzer(base_url, list_file)
fuzzer.start()
|
StarcoderdataPython
|
1690164
|
__author__ = 'steffenfb'
import re
from bs4 import BeautifulSoup
import json
def cookieToOneLine():
file = open('cookie.txt','r')
content = file.read()
clean = content.replace('\n','')
clean = content.replace('\"','\'')
file = open('cookie.txt','w')
file.write(clean)
file.close()
testStr = 'for (;;); {"t":"fullReload","seq":80}'
def cleanStringToOnlyNumbersAndLetters(input):
return re.sub('[^a-z0-9]','',input)
#cleanstring = cleanStringToOnlyNumbersAndLetters(testStr)
def getSeqNumber(input):
cleanstring = cleanStringToOnlyNumbersAndLetters(input)
cleanstring = cleanstring.split('seq')[1]
res = re.search('\d{1,3}',cleanstring)
#print 'found '+str(res.start())+' and '+str(res.end())
return cleanstring[res.start():res.end()]
#re.search('\d+',cleanstring).string
def htmlToJsonCleaner(html):
soup = BeautifulSoup(html,'html.parser')
page = soup.body.pre.contents[0]
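# The 'for (;;); ' prefix is an anti-JSON-hijacking guard and must be stripped before json.loads can parse the payload.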
if 'for (;;); ' in page:
page = page[9:]
print(page)
jsonObj = json.loads(page)
return jsonObj
#htmlToJsonCleaner(file('jsondump.json','r').read())
|
StarcoderdataPython
|
130598
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="discovery_imaging_utils", # Replace with your own username
version="v0.1.4",
author="<NAME>",
author_email="<EMAIL>",
description="A package to aid in resting-state fMRI analysis",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/erikglee/discovery_imaging_utils",
download_url="https://github.com/erikglee/discovery_imaging_utils/archive/v0.1.4.tar.gz",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
|
StarcoderdataPython
|
1626653
|
<gh_stars>1-10
class f:
def __init__(self,t,q,p):
self.t=t;self.q=q;self.p=p
input=__import__('sys').stdin.readline
n,x=map(int,input().split());d={};a=[]
for _ in range(n):
u,t,q,p=input().split()
if u not in d.keys():
d[u]=f(t,int(q),int(p))
else:
if d[u].q<int(q) or (d[u].q==int(q) and d[u].p>int(p)):
d[u]=f(t,int(q),int(p))
for i in d.values():
a.append(i)
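# Sort by quantity descending, then price ascending, and output the titles of the first x entries.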
a.sort(key=lambda v:(-v.q, v.p));r=""
for i in range(x):
r+=a[i].t+'\n'
print(r,end='')
|
StarcoderdataPython
|
126257
|
#!/usr/bin/env python
from .technical_analysis import TechnicalAnalysisStrategy
from hummingbot.strategy.asset_price_delegate import AssetPriceDelegate
from hummingbot.strategy.order_book_asset_price_delegate import OrderBookAssetPriceDelegate
from hummingbot.strategy.api_asset_price_delegate import APIAssetPriceDelegate
__all__ = [
"TechnicalAnalysisStrategy",
"AssetPriceDelegate",
"OrderBookAssetPriceDelegate",
"APIAssetPriceDelegate",
]
|
StarcoderdataPython
|
3238448
|
#!/usr/local/bin/python3.4
"""
## Copyright (c) 2015 SONATA-NFV, 2017 5GTANGO [, ANY ADDITIONAL AFFILIATION]
## ALL RIGHTS RESERVED.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
## Neither the name of the SONATA-NFV, 5GTANGO [, ANY ADDITIONAL AFFILIATION]
## nor the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## This work has been performed in the framework of the SONATA project,
## funded by the European Commission under Grant number 671517 through
## the Horizon 2020 and 5G-PPP programmes. The authors would like to
## acknowledge the contributions of their colleagues of the SONATA
## partner consortium (www.sonata-nfv.eu).
##
## This work has been performed in the framework of the 5GTANGO project,
## funded by the European Commission under Grant number 761493 through
## the Horizon 2020 and 5G-PPP programmes. The authors would like to
## acknowledge the contributions of their colleagues of the 5GTANGO
## partner consortium (www.5gtango.eu).
"""
import logging, datetime, uuid, time, json
from threading import Thread, Lock
import interfaces.sbi as sbi
from logger import TangoLogger
# INFORMATION
# mutex used to ensure one single access to ddbb (repositories) for the nsi records creation/update/removal
mutex_slice2db_access = Lock()
# Log definition to make the slice logs identifiable among the other possible 5GTango components.
LOG = TangoLogger.getLogger(__name__, log_level=logging.DEBUG, log_json=True)
TangoLogger.getLogger("sonataAdapter:nsi_translator", logging.DEBUG, log_json=True)
LOG.setLevel(logging.DEBUG)
################################## THREADs to manage slice requests #################################
# SEND NETWORK SLICE (NS) INSTANTIATION REQUEST
## Objective: send the instantiation request to the GTK
## Params: NSI - parameters given by the user.
class thread_ns_instantiate(Thread):
def __init__(self, nsi_json):
Thread.__init__(self)
self.NSI = {}
self.req = nsi_json
# Creates the json structure to request a NS instantiation.
def send_instantiation_request(self):
LOG.info("Instantiating Slice: " + self.req['nsiId'])
# NS requests information
data = {}
data['name'] = self.req['nsiId']
data['nst_id'] = self.req['nstId']
data['request_type'] = 'CREATE_SLICE'
if self.req['instantiation_params']:
data['instantiation_params'] = self.req['instantiation_params']
# Calls the function towards the GTK
LOG.info("NS Instantiation request JSON: " + str(data))
instantiation_response = sbi.net_serv_instantiate(data)
return instantiation_response
def update_nsi_notify_instantiate(self):
""" mutex_slice2db_access.acquire()
try:
jsonNSI = self.NSI
# updates the slice information before notifying the GTK
if (jsonNSI['nsi-status'] == "INSTANTIATING"):
jsonNSI['nsi-status'] = "INSTANTIATED"
# validates if any service has error status to apply it to the slice status
for service_item in jsonNSI['nsr-list']:
if (service_item['working-status'] == "ERROR"):
service_item['working-status'] = 'ERROR'
jsonNSI['nsi-status'] = "ERROR"
# updates NetSlice template usageState
if (jsonNSI['nsi-status'] == "INSTANTIATED"):
nst_descriptor = nst_catalogue.get_saved_nst(jsonNSI['nst-ref'])
if (nst_descriptor['nstd'].get('usageState') == "NOT_IN_USE"):
nstParameter2update = "usageState=IN_USE"
updatedNST_jsonresponse = nst_catalogue.update_nst(nstParameter2update, jsonNSI['nst-ref'])
elif (jsonNSI['nsi-status'] == "TERMINATING"):
jsonNSI['nsi-status'] = "TERMINATED"
# updates NetSlice template usageState if no other nsi is instantiated/ready
nsis_list = nsi_repo.get_all_saved_nsi()
all_nsis_terminated = True
for nsis_item in nsis_list:
if (nsis_item['nst-ref'] == jsonNSI['nst-ref'] and nsis_item['nsi-status'] in ["INSTANTIATED", "INSTANTIATING", "READY"]):
all_nsis_terminated = False
break
if (all_nsis_terminated):
nst_descriptor = nst_catalogue.get_saved_nst(jsonNSI['nst-ref'])
nst_json = nst_descriptor['nstd']
if (nst_json['usageState'] == "IN_USE"):
nstParameter2update = "usageState=NOT_IN_USE"
updatedNST_jsonresponse = nst_catalogue.update_nst(nstParameter2update, jsonNSI['nst-ref'])
else:
# errors are managed in the main thread function (run)
jsonNSI['nsi-status'] = 'ERROR'
#TODO: check if any nsr is being instantiated and wait until
# sends the updated NetSlice instance to the repositories
jsonNSI['updateTime'] = str(datetime.datetime.now().isoformat())
repo_responseStatus = nsi_repo.update_nsi(jsonNSI, self.NSI['id'])
finally:
# release the mutex for other threads
mutex_slice2db_access.release()
# creates a thread with the callback URL to advise the GK this slice is READY
slice_callback = jsonNSI['sliceCallback']
json_slice_info = {}
json_slice_info['status'] = jsonNSI['nsi-status']
json_slice_info['updateTime'] = jsonNSI['updateTime']
json_slice_info['name'] = jsonNSI['name']
json_slice_info['instance_uuid'] = jsonNSI['id']
thread_response = mapper.sliceUpdated(slice_callback, json_slice_info)
LOG.info("Network Slice INSTANTIATION with ID: "+str(self.NSI['id'])+" finished and tng-gtk notified about it.") """
def run(self):
# acquires mutex to have unique access to the nsi (repositories)
mutex_slice2db_access.acquire()
instantiation_resp = self.send_instantiation_request()
if instantiation_resp[1] != 201:
self.NSI['nsi-status'] = 'ERROR'
self.NSI['errorLog'] = 'ERROR when instantiating '
else:
self.NSI['id'] = self.req['nsiId']
self.NSI['nsi-status'] = 'INSTANTIATING'
# releases mutex for any other thread to acquire it
mutex_slice2db_access.release()
if self.NSI['nsi-status'] != 'ERROR':
# Waits until the NS is instantiated/ready or error
deployment_timeout = 30 * 60 # 30 minutes
nsi_instantiated = False
while deployment_timeout > 0:
if self.NSI['id'] == self.req['nsiId']:
uuid = sbi.get_nsi_id_from_name(self.req['nsiId'])
if (uuid):
self.NSI['id'] = uuid
if self.NSI['id'] != self.req['nsiId']:
# Check ns instantiation status
nsi = sbi.get_saved_nsi(self.NSI['id'])
if "uuid" in nsi:
self.NSI = nsi
self.NSI["id"] = self.NSI["uuid"]
del self.NSI["uuid"]
if self.NSI['nsi-status'] in ["INSTANTIATED", "ERROR", "READY"]:
nsi_instantiated = True
# if all services are instantiated, ready or error, break the while loop to notify the GTK
if nsi_instantiated:
LOG.info("Network Slice Instantiation request processed for Network Slice with ID: "+str(self.NSI['id']))
break
time.sleep(15)
deployment_timeout -= 15
if not nsi_instantiated:
self.NSI['nsi-status'] = 'ERROR'
self.NSI['errorLog'] = 'ERROR when terminating with timeout'
# Notifies the VS that the Network Slice instantiation process is done (either complete or error)
LOG.info("Instantiation Step: Informing VS about the correct end of Network Slice with ID: "+str(self.NSI['id']))
self.update_nsi_notify_instantiate()
# SEND NETWORK SLICE (NS) TERMINATION REQUEST
## Objective: send the NS termination request to the GTK
## Params: nsiId (uuid within the incoming request URL)
class thread_ns_terminate(Thread):
def __init__(self, NSI):
Thread.__init__(self)
self.NSI = NSI
def send_termination_requests(self):
LOG.info("Terminating Slice: ")
data = {}
data["instance_uuid"] = self.NSI['id']
data["request_type"] = "TERMINATE_SLICE"
# calls the function towards the GTK
termination_response = sbi.net_serv_terminate(data)
return termination_response[0], termination_response[1]
def update_nsi_notify_terminate(self):
""" mutex_slice2db_access.acquire()
try:
jsonNSI = nsi_repo.get_saved_nsi(self.NSI['id'])
jsonNSI["id"] = jsonNSI["uuid"]
del jsonNSI["uuid"]
# updates nsir fields
jsonNSI['updateTime'] = str(datetime.datetime.now().isoformat())
if jsonNSI['nsi-status'] == "TERMINATING":
jsonNSI['nsi-status'] = "TERMINATED"
# validates if any service has error status to apply it to the slice status
for service_item in jsonNSI['nsr-list']:
if (service_item['working-status'] == "ERROR"):
jsonNSI['nsi-status'] = "ERROR"
jsonNSI['errorLog'] = "Network Slice termination not done due to a service termination error."
break
# sends the updated nsi to the repositories
repo_responseStatus = nsi_repo.update_nsi(jsonNSI, self.NSI['id'])
# updates NetSlice template usageState if no other nsi is instantiated/ready
nsis_list = nsi_repo.get_all_saved_nsi()
all_nsis_terminated = True
for nsis_item in nsis_list:
if (nsis_item['nst-ref'] == self.NSI['nst-ref'] and nsis_item['nsi-status'] in ["INSTANTIATED", "INSTANTIATING", "READY"]):
all_nsis_terminated = False
break
if (all_nsis_terminated):
nst_descriptor = nst_catalogue.get_saved_nst(self.NSI['nst-ref'])
nst_json = nst_descriptor['nstd']
if (nst_json['usageState'] == "IN_USE"):
nstParameter2update = "usageState=NOT_IN_USE"
updatedNST_jsonresponse = nst_catalogue.update_nst(nstParameter2update, self.NSI['nst-ref'])
finally:
# release the mutex for other threads
mutex_slice2db_access.release()
# sends the request to notify the GTK the slice is READY
slice_callback = jsonNSI['sliceCallback']
json_slice_info = {}
json_slice_info['status'] = jsonNSI['nsi-status']
json_slice_info['updateTime'] = jsonNSI['updateTime']
json_slice_info['name'] = jsonNSI['name']
json_slice_info['instance_uuid'] = jsonNSI['id']
thread_response = mapper.sliceUpdated(slice_callback, json_slice_info)
LOG.info("Network Slice TERMINATION with ID: "+str(self.NSI['id'])+" finished and tng-gtk notified about it.") """
def run(self):
# acquires mutex to have unique access to the nsi (repositories)
mutex_slice2db_access.acquire()
# sends each of the termination requests
LOG.info("Termination Step: Terminating Network Slice Instantiation.")
# requests to terminate a NSI
termination_resp = self.send_termination_requests()
if termination_resp[1] != 201:
self.NSI['nsi-status'] = 'ERROR'
self.NSI['errorLog'] = 'ERROR when terminating '
# releases mutex for any other thread to acquire it
mutex_slice2db_access.release()
if self.NSI['nsi-status'] != 'ERROR':
# Waits until the NS is terminated or error
deployment_timeout = 30 * 60 # 30 minutes
nsi_terminated = False
while deployment_timeout > 0:
# Check ns instantiation status
self.NSI = sbi.get_saved_nsi(self.NSI['id'])
self.NSI["id"] = self.NSI["uuid"]
del self.NSI["uuid"]
if self.NSI['nsi-status'] in ["TERMINATED", "ERROR"]:
nsi_terminated = True
# if slice is terminated or error, break the while loop to notify the GTK
if nsi_terminated:
LOG.info("Network Slice Termination request processed for Network Slice with ID: "+str(self.NSI['id']))
break
time.sleep(15)
deployment_timeout -= 15
if not nsi_terminated:
self.NSI['nsi-status'] = 'ERROR'
self.NSI['errorLog'] = 'ERROR when terminating with timeout'
# Notifies the VS that the Network Slice termination process is done (either complete or error)
LOG.info("Termination Step: Informing VS about the correct end of Network Slice with ID: "+str(self.NSI['id']))
self.update_nsi_notify_terminate()
################################ NSI CREATION SECTION ##################################
# 1 step: create_nsi (with its internal functions)
# Create the NSIId and store in internal db.
def create_nsi(nsi_json):
LOG.info("Creating a new Network Slice record before instantiating it.")
# creates NSI ID with the received information
# This ID will be used as the name in the next interactions
LOG.info("Creating NSI record basic structure.")
newNsiId = nsi_json['name'] + "-" + str(uuid.uuid4())
# sending back the response
return (newNsiId, 201)
################################ NSI INSTANTIATION SECTION ##################################
# 1 step: instantiate_nsi
# Does all the process to instantiate the NSI
def instantiate_nsi(nsi_json):
LOG.info("Check for NstID before instantiating it.")
nstId = nsi_json['nstId']
catalogue_response = sbi.get_saved_nst(nstId)
if catalogue_response.get('nstd'):
nst_json = catalogue_response['nstd']
else:
return catalogue_response, catalogue_response['http_code']
# validate if there is any NSTD
if not catalogue_response:
return_msg = {}
return_msg['error'] = "There is NO NSTd with this uuid in the DDBB."
return return_msg, 400
# check if exists another nsir with the same name (nsiId)
nsirepo_jsonresponse = sbi.get_all_saved_nsi()
if nsirepo_jsonresponse:
for nsir_item in nsirepo_jsonresponse:
if (nsir_item["name"] == nsi_json['nsiId']):
return_msg = {}
return_msg['error'] = "There is already an slice with this nsiId."
return (return_msg, 400)
# Network Slice Placement
LOG.info("Placement of the Network Service Instantiations.")
new_nsi_json = nsi_placement(nsi_json, nst_json)
if new_nsi_json[1] != 200:
LOG.info("Error returning saved nsir.")
return (new_nsi_json[0], new_nsi_json[1])
# starts the thread to instantiate while sending back the response
LOG.info("Network Slice Instance Record created. Starting the instantiation procedure.")
thread_ns_instantiation = thread_ns_instantiate(new_nsi_json[0])
thread_ns_instantiation.start()
return ({},202)
# does the NS placement based on the available VIMs resources & the required of each NS.
def nsi_placement(nsi_json, nst_json):
# get the VIMs information registered to the SP
vims_list = sbi.get_vims_info()
# validates if the incoming vim_list is empty (return 500) or not (follow)
if not 'vim_list' in vims_list:
return_msg = {}
return_msg['error'] = "Not found any VIM information, register one to the SP."
return return_msg, 500
# NSR PLACEMENT: placement based on the instantiation parameters...
# TODO Choose vim per service based in instantiation parameters
city = "IT"
vimId = ""
for vim_item in vims_list['vim_list']:
if (vim_item['type'] == "vm" and vim_item['vim_city'] == city):
vimId = vim_item['vim_uuid']
break
if vimId != "":
instantiation_params_list = []
for subnet_item in nst_json["slice_ns_subnets"]:
service_dict = {}
service_dict["vim_id"] = vimId
service_dict["subnet_id"] = subnet_item["id"]
instantiation_params_list.append(service_dict)
nsi_json['instantiation_params'] = json.dumps(instantiation_params_list)
return nsi_json, 200
########################################## NSI TERMINATE SECTION #######################################
# 1 step: terminate_nsi
# Does all the process to terminate the NSI
def terminate_nsi(nsiName, TerminOrder):
#LOG.info("Updating the Network Slice Record for the termination procedure.")
mutex_slice2db_access.acquire()
try:
# Get the uuid form the name provided
uuid = sbi.get_nsi_id_from_name(nsiName)
if (uuid):
terminate_nsi = sbi.get_saved_nsi(uuid)
if terminate_nsi:
# if nsi is not in TERMINATING/TERMINATED
if terminate_nsi['nsi-status'] in ["INSTANTIATED", "INSTANTIATING", "READY", "ERROR"]:
terminate_nsi['id'] = terminate_nsi['uuid']
del terminate_nsi['uuid']
terminate_nsi['terminateTime'] = str(datetime.datetime.now().isoformat())
#terminate_nsi['sliceCallback'] = TerminOrder['callback']
terminate_nsi['nsi-status'] = "TERMINATING"
# starts the thread to terminate while sending back the response
LOG.info("Starting the termination procedure.")
thread_ns_termination = thread_ns_terminate(terminate_nsi)
thread_ns_termination.start()
terminate_value = 202
else:
terminate_nsi['errorLog'] = "This NSI is either terminated or being terminated."
terminate_value = 404
else:
terminate_nsi['errorLog'] = "There is no NSIR in the db."
terminate_value = 404
else:
terminate_nsi = {}
terminate_nsi['errorLog'] = "There is no NSIR in the db."
terminate_value = 404
finally:
mutex_slice2db_access.release()
return (terminate_nsi, terminate_value)
############################################ NSI GET SECTION ############################################
# Gets one single NSI item information
def get_nsi(nsiName):
# Get the uuid form the name provided
uuid = sbi.get_nsi_id_from_name(nsiName)
if (uuid):
LOG.info("Retrieving Network Slice Instance with ID: " +str(uuid))
nsirepo_jsonresponse = sbi.get_saved_nsi(uuid)
if (nsirepo_jsonresponse):
# Translate the response
new_nsirepo_jsonresponse = translate_nsi_from_sonata_to_vs(nsirepo_jsonresponse)
return (new_nsirepo_jsonresponse, 200)
else:
return_msg = {}
return_msg['msg'] = "There are no NSIR with this uuid in the db."
return (return_msg, 404)
else:
return_msg = {}
return_msg['msg'] = "There are no NSIR with this uuid in the db."
return (return_msg, 404)
# Gets all the existing NSI items
def get_all_nsi():
LOG.info("Retrieve all existing Network Slice Instance records.")
nsirepo_jsonresponse = sbi.get_all_saved_nsi()
if (nsirepo_jsonresponse):
new_nsirepo_jsonresponse = []
# Translate the response
for nsi in nsirepo_jsonresponse:
new_nsi = translate_nsi_from_sonata_to_vs(nsi)
new_nsirepo_jsonresponse.append(new_nsi)
return (new_nsirepo_jsonresponse, 200)
else:
return_msg = {}
return_msg['msg'] = "There are no NSIR in the db."
return (return_msg, 404)
# Translate nsi from sonata format to vs format
""" public class NetworkSliceInstance {
@Id
@GeneratedValue
@JsonIgnore
private Long id;
private String name;
private String description;
private String nsiId; //ID of the network slice
private String nstId; //ID of the network slice template
private String nsdId; //ID of the descriptor of the NFV network service that implements the network slice
private String nsdVersion; //version of the descriptor of the NFV network service that implements the network slice
private String dfId; //ID of the deployment flavour in the NFV network service
private String instantiationLevelId; //ID of the instantiation level in the NFV network service
@JsonIgnore
private String oldInstantiationLevelId; //ID of the previous instantiation level when the NFV network service is scaled
private String nfvNsId; //ID of the NFV network service that implements the network slice
private boolean soManaged;
@JsonInclude(JsonInclude.Include.NON_EMPTY)
@ElementCollection(fetch=FetchType.EAGER)
@Fetch(FetchMode.SELECT)
@Cascade(org.hibernate.annotations.CascadeType.ALL)
private List<String> networkSliceSubnetInstances = new ArrayList<>(); //in case of composite network slice, the ID of its network slice subnets
private String tenantId; //owner of the slice
private NetworkSliceStatus status;
private String errorMessage; //this field gets a value only in case of failure
@JsonInclude(JsonInclude.Include.NON_NULL)
private String nfvNsUrl;
} """
def translate_nsi_from_sonata_to_vs(nsi_sonata):
nsi_vs = {}
nsi_vs['name'] = nsi_sonata['name']
nsi_vs['description'] = nsi_sonata['description']
nsi_vs['nsiId'] = nsi_sonata['name']
nsi_vs['nstId'] = nsi_sonata['nst-ref']
nsi_vs['nsdId'] = ""
nsi_vs['nsdVersion'] = ""
nsi_vs['dfId'] = ""
nsi_vs['instantiationLevelId'] = ""
nsi_vs['nfvNsId'] = ""
nsi_vs['soManaged'] = False
nsi_vs['networkSliceSubnetInstances'] = None
nsi_vs['tenantId'] = ""
nsi_vs['status'] = translate_status_from_sonata_to_vs(nsi_sonata['nsi-status'])
nsi_vs['errorMessage'] = nsi_sonata['errorLog']
nsi_vs['nfvNsUrl'] = ""
""" nsi_vs = nsi_sonata """
return nsi_vs
# Translate status from sonata format to vs format
""" public enum NetworkSliceStatus {
NOT_INSTANTIATED,
INSTANTIATING,
INSTANTIATED,
UNDER_MODIFICATION,
TERMINATING,
TERMINATED,
FAILED
} """
def translate_status_from_sonata_to_vs(status_sonata):
if status_sonata == "READY":
status_vs = "INSTANTIATED"
elif status_sonata == "ERROR":
status_vs = "FAILED"
else:
status_vs = status_sonata
return status_vs
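# Illustrative mapping (follows directly from the code above):
#   translate_status_from_sonata_to_vs("READY") -> "INSTANTIATED"
#   translate_status_from_sonata_to_vs("ERROR") -> "FAILED"
#   any other value (e.g. "TERMINATING") is passed through unchanged.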
|
StarcoderdataPython
|
1660317
|
<filename>setup.py
#!/usr/bin/env python
"""
Pygr
====
Pygr is an open source software project used to develop graph database
interfaces for the popular Python language, with a strong emphasis
on bioinformatics applications ranging from genome-wide analysis of
alternative splicing patterns, to comparative genomics queries of
multi-genome alignment data.
"""
import os
import sys
try:
from setuptools import setup, Extension
except ImportError:
print('Setuptools not imported, falling back to distutils')
from distutils.core import setup, Extension
import pygr
def error(msg):
"Fatal errors"
print('*** error %s' % msg)
sys.exit()
PYGR_NAME = "pygr"
PYGR_VERSION = pygr.__version__
if sys.version_info < (2, 3):
error('pygr requires python 2.3 or higher')
CLASSIFIERS = """
Development Status :: 5 - Production/Stable
Operating System :: MacOS :: MacOS X
Operating System :: Microsoft :: Windows :: Windows NT/2000
Operating System :: OS Independent
Operating System :: POSIX
Operating System :: POSIX :: Linux
Operating System :: Unix
Programming Language :: Python
Topic :: Scientific/Engineering
Topic :: Scientific/Engineering :: Bio-Informatics
"""
# split into lines and filter empty ones
CLASSIFIERS = filter(None, CLASSIFIERS.splitlines())
# Setuptools should handle all this automatically
if 'setuptools' in sys.modules:
try:
import pkg_resources
pkg_resources.require('Pyrex>=0.9.8')
ext = 'pyx'
except pkg_resources.DistributionNotFound:
ext = 'c'
cmdclass = {}
else:
# if pyrex is not present try compiling the C files
try:
from Pyrex.Compiler.Version import version as PYREX_VERSION
from Pyrex.Distutils import build_ext
if PYREX_VERSION < "0.9.8":
error("pyrex version >=0.9.8 required, found %s" % PYREX_VERSION)
ext = 'pyx'
cmdclass = {'build_ext': build_ext}
except ImportError, exc:
ext = 'c'
cmdclass = {}
# extension sources
seqfmt_src = [os.path.join('pygr', 'seqfmt.%s' % ext)]
cdict_src = [os.path.join('pygr', 'cgraph.c'),
os.path.join('pygr', 'cdict.%s' % ext)]
nested_src = [os.path.join('pygr', 'intervaldb.c'),
os.path.join('pygr', 'cnestedlist.%s' % ext),
os.path.join('pygr', 'apps', 'maf2nclist.c')]
def main():
setup(
name = PYGR_NAME,
version= PYGR_VERSION,
description = \
'Pygr, a Python graph-database toolkit oriented primarily on bioinformatics',
long_description = __doc__,
author = "<NAME>",
author_email='<EMAIL>',
url = 'http://code.google.com/p/pygr/',
license = 'New BSD License',
classifiers = CLASSIFIERS,
packages = ['pygr', 'pygr.apps'],
ext_modules = [
Extension('pygr.seqfmt', seqfmt_src),
Extension('pygr.cdict', cdict_src),
Extension('pygr.cnestedlist', nested_src),
],
cmdclass = cmdclass,
)
if __name__ == '__main__':
main()
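# Typical invocations (sketch, standard distutils/setuptools commands):
#   python setup.py build_ext --inplace   # compile the Pyrex/C extensions in place
#   python setup.py install               # build and install the package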
|
StarcoderdataPython
|
1642642
|
<reponame>yemi33/grasshopperfund<gh_stars>0
from django.test import TestCase
from django.urls import reverse
from django.contrib.auth.models import User
from django.contrib import auth
from django.core.files.uploadedfile import SimpleUploadedFile
from ..models import Profile
class TestUpdateProfile(TestCase):
def setUp(self):
self.username = "test"
self.email = "<EMAIL>"
self.password = "<PASSWORD>"
self.user = User.objects.create_user(
username = self.username,
password = <PASSWORD>,
email=self.email
)
def test_update_profile(self):
self.client.login(
username = self.username,
password = <PASSWORD>
)
# currently, only email can be updated
updated_username = "updated_username"
updated_password = "<PASSWORD>"
updated_email = "<EMAIL>"
# need to open image for the updated profile
with open("./static/images/default.jpg", 'rb') as image:
updated_profile_image = SimpleUploadedFile(
"default.jpg",
image.read(),
content_type = "image/jpg"
)
response = self.client.post(
reverse(
"update-profile",
),
data = {
'user': self.user.id,
"email": updated_email,
"image": updated_profile_image,
},
)
# Successful form submit will redirect to view-profile
assert response.status_code == 302
# check that profile has been changed
assert Profile.objects.filter(email=updated_email).exists()
# delete profile
response = self.client.post(
reverse('delete-profile'),
)
assert not Profile.objects.filter(email=updated_email).exists()
self.client.logout()
|
StarcoderdataPython
|
4816459
|
<filename>pypy/module/gc/app_referents.py
# NOT_RPYTHON
import gc
def dump_rpy_heap(file):
"""Write a full dump of the objects in the heap to the given file
(which can be a file, a file name, or a file descriptor).
Format for each object (each item is one machine word):
[addr] [typeindex] [size] [addr1]..[addrn] [-1]
where [addr] is the address of the object, [typeindex] and [size]
are as get_rpy_type_index() and get_rpy_memory_usage() would return,
and [addr1]..[addrn] are addresses of other objects that this object
points to. The full dump is a list of such objects, with a marker
[0][0][0][-1] inserted after all GC roots, before all non-roots.
If the argument is a filename and the 'zlib' module is available,
we also write 'typeids.txt' and 'typeids.lst' in the same directory,
if they don't already exist.
"""
if isinstance(file, str):
f = open(file, 'wb')
gc._dump_rpy_heap(f.fileno())
f.close()
try:
import zlib, os
except ImportError:
pass
else:
filename2 = os.path.join(os.path.dirname(file), 'typeids.txt')
if not os.path.exists(filename2):
data = zlib.decompress(gc.get_typeids_z())
f = open(filename2, 'wb')
f.write(data)
f.close()
filename2 = os.path.join(os.path.dirname(file), 'typeids.lst')
if not os.path.exists(filename2):
data = ''.join(['%d\n' % n for n in gc.get_typeids_list()])
f = open(filename2, 'w')
f.write(data)
f.close()
else:
if isinstance(file, int):
fd = file
else:
if hasattr(file, 'flush'):
file.flush()
fd = file.fileno()
gc._dump_rpy_heap(fd)
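# A rough sketch of reading the dump back (assumption: native machine words,
# i.e. struct format 'l', typically 8 bytes on a 64-bit build):
#   import struct
#   with open('heap.dump', 'rb') as f:
#       raw = f.read()
#   wordsize = struct.calcsize('l')
#   words = struct.unpack('%dl' % (len(raw) // wordsize), raw)
# 'words' is then the flat [addr] [typeindex] [size] [addr1]..[addrn] [-1]
# stream described in the docstring above, with the [0][0][0][-1] marker
# separating GC roots from non-roots.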
class GcStats(object):
def __init__(self, s):
self._s = s
for item in ('total_gc_memory', 'jit_backend_used',
'total_memory_pressure',
'total_allocated_memory', 'jit_backend_allocated',
'peak_memory', 'peak_allocated_memory', 'total_arena_memory',
'total_rawmalloced_memory', 'nursery_size',
'peak_arena_memory', 'peak_rawmalloced_memory',
):
setattr(self, item, self._format(getattr(self._s, item)))
self.memory_used_sum = self._format(self._s.total_gc_memory + self._s.total_memory_pressure +
self._s.jit_backend_used)
self.memory_allocated_sum = self._format(self._s.total_allocated_memory + self._s.total_memory_pressure +
self._s.jit_backend_allocated)
self.total_gc_time = self._s.total_gc_time
def _format(self, v):
if v < 1000000:
# bit unlikely ;-)
return "%.1fkB" % (v / 1024.)
return "%.1fMB" % (v / 1024. / 1024.)
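# e.g. self._format(2048) -> '2.0kB' and self._format(3 * 1024 * 1024) -> '3.0MB'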
def __repr__(self):
if self._s.total_memory_pressure != -1:
extra = "\n memory pressure: %s" % self.total_memory_pressure
else:
extra = ""
return """Total memory consumed:
GC used: %s (peak: %s)
in arenas: %s
rawmalloced: %s
nursery: %s
raw assembler used: %s%s
-----------------------------
Total: %s
Total memory allocated:
GC allocated: %s (peak: %s)
in arenas: %s
rawmalloced: %s
nursery: %s
raw assembler allocated: %s%s
-----------------------------
Total: %s
Total time spent in GC: %s
""" % (self.total_gc_memory, self.peak_memory,
self.total_arena_memory,
self.total_rawmalloced_memory,
self.nursery_size,
self.jit_backend_used,
extra,
self.memory_used_sum,
self.total_allocated_memory, self.peak_allocated_memory,
self.peak_arena_memory,
self.peak_rawmalloced_memory,
self.nursery_size,
self.jit_backend_allocated,
extra,
self.memory_allocated_sum,
self.total_gc_time / 1000.0)
def get_stats(memory_pressure=False):
return GcStats(gc._get_stats(memory_pressure=memory_pressure))
|
StarcoderdataPython
|
3338029
|
<reponame>Koalacards/2048AI
import random
from GameAgent import GameAgent, play_n_times
class RandomAgent(GameAgent):
def get_move(self, board):
return random.choice(board.get_legal_moves())
play_n_times(RandomAgent(), 1000)
|
StarcoderdataPython
|
3388032
|
# Generated by Django 3.1.5 on 2021-02-08 01:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Intent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tag', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Response',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('message', models.CharField(max_length=100)),
('Intent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ChatBotAPI.intent')),
],
),
migrations.CreateModel(
name='Pattern',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.CharField(max_length=100)),
('Intent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ChatBotAPI.intent')),
],
),
migrations.CreateModel(
name='ContextSet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.CharField(max_length=100)),
('Intent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ChatBotAPI.intent')),
],
),
]
|
StarcoderdataPython
|
1714297
|
<reponame>esgomezm/deepcell-tf
# Copyright 2016-2019 The <NAME> at the California Institute of
# Technology (Caltech), with support from the Paul Allen Family Foundation,
# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.
# All rights reserved.
#
# Licensed under a modified Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.github.com/vanvalenlab/deepcell-tf/LICENSE
#
# The Work provided may be used for non-commercial academic purposes only.
# For any other use of the Work, including commercial use, please contact:
# <EMAIL>
#
# Neither the name of Caltech nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classify the type of an input image to send the data to the correct model"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import keras
from tensorflow.python.keras.utils.data_utils import get_file
from deepcell.layers import ImageNormalization2D, TensorProduct
from deepcell.utils.backbone_utils import get_backbone
MOBILENETV2_WEIGHTS_PATH = ('https://deepcell-data.s3-us-west-1.amazonaws.com/'
'model-weights/LabelDetectionModel_mobilenetv2.h5')
def LabelDetectionModel(input_shape=(None, None, 1),
inputs=None,
backbone='mobilenetv2',
use_pretrained_weights=True):
"""Classify a microscopy image as Nuclear, Cytoplasm, or Phase.
This can be helpful in determining the type of data (nuclear, cytoplasm,
etc.) so that this data can be forwarded to the correct segmentation model.
Based on a standard backbone with an initial ImageNormalization2D and final
AveragePooling2D, TensorProduct, and Softmax layers.
Args:
input_shape (tuple): a 3-length tuple of the input data shape.
inputs (tensorflow.keras.Layer): Optional input layer of the model.
If not provided, creates a Layer based on input_shape.
backbone (str): name of the backbone to use for the model.
use_pretrained_weights (bool): whether to load pre-trained weights.
Only supports the MobileNetV2 backbone.
"""
required_channels = 3 # required for most backbones
if inputs is None:
inputs = keras.layers.Input(shape=input_shape)
if keras.backend.image_data_format() == 'channels_first':
channel_axis = 0
else:
channel_axis = -1
norm = ImageNormalization2D(norm_method='whole_image')(inputs)
fixed_inputs = TensorProduct(required_channels)(norm)
# force the input shape
fixed_input_shape = list(input_shape)
fixed_input_shape[channel_axis] = required_channels
fixed_input_shape = tuple(fixed_input_shape)
backbone_model = get_backbone(
backbone,
fixed_inputs,
use_imagenet=False,
return_dict=False,
include_top=False,
weights=None,
input_shape=fixed_input_shape,
pooling=None)
x = keras.layers.AveragePooling2D(4)(backbone_model.outputs[0])
x = TensorProduct(256)(x)
x = TensorProduct(3)(x)
x = keras.layers.Flatten()(x)
outputs = keras.layers.Activation('softmax')(x)
model = keras.Model(inputs=backbone_model.inputs, outputs=outputs)
if use_pretrained_weights:
local_name = 'LabelDetectionModel_{}.h5'.format(backbone)
if backbone.lower() in {'mobilenetv2', 'mobilenet_v2'}:
weights_path = get_file(
local_name,
MOBILENETV2_WEIGHTS_PATH,
cache_subdir='models',
md5_hash='14d4b2f7c77d334c958d2dde79972e6e')
else:
raise ValueError('Backbone %s does not have a weights file.' %
backbone)
model.load_weights(weights_path)
return model
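# Minimal usage sketch (use_pretrained_weights=False avoids the weight download;
# input sizes are illustrative):
#   model = LabelDetectionModel(input_shape=(None, None, 1),
#                               backbone='mobilenetv2',
#                               use_pretrained_weights=False)
#   # model.predict(batch) is then intended to yield softmax scores over the
#   # three image classes named in the docstring.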
|
StarcoderdataPython
|
3264373
|
<filename>delivery_bots/api/__init__.py
import os
import sentry_sdk
sentry_sdk.init(
os.getenv('SENTRY_URL', 'SENTRY'),
traces_sample_rate=1.0,
)
|
StarcoderdataPython
|
196444
|
<filename>revisiting_rainbow/Agents/dqn_agent_new.py
"""Compact implementation of a DQN agent
Specifically, we implement the following components:
* prioritized replay
* huber_loss
* mse_loss
* double_dqn
* noisy
* dueling
* Munchausen
Details in:
"Human-level control through deep reinforcement learning" by Mnih et al. (2015).
"Noisy Networks for Exploration" by Fortunato et al. (2017).
"Deep Reinforcement Learning with Double Q-learning" by Hasselt et al. (2015).
"Dueling Network Architectures for Deep Reinforcement Learning" by Wang et al. (2015).
"Munchausen Reinforcement Learning" by Vieillard et al. (2020).
"""
import time
import copy
import functools
from dopamine.jax import networks
from dopamine.jax.agents.dqn import dqn_agent
from dopamine.replay_memory import prioritized_replay_buffer
import gin
import jax
import jax.numpy as jnp
import numpy as onp
import tensorflow as tf
import jax.scipy.special as scp
from flax import linen as nn
def mse_loss(targets, predictions):
return jnp.mean(jnp.power((targets - (predictions)),2))
@functools.partial(jax.jit, static_argnums=(0, 9,10,11,12,13, 14))
def train(network_def, target_params, optimizer, states, actions, next_states, rewards,
terminals, loss_weights, cumulative_gamma, target_opt, mse_inf,tau,alpha,clip_value_min, rng):
"""Run the training step."""
online_params = optimizer.target
def loss_fn(params, rng_input, target, loss_multipliers):
def q_online(state):
return network_def.apply(params, state, rng=rng_input)
q_values = jax.vmap(q_online)(states).q_values
q_values = jnp.squeeze(q_values)
replay_chosen_q = jax.vmap(lambda x, y: x[y])(q_values, actions)
if mse_inf:
loss = jax.vmap(mse_loss)(target, replay_chosen_q)
else:
loss = jax.vmap(dqn_agent.huber_loss)(target, replay_chosen_q)
mean_loss = jnp.mean(loss_multipliers * loss)
return mean_loss, loss
rng, rng2, rng3, rng4 = jax.random.split(rng, 4)
def q_target(state):
return network_def.apply(target_params, state, rng=rng2)
def q_target_online(state):
return network_def.apply(online_params, state, rng=rng4)
if target_opt == 0:
target = dqn_agent.target_q(q_target, next_states, rewards, terminals, cumulative_gamma)
elif target_opt == 1:
#Double DQN
target = target_DDQN(q_target_online, q_target, next_states, rewards, terminals, cumulative_gamma)
elif target_opt == 2:
#Munchausen
target = target_m_dqn(q_target_online, q_target, states,next_states,actions,rewards,terminals,
cumulative_gamma,tau,alpha,clip_value_min)
else:
print('error')
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
(mean_loss, loss), grad = grad_fn(online_params, rng3, target, loss_weights)
optimizer = optimizer.apply_gradient(grad)
return optimizer, loss, mean_loss
def target_DDQN(model, target_network, next_states, rewards, terminals, cumulative_gamma):
"""Compute the target Q-value. Double DQN"""
next_q_values = jax.vmap(model, in_axes=(0))(next_states).q_values
next_q_values = jnp.squeeze(next_q_values)
replay_next_qt_max = jnp.argmax(next_q_values, axis=1)
next_q_state_values = jax.vmap(target_network, in_axes=(0))(next_states).q_values
q_values = jnp.squeeze(next_q_state_values)
replay_chosen_q = jax.vmap(lambda t, u: t[u])(q_values, replay_next_qt_max)
return jax.lax.stop_gradient(rewards + cumulative_gamma * replay_chosen_q *
(1. - terminals))
def stable_scaled_log_softmax(x, tau, axis=-1):
max_x = jnp.amax(x, axis=axis, keepdims=True)
y = x - max_x
tau_lse = max_x + tau * jnp.log(jnp.sum(jnp.exp(y / tau), axis=axis, keepdims=True))
return x - tau_lse
def stable_softmax(x, tau, axis=-1):
max_x = jnp.amax(x, axis=axis, keepdims=True)
y = x - max_x
return nn.softmax(y/tau, axis=axis)
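# Both helpers above subtract max(x) before exponentiating: softmax and scaled
# log-softmax are shift-invariant, so softmax((x - max(x)) / tau) == softmax(x / tau),
# but the shifted form avoids overflow in exp() for large logits.
# stable_scaled_log_softmax(x, tau) equals x - tau * logsumexp(x / tau).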
def target_m_dqn(model, target_network, states, next_states, actions,rewards, terminals,
cumulative_gamma,tau,alpha,clip_value_min):
"""Compute the target Q-value. Munchausen DQN"""
#----------------------------------------
q_state_values = jax.vmap(target_network, in_axes=(0))(states).q_values
q_state_values = jnp.squeeze(q_state_values)
next_q_values = jax.vmap(target_network, in_axes=(0))(next_states).q_values
next_q_values = jnp.squeeze(next_q_values)
#----------------------------------------
tau_log_pi_next = stable_scaled_log_softmax(next_q_values, tau, axis=1)
pi_target = stable_softmax(next_q_values,tau, axis=1)
replay_log_policy = stable_scaled_log_softmax(q_state_values, tau, axis=1)
#----------------------------------------
replay_next_qt_softmax = jnp.sum((next_q_values-tau_log_pi_next)*pi_target,axis=1)
replay_action_one_hot = nn.one_hot(actions, q_state_values.shape[-1])
tau_log_pi_a = jnp.sum(replay_log_policy * replay_action_one_hot, axis=1)
#a_max=1
tau_log_pi_a = jnp.clip(tau_log_pi_a, a_min=clip_value_min,a_max=1)
munchausen_term = alpha * tau_log_pi_a
modified_bellman = (rewards + munchausen_term +cumulative_gamma * replay_next_qt_softmax *
(1. - jnp.float32(terminals)))
return jax.lax.stop_gradient(modified_bellman)
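# In equation form (mirroring the code above, with pi = softmax(q / tau)):
#   target = r
#            + alpha * clip(tau * log pi(a|s), clip_value_min, 1)
#            + gamma * (1 - done) * sum_a' pi(a'|s') * (q(s', a') - tau * log pi(a'|s'))
# i.e. the soft Bellman backup plus the clipped "Munchausen" log-policy bonus.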
@functools.partial(jax.jit, static_argnums=(0, 4, 5, 6, 7, 8, 10, 11))
def select_action(network_def, params, state, rng, num_actions, eval_mode,
epsilon_eval, epsilon_train, epsilon_decay_period,
training_steps, min_replay_history, epsilon_fn):
epsilon = jnp.where(eval_mode,
epsilon_eval,
epsilon_fn(epsilon_decay_period,
training_steps,
min_replay_history,
epsilon_train))
rng, rng1, rng2, rng3 = jax.random.split(rng, num=4)
selected_action = jnp.argmax(network_def.apply(params, state, rng=rng3).q_values)
p = jax.random.uniform(rng1)
return rng, jnp.where(p <= epsilon,
jax.random.randint(rng2, (), 0, num_actions),
selected_action)
@gin.configurable
class JaxDQNAgentNew(dqn_agent.JaxDQNAgent):
"""A compact implementation of a simplified Rainbow agent."""
def __init__(self,
num_actions,
tau,
alpha=1,
clip_value_min=-10,
net_conf = None,
env = "CartPole",
normalize_obs = True,
hidden_layer=2,
neurons=512,
replay_scheme='prioritized',
noisy = False,
dueling = False,
initzer = 'xavier_uniform',
target_opt=0,
mse_inf=False,
network=networks.NatureDQNNetwork,
optimizer='adam',
epsilon_fn=dqn_agent.linearly_decaying_epsilon,
seed=None):
"""Initializes the agent and constructs the necessary components.
Args:
num_actions: int, number of actions the agent can take at any state.
observation_shape: tuple of ints or an int. If single int, the observation
is assumed to be a 2D square.
observation_dtype: DType, specifies the type of the observations. Note
that if your inputs are continuous, you should set this to jnp.float32.
stack_size: int, number of frames to use in state stack.
network: flax.nn Module that is initialized by shape in _create_network
below. See dopamine.jax.networks.RainbowNetwork as an example.
num_atoms: int, the number of buckets of the value function distribution.
vmax: float, the value distribution support is [-vmax, vmax].
gamma: float, discount factor with the usual RL meaning.
update_horizon: int, horizon at which updates are performed, the 'n' in
n-step update.
min_replay_history: int, number of transitions that should be experienced
before the agent begins training its value function.
update_period: int, period between DQN updates.
target_update_period: int, update period for the target network.
epsilon_fn: function expecting 4 parameters:
(decay_period, step, warmup_steps, epsilon). This function should return
the epsilon value used for exploration during training.
epsilon_train: float, the value to which the agent's epsilon is eventually
decayed during training.
epsilon_eval: float, epsilon used when evaluating the agent.
epsilon_decay_period: int, length of the epsilon decay schedule.
replay_scheme: str, 'prioritized' or 'uniform', the sampling scheme of the
replay memory.
optimizer: str, name of optimizer to use.
summary_writer: SummaryWriter object for outputting training statistics.
Summary writing disabled if set to None.
summary_writing_frequency: int, frequency with which summaries will be
written. Lower values will result in slower training.
allow_partial_reload: bool, whether we allow reloading a partial agent
(for instance, only the network parameters).
"""
# We need this because some tools convert round floats into ints.
seed = int(time.time() * 1e6) if seed is None else seed
self._net_conf = net_conf
self._env = env
self._normalize_obs = normalize_obs
self._hidden_layer = hidden_layer
self._neurons=neurons
self._noisy = noisy
self._dueling = dueling
self._initzer = initzer
self._target_opt = target_opt
self._mse_inf = mse_inf
self._tau = tau
self._alpha = alpha
self._clip_value_min = clip_value_min
self._rng = jax.random.PRNGKey(seed)
super(JaxDQNAgentNew, self).__init__(
num_actions= num_actions,
network= functools.partial(network,
num_actions=num_actions,
net_conf=self._net_conf,
env=self._env,
normalize_obs=self._normalize_obs,
hidden_layer=self._hidden_layer,
neurons=self._neurons,
noisy=self._noisy,
dueling=self._dueling,
initzer=self._initzer),
optimizer=optimizer,
epsilon_fn=dqn_agent.identity_epsilon if self._noisy == True else epsilon_fn)
self._replay_scheme = replay_scheme
def _build_networks_and_optimizer(self):
self._rng, rng = jax.random.split(self._rng)
online_network_params = self.network_def.init(
rng, x=self.state, rng=self._rng)
optimizer_def = dqn_agent.create_optimizer(self._optimizer_name)
self.optimizer = optimizer_def.create(online_network_params)
self.target_network_params = copy.deepcopy(online_network_params)
def _build_replay_buffer(self):
"""Creates the prioritized replay buffer used by the agent."""
return prioritized_replay_buffer.OutOfGraphPrioritizedReplayBuffer(
observation_shape=self.observation_shape,
stack_size=self.stack_size,
update_horizon=self.update_horizon,
gamma=self.gamma,
observation_dtype=self.observation_dtype)
def _train_step(self):
"""Runs a single training step.
Runs training if both:
(1) A minimum number of frames have been added to the replay buffer.
(2) `training_steps` is a multiple of `update_period`.
Also, syncs weights from online_network to target_network if training steps
is a multiple of target update period.
"""
# Run a train op at the rate of self.update_period if enough training steps
# have been run. This matches the Nature DQN behaviour.
if self._replay.add_count > self.min_replay_history:
if self.training_steps % self.update_period == 0:
self._sample_from_replay_buffer()
if self._replay_scheme == 'prioritized':
# The original prioritized experience replay uses a linear exponent
# schedule 0.4 -> 1.0. Comparing the schedule to a fixed exponent of
# 0.5 on 5 games (Asterix, Pong, Q*Bert, Seaquest, Space Invaders)
# suggested a fixed exponent actually performs better, except on Pong.
probs = self.replay_elements['sampling_probabilities']
# Weight the loss by the inverse priorities.
loss_weights = 1.0 / jnp.sqrt(probs + 1e-10)
loss_weights /= jnp.max(loss_weights)
else:
loss_weights = jnp.ones(self.replay_elements['state'].shape[0])
self.optimizer, loss, mean_loss = train(self.network_def,
self.target_network_params,
self.optimizer,
self.replay_elements['state'],
self.replay_elements['action'],
self.replay_elements['next_state'],
self.replay_elements['reward'],
self.replay_elements['terminal'],
loss_weights,
self.cumulative_gamma,
self._target_opt,
self._mse_inf,
self._tau,
self._alpha,
self._clip_value_min,
self._rng)
if self._replay_scheme == 'prioritized':
# Rainbow and prioritized replay are parametrized by an exponent
# alpha, but in both cases it is set to 0.5 - for simplicity's sake we
# leave it as is here, using the more direct sqrt(). Taking the square
# root "makes sense", as we are dealing with a squared loss. Add a
# small nonzero value to the loss to avoid 0 priority items. While
# technically this may be okay, setting all items to 0 priority will
# cause troubles, and also result in 1.0 / 0.0 = NaN correction terms.
self._replay.set_priority(self.replay_elements['indices'],
jnp.sqrt(loss + 1e-10))
if (self.summary_writer is not None and
self.training_steps > 0 and
self.training_steps % self.summary_writing_frequency == 0):
summary = tf.compat.v1.Summary(value=[
tf.compat.v1.Summary.Value(tag='HuberLoss', simple_value=mean_loss)])
self.summary_writer.add_summary(summary, self.training_steps)
if self.training_steps % self.target_update_period == 0:
self._sync_weights()
self.training_steps += 1
def _store_transition(self,
last_observation,
action,
reward,
is_terminal,
priority=None):
"""Stores a transition when in training mode.
Stores the following tuple in the replay buffer (last_observation, action,
reward, is_terminal, priority).
Args:
last_observation: Last observation, type determined via observation_type
parameter in the replay_memory constructor.
action: An integer, the action taken.
reward: A float, the reward.
is_terminal: Boolean indicating if the current state is a terminal state.
priority: Float. Priority of sampling the transition. If None, the default
priority will be used. If replay scheme is uniform, the default priority
is 1. If the replay scheme is prioritized, the default priority is the
maximum ever seen [Schaul et al., 2015].
"""
if priority is None:
if self._replay_scheme == 'uniform':
priority = 1.
else:
priority = self._replay.sum_tree.max_recorded_priority
if not self.eval_mode:
self._replay.add(last_observation, action, reward, is_terminal, priority)
def begin_episode(self, observation):
"""Returns the agent's first action for this episode.
Args:
observation: numpy array, the environment's initial observation.
Returns:
int, the selected action.
"""
self._reset_state()
self._record_observation(observation)
if not self.eval_mode:
self._train_step()
self._rng, self.action = select_action(self.network_def,
self.online_params,
self.state,
self._rng,
self.num_actions,
self.eval_mode,
self.epsilon_eval,
self.epsilon_train,
self.epsilon_decay_period,
self.training_steps,
self.min_replay_history,
self.epsilon_fn)
self.action = onp.asarray(self.action)
return self.action
def step(self, reward, observation):
"""Records the most recent transition and returns the agent's next action.
We store the observation of the last time step since we want to store it
with the reward.
Args:
reward: float, the reward received from the agent's most recent action.
observation: numpy array, the most recent observation.
Returns:
int, the selected action.
"""
self._last_observation = self._observation
self._record_observation(observation)
if not self.eval_mode:
self._store_transition(self._last_observation, self.action, reward, False)
self._train_step()
self._rng, self.action = select_action(self.network_def,
self.online_params,
self.state,
self._rng,
self.num_actions,
self.eval_mode,
self.epsilon_eval,
self.epsilon_train,
self.epsilon_decay_period,
self.training_steps,
self.min_replay_history,
self.epsilon_fn)
self.action = onp.asarray(self.action)
return self.action
|
StarcoderdataPython
|
3229534
|
<reponame>earthobservatory/isce2<gh_stars>1-10
#!/usr/bin/env python3
from __future__ import print_function
import logging
import numbers
import sys
class DictUtils:
@staticmethod
# if a value for a given key is "empty" (like '', [], {}, None, etc., but not zero or any other number) then the pair is removed
def cleanDictionary(dictIn):
for k,v in list(dictIn.items()):
if (not v) and not isinstance(v,numbers.Number):
del dictIn[k]
#keep going down the tree
elif isinstance(v,dict):
DictUtils.cleanDictionary(v)
return dictIn # doesn't have to return it, but just in case one wants to use it this way instead of passing by ref
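# A minimal illustration (sketch) of the behaviour:
#   DictUtils.cleanDictionary({'a': '', 'b': 0, 'c': {'d': None}})
#   -> {'b': 0, 'c': {}}
# 'a' and 'd' are dropped as "empty", 0 survives because numbers are kept, and
# 'c' itself survives because it was non-empty when first tested.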
@staticmethod
def renormalizeKey(s):
"""
staticmethod renormalizeKey(s):
Apply renormalization to a dictionary key,
i.e., transform key to a standard format,
by removing all white space and converting
to lower case.
"""
from isceobj.Util.StringUtils import StringUtils
return StringUtils.lower_no_spaces(s)
#renormalize all the keys in the dictionary
@staticmethod
def renormalizeKeys(dictNow):
"""
staticmethod renormalizeKeys(d):
renormalize all keys in dictionary d by
applying renormalizeKey static method.
"""
for k,v in list(dictNow.items()):
kNow = DictUtils.renormalizeKey(k)
if kNow != k:
dictNow[kNow] = dictNow.pop(k)
if isinstance(v,dict):
DictUtils.renormalizeKeys(v)
return dictNow
#compares the keys in the dict with an input key; the comparison is case and whitespace insensitive
#if replace is true it also replaces the matching key in the dict with k
@staticmethod
def keyIsIn(k,dictNow,replace = None):
if(replace == None):
replace = True
ret = False
for k1 in dictNow.keys():
if (''.join(k1.split())).lower() == (''.join(k.split())).lower():
if replace:
dictNow[k] = dictNow.pop(k1)
ret = True
break
return ret
@staticmethod
# update the dictionary dict1 with the values in dict2.
# If the key exists and replace = True, then the value is overwritten,
# otherwise it is appended.
# If it does not exist a new node is created.
# When replace is True, if spare (a list of keys or a single key) is defined, the values of these
# keys will be appended if they are not already present. Use it only for str values, i.e. for doc strings.
def updateDictionary(dict1,dict2,replace = None,spare = None):
if replace is None:
replace = False
if spare:#if it's a single key, put it into a list
if isinstance(spare,str):
spare = [spare]
else:
spare = []
# dict1 is the one to update
for k2,v2 in dict2.items():
if DictUtils.keyIsIn(k2,dict1):
if isinstance(v2,dict):#if is a dict keep going down the node
DictUtils.updateDictionary(dict1[k2],v2,replace,spare)
else:
if replace:#replace the entry
append = False
if k2 in spare: #check if the key needs to be spared
append = True
if isinstance(dict1[k2],list):
if v2 in dict1[k2]: # if so then append the content
append = False
break
else:
if dict1[k2] == v2:
append = False
break
if not append:# same key but item already in. it will rewrite it. not a big deal
break
if append: #convert everything into a list
if not isinstance(v2,list):
v2 = [v2]
if not isinstance(dict1[k2],list):
dict1[k2] = [dict1[k2]]
#do not append if already there
for v22 in v2:
if v22 not in dict1[k2]:
dict1[k2].append(v22)
else:
dict1.update({k2:v2})
else:#update only if is not the same item or the item is not already present (if dict1[k2] is a list)
if isinstance(dict1[k2],list):
if v2 not in dict1[k2]: # if so then append the content
dict1[k2].append(v2)
else:
if dict1[k2] != v2:
dict1[k2] = [dict1[k2],v2]
else:
dict1.update({k2:v2})
#probably need to create a class with some dictionary utils. put also some of the methods in Parser()
# if we have a dict of dicts, keeping the structure, extract a particular key
# ex. {'n1':{n1_1:{'k1':v1},{'k2':v2},n1_2:{'k1':v11},{'k2':v22}}} extract the 'k2' the result is
# {'n1':{n1_1:{'k2':v2},n1_2:{'k2':v22}}}. in this case k1 could be the 'doc' string and 'k2' the units
@staticmethod
def extractDict(dictIn,key):
import copy
#put everything into a deep copy first, then strip the unwanted keys below
dictOut = copy.deepcopy(dictIn)
DictUtils.searchKey(dictIn,dictOut,key)
return dictOut
@staticmethod
#just a wrapper of _getDictWithKey so the result can be returned instead of being an output argument
def getDictWithKey(dictIn,key,includeKey=True):
dictOut = {}
DictUtils._getDictWithKey(dictIn,dictOut,key,includeKey)
return dictOut
@staticmethod
#it returns the first occurrence of {key: val}, where val is the corresponding value for that key,
#if includeKey is True; otherwise it returns just val
def _getDictWithKey(dictIn,dictOut,key,includeKey=True):
if(isinstance(dictIn,dict)):
for k in dictIn.keys():
if(k == key):
if includeKey:
dictOut.update({k:dictIn[k]})
else:
dictOut.update(dictIn[k])
break
else:
DictUtils._getDictWithKey(dictIn[k],dictOut,key,includeKey)
@staticmethod
#returns a dictionary where all the keys are removed but key
def searchKey(dictIn,dictOut,key):
for k,v in dictIn.items():
if(k == key):
break
if isinstance(v,dict):
DictUtils.searchKey(v,dictOut[k],key)
if dictOut[k] == {}:#if we removed everything in dictOut[k], then remove the branch
dictOut.pop(k)
elif (key != k):#this is a simple pair (k,v) but the key is not the one we want
dictOut.pop(k)
def __getstate__(self):
d = dict(self.__dict__)
del d['logger']
return d
def __setstate__(self,d):
self.__dict__.update(d)
self.logger = logging.getLogger('isce.iscesys.DictUtils')
def __init__(self):
self.logger = logging.getLogger('isce.iscesys.DictUtils')
|
StarcoderdataPython
|
109071
|
from epsilon.extime import Time
from axiom.store import Store
from axiom import attributes
from axiom.tags import Catalog
from axiom.item import Item
from axiom.dependency import installOn
from nevow.livetrial import testcase
from nevow import tags, loaders
from nevow.athena import expose
from xmantissa.webtheme import getLoader
from xmantissa import people
from xquotient.exmess import Message, MessageDetail, MessageBodyFragment, MessageActions
from xquotient.inbox import Inbox
from xquotient import equotient
from xquotient.test.util import MIMEReceiverMixin, PartMaker
class _Header(Item):
part = attributes.reference()
name = attributes.text()
value = attributes.text()
class _Part(Item):
z = attributes.integer()
def getHeader(self, k):
for hdr in self.store.query(
_Header, attributes.AND(_Header.part == self,
_Header.name == k.lower())):
return hdr.value
raise equotient.NoSuchHeader(k)
def walkMessage(self, *junk):
return ()
walkAttachments = walkMessage
def associateWithMessage(self, message):
pass
def relatedAddresses(self):
return []
def guessSentTime(self, default):
return Time()
def getAllReplyAddresses(self):
return {}
def getReplyAddresses(self):
return []
def _docFactoryFactory(testName, renderMethod='msgDetail'):
return loaders.stan(tags.div[
tags.div(render=tags.directive('liveTest'))[testName],
tags.div(render=tags.directive('msgDetail'))])
class _MsgDetailTestMixin(object):
"""
Mixin which provides some methods for setting up stores and messages
"""
def _setUpStore(self):
"""
Create a store and install the items required by a
L{xquotient.exmess.Message}
@rtype: L{axiom.store.Store}
"""
s = Store()
installOn(Inbox(store=s), s)
return s
def _setUpMsg(self):
"""
Install an innocuous incoming message in a newly-created store
@rtype: L{xquotient.exmess.Message}
"""
s = self._setUpStore()
m = Message.createIncoming(s, _Part(store=s), u'test://test')
m.subject = u'the subject'
m.sender = u'sender@host'
m.senderDisplay = u'Sender'
m.recipient = u'recipient@host'
m.sentWhen = Time.fromPOSIXTimestamp(0)
m.receivedWhen = Time.fromPOSIXTimestamp(1)
m.classifyClean()
return m
class MsgDetailTestCase(testcase.TestCase, _MsgDetailTestMixin):
"""
Tests for L{xquotient.exmess.MessageDetail}
"""
jsClass = u'Quotient.Test.MsgDetailTestCase'
def setUp(self):
"""
Setup & populate a store, and render a
L{xquotient.exmess.MessageDetail}
"""
f = MessageDetail(self._setUpMsg())
f.setFragmentParent(self)
f.docFactory = getLoader(f.fragmentName)
return f
expose(setUp)
class MsgDetailTagsTestCase(testcase.TestCase, _MsgDetailTestMixin):
"""
Tests for L{xquotient.exmess.MessageDetail} and tags
"""
jsClass = u'Quotient.Test.MsgDetailTagsTestCase'
def _setUpMsg(self, tags):
"""
Same as L{_MsgDetailTestMixin._setUpMsg}, but with a tagged message!
@param tags: tags to assign to message
@type tags: C{list} of C{unicode}
"""
msg = super(MsgDetailTagsTestCase, self)._setUpMsg()
cat = msg.store.findOrCreate(Catalog)
for tag in tags:
cat.tag(msg, tag)
return msg
def setUp(self, tags):
"""
Setup & populate a store, and render a
L{xquotient.exmess.MessageDetail}
"""
f = MessageDetail(self._setUpMsg(tags))
f.setFragmentParent(self)
f.docFactory = getLoader(f.fragmentName)
return f
expose(setUp)
class MsgDetailAddPersonTestCase(testcase.TestCase, _MsgDetailTestMixin):
"""
Test adding a person from the msg detail
"""
jsClass = u'Quotient.Test.MsgDetailAddPersonTestCase'
def __init__(self, *a, **k):
super(MsgDetailAddPersonTestCase, self).__init__(*a, **k)
self._stores = {}
def _setUpStore(self):
s = super(MsgDetailAddPersonTestCase, self)._setUpStore()
installOn(people.AddPerson(store=s), s)
return s
def verifyPerson(self, key):
"""
Called from the client after a person has been added. Verifies that
there is only one person, and that his details match those of the
sender of the single message in our store
"""
store = self._stores[key]
organizer = store.findUnique(people.Organizer)
p = self._stores[key].findUnique(
people.Person,
people.Person.storeID != organizer.storeOwnerPerson.storeID)
self.assertEquals(p.getEmailAddress(), 'sender@host')
self.assertEquals(p.getDisplayName(), 'Sender')
expose(verifyPerson)
def setUp(self, key):
"""
Setup & populate a store, and render a
L{xquotient.exmess.MessageDetail}
"""
msg = self._setUpMsg()
self._stores[key] = msg.store
f = MessageDetail(msg)
f.setFragmentParent(self)
f.docFactory = getLoader(f.fragmentName)
return f
expose(setUp)
class MsgDetailInitArgsTestCase(testcase.TestCase, _MsgDetailTestMixin):
"""
Test for L{xquotient.exmess.MessageDetail}'s initargs
"""
jsClass = u'Quotient.Test.MsgDetailInitArgsTestCase'
def _setUpMsg(self):
m = super(MsgDetailInitArgsTestCase, self)._setUpMsg()
m.store.findUnique(Inbox).showMoreDetail = True
return m
def setUp(self):
"""
Setup & populate a store, and render a
L{xquotient.exmess.MessageDetail}
"""
f = MessageDetail(self._setUpMsg())
f.setFragmentParent(self)
f.docFactory = getLoader(f.fragmentName)
return f
expose(setUp)
class _MsgDetailHeadersTestMixin(_MsgDetailTestMixin):
"""
Extension of L{_MsgDetailTestMixin} which allows the client to set
arbitrary headers on our message
"""
def _setUpMsg(self, headers):
msg = super(_MsgDetailHeadersTestMixin, self)._setUpMsg()
for (k, v) in headers.iteritems():
_Header(store=msg.store,
part=msg.impl,
name=k.lower(),
value=v)
return msg
class MsgDetailHeadersTestCase(testcase.TestCase, _MsgDetailHeadersTestMixin):
"""
Test for the rendering of messages which have various headers set
"""
jsClass = u'Quotient.Test.MsgDetailHeadersTestCase'
def setUp(self, headers):
"""
Setup & populate a store with a L{xquotient.exmess.Message} which has
the headers in C{headers} set to the given values
@type headers: C{dict} of C{unicode}
"""
msg = self._setUpMsg(headers)
f = MessageDetail(msg)
f.setFragmentParent(self)
f.docFactory = getLoader(f.fragmentName)
return f
expose(setUp)
class MsgDetailCorrespondentPeopleTestCase(testcase.TestCase, _MsgDetailHeadersTestMixin):
"""
Tests for rendering a message where various correspondents are or aren't
represented by L{xmantissa.people.Person} items in the store
"""
jsClass = u'Quotient.Test.MsgDetailCorrespondentPeopleTestCase'
def _setUpStore(self):
store = super(MsgDetailCorrespondentPeopleTestCase, self)._setUpStore()
self.organizer = people.Organizer(store=store)
installOn(self.organizer, store)
return store
def setUp(self, peopleAddresses, sender, recipient, cc):
"""
Setup & populate a store with a L{xquotient.exmess.Message} which has
correspondents set to the values of C{cc} and C{recipient}, and a
person for each email address in C{peopleAddresses}
@param peopleAddresses: addresses for which to create L{people.Person} items
@type peopleAddresses: C{list} of C{unicode}
@param sender: address to use as the value of the C{from} header
@type sender: C{unicode}
@param recipient: address to use as the value of the C{recipient}
attribute
@type recipient: C{unicode}
@param cc: addresses to use as the value of the C{cc} header
@type cc: C{unicode}
"""
headers = {u'from': sender}
if cc:
headers[u'cc'] = cc
msg = self._setUpMsg(headers)
msg.recipient = recipient
for addr in peopleAddresses:
people.EmailAddress(
store=msg.store,
address=addr,
person=people.Person(
store=msg.store,
organizer=self.organizer))
f = MessageDetail(msg)
f.setFragmentParent(self)
f.docFactory = getLoader(f.fragmentName)
return f
expose(setUp)
class MsgBodyTestCase(testcase.TestCase, MIMEReceiverMixin):
"""
Tests for the selection and rendering of alternate text parts
"""
jsClass = u'Quotient.Test.MsgBodyTestCase'
def setUp(self, key):
# is there a better way? TestCase.mktemp() doesn't work otherwise
self._testMethodName = key
self.setUpMailStuff()
p = self.createMIMEReceiver().feedStringNow(
PartMaker('multipart/alternative', 'alt',
PartMaker('text/plain', 'this is the text/plain'),
PartMaker('text/html', 'this is the text/html')).make())
f = MessageBodyFragment(p.message, 'text/plain')
f.setFragmentParent(self)
return f
expose(setUp)
class ActionsTestCase(testcase.TestCase):
"""
Tests for Quotient.Message's actions stuff
"""
jsClass = u'Quotient.Test.ActionsTestCase'
def setUp(self):
f = MessageActions()
f.setFragmentParent(self)
return f
expose(setUp)
|
StarcoderdataPython
|
1766061
|
import json, urllib2, serial
from websocket import create_connection
port = '/dev/tty.usbmodem641'
ard = serial.Serial(port, 115200, timeout=5)
def set_valve(valve_number, state):
message = chr(valve_number | (int(state) << 3))
ard.write(message)
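# Illustration of the one-byte protocol above (bit 3 carries the on/off state):
#   set_valve(2, True) writes chr(2 | 8) == chr(10); set_valve(5, False) writes chr(5).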
def get_states(distances):
states = [False] * 6
for i, stick in enumerate(distances):
if any([height['on'] for height in stick]):
states[i] = True
return states
def update_valves():
data = json.load(urllib2.urlopen('http://localhost:5000/status'))
states = get_states(data['distances'])
print "Updating states to", states
for valve, state in enumerate(states):
set_valve(valve, state)
def start_ws():
ws = create_connection("ws://localhost:5000/changes")
update_valves()
while True:
ws.recv()
update_valves()
if __name__ == "__main__":
start_ws()
|
StarcoderdataPython
|
93875
|
"""
Given n pairs of parentheses, write a function to generate all combinations of well-formed parentheses.
Example 1:
Input: n = 3
Output: ["((()))","(()())","(())()","()(())","()()()"]
Example 2:
Input: n = 1
Output: ["()"]
Constraints:
1 <= n <= 8
"""
from typing import List
class Solution:
def generateParenthesis(self, n: int) -> List[str]:
ans = []
self.helper(0, 0, "", n, ans)
return ans
def helper(self, left, right, current, n, ans):
if left < right:
return
if left == n and right == n:
ans.append(current)
return
if left > n or right > n:
return
new_current = current + "("
self.helper(left + 1, right, new_current, n, ans)
new_current = current + ")"
self.helper(left, right + 1, new_current, n, ans)
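# Example usage (sketch): Solution().generateParenthesis(2) returns
# ["(())", "()()"]; the recursion tries '(' before ')' at every step, which for
# n = 3 produces exactly the ordering shown in the problem examples above.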
|
StarcoderdataPython
|
105842
|
<filename>hyperband/torch_model.py
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
class HiddenLayerNet(nn.Module):
def __init__(self, n_features=10, n_outputs=1, n_hidden=100, activation="relu"):
super().__init__()
self.fc1 = nn.Linear(n_features, n_hidden)
self.fc2 = nn.Linear(n_hidden, n_outputs)
self.activation = getattr(F, activation)
def forward(self, x, **kwargs):
return self.fc2(self.activation(self.fc1(x)))
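# Minimal usage sketch (shapes are illustrative):
#   net = HiddenLayerNet(n_features=10, n_outputs=1, n_hidden=100, activation="relu")
#   y = net(torch.randn(4, 10))   # y has shape (4, 1)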
|
StarcoderdataPython
|
1773797
|
<filename>day22_pong/main.py
from turtle import Screen
from score_board import Score
from draw_center_line import DrawCenterLine
from paddle import Paddle
import main
from ball import Ball
import time
WIDTH = 900
HEIGHT = 600
game_on = True
active_player = 'left'
player_1_score = 0
player_2_score = 0
screen = Screen()
screen.tracer(0)
screen.listen()
screen.setup(WIDTH, HEIGHT)
screen.bgcolor('black')
screen.title('Pong Game')
score = Score(WIDTH, HEIGHT, player_1_score, player_2_score)
paddle_1 = Paddle(-WIDTH / 2 + 30, -10)
paddle_2 = Paddle(WIDTH / 2 - 30, -10)
DrawCenterLine(600)
def move_up():
if main.active_player == 'right':
paddle_1.move_up()
else:
paddle_2.move_up()
def move_down():
if main.active_player == 'right':
paddle_1.move_down()
else:
paddle_2.move_down()
def game_over():
main.game_on = False
screen.onkeypress(move_up, 'Up')
screen.onkeypress(move_down, 'Down')
screen.onkey(game_over, 'x')
while game_on:
start_right = (10, 5)
start_left = (-10, -5)
ball = Ball((0, 0))
if active_player == 'right':
current_direction = start_left
else:
current_direction = start_right
x = current_direction[0]
y = current_direction[1]
ball.hideturtle()
curr_round = True
while curr_round:
time.sleep(.1)
(X, Y) = (ball.xcor(), ball.ycor())
(p1_x, p1_y) = (paddle_1.xcor(), paddle_1.ycor())
(p2_x, p2_y) = (paddle_2.xcor(), paddle_2.ycor())
if X >= WIDTH / 2 - 10:
player_1_score += 1
score = Score(WIDTH, HEIGHT, player_1_score, player_2_score)
active_player = 'right'
curr_round = False
elif X <= -WIDTH / 2 - 10:
player_2_score += 1
score = Score(WIDTH, HEIGHT, player_1_score, player_2_score)
active_player = 'left'
curr_round = False
elif Y >= HEIGHT / 2 - 10 or Y <= -HEIGHT / 2 + 10:
y *= -1
elif ball.xcor() >= WIDTH / 2 - 50 and ball.distance(paddle_2) < 50:
x *= -1
active_player = 'right'
elif ball.xcor() <= -WIDTH / 2 + 50 and ball.distance(paddle_1) < 50:
x *= -1
active_player = 'left'
ball.hideturtle()
ball = Ball((X + x, Y + y))
screen.update()
ball.hideturtle()
screen.exitonclick()
|
StarcoderdataPython
|
1730652
|
<gh_stars>1-10
import logging
from typing import List, Dict
from bs4 import BeautifulSoup
from parsers.pin7_cleanings import clean_row
from storages.local import FileStorage, CsvStorage
logger = logging.getLogger(__name__)
class ParsedPages:
def __init__(self, file: FileStorage, csv: CsvStorage) -> None:
self._file = file
self._csv = csv
def save(self) -> None:
result = []
for row in self._file.read_data():
(url, text) = row.split('\t')
result.extend(
PinPage(url, text).rows()
)
logger.info("All pages have been parsed: {} rows found".format(len(result)))
self._csv.write_data(result)
class PinPage:
def __init__(self, url: str, page: str) -> None:
self._url = url
self._page = page
def rows(self) -> List[Dict]:
result = []
soup = BeautifulSoup(self._page, 'html.parser')
for br in soup.find_all("br"):
br.replace_with("\n")
rows = soup.select('tr[class^="trm_0"]')
number = 1
for row in rows:
try:
result.append(clean_row(row))
number += 1
except Exception as e:
logger.exception('Failed on %s. Row %s. %s', self._url, number, str(e))
logger.error(row)
logger.info("Url parsed: {}, found rows: {}".format(self._url, len(result)))
return result
|
StarcoderdataPython
|
107125
|
<filename>src/pypevue/examples/autoAdder3e.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*- -- jiw 15 Oct 2020
# This pypevu plugin develops Delaunay triangulations. Is used by
# variants of eg-auto-8-237 and eg-auto-test-2 for automatically
# making edges (cylinders) between posts. autoAdder3e was adapted
# from autoAdder2d by adding tests for 6NN nearest neighbor edges
# being included in Delaunay triangulations. Let nbr(u) be the set of
# neighbors of u. For each 6NN graph edge a-b not included among DT
# edges, code looks for DT edge c-d, with c, d in intersect(nbr(a),
# nbr(b)). If length(c,d) > length(a,b) we remove c-d and replace it
# with a-b. Note, this prelim version of autoAdder3e doesn't treat
# sets of common neighbors with more than 2 members. (Geometrically
# unlikely in 2D; easy to find in 3D solid figures...)
# pypevu has two parameters related to automatic-edge generation:
# `autoMax` - Used in test of whether to visualize circumcenters
# `autoList` - Whether to list generated edges. autoList=t says to
# list auto-edges; autoList=f says no.
from math import sqrt
from pypevue import Point, Cylinder, FunctionList as ref
from nearby.delaunay import Vert, Triangulate, CircumCircle2, CircumCircle3
from nearby.kNN import PNN, doAMethod
#==============================================================
def autoAdder(fout):
rlo = ref.LO
cyls = rlo.cyls # List of cylinders
posts = rlo.posts # List of posts
edgeList = rlo.edgeList # List of edges
nPosts = len(posts)
clo = len(cyls) # Record how many cylinders are already processed
# in this version punt color, thix, levels ...
colo, thix, lev1, lev2 = 'B', 'p', 'c','c'
npoints = len(posts)
def canon(j,k): # Canonical reference for edge j-k
return min(j,k)*npoints + max(j,k)
def decanon(t):
return t//npoints, t%npoints
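# Example (sketch): with npoints == 10, canon(7, 3) == 3*10 + 7 == 37 and
# decanon(37) == (3, 7); i.e. each edge is stored with the lower post number first.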
# Make list of post locs in verts, recording original post numbers
verts = [Vert(p.foot, pn) for pn, p in enumerate(posts)]
print (f'verts has {len(verts)} points, ({verts[0]})...({verts[-1]})')
# Get Delaunay triangulation of verts
Vert.CircumCircle = CircumCircle2
sverts, tris, cache = Triangulate(verts)
print (f'tris has {len(tris)} faces, ({tris[0]})...({tris[-1]})')
# Make DT edges dict and edges lists
DTedges = {}; DTedgels = [0]*npoints
for f in tris: # Make arrows to verts of faces
cornerNums = f.get123 # Get triple of vert indices
p = sverts[cornerNums[-1]]
for kq in cornerNums:
q = sverts[kq]
DTedges[canon(p.num,q.num)] = 1
if not DTedgels[p.num]:
DTedgels[p.num] = []
DTedgels[p.num].append(q.num)
p = q
nDTedges = len(DTedges)
for k in range(npoints):
if DTedgels[k]:
DTedgels[k] = set(DTedgels[k])
# Make NN data and NN edges list; via list of PNN points in orig order
# PNN.kNN needs to be set before we call doAMethod
PNN.kNN = 1 # We want exactly 1 nearest nbr
PNN.kNN = 18 # Actually, we want more than that
# For ./eg-zrough3e.py 5 2 1 0 2 3 && pypevu xyz try different kNN
# and see misplaced cylinders or posts ^^^^^^^^^^
Nverts = [PNN(p.foot.x,p.foot.y,p.foot.z) for p in posts] # PNN has .BSF[] for each point
doAMethod(Nverts)
NNedges = {}
for jp, p in enumerate(Nverts):
for kq in p.nBSF:
NNedges[canon(jp,kq)] = 1
nNNedges = len(NNedges)
#print(f'DTedges has {len(DTedges)} entries and NNedges has {len(NNedges)} entries, {[decanon(k) for k in sorted(NNedges.keys())]}')
# Find edges that are in NN but not in DT
adddels = 0
for ne in NNedges:
if not ne in DTedges:
ea, eb = decanon(ne)
dab = (Nverts[ea]-Nverts[eb]).mag2()
# Get list of edges that can go to common neighbors
if type(DTedgels[ea])==set==type(DTedgels[eb]):
l = sorted(DTedgels[ea].intersection(DTedgels[eb]))
else: continue
for ec, ed in [(x,y) for x in l for y in l if x<y]:
dcd = (Nverts[ec]-Nverts[ed]).mag2()
cdC = canon(ec,ed)
# Is NN link a-b longer than DT link c-d, or c-d not present?
if dab > dcd or cdC not in DTedges:
continue # Skip it if so
# Install NN link in place of DT link
del DTedges[canon(ec,ed)]
DTedgels[ec].discard(ed)
DTedgels[ed].discard(ec)
DTedges[canon(ea,eb)] = 1
DTedgels[ea].add(eb)
DTedgels[eb].add(ea)
adddels += 1
print (f"From {nNNedges} NN edges and {nDTedges} DT edges, got {len(DTedges)} edges net by using {adddels} NN's vs DT edges")
# Make cylinders for Delaunay edges (from low post# to high#)
for e in sorted(DTedges.keys()):
pa, pb = decanon(e)
if pa not in edgeList or pb not in edgeList[pa]:
ref.addEdges(pa, pb, rlo)
cyls.append(Cylinder(pa,pb, lev1, lev2, colo, thix, ref.endGap, 0,0))
ref.writeCylinders(fout, clo, len(cyls), ref.autoList, 2)
#==============================================================
def tell():
return (autoAdder,)
|
StarcoderdataPython
|
1677540
|
<reponame>jaidevd/scikit-image
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal
import skimage
from skimage import data
from skimage.filters.thresholding import (threshold_adaptive,
threshold_otsu,
threshold_yen,
threshold_isodata)
class TestSimpleImage():
def setup(self):
self.image = np.array([[0, 0, 1, 3, 5],
[0, 1, 4, 3, 4],
[1, 2, 5, 4, 1],
[2, 4, 5, 2, 1],
[4, 5, 1, 0, 0]], dtype=int)
def test_otsu(self):
assert threshold_otsu(self.image) == 2
def test_otsu_negative_int(self):
image = self.image - 2
assert threshold_otsu(image) == 0
def test_otsu_float_image(self):
image = np.float64(self.image)
assert 2 <= threshold_otsu(image) < 3
def test_yen(self):
assert threshold_yen(self.image) == 2
def test_yen_negative_int(self):
image = self.image - 2
assert threshold_yen(image) == 0
def test_yen_float_image(self):
image = np.float64(self.image)
assert 2 <= threshold_yen(image) < 3
def test_yen_arange(self):
image = np.arange(256)
assert threshold_yen(image) == 127
def test_yen_binary(self):
image = np.zeros([2,256], dtype=np.uint8)
image[0] = 255
assert threshold_yen(image) < 1
def test_yen_blank_zero(self):
image = np.zeros((5, 5), dtype=np.uint8)
assert threshold_yen(image) == 0
def test_yen_blank_max(self):
image = np.empty((5, 5), dtype=np.uint8)
image.fill(255)
assert threshold_yen(image) == 255
def test_isodata(self):
assert threshold_isodata(self.image) == 2
assert threshold_isodata(self.image, return_all=True) == [2]
def test_isodata_blank_zero(self):
image = np.zeros((5, 5), dtype=np.uint8)
assert threshold_isodata(image) == 0
assert threshold_isodata(image, return_all=True) == [0]
def test_isodata_linspace(self):
image = np.linspace(-127, 0, 256)
assert -63.8 < threshold_isodata(image) < -63.6
assert_almost_equal(threshold_isodata(image, return_all=True),
[-63.74804688, -63.25195312])
def test_isodata_16bit(self):
np.random.seed(0)
imfloat = np.random.rand(256, 256)
assert 0.49 < threshold_isodata(imfloat, nbins=1024) < 0.51
assert all(0.49 < threshold_isodata(imfloat, nbins=1024,
return_all=True))
def test_threshold_adaptive_generic(self):
def func(arr):
return arr.sum() / arr.shape[0]
ref = np.array(
[[False, False, False, False, True],
[False, False, True, False, True],
[False, False, True, True, False],
[False, True, True, False, False],
[ True, True, False, False, False]]
)
out = threshold_adaptive(self.image, 3, method='generic', param=func)
assert_equal(ref, out)
def test_threshold_adaptive_gaussian(self):
ref = np.array(
[[False, False, False, False, True],
[False, False, True, False, True],
[False, False, True, True, False],
[False, True, True, False, False],
[ True, True, False, False, False]]
)
out = threshold_adaptive(self.image, 3, method='gaussian')
assert_equal(ref, out)
out = threshold_adaptive(self.image, 3, method='gaussian', param=1.0 / 3.0)
assert_equal(ref, out)
def test_threshold_adaptive_mean(self):
ref = np.array(
[[False, False, False, False, True],
[False, False, True, False, True],
[False, False, True, True, False],
[False, True, True, False, False],
[ True, True, False, False, False]]
)
out = threshold_adaptive(self.image, 3, method='mean')
assert_equal(ref, out)
def test_threshold_adaptive_median(self):
ref = np.array(
[[False, False, False, False, True],
[False, False, True, False, False],
[False, False, True, False, False],
[False, False, True, True, False],
[False, True, False, False, False]]
)
out = threshold_adaptive(self.image, 3, method='median')
assert_equal(ref, out)
def test_otsu_camera_image():
camera = skimage.img_as_ubyte(data.camera())
assert 86 < threshold_otsu(camera) < 88
def test_otsu_coins_image():
coins = skimage.img_as_ubyte(data.coins())
assert 106 < threshold_otsu(coins) < 108
def test_otsu_coins_image_as_float():
coins = skimage.img_as_float(data.coins())
assert 0.41 < threshold_otsu(coins) < 0.42
def test_otsu_lena_image():
img = skimage.img_as_ubyte(data.lena())
assert 140 < threshold_otsu(img) < 142
def test_otsu_astro_image():
img = skimage.img_as_ubyte(data.astronaut())
assert 109 < threshold_otsu(img) < 111
def test_yen_camera_image():
camera = skimage.img_as_ubyte(data.camera())
assert 197 < threshold_yen(camera) < 199
def test_yen_coins_image():
coins = skimage.img_as_ubyte(data.coins())
assert 109 < threshold_yen(coins) < 111
def test_yen_coins_image_as_float():
coins = skimage.img_as_float(data.coins())
assert 0.43 < threshold_yen(coins) < 0.44
def test_isodata_camera_image():
camera = skimage.img_as_ubyte(data.camera())
threshold = threshold_isodata(camera)
assert np.floor((camera[camera <= threshold].mean() +
camera[camera > threshold].mean()) / 2.0) == threshold
assert threshold == 87
assert threshold_isodata(camera, return_all=True) == [87]
def test_isodata_coins_image():
coins = skimage.img_as_ubyte(data.coins())
threshold = threshold_isodata(coins)
assert np.floor((coins[coins <= threshold].mean() +
coins[coins > threshold].mean()) / 2.0) == threshold
assert threshold == 107
assert threshold_isodata(coins, return_all=True) == [107]
def test_isodata_moon_image():
moon = skimage.img_as_ubyte(data.moon())
threshold = threshold_isodata(moon)
assert np.floor((moon[moon <= threshold].mean() +
moon[moon > threshold].mean()) / 2.0) == threshold
assert threshold == 86
thresholds = threshold_isodata(moon, return_all=True)
for threshold in thresholds:
assert np.floor((moon[moon <= threshold].mean() +
moon[moon > threshold].mean()) / 2.0) == threshold
assert_equal(thresholds, [86, 87, 88, 122, 123, 124, 139, 140])
def test_isodata_moon_image_negative_int():
moon = skimage.img_as_ubyte(data.moon()).astype(np.int32)
moon -= 100
threshold = threshold_isodata(moon)
assert np.floor((moon[moon <= threshold].mean() +
moon[moon > threshold].mean()) / 2.0) == threshold
assert threshold == -14
thresholds = threshold_isodata(moon, return_all=True)
for threshold in thresholds:
assert np.floor((moon[moon <= threshold].mean() +
moon[moon > threshold].mean()) / 2.0) == threshold
assert_equal(thresholds, [-14, -13, -12, 22, 23, 24, 39, 40])
def test_isodata_moon_image_negative_float():
moon = skimage.img_as_ubyte(data.moon()).astype(np.float64)
moon -= 100
assert -14 < threshold_isodata(moon) < -13
thresholds = threshold_isodata(moon, return_all=True)
assert_almost_equal(thresholds,
[-13.83789062, -12.84179688, -11.84570312, 22.02148438,
23.01757812, 24.01367188, 38.95507812, 39.95117188])
if __name__ == '__main__':
np.testing.run_module_suite()
|
StarcoderdataPython
|
191651
|
<filename>webserver/createdb.py
import sqlite3
conn = sqlite3.connect('leaguemate.db')
c = conn.cursor()
sql_file = open('Newdatabase.sql')
sql_as_str = sql_file.read()
sql_file.close()
c.executescript(sql_as_str)
conn.commit()
conn.close()
|
StarcoderdataPython
|
3351201
|
<filename>rosbag_decode/bag-decode.py
from rosbags.rosbag2 import Reader
from rosbags.serde import deserialize_cdr
from datetime import datetime
path = "rosbag_decode/test-logs/rosbag2_2021_06_01-19_24_43"
def list_topics_test():
with Reader(path) as reader:
# topic and msgtype information is available on .topics dict
for topic, msgtype in reader.topics.items():
print(topic, msgtype)
def deser_msg_test():
with Reader(path) as reader:
for topic, msgtype, timestamp, rawdata in reader.messages(['/waverunner/sys/ctrl/scenario_sys_time']):
msg = deserialize_cdr(rawdata, msgtype)
#decode from nanosecond timestamp
readable_timestamp = datetime.fromtimestamp(timestamp*1E-9)
print(readable_timestamp)
print(msg.data)
if __name__ == "__main__":
#deser_msg_test()
list_topics_test()
|
StarcoderdataPython
|
1649561
|
<gh_stars>1-10
'''
Created on 1.12.2016
@author: Darren
Implement a basic calculator to evaluate a simple expression string.
The expression string may contain open ( and closing parentheses ), the plus + or minus sign -, non-negative integers and empty spaces.
You may assume that the given expression is always valid.
Some examples:
"1 + 1" = 2
" 2-1 + 2 " = 3
"(1+(4+5+2)-3)+(6+8)" = 23
Note: Do not use the eval built-in library function.
'''
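# A minimal sketch (not part of the original snippet) of one common stack-based
# way to evaluate such expressions without eval(); the function name and structure
# below are illustrative, not the author's solution.
def calculate(s):
    result, number, sign = 0, 0, 1
    stack = []
    for ch in s:
        if ch.isdigit():
            number = number * 10 + int(ch)
        elif ch in '+-':
            result += sign * number
            number = 0
            sign = 1 if ch == '+' else -1
        elif ch == '(':
            # remember the result and sign accumulated before the parenthesis
            stack.append(result)
            stack.append(sign)
            result, sign = 0, 1
        elif ch == ')':
            result += sign * number
            number = 0
            result = result * stack.pop() + stack.pop()
    return result + sign * number

# For example, calculate("(1+(4+5+2)-3)+(6+8)") returns 23.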
|
StarcoderdataPython
|
1768708
|
<filename>setup.py
from setuptools import setup, find_packages
setup(
name='python-katas',
packages=find_packages(),
version='0.1',
description='A Python GitHub repository for practicing katas.',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/DEV3L/python_katas',
download_url='https://github.com/DEV3L/python_katas/tarball/0.1',
keywords=['dev3l', 'python', 'kata',],
install_requires=[
'pytest',
'flask',
],
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Python Modules'],
)
|
StarcoderdataPython
|
37826
|
from midiutil.MidiFile import MIDIFile
import os
def _create_midi_mapping():
""" Create a dictionary that maps note name to midi note integer """
middle_c = 60
notes = "c", "c#", "d", "d#", "e", "f", "f#", "g", "g#", "a", "a#", "b"
equiv = (("c#", "db"), ("d#", "eb"),
("f#", "gb"), ("g#", "ab"), ("a#", "bb"))
m = {}
j, o = len(notes)-1, 3
for v in range(middle_c-1, -1, -1):
for e in equiv: m[notes[j].replace(*e) + str(o)] = v
if j == 0: o -= 1
j = (j - 1) % len(notes)
j, o = 0, 4
for v in range(middle_c, 128):
for e in equiv: m[notes[j].replace(*e) + str(o)] = v
j = (j + 1) % len(notes)
if j == 0: o += 1
return m
_midi_mapping = _create_midi_mapping()
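# Illustrative sanity check (not in the original file): with the mapping built
# above, middle C ("c4") corresponds to MIDI note 60.
# assert _midi_mapping["c4"] == 60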
class Song(MIDIFile):
_valid = tuple, list, type(x for x in range(1))
def __init__(self, name="test", tempo=100, num_tracks=1):
"""
Intialize Song object.
name: str, name of song/file.
tempo: int, bpm of song.
num_tracks: int, number of tracks for the midi file to have.
"""
super().__init__(num_tracks)
self.name, self.tempo, self.volume = name, tempo, 100
self.filename = "%s.mid" % name
self.path = ""
track, self.channel = 0, 0
self.time = [0]*num_tracks # start each track at the beginning
self.addTempo(track, self.time[0], self.tempo)
def addNote(self, notes, duration=4, track=0):
"""
Overrides MIDIFile's addNote method, but uses it as a subroutine. Adds
a note or notes with a duration to the specified track, then increments
the time by that duration.
notes: str or tuple of strs, notes to add at the current location of
of the track.
duration: float, number of beats for the note/chord.
track: int, which track to add to.
"""
if not isinstance(notes, Song._valid): notes = notes,
for note in notes:
note = note.lower()
if note in _midi_mapping: pitch = _midi_mapping[note]
elif note+"4" in _midi_mapping: pitch = _midi_mapping[note+"4"]
else: raise ValueError("Note not valid:", note)
super().addNote(track, self.channel, pitch,
self.time[track], duration, self.volume)
self.time[track] += duration
self.need_to_write = True
def addRest(self, duration=1, track=0):
"""
Add a rest to the track, just corresponds to adjusting the time.
duration: float, number of beats the rest lasts.
track: int, which track to add the rest to.
"""
self.time[track] += duration
self.need_to_write = True
def addText(self, text, track=0):
"""
Add text to a track at the current time. For it to be visible, there
must be a note at the current time on this track.
text: str, text to add.
track: int, which track to add the text to.
"""
super().addText(track, self.time[track], str(text))
self.need_to_write = True
def writeFile(self, path=""):
"""
Write the current midi track to a file
path: str, path to write the file to. Must end with a "/"!
"""
if not self.need_to_write: return
try:
with open(path+self.filename, "wb") as f: super().writeFile(f)
except FileNotFoundError:
os.mkdir(path)
with open(path+self.filename, "wb") as f: super().writeFile(f)
self.need_to_write = False
self.path = path
def play(self, path=""):
"""
Write the midi file, then call on the system's default midi player. On
Windows, this is probably Windows Media Player. THIS ONLY WORKS ON
WINDOWS, IF YOU WANT TO USE IT YOU MUST CHANGE THE SYSTEM CALL.
path: str, where to save the file to. Must end with a "/"!
"""
if not path and self.path: path = self.path
self.writeFile(path)
os.system("start %s" % (self.path+self.filename))
def __str__(self):
""" Return the string name of the song """
return self.filename
if __name__ == "__main__":
    # Song.__init__ takes no `path` argument and the class defines no view()
    # method, so build the song and write it out via writeFile() (or play()).
    s = Song(name="helloworld", tempo=110)
    s.addNote("c")
    s.addNote("d")
    s.addNote(("c", "d", "e"))
    s.writeFile()
|
StarcoderdataPython
|
3216336
|
<gh_stars>0
""" Different model components to use in building the overall model.
The main component of interest is SentenceEncoder, which all the models use. """
import torch
import torch.utils.data
import torch.utils.data.distributed
from allennlp.models.model import Model
# StackedSelfAttentionEncoder
from allennlp.nn import InitializerApplicator, util
from allennlp.modules import Highway, TimeDistributed
from jiant.pytorch_transformers_interface.modules import PytorchTransformersEmbedderModule
from jiant.tasks.tasks import PairClassificationTask, PairRegressionTask
from jiant.utils import utils
from jiant.modules.simple_modules import NullPhraseLayer
from jiant.modules.bilm_encoder import BiLMEncoder
from jiant.modules.onlstm.ON_LSTM import ONLSTMStack
from jiant.modules.prpn.PRPN import PRPN
class SentenceEncoder(Model):
""" Given a sequence of tokens, embed each token and pass through a sequence encoder. """
# NOTE: Do not apply dropout to the input of this module. Will be applied
# internally.
def __init__(
self,
vocab,
text_field_embedder,
num_highway_layers,
phrase_layer,
skip_embs=True,
cove_layer=None,
dropout=0.2,
mask_lstms=True,
sep_embs_for_skip=False,
initializer=InitializerApplicator(),
):
super(SentenceEncoder, self).__init__(vocab)
if text_field_embedder is None:
self._text_field_embedder = lambda x: x
d_emb = 0
self._highway_layer = lambda x: x
else:
self._text_field_embedder = text_field_embedder
d_emb = text_field_embedder.get_output_dim()
self._highway_layer = TimeDistributed(Highway(d_emb, num_highway_layers))
self._phrase_layer = phrase_layer
self._cove_layer = cove_layer
self.pad_idx = vocab.get_token_index(vocab._padding_token)
self.skip_embs = skip_embs
self.sep_embs_for_skip = sep_embs_for_skip
d_inp_phrase = self._phrase_layer.get_input_dim()
self.output_dim = phrase_layer.get_output_dim() + (skip_embs * d_inp_phrase)
if dropout > 0:
self._dropout = torch.nn.Dropout(p=dropout)
else:
self._dropout = lambda x: x
self._mask_lstms = mask_lstms
initializer(self)
def forward(self, sent, task, reset=True):
# pylint: disable=arguments-differ
"""
Args:
- sent (Dict[str, torch.LongTensor]): From a ``TextField``.
- task (Task): Used by the _text_field_embedder to pick the correct output
ELMo representation.
- reset (Bool): if True, manually reset the states of the ELMo LSTMs present
(if using BiLM or ELMo embeddings). Set False, if want to preserve statefulness.
Returns:
- sent_enc (torch.FloatTensor): (b_size, seq_len, d_emb)
the padded values in sent_enc are set to 0
- sent_mask (torch.FloatTensor): (b_size, seq_len, d_emb); all 0/1s
"""
if reset:
self.reset_states()
# General sentence embeddings (for sentence encoder).
# Skip this for probing runs that don't need it.
if not isinstance(self._phrase_layer, NullPhraseLayer):
word_embs_in_context = self._highway_layer(self._text_field_embedder(sent))
else:
word_embs_in_context = None
# Task-specific sentence embeddings (e.g. custom ELMo weights).
# Skip computing this if it won't be used.
if self.sep_embs_for_skip:
task_word_embs_in_context = self._highway_layer(
self._text_field_embedder(sent, task._classifier_name)
)
else:
task_word_embs_in_context = None
# Make sure we're embedding /something/
assert (word_embs_in_context is not None) or (task_word_embs_in_context is not None)
if self._cove_layer is not None:
# Slightly wasteful as this repeats the GloVe lookup internally,
# but this allows CoVe to be used alongside other embedding models
# if we want to.
sent_lens = torch.ne(sent["words"], self.pad_idx).long().sum(dim=-1).data
# CoVe doesn't use <SOS> or <EOS>, so strip these before running.
# Note that we need to also drop the last column so that CoVe returns
# the right shape. If all inputs have <EOS> then this will be the
# only thing clipped.
sent_cove_embs_raw = self._cove_layer(sent["words"][:, 1:-1], sent_lens - 2)
pad_col = torch.zeros(
sent_cove_embs_raw.size()[0],
1,
sent_cove_embs_raw.size()[2],
dtype=sent_cove_embs_raw.dtype,
device=sent_cove_embs_raw.device,
)
sent_cove_embs = torch.cat([pad_col, sent_cove_embs_raw, pad_col], dim=1)
if word_embs_in_context is not None:
word_embs_in_context = torch.cat([word_embs_in_context, sent_cove_embs], dim=-1)
if task_word_embs_in_context is not None:
task_word_embs_in_context = torch.cat(
[task_word_embs_in_context, sent_cove_embs], dim=-1
)
if word_embs_in_context is not None:
word_embs_in_context = self._dropout(word_embs_in_context)
if task_word_embs_in_context is not None:
task_word_embs_in_context = self._dropout(task_word_embs_in_context)
# The rest of the model
sent_mask = util.get_text_field_mask(sent).float()
sent_lstm_mask = sent_mask if self._mask_lstms else None
if word_embs_in_context is not None:
if isinstance(self._phrase_layer, ONLSTMStack) or isinstance(self._phrase_layer, PRPN):
# The ONLSTMStack or PRPN takes the raw words as input and computes
# embeddings separately.
sent_enc, _ = self._phrase_layer(
torch.transpose(sent["words"], 0, 1), sent_lstm_mask
)
sent_enc = torch.transpose(sent_enc, 0, 1)
else:
sent_enc = self._phrase_layer(word_embs_in_context, sent_lstm_mask)
else:
sent_enc = None
# ELMoLSTM returns all layers, we just want to use the top layer
sent_enc = sent_enc[-1] if isinstance(self._phrase_layer, BiLMEncoder) else sent_enc
sent_enc = self._dropout(sent_enc) if sent_enc is not None else sent_enc
if self.skip_embs:
# Use skip connection with original sentence embs or task sentence
# embs
skip_vec = task_word_embs_in_context if self.sep_embs_for_skip else word_embs_in_context
utils.assert_for_log(
skip_vec is not None,
"skip_vec is none - perhaps embeddings are not configured properly?",
)
if isinstance(self._phrase_layer, NullPhraseLayer):
sent_enc = skip_vec
else:
sent_enc = torch.cat([sent_enc, skip_vec], dim=-1)
sent_mask = sent_mask.unsqueeze(dim=-1)
pad_mask = sent_mask == 0
assert sent_enc is not None
sent_enc = sent_enc.masked_fill(pad_mask, 0)
return sent_enc, sent_mask
def reset_states(self):
""" Reset ELMo if present; reset BiLM (ELMoLSTM) states if present """
if "token_embedder_elmo" in [
name for name, _ in self._text_field_embedder.named_children()
] and "_elmo" in [
name for name, _ in self._text_field_embedder.token_embedder_elmo.named_children()
]:
self._text_field_embedder.token_embedder_elmo._elmo._elmo_lstm._elmo_lstm.reset_states() # noqa # eek.
if isinstance(self._phrase_layer, BiLMEncoder):
self._phrase_layer.reset_states()
|
StarcoderdataPython
|
3255652
|
"""
Event object: threading.Event
set()   sets the internal "flag" to True
clear() sets the internal "flag" to False
wait()  blocks the calling thread while the "flag" is False; returns immediately once the "flag" is True
isSet() returns whether the "flag" is currently True
"""
import threading
import time
# Instantiate an event object
event = threading.Event()
def run():
    while not event.isSet():
        print(threading.current_thread().getName(), time.ctime())
        time.sleep(5)
        # Block for at most 10 s: wait(timeout=10) returns when the flag is set or
        # the timeout expires, but it does not set the flag itself, so the loop
        # condition above is what actually ends the thread.
        event.wait(timeout=10)
if __name__ == '__main__':
    # Start several threads that all run run()
    for i in range(10):
        th = threading.Thread(target=run)
        th.start()
    # Let the worker threads run for 30 s, then set the event so they exit
    time.sleep(30)
    event.set()
|
StarcoderdataPython
|
149633
|
<reponame>mcorne/python-by-example<filename>examples/math.lgamma/ex1.py
import math
print(math.lgamma(2))
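# lgamma(2) is log(Gamma(2)) = log(1!) = 0.0, so this prints 0.0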
|
StarcoderdataPython
|
3338877
|
<filename>patterns/behavioral/template_method.py
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Licensed under the terms of the BSD 3-Clause License
# (see LICENSE for details).
# Copyright © 2018-2021, <NAME>
# All rights reserved.
# --------------------------------------------------------
class ExampleBase:
def template_method(self):
self.step_one()
self.step_two()
self.step_three()
def step_one(self):
raise NotImplementedError()
def step_two(self):
raise NotImplementedError()
def step_three(self):
raise NotImplementedError()
class Example(ExampleBase):
def step_one(self):
print('The first step of the algorithm')
def step_two(self):
print('The second step of the algorithm')
def step_three(self):
print('The third step of the algorithm')
def main():
example = Example()
example.template_method()
if __name__ == '__main__':
# Output:
# The first step of the algorithm
# The second step of the algorithm
# The third step of the algorithm
main()
|
StarcoderdataPython
|
3312738
|
<reponame>camidvorkin/frux-app-server<filename>frux_app_server/schema.py
import graphene
from .graphqlschema.mutation import Mutation
from .graphqlschema.query import Query
# pylint: disable=unused-argument
schema = graphene.Schema(query=Query, mutation=Mutation)
|
StarcoderdataPython
|
3341995
|
<reponame>HuchieWuchie/AffordanceNet<filename>utils/drawing_utils.py
import tensorflow as tf
from PIL import Image, ImageDraw
import matplotlib.pyplot as plt
from utils import bbox_utils
import numpy as np
import cv2
background = [200, 222, 250, 0]
contain = [255, 0, 0, 100]
cut = [0, 153, 0, 100]
display = [192, 192, 192, 100]
engine = [96, 96, 96, 100]
grasp = [0, 102, 204, 100]
hit = [102, 0, 102, 200]
pound = [255, 204, 229, 200]
support = [102, 51, 0, 200]
w_grasp = [255, 255, 51, 100]
scoop = [255, 120, 0, 100]
label_colors_iit = np.array([background, contain, cut, display, engine, grasp, hit, pound, support, w_grasp])
label_colors_umd = np.array([background, grasp, cut, scoop, contain, pound, support, w_grasp])
def draw_grid_map(img, grid_map, stride):
"""Drawing grid intersection on given image.
inputs:
img = (height, width, channels)
grid_map = (output_height * output_width, [y_index, x_index, y_index, x_index])
tiled x, y coordinates
stride = number of stride
outputs:
array = (height, width, channels)
"""
image = Image.fromarray(img)
draw = ImageDraw.Draw(image)
counter = 0
for grid in grid_map:
draw.rectangle((
grid[0] + stride // 2 - 2,
grid[1] + stride // 2 - 2,
grid[2] + stride // 2 + 2,
grid[3] + stride // 2 + 2), fill=(255, 255, 255, 0))
counter += 1
plt.figure()
plt.imshow(image)
plt.show()
def draw_bboxes(imgs, bboxes):
"""Drawing bounding boxes on given images.
inputs:
imgs = (batch_size, height, width, channels)
bboxes = (batch_size, total_bboxes, [y1, x1, y2, x2])
in normalized form [0, 1]
"""
colors = tf.constant([[1, 0, 0, 1]], dtype=tf.float32)
imgs_with_bb = tf.image.draw_bounding_boxes(imgs, bboxes, colors)
plt.figure()
for img_with_bb in imgs_with_bb:
plt.imshow(img_with_bb)
plt.show()
def draw_bboxes_with_labels(img, true_bboxes, true_labels, bboxes, label_indices, probs, labels,
use_masks=False, true_masks=None, mask_ids=None, pred_masks=None):
"""Drawing bounding boxes with labels on given image.
inputs:
img = (height, width, channels)
bboxes = (total_bboxes, [y1, x1, y2, x2])
in denormalized form
label_indices = (total_bboxes)
probs = (total_bboxes)
labels = [labels string list]
"""
colors = tf.random.uniform((len(labels), 4), maxval=256, dtype=tf.int32)
image = tf.keras.preprocessing.image.array_to_img(img)
width, height = image.size
draw = ImageDraw.Draw(image)
plt.rcParams['figure.figsize'] = [10, 10]
# add overlay for masks
overlay = Image.new('RGBA', image.size, (255, 255, 255, 0))
drawing = ImageDraw.Draw(overlay)
draw_true_bboxes(true_bboxes, true_labels, labels, draw)
for index, bbox in enumerate(bboxes):
y1, x1, y2, x2 = tf.split(bbox, 4)
width = x2 - x1
height = y2 - y1
if width <= 0 or height <= 0:
continue
label_index = int(label_indices[index])
color = tuple(colors[label_index].numpy())
label_text = "{0} {1:0.3f}".format(labels[label_index], probs[index])
print(labels[label_index])
draw.text((x1 + 4, y1 + 2), label_text, fill=color)
draw.rectangle((x1, y1, x2, y2), outline=color, width=3)
if use_masks:
        # draw predicted masks; draw_true_masks() takes (masks, colors, image, overlay, labels=...),
        # so the extra `drawing` argument is not passed here
        image = draw_true_masks(pred_masks, colors, image, overlay, labels=probs.numpy().astype(int))
# image = draw_pred_masks(pred_masks, colors, image, drawing, overlay, labels=probs.numpy().astype(int))
plt.figure()
plt.imshow(image)
plt.show()
def reset_mask_ids(mask, before_uni_ids):
# reset ID mask values from [0, 1, 4] to [0, 1, 2] to resize later
counter = 0
for id in before_uni_ids:
mask[mask == id] = counter
counter += 1
return mask
def convert_mask_to_original_ids_manual(mask, original_uni_ids):
# TODO: speed up!!!
good_range = 0.005
temp_mask = np.copy(mask) # create temp mask to do np.around()
temp_mask = np.around(temp_mask, decimals=0) # round 1.6 -> 2., 1.1 -> 1.
current_uni_ids = np.unique(temp_mask)
out_mask = np.full(mask.shape, 0, 'float32')
mh, mw = mask.shape
for i in range(mh - 1):
for j in range(mw - 1):
for k in range(1, len(current_uni_ids)):
if mask[i][j] > (current_uni_ids[k] - good_range) and mask[i][j] < (current_uni_ids[k] + good_range):
out_mask[i][j] = original_uni_ids[k]
# mask[i][j] = current_uni_ids[k]
# const = 0.005
# out_mask = original_uni_ids[(np.abs(mask - original_uni_ids[:,None,None]) < const).argmax(0)]
return out_mask
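# A possible vectorized alternative to the pixel loop above (an illustrative
# sketch, not from the original code; it assumes, as the loop does, that
# original_uni_ids lines up index-for-index with the rounded unique ids).
def convert_mask_to_original_ids_vectorized(mask, original_uni_ids, good_range=0.005):
    current_uni_ids = np.unique(np.around(mask, decimals=0))
    diff = np.abs(mask[None, :, :] - current_uni_ids[:, None, None])
    nearest = diff.argmin(axis=0)
    within = diff.min(axis=0) < good_range
    out = np.where(within & (nearest > 0), np.asarray(original_uni_ids)[nearest], 0)
    return out.astype('float32')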
def draw_bboxes_with_labels_and_masks(img, true_bboxes, true_labels, bboxes, label_indices, probs, labels,
use_masks=False, true_masks=None, mask_ids=None, pred_masks=None, aff_labels=None, dataset='iit'):
"""Drawing bounding boxes with labels on given image.
inputs:
img = (height, width, channels)
bboxes = (total_bboxes, [y1, x1, y2, x2])
in denormalized form
label_indices = (total_bboxes)
probs = (total_bboxes)
labels = [labels string list]
"""
# colors = tf.random.uniform((len(labels), 4), maxval=256, dtype=tf.int32)
image = tf.keras.preprocessing.image.array_to_img(img)
image_copy = image.copy()
img_width, img_height = image.size
plt.rcParams['figure.figsize'] = [10, 10]
curr_mask = np.full((img_height, img_width), 0.0, 'float')
for index, bbox in enumerate(bboxes):
draw = ImageDraw.Draw(image)
# add overlay for masks
overlay = Image.new('RGBA', image.size, (255, 255, 255, 0))
drawing = ImageDraw.Draw(overlay)
# draw_true_bboxes(true_bboxes, true_labels, labels, draw)
# curr_mask = np.full((img_height, img_width), 0.0, 'float')
y1, x1, y2, x2 = tf.split(bbox, 4)
width = x2 - x1
height = y2 - y1
if width <= 0 or height <= 0:
continue
label_index = int(label_indices[index])
# color = tuple(colors[label_index].numpy())
color = tuple([0, 0, 0])
label_text = "{0} {1:0.3f}".format(labels[label_index], probs[index])
print(labels[label_index])
draw.text((x1 + 4, y1 + 2), label_text, fill=color)
draw.rectangle((x1, y1, x2, y2), outline=color, width=3)
# show corresponding masks
if use_masks:
mask = pred_masks[index]
# Calculate max index for each position in the mask -> calculate affordance label
mask = np.argmax(mask, axis=2)
# calculate distinct affordances avoiding 0
# affordance_labels = np.unique(mask)[1:]
original_affordance_labels = np.unique(mask)
# print(original_affordance_labels)
# sort before_uni_ids and reset [0, 1, 7] to [0, 1, 2]
original_affordance_labels.sort()
print(np.take(aff_labels, original_affordance_labels))
mask = reset_mask_ids(mask, original_affordance_labels)
# resize mask wrt bbox size and convert to original affordance label ids
mask = cv2.resize(mask.astype('float'), (int(width), int(height)), interpolation=cv2.INTER_LINEAR)
mask = convert_mask_to_original_ids_manual(mask, original_affordance_labels)
# TODO: add assert to check that the ids have not changed
# original_affordance_labels = np.unique(mask).astype(int)
# print(original_affordance_labels)
# add mask values to current mask but preserving the maximum index -> 0 less importance, 10 max importance
x1, x2, y1, y2 = int(x1.numpy()[0]), int(x2.numpy()[0]), int(y1.numpy()[0]), int(y2.numpy()[0])
provisional_mask = curr_mask[y1:y2, x1:x2]
curr_mask[y1:y2, x1:x2] = np.maximum(mask, provisional_mask) # assign to output mask
# for aff_label in affordance_labels:
# mask_affordance = mask.copy()
# mask_affordance[mask_affordance != aff_label] = 0
# mask_affordance[mask_affordance != 0] = 255
# mask1 = Image.fromarray(mask_affordance.astype(np.uint8), mode='L')
# print(aff_label)
# # color = tuple(colors[aff_label])
# color_roi_mask = np.take(label_colours, mask.astype('int32'), axis=0)
# plt.imshow(color_roi_mask)
# plt.show()
if use_masks:
curr_mask = curr_mask.astype('uint8')
label_colours = label_colors_iit if dataset == 'iit' else label_colors_umd
color_curr_mask = label_colours.take(curr_mask, axis=0)
color_curr_mask = Image.fromarray(color_curr_mask.astype(np.uint8), mode='RGBA')
image.paste(color_curr_mask, (0,0), color_curr_mask)
plt.figure()
plt.imshow(image)
plt.axis('off')
plt.show()
image = image_copy.copy()
def draw_true_bboxes(true_bboxes, true_labels, labels, draw):
for index, bbox in enumerate(true_bboxes):
if bbox.shape != (4,):
continue
# x1, y1, x2, y2 = tf.split(bbox, 4) #for iit dataset
y1, x1, y2, x2 = tf.split(bbox, 4) #for tf datasets
width = x2 - x1
height = y2 - y1
if width <= 0 or height <= 0:
continue
color = tuple([0, 0, 0])
draw.rectangle((x1, y1, x2, y2), outline=color, width=3)
label_text = "{0}".format(labels[true_labels[index]])
draw.text((x1 + 4, y1 + 2), label_text, fill=color)
def draw_predictions(img, true_bboxes, true_labels, pred_bboxes, pred_labels, pred_scores, labels, batch_size,
use_masks=False, true_masks=None, mask_ids=None, pred_masks=None):
img_height, img_width = img.shape[0], img.shape[1]
denormalized_true_bboxes = bbox_utils.denormalize_bboxes(true_bboxes, img_height, img_width)
denormalized_bboxes = bbox_utils.denormalize_bboxes(pred_bboxes, img_height, img_width)
draw_bboxes_with_labels(img, denormalized_true_bboxes, true_labels, denormalized_bboxes, pred_labels, pred_scores, labels,
use_masks, true_masks, mask_ids, pred_masks)
def draw_predictions_with_masks(img, true_bboxes, true_labels, pred_bboxes, pred_labels, pred_scores, labels, batch_size,
use_masks=False, true_masks=None, mask_ids=None, pred_masks=None, aff_labels=None, dataset='iit'):
img_height, img_width = img.shape[0], img.shape[1]
# denormalized_true_bboxes = bbox_utils.denormalize_bboxes(true_bboxes, img_height, img_width)
denormalized_true_bboxes = true_bboxes
denormalized_bboxes = bbox_utils.denormalize_bboxes(pred_bboxes, img_height, img_width)
draw_bboxes_with_labels_and_masks(img, denormalized_true_bboxes, true_labels, denormalized_bboxes, pred_labels,
pred_scores, labels,
use_masks, true_masks, mask_ids, pred_masks, aff_labels, dataset)
def draw_proposals(cfg, img, true_bboxes, true_labels, pred_bboxes, pred_labels,
pred_deltas_proposals, pred_labels_proposals, all_rois, rois_ok,
labels, batch_size,
use_masks=False, true_masks=None, mask_ids=None, pred_masks=None, aff_labels=None):
img_height, img_width = img.shape[0], img.shape[1]
denormalized_true_bboxes = bbox_utils.denormalize_bboxes(true_bboxes, img_height, img_width)
# pred_deltas_proposals *= cfg.VARIANCES
# pred_bboxes_proposals = bbox_utils.get_bboxes_from_deltas(pred_deltas_proposals, all_rois)
# denormalized_bboxes_proposals = bbox_utils.denormalize_bboxes(pred_bboxes_proposals, img_height, img_width)
denormalized_selected_bboxes = bbox_utils.denormalize_bboxes(rois_ok, img_height, img_width)
draw_proposals2(cfg, img, denormalized_true_bboxes, true_labels, denormalized_selected_bboxes, labels,
use_masks, true_masks, mask_ids, pred_masks, aff_labels)
def draw_proposals2(cfg, img, true_bboxes, true_labels, bboxes, labels,
use_masks=False, true_masks=None, mask_ids=None, pred_masks=None, aff_labels=None):
"""Drawing bounding boxes with labels on given image.
inputs:
img = (height, width, channels)
bboxes = (total_bboxes, [y1, x1, y2, x2])
in denormalized form
label_indices = (total_bboxes)
probs = (total_bboxes)
labels = [labels string list]
"""
original_image = tf.keras.preprocessing.image.array_to_img(img)
plt.rcParams['figure.figsize'] = [6, 6]
image = original_image.copy()
draw = ImageDraw.Draw(original_image)
draw_true_bboxes(true_bboxes, true_labels, labels, draw)
# show original image
plt.imshow(original_image)
plt.show()
print('number of proposals', bboxes.shape[0])
for index, bbox in enumerate(bboxes):
y1, x1, y2, x2 = tf.split(bbox, 4)
width = x2 - x1
height = y2 - y1
if width <= 0 or height <= 0:
continue
# crop original img to bbox proposal and resize to mask size
img = image.copy()
x1, x2, y1, y2 = int(x1.numpy()[0]), int(x2.numpy()[0]), int(y1.numpy()[0]), int(y2.numpy()[0])
img = img.crop((x1, y1, x2, y2))
img = img.resize((cfg.TRAIN_MASK_SIZE, cfg.TRAIN_MASK_SIZE))
# show corresponding masks
if use_masks:
# select corresponding mask
mask = pred_masks[index]
mask = mask.numpy()
print('affordance labels:', np.unique(mask))
curr_mask = mask # assign to output mask
            # show mask (this function receives no dataset argument, so the IIT
            # colour table is used here; switch to label_colors_umd for UMD data)
            label_colours = label_colors_iit
            curr_mask = curr_mask.astype('uint8')
            color_curr_mask = label_colours.take(curr_mask, axis=0)
            color_curr_mask = Image.fromarray(color_curr_mask.astype(np.uint8), mode='RGBA')
            img.paste(color_curr_mask, (0,0), color_curr_mask)
plt.figure()
plt.imshow(img)
plt.show()
def draw_ground_truth(img, true_bboxes, true_labels, true_masks, labels, dataset):
img_height, img_width = img.shape[0], img.shape[1]
true_bboxes = bbox_utils.denormalize_bboxes(true_bboxes, img_height, img_width)
# Calculate random colors for all possible affordances and set alpha
# colors = np.random.randint(0, 256, (len(labels), 4), np.int)
# colors[:, 3] = 180
# Choose colors for affordances
colors = label_colors_iit if dataset == 'iit' else label_colors_umd
image = tf.keras.preprocessing.image.array_to_img(img)
# image.putalpha(255)
draw = ImageDraw.Draw(image)
plt.rcParams['figure.figsize'] = [10, 10]
# add overlay for masks
overlay = Image.new('RGBA', image.size, (255, 255, 255, 0))
drawing = ImageDraw.Draw(overlay)
draw_true_bboxes(true_bboxes, true_labels, labels, draw)
# draw masks
image = draw_true_masks(true_masks, colors, image, overlay)
plt.figure()
plt.imshow(image)
plt.axis('off')
plt.show()
# draw all masks using different colors for each affordance label
def draw_true_masks(true_masks, colors, image, overlay, labels=None):
for index, mask in enumerate(true_masks):
mask = mask.numpy()
affordance_labels = np.unique(mask)[1:].astype(np.int) # calculate distinct affordances avoiding 0
print(affordance_labels)
# for aff_label in affordance_labels:
mask_affordance = mask.copy()
# mask_affordance[mask_affordance != aff_label] = 0
# mask_affordance[mask_affordance != 0] = 255
mask_affordance = mask_affordance.astype(np.uint8)
color_curr_mask = colors.take(mask_affordance, axis=0)
color_curr_mask = Image.fromarray(color_curr_mask.astype(np.uint8), mode='RGBA')
image.paste(color_curr_mask, (0, 0), color_curr_mask)
return image
|
StarcoderdataPython
|
64418
|
<filename>vkbottle/tools/dev_tools/auto_reload.py
import os
import sys
from watchgod import awatch
from vkbottle.modules import logger
_startup_cwd = os.getcwd()
def restart():
""" https://github.com/cherrypy/cherrypy/blob/0857fa81eb0ab647c7b59a019338bab057f7748b/cherrypy/process/wspbus.py#L305
"""
args = sys.argv[:]
logger.debug("Restarting: %s" % " ".join(args))
args.insert(0, sys.executable)
if sys.platform == "win32":
args = ['"%s"' % arg for arg in args]
os.chdir(_startup_cwd)
os.execv(sys.executable, args)
async def watch_to_reload(check_dir: str):
"""
Coro which see changes in your code and restart him.
:return:
"""
async for _ in awatch(check_dir):
logger.info("Changes were found. Restarting...")
restart()
|
StarcoderdataPython
|
3320823
|
## <NAME>
## September 28, 2020
"""
==========================
Tools for the HRRR Archive
==========================
to_180
For longitude values to be from -180 W to 180 E (not 0-360 E).
get_crs
Get cartopy projection object from xarray.Dataset
pluck_points
Pluck values at specific latitude/longitude points.
"""
import multiprocessing
import warnings
import cartopy.crs as ccrs
import numpy as np
import xarray as xr
import hrrrb.archive as ha
def to_180(lon):
"""
Wrap longitude from degrees [0, 360] to degrees [-180, 180].
An alternative method is
lon[lon>180] -= 360
Parameters
----------
lon : array_like
Longitude values
"""
lon = np.array(lon)
lon = (lon + 180) % 360 - 180
return lon
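# Illustrative examples (not in the original file):
# to_180(250) -> -110.0, to_180(180) -> -180.0, to_180(-30) -> -30.0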
def get_crs(ds):
"""
Get the cartopy coordinate reference system (crs) from a cfgrib xarray Dataset
Projection in formation is from the grib2 message for each variable.
Parameters
----------
ds : xarray.Dataset
An xarray.Dataset from a GRIB2 file opened by the cfgrib engine.
"""
# Get projection from the attributes of HRRR xr.Dataset
# CF 1.8 map projection information for the HRRR model
# http://cfconventions.org/Data/cf-conventions/cf-conventions-1.8/cf-conventions.html#_lambert_conformal
if isinstance(ds, list):
# The case when get_hrrr returns a list of xr.Datasets
return get_crs(ds[0])
if isinstance(ds, xr.Dataset):
attrs = ds[list(ds)[0]].attrs
if isinstance(ds, xr.DataArray):
attrs = ds.attrs
if attrs['GRIB_gridType'] == 'lambert':
lc_hrrr_kwargs = {
'globe': ccrs.Globe(ellipse='sphere'),
'central_latitude': attrs['GRIB_LaDInDegrees'],
'central_longitude': attrs['GRIB_LoVInDegrees'],
'standard_parallels': (attrs['GRIB_Latin1InDegrees'],\
attrs['GRIB_Latin2InDegrees'])}
lc = ccrs.LambertConformal(**lc_hrrr_kwargs)
return lc
else:
warnings.warn('GRIB_gridType is not "lambert".')
return None
def pluck_points(ds, points, names=None, dist_thresh=10_000, verbose=True):
"""
Pluck values at point nearest a give list of latitudes and longitudes pairs.
Uses a nearest neighbor approach to get the values. The general
methodology is illustrated in this
`GitHub Notebook <https://github.com/blaylockbk/pyBKB_v3/blob/master/demo/Nearest_lat-lon_Grid.ipynb>`_.
Parameters
----------
ds : xarray.Dataset
The Dataset should include coordinates for both 'latitude' and
'longitude'.
points : tuple or list of tuples
The latitude and longitude (lat, lon) coordinate pair (as a tuple)
for the points you want to pluck from the gridded Dataset.
A list of tuples may be given to return the values from multiple points.
names : list
A list of names for each point location (i.e., station name).
None will not append any names. names should be the same
length as points.
dist_thresh: int or float
The maximum distance (m) between a plucked point and a matched point.
Default is 10,000 m. If the distance is larger than this, the point
is disregarded.
Returns
-------
The Dataset values at the points nearest the requested lat/lon points.
"""
if 'lat' in ds:
ds = ds.rename(dict(lat='latitude', lon='longitude'))
if isinstance(points, tuple):
# If a tuple is give, turn into a one-item list.
points = [points]
if names is not None:
assert len(points) == len(names), '`points` and `names` must be same length.'
# Find the index for the nearest points
xs = [] # x index values
ys = [] # y index values
for point in points:
assert len(point) == 2, "``points`` should be a tuple or list of tuples (lat, lon)"
p_lat, p_lon = point
# Force longitude values to range from -180 to 180 degrees.
p_lon = to_180(p_lon)
ds['longitude'][:] = to_180(ds.longitude)
# Find absolute difference between requested point and the grid coordinates.
abslat = np.abs(ds.latitude - p_lat)
abslon = np.abs(ds.longitude - p_lon)
# Create grid of the maximum values of the two absolute grids
c = np.maximum(abslon, abslat)
# Find location where lat/lon minimum absolute value intersects
if ds.latitude.dims == ('y', 'x'):
y, x = np.where(c == np.min(c))
elif ds.latitude.dims == ('x', 'y'):
x, y = np.where(c == np.min(c))
else:
raise ValueError(f"Sorry, I do not understand dimensions {ds.latitude.dims}. Expected ('y', 'x')" )
xs.append(x[0])
ys.append(y[0])
#===================================================================
# Select Method 1:
# This method works, but returns more data than you ask for.
# It returns an NxN matrix where N is the number of points,
# and matches each point with each point (not just the coordinate
# pairs). The points you want will be along the diagonal.
# I leave this here so I remember not to do this.
#
#ds = ds.isel(x=xs, y=ys)
#
#===================================================================
#===================================================================
# Select Method 2:
# This is only *slightly* slower, but returns just the data at the
# points you requested. Creates a new dimension, called 'point'
ds = xr.concat([ds.isel(x=i, y=j) for i, j in zip(xs, ys)], dim='point')
#===================================================================
#-------------------------------------------------------------------
# 📐Approximate the Great Circle distance between matched point and
# requested point.
# Based on https://andrew.hedges.name/experiments/haversine/
#-------------------------------------------------------------------
lat1 = np.deg2rad([i[0] for i in points])
lon1 = np.deg2rad([i[1] for i in points])
lat2 = np.deg2rad(ds.latitude.data)
lon2 = np.deg2rad(ds.longitude.data)
R = 6373.0 # approximate radius of earth in km
dlon = lon2 - lon1
dlat = lat2 - lat1
a = np.sin(dlat / 2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2)**2
c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))
distance = R * c * 1000 # converted to meters
# Add the distance values as a coordinate
ds.coords['distance'] = ('point', distance)
ds['distance'].attrs = dict(long_name='Distance between requested point and matched grid point', units='m')
#-------------------------------------------------------------------
#-------------------------------------------------------------------
# Add list of names as a coordinate
if hasattr(names, '__len__'):
# Assign the point dimension as the names.
assert len(ds.point) == len(names), f'`names` must be same length as `points` pairs.'
ds['point'] = names
## Print some info about each point:
if verbose:
zipped = zip([i[0] for i in points], [i[1] for i in points],
ds.latitude.data, ds.longitude.data, ds.distance.data, ds.point.data)
for plat, plon, glat, glon, d, name in zipped:
print(f"🔎 Matched requested point [{name}] ({plat:.3f}, {plon:.3f}) to grid point ({glat:.3f}, {glon:.3f})...distance of {d/1000:,.2f} km.")
if d > dist_thresh:
print(f' 💀 Point [{name}] Failed distance threshold')
ds.attrs['x_index'] = xs
ds.attrs['y_index'] = ys
    # Drop points that do not meet the dist_thresh criteria
    passed = ds.distance < dist_thresh
    print(len(passed), passed.data)
    warnings.warn(f' 💀 Dropped {np.sum(~passed).data} point(s) that exceeded dist_thresh.')
    ds = ds.where(passed, drop=True)
return ds
#=======================================================================
#=======================================================================
## WORK IN PROGRESS 🏗
# A method to download multiple forecasts and read into xarray
def _concat_hrrr(inputs):
"""Multiprocessing Helper"""
DATE, searchString, fxx, kwargs = inputs
return ha.xhrrr(DATE, searchString, fxx=fxx, **kwargs)
def concat_hrrr(DATES, searchString, fxxs=range(19), **get_hrrr_kwargs):
"""
Download variables
"""
inputs = [(DATE, searchString, f, get_hrrr_kwargs) for f in fxxs]
cores = np.minimum(len(inputs), multiprocessing.cpu_count())
print('cores=', cores)
with multiprocessing.Pool(cores) as p:
result = p.map(_concat_hrrr, inputs)
p.close()
p.join()
return xr.concat(result, dim='lead')
|
StarcoderdataPython
|
63654
|
<reponame>illicitDev/DS-Unit-3-Sprint-2-SQL-and-Databases
TOTAL_CHARACTERS = """
SELECT COUNT(name)
FROM charactercreator_character;
"""
TOTAL_SUBCLASS = """
SELECT
(SELECT COUNT(*)
FROM charactercreator_cleric
) as cleric,
(SELECT COUNT(*)
FROM charactercreator_fighter
) AS fighter,
(SELECT COUNT(*)
FROM charactercreator_mage
) AS mage,
(SELECT COUNT(*)
FROM charactercreator_necromancer
) AS necromancer,
(SELECT COUNT(*)
FROM charactercreator_thief
) AS thief;
"""
TOTAL_ITEMS = """
SELECT COUNT(*)
FROM armory_item;
"""
TOTAL_WEPONS = """
SELECT COUNT(*)
FROM armory_weapon;
"""
NON_WEPONS = """
-- items that have no matching weapon row (i.e. non-weapon items)
SELECT item_id, item_ptr_id
FROM armory_item
LEFT JOIN armory_weapon
ON armory_item.item_id = armory_weapon.item_ptr_id
WHERE armory_weapon.item_ptr_id IS NULL;
"""
CHARACTER_ITEMS = """
SELECT character_id, COUNT(character_id)
FROM charactercreator_character_inventory
GROUP BY character_id;
"""
|
StarcoderdataPython
|
39928
|
from typing import Tuple, Optional
import ray
from ray import workflow
@ray.remote
def intentional_fail() -> str:
raise RuntimeError("oops")
@ray.remote
def cry(error: Exception) -> None:
print("Sadly", error)
@ray.remote
def celebrate(result: str) -> None:
print("Success!", result)
@ray.remote
def send_email(result: str) -> None:
print("Sending email", result)
@ray.remote
def exit_handler(res: Tuple[Optional[str], Optional[Exception]]) -> None:
result, error = res
email = send_email.bind(f"Raw result: {result}, {error}")
if error:
handler = cry.bind(error)
else:
handler = celebrate.bind(result)
return workflow.continuation(wait_all.bind(handler, email))
@ray.remote
def wait_all(*deps):
return "done"
if __name__ == "__main__":
res = intentional_fail.options(**workflow.options(catch_exceptions=True)).bind()
print(workflow.create(exit_handler.bind(res)).run())
|
StarcoderdataPython
|
1670321
|
<gh_stars>0
l=[5,1,4,4,3,3,9,8,8,9,9]
l.sort()
print(l)
n=len(l)
print(n)
duplicatelist=[]
prev=l[0]
for i in range(1,n):
curr=l[i]
if curr==prev:
if not prev in duplicatelist:
duplicatelist=duplicatelist+[prev]
prev=curr
print(duplicatelist)
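# An equivalent, more idiomatic alternative (illustrative, not part of the original):
# from collections import Counter
# duplicatelist = [value for value, count in Counter(l).items() if count > 1]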
|
StarcoderdataPython
|
3218790
|
import argparse
import re
import jvmtunerInterface
from jvmtunerInterface import JvmFlagsTunerInterface
argparser = argparse.ArgumentParser(parents=[jvmtunerInterface.argparser])
argparser.add_argument(
'--jvm_spec_startup', default='java -jar SPECjvm2008.jar {source} -ikv -crf false --jvmArgs "{Opt_flags}"',
help='command template to JVMSPEC2008 statup program tuning.. ')
argparser.add_argument(
'--jvm_spec_nonstartup', default='java -jar SPECjvm2008.jar {source} -ikv -crf false -wt 30s -it 30s --jvmArgs "{Opt_flags}"',
help='command template to JVMSPEC2008 nonstatup program tuning.. ')
argparser.add_argument('--spec_type', help='Select between startup and non_startup', default='startup')
class SpecJvmTuner(JvmFlagsTunerInterface):
def __init__(self, *pargs, **kwargs):
super(SpecJvmTuner, self).__init__(args, *pargs,
**kwargs)
def execute_program(self):
temp_metric=0
for i in range(0,int(args.iterations)):
            print('running iteration ' + str(i))
if(args.spec_type == 'startup'):
run_result = self.call_program(args.jvm_spec_startup.format(source=args.source,Opt_flags=self.flags))
elif(args.spec_type == 'non_startup'):
run_result = self.call_program(args.jvm_spec_nonstartup.format(source=args.source,Opt_flags=self.flags))
temp_metric=temp_metric+self.get_ms_per_op_jvm(run_result['stdout'])
temp_metric=float(temp_metric/int(args.iterations))
return temp_metric
def get_ms_per_op_jvm(self,result):
m=re.search('Score on '+str(args.source)+': [0-9]*.[0-9]*|Score on '+str(args.source)+': [0-9]*',result,flags=re.DOTALL)
ops_m=1
if m:
ops_m=m.group(0)
ops_m =re.sub('Score on '+str(args.source)+': ','',ops_m)
ops_m = re.sub(' ops/m','',ops_m)
try:
ops_m=float(ops_m)
except:
ops_m=1
time_per_op=6000.0/ops_m
return time_per_op
if __name__ == '__main__':
args = argparser.parse_args()
SpecJvmTuner.main(args)
|
StarcoderdataPython
|
99909
|
def solution(value):
print("Solution: {}".format(value))
|
StarcoderdataPython
|
35194
|
# =============================================================================
# System imports
import logging
import RPi.GPIO as RPiGPIO
# =============================================================================
# Logger setup
logger = logging.getLogger(__name__)
# =============================================================================
# Classes
class GPIO:
IN = 0
OUT = 1
_initialized = False
def __init__(self,name,channel,inout,default_value=0,active_high=True,debug=False):
self._name = name
self._channel = channel
self._inout = inout
self._active_high = active_high
self._debug = debug
logger.debug('Initializing GPIO {:<10} channel={} inout={} default={} active_high={} debug={}'
.format( self._name
, self._channel
, "in" if inout == GPIO.IN else "out"
, default_value
, self._active_high
, self._debug ))
if self._debug == False:
if GPIO._initialized == False:
self._initialize()
rpigpio_inout = RPiGPIO.IN if inout == GPIO.IN else RPiGPIO.OUT
initial_state = None
if inout == GPIO.IN:
RPiGPIO.setup( self._channel
, rpigpio_inout )
else:
initial_state = RPiGPIO.LOW
if (self._active_high == True and default_value == 1) or \
(self._active_high == False and default_value == 0):
initial_state = RPiGPIO.HIGH
RPiGPIO.setup( self._channel
, rpigpio_inout
, initial=initial_state)
def __del__(self):
if self._debug == False:
RPiGPIO.cleanup( self._channel )
def _initialize(self):
logger.debug('Initializing RpiGPIO module')
RPiGPIO.setmode(RPiGPIO.BOARD)
GPIO._initialized = True
def set(self,value):
if self._inout == GPIO.IN:
logger.error('Can\'t set input GPIO {}'.format(self._name))
else:
physical_value = value if self._active_high == True else not value
logger.debug('Setting GPIO {:<10} to {} (logical value)'.format(self._name,1 if value else 0))
if self._debug == False:
RPiGPIO.output( self._channel, physical_value )
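# Illustrative usage (assumes a Raspberry Pi with RPi.GPIO available; the pin
# number and name below are invented for the example):
#
#   led = GPIO("status_led", channel=7, inout=GPIO.OUT, default_value=0)
#   led.set(1)  # logical "on"; the physical level depends on active_high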
|
StarcoderdataPython
|
70805
|
<gh_stars>0
#!/usr/bin/env python3
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2021 <NAME> <<EMAIL>>
# Copyright (c) 2019-2020 <NAME> <<EMAIL>>
# SPDX-License-Identifier: BSD-2-Clause
import os
import argparse
from migen import *
from litex_boards.platforms import snickerdoodle
from litex.build.xilinx.vivado import vivado_build_args, vivado_build_argdict
from litex.soc.interconnect import axi
from litex.soc.interconnect import wishbone
from litex.soc.cores.clock import *
from litex.soc.integration.soc_core import *
from litex.soc.integration.builder import *
from litex.soc.cores.led import LedChaser
# UTILS ---------------------------------------------------------------------------------------------
def load_ps7(soc, xci_file):
odir = os.path.join("build", "snickerdoodle", "gateware", "xci")
os.makedirs(odir, exist_ok=True)
file = "snickerdoodle_ps7.xci"
dst = os.path.join(odir, file)
if xci_file is None:
src = "https://technicaltoys-support.s3.amazonaws.com/xci/" + file
os.system("wget " + src + " -O " + dst)
else:
os.system("cp -p " + xci_file + " " + dst)
soc.cpu.set_ps7_xci(dst)
# CRG ----------------------------------------------------------------------------------------------
class _CRG(Module):
def __init__(self, platform, sys_clk_freq, use_ps7_clk=False):
self.rst = Signal()
self.clock_domains.cd_sys = ClockDomain()
# # #
if use_ps7_clk:
assert sys_clk_freq == 100e6
self.comb += ClockSignal("sys").eq(ClockSignal("ps7"))
self.comb += ResetSignal("sys").eq(ResetSignal("ps7") | self.rst)
else:
self.submodules.pll = pll = S7MMCM(speedgrade=-1)
self.comb += pll.reset.eq(self.rst)
pll.register_clkin(platform.request(platform.default_clk_name),
platform.default_clk_freq)
pll.create_clkout(self.cd_sys, sys_clk_freq)
# Ignore sys_clk to pll.clkin path created by SoC's rst.
platform.add_false_path_constraints(self.cd_sys.clk, pll.clkin)
# BaseSoC ------------------------------------------------------------------------------------------
class BaseSoC(SoCCore):
def __init__(self, variant="z7-10", sys_clk_freq=int(100e6), with_led_chaser=True,
ext_clk_freq = None,
xci_file = None,
**kwargs):
platform = snickerdoodle.Platform(variant=variant)
if ext_clk_freq:
platform.default_clk_freq = ext_clk_freq
platform.default_clk_period = 1e9 / ext_clk_freq
if kwargs.get("cpu_type", None) == "zynq7000":
kwargs["integrated_sram_size"] = 0
kwargs["with_uart"] = False
self.mem_map = {"csr": 0x4000_0000} # Zynq GP0 default
# SoCCore ----------------------------------------------------------------------------------
SoCCore.__init__(self, platform, sys_clk_freq,
ident = "LiteX SoC on Snickerdoodle",
**kwargs)
# Zynq7000 Integration ---------------------------------------------------------------------
if kwargs.get("cpu_type", None) == "zynq7000":
load_ps7(self, xci_file)
# Connect AXI GP0 to the SoC with base address of 0x43c00000 (default one)
wb_gp0 = wishbone.Interface()
self.submodules += axi.AXI2Wishbone(
axi = self.cpu.add_axi_gp_master(),
wishbone = wb_gp0,
base_address = self.mem_map['csr'])
self.add_wb_master(wb_gp0)
use_ps7_clk = True
else:
use_ps7_clk = False
# CRG --------------------------------------------------------------------------------------
self.submodules.crg = _CRG(platform, sys_clk_freq, use_ps7_clk)
# Leds -------------------------------------------------------------------------------------
if with_led_chaser:
self.submodules.leds = LedChaser(
pads = platform.request_all("user_led"),
sys_clk_freq = sys_clk_freq)
# Build --------------------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description="LiteX SoC on Snickerdoodle")
parser.add_argument("--build", action="store_true", help="Build bitstream.")
parser.add_argument("--load", action="store_true", help="Load bitstream.")
parser.add_argument("--variant", default="z7-10", help="Board variant (z7-10 or z7-20).")
parser.add_argument("--ext-clk-freq", default=10e6, type=float, help="External Clock Frequency.")
parser.add_argument("--sys-clk-freq", default=100e6, type=float, help="System clock frequency.")
parser.add_argument("--xci-file", help="XCI file for PS7 configuration.")
parser.add_argument("--target", help="Vivado programmer target.")
builder_args(parser)
soc_core_args(parser)
vivado_build_args(parser)
args = parser.parse_args()
soc = BaseSoC(
variant = args.variant,
sys_clk_freq = args.sys_clk_freq,
ext_clk_freq = args.ext_clk_freq,
xci_file = args.xci_file,
**soc_core_argdict(args)
)
builder = Builder(soc, **builder_argdict(args))
builder.build(**vivado_build_argdict(args), run=args.build)
if args.load:
prog = soc.platform.create_programmer()
prog.load_bitstream(builder.get_bitstream_filename(mode="sram"), target=args.target, device=1)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1707523
|
vel = float(input('How fast are you driving, in km/h? '))
if vel <= 80:
    print('Well done! Keep driving safely.')
else:
    print('You exceeded the 80 km/h limit and were fined. The fine is R${:.2f}.'.format((vel - 80) * 7))
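# Worked example: at 100 km/h the fine is (100 - 80) * 7 = R$140.00.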
|
StarcoderdataPython
|
1664507
|
__all__ = [
'GenerateKeys',
'GenerateKeysDeterministic'
]
import os
from ..base import ComputationStep
from ...lookup.factory import KeyServerFactory
from ...utils.exceptions import OasisException
from ...utils.coverages import SUPPORTED_COVERAGE_TYPES
from ...utils.data import get_utctimestamp
class GenerateKeys(ComputationStep):
"""
Generates keys from a model lookup, and write Oasis keys and keys error files.
The model lookup, which is normally independently implemented by the model
supplier, should generate keys as dicts with the following format
::
{
"id": <loc. ID>,
"peril_id": <OED sub-peril ID>,
"coverage_type": <OED coverage type ID>,
"area_peril_id": <area peril ID>,
"vulnerability_id": <vulnerability ID>,
"message": <loc. lookup status message>,
"status": <loc. lookup status flag indicating success, failure or no-match>
}
The keys generation command can generate these dicts, and write them to
file. It can also be used to write these to an Oasis keys file (which is a
requirement for model execution), which has the following format.::
LocID,PerilID,CoverageTypeID,AreaPerilID,VulnerabilityID
..
..
This file only lists the locations for which there has been a successful
lookup. The keys errors file lists all the locations with failing or
non-matching lookups and has the following format::
LocID,PerilID,CoverageTypeID,Message
..
..
"""
step_params = [
{'name': 'oed_location_csv', 'flag':'-x', 'is_path': True, 'pre_exist': True, 'help': 'Source location CSV file path', 'required': True},
{'name': 'keys_data_csv', 'flag':'-k', 'is_path': True, 'pre_exist': False, 'help': 'Generated keys CSV output path'},
{'name': 'keys_errors_csv', 'flag':'-e', 'is_path': True, 'pre_exist': False, 'help': 'Generated keys errors CSV output path'},
{'name': 'keys_format', 'flag':'-f', 'help': 'Keys files output format', 'choices':['oasis', 'json'], 'default':'oasis'},
{'name': 'lookup_config_json', 'flag':'-g', 'is_path': True, 'pre_exist': False, 'help': 'Lookup config JSON file path'},
{'name': 'lookup_data_dir', 'flag':'-d', 'is_path': True, 'pre_exist': True, 'help': 'Model lookup/keys data directory path'},
{'name': 'lookup_module_path', 'flag':'-l', 'is_path': True, 'pre_exist': False, 'help': 'Model lookup module path'},
{'name': 'lookup_complex_config_json', 'flag':'-L', 'is_path': True, 'pre_exist': False, 'help': 'Complex lookup config JSON file path'},
{'name': 'lookup_num_processes', 'type':int, 'default': -1, 'help': 'Number of workers in multiprocess pools'},
{'name': 'lookup_num_chunks', 'type':int, 'default': -1, 'help': 'Number of chunks to split the location file into for multiprocessing'},
{'name': 'model_version_csv', 'flag':'-v', 'is_path': True, 'pre_exist': False, 'help': 'Model version CSV file path'},
{'name': 'user_data_dir', 'flag':'-D', 'is_path': True, 'pre_exist': False, 'help': 'Directory containing additional model data files which varies between analysis runs'},
# Manager only options
{'name': 'verbose', 'default': False},
{'name': 'lookup_multiprocessing', 'default': True}, # Enable/disable multiprocessing
]
def _get_output_dir(self):
if self.keys_data_csv:
return os.path.dirname(self.keys_data_csv)
utcnow = get_utctimestamp(fmt='%Y%m%d%H%M%S')
return os.path.join(os.getcwd(), 'runs', 'keys-{}'.format(utcnow))
def run(self):
if not (self.lookup_config_json or (self.lookup_data_dir and self.model_version_csv and self.lookup_module_path)):
raise OasisException(
'No pre-generated keys file provided, and no lookup assets '
'provided to generate a keys file - if you do not have a '
'pre-generated keys file then lookup assets must be provided - '
'for a built-in lookup the lookup config. JSON file path must '
'be provided, or for custom lookups the keys data path + model '
'version file path + lookup package path must be provided'
)
output_dir = self._get_output_dir()
output_type = 'json' if self.keys_format.lower() == 'json' else 'csv'
keys_fp = self.keys_data_csv or os.path.join(output_dir, f'keys.{output_type}')
keys_errors_fp = self.keys_errors_csv or os.path.join(output_dir, f'keys-errors.{output_type}')
os.makedirs(os.path.dirname(keys_fp), exist_ok=True)
os.makedirs(os.path.dirname(keys_errors_fp), exist_ok=True)
keys_success_msg = True if self.lookup_complex_config_json else False
model_info, key_server = KeyServerFactory.create(
lookup_config_fp=self.lookup_config_json,
model_keys_data_path=self.lookup_data_dir,
model_version_file_path=self.model_version_csv,
lookup_module_path=self.lookup_module_path,
complex_lookup_config_fp=self.lookup_complex_config_json,
user_data_dir=self.user_data_dir,
output_directory=output_dir
)
res = key_server.generate_key_files(
location_fp=self.oed_location_csv,
successes_fp=keys_fp,
errors_fp=keys_errors_fp,
format=self.keys_format,
keys_success_msg=keys_success_msg,
multiproc_enabled=self.lookup_multiprocessing,
multiproc_num_cores=self.lookup_num_processes,
multiproc_num_partitions=self.lookup_num_chunks,
)
self.logger.debug(f"key generated used model {model_info}")
self.logger.info('\nKeys successful: {} generated with {} items'.format(res[0], res[1]))
if len(res) == 4:
self.logger.info('Keys errors: {} generated with {} items'.format(res[2], res[3]))
return res
class GenerateKeysDeterministic(ComputationStep):
step_params = [
{'name': 'oed_location_csv', 'flag':'-x', 'is_path': True, 'pre_exist': True, 'help': 'Source location CSV file path', 'required': True},
{'name': 'keys_data_csv', 'flag':'-k', 'is_path': True, 'pre_exist': False, 'help': 'Generated keys CSV output path'},
{'name': 'num_subperils', 'flag':'-p', 'default': 1, 'type':int, 'help': 'Set the number of subperils returned by deterministic key generator'},
{'name': 'supported_oed_coverage_types', 'type' :int, 'nargs':'+', 'default': list(v['id'] for v in SUPPORTED_COVERAGE_TYPES.values()), 'help': 'Select List of supported coverage_types [1, .. ,4]'},
]
def _get_output_dir(self):
if self.keys_data_csv:
            # keys_data_csv is a file path; its containing directory is the output dir
            return os.path.dirname(self.keys_data_csv)
utcnow = get_utctimestamp(fmt='%Y%m%d%H%M%S')
return os.path.join(os.getcwd(), 'runs', 'keys-{}'.format(utcnow))
def run(self):
output_dir = self._get_output_dir()
keys_fp = self.keys_data_csv or os.path.join(output_dir, 'keys.csv')
config = {'builtin_lookup_type': 'deterministic',
'model': {"supplier_id": "OasisLMF",
"model_id": "Deterministic",
"model_version": "1"},
'num_subperils': self.num_subperils,
'supported_oed_coverage_types': self.supported_oed_coverage_types}
model_info, lookup = KeyServerFactory.create(
lookup_config=config,
output_directory=output_dir
)
return lookup.generate_key_files(
location_fp=self.oed_location_csv,
successes_fp=keys_fp,
format='oasis',
)
|
StarcoderdataPython
|
3247835
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from vulyk.models.task_types import AbstractTaskType
from vulyk_ner.models.tasks import NERTaggingAnswer, NERTaggingTask
class NERTaggingTaskType(AbstractTaskType):
"""
NER Tagging Task to work with Vulyk.
"""
answer_model = NERTaggingAnswer
task_model = NERTaggingTask
    name = 'Тэггинг именованых сущностей в тексте'  # "Named entity tagging in text"
    description = ('Помогите нам найти людей, организации, даты и суммы' +
                   ' в тексте')  # "Help us find people, organizations, dates and amounts in the text"
template = 'base.html'
helptext_template = 'help.html'
type_name = 'ner_tagging_task'
redundancy = 2
JS_ASSETS = [
'static/scripts/vendor/jquery-ui.min.js',
'static/scripts/vendor/jquery-ui.combobox.js',
'static/scripts/vendor/jquery.svg.min.js',
'static/scripts/vendor/jquery.svgdom.min.js',
'static/scripts/vendor/jquery.ba-bbq.min.js',
'static/scripts/vendor/jquery.json.min.js',
'static/scripts/vendor/sprintf.js',
'static/scripts/vendor/webfont.js',
# # brat helpers
'static/scripts/src/configuration.js',
'static/scripts/src/util.js',
'static/scripts/src/annotation_log.js',
# # brat modules
'static/scripts/src/dispatcher.js',
'static/scripts/src/url_monitor.js',
'static/scripts/offline_ajax.js',
'static/scripts/src/visualizer.js',
'static/scripts/src/visualizer_ui.js',
'static/scripts/src/annotator_ui.js',
'static/scripts/src/spinner.js',
'static/scripts/init.js',
]
CSS_ASSETS = [
'static/styles/jquery-theme/jquery-ui.css',
'static/styles/jquery-theme/jquery-ui-redmond.css',
'static/styles/style-vis.css',
'static/styles/style-ui.css',
'static/styles/fixes.css',
]
|
StarcoderdataPython
|
4842567
|
<filename>lisa/lisa_user_data/coverage_test.py
from lisa.core.lisa_core import LISA_Core
from lisa.lisa_public_data.genes_test import FromGenes
from lisa.core.data_interface import PACKAGE_PATH, REQURED_DATASET_VERSION, INSTALL_PATH
from lisa.core.lisa_core import CONFIG_PATH as base_config_path
import numpy as np
from scipy import sparse
from lisa.core import genome_tools
import os
import configparser
from collections.abc import Iterable
from lisa.lisa_user_data.assays import ISD_Assay
from lisa.core.data_interface import DataInterface
from collections import Counter
from lisa.core.utils import Log, LoadingBar
import pyBigWig as bw
CONFIG_PATH = os.path.join(os.path.dirname(__file__),'config.ini')
_config = configparser.ConfigParser()
_config.read([base_config_path, CONFIG_PATH])
class FromCoverage(LISA_Core):
'''
lisa.FromCoverage
****************
Inputs:
* Genes of interest
* BigWig file, coverage over genome
Outputs:
* Predicted TF influence
Use LISA to infer TF influence on your geneset using your own coverage data. This test is better suited than the "regions" test when your measure produces wide peaks/areas of influence.
An example of this is H3K27ac data, which correlates with gene expression similarly to accessibility, but produces wide peaks that may span many distinct TF binding locations.
This interface outputs results in the same format as the ``FromGenes`` interface.
Example::
with open('./genelist.txt', 'r') as genes_file:
genes = [x.strip() for x in genes_file.readlines()]
results, metadata = lisa.FromRegions.using_bigwig('hg38', genes, './sample.bigwig')
results_df = pd.DataFrame(results.to_dict())
For more, see `User Guide <user_guide.md>`_.
'''
window_size = FromGenes.window_size
@classmethod
def get_docs(cls):
return '\n'.join(x.__doc__ for x in
[cls, cls.using_bigwig, cls.__init__, cls.predict])
@classmethod
def using_bigwig(cls, species, query_genes, bigwig_path, rp_map = 'enhanced_10K', isd_method = 'chipseq', background_list = [],
background_strategy = 'all', num_background_genes = 3000, seed = 2556, verbose = 4, log = None):
'''
*classmethod*
**lisa.FromCoverage.using_bigwig** (species, query_genes, bigwig_path, rp_map = 'basic', rp_decay = 10000, isd_method = 'chipseq', background_list = [], background_strategy = 'all', num_background_genes = 3000, seed = 2556, header = False, verbose = 4, log = None)
Run LISA FromCoverage test using a bigwig coverage file.
Parameters:
species: {'hg38', 'mm10'}
query_genes (list):
                Genes-of-interest, in either Symbol or RefSeqID format. Must provide between 20 and 500 genes.
bigwig_path (str):
Path to bigwig file
Returns:
results (lisa.core.utils.LISA_Results):
With each key representing a table column, sorted by "summary_p_value" field. The dictionary can be passed directly to a the pandas constructor: ``results_df = pd.DataFrame(results.to_dict())``.
metadata (dict):
Test metadata. Includes query genes provided and background genes that were selected.
'''
if log is None:
log = Log()
coverage_array = cls.convert_bigwig(bigwig_path, species, log = log)
return cls(species, coverage_array, rp_map = rp_map, isd_method=isd_method, verbose=verbose, log=log)\
.predict(query_genes, background_list=background_list, background_strategy=background_strategy, num_background_genes=num_background_genes,
seed=seed)
@classmethod
def convert_bigwig(cls, bigwig, species, log = None):
if log is None:
log = Log()
genome = DataInterface.load_genome(species, cls.window_size)
coverage_array = np.zeros(len(genome))
log.append('Converting BigWig file to coverage array ...')
bar = LoadingBar('Progress', len(genome) // 1000 + 1, cold_start=True)
try:
coverage_bw = bw.open(bigwig)
log.append(bar, update_line=True)
for i, window in enumerate(genome.list_windows()):
if window.chromosome in coverage_bw.chroms():
                    mean_coverage = coverage_bw.stats(*window.to_tuple())[0]
                    # stats() may return None for windows with no data; treat those as zero coverage
                    coverage_array[i] = mean_coverage if mean_coverage is not None else 0.0
if i%1000 == 0:
log.append(bar, update_line = True)
return np.nan_to_num(coverage_array)
finally:
coverage_bw.close()
def __init__(self, species, coverage_array, rp_map = 'enhanced_10K', isd_method = 'chipseq', verbose = 4, log = None):
'''
*class*
**lisa.FromCoverage** (species, regions, rp_map = 'enhanced_10K', rp_decay = 10000, isd_method = 'chipseq', verbose = 4, log = None)
Initialize the LISA test using user-defined regions.
Parameters:
species: {'hg38', 'mm10'}
coverage_array: (1D or Nx1 np.ndarray):
Array of scores over 1kb bins.
isd_method {"chipseq", "motifs"}:
Use ChIP-seq data or motifs to mark TF binding locations.
rp_map {"basic_10K", "enhanced_10K"}:
                Choice of RP map, which maps the regulatory influence of a region to a gene. The "basic_10K" model is based simply on distance, while the "enhanced_10K" model masks out the promoter and exon regions of other nearby genes.
verbose (int):
Number of levels of log messages to print to stderr
Returns:
lisa object
'''
super().__init__(species, _config, self.window_size, isd_method= isd_method, verbose=verbose, log = log)
assert(isinstance(coverage_array, np.ndarray)), 'Coverage array must be of type numpy.ndarray'
assert(len(coverage_array.shape) == 1 or (1 in coverage_array.shape and len(coverage_array.shape) == 2)), 'Coverage array must be 1D array or column/row vector'
coverage_array = coverage_array.reshape(-1)
assert(len(coverage_array) == len(self.data_interface.genome)), 'Coverage array must be of same length as genome: {} bins'.format(str(len(self.data_interface.genome)))
self.coverage_array = coverage_array
rp_map_styles = self._config.get('bam_test_params','rp_map_styles').split(',')
assert(rp_map in rp_map_styles), 'RP map must be numpy/scipy.sparse array, or be one of provided maps: {}'.format(','.join(rp_map_styles))
self.rp_map_style = rp_map
def _load_factor_binding_data(self):
return self.data_interface.get_binding_data(self.isd_method)
def _load_rp_map(self):
return self.data_interface.get_rp_map(self.rp_map_style)
def _initialize_assays(self, **assay_kwargs):
self.add_assay(
ISD_Assay(**assay_kwargs, technology = 'BamTest')
)
def predict(self, query_list, background_list = [], background_strategy = 'all', num_background_genes = 3000,
seed = 2556):
'''
*method*
**.predict** (self, query_list, background_list = [], background_strategy = 'all', num_background_genes = 3000, seed = 2556)
Predict TF influence given a set of genes.
Params:
query_list (list):
                Genes-of-interest, in either Symbol or RefSeqID format. Must provide between 20 and 500 genes.
background_list (list):
User-specified list of background genes to compare with query_list. Must contain more genes than query list and entire list will be used. If provided, ```background_strategy``` must be set to "provided".
background_strategy {"regulatory","random","provided"}:
Regulatory will sample background genes from a stratified sample of TADs and regulatory states, random will randomly sample from all non-query genes.
num_background_genes (int):
Number of genes to use as comparison to query genes. More background genes make test slower, but more stable.
seed (int):
Seed for gene selection and regression model initialization.
Returns:
results (lisa.core.utils.LISA_Results):
Can be passed directly to a the pandas constructor: ``results_df = pd.DataFrame(results.to_dict())``.
metadata (dict):
Test metadata. Includes query genes provided and background genes that were selected, as well as reg-scores for top 100 factors on selected genes.
'''
return super().predict(query_list, region_scores = self.coverage_array, background_list=background_list, background_strategy=background_strategy,
num_background_genes= num_background_genes, seed=seed)
|
StarcoderdataPython
|
184406
|
import sys
sys.path.insert(1, '/Users/anthonywohns/Documents/mcvean_group/age_inference/tsdate')
import tsdate
import tskit
import tsinfer
ts = tskit.load('truncated_simulation_tree.trees')
tip_weights = tsdate.find_node_tip_weights_ts(ts)
prior = tsdate.make_prior(ts.num_samples, 10000)
mixture_prior = tsdate.get_mixture_prior_ts_new(tip_weights, prior)
tsdate.age_inference(ts)
|
StarcoderdataPython
|
4831679
|
<filename>test/functional/p2p_add_connections.py<gh_stars>10-100
#!/usr/bin/env python3
# Copyright (c) 2020-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test add_outbound_p2p_connection test framework functionality"""
from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
check_node_connections,
)
class P2PFeelerReceiver(P2PInterface):
def on_version(self, message):
# The bitcoind node closes feeler connections as soon as a version
# message is received from the test framework. Don't send any responses
# to the node's version message since the connection will already be
# closed.
pass
class P2PAddConnections(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
def setup_network(self):
self.setup_nodes()
# Don't connect the nodes
def run_test(self):
self.log.info("Add 8 outbounds to node 0")
for i in range(8):
self.log.info(f"outbound: {i}")
self.nodes[0].add_outbound_p2p_connection(P2PInterface(), p2p_idx=i, connection_type="outbound-full-relay")
self.log.info("Add 2 block-relay-only connections to node 0")
for i in range(2):
self.log.info(f"block-relay-only: {i}")
# set p2p_idx based on the outbound connections already open to the
# node, so add 8 to account for the previous full-relay connections
self.nodes[0].add_outbound_p2p_connection(P2PInterface(), p2p_idx=i + 8, connection_type="block-relay-only")
self.log.info("Add 2 block-relay-only connections to node 1")
for i in range(2):
self.log.info(f"block-relay-only: {i}")
self.nodes[1].add_outbound_p2p_connection(P2PInterface(), p2p_idx=i, connection_type="block-relay-only")
self.log.info("Add 5 inbound connections to node 1")
for i in range(5):
self.log.info(f"inbound: {i}")
self.nodes[1].add_p2p_connection(P2PInterface())
self.log.info("Add 8 outbounds to node 1")
for i in range(8):
self.log.info(f"outbound: {i}")
# bump p2p_idx to account for the 2 existing outbounds on node 1
self.nodes[1].add_outbound_p2p_connection(P2PInterface(), p2p_idx=i + 2)
self.log.info("Check the connections opened as expected")
check_node_connections(node=self.nodes[0], num_in=0, num_out=10)
check_node_connections(node=self.nodes[1], num_in=5, num_out=10)
self.log.info("Disconnect p2p connections & try to re-open")
self.nodes[0].disconnect_p2ps()
check_node_connections(node=self.nodes[0], num_in=0, num_out=0)
self.log.info("Add 8 outbounds to node 0")
for i in range(8):
self.log.info(f"outbound: {i}")
self.nodes[0].add_outbound_p2p_connection(P2PInterface(), p2p_idx=i)
check_node_connections(node=self.nodes[0], num_in=0, num_out=8)
self.log.info("Add 2 block-relay-only connections to node 0")
for i in range(2):
self.log.info(f"block-relay-only: {i}")
# bump p2p_idx to account for the 8 existing outbounds on node 0
self.nodes[0].add_outbound_p2p_connection(P2PInterface(), p2p_idx=i + 8, connection_type="block-relay-only")
check_node_connections(node=self.nodes[0], num_in=0, num_out=10)
self.log.info("Restart node 0 and try to reconnect to p2ps")
self.restart_node(0)
self.log.info("Add 4 outbounds to node 0")
for i in range(4):
self.log.info(f"outbound: {i}")
self.nodes[0].add_outbound_p2p_connection(P2PInterface(), p2p_idx=i)
check_node_connections(node=self.nodes[0], num_in=0, num_out=4)
self.log.info("Add 2 block-relay-only connections to node 0")
for i in range(2):
self.log.info(f"block-relay-only: {i}")
# bump p2p_idx to account for the 4 existing outbounds on node 0
self.nodes[0].add_outbound_p2p_connection(P2PInterface(), p2p_idx=i + 4, connection_type="block-relay-only")
check_node_connections(node=self.nodes[0], num_in=0, num_out=6)
check_node_connections(node=self.nodes[1], num_in=5, num_out=10)
self.log.info("Add 1 feeler connection to node 0")
feeler_conn = self.nodes[0].add_outbound_p2p_connection(P2PFeelerReceiver(), p2p_idx=6, connection_type="feeler")
# Feeler connection is closed
assert not feeler_conn.is_connected
# Verify version message received
assert_equal(feeler_conn.message_count["version"], 1)
# Feeler connections do not request tx relay
assert_equal(feeler_conn.last_message["version"].relay, 0)
if __name__ == '__main__':
P2PAddConnections().main()
|
StarcoderdataPython
|
154424
|
<gh_stars>1-10
from .vae import VAE
from .mvae import MultimodalVAE
from .pmvae import PartitionedMultimodalVAE
from .hier_pmvae import HierPMVAE_v1, HierPMVAE_v2
|
StarcoderdataPython
|
1622174
|
"""
Spew the contents of a file to standard output.
Similar to Unix command 'cat' or Windows command 'type'
for a single file.
The file given on the command line is interpreted relative to
a path that may be specified on the command line as -D path
or in the configuration file as DOCROOT.
"""
import config
import os
import logging
logging.basicConfig(format='%(levelname)s:%(message)s',
level=logging.DEBUG)
log = logging.getLogger(__name__)
DOCROOT = "." # Overridden by configuration
def spew(file_name):
"""Spew contents of 'source' to standard output.
Source should be a file or file-like object.
"""
source_path = os.path.join(DOCROOT, file_name)
log.debug("Source path: {}".format(source_path))
try:
with open(source_path, 'r', encoding='utf-8') as source:
for line in source:
print(line.strip())
except OSError as error:
log.warn("Failed to open or read file")
log.warn("Requested file was {}".format(source_path))
log.warn("Exception: {}".format(error))
def main():
global DOCROOT
options = config.configuration()
assert options.DOCROOT, "Document root must be specified in " \
+ "configuration file credentials.ini or on command line"
DOCROOT = options.DOCROOT
assert options.input, "You must specify an input file on the command line"
infile = options.input
spew(infile)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1752886
|
<gh_stars>0
import numpy as np
class DataSet:
def __init__(self):
self.input = None
self.context = None
self.output = None
self.output_scaler = None
self.output_columns = None
self.output_dates = None
    def get_input(self, model_name: str) -> list:
if model_name not in ["basic", "mlp", "cnn", "cnn-lstm", "lstm", "gan"]:
return [self.context, self.input]
else:
return np.dstack((self.context, self.input))
    def get_output(self) -> np.ndarray:
return self.output
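# Hypothetical usage sketch (not part of the original module): it illustrates how
# get_input either depth-stacks context and input for the listed model names or
# returns them as a two-item list for any other model name.
if __name__ == "__main__":
    ds = DataSet()
    ds.context = np.zeros((4, 3))
    ds.input = np.ones((4, 3))
    ds.output = np.arange(4)
    stacked = ds.get_input("lstm")       # name in the list -> np.dstack, shape (4, 3, 2)
    separate = ds.get_input("prophet")   # any other name -> [context, input] as a list
    print(stacked.shape, len(separate), ds.get_output())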
|
StarcoderdataPython
|
1626012
|
from allauth.account.views import SignupView
from users.forms import CustomUserCreationForm
class MySignupView(SignupView):
form_class = CustomUserCreationForm
|
StarcoderdataPython
|
1315
|
<filename>mango/__init__.py
# In --strict mode, mypy complains about imports unless they're done this way.
#
# It complains 'Module has no attribute ABC' or 'Module "mango" does not explicitly export
# attribute "XYZ"; implicit reexport disabled'. We could dial that back by using the
# --implicit-reexport parameter, but let's keep things strict.
#
# Each import then *must* be of the form `from .file import X as X`. (Until/unless there's
# a better way.)
#
from .account import Account as Account
from .account import AccountSlot as AccountSlot
from .accountflags import AccountFlags as AccountFlags
from .accountinfo import AccountInfo as AccountInfo
from .accountinfoconverter import build_account_info_converter as build_account_info_converter
from .accountinstrumentvalues import AccountInstrumentValues as AccountInstrumentValues
from .accountinstrumentvalues import PricedAccountInstrumentValues as PricedAccountInstrumentValues
from .accountliquidator import AccountLiquidator as AccountLiquidator
from .accountliquidator import NullAccountLiquidator as NullAccountLiquidator
from .accountscout import AccountScout as AccountScout
from .accountscout import ScoutReport as ScoutReport
from .addressableaccount import AddressableAccount as AddressableAccount
from .arguments import parse_args as parse_args
from .arguments import output as output
from .balancesheet import BalanceSheet as BalanceSheet
from .cache import Cache as Cache
from .cache import MarketCache as MarketCache
from .cache import PerpMarketCache as PerpMarketCache
from .cache import PriceCache as PriceCache
from .cache import RootBankCache as RootBankCache
from .client import BetterClient as BetterClient
from .client import BlockhashNotFoundException as BlockhashNotFoundException
from .client import ClientException as ClientException
from .client import CompoundException as CompoundException
from .client import CompoundRPCCaller as CompoundRPCCaller
from .client import FailedToFetchBlockhashException as FailedToFetchBlockhashException
from .client import NodeIsBehindException as NodeIsBehindException
from .client import RateLimitException as RateLimitException
from .client import RPCCaller as RPCCaller
from .client import SlotHolder as SlotHolder
from .client import TooManyRequestsRateLimitException as TooManyRequestsRateLimitException
from .client import TooMuchBandwidthRateLimitException as TooMuchBandwidthRateLimitException
from .client import TransactionException as TransactionException
from .combinableinstructions import CombinableInstructions as CombinableInstructions
from .constants import MangoConstants as MangoConstants
from .constants import DATA_PATH as DATA_PATH
from .constants import SOL_DECIMAL_DIVISOR as SOL_DECIMAL_DIVISOR
from .constants import SOL_DECIMALS as SOL_DECIMALS
from .constants import SOL_MINT_ADDRESS as SOL_MINT_ADDRESS
from .constants import SYSTEM_PROGRAM_ADDRESS as SYSTEM_PROGRAM_ADDRESS
from .constants import WARNING_DISCLAIMER_TEXT as WARNING_DISCLAIMER_TEXT
from .constants import version as version
from .context import Context as Context
from .contextbuilder import ContextBuilder as ContextBuilder
from .createmarketoperations import create_market_instruction_builder as create_market_instruction_builder
from .createmarketoperations import create_market_operations as create_market_operations
from .encoding import decode_binary as decode_binary
from .encoding import encode_binary as encode_binary
from .encoding import encode_key as encode_key
from .encoding import encode_int as encode_int
from .ensuremarketloaded import ensure_market_loaded as ensure_market_loaded
from .ensuremarketloaded import load_market_by_symbol as load_market_by_symbol
from .group import Group as Group
from .group import GroupSlot as GroupSlot
from .group import GroupSlotPerpMarket as GroupSlotPerpMarket
from .group import GroupSlotSpotMarket as GroupSlotSpotMarket
from .healthcheck import HealthCheck as HealthCheck
from .idl import IdlParser as IdlParser
from .idl import lazy_load_cached_idl_parser as lazy_load_cached_idl_parser
from .idsjsonmarketlookup import IdsJsonMarketLookup as IdsJsonMarketLookup
from .inventory import Inventory as Inventory
from .inventory import PerpInventoryAccountWatcher as PerpInventoryAccountWatcher
from .inventory import SpotInventoryAccountWatcher as SpotInventoryAccountWatcher
from .instructions import build_cancel_perp_order_instructions as build_cancel_perp_order_instructions
from .instructions import build_cancel_spot_order_instructions as build_cancel_spot_order_instructions
from .instructions import build_close_spl_account_instructions as build_close_spl_account_instructions
from .instructions import build_create_account_instructions as build_create_account_instructions
from .instructions import build_create_associated_spl_account_instructions as build_create_associated_spl_account_instructions
from .instructions import build_create_solana_account_instructions as build_create_solana_account_instructions
from .instructions import build_create_spl_account_instructions as build_create_spl_account_instructions
from .instructions import build_create_serum_open_orders_instructions as build_create_serum_open_orders_instructions
from .instructions import build_deposit_instructions as build_deposit_instructions
from .instructions import build_faucet_airdrop_instructions as build_faucet_airdrop_instructions
from .instructions import build_mango_consume_events_instructions as build_mango_consume_events_instructions
from .instructions import build_place_perp_order_instructions as build_place_perp_order_instructions
from .instructions import build_redeem_accrued_mango_instructions as build_redeem_accrued_mango_instructions
from .instructions import build_serum_consume_events_instructions as build_serum_consume_events_instructions
from .instructions import build_serum_place_order_instructions as build_serum_place_order_instructions
from .instructions import build_serum_settle_instructions as build_serum_settle_instructions
from .instructions import build_spot_place_order_instructions as build_spot_place_order_instructions
from .instructions import build_transfer_spl_tokens_instructions as build_transfer_spl_tokens_instructions
from .instructions import build_withdraw_instructions as build_withdraw_instructions
from .instructionreporter import InstructionReporter as InstructionReporter
from .instructionreporter import SerumInstructionReporter as SerumInstructionReporter
from .instructionreporter import MangoInstructionReporter as MangoInstructionReporter
from .instructionreporter import CompoundInstructionReporter as CompoundInstructionReporter
from .instructiontype import InstructionType as InstructionType
from .instrumentlookup import InstrumentLookup as InstrumentLookup
from .instrumentlookup import NullInstrumentLookup as NullInstrumentLookup
from .instrumentlookup import CompoundInstrumentLookup as CompoundInstrumentLookup
from .instrumentlookup import IdsJsonTokenLookup as IdsJsonTokenLookup
from .instrumentlookup import NonSPLInstrumentLookup as NonSPLInstrumentLookup
from .instrumentlookup import SPLTokenLookup as SPLTokenLookup
from .instrumentvalue import InstrumentValue as InstrumentValue
from .liquidatablereport import LiquidatableState as LiquidatableState
from .liquidatablereport import LiquidatableReport as LiquidatableReport
from .liquidationevent import LiquidationEvent as LiquidationEvent
from .liquidationprocessor import LiquidationProcessor as LiquidationProcessor
from .liquidationprocessor import LiquidationProcessorState as LiquidationProcessorState
from .loadedmarket import LoadedMarket as LoadedMarket
from .logmessages import expand_log_messages as expand_log_messages
from .lotsizeconverter import LotSizeConverter as LotSizeConverter
from .mangoinstruction import MangoInstruction as MangoInstruction
from .lotsizeconverter import NullLotSizeConverter as NullLotSizeConverter
from .market import DryRunMarket as DryRunMarket
from .market import InventorySource as InventorySource
from .market import Market as Market
from .marketlookup import CompoundMarketLookup as CompoundMarketLookup
from .marketlookup import MarketLookup as MarketLookup
from .marketlookup import NullMarketLookup as NullMarketLookup
from .marketoperations import MarketInstructionBuilder as MarketInstructionBuilder
from .marketoperations import MarketOperations as MarketOperations
from .marketoperations import NullMarketInstructionBuilder as NullMarketInstructionBuilder
from .marketoperations import NullMarketOperations as NullMarketOperations
from .metadata import Metadata as Metadata
from .modelstate import ModelState as ModelState
from .notification import CompoundNotificationTarget as CompoundNotificationTarget
from .notification import ConsoleNotificationTarget as ConsoleNotificationTarget
from .notification import CsvFileNotificationTarget as CsvFileNotificationTarget
from .notification import DiscordNotificationTarget as DiscordNotificationTarget
from .notification import FilteringNotificationTarget as FilteringNotificationTarget
from .notification import MailjetNotificationTarget as MailjetNotificationTarget
from .notification import NotificationHandler as NotificationHandler
from .notification import NotificationTarget as NotificationTarget
from .notification import TelegramNotificationTarget as TelegramNotificationTarget
from .notification import parse_notification_target as parse_notification_target
from .observables import CaptureFirstItem as CaptureFirstItem
from .observables import CollectingObserverSubscriber as CollectingObserverSubscriber
from .observables import DisposePropagator as DisposePropagator
from .observables import DisposeWrapper as DisposeWrapper
from .observables import EventSource as EventSource
from .observables import FunctionObserver as FunctionObserver
from .observables import LatestItemObserverSubscriber as LatestItemObserverSubscriber
from .observables import NullObserverSubscriber as NullObserverSubscriber
from .observables import PrintingObserverSubscriber as PrintingObserverSubscriber
from .observables import TimestampedPrintingObserverSubscriber as TimestampedPrintingObserverSubscriber
from .observables import create_backpressure_skipping_observer as create_backpressure_skipping_observer
from .observables import debug_print_item as debug_print_item
from .observables import log_subscription_error as log_subscription_error
from .observables import observable_pipeline_error_reporter as observable_pipeline_error_reporter
from .openorders import OpenOrders as OpenOrders
from .oracle import Oracle as Oracle
from .oracle import OracleProvider as OracleProvider
from .oracle import OracleSource as OracleSource
from .oracle import Price as Price
from .oracle import SupportedOracleFeature as SupportedOracleFeature
from .orderbookside import OrderBookSideType as OrderBookSideType
from .orderbookside import PerpOrderBookSide as PerpOrderBookSide
from .orders import Order as Order
from .orders import OrderType as OrderType
from .orders import OrderBook as OrderBook
from .orders import Side as Side
from .ownedinstrumentvalue import OwnedInstrumentValue as OwnedInstrumentValue
from .oraclefactory import create_oracle_provider as create_oracle_provider
from .parse_account_info_to_orders import parse_account_info_to_orders as parse_account_info_to_orders
from .perpaccount import PerpAccount as PerpAccount
from .perpeventqueue import PerpEvent as PerpEvent
from .perpeventqueue import PerpEventQueue as PerpEventQueue
from .perpeventqueue import PerpFillEvent as PerpFillEvent
from .perpeventqueue import PerpOutEvent as PerpOutEvent
from .perpeventqueue import PerpUnknownEvent as PerpUnknownEvent
from .perpeventqueue import UnseenPerpEventChangesTracker as UnseenPerpEventChangesTracker
from .perpmarket import PerpMarket as PerpMarket
from .perpmarket import PerpMarketStub as PerpMarketStub
from .perpmarketdetails import PerpMarketDetails as PerpMarketDetails
from .perpmarketoperations import PerpMarketInstructionBuilder as PerpMarketInstructionBuilder
from .perpmarketoperations import PerpMarketOperations as PerpMarketOperations
from .perpopenorders import PerpOpenOrders as PerpOpenOrders
from .placedorder import PlacedOrder as PlacedOrder
from .placedorder import PlacedOrdersContainer as PlacedOrdersContainer
from .publickey import encode_public_key_for_sorting as encode_public_key_for_sorting
from .reconnectingwebsocket import ReconnectingWebsocket as ReconnectingWebsocket
from .retrier import RetryWithPauses as RetryWithPauses
from .retrier import retry_context as retry_context
from .serumeventqueue import SerumEventQueue as SerumEventQueue
from .serumeventqueue import UnseenSerumEventChangesTracker as UnseenSerumEventChangesTracker
from .serummarket import SerumMarket as SerumMarket
from .serummarket import SerumMarketStub as SerumMarketStub
from .serummarketlookup import SerumMarketLookup as SerumMarketLookup
from .serummarketoperations import SerumMarketInstructionBuilder as SerumMarketInstructionBuilder
from .serummarketoperations import SerumMarketOperations as SerumMarketOperations
from .spotmarket import SpotMarket as SpotMarket
from .spotmarket import SpotMarketStub as SpotMarketStub
from .spotmarketoperations import SpotMarketInstructionBuilder as SpotMarketInstructionBuilder
from .spotmarketoperations import SpotMarketOperations as SpotMarketOperations
from .text import indent_collection_as_str as indent_collection_as_str
from .text import indent_item_by as indent_item_by
from .token import Instrument as Instrument
from .token import SolToken as SolToken
from .token import Token as Token
from .tokenaccount import TokenAccount as TokenAccount
from .tokenbank import BankBalances as BankBalances
from .tokenbank import InterestRates as InterestRates
from .tokenbank import NodeBank as NodeBank
from .tokenbank import RootBank as RootBank
from .tokenbank import TokenBank as TokenBank
from .tradeexecutor import ImmediateTradeExecutor as ImmediateTradeExecutor
from .tradeexecutor import NullTradeExecutor as NullTradeExecutor
from .tradeexecutor import TradeExecutor as TradeExecutor
from .tradehistory import TradeHistory as TradeHistory
from .transactionscout import TransactionScout as TransactionScout
from .transactionscout import fetch_all_recent_transaction_signatures as fetch_all_recent_transaction_signatures
from .transactionscout import mango_instruction_from_response as mango_instruction_from_response
from .valuation import AccountValuation as AccountValuation
from .valuation import TokenValuation as TokenValuation
from .valuation import Valuation as Valuation
from .version import Version as Version
from .wallet import Wallet as Wallet
from .walletbalancer import FilterSmallChanges as FilterSmallChanges
from .walletbalancer import FixedTargetBalance as FixedTargetBalance
from .walletbalancer import LiveAccountBalancer as LiveAccountBalancer
from .walletbalancer import LiveWalletBalancer as LiveWalletBalancer
from .walletbalancer import NullWalletBalancer as NullWalletBalancer
from .walletbalancer import PercentageTargetBalance as PercentageTargetBalance
from .walletbalancer import TargetBalance as TargetBalance
from .walletbalancer import WalletBalancer as WalletBalancer
from .walletbalancer import calculate_required_balance_changes as calculate_required_balance_changes
from .walletbalancer import parse_fixed_target_balance as parse_fixed_target_balance
from .walletbalancer import parse_target_balance as parse_target_balance
from .walletbalancer import sort_changes_for_trades as sort_changes_for_trades
from .watcher import LamdaUpdateWatcher as LamdaUpdateWatcher
from .watcher import ManualUpdateWatcher as ManualUpdateWatcher
from .watcher import Watcher as Watcher
from .watchers import build_group_watcher as build_group_watcher
from .watchers import build_account_watcher as build_account_watcher
from .watchers import build_cache_watcher as build_cache_watcher
from .watchers import build_spot_open_orders_watcher as build_spot_open_orders_watcher
from .watchers import build_serum_open_orders_watcher as build_serum_open_orders_watcher
from .watchers import build_perp_open_orders_watcher as build_perp_open_orders_watcher
from .watchers import build_price_watcher as build_price_watcher
from .watchers import build_serum_inventory_watcher as build_serum_inventory_watcher
from .watchers import build_orderbook_watcher as build_orderbook_watcher
from .websocketsubscription import IndividualWebSocketSubscriptionManager as IndividualWebSocketSubscriptionManager
from .websocketsubscription import SharedWebSocketSubscriptionManager as SharedWebSocketSubscriptionManager
from .websocketsubscription import WebSocketAccountSubscription as WebSocketAccountSubscription
from .websocketsubscription import WebSocketLogSubscription as WebSocketLogSubscription
from .websocketsubscription import WebSocketProgramSubscription as WebSocketProgramSubscription
from .websocketsubscription import WebSocketSubscription as WebSocketSubscription
from .websocketsubscription import WebSocketSubscriptionManager as WebSocketSubscriptionManager
from .layouts import layouts
import decimal
# Increased precision from 18 to 36 because for a decimal like:
# val = Decimal("17436036573.2030800")
#
# The following rounding operations would both throw decimal.InvalidOperation:
# val.quantize(Decimal('.000000001'))
# round(val, 9)
decimal.getcontext().prec = 36
|
StarcoderdataPython
|
157942
|
<gh_stars>10-100
import numpy as np
import matplotlib.pyplot as plt
L = 3
Y, X = np.mgrid[-L:L:100j, -L:L:100j]
U = -1 - X**2 + Y
V = 1 + X - Y**2
speed = np.sqrt(U*U + V*V)
plt.imshow(speed, extent=[-L, L, -L, L], alpha=0.5)
plt.colorbar(label='speed')
plt.streamplot(X, Y, U, V, linewidth=0.2*speed)
plt.title('Streamlines')
plt.xlabel('x-axis')
plt.ylabel('y-axis')
plt.show()
|
StarcoderdataPython
|
4822144
|
import os
from pathlib import Path
from shutil import copy2
import pytest
from yappa.cli_helpers import create_function_version
from yappa.config_generation import create_default_config
from yappa.packaging.s3 import delete_bucket
from yappa.utils import save_yaml
from yappa.yc import YC
@pytest.fixture(scope="session")
def yc():
return YC.setup()
COPIED_FILES = (
Path(Path(__file__).resolve().parent, "test_apps", "fastapi_app.py"),
Path(Path(__file__).resolve().parent, "test_apps", "django_wsgi.py"),
Path(Path(__file__).resolve().parent, "test_apps", "django_settings.py"),
Path(Path(__file__).resolve().parent, "test_apps", "flask_app.py"),
Path(Path(__file__).resolve().parent, "test_apps",
"apps_requirements.txt"),
)
PACKAGE_FILES = (
Path("package", "utils.py"),
Path("package", "subpackage", "subutils.py"),
Path(".idea"),
Path(".git", "config"),
Path("venv", "flask.py"),
)
def create_empty_files(*paths):
for path in paths:
os.makedirs(path.parent, exist_ok=True)
open(path, "w").close()
@pytest.fixture(scope="session")
def apps_dir(tmpdir_factory):
dir_ = tmpdir_factory.mktemp('package')
os.chdir(dir_)
assert not os.listdir()
create_empty_files(*PACKAGE_FILES)
for file in COPIED_FILES:
copy2(file, ".")
return dir_
@pytest.fixture(scope="session")
def config_filename():
return "yappa-config.yaml"
APPS_CONFIGS = (
("flask", "flask_app.app", None, "wsgi"),
("django", "django_wsgi.app", "django_settings", "wsgi"),
("fastapi", "fastapi_app.app", None, "asgi"),
)
@pytest.fixture(scope="session",
params=APPS_CONFIGS,
ids=[config[0] for config in APPS_CONFIGS])
def config(request, apps_dir, config_filename):
config = create_default_config(config_filename)
config.update(
project_slug=f"test-function-{request.param[0]}",
manage_function_name=f"test-function-{request.param[0]}-manage",
requirements_file="apps_requirements.txt",
entrypoint=request.param[1],
application_type=request.param[3],
django_settings_module=request.param[2],
bucket="test-bucket-231",
excluded_paths=(
".idea",
".git",
"venv",
),
is_public=True,
)
save_yaml(config, config_filename)
return config
@pytest.fixture(scope="session")
def function(config, yc):
function, _ = yc.create_function(config["project_slug"])
if config["django_settings_module"]:
yc.create_function(config["manage_function_name"])
yield function
yc.delete_function(config["project_slug"])
if config["django_settings_module"]:
yc.delete_function(config["manage_function_name"])
UPLOAD_STRATEGIES = (
    "s3",
    "direct",
)
@pytest.fixture(scope="session", params=UPLOAD_STRATEGIES, ids=UPLOAD_STRATEGIES)
def function_version(request, yc, function, config, config_filename,
s3_credentials):
yield create_function_version(yc, config, request.param, config_filename)
delete_bucket(config["bucket"], **s3_credentials)
@pytest.fixture(scope="session")
def s3_credentials(yc):
return yc.get_s3_key()
@pytest.fixture()
def sample_event():
return {
"httpMethod": "GET",
"headers": {
"HTTP_HOST": ""
},
"url": "http://sampleurl.ru/",
"params": {},
"multiValueParams": {},
"pathParams": {},
"multiValueHeaders": {},
"queryStringParameters": {},
"multiValueQueryStringParameters": {},
"requestContext": {
"identity": {"sourceIp": "172.16.58.3",
"userAgent": "Mozilla/5.0"},
"httpMethod": "GET",
"requestId": "0f61048c-2ba9",
"requestTime": "18/Jun/2021:03:56:37 +0000",
"requestTimeEpoch": 1623988597},
"body": "",
"isBase64Encoded": False}
|
StarcoderdataPython
|
92556
|
from __future__ import print_function
import numpy as np
import pandas as pd
import inspect
import os
import time
from . import Model
from . import Utils as U
#------------------------------
#FINDING NEAREST NEIGHBOR
#------------------------------
def mindistance(x,xma,Nx):
distx = 0
mindist = 1000000 * U.PC * U.AU
j = None
for i in range(Nx):
distx = abs(x-xma[i])
if (distx<mindist):
mindist=distx
j=i
return j
#------------------------------
#OVERLAPING SUBMODELS INTO GRID
#------------------------------
def overlap(GRID, submodels = [''], folder = './Subgrids/',
T_min = 30., rho_min = 1.e9,
all = False, radmc3d = False):
func_name = inspect.stack()[0][3]
if folder[-1] != '/': folder = folder + '/'
t0 = time.time()
num=int(np.loadtxt(os.popen("ls -1 %s*.dat| wc -l"%folder)))
data=os.popen("ls -1 %s*.dat"%folder).read().split('\n',num)[:-1]
if all:
names = [name for name in data] # if '_S.' in name or '_MSW' in name]
files = [np.loadtxt(name) for name in names]
else:
submodels = [folder + sub for sub in submodels]
names = [name for name in submodels]
files = [np.loadtxt(name) for name in names]
detected = [tmp.split(folder)[1] for tmp in data]
read = [tmp.split(folder)[1] for tmp in names]
print ("Running function '%s'..."%func_name)
print ('Files detected (%d):'%num, detected,
'\nFiles to merge in grid (%d):'%len(files), read)
NTotal = GRID.NPoints
Nx, Ny, Nz = GRID.Nodes
cm3_to_m3 = 1e6
gamma = 7./5 #Gamma for diatomic molecules
DENS = -1*np.ones(NTotal) #, dtype='float64') * 0.5 # * dens_back
TEMP = np.zeros(NTotal) # * temp_back * dens_back
ab0 = 5e-8
ABUND = np.zeros(NTotal) #np.ones(NTotal) * ab0
gtd0 = 100.
GTD = np.zeros(NTotal) #np.ones(NTotal) * gtd0
VEL = [np.zeros(NTotal),np.zeros(NTotal),np.ones(NTotal)*7e8]
#----------------------
#----------------------
#-------------------
#SUBGRIDS DEFINITION
#-------------------
NFiles = len(files); CountFiles = np.arange(NFiles)
lenFiles = [len(file) for file in files]
dens_tmp = [[{},{}] for i in CountFiles]
temp_tmp = [{} for i in CountFiles]
vel_tmp = [[{} for i in CountFiles] for i in range(3)]
abund_tmp = [{} for i in CountFiles]
gtd_tmp = [{} for i in CountFiles]
hg=0
IDList = [[] for i in CountFiles]
Xgrid, Ygrid, Zgrid = GRID.XYZgrid
for m in range(NFiles):
for n in files[m]:
x,y,z = n[1], n[2], n[3]
i = mindistance(x,Xgrid,Nx)
j = mindistance(y,Ygrid,Ny)
k = mindistance(z,Zgrid,Nz)
            Num = i*Ny*Nz + j*Nz + k  # ID for the Global Grid
#if Num in IDList[m]: #Really slow as the size of IDList increases
try:
dens_tmp[m][0][Num] += n[4]
dens_tmp[m][1][Num] += 1
temp_tmp[m][Num] += n[4] * n[5]
vel_tmp[0][m][Num] += n[4] * n[6]
vel_tmp[1][m][Num] += n[4] * n[7]
vel_tmp[2][m][Num] += n[4] * n[8]
abund_tmp[m][Num] += n[4] * n[9]
gtd_tmp[m][Num] += n[4] * n[10]
except KeyError:
#else:
dens_tmp[m][0][Num] = n[4]
dens_tmp[m][1][Num] = 1
temp_tmp[m][Num] = n[4] * n[5]
vel_tmp[0][m][Num] = n[4] * n[6]
vel_tmp[1][m][Num] = n[4] * n[7]
vel_tmp[2][m][Num] = n[4] * n[8]
abund_tmp[m][Num] = n[4] * n[9]
gtd_tmp[m][Num] = n[4] * n[10]
IDList[m].append(Num)
#hg+=1
#if hg%50000 == 0: print (hg)
print ('Finished merging for: %s'%names[m])
print ('Computing combined densities, temperatures, etc....')
for m in range(NFiles):
for ind in IDList[m]:
dens_tot = dens_tmp[m][0][ind]
temp_tmp[m][ind] = temp_tmp[m][ind] / dens_tot
abund_tmp[m][ind] = abund_tmp[m][ind] / dens_tot
gtd_tmp[m][ind]= gtd_tmp[m][ind] / dens_tot
vel_tmp[0][m][ind] = vel_tmp[0][m][ind] / dens_tot
vel_tmp[1][m][ind] = vel_tmp[1][m][ind] / dens_tot
vel_tmp[2][m][ind] = vel_tmp[2][m][ind] / dens_tot
dens_tmp[m][0][ind] = dens_tot / dens_tmp[m][1][ind]
#-------------------
#FOR THE GLOBAL GRID
#-------------------
dens_dum = dens_tmp[m][0][ind]
temp_dum = temp_tmp[m][ind]
vel0_dum = vel_tmp[0][m][ind]
vel1_dum = vel_tmp[1][m][ind]
vel2_dum = vel_tmp[2][m][ind]
abund_dum = abund_tmp[m][ind]
gtd_dum = gtd_tmp[m][ind]
DENS[ind] += dens_dum
TEMP[ind] += dens_dum * temp_dum
VEL[0][ind] += dens_dum * vel0_dum
VEL[1][ind] += dens_dum * vel1_dum
VEL[2][ind] += dens_dum * vel2_dum
ABUND[ind] += dens_dum * abund_dum
GTD[ind] += dens_dum * gtd_dum
TEMP = TEMP / DENS
ABUND = ABUND / DENS
GTD = GTD / DENS
VEL[0] = VEL[0] / DENS
VEL[1] = VEL[1] / DENS
VEL[2] = VEL[2] / DENS
VEL = Model.Struct( **{'x': VEL[0], 'y': VEL[1], 'z': VEL[2]})
ind = np.where(DENS == -1.0)
DENS[ind] = rho_min
ABUND[ind] = ab0 #?
GTD[ind] = gtd0 #?
DENS = np.where(DENS < rho_min, rho_min, DENS)
TEMP = np.where(TEMP == 0., T_min, TEMP)
if radmc3d: Model.Datatab_RADMC3D_FreeFree(DENS,TEMP,GRID)
else: Model.DataTab_LIME(DENS,TEMP,VEL,ABUND,GTD,GRID)
AllProp = Model.Struct( **{'GRID': GRID, 'density': DENS, 'temperature': TEMP, 'vel': VEL, 'abundance': ABUND, 'gtd': GTD})
print ('%s is done!'%func_name)
    print ('Elapsed time: %.3fs' % (time.time() - t0))
print ('-------------------------------------------------\n-------------------------------------------------')
return AllProp
|
StarcoderdataPython
|
1631745
|
<reponame>AlexArcPy/GDBee
# -*- coding: UTF-8 -*-
"""Container of tabs."""
from PyQt5.QtWidgets import (QTabWidget, QAction, QToolButton, QMessageBox)
from PyQt5.QtCore import Qt
from tab import Tab
from geodatabase import Geodatabase
from cfg import dev_mode, not_connected_to_gdb_message
########################################################################
class TabWidget(QTabWidget):
"""Container of tabs."""
# ----------------------------------------------------------------------
def __init__(self, parent=None):
"""Initialize TabWidget with basic properties."""
super(TabWidget, self).__init__(parent)
self.latest_query_index = 0 # to show as `Query <index>` in tab name
self.setTabsClosable(True)
self.tabCloseRequested.connect(self.on_close_tab_mouse)
self.close_query_window = QAction('Close tab', self)
self.close_query_window.setShortcut('Ctrl+W')
self.close_query_window.triggered.connect(self.on_close_tab_keyboard)
self.addAction(self.close_query_window)
self.tab_button = QToolButton(self)
self.tab_button.setText('+')
self.tab_button.setToolTip('Add a new tab')
self.setCornerWidget(self.tab_button, Qt.TopRightCorner)
self.tab_button.clicked.connect(self.add_tab_page)
if dev_mode:
self.add_tab_page()
# ----------------------------------------------------------------------
def add_tab_page(self):
"""Add a new empty tab.
Create empty query panel and empty result table.
"""
empty_tab = Tab()
self.latest_query_index += 1
self.addTab(empty_tab, 'Query {idx}'.format(
idx=self.latest_query_index))
if self.tabBar().count() > 1:
current_tab_gdb = getattr(self.widget(
self.currentIndex()), 'gdb', None)
if current_tab_gdb:
empty_tab.gdb = current_tab_gdb
empty_tab.connected_gdb_path_label.setText(
self.widget(self.currentIndex()).gdb.path)
empty_tab.connect_to_geodatabase(
evt=None, triggered_with_browse=False)
empty_tab._fill_toc()
else: # the first tab
empty_tab.connected_gdb_path_label.setText(
not_connected_to_gdb_message)
# focus on the newly added tab
self.setCurrentWidget(empty_tab)
# focus on the query text panel to be able to start typing directly
empty_tab.query.setFocus()
if dev_mode:
empty_tab.gdb = Geodatabase('NYC.gdb')
empty_tab.connected_gdb_path_label.setText(empty_tab.gdb.path)
empty_tab._set_gdb_items_highlight()
empty_tab._fill_toc()
empty_tab.query.setText('select * from streets limit 1000')
empty_tab.run_query()
return
# ----------------------------------------------------------------------
def on_close_tab_mouse(self, index):
"""Close the tab upon clicking on the close icon confirming first."""
tab_to_close = self.widget(index)
if tab_to_close.query.document().toPlainText():
self.close_tab_handler(index)
else:
self.removeTab(index)
return
# ----------------------------------------------------------------------
def close_tab_handler(self, index):
"""Show confirmation message box before closing a tab."""
msg = QMessageBox()
msg.setText('Are you sure you want to close this tab?')
msg.setWindowTitle('Close tab')
msg.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)
msg.buttonClicked.connect(
lambda evt, arg=index: self.close_tab(
evt, index))
msg.exec_()
return
# ----------------------------------------------------------------------
def close_tab(self, evt, index):
"""Close tab method."""
if evt.text() == 'OK':
self.removeTab(index)
return
# ----------------------------------------------------------------------
def on_close_tab_keyboard(self):
"""Close the tab upon using the keyboard shortcut."""
index = self.currentIndex()
tab_to_close = self.widget(index)
if tab_to_close.query.document().toPlainText():
self.close_tab_handler(index)
else:
self.removeTab(index)
return
|
StarcoderdataPython
|
23568
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# main.pool application
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.lib.app.extdocapplication import ExtDocApplication
from noc.main.models.pool import Pool
from noc.core.translation import ugettext as _
class PoolApplication(ExtDocApplication):
"""
Pool application
"""
title = _("Pool")
menu = [_("Setup"), _("Pools")]
model = Pool
glyph = "database"
default_ordering = ["name"]
|
StarcoderdataPython
|
1767074
|
<reponame>kenneym/py-feat<gh_stars>10-100
import cv2
import numpy as np
import pandas as pd
import torch
import math
import pandas as pd
import numpy as np
import feat.au_detectors.JAANet.JAANet_model as network
import torch.nn as nn
from PIL import Image
from torchvision import transforms
from feat.utils import get_resource_path, convert68to49
import os
class JAANet(nn.Module):
def __init__(self) -> None:
"""
Initialize.
Args:
img_data: numpy array image data files of shape (N,3,W,H)
land_data: numpy array landmark data of shape (N, 49*2)
"""
# self.imgs = img_data
# self.land_data = land_data
super(JAANet,self).__init__()
self.params = {
"config_unit_dim": 8,
"config_crop_size": 176,
"config_map_size": 44,
"config_au_num": 12,
"config_land_num": 49,
"config_fill_coeff": 0.56,
"config_write_path_prefix": get_resource_path(),
}
config_unit_dim = self.params["config_unit_dim"]
config_crop_size = self.params["config_crop_size"]
config_map_size = self.params["config_map_size"]
config_au_num = self.params["config_au_num"]
config_land_num = self.params["config_land_num"]
config_fill_coeff = self.params["config_fill_coeff"]
config_write_path_prefix = self.params["config_write_path_prefix"]
self.region_learning = network.network_dict["HMRegionLearning"](
input_dim=3, unit_dim=config_unit_dim
)
self.align_net = network.network_dict["AlignNet"](
crop_size=config_crop_size,
map_size=config_map_size,
au_num=config_au_num,
land_num=config_land_num,
input_dim=config_unit_dim * 8,
fill_coeff=config_fill_coeff,
)
self.local_attention_refine = network.network_dict["LocalAttentionRefine"](
au_num=config_au_num, unit_dim=config_unit_dim
)
self.local_au_net = network.network_dict["LocalAUNetv2"](
au_num=config_au_num,
input_dim=config_unit_dim * 8,
unit_dim=config_unit_dim,
)
self.global_au_feat = network.network_dict["HLFeatExtractor"](
input_dim=config_unit_dim * 8, unit_dim=config_unit_dim
)
self.au_net = network.network_dict["AUNet"](
au_num=config_au_num, input_dim=12000, unit_dim=config_unit_dim
)
self.use_gpu = torch.cuda.is_available()
if self.use_gpu:
self.region_learning = self.region_learning.cuda()
self.align_net = self.align_net.cuda()
self.local_attention_refine = self.local_attention_refine.cuda()
self.local_au_net = self.local_au_net.cuda()
self.global_au_feat = self.global_au_feat.cuda()
self.au_net = self.au_net.cuda()
# Load parameters
# load_map = 'cpu' if True else 'false'
# au_occur_model_path = os.path.join(
# config_write_path_prefix , '/region_learning' , '.pth')
# print("should load data at ",os.path.join(config_write_path_prefix , 'region_learning.pth'))
# print("Directory Files:")
# print(os.listdir(config_write_path_prefix))
self.region_learning.load_state_dict(
torch.load(
os.path.join(config_write_path_prefix, "region_learning.pth")
)
)
self.align_net.load_state_dict(
torch.load(os.path.join(config_write_path_prefix, "align_net.pth"))
)
self.local_attention_refine.load_state_dict(
torch.load(
os.path.join(config_write_path_prefix, "local_attention_refine.pth")
)
)
self.local_au_net.load_state_dict(
torch.load(os.path.join(config_write_path_prefix, "local_au_net.pth"))
)
self.global_au_feat.load_state_dict(
torch.load(os.path.join(config_write_path_prefix, "global_au_feat.pth"))
)
self.au_net.load_state_dict(
torch.load(os.path.join(config_write_path_prefix, "au_net.pth"))
)
else:
self.region_learning.load_state_dict(
torch.load(
os.path.join(config_write_path_prefix, "region_learning.pth"),
map_location={"cuda:0": "cpu"},
)
)
self.align_net.load_state_dict(
torch.load(
os.path.join(config_write_path_prefix, "align_net.pth"),
map_location={"cuda:0": "cpu"},
)
)
self.local_attention_refine.load_state_dict(
torch.load(
os.path.join(
config_write_path_prefix, "local_attention_refine.pth"
),
map_location={"cuda:0": "cpu"},
)
)
self.local_au_net.load_state_dict(
torch.load(
os.path.join(config_write_path_prefix, "local_au_net.pth"),
map_location={"cuda:0": "cpu"},
)
)
self.global_au_feat.load_state_dict(
torch.load(
os.path.join(config_write_path_prefix, "global_au_feat.pth"),
map_location={"cuda:0": "cpu"},
)
)
self.au_net.load_state_dict(
torch.load(
os.path.join(config_write_path_prefix, "au_net.pth"),
map_location={"cuda:0": "cpu"},
)
)
self.region_learning.eval()
self.align_net.eval()
self.local_attention_refine.eval()
self.local_au_net.eval()
self.global_au_feat.eval()
self.au_net.eval()
def align_face_49pts(self, img, img_land, box_enlarge=2.9, img_size=200):
"""
code from:
https://github.com/ZhiwenShao/PyTorch-JAANet/blob/master/dataset/face_transform.py
Did some small modifications to fit into our program.
        The function performs preprocessing transformations on pictures.
        Args:
            img: image loaded by cv2. Shape: (3,H,W)
            img_land: flattened landmark array for the img. Shape: (49*2,)
box_enlarge: englarge factor for the face transform, centered at face
img_size: size of the desired output image
Return:
aligned_img: aligned images by cv2
new_land: transformed landmarks
            biocular: biocular distance
"""
leftEye0 = (
img_land[2 * 19]
+ img_land[2 * 20]
+ img_land[2 * 21]
+ img_land[2 * 22]
+ img_land[2 * 23]
+ img_land[2 * 24]
) / 6.0
leftEye1 = (
img_land[2 * 19 + 1]
+ img_land[2 * 20 + 1]
+ img_land[2 * 21 + 1]
+ img_land[2 * 22 + 1]
+ img_land[2 * 23 + 1]
+ img_land[2 * 24 + 1]
) / 6.0
rightEye0 = (
img_land[2 * 25]
+ img_land[2 * 26]
+ img_land[2 * 27]
+ img_land[2 * 28]
+ img_land[2 * 29]
+ img_land[2 * 30]
) / 6.0
rightEye1 = (
img_land[2 * 25 + 1]
+ img_land[2 * 26 + 1]
+ img_land[2 * 27 + 1]
+ img_land[2 * 28 + 1]
+ img_land[2 * 29 + 1]
+ img_land[2 * 30 + 1]
) / 6.0
deltaX = rightEye0 - leftEye0
deltaY = rightEye1 - leftEye1
l = math.sqrt(deltaX * deltaX + deltaY * deltaY)
sinVal = deltaY / l
cosVal = deltaX / l
mat1 = np.mat([[cosVal, sinVal, 0], [-sinVal, cosVal, 0], [0, 0, 1]])
mat2 = np.mat(
[
[leftEye0, leftEye1, 1],
[rightEye0, rightEye1, 1],
[img_land[2 * 13], img_land[2 * 13 + 1], 1],
[img_land[2 * 31], img_land[2 * 31 + 1], 1],
[img_land[2 * 37], img_land[2 * 37 + 1], 1],
]
)
mat2 = (mat1 * mat2.T).T
cx = float((max(mat2[:, 0]) + min(mat2[:, 0]))) * 0.5
cy = float((max(mat2[:, 1]) + min(mat2[:, 1]))) * 0.5
if float(max(mat2[:, 0]) - min(mat2[:, 0])) > float(
max(mat2[:, 1]) - min(mat2[:, 1])
):
halfSize = 0.5 * box_enlarge * float((max(mat2[:, 0]) - min(mat2[:, 0])))
else:
halfSize = 0.5 * box_enlarge * float((max(mat2[:, 1]) - min(mat2[:, 1])))
scale = (img_size - 1) / 2.0 / halfSize
mat3 = np.mat(
[
[scale, 0, scale * (halfSize - cx)],
[0, scale, scale * (halfSize - cy)],
[0, 0, 1],
]
)
mat = mat3 * mat1
aligned_img = cv2.warpAffine(
img,
mat[0:2, :],
(img_size, img_size),
cv2.INTER_LINEAR,
borderValue=(128, 128, 128),
)
land_3d = np.ones((int(len(img_land) / 2), 3))
land_3d[:, 0:2] = np.reshape(np.array(img_land), (int(len(img_land) / 2), 2))
mat_land_3d = np.mat(land_3d)
new_land = np.array((mat * mat_land_3d.T).T)
new_land = np.reshape(new_land[:, 0:2], len(img_land))
return aligned_img, new_land
def detect_au(self, imgs, land_data):
lenth_index = [len(ama) for ama in land_data]
lenth_cumu = np.cumsum(lenth_index)
flat_faces = np.array([item for sublist in land_data for item in sublist]) # Flatten the faces
flat_faces = flat_faces.transpose(0,2,1)
pt49_array = None
img_transforms = transforms.Compose(
[
transforms.CenterCrop(176),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
)
input_torch = None
land_torch = None
for i in range(flat_faces.shape[0]):
            frame_assignment = np.where(i < lenth_cumu)[0][0]  # index of the frame this face belongs to
land_convert = convert68to49(flat_faces[i]).T
new_land_data = land_convert.flatten()
new_img, new_land = self.align_face_49pts(imgs[frame_assignment], new_land_data)
new_img = cv2.cvtColor(new_img, cv2.COLOR_BGR2RGB)
im_pil = Image.fromarray(new_img)
input = img_transforms(im_pil)
if len(input.shape) < 4:
input.unsqueeze_(0)
new_land = torch.from_numpy(new_land)
if input_torch is None:
input_torch = input
else:
input_torch = torch.cat((input_torch,input),0)
if land_torch is None:
land_torch = new_land
else:
land_torch = torch.cat((land_torch,new_land),0)
if self.use_gpu:
input_torch, land_torch = input_torch.cuda(), land_torch.cuda()
region_feat = self.region_learning(input_torch)
align_feat, align_output, aus_map = self.align_net(region_feat)
if self.use_gpu:
aus_map = aus_map.cuda()
output_aus_map = self.local_attention_refine(aus_map.detach())
local_au_out_feat, local_aus_output = self.local_au_net(region_feat, output_aus_map)
local_aus_output = (local_aus_output[:, 1, :]).exp()
global_au_out_feat = self.global_au_feat(region_feat)
concat_au_feat = torch.cat(
(align_feat, global_au_out_feat, local_au_out_feat.detach()), 1
)
aus_output = self.au_net(concat_au_feat)
aus_output = (aus_output[:, 1, :]).exp()
all_output = aus_output.data.cpu().float()
AUoccur_pred_prob = all_output.data.numpy()
return AUoccur_pred_prob
|
StarcoderdataPython
|
3358906
|
# Copyright (C) 2016 by VLAM3D Software inc. https://www.vlam3d.com
# This code is licensed under the MIT license (MIT) (http://opensource.org/licenses/MIT)
from __future__ import print_function
import argparse
import vulkanmitts as vk
import numpy as np
from cube_data import *
from vkcontextmanager import vkreleasing, VkContextManager
from transforms import *
def render_textured_cube(vkc, cube_coords):
vkc.init_presentable_image()
rp_begin = vkc.make_render_pass_begin_info()
vk.resetCommandBuffer(vkc.command_buffers[0],0)
vk.beginCommandBuffer(vkc.command_buffers[0],vk.CommandBufferBeginInfo(0,None))
vk.cmdBeginRenderPass(vkc.command_buffers[0], rp_begin, vk.VK_SUBPASS_CONTENTS_INLINE)
vk.cmdBindPipeline(vkc.command_buffers[0], vk.VK_PIPELINE_BIND_POINT_GRAPHICS, vkc.pipeline[0])
vk.cmdBindDescriptorSets(vkc.command_buffers[0], vk.VK_PIPELINE_BIND_POINT_GRAPHICS, vkc.pipeline_layout, 0, vkc.descriptor_set, [])
vk.cmdBindVertexBuffers(vkc.command_buffers[0], 0, vk.VkBufferVector(1,vkc.vertex_buffer), vk.VkDeviceSizeVector(1,0))
vkc.init_viewports()
vkc.init_scissors()
vk.cmdDraw(vkc.command_buffers[0], cube_coords.shape[0], 1, 0, 0)
vk.cmdEndRenderPass(vkc.command_buffers[0])
vkc.stage_readback_copy()
vk.endCommandBuffer(vkc.command_buffers[0])
with vkreleasing( vk.createFence(vkc.device, vk.FenceCreateInfo(0)) ) as draw_fence:
submit_info_vec = vk.VkSubmitInfoVector()
submit_info_vec.append( vk.SubmitInfo( vk.VkSemaphoreVector(), vk.VkFlagVector(1,vk.VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT), vkc.command_buffers, vk.VkSemaphoreVector()) )
vk.queueSubmit(vkc.device_queue, submit_info_vec, draw_fence)
command_buffer_finished = False
cmd_fences = vk.VkFenceVector(1,draw_fence)
while not command_buffer_finished:
try:
vk.waitForFences(vkc.device, cmd_fences, True, 100000000)
command_buffer_finished = True
except RuntimeError:
pass
vkc.readback_map_copy()
vkc.save_readback_image('textured_cube.png')
def hello_pyvk(texture_file, output_img_file):
cube_coords = get_xyzw_uv_cube_coords()
print('Creating Vulkan Context')
with VkContextManager(VkContextManager.VKC_INIT_PIPELINE, VkContextManager.VKC_OFFSCREEN) as vkc:
render_textured_cube(vkc,cube_coords)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Renders a textured cube to an image file.')
parser.add_argument('--texture',type=str,default='lena_std.png',help='Path to image file for texture map')
parser.add_argument('--outimg',type=str,default='hello_pyvk.png',help='Path to output image')
args = parser.parse_args()
hello_pyvk(args.texture, args.outimg)
|
StarcoderdataPython
|
156301
|
#-------------------------------------------------------------------------------
# Name: powerlaw.py
# Purpose: This is a set of power law coefficients(a,b) for the calculation of
# empirical rain attenuation model A = a*R^b.
#-------------------------------------------------------------------------------
def get_coef_ab(freq, mod_type = 'MP'):
'''
Returns power law coefficients according to model type (mod_type)
at a given frequency[Hz].
Input:
freq - frequency, [Hz].
Optional:
        mod_type - model type for power law. This can be 'ITU_R2005',
                   'MP', or 'GAMMA'. By default, mod_type = 'MP'.
Output:
a,b - power law coefficients.
'''
if mod_type == 'ITU_R2005':
a = {'18': 0.07393, '23': 0.1285, '38': 0.39225}
b = {'18':1.0404605978, '23': 0.99222272,'38':0.8686641682}
elif mod_type == 'MP':
a = {'18': 0.05911087, '23': 0.1080751, '38': 0.37898495}
b = {'18': 1.08693514, '23': 1.05342886, '38': 0.92876888}
elif mod_type=='GAMMA':
a = {'18': 0.04570854, '23': 0.08174184, '38': 0.28520923}
b = {'18': 1.09211488, '23': 1.08105214, '38': 1.01426258}
freq = int(freq/1e9)
return a[str(freq)], b[str(freq)]
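# Worked example (a sketch, not part of the module): specific rain attenuation
# for a rain rate of 20 mm/h at 38 GHz using the 'MP' coefficients above.
# In the usual convention R is in mm/h and A = a*R^b is in dB/km.
if __name__ == '__main__':
    a, b = get_coef_ab(38e9, mod_type='MP')
    R = 20.0
    A = a * R ** b
    print(a, b, A)   # 0.37898495 0.92876888 ~6.1 dB/km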
|
StarcoderdataPython
|
3332605
|
<filename>tests/test_xarray_plugin.py
import os.path
import xarray as xr
HERE = os.path.dirname(__file__)
def test_xarray_open_dataset():
cog_file = os.path.join(HERE, "sample.tif")
ds = xr.open_dataset(cog_file, engine="gdal-raw")
assert isinstance(ds, xr.Dataset)
assert "band1" in ds.data_vars
assert ds.data_vars["band1"].shape == (500, 500)
assert "grid_mapping" in ds.data_vars["band1"].attrs
assert "spatial_ref" in ds.data_vars
assert "spatial_ref" not in ds.coords
ds.to_netcdf("test-coordinates.nc")
ds = xr.open_dataset(cog_file, engine="gdal-raw", decode_coords="all")
assert "grid_mapping" in ds.data_vars["band1"].encoding
assert "spatial_ref" not in ds.data_vars
assert "spatial_ref" in ds.coords
ds.to_netcdf("test-all.nc")
|
StarcoderdataPython
|
1626846
|
<reponame>darkless456/Python<filename>class7.py
# class.py
class num(object):
def __init__(self,value):
self.value = value
def getNeg(self):
return -self.value
def setNeg(self,value):
self.value = -value
def delNeg(self):
print("Value also deleted")
del self.value
neg = property(getNeg, setNeg, delNeg, "I'm negative")
x = num(1.1)
print(x.neg)
x.neg = -22
print(x.value)
print(num.neg.__doc__)
del x.neg
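# For comparison (a sketch, not part of the original file): the same behavior
# written with the @property decorator syntax instead of the property() call.
class Num(object):
    def __init__(self, value):
        self.value = value
    @property
    def neg(self):
        """I'm negative"""
        return -self.value
    @neg.setter
    def neg(self, value):
        self.value = -value
    @neg.deleter
    def neg(self):
        print("Value also deleted")
        del self.value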
|
StarcoderdataPython
|
90419
|
<gh_stars>1-10
# This file is based on the original C++ modeltest.cpp from:
# http://code.qt.io/cgit/qt/qtbase.git/tree/tests/auto/other/modeltest/modeltest.cpp
# Licensed under the following terms:
#
# Copyright (C) 2015 The Qt Company Ltd.
# Contact: http://www.qt.io/licensing/
#
# This file is part of the test suite of the Qt Toolkit.
#
# $QT_BEGIN_LICENSE:LGPL21$
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms
# and conditions see http://www.qt.io/terms-conditions. For further
# information use the contact form at http://www.qt.io/contact-us.
#
# GNU Lesser General Public License Usage
# Alternatively, this file may be used under the terms of the GNU Lesser
# General Public License version 2.1 or version 3 as published by the Free
# Software Foundation and appearing in the file LICENSE.LGPLv21 and
# LICENSE.LGPLv3 included in the packaging of this file. Please review the
# following information to ensure the GNU Lesser General Public License
# requirements will be met: https://www.gnu.org/licenses/lgpl.html and
# http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
#
# As a special exception, The Qt Company gives you certain additional
# rights. These rights are described in The Qt Company LGPL Exception
# version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
#
# $QT_END_LICENSE$
from __future__ import print_function
import collections
from pytestqt.qt_compat import qt_api
_Changing = collections.namedtuple('_Changing', 'parent, old_size, last, next')
class ModelTester:
"""A tester for Qt's QAbstractItemModels.
:ivar bool data_display_may_return_none: if the model implementation is
allowed to return None from data() for DisplayRole.
"""
def __init__(self, config):
self._model = None
self._fetching_more = None
self._insert = None
self._remove = None
self._changing = []
self.data_display_may_return_none = False
def _debug(self, text):
print('modeltest: ' + text)
def _modelindex_debug(self, index):
"""Get a string for debug output for a QModelIndex."""
if not index.isValid():
return '<invalid> (0x{:x})'.format(id(index))
else:
data = self._model.data(index, qt_api.QtCore.Qt.DisplayRole)
return '{}/{} {!r} (0x{:x})'.format(
index.row(), index.column(),
qt_api.extract_from_variant(data),
id(index))
def check(self, model):
"""Runs a series of checks in the given model.
Connect to all of the models signals.
Whenever anything happens recheck everything.
"""
assert model is not None
self._model = model
self._fetching_more = False
self._insert = []
self._remove = []
self._changing = []
self._model.columnsAboutToBeInserted.connect(self._run)
self._model.columnsAboutToBeRemoved.connect(self._run)
self._model.columnsInserted.connect(self._run)
self._model.columnsRemoved.connect(self._run)
self._model.dataChanged.connect(self._run)
self._model.headerDataChanged.connect(self._run)
self._model.layoutAboutToBeChanged.connect(self._run)
self._model.layoutChanged.connect(self._run)
self._model.modelReset.connect(self._run)
self._model.rowsAboutToBeInserted.connect(self._run)
self._model.rowsAboutToBeRemoved.connect(self._run)
self._model.rowsInserted.connect(self._run)
self._model.rowsRemoved.connect(self._run)
# Special checks for changes
self._model.layoutAboutToBeChanged.connect(
self._on_layout_about_to_be_changed)
self._model.layoutChanged.connect(self._on_layout_changed)
self._model.rowsAboutToBeInserted.connect(
self._on_rows_about_to_be_inserted)
self._model.rowsAboutToBeRemoved.connect(
self._on_rows_about_to_be_removed)
self._model.rowsInserted.connect(self._on_rows_inserted)
self._model.rowsRemoved.connect(self._on_rows_removed)
self._model.dataChanged.connect(self._on_data_changed)
self._model.headerDataChanged.connect(self._on_header_data_changed)
self._run()
def _cleanup(self):
"""Not API intended for users, but called from the fixture function."""
if self._model is None:
return
self._model.columnsAboutToBeInserted.disconnect(self._run)
self._model.columnsAboutToBeRemoved.disconnect(self._run)
self._model.columnsInserted.disconnect(self._run)
self._model.columnsRemoved.disconnect(self._run)
self._model.dataChanged.disconnect(self._run)
self._model.headerDataChanged.disconnect(self._run)
self._model.layoutAboutToBeChanged.disconnect(self._run)
self._model.layoutChanged.disconnect(self._run)
self._model.modelReset.disconnect(self._run)
self._model.rowsAboutToBeInserted.disconnect(self._run)
self._model.rowsAboutToBeRemoved.disconnect(self._run)
self._model.rowsInserted.disconnect(self._run)
self._model.rowsRemoved.disconnect(self._run)
self._model.layoutAboutToBeChanged.disconnect(
self._on_layout_about_to_be_changed)
self._model.layoutChanged.disconnect(self._on_layout_changed)
self._model.rowsAboutToBeInserted.disconnect(
self._on_rows_about_to_be_inserted)
self._model.rowsAboutToBeRemoved.disconnect(
self._on_rows_about_to_be_removed)
self._model.rowsInserted.disconnect(self._on_rows_inserted)
self._model.rowsRemoved.disconnect(self._on_rows_removed)
self._model.dataChanged.disconnect(self._on_data_changed)
self._model.headerDataChanged.disconnect(self._on_header_data_changed)
self._model = None
def _run(self):
assert self._model is not None
assert self._fetching_more is not None
if self._fetching_more:
return
self._test_basic()
self._test_row_count()
self._test_column_count()
self._test_has_index()
self._test_index()
self._test_parent()
self._test_data()
def _test_basic(self):
"""Try to call a number of the basic functions (not all).
Make sure the model doesn't outright segfault, testing the functions
which make sense.
"""
assert self._model.buddy(qt_api.QtCore.QModelIndex()) == qt_api.QtCore.QModelIndex()
self._model.canFetchMore(qt_api.QtCore.QModelIndex())
assert self._column_count(qt_api.QtCore.QModelIndex()) >= 0
display_data = self._model.data(qt_api.QtCore.QModelIndex(),
qt_api.QtCore.Qt.DisplayRole)
assert qt_api.extract_from_variant(display_data) is None
self._fetch_more(qt_api.QtCore.QModelIndex())
flags = self._model.flags(qt_api.QtCore.QModelIndex())
assert flags == qt_api.QtCore.Qt.ItemIsDropEnabled or not flags
self._has_children(qt_api.QtCore.QModelIndex())
self._model.hasIndex(0, 0)
self._model.headerData(0, qt_api.QtCore.Qt.Horizontal)
self._model.index(0, 0)
self._model.itemData(qt_api.QtCore.QModelIndex())
cache = None
self._model.match(qt_api.QtCore.QModelIndex(), -1, cache)
self._model.mimeTypes()
assert self._parent(qt_api.QtCore.QModelIndex()) == qt_api.QtCore.QModelIndex()
assert self._model.rowCount() >= 0
self._model.setData(qt_api.QtCore.QModelIndex(), None, -1)
self._model.setHeaderData(-1, qt_api.QtCore.Qt.Horizontal, None)
self._model.setHeaderData(999999, qt_api.QtCore.Qt.Horizontal, None)
self._model.sibling(0, 0, qt_api.QtCore.QModelIndex())
self._model.span(qt_api.QtCore.QModelIndex())
self._model.supportedDropActions()
def _test_row_count(self):
"""Test model's implementation of rowCount() and hasChildren().
Models that are dynamically populated are not as fully tested here.
The models rowCount() is tested more extensively in _check_children(),
but this catches the big mistakes.
"""
# check top row
top_index = self._model.index(0, 0, qt_api.QtCore.QModelIndex())
rows = self._model.rowCount(top_index)
assert rows >= 0
if rows > 0:
assert self._has_children(top_index)
second_level_index = self._model.index(0, 0, top_index)
if second_level_index.isValid(): # not the top level
# check a row count where parent is valid
rows = self._model.rowCount(second_level_index)
assert rows >= 0
if rows > 0:
assert self._has_children(second_level_index)
def _test_column_count(self):
"""Test model's implementation of columnCount() and hasChildren().
columnCount() is tested more extensively in _check_children(),
but this catches the big mistakes.
"""
# check top row
top_index = self._model.index(0, 0, qt_api.QtCore.QModelIndex())
assert self._column_count(top_index) >= 0
# check a column count where parent is valid
child_index = self._model.index(0, 0, top_index)
if child_index.isValid():
assert self._column_count(child_index) >= 0
def _test_has_index(self):
"""Test model's implementation of hasIndex().
hasIndex() is tested more extensively in _check_children(),
but this catches the big mistakes.
"""
# Make sure that invalid values return an invalid index
assert not self._model.hasIndex(-2, -2)
assert not self._model.hasIndex(-2, 0)
assert not self._model.hasIndex(0, -2)
rows = self._model.rowCount()
columns = self._column_count()
# check out of bounds
assert not self._model.hasIndex(rows, columns)
assert not self._model.hasIndex(rows + 1, columns + 1)
if rows > 0:
assert self._model.hasIndex(0, 0)
def _test_index(self):
"""Test model's implementation of index().
index() is tested more extensively in _check_children(),
but this catches the big mistakes.
"""
# Make sure that invalid values return an invalid index
assert self._model.index(-2, -2) == qt_api.QtCore.QModelIndex()
assert self._model.index(-2, 0) == qt_api.QtCore.QModelIndex()
assert self._model.index(0, -2) == qt_api.QtCore.QModelIndex()
rows = self._model.rowCount()
columns = self._column_count()
if rows == 0:
return
# Catch off by one errors
assert self._model.index(rows, columns) == qt_api.QtCore.QModelIndex()
assert self._model.index(0, 0).isValid()
# Make sure that the same index is *always* returned
a = self._model.index(0, 0)
b = self._model.index(0, 0)
assert a == b
def _test_parent(self):
"""Tests model's implementation of QAbstractItemModel::parent()."""
# Make sure the model won't crash and will return an invalid
# QModelIndex when asked for the parent of an invalid index.
assert self._parent(qt_api.QtCore.QModelIndex()) == qt_api.QtCore.QModelIndex()
if self._model.rowCount() == 0:
return
# Column 0 | Column 1 |
# QModelIndex() | |
# \- top_index | top_index_1 |
# \- child_index | child_index_1 |
# Common error test #1, make sure that a top level index has a parent
# that is a invalid QModelIndex.
top_index = self._model.index(0, 0, qt_api.QtCore.QModelIndex())
assert self._parent(top_index) == qt_api.QtCore.QModelIndex()
# Common error test #2, make sure that a second level index has a
# parent that is the first level index.
if self._model.rowCount(top_index) > 0:
child_index = self._model.index(0, 0, top_index)
assert self._parent(child_index) == top_index
# Common error test #3, the second column should NOT have the same
# children as the first column in a row.
# Usually the second column shouldn't have children.
top_index_1 = self._model.index(0, 1, qt_api.QtCore.QModelIndex())
if self._model.rowCount(top_index_1) > 0:
child_index = self._model.index(0, 0, top_index)
child_index_1 = self._model.index(0, 0, top_index_1)
assert child_index != child_index_1
# Full test, walk n levels deep through the model making sure that all
# parent's children correctly specify their parent.
self._check_children(qt_api.QtCore.QModelIndex())
def _check_children(self, parent, current_depth=0):
"""Check parent/children relationships.
Called from the parent() test.
A model that returns an index of parent X should also return X when
asking for the parent of the index.
This recursive function does pretty extensive testing on the whole
model in an effort to catch edge cases.
This function assumes that rowCount(), columnCount() and index()
already work. If they have a bug it will point it out, but the above
tests should have already found the basic bugs because it is easier to
figure out the problem in those tests then this one.
"""
# First just try walking back up the tree.
p = parent
while p.isValid():
p = p.parent()
# For models that are dynamically populated
if self._model.canFetchMore(parent):
self._fetch_more(parent)
rows = self._model.rowCount(parent)
columns = self._column_count(parent)
if rows > 0:
assert self._has_children(parent)
# Some further testing against rows(), columns(), and hasChildren()
assert rows >= 0
assert columns >= 0
if rows > 0:
assert self._has_children(parent)
self._debug("Checking children of {} with depth {} "
"({} rows, {} columns)".format(
self._modelindex_debug(parent), current_depth,
rows, columns))
top_left_child = self._model.index(0, 0, parent)
assert not self._model.hasIndex(rows + 1, 0, parent)
for r in range(rows):
if self._model.canFetchMore(parent):
self._fetch_more(parent)
assert not self._model.hasIndex(r, columns + 1, parent)
for c in range(columns):
assert self._model.hasIndex(r, c, parent)
index = self._model.index(r, c, parent)
# rowCount() and columnCount() said that it existed...
assert index.isValid()
# index() should always return the same index when called twice
# in a row
modified_index = self._model.index(r, c, parent)
assert index == modified_index
# Make sure we get the same index if we request it twice in a
# row
a = self._model.index(r, c, parent)
b = self._model.index(r, c, parent)
assert a == b
sibling = self._model.sibling(r, c, top_left_child)
assert index == sibling
sibling = top_left_child.sibling(r, c)
assert index == sibling
# Some basic checking on the index that is returned
assert index.model() == self._model
assert index.row() == r
assert index.column() == c
data = self._model.data(index, qt_api.QtCore.Qt.DisplayRole)
if not self.data_display_may_return_none:
assert qt_api.extract_from_variant(data) is not None
# If the next test fails here is some somewhat useful debug you
# play with.
if self._parent(index) != parent:
self._debug(
"parent-check failed for index {}:\n"
" parent {} != expected {}".format(
self._modelindex_debug(index),
self._modelindex_debug(self._parent(index)),
self._modelindex_debug(parent)
)
)
# Check that we can get back our real parent.
assert self._parent(index) == parent
# recursively go down the children
if self._has_children(index) and current_depth < 10:
self._debug("{} has {} children".format(
self._modelindex_debug(index),
self._model.rowCount(index)
))
self._check_children(index, current_depth + 1)
# make sure that after testing the children that the index
# doesn't change.
newer_index = self._model.index(r, c, parent)
assert index == newer_index
self._debug("Children check for {} done".format(self._modelindex_debug(parent)))
def _test_data(self):
"""Test model's implementation of data()"""
# Invalid index should return an invalid qvariant
value = self._model.data(qt_api.QtCore.QModelIndex(), qt_api.QtCore.Qt.DisplayRole)
assert qt_api.extract_from_variant(value) is None
if self._model.rowCount() == 0:
return
# A valid index should have a valid QVariant data
assert self._model.index(0, 0).isValid()
# shouldn't be able to set data on an invalid index
ok = self._model.setData(qt_api.QtCore.QModelIndex(), "foo",
qt_api.QtCore.Qt.DisplayRole)
assert not ok
types = [
(qt_api.QtCore.Qt.ToolTipRole, str),
(qt_api.QtCore.Qt.StatusTipRole, str),
(qt_api.QtCore.Qt.WhatsThisRole, str),
(qt_api.QtCore.Qt.SizeHintRole, qt_api.QtCore.QSize),
(qt_api.QtCore.Qt.FontRole, qt_api.QtGui.QFont),
(qt_api.QtCore.Qt.BackgroundColorRole, (qt_api.QtGui.QColor, qt_api.QtGui.QBrush)),
(qt_api.QtCore.Qt.TextColorRole, (qt_api.QtGui.QColor, qt_api.QtGui.QBrush)),
]
# General purpose roles with a fixed expected type
for role, typ in types:
data = self._model.data(self._model.index(0, 0), role)
assert data == None or isinstance(data, typ), role
# Check that the alignment is one we know about
alignment = self._model.data(self._model.index(0, 0),
qt_api.QtCore.Qt.TextAlignmentRole)
alignment = qt_api.extract_from_variant(alignment)
if alignment is not None:
try:
alignment = int(alignment)
except (TypeError, ValueError):
assert 0, '%r should be a TextAlignmentRole enum' % alignment
mask = int(qt_api.QtCore.Qt.AlignHorizontal_Mask |
qt_api.QtCore.Qt.AlignVertical_Mask)
assert alignment == alignment & mask
# Check that the "check state" is one we know about.
state = self._model.data(self._model.index(0, 0),
qt_api.QtCore.Qt.CheckStateRole)
assert state in [None, qt_api.QtCore.Qt.Unchecked, qt_api.QtCore.Qt.PartiallyChecked,
qt_api.QtCore.Qt.Checked]
def _on_rows_about_to_be_inserted(self, parent, start, end):
"""Store what is about to be inserted.
This gets stored to make sure it actually happens in rowsInserted.
"""
last_index = self._model.index(start - 1, 0, parent)
next_index = self._model.index(start, 0, parent)
parent_rowcount = self._model.rowCount(parent)
self._debug("rows about to be inserted: start {}, end {}, parent {}, "
"parent row count {}, last item {}, next item {}".format(
start, end,
self._modelindex_debug(parent),
parent_rowcount,
self._modelindex_debug(last_index),
self._modelindex_debug(next_index),
)
)
last_data = self._model.data(last_index)
next_data = self._model.data(next_index)
c = _Changing(parent=parent, old_size=parent_rowcount,
last=last_data, next=next_data)
self._insert.append(c)
def _on_rows_inserted(self, parent, start, end):
"""Confirm that what was said was going to happen actually did."""
c = self._insert.pop()
last_data = self._model.data(self._model.index(start - 1, 0, parent))
next_data = self._model.data(self._model.index(end + 1, 0, c.parent))
expected_size = c.old_size + (end - start + 1)
current_size = self._model.rowCount(parent)
self._debug("rows inserted: start {}, end {}".format(start, end))
self._debug(" from rowsAboutToBeInserted: parent {}, "
"size {} (-> {} expected), "
"next data {!r}, last data {!r}".format(
self._modelindex_debug(c.parent),
c.old_size, expected_size,
qt_api.extract_from_variant(c.next),
qt_api.extract_from_variant(c.last)
)
)
self._debug(" now in rowsInserted: parent {}, size {}, "
"next data {!r}, last data {!r}".format(
self._modelindex_debug(parent),
current_size,
qt_api.extract_from_variant(next_data),
qt_api.extract_from_variant(last_data)
)
)
if not qt_api.QtCore.qVersion().startswith('4.'):
# Skipping this on Qt4 as the parent changes for some reason:
# modeltest: rows about to be inserted: [...]
# parent <invalid> (0x7f8f540eacf8), [...]
# [...]
# modeltest: from rowsAboutToBeInserted:
# parent 0/0 None (0x7f8f540eacf8), [...]
# modeltest: now in rowsInserted:
# parent <invalid> (0x7f8f60a96cf8) [...]
assert c.parent == parent
for ii in range(start, end + 1):
idx = self._model.index(ii, 0, parent)
self._debug(" item {} inserted: {}".format(ii,
self._modelindex_debug(idx)))
self._debug('')
assert current_size == expected_size
assert c.last == last_data
assert c.next == next_data
def _on_layout_about_to_be_changed(self):
for i in range(max(self._model.rowCount(), 100)):
idx = qt_api.QtCore.QPersistentModelIndex(self._model.index(i, 0))
self._changing.append(idx)
def _on_layout_changed(self):
for p in self._changing:
assert p == self._model.index(p.row(), p.column(), p.parent())
self._changing = []
def _on_rows_about_to_be_removed(self, parent, start, end):
"""Store what is about to be removed to make sure it actually happens.
This gets stored to make sure it actually happens in rowsRemoved.
"""
last_index = self._model.index(start - 1, 0, parent)
next_index = self._model.index(end + 1, 0, parent)
parent_rowcount = self._model.rowCount(parent)
self._debug("rows about to be removed: start {}, end {}, parent {}, "
"parent row count {}, last item {}, next item {}".format(
start, end,
self._modelindex_debug(parent),
parent_rowcount,
self._modelindex_debug(last_index),
self._modelindex_debug(next_index),
)
)
last_data = self._model.data(last_index)
next_data = self._model.data(next_index)
c = _Changing(parent=parent, old_size=parent_rowcount,
last=last_data, next=next_data)
self._remove.append(c)
def _on_rows_removed(self, parent, start, end):
"""Confirm that what was said was going to happen actually did."""
c = self._remove.pop()
last_data = self._model.data(self._model.index(start - 1, 0, c.parent))
next_data = self._model.data(self._model.index(start, 0, c.parent))
current_size = self._model.rowCount(parent)
expected_size = c.old_size - (end - start + 1)
self._debug("rows removed: start {}, end {}".format(start, end))
self._debug(" from rowsAboutToBeRemoved: parent {}, "
"size {} (-> {} expected), "
"next data {!r}, last data {!r}".format(
self._modelindex_debug(c.parent),
c.old_size, expected_size,
qt_api.extract_from_variant(c.next),
qt_api.extract_from_variant(c.last)
)
)
self._debug(" now in rowsRemoved: parent {}, size {}, "
"next data {!r}, last data {!r}".format(
self._modelindex_debug(parent),
current_size,
qt_api.extract_from_variant(next_data),
qt_api.extract_from_variant(last_data)
)
)
if not qt_api.QtCore.qVersion().startswith('4.'):
# Skipping this on Qt4 as the parent changes for some reason
# see _on_rows_inserted for details
assert c.parent == parent
assert current_size == expected_size
assert c.last == last_data
assert c.next == next_data
def _on_data_changed(self, top_left, bottom_right):
assert top_left.isValid()
assert bottom_right.isValid()
common_parent = bottom_right.parent()
assert top_left.parent() == common_parent
assert top_left.row() <= bottom_right.row()
assert top_left.column() <= bottom_right.column()
row_count = self._model.rowCount(common_parent)
column_count = self._column_count(common_parent)
assert bottom_right.row() < row_count
assert bottom_right.column() < column_count
def _on_header_data_changed(self, orientation, start, end):
assert orientation in [qt_api.QtCore.Qt.Horizontal, qt_api.QtCore.Qt.Vertical]
assert start >= 0
assert end >= 0
assert start <= end
if orientation == qt_api.QtCore.Qt.Vertical:
item_count = self._model.rowCount()
else:
item_count = self._column_count()
assert start < item_count
assert end < item_count
def _column_count(self, parent=qt_api.QtCore.QModelIndex()):
"""
Workaround for the fact that ``columnCount`` is a private method in
QAbstractListModel/QAbstractTableModel subclasses.
"""
if isinstance(self._model, qt_api.QAbstractListModel):
return 1 if parent == qt_api.QtCore.QModelIndex() else 0
else:
return self._model.columnCount(parent)
def _parent(self, index):
"""
.. see:: ``_column_count``
"""
model_types = (qt_api.QAbstractListModel, qt_api.QAbstractTableModel)
if isinstance(self._model, model_types):
return qt_api.QtCore.QModelIndex()
else:
return self._model.parent(index)
def _has_children(self, parent=qt_api.QtCore.QModelIndex()):
"""
.. see:: ``_column_count``
"""
model_types = (qt_api.QAbstractListModel, qt_api.QAbstractTableModel)
if isinstance(self._model, model_types):
return parent == qt_api.QtCore.QModelIndex() and self._model.rowCount() > 0
else:
return self._model.hasChildren(parent)
def _fetch_more(self, parent):
"""Call ``fetchMore`` on the model and set ``self._fetching_more``."""
self._fetching_more = True
self._model.fetchMore(parent)
self._fetching_more = False
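# Typical usage goes through pytest-qt's built-in ``qtmodeltester`` fixture,
# which wraps the ModelTester class above. The sketch below shows how such a
# test would look in a user's test file; any QAbstractItemModel subclass works.
def test_standard_item_model(qtmodeltester):
    model = qt_api.QtGui.QStandardItemModel()
    model.appendRow(qt_api.QtGui.QStandardItem("item 1"))
    model.appendRow(qt_api.QtGui.QStandardItem("item 2"))
    qtmodeltester.check(model)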
|
StarcoderdataPython
|
3269793
|
<filename>backend/dataportal/test_plots.py
import pytest
from datetime import datetime, timedelta
import random
from .plots import pulsar_summary_plot
def generate_random_utcs(n=10):
min_year = 2018
max_year = 2020
start = datetime(min_year, 1, 1, 00, 00, 00)
years = max_year - min_year + 1
end = start + timedelta(days=365 * years)
datetimes = []
for i in range(n):
datetimes.append(start + (end - start) * random.random())
return datetimes
def generate_random_snrs(n=10):
return random.sample(range(1, 10000), n)
def generate_random_integrations(n=10):
return random.sample(range(1, 256), n)
def test_pulsar_summary_plot():
UTCs = generate_random_utcs()
snrs = generate_random_snrs()
lengths = generate_random_integrations()
bands = ["L-band"] * len(UTCs)
js, div = pulsar_summary_plot(UTCs, snrs, lengths, bands)
assert "</div>" in div
assert "</script>" in js
assert js != "<script></script>"
|
StarcoderdataPython
|
3240734
|
<filename>tests/test_beam.py
from __future__ import absolute_import, division, print_function
import os
import dxtbx
from dxtbx.model.beam import BeamFactory
def test_beam():
dxtbx_dir = dxtbx.__path__[0]
image = os.path.join(dxtbx_dir, "tests", "phi_scan_001.cbf")
assert BeamFactory.imgCIF(image)
|
StarcoderdataPython
|
1768214
|
from KnapsackProblem.KnapsackProblem import KnapsackProblem
if __name__ == '__main__':
print("------------- Knapsack Informations -------------")
max_weight = int(input("Select max weight of knapsack: "))
length_items = int(input("Select max amount of items in the knapsack: "))
max_weight_items = int(input("Select the max weight of the items in the knapsack: "))
max_price_items = int(input("Select the maximum value of the items in the knapsack: "))
    length_population = int(input("Select population size: "))
    length_generations = int(input("Select the number of generations: "))
    knapsack = KnapsackProblem(max_weight, length_items, max_weight_items, max_price_items, length_population, length_generations)
    knapsack.solveProblem()
|
StarcoderdataPython
|
1762696
|
<gh_stars>1-10
import time
import oauth2 as oauth
import requests
from yahoo_weather.config.config import yahoo
from yahoo_weather.config.units import Unit
def get_city_url(API_param, city, unit=Unit.celsius):
return oauth.Request(method="GET", url=yahoo.url_city.format(city=city, unit=unit), parameters=_get_parameters(API_param))
def get_location_url(API_param, lat, long, unit=Unit.celsius):
return oauth.Request(method="GET", url=yahoo.url_location.format(lat=lat, lon=long, unit=unit), parameters=_get_parameters(API_param))
def _get_parameters(API_param):
# Set the base oauth_* parameters along with any other parameters required
# for the API call.
return {
"Yahoo-App-Id": API_param.APP_ID,
'oauth_timestamp': str(int(time.time())),
'oauth_signature_method': "HMAC-SHA1",
'oauth_version': "1.0",
'oauth_nonce': oauth.generate_nonce(),
'oauth_consumer_key': API_param.api_key
}
def _get_consumer(API_param):
# Setup the Consumer with the api_keys given by the provider
return oauth.Consumer(key=API_param.api_key, secret=API_param.api_secret)
def request_api(req,API_param):
# Create the signature
signature = oauth.SignatureMethod_HMAC_SHA1().sign(req, _get_consumer(API_param), None)
# Add the Signature to the request
req['oauth_signature'] = signature
api_result = requests.get(req.to_url())
return api_result
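# A minimal end-to-end sketch (the credential holder below is hypothetical; the
# real package ships its own API-parameter object exposing these attribute
# names, which are the ones read by the helpers above). The request only
# succeeds with valid Yahoo API credentials.
if __name__ == "__main__":
    class _ApiParam:
        APP_ID = "your-app-id"
        api_key = "your-consumer-key"
        api_secret = "your-consumer-secret"
    req = get_city_url(_ApiParam, city="Berlin", unit=Unit.celsius)
    response = request_api(req, _ApiParam)
    print(response.status_code)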
|
StarcoderdataPython
|
1683119
|
import json
import logging
import os
from smda.Disassembler import Disassembler
def detectBackend():
backend = ""
version = ""
try:
import idaapi
import idautils
backend = "IDA"
version = idaapi.IDA_SDK_VERSION
except:
pass
return (backend, version)
if __name__ == "__main__":
BACKEND, VERSION = detectBackend()
if BACKEND == "IDA":
from smda.ida.IdaInterface import IdaInterface
ida_interface = IdaInterface()
binary = ida_interface.getBinary()
base_addr = ida_interface.getBaseAddr()
DISASSEMBLER = Disassembler(backend=BACKEND)
REPORT = DISASSEMBLER.disassembleBuffer(binary, base_addr)
output_path = ida_interface.getIdbDir()
output_filepath = output_path + "ConvertedFromIdb.smda"
with open(output_filepath, "w") as fout:
json.dump(REPORT.toDict(), fout, indent=1, sort_keys=True)
print("Output saved to: %s" % output_filepath)
else:
raise Exception("No supported backend found.")
|
StarcoderdataPython
|
100673
|
# -*- coding: utf-8 -*-
__title__ = 'apostle'
__version__ = '0.1.0'
__build__ = 0x000100
__author__ = '<NAME>'
__license__ = 'MIT'
__copyright__ = 'Copyright 2013 Apostle.io'
import os
domain_key = os.getenv('APOSTLE_DOMAIN_KEY')
delivery_host = os.getenv('APOSTLE_DELIVERY_HOST', 'https://deliver.apostle.io')
from apostle.exceptions import ValidationError
from apostle.mail import Mail
from apostle.queue import Queue
def deliver(template_id, options):
if not template_id:
        raise ValidationError("No template id provided")
    if not options or "email" not in options:
        raise ValidationError("No email address provided")
queue = get_queue()
queue.add(Mail(template_id, options))
queue.deliver()
def get_queue():
return Queue()
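if __name__ == "__main__":
    # A minimal usage sketch (placeholder template id and recipient; the
    # APOSTLE_DOMAIN_KEY environment variable must be set for real delivery).
    deliver("welcome-email", {"email": "user@example.com", "name": "Ada"})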
|
StarcoderdataPython
|
164740
|
"""
Copyright (c) 2022, Magentix
This code is licensed under simplified BSD license (see LICENSE for details)
StaPy JsMin Plugin - Version 1.0.0
Requirements:
- jsmin
"""
from pathlib import Path
import jsmin
import os
def file_content_opened(content, args: dict) -> str:
if _get_file_extension(args['path']) != 'js':
return content
return jsmin.jsmin(content.decode()).encode()
def file_copy_before(source: str, args: dict) -> str:
if _get_file_extension(source) != 'js':
return source
return _get_min_file(source, jsmin.jsmin(_get_file_content(source)))
def _get_min_file(source: str, content: str) -> str:
file = open(_get_tmp_file_path(source), 'w', encoding='utf-8')
file.write(content)
file.close()
return _get_tmp_file_path(source)
def _get_tmp_file_path(source: str) -> str:
name = os.path.normpath(_get_current_directory() + '/../tmp/' + os.path.basename(source) + '.min')
_create_directory(name)
return name
def _create_directory(path) -> None:
if _get_file_extension(path):
path = os.path.dirname(path)
Path(os.path.normpath(path)).mkdir(parents=True, exist_ok=True)
def _get_current_directory() -> str:
return os.path.dirname(os.path.realpath(__file__))
def _get_file_content(source: str) -> str:
file = open(os.path.normpath(source), 'r', encoding="utf-8")
content = file.read()
file.close()
return content
def _get_file_extension(file: str) -> str:
name, extension = os.path.splitext(file)
if not extension:
extension = ''
return extension.replace('.', '')
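if __name__ == "__main__":
    # A quick sanity check of the minify hook (a sketch; the path is illustrative
    # and only its '.js' extension matters to the hook).
    sample = b"function add (a, b) {  return a + b; }"
    print(file_content_opened(sample, {'path': 'static/app.js'}))
    # -> roughly b'function add(a,b){return a+b;}' (exact output depends on jsmin)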
|
StarcoderdataPython
|
3212355
|
import os
from sklearn.model_selection import train_test_split
path='../train_wav'
train_txt='../meta/vox2_train.txt'
val_txt='../meta/vox2_val.txt'
def generate_txt(path):
datasets = os.listdir(path)
i = 0
data_list=[]
label=[]
for dataset in datasets:
dataset_path = os.path.join(path, dataset)
second_datasets = os.listdir(dataset_path)
for second_dataset in second_datasets:
audio_path = os.path.join(dataset_path, second_dataset)
audio_path2 = os.path.join(dataset, second_dataset)
audio_datasets = os.listdir(audio_path)
for audio_dataset in audio_datasets:
audio = os.path.join(audio_path2, audio_dataset)
                audio_rel_path = audio.replace('\\', '/')  # keep the directory path above intact
                data_list.append(audio_rel_path)
label.append(i)
i += 1
return data_list,label
def write_txt(x,y,txt_path):
txt=open(txt_path,'w')
for j in range(len(x)):
txt.write(str(x[j])+' '+str(y[j]))
txt.write('\n')
txt.close()
dataset,labels=generate_txt(path)
x_train,x_val,y_train,y_val=train_test_split(dataset,labels,test_size=0.2,random_state=1)
write_txt(x_train,y_train,train_txt)
write_txt(x_val,y_val,val_txt)
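# A sketch of reading one of the generated lists back (uses train_txt defined
# above); rsplit keeps any spaces inside a path attached to the path itself.
with open(train_txt) as fh:
    pairs = [line.strip().rsplit(' ', 1) for line in fh if line.strip()]
train_paths = [p for p, _ in pairs]
train_labels = [int(lab) for _, lab in pairs]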
|
StarcoderdataPython
|
1673370
|
<gh_stars>0
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright(c)2013 NTT corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Unit tests for websockifyserver """
import errno
import os
import logging
import select
import shutil
import socket
import ssl
from mox3 import stubout
import sys
import tempfile
import unittest
import socket
import signal
from websockify import websockifyserver
try:
from BaseHTTPServer import BaseHTTPRequestHandler
except ImportError:
from http.server import BaseHTTPRequestHandler
try:
from StringIO import StringIO
BytesIO = StringIO
except ImportError:
from io import StringIO
from io import BytesIO
def raise_oserror(*args, **kwargs):
raise OSError('fake error')
class FakeSocket(object):
def __init__(self, data=''):
if isinstance(data, bytes):
self._data = data
else:
self._data = data.encode('latin_1')
def recv(self, amt, flags=None):
res = self._data[0:amt]
if not (flags & socket.MSG_PEEK):
self._data = self._data[amt:]
return res
def makefile(self, mode='r', buffsize=None):
if 'b' in mode:
return BytesIO(self._data)
else:
return StringIO(self._data.decode('latin_1'))
class WebSockifyRequestHandlerTestCase(unittest.TestCase):
def setUp(self):
super(WebSockifyRequestHandlerTestCase, self).setUp()
self.stubs = stubout.StubOutForTesting()
self.tmpdir = tempfile.mkdtemp('-websockify-tests')
# Mock this out cause it screws tests up
self.stubs.Set(os, 'chdir', lambda *args, **kwargs: None)
self.stubs.Set(BaseHTTPRequestHandler, 'send_response',
lambda *args, **kwargs: None)
def fake_send_error(self, code, message=None, explain=None):
self.last_code = code
self.stubs.Set(BaseHTTPRequestHandler, 'send_error',
fake_send_error)
def tearDown(self):
"""Called automatically after each test."""
self.stubs.UnsetAll()
os.rmdir(self.tmpdir)
super(WebSockifyRequestHandlerTestCase, self).tearDown()
def _get_server(self, handler_class=websockifyserver.WebSockifyRequestHandler,
**kwargs):
web = kwargs.pop('web', self.tmpdir)
return websockifyserver.WebSockifyServer(
handler_class, listen_host='localhost',
listen_port=80, key=self.tmpdir, web=web,
record=self.tmpdir, daemon=False, ssl_only=0, idle_timeout=1,
**kwargs)
def test_normal_get_with_only_upgrade_returns_error(self):
server = self._get_server(web=None)
handler = websockifyserver.WebSockifyRequestHandler(
FakeSocket('GET /tmp.txt HTTP/1.1'), '127.0.0.1', server)
def fake_send_response(self, code, message=None):
self.last_code = code
self.stubs.Set(BaseHTTPRequestHandler, 'send_response',
fake_send_response)
handler.do_GET()
self.assertEqual(handler.last_code, 405)
def test_list_dir_with_file_only_returns_error(self):
server = self._get_server(file_only=True)
handler = websockifyserver.WebSockifyRequestHandler(
FakeSocket('GET / HTTP/1.1'), '127.0.0.1', server)
def fake_send_response(self, code, message=None):
self.last_code = code
self.stubs.Set(BaseHTTPRequestHandler, 'send_response',
fake_send_response)
handler.path = '/'
handler.do_GET()
self.assertEqual(handler.last_code, 404)
class WebSockifyServerTestCase(unittest.TestCase):
def setUp(self):
super(WebSockifyServerTestCase, self).setUp()
self.stubs = stubout.StubOutForTesting()
self.tmpdir = tempfile.mkdtemp('-websockify-tests')
# Mock this out cause it screws tests up
self.stubs.Set(os, 'chdir', lambda *args, **kwargs: None)
def tearDown(self):
"""Called automatically after each test."""
self.stubs.UnsetAll()
os.rmdir(self.tmpdir)
super(WebSockifyServerTestCase, self).tearDown()
def _get_server(self, handler_class=websockifyserver.WebSockifyRequestHandler,
**kwargs):
return websockifyserver.WebSockifyServer(
handler_class, listen_host='localhost',
listen_port=80, key=self.tmpdir, web=self.tmpdir,
record=self.tmpdir, **kwargs)
def test_daemonize_raises_error_while_closing_fds(self):
server = self._get_server(daemon=True, ssl_only=1, idle_timeout=1)
self.stubs.Set(os, 'fork', lambda *args: 0)
self.stubs.Set(signal, 'signal', lambda *args: None)
self.stubs.Set(os, 'setsid', lambda *args: None)
self.stubs.Set(os, 'close', raise_oserror)
self.assertRaises(OSError, server.daemonize, keepfd=None, chdir='./')
def test_daemonize_ignores_ebadf_error_while_closing_fds(self):
def raise_oserror_ebadf(fd):
raise OSError(errno.EBADF, 'fake error')
server = self._get_server(daemon=True, ssl_only=1, idle_timeout=1)
self.stubs.Set(os, 'fork', lambda *args: 0)
self.stubs.Set(os, 'setsid', lambda *args: None)
self.stubs.Set(signal, 'signal', lambda *args: None)
self.stubs.Set(os, 'close', raise_oserror_ebadf)
self.stubs.Set(os, 'open', raise_oserror)
self.assertRaises(OSError, server.daemonize, keepfd=None, chdir='./')
def test_handshake_fails_on_not_ready(self):
server = self._get_server(daemon=True, ssl_only=0, idle_timeout=1)
def fake_select(rlist, wlist, xlist, timeout=None):
return ([], [], [])
self.stubs.Set(select, 'select', fake_select)
self.assertRaises(
websockifyserver.WebSockifyServer.EClose, server.do_handshake,
FakeSocket(), '127.0.0.1')
def test_empty_handshake_fails(self):
server = self._get_server(daemon=True, ssl_only=0, idle_timeout=1)
sock = FakeSocket('')
def fake_select(rlist, wlist, xlist, timeout=None):
return ([sock], [], [])
self.stubs.Set(select, 'select', fake_select)
self.assertRaises(
websockifyserver.WebSockifyServer.EClose, server.do_handshake,
sock, '127.0.0.1')
def test_handshake_policy_request(self):
# TODO(directxman12): implement
pass
def test_handshake_ssl_only_without_ssl_raises_error(self):
server = self._get_server(daemon=True, ssl_only=1, idle_timeout=1)
sock = FakeSocket('some initial data')
def fake_select(rlist, wlist, xlist, timeout=None):
return ([sock], [], [])
self.stubs.Set(select, 'select', fake_select)
self.assertRaises(
websockifyserver.WebSockifyServer.EClose, server.do_handshake,
sock, '127.0.0.1')
def test_do_handshake_no_ssl(self):
class FakeHandler(object):
CALLED = False
def __init__(self, *args, **kwargs):
type(self).CALLED = True
FakeHandler.CALLED = False
server = self._get_server(
handler_class=FakeHandler, daemon=True,
ssl_only=0, idle_timeout=1)
sock = FakeSocket('some initial data')
def fake_select(rlist, wlist, xlist, timeout=None):
return ([sock], [], [])
self.stubs.Set(select, 'select', fake_select)
self.assertEqual(server.do_handshake(sock, '127.0.0.1'), sock)
self.assertTrue(FakeHandler.CALLED, True)
def test_do_handshake_ssl(self):
# TODO(directxman12): implement this
pass
def test_do_handshake_ssl_without_ssl_raises_error(self):
# TODO(directxman12): implement this
pass
def test_do_handshake_ssl_without_cert_raises_error(self):
server = self._get_server(daemon=True, ssl_only=0, idle_timeout=1,
cert='afdsfasdafdsafdsafdsafdas')
sock = FakeSocket("\x16some ssl data")
def fake_select(rlist, wlist, xlist, timeout=None):
return ([sock], [], [])
self.stubs.Set(select, 'select', fake_select)
self.assertRaises(
websockifyserver.WebSockifyServer.EClose, server.do_handshake,
sock, '127.0.0.1')
def test_do_handshake_ssl_error_eof_raises_close_error(self):
server = self._get_server(daemon=True, ssl_only=0, idle_timeout=1)
sock = FakeSocket("\x16some ssl data")
def fake_select(rlist, wlist, xlist, timeout=None):
return ([sock], [], [])
def fake_wrap_socket(*args, **kwargs):
raise ssl.SSLError(ssl.SSL_ERROR_EOF)
class fake_create_default_context():
def __init__(self, purpose):
self.verify_mode = None
self.options = 0
def load_cert_chain(self, certfile, keyfile):
pass
def set_default_verify_paths(self):
pass
def load_verify_locations(self, cafile):
pass
def wrap_socket(self, *args, **kwargs):
raise ssl.SSLError(ssl.SSL_ERROR_EOF)
self.stubs.Set(select, 'select', fake_select)
if (hasattr(ssl, 'create_default_context')):
# for recent versions of python
self.stubs.Set(ssl, 'create_default_context', fake_create_default_context)
else:
# for fallback for old versions of python
self.stubs.Set(ssl, 'wrap_socket', fake_wrap_socket)
self.assertRaises(
websockifyserver.WebSockifyServer.EClose, server.do_handshake,
sock, '127.0.0.1')
def test_do_handshake_ssl_sets_ciphers(self):
test_ciphers = 'TEST-CIPHERS-1:TEST-CIPHER-2'
class FakeHandler(object):
def __init__(self, *args, **kwargs):
pass
server = self._get_server(handler_class=FakeHandler, daemon=True,
idle_timeout=1, ssl_ciphers=test_ciphers)
sock = FakeSocket("\x16some ssl data")
def fake_select(rlist, wlist, xlist, timeout=None):
return ([sock], [], [])
class fake_create_default_context():
CIPHERS = ''
def __init__(self, purpose):
self.verify_mode = None
self.options = 0
def load_cert_chain(self, certfile, keyfile):
pass
def set_default_verify_paths(self):
pass
def load_verify_locations(self, cafile):
pass
def wrap_socket(self, *args, **kwargs):
pass
def set_ciphers(self, ciphers_to_set):
fake_create_default_context.CIPHERS = ciphers_to_set
self.stubs.Set(select, 'select', fake_select)
if (hasattr(ssl, 'create_default_context')):
# for recent versions of python
self.stubs.Set(ssl, 'create_default_context', fake_create_default_context)
server.do_handshake(sock, '127.0.0.1')
self.assertEqual(fake_create_default_context.CIPHERS, test_ciphers)
else:
# for fallback for old versions of python
            # not supported, nothing to test
pass
    def test_do_handshake_ssl_sets_options(self):
test_options = 0xCAFEBEEF
class FakeHandler(object):
def __init__(self, *args, **kwargs):
pass
server = self._get_server(handler_class=FakeHandler, daemon=True,
idle_timeout=1, ssl_options=test_options)
sock = FakeSocket("\x16some ssl data")
def fake_select(rlist, wlist, xlist, timeout=None):
return ([sock], [], [])
class fake_create_default_context(object):
OPTIONS = 0
def __init__(self, purpose):
self.verify_mode = None
self._options = 0
def load_cert_chain(self, certfile, keyfile):
pass
def set_default_verify_paths(self):
pass
def load_verify_locations(self, cafile):
pass
def wrap_socket(self, *args, **kwargs):
pass
def get_options(self):
return self._options
def set_options(self, val):
fake_create_default_context.OPTIONS = val
options = property(get_options, set_options)
self.stubs.Set(select, 'select', fake_select)
if (hasattr(ssl, 'create_default_context')):
# for recent versions of python
self.stubs.Set(ssl, 'create_default_context', fake_create_default_context)
server.do_handshake(sock, '127.0.0.1')
self.assertEqual(fake_create_default_context.OPTIONS, test_options)
else:
# for fallback for old versions of python
            # not supported, nothing to test
pass
def test_fallback_sigchld_handler(self):
# TODO(directxman12): implement this
pass
def test_start_server_error(self):
server = self._get_server(daemon=False, ssl_only=1, idle_timeout=1)
sock = server.socket('localhost')
def fake_select(rlist, wlist, xlist, timeout=None):
raise Exception("fake error")
self.stubs.Set(websockifyserver.WebSockifyServer, 'socket',
lambda *args, **kwargs: sock)
self.stubs.Set(websockifyserver.WebSockifyServer, 'daemonize',
lambda *args, **kwargs: None)
self.stubs.Set(select, 'select', fake_select)
server.start_server()
def test_start_server_keyboardinterrupt(self):
server = self._get_server(daemon=False, ssl_only=0, idle_timeout=1)
sock = server.socket('localhost')
def fake_select(rlist, wlist, xlist, timeout=None):
raise KeyboardInterrupt
self.stubs.Set(websockifyserver.WebSockifyServer, 'socket',
lambda *args, **kwargs: sock)
self.stubs.Set(websockifyserver.WebSockifyServer, 'daemonize',
lambda *args, **kwargs: None)
self.stubs.Set(select, 'select', fake_select)
server.start_server()
def test_start_server_systemexit(self):
server = self._get_server(daemon=False, ssl_only=0, idle_timeout=1)
sock = server.socket('localhost')
def fake_select(rlist, wlist, xlist, timeout=None):
sys.exit()
self.stubs.Set(websockifyserver.WebSockifyServer, 'socket',
lambda *args, **kwargs: sock)
self.stubs.Set(websockifyserver.WebSockifyServer, 'daemonize',
lambda *args, **kwargs: None)
self.stubs.Set(select, 'select', fake_select)
server.start_server()
def test_socket_set_keepalive_options(self):
keepcnt = 12
keepidle = 34
keepintvl = 56
server = self._get_server(daemon=False, ssl_only=0, idle_timeout=1)
sock = server.socket('localhost',
tcp_keepcnt=keepcnt,
tcp_keepidle=keepidle,
tcp_keepintvl=keepintvl)
if hasattr(socket, 'TCP_KEEPCNT'):
self.assertEqual(sock.getsockopt(socket.SOL_TCP,
socket.TCP_KEEPCNT), keepcnt)
self.assertEqual(sock.getsockopt(socket.SOL_TCP,
socket.TCP_KEEPIDLE), keepidle)
self.assertEqual(sock.getsockopt(socket.SOL_TCP,
socket.TCP_KEEPINTVL), keepintvl)
sock = server.socket('localhost',
tcp_keepalive=False,
tcp_keepcnt=keepcnt,
tcp_keepidle=keepidle,
tcp_keepintvl=keepintvl)
if hasattr(socket, 'TCP_KEEPCNT'):
self.assertNotEqual(sock.getsockopt(socket.SOL_TCP,
socket.TCP_KEEPCNT), keepcnt)
self.assertNotEqual(sock.getsockopt(socket.SOL_TCP,
socket.TCP_KEEPIDLE), keepidle)
self.assertNotEqual(sock.getsockopt(socket.SOL_TCP,
socket.TCP_KEEPINTVL), keepintvl)
|
StarcoderdataPython
|
3304786
|
<reponame>kelmore5/python-json-utilities
import json as jsons
from typing import Sequence, Union, Any, Dict, List, Set, Callable, Optional, Type
from kelmore_arrays import ArrayTools as Arrays
from kelmore_arrays.arrays import Matrix
Items = Union[None, Dict[str, Any]]
JSONList = List[Dict[Any, Any]]
class DictObject:
json: Items
def __init__(self, json_item: Items):
self.json = json_item
def __str__(self):
return str(self.json)
def __repr__(self):
return str(self.json)
class JSONCheck:
@staticmethod
def equal(list_a: dict, list_b: dict) -> bool:
"""Checks if two json items are equivalent
:param list_a: A json dictionary
:param list_b: A json dictionary
:return: True if the two items are equivalent, False otherwise
"""
for key in list_a:
if key in list_b:
if list_a[key] != list_b[key]:
return False
else:
return False
return True
@staticmethod
def has_children(json: dict, include_arrays: Optional[bool] = False) -> bool:
if not isinstance(json, dict):
return False
for key in json:
if isinstance(json[key], dict) or \
(include_arrays and isinstance(json[key], list)):
return True
return False
class JSONFiles:
@staticmethod
def open(full_path: str) -> dict:
""" prec: file_name is a valid json file path
postc: opens the json file and returns it as an object"""
with open(full_path) as data_file:
return jsons.load(data_file)
@staticmethod
def save(full_path: str, json: dict) -> None:
""" prec: file_name is a valid file path, json_object is a json object
postc: saves the json_object to file_name"""
with open(full_path, 'w') as outfile:
jsons.dump(json, outfile)
class JSONLists:
@staticmethod
def keys(json_list: List[Dict[str, any]]) -> List[str]:
all_keys: Set[str] = set()
for json_item in json_list:
all_keys = all_keys | set(json_item.keys())
return list(all_keys)
@staticmethod
def reduce(json_list: Sequence[dict], keys_to_keep: Sequence[str]) -> Sequence[dict]:
"""
Removes all the keys except those in keys_to_keep from all the json items with json_list
:param json_list: (Sequence[dict]) A list of dictionary objects
:param keys_to_keep: (Sequence[str]) A list of keys to keep within each item in json_list
:return: The json_list but reduced to only the specified keys from keys_to_keep
"""
for item in json_list:
JSONTransform.reduce(item, keys_to_keep)
return json_list
@staticmethod
def remove_duplicates(json_list: List[dict]) -> List[dict]:
"""
Removes all duplicate json dictionaries in a list of json dictionaries
:param json_list: A list of json dictionaries
:return: The json list
"""
to_remove: List[int] = []
for json_idx in range(len(json_list) - 1):
json_item: dict = json_list[json_idx]
for json_idx_2, json_item_2 in enumerate(json_list[json_idx + 1:]):
if JSONCheck.equal(json_item, json_item_2):
                    to_remove.append(json_idx + 1 + json_idx_2)  # index into the full list, not the slice
return Arrays.transform.remove_indexes(json_list, to_remove)
@staticmethod
def replace_keys(json_list: List[dict],
keys_to_replace: Sequence[str],
replacement_keys: Sequence[str]) -> Sequence[dict]:
if not Arrays.check.equal_length(keys_to_replace, replacement_keys):
raise IndexError('Could not replace the json keys for the given list. '
'The length of the key arrays do not match.')
for idx, json_dict in enumerate(json_list):
json_list[idx] = JSONTransform.replace_keys(
json_dict,
keys_to_replace,
replacement_keys
)
return json_list
@staticmethod
def replace_keys_custom(json_list: Sequence[dict],
replace_function: Callable[[str], str]) -> List[dict]:
return [JSONTransform.replace_keys_custom(x, replace_function) for x in json_list]
class JSONTransform:
@staticmethod
def create(fields: Sequence[str], values: Sequence[object]) -> dict:
if not Arrays.check.equal_length(fields, values):
raise IndexError('Could not create the dictionary. '
'The length of fields and values did not match.')
output = {}
for idx, field in enumerate(fields):
output[field] = values[idx]
return output
@staticmethod
def flatten(json_item: dict,
recursive: bool = False):
if recursive:
return JSONTransform._recursive_flatten_helper(json_item)
keys: List[str] = list(json_item.keys())
for key in keys:
child: Any = json_item[key]
if isinstance(child, dict):
child: dict = json_item.pop(key)
json_item = JSONTransform.merge(json_item, child)
return json_item
@staticmethod
def intersection(json_list_1: dict,
json_list_2: dict,
fields: List[str] = None) -> dict:
"""
Takes two json dictionaries, finds the overlapping elements (aka elements that are in
both json_list_1 and json_list_2), and then adds the overlapped element to a new
json dictionary
:param json_list_1: json dictionary
:param json_list_2: json dictionary
        :param fields: optional list of keys; if given, the result is restricted to these keys
:return: The intersection between json_list_1 and json_list_2
"""
intersection: dict = {}
for key in json_list_1:
if key in json_list_2:
json_value_1: Any = json_list_1[key]
json_value_2: Any = json_list_2[key]
if json_value_2:
intersection[key] = json_value_2
else:
intersection[key] = json_value_1
if fields:
all_keys: List[str] = list(intersection.keys())
keys_to_remove: List[str] = [x for x in all_keys if x not in fields]
for key in keys_to_remove:
del intersection[key]
return intersection
@staticmethod
def matrix(matrix: Matrix,
headers: List[str] = None) -> List[dict]:
if not headers:
headers = matrix[0]
del matrix[0]
output: List[dict] = []
for row in matrix:
while len(row) < len(headers):
row.append(None)
while len(row) > len(headers):
row.pop()
output.append(JSONTransform.create(headers, row))
return output
@staticmethod
def merge(json_a: dict, json_b: dict):
return {**json_a, **json_b}
@staticmethod
def replace_keys(json: dict,
keys_to_replace: Sequence[str],
replacement_keys: Sequence[str]) -> dict:
if not Arrays.check.equal_length(keys_to_replace, replacement_keys):
raise IndexError('Could not replace the json keys for the given list. '
'The length of the key arrays do not match.')
for key_idx, key in enumerate(keys_to_replace):
if key in json:
replacement: str = replacement_keys[key_idx]
datum: Any = json.get(key)
del json[key]
json[replacement] = datum
return json
@staticmethod
def replace_keys_custom(json: dict,
replace_function: Callable[[str], str]) -> dict:
keys: List[str] = list(json.keys())
for key in keys:
new_key: str = replace_function(key)
datum: Any = json[key]
del json[key]
json[new_key] = datum
return json
@staticmethod
def reduce(json: dict,
keys_to_keep: Sequence[str]) -> dict:
keys = [x for x in json.keys() if x not in keys_to_keep]
for key in keys:
if key in json:
del json[key]
return json
@staticmethod
def remove_null_values(json_item: dict) -> dict:
keys: List[str] = list(json_item.keys())
for key in keys:
if json_item.get(key) is None:
del json_item[key]
return json_item
@staticmethod
def _recursive_flatten_helper(json_item: dict):
has_children: bool = JSONCheck.has_children(json_item)
while has_children is True:
json_item = JSONTransform.flatten(json_item)
has_children = JSONCheck.has_children(json_item)
return json_item
class JSONTools:
check: Type[JSONCheck] = JSONCheck
files: Type[JSONFiles] = JSONFiles
lists: Type[JSONLists] = JSONLists
transform: Type[JSONTransform] = JSONTransform
|
StarcoderdataPython
|
1610528
|
#!/usr/bin/python
# coding: utf-8
import sys, getopt, time, os
import numpy as np
from ratlib import *
import matplotlib.pyplot as mpl
helpmsg = """
drawh5sec.py -i rat.xvg -o rat5h.pdf -d 9:00 -n 20:53
-i Input extracted rat data
-o Output filename (PDF format)
-d Day-time start, HH:MM
-n Night-time start, HH:MM
Calculate overnight 5s -- histograms and plot them.
5.58 - conversion coefficient into m/min.
Extract rat data from the whole datafile with `extract-rat.py` before run.
"""
cset = ['-r', '-g','-b']
time2str = lambda x: \
time.strftime( '%d.%m.%Y %H:%M', time.localtime(x) )
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], 'hm:i:o:d:n:')
except getopt.error, msg:
print msg
print 'for help use -h'
sys.exit(2)
infile = outf = daytime = nighttime = ''
for o, a in opts:
if o == '-h':
print helpmsg
sys.exit(0)
elif o == '-i':
infile = a
elif o == '-o':
outf = a
elif o == '-d':
daytime = a
elif o == '-n':
nighttime = a
if infile == '' or outf == '' or daytime == '' or nighttime == '':
print 'for help use -h'
sys.exit(2)
try:
dts = daytime.split(':')
nts = nighttime.split(':')
mday = int(dts[0])*60+int(dts[1])
mnight = int(nts[0])*60+int(nts[1])
if mday > 60*24 or mnight > 60*24:
raise ValueError('Numbers in time HH:MM')
except Exception as e:
print e
print 'Problems in time definition.'
sys.exit(51)
try:
rca = dq.RotCurveAnalyzer(infile)
print 'RotCurveAnalyzer started for your data.'
except Exception as e:
print e
print 'Wrong file format. Check the last line.'
sys.exit(42)
data = rca.getData(-1,-1)
dayint,lightint,nightint,resday,resli,resni = \
rca.getFullDayNightData(mday, mday, mnight, data['t'][0], data['t'][-1])
ndays = len(nightint)
print '============='
print 'DAY-NIGHT INTERVALS:'
for i in range(min(len(nightint),len(resni)) ):
print '%s - %s -- %d' % ( time2str(nightint[i][0]), time2str(nightint[i][1]), resni[i][1] )
'''
final statistics cycle
'''
weekar = []
for i in range(ndays):
if i % 7 == 0:
weekar.append([])
night_interval_start = max(nightint[i][0], data['t'][0])
night_interval_stop = min(nightint[i][1], data['t'][-1])
_t = int (night_interval_stop - night_interval_start)
__nighttime = '%02d:%02d' % ( (_t / 3600), ( (_t % 3600) / 60 ) )
_i = data['t'].searchsorted(night_interval_start)
_j = data['t'].searchsorted(night_interval_stop)
weekar[-1] += data['v0'][_i:_j].tolist()
for i in range(len(weekar)):
weekar[i] = np.array(weekar[i])*5.58
# drawing ..
print 'Drawing ..'
fig = mpl.figure(figsize=(5,10))
mpl.subplots_adjust(top=0.9,bottom=0.1,hspace=0.15, left=0.19, right=0.95)
ax = mpl.subplot(211)
spints = np.array(range(2,20))*5.58
fd = open(outf+'.data', 'w')
fd.write(''.join(map(lambda x: '%-6.1f ' %x, spints))+'\n')
for w in range(3):
h,e = np.histogram(weekar[w], spints, normed=True)
mpl.plot(e[:-1], h, cset[w], label=str(w+1))
fd.write(''.join(map(lambda x: '%-8.5f' %x, h))+'\n')
mpl.legend(loc='upper right')
fd.close()
# mpl.legend(fontsize=10)
mpl.ylim(0,0.05)
#mpl.text(0.05, 0.9, 'Velocities', transform=ax.transAxes)
mpl.xlim(10,110)
mpl.xlabel(u'Local velocity, m/min')
mpl.ylabel(u'Velocity prob. density')
mpl.savefig(outf)
print 'done.'
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
199698
|
<filename>app/article/permissions.py
from rest_framework import permissions
from core.models import Article
class AuthorAccessPermission(permissions.BasePermission):
    def has_permission(self, request, view):
        if request.user.is_anonymous:
            return False
        return bool(request.user.is_author)

    def has_object_permission(self, request, view, obj):
        if obj.owner == request.user or request.user.is_superuser:
            return True
        return False
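# --- Illustrative usage sketch (not part of the original file) --------------
# A hedged example of how this permission class is typically wired into a DRF
# view; "ArticleViewSet" and "ArticleSerializer" are hypothetical names, not
# classes defined by this project.
#
# from rest_framework import viewsets
#
# class ArticleViewSet(viewsets.ModelViewSet):
#     queryset = Article.objects.all()
#     serializer_class = ArticleSerializer
#     permission_classes = [AuthorAccessPermission]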
|
StarcoderdataPython
|
1602277
|
from django.db import models, OperationalError
from django.urls import reverse
import os
from hashlib import sha256
from prplatform.core.models import TimeStampedModel
from prplatform.users.models import User, StudentGroup
from prplatform.courses.models import Course
from prplatform.exercises.models import SubmissionExercise, ReviewExercise
from prplatform.exercises.question_models import Question
class BaseSubmission(TimeStampedModel):
course = models.ForeignKey(Course, related_name="%(class)s_submissions", on_delete=models.CASCADE)
submitter_user = models.ForeignKey(User, related_name="%(class)s_submitters", on_delete=models.CASCADE)
submitter_group = models.ForeignKey(StudentGroup, on_delete=models.CASCADE, null=True)
class Meta:
abstract = True
ordering = ['-created']
@property
def submitter(self):
if self.submitter_group:
return self.submitter_group
else:
return self.submitter_user
def is_owner(self, user):
if self.submitter_group:
# TODO: perf, make group an optional parameter
return self.submitter_group == self.course.find_studentgroup_by_user(user)
return self.submitter_user == user
def get_absolute_url(self):
urls = {'OriginalSubmission': 'courses:submissions:original-detail',
'ReviewSubmission': 'courses:submissions:review-detail'}
base_course = self.course.base_course
return reverse(urls[self.__class__.__name__], kwargs={
'base_url_slug': base_course.url_slug,
'url_slug': self.course.url_slug,
'pk': self.exercise.pk,
'sub_pk': self.pk
})
def get_delete_url(self):
urls = {'OriginalSubmission': 'courses:submissions:original-delete',
'ReviewSubmission': 'courses:submissions:review-delete'}
return reverse(urls[self.__class__.__name__], kwargs={
'base_url_slug': self.course.base_course.url_slug,
'url_slug': self.course.url_slug,
'pk': self.exercise.pk,
'sub_pk': self.pk
})
def get_download_token_for(self, user, request):
previous_token = user.download_tokens.filter(submission_id=self.pk).first()
if previous_token:
return previous_token.token
query_bytes = request.META['QUERY_STRING'].encode()
hash_digest = sha256(query_bytes).hexdigest()
DownloadToken(submission_id=self.pk, user=user, token=hash_digest).save()
return hash_digest
def upload_fp(instance, filename):
""" This will be the filename of the uploaded file """
return f"uploads/course_{instance.course.pk}/ex_{instance.exercise.pk}/sub_{instance.pk}/{filename}"
class OriginalSubmission(BaseSubmission):
"""
This describes a submission that is done to return something
original to the service. This is not peer-reviewing.
This is something that some other person will be peer-reviewing.
"""
exercise = models.ForeignKey(SubmissionExercise, related_name="submissions", on_delete=models.CASCADE)
text = models.TextField(max_length=5000, blank=True)
file = models.FileField(upload_to=upload_fp, blank=True)
SUBMITTED = 'submitted'
BOOMERANG = 'boomerang'
READY_FOR_REVIEW = 'ready_for_review'
SUBMISSION_STATE_CHOICES = (
(SUBMITTED, 'Submitted'),
(BOOMERANG, 'Boomerang'),
(READY_FOR_REVIEW, 'Ready for review'),
)
state = models.CharField(
max_length=16,
choices=SUBMISSION_STATE_CHOICES,
default=READY_FOR_REVIEW,
)
def __str__(self):
# return f"Submitter: {self.submitter} | {self.exercise} ({str(self.created)[:16]})"
return f"Submitter: {self.submitter} | {self.exercise}"
def filecontents_or_none(self):
if self.file and os.path.splitext(self.file.name)[1] in ['.py', '.txt']:
try:
return self.file.read().decode("utf-8")
except Exception:
pass
return None
def submissions_by_same_submitter(self):
all_subs = OriginalSubmission.objects.filter(exercise=self.exercise)
if self.exercise.use_groups:
return all_subs.filter(submitter_group=self.submitter_group)
else:
return all_subs.filter(submitter_user=self.submitter_user)
def get_file_download_url(self):
return reverse('courses:submissions:download', kwargs={
'base_url_slug': self.course.base_course.url_slug,
'url_slug': self.course.url_slug,
'pk': self.pk
})
def get_update_state_url(self):
return reverse('courses:update-submission-state', kwargs={
'base_url_slug': self.course.base_course.url_slug,
'url_slug': self.course.url_slug,
'pk': self.pk
})
def save(self, *args, **kwargs):
""" Overrides the model's save method so that when a file is uploaded
its name may contain the object's PK. The PK would not be available
at the save time since the row wouldn't have been written to the DB
just yet.
"""
if self.pk is None:
uploaded_file = self.file
self.file = None
super().save(*args, **kwargs)
self.file = uploaded_file
super().save(*args, **kwargs)
class ReviewSubmission(BaseSubmission):
exercise = models.ForeignKey(ReviewExercise, related_name="submissions", on_delete=models.CASCADE)
reviewed_submission = models.ForeignKey(OriginalSubmission, related_name="reviews", on_delete=models.CASCADE)
def answers_in_ordered_list(self):
return sorted(self.answers.all(), key=lambda a: self.exercise.question_order.index(a.question.pk))
def __str__(self):
return f"{self.submitter} -> {self.reviewed_submission.submitter} | {self.exercise}"
def save_and_destroy_lock(self, *args, **kwargs):
if self.pk is not None:
raise OperationalError('This cannot be used to update the instance!')
else:
locks = self.exercise.reviewlocks_for(self.submitter_user)
if locks.count() != 1:
raise OperationalError(f'There should be exactly 1 reviewlock! Found: {locks.count()}')
locks.first().delete()
super().save(*args, **kwargs)
def answer_upload_fp(instance, filename):
""" This will be the filename of the uploaded file """
return f"uploads/course_{instance.submission.course.pk}/" + \
f"answers_{instance.submission.exercise.pk}/ans_{instance.submission.pk}/{filename}"
class Answer(models.Model):
submission = models.ForeignKey(ReviewSubmission, related_name="answers", on_delete=models.CASCADE)
question = models.ForeignKey(Question, related_name="answers", on_delete=models.CASCADE)
value_text = models.CharField(max_length=5000, blank=True, null=True)
value_choice = models.CharField(max_length=20, blank=True, null=True)
uploaded_file = models.FileField(upload_to=answer_upload_fp, blank=True)
def save(self, *args, **kwargs):
""" Overrides the model's save method so that when a file is uploaded
its name may contain the object's PK. The PK would not be available
at the save time since the row wouldn't have been written to the DB
just yet.
"""
if self.pk is None:
uploaded_file = self.uploaded_file
self.uploaded_file = None
super().save(*args, **kwargs)
self.uploaded_file = uploaded_file
super().save(*args, **kwargs)
def get_choice_question_value(self):
""" This is the original string representation from the shown question """
if not self.value_choice:
return None
return [c[1] for c in self.question.choices if c[0] == self.value_choice][0]
def get_file_download_url(self):
return reverse('courses:submissions:download', kwargs={
'base_url_slug': self.submission.course.base_course.url_slug,
'url_slug': self.submission.course.url_slug,
'pk': self.pk
}) + "?type=answer"
class DownloadToken(models.Model):
submission_id = models.PositiveIntegerField()
user = models.ForeignKey(User, related_name='download_tokens', on_delete=models.CASCADE)
token = models.CharField(max_length=64)
|
StarcoderdataPython
|
3316559
|
<gh_stars>1-10
import json
from django.contrib.auth.models import User
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import RequestsClient
from rest_framework.test import APIRequestFactory
from rest_framework.test import APITestCase
class TestAPI(APITestCase):
def test_get_users(self):
"""Test Get Users"""
url = '/api/users/'
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_create_users(self):
"""Test Create Users"""
url = '/api/users/'
response = self.client.post(url, {'username': 'test'})
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
|
StarcoderdataPython
|
4823214
|
<reponame>amartin-git/vpp-snmp-agent
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from vppstats import VPPStats
from vppapi import VPPApi
import sys
import agentx
try:
import argparse
except ImportError:
print("ERROR: install argparse manually: sudo pip install argparse")
sys.exit(2)
class MyAgent(agentx.Agent):
def setup(self):
global vppstat, vpp, logger
self.logger.info("Connecting to VPP Stats Segment")
vppstat = VPPStats(socketname='/run/vpp/stats.sock', timeout=2)
if not vppstat.connect():
self.logger.error("Can't connect to VPP Stats API, bailing")
return False
vpp = VPPApi(clientname='vpp-snmp-agent')
if not vpp.connect():
            self.logger.error("Can't connect to VPP API, bailing")
return False
self.register('1.3.6.1.2.1.2.2.1')
self.register('1.3.6.1.2.1.31.1.1.1')
return True
def update(self):
global vppstat, vpp
vppstat.connect()
vpp.connect()
ds = agentx.DataSet()
ifaces = vpp.get_ifaces()
self.logger.debug("%d VPP interfaces retrieved" % len(ifaces))
self.logger.debug("%d VPP Stats interfaces retrieved" % len(vppstat['/if/names']))
for i in range(len(vppstat['/if/names'])):
ifname = vppstat['/if/names'][i]
idx = 1000+i
ds.set('1.3.6.1.2.1.2.2.1.1.%u' % (idx), 'int', idx)
ds.set('1.3.6.1.2.1.2.2.1.2.%u' % (idx), 'str', ifname)
if ifname.startswith("loop"):
ds.set('1.3.6.1.2.1.2.2.1.3.%u' % (idx), 'int', 24) # softwareLoopback
else:
                ds.set('1.3.6.1.2.1.2.2.1.3.%u' % (idx), 'int', 6)  # ethernet-csmacd
mtu = 0
if not ifname in ifaces:
self.logger.warning("Could not get MTU for interface %s", ifname)
else:
mtu = ifaces[ifname].mtu[0]
ds.set('1.3.6.1.2.1.2.2.1.4.%u' % (idx), 'int', mtu)
speed = 0
if ifname.startswith("loop") or ifname.startswith("tap"):
speed = 1000000000
elif not ifname in ifaces:
self.logger.warning("Could not get link speed for interface %s", ifname)
else:
speed = ifaces[ifname].link_speed * 1000
if speed >= 2**32:
speed = 2**32 - 1
ds.set('1.3.6.1.2.1.2.2.1.5.%u' % (idx), 'gauge32', speed)
mac = "00:00:00:00:00:00"
if not ifname in ifaces:
self.logger.warning("Could not get PhysAddress for interface %s", ifname)
else:
mac = str(ifaces[ifname].l2_address)
ds.set('1.3.6.1.2.1.2.2.1.6.%u' % (idx), 'str', mac)
admin_status = 3 # testing
if not ifname in ifaces:
self.logger.warning("Could not get AdminStatus for interface %s", ifname)
else:
if int(ifaces[ifname].flags) & 1:
admin_status = 1 # up
else:
admin_status = 2 # down
ds.set('1.3.6.1.2.1.2.2.1.7.%u' % (idx), 'int', admin_status)
oper_status = 3 # testing
if not ifname in ifaces:
self.logger.warning("Could not get OperStatus for interface %s", ifname)
else:
if int(ifaces[ifname].flags) & 2:
oper_status = 1 # up
else:
oper_status = 2 # down
ds.set('1.3.6.1.2.1.2.2.1.8.%u' % (idx), 'int', oper_status)
ds.set('1.3.6.1.2.1.2.2.1.9.%u' % (idx), 'ticks', 0)
ds.set('1.3.6.1.2.1.2.2.1.10.%u' % (idx), 'u32', vppstat['/if/rx'][:, i].sum_octets() % 2**32)
ds.set('1.3.6.1.2.1.2.2.1.11.%u' % (idx), 'u32', vppstat['/if/rx'][:, i].sum_packets() % 2**32)
ds.set('1.3.6.1.2.1.2.2.1.12.%u' % (idx), 'u32', vppstat['/if/rx-multicast'][:, i].sum_packets() % 2**32)
ds.set('1.3.6.1.2.1.2.2.1.13.%u' % (idx), 'u32', vppstat['/if/rx-no-buf'][:, i].sum() % 2**32)
ds.set('1.3.6.1.2.1.2.2.1.14.%u' % (idx), 'u32', vppstat['/if/rx-error'][:, i].sum() % 2**32)
ds.set('1.3.6.1.2.1.2.2.1.16.%u' % (idx), 'u32', vppstat['/if/tx'][:, i].sum_octets() % 2**32)
ds.set('1.3.6.1.2.1.2.2.1.17.%u' % (idx), 'u32', vppstat['/if/tx'][:, i].sum_packets() % 2**32)
ds.set('1.3.6.1.2.1.2.2.1.18.%u' % (idx), 'u32', vppstat['/if/tx-multicast'][:, i].sum_packets() % 2**32)
ds.set('1.3.6.1.2.1.2.2.1.19.%u' % (idx), 'u32', vppstat['/if/drops'][:, i].sum() % 2**32)
ds.set('1.3.6.1.2.1.2.2.1.20.%u' % (idx), 'u32', vppstat['/if/tx-error'][:, i].sum() % 2**32)
ds.set('1.3.6.1.2.1.31.1.1.1.1.%u' % (idx), 'str', ifname)
ds.set('1.3.6.1.2.1.31.1.1.1.2.%u' % (idx), 'u32', vppstat['/if/rx-multicast'][:, i].sum_packets() % 2**32)
ds.set('1.3.6.1.2.1.31.1.1.1.3.%u' % (idx), 'u32', vppstat['/if/rx-broadcast'][:, i].sum_packets() % 2**32)
ds.set('1.3.6.1.2.1.31.1.1.1.4.%u' % (idx), 'u32', vppstat['/if/tx-multicast'][:, i].sum_packets() % 2**32)
ds.set('1.3.6.1.2.1.31.1.1.1.5.%u' % (idx), 'u32', vppstat['/if/tx-broadcast'][:, i].sum_packets() % 2**32)
ds.set('1.3.6.1.2.1.31.1.1.1.6.%u' % (idx), 'u64', vppstat['/if/rx'][:, i].sum_octets())
ds.set('1.3.6.1.2.1.31.1.1.1.7.%u' % (idx), 'u64', vppstat['/if/rx'][:, i].sum_packets())
ds.set('1.3.6.1.2.1.31.1.1.1.8.%u' % (idx), 'u64', vppstat['/if/rx-multicast'][:, i].sum_packets())
ds.set('1.3.6.1.2.1.31.1.1.1.9.%u' % (idx), 'u64', vppstat['/if/rx-broadcast'][:, i].sum_packets())
ds.set('1.3.6.1.2.1.31.1.1.1.10.%u' % (idx), 'u64', vppstat['/if/tx'][:, i].sum_octets())
ds.set('1.3.6.1.2.1.31.1.1.1.11.%u' % (idx), 'u64', vppstat['/if/tx'][:, i].sum_packets())
ds.set('1.3.6.1.2.1.31.1.1.1.12.%u' % (idx), 'u64', vppstat['/if/tx-multicast'][:, i].sum_packets())
ds.set('1.3.6.1.2.1.31.1.1.1.13.%u' % (idx), 'u64', vppstat['/if/tx-broadcast'][:, i].sum_packets())
speed = 0
if ifname.startswith("loop") or ifname.startswith("tap"):
speed = 1000
elif not ifname in ifaces:
self.logger.warning("Could not get link speed for interface %s", ifname)
else:
speed = int(ifaces[ifname].link_speed / 1000)
ds.set('1.3.6.1.2.1.31.1.1.1.15.%u' % (idx), 'gauge32', speed)
ds.set('1.3.6.1.2.1.31.1.1.1.16.%u' % (idx), 'int', 2) # Hardcode to false(2)
ds.set('1.3.6.1.2.1.31.1.1.1.17.%u' % (idx), 'int', 1) # Hardcode to true(1)
ds.set('1.3.6.1.2.1.31.1.1.1.18.%u' % (idx), 'str', ifname)
ds.set('1.3.6.1.2.1.31.1.1.1.19.%u' % (idx), 'ticks', 0) # Hardcode to Timeticks: (0) 0:00:00.00
return ds
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-a', dest='address', default="localhost:705", type=str, help="""Location of the SNMPd agent (unix-path or host:port), default localhost:705""")
parser.add_argument('-p', dest='period', type=int, default=30, help="""Period to poll VPP, default 30 (seconds)""")
parser.add_argument('-d', dest='debug', action='store_true', help="""Enable debug, default False""")
args = parser.parse_args()
if args.debug:
print("Arguments:", args)
agentx.setup_logging(debug=args.debug)
try:
a = MyAgent(server_address=args.address, period=args.period)
a.run()
except Exception as e:
print("Unhandled exception:", e)
a.stop()
except KeyboardInterrupt:
a.stop()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1753669
|
<reponame>ProzorroUKR/reports<filename>reports/tests/utils.py<gh_stars>0
# coding: utf-8
import mock
import os.path
test_data = {
"procurementMethod": "open",
"doc_type": "Tender",
"qualificationPeriod": {
"startDate": "2017-11-14T15:15:00+02:00"
},
"date": "2017-11-15T00:01:50Z",
"owner": "test",
"_id": "tender_id",
"tenderID": "UA-2017-11-30",
"dateModified": "2017-08-31T19:03:53.704712+03:00",
"tenderPeriod": {
"startDate": "2017-11-13T15:15:00+02:00",
},
"enquiryPeriod": {
"startDate": "2017-11-13T15:15:00+02:00",
},
"value": {
"currency": "UAH",
"amount": 1000,
"valueAddedTaxIncluded": False
}
}
class MockCurrencyResponce(object):
text = u'''[
{"r030":643,"txt":"Російський рубль",
"rate":2,"cc":"RUB","exchangedate":"16.05.2016"},
{"r030":978,"txt":"Євро",
"rate":2,"cc":"EUR","exchangedate":"16.05.2016"},
{"r030":840,"txt":"Долар США",
"rate":2,"cc":"USD","exchangedate":"16.05.2016"}]
'''
class CatalogApiResponce(object):
def search(self, resource, ids, fields):
if resource == "profile":
profiles = {}
for profile_id in ids:
profiles[profile_id] = {
"id": profile_id,
"access_owner": "access_owner_of_profile_{}".format(profile_id)
}
return profiles
if resource == "offer":
offers = {}
for offer_id in ids:
offers[offer_id] = {
"id": offer_id,
"relatedProduct": "relatedProduct_of_offer_".format(offer_id),
"access_owner": "access_owner_of_offer_{}".format(offer_id)
}
return offers
if resource == "product":
products = {}
for product_id in ids:
products[product_id] = {
"id": product_id,
"relatedProfile": "relatedProfile_of_product_".format(product_id),
}
return products
test_config = os.path.join(os.path.dirname(__file__), 'tests.yaml')
def get_mock_parser():
mock_parse = mock.MagicMock()
type(mock_parse.return_value).config = mock.PropertyMock(
return_value=test_config)
type(mock_parse.return_value).broker = mock.PropertyMock(
return_value='test')
type(mock_parse.return_value).period = mock.PropertyMock(
return_value=[])
type(mock_parse.return_value).kind = mock.PropertyMock(
return_value=['kind', 'general'])
type(mock_parse.return_value).status = mock.PropertyMock(
return_value={'action': '', 'statuses': ['complete', 'active']})
return mock_parse
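# --- Illustrative usage sketch (not part of the original helpers) -----------
# A hedged example of how get_mock_parser() might be used in a test; the
# patch target "reports.core.parse_args" is hypothetical and depends on where
# the real code builds its argument parser.
#
# mock_parse = get_mock_parser()
# with mock.patch('reports.core.parse_args', mock_parse):
#     options = mock_parse()
#     assert options.broker == 'test'
#     assert options.config == test_config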
|
StarcoderdataPython
|
3206014
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# basic test of vtkMarchingContourFilter
import vtk
sphere = vtk.vtkSphere()
sphere.SetCenter(127., 127., 127.)
sphere.SetRadius(100.)
sample = vtk.vtkSampleFunction()
sample.SetImplicitFunction(sphere)
sample.SetModelBounds(0., 256., 0., 255., 0., 255.)
sample.SetSampleDimensions(10, 10, 10)
#contour = vtk.vtkContourFilter()
contour = vtk.vtkMarchingContourFilter()
contour.ComputeNormalsOff()
contour.SetInputConnection(sample.GetOutputPort())
contour.SetValue(0, 0.0)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(contour.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
renderer = vtk.vtkRenderer()
renderer.AddActor(actor)
window = vtk.vtkRenderWindow()
window.AddRenderer(renderer)
interactor = vtk.vtkRenderWindowInteractor()
interactor.SetRenderWindow(window)
interactor.Initialize()
interactor.Start()
|
StarcoderdataPython
|
101755
|
<reponame>x22x22/python-ceph-cfg
# Import Python Libs
from __future__ import absolute_import
# Python imports
import os
import os.path
import platform
import logging
import shlex
import tempfile
try:
import ConfigParser
except:
import configparser as ConfigParser
# local modules
from . import constants
from . import utils
from . import util_which
log = logging.getLogger(__name__)
class Error(Exception):
"""
Error
"""
def __str__(self):
doc = self.__doc__.strip()
return ': '.join([doc] + [str(a) for a in self.args])
def _retrive_osd_details_from_dir(directory):
osd_required_files = set(["ceph_fsid", "fsid", "magic"])
osd_details = {}
dir_content = os.listdir(directory)
if not osd_required_files.issubset(dir_content):
log.debug("osd details not found in:%s" % (directory))
return None
log.debug("Reading osd details from '%s'" % (directory))
with open('%s/ceph_fsid' % (directory), 'r') as infile:
osd_details["ceph_fsid"] = infile.read().strip()
with open('%s/fsid' % (directory), 'r') as infile:
osd_details["fsid"] = infile.read().strip()
with open('%s/magic' % (directory), 'r') as infile:
osd_details["magic"] = infile.read().strip()
    # Journal uuid may not exist when the partition is reused.
path_journal_uuid = '%s/journal_uuid' % (directory)
if os.path.isfile(path_journal_uuid):
log.debug("Reading '%s'" % (path_journal_uuid))
with open('%s/journal_uuid' % (directory), 'r') as infile:
osd_details["journal_uuid"] = infile.read().strip()
# whoami may not exist when OSD has never been activated.
path_whoami = '%s/whoami' % (directory)
if os.path.isfile(path_whoami):
log.debug("Reading '%s'" % (path_whoami))
with open('%s/whoami' % (directory), 'r') as infile:
osd_details["whoami"] = infile.read().strip()
path_link = '%s/journal' % (directory)
if os.path.islink(path_link):
log.debug("Reading '%s'" % (path_link))
osd_details["dev_journal"] = os.path.realpath(path_link)
return osd_details
def retrive_osd_details(device_name):
osd_details = {}
if device_name is None:
return None
try:
tmpd = tempfile.mkdtemp()
log.info("Create temp directory %s" %(tmpd))
try:
out_mnt = utils.execute_local_command(['mount',device_name,tmpd])
if out_mnt['retcode'] == 0:
osd_details = _retrive_osd_details_from_dir(tmpd)
finally:
utils.execute_local_command(['umount',tmpd])
finally:
log.info("Destroy temp directory %s" %(tmpd))
os.rmdir(tmpd)
return osd_details
class model_updater():
"""
    Basic model updater: retrieves data and adds it to the model
"""
def __init__(self, model):
self.model = model
def defaults_hostname(self):
if self.model.hostname is None:
self.model.hostname = platform.node().split('.')[0]
def defaults_refresh(self):
# Default cluster name / uuid values
if self.model.cluster_name is None and self.model.cluster_uuid is None:
log.info("Defaulting cluster name to 'ceph'")
self.model.cluster_name = "ceph"
if self.model.cluster_name is not None and self.model.cluster_uuid is None:
self.model.cluster_uuid = utils._get_cluster_uuid_from_name(self.model.cluster_name)
log.info("From cluster name '%s' got cluster uuid '%s'" % (self.model.cluster_name, self.model.cluster_uuid))
if self.model.cluster_name is None and self.model.cluster_uuid is not None:
self.model.cluster_name = utils._get_cluster_name_from_uuid(self.model.cluster_uuid)
log.info("From cluster uuid '%s' got cluster name '%s'" % (self.model.cluster_uuid, self.model.cluster_name))
def symlinks_refresh(self):
'''
List all symlinks under /dev/disk/
'''
interesting_dirs = set(["by-path","by-id","by-uuid","by-partuuid"])
paths = {}
for root, dirs, files in os.walk("/dev/disk/"):
path_head, path_tail = os.path.split(root)
if not path_tail in interesting_dirs:
continue
for file_name in files:
file_path = os.path.join(root,file_name)
if not os.path.islink(file_path):
continue
real_path = os.path.realpath(file_path)
if not real_path in paths.keys():
paths[real_path] = []
paths[real_path].append(file_path)
self.model.symlinks = paths
def lsblk_version_refresh(self):
"""
Get lsblk version as this is older on RHEL 7.2
"""
arguments = [ util_which.which_lsblk.path, "--version" ]
output = utils.execute_local_command(arguments)
if output["retcode"] != 0:
raise Error("Failed executing '%s' Error rc=%s, stdout=%s stderr=%s" % (
" ".join(arguments),
output["retcode"],
output["stdout"],
output["stderr"]
))
version_str = output["stdout"].strip()
version_list = shlex.split(version_str)
if len(version_list) < 4:
raise Error("Failed processing lsblk version string '%s'" % (version_str))
version_split = version_list[3].split(".")
self.model.lsblk_version.major = int(version_split[0])
if len(version_split) > 1:
self.model.lsblk_version.minor = int(version_split[1])
if len(version_split) > 2:
self.model.lsblk_version.revision = int(version_split[2])
else:
self.model.lsblk_version.revision = 0
def _lsblk_arguements(self):
"""
Utility function for lsblk
"""
if self.model.lsblk_version.major is None:
self.lsblk_version_refresh()
if self.model.lsblk_version.major < 2:
raise Error("lsblk version too old '%s'" % (self.model.lsblk_version))
if self.model.lsblk_version.major == 2 and self.model.lsblk_version.minor < 23:
raise Error("lsblk version maybe too old '%s'" % (self.model.lsblk_version))
# RHEL 7.2 uses version 2.23.2
if self.model.lsblk_version.major == 2 and self.model.lsblk_version.minor < 25:
            # Note: this older lsblk does not report "PARTTYPE"
            log.warning("lsblk version is old, results may be incomplete.")
return [
"--ascii",
"--output",
"NAME,FSTYPE,MOUNTPOINT,PARTLABEL,PARTUUID,PKNAME,ROTA,RQ-SIZE,SCHED,SIZE,TYPE,UUID,VENDOR",
"--pairs",
"--paths",
"--bytes"
]
return [
"--ascii",
"--output-all",
"--pairs",
"--paths",
"--bytes"
]
def partitions_all_refresh_lsblk(self):
'''
List all partition details using lsblk
'''
part_map = {}
cmd = [ util_which.which_lsblk.path ] + self._lsblk_arguements()
output = utils.execute_local_command(cmd)
if output['retcode'] != 0:
raise Error("Failed running: lsblk --ascii --output-all")
all_parts = {}
for line in output['stdout'].split('\n'):
partition = {}
for token in shlex.split(line):
token_split = token.split("=")
if len(token_split) == 1:
continue
key = token_split[0]
value = "=".join(token_split[1:])
if len(value) == 0:
continue
partition[key] = value
part_name = partition.get("NAME")
if part_name is None:
continue
part_type = partition.get("TYPE")
if part_type == "disk":
all_parts[part_name] = partition
continue
disk_name = partition.get("PKNAME")
if not disk_name in all_parts:
continue
part_map[part_name] = disk_name
            if all_parts[disk_name].get("PARTITION") is None:
all_parts[disk_name]["PARTITION"] = {}
all_parts[disk_name]["PARTITION"][part_name] = partition
self.model.lsblk = all_parts
self.model.part_pairent = part_map
def partitions_all_refresh_parted(self):
'''
List all partition details using parted
'''
arguments = [
util_which.which_parted.path,
'-s',
'-m',
'-l',
'print'
]
output = utils.execute_local_command(arguments)
if output["retcode"] != 0:
raise Error("Failed executing '%s' Error rc=%s, stdout=%s stderr=%s" % (
" ".join(arguments),
output["retcode"],
output["stdout"],
output["stderr"]
))
lines = output["stdout"].split('\n')
lines_num = len(lines)
if lines_num == 0:
return
chunk_lines = []
chunk_end = int(lines_num - 1)
for line_num in list(reversed(range(lines_num))):
if lines[line_num] == 'BYT;':
chunk_lines.append((int(line_num), chunk_end))
chunk_end = int(line_num)
parted_dict = {}
for chunk_start,chunk_end in chunk_lines:
chunk_list = lines[chunk_start:chunk_end]
disk_line_split = chunk_list[1].split(':')
parted_dict_disk = {
'disk' : disk_line_split[0],
'size' : disk_line_split[1],
'driver' : disk_line_split[2],
'sector_size_logical' : disk_line_split[3],
'sector_size_physical' : disk_line_split[4],
'table' : disk_line_split[5],
'vendor' : disk_line_split[6],
'partition' : {}
}
for chunk_line in range(2,len(chunk_list)):
part_line = chunk_list[chunk_line]
if len(part_line) == 0:
continue
part_line_split = part_line.split(':')
part_path = disk_line_split[0] + part_line_split[0]
part_line_dict = {
'Path' : part_path,
'Number' : part_line_split[0],
'Start' : part_line_split[1],
'End' : part_line_split[2],
'Size' : part_line_split[3],
'File system' : part_line_split[4],
'Flags' : part_line_split[4].split(',')
}
parted_dict_disk['partition'][part_path] = part_line_dict
parted_dict[disk_line_split[0]] = parted_dict_disk
self.model.parted = parted_dict
def partitions_all_refresh(self):
'''
List all partition details
'''
self.partitions_all_refresh_lsblk()
self.partitions_all_refresh_parted()
def discover_partitions_refresh(self):
'''
List all OSD and journal partitions
'''
osd_all = set()
journal_all = set()
osd_details = {}
for diskname in self.model.lsblk.keys():
disk = self.model.lsblk.get(diskname)
if disk is None:
continue
part_struct = disk.get("PARTITION")
if part_struct is None:
continue
for partname in part_struct.keys():
part_details = part_struct.get(partname)
if part_details is None:
continue
mount_point = part_details.get("MOUNTPOINT")
if mount_point == '[SWAP]':
continue
if mount_point is not None:
osd_md = _retrive_osd_details_from_dir(mount_point)
if osd_md is not None:
osd_details[partname] = osd_md
part_type = part_details.get("PARTTYPE")
if part_type == constants.OSD_UUID:
osd_all.add(partname)
if mount_point is not None:
continue
osd_md = retrive_osd_details(partname)
if osd_md is not None:
osd_details[partname] = osd_md
continue
if part_type == constants.JOURNAL_UUID:
journal_all.add(partname)
continue
if mount_point is not None:
continue
fs_type = part_details.get("FSTYPE")
if fs_type is None:
continue
if not fs_type in ['xfs', 'btrfs', 'ext4']:
continue
osd_md = retrive_osd_details(partname)
if osd_md is not None:
osd_details[partname] = osd_md
# Now we combine our data to find incorrectly labeled OSD's
# and build osd data structure discovered_osd
discovered_osd = {}
for osd_dev_data in osd_details.keys():
# Agregate data into osd_all.
osd_all.add(osd_dev_data)
osd_md = osd_details.get(osd_dev_data)
if osd_md is None:
continue
# Agregate data into journal_all.
osd_dev_journel_raw = osd_md.get("dev_journal")
if osd_dev_journel_raw is not None:
journal_all.add(osd_dev_journel_raw)
osd_md["dev"] = osd_dev_data
disk_name = self.model.part_pairent.get(osd_dev_data)
if disk_name is not None:
osd_md["dev_parent"] = disk_name
ceph_fsid = osd_md.get("ceph_fsid")
if not ceph_fsid in discovered_osd.keys():
discovered_osd[ceph_fsid] = []
discovered_osd[ceph_fsid].append(osd_md)
self.model.partitions_osd = osd_all
self.model.partitions_journal = journal_all
self.model.discovered_osd = discovered_osd
def load_confg(self, cluster_name):
configfile = "/etc/ceph/%s.conf" % (cluster_name)
if not os.path.isfile(configfile):
raise Error("Cluster confg file does not exist:'%s'" % configfile)
self.model.ceph_conf.read(configfile)
def mon_members_refresh(self):
try:
mon_initial_members_name_raw = self.model.ceph_conf.get("global","mon_initial_members")
except ConfigParser.NoSectionError:
raise Error("Cluster config file does not have a [global] section")
except ConfigParser.NoOptionError:
raise Error("Cluster config file does not set mon_initial_members")
mon_initial_members_name_cleaned = []
for mon_split in mon_initial_members_name_raw.split(","):
mon_initial_members_name_cleaned.append(mon_split.strip())
try:
mon_initial_members_addr_raw = self.model.ceph_conf.get("global","mon_host")
except ConfigParser.NoOptionError:
raise Error("Cluster config file does not set mon_host")
mon_initial_members_addr_cleaned = []
for mon_split in mon_initial_members_addr_raw.split(","):
mon_initial_members_addr_cleaned.append(mon_split.strip())
if len(mon_initial_members_name_cleaned) != len(mon_initial_members_addr_cleaned):
raise Error("config has different numbers of mon 'names' and ip addresses")
output = []
mon_initial_members_name_len = len(mon_initial_members_name_cleaned)
for idx in range(0,mon_initial_members_name_len):
output.append((
mon_initial_members_name_cleaned[idx],
mon_initial_members_addr_cleaned[idx]
))
self.model.mon_members = output
def ceph_version_refresh(self):
arguments = [
"ceph",
"--version"
]
output = utils.execute_local_command(arguments)
if output["retcode"] != 0:
raise Error("Failed executing '%s' Error rc=%s, stdout=%s stderr=%s" % (
" ".join(arguments),
output["retcode"],
output["stdout"],
output["stderr"]))
version_raw = output["stdout"].strip()
version_raw_split = shlex.split(version_raw)
        if len(version_raw_split) not in (4, 6):
raise Error("ceph returned an invalid version:'%s' " % (version_raw))
if version_raw_split[0] != "ceph":
raise Error("ceph returned an invalid version first value is not ceph:'%s' " % (version_raw))
if version_raw_split[1] != "version":
raise Error("ceph returned an invalid version second value is not 'version':'%s' " % (version_raw))
version_public_raw = version_raw_split[2]
version_public = version_public_raw.split(".")
if len(version_public) < 3:
raise Error("ceph returned an invalid version second value is not 'version':'%s' " % (version_raw))
self.model.ceph_version.major = int(version_public[0])
self.model.ceph_version.minor = int(version_public[1])
self.model.ceph_version.revision = ".".join(version_public[2:])
self.model.ceph_version.uuid = version_raw_split[3].strip("()")
|
StarcoderdataPython
|
137414
|
import errno
import json
import os
import time
from packaging import version
from . import __version__, settings, utils
def get_global_config_path():
old_path = os.path.join(os.path.expanduser("~"), settings.ALDRYN_DOT_FILE)
if os.path.exists(old_path):
return old_path
else:
return settings.DIVIO_GLOBAL_CONFIG_FILE
class Config(object):
config = {}
def __init__(self):
super(Config, self).__init__()
self.config_path = get_global_config_path()
self.read()
def read(self):
try:
with open(self.config_path, "r") as fh:
config = json.load(fh)
except IOError:
# file doesn't exist
config = {}
except ValueError:
# invalid config
config = {}
self.config = config
def save(self):
# Create folders if they don't exist yet.
if not os.path.exists(os.path.dirname(self.config_path)):
try:
os.makedirs(os.path.dirname(self.config_path))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
with open(self.config_path, "w+") as fh:
json.dump(self.config, fh)
def check_for_updates(self, force=False):
"""check for updates daily"""
if self.config.get("disable_update_check", False) and not force:
return
timestamp_key = "update_check_timestamp"
version_key = "update_check_version"
last_checked = self.config.get(timestamp_key, None)
now = int(time.time())
installed_version = version.parse(__version__)
pypi_error = None
if force or not last_checked or last_checked < now - (60 * 60 * 24):
# try to access PyPI to get the latest available version
remote_version, pypi_error = utils.get_latest_version_from_pypi()
if remote_version:
if remote_version > installed_version:
self.config[version_key] = str(remote_version)
self.config[timestamp_key] = now
self.save()
elif remote_version is False:
# fail silently, nothing the user can do about this
self.config.pop(version_key, None)
newest_version_s = self.config.get(version_key, None)
newest_version = None
if newest_version_s:
newest_version = version.parse(newest_version_s)
if newest_version <= installed_version:
self.config.pop(version_key)
self.save()
return dict(
current=__version__,
            remote=str(newest_version) if newest_version else None,
update_available=(
newest_version > installed_version if newest_version else False
),
pypi_error=pypi_error,
)
def skip_doctor(self):
return self.config.get("skip_doctor")
def get_skip_doctor_checks(self):
checks = self.config.get("skip_doctor_checks")
if not checks or not isinstance(checks, list):
return []
return checks
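# --- Illustrative usage sketch (not part of the original module) ------------
# A minimal, hedged example of how a CLI command might use this Config object;
# it assumes network access for the PyPI check and is illustrative only.
#
# config = Config()
# update_info = config.check_for_updates()
# if update_info and update_info["update_available"]:
#     print("A newer version is available:", update_info["remote"])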
|
StarcoderdataPython
|
1792123
|
<filename>migrations/versions/f3c80e79066f_.py
"""empty message
Revision ID: f3c80e79066f
Revises: <PASSWORD>
Create Date: 2019-06-05 20:12:49.715771
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'f3c80e79066f'
down_revision = 'f<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('asistencias', 'asistio',
existing_type=mysql.TINYINT(display_width=1),
type_=sa.Boolean(),
existing_nullable=False)
op.alter_column('miembros', 'hoja_firmada',
existing_type=mysql.TINYINT(display_width=1),
type_=sa.Boolean(),
existing_nullable=True)
op.alter_column('seguimientos', 'fecha_seg',
existing_type=mysql.DATETIME(),
type_=sa.Date(),
existing_nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('seguimientos', 'fecha_seg',
existing_type=sa.Date(),
type_=mysql.DATETIME(),
existing_nullable=False)
op.alter_column('miembros', 'hoja_firmada',
existing_type=sa.Boolean(),
type_=mysql.TINYINT(display_width=1),
existing_nullable=True)
op.alter_column('asistencias', 'asistio',
existing_type=sa.Boolean(),
type_=mysql.TINYINT(display_width=1),
existing_nullable=False)
# ### end Alembic commands ###
|
StarcoderdataPython
|
117895
|
<filename>script/cardiff_20_20/read_data.py<gh_stars>1-10
import os
from os import path
import pathlib
import matplotlib.pyplot as plt
import compound_poisson
import dataset
def main():
path_here = pathlib.Path(__file__).parent.absolute()
figure_dir = path.join(path_here, "figure")
if not path.isdir(figure_dir):
os.mkdir(figure_dir)
cardiff = dataset.CardiffTraining()
model_field = cardiff.model_field
rain = cardiff.rain
time_series = compound_poisson.TimeSeries(model_field, rain)
time_series.time_array = cardiff.time_array
time_series.print_figures(figure_dir)
for i in range(time_series.n_model_field):
plt.figure()
plt.plot(time_series.time_array, time_series.x[:, i])
plt.xlabel("Time")
plt.ylabel(time_series.model_field_name[i])
plt.savefig(path.join(figure_dir, "model_field_" + str(i) + ".pdf"))
plt.close()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
76812
|
<reponame>gbudd3/spectre-api-python
#!/usr/local/bin/python3
"""
The spectre module is used to make access to Lumeta's Spectre API
a little easier (Lumeta and Spectre are trademarks of the Lumeta Corporation).
"""
import requests
import urllib3
import spectreapi
import json
from typing import Optional, List, Iterable
class Server:
"""
A Server is used to make API Calls to a Lumeta Spectre(r) server
It's not meant to be instantiated directly, use one of its
subclasses (e.g. UsernameServer or APIKeyServer) based on how we're
authenticating to the Spectre Command Center in question.
"""
def __init__(self, server, page_size=500, verify_cert=False):
self.session = requests.Session()
self.session.verify = False
self.page_size = page_size
self.url = "https://" + server + "/api/rest/"
self._host = server
self._version = None
self.session.timeout = 1
if verify_cert is False:
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
@property
def host(self) -> str:
'''Returns the server name (or IP) specified in the constructor'''
return self._host
@property
def name(self) -> str:
'''Returns the server name '''
return self._name
@property
def version(self) -> str:
'''Returns the version of the Spectre server we're talking with (as reported by that server)'''
return self._version
def close(self):
'''
It's not _required_ to close a Server, but if you don't, you might
hold tcp sockets open (the underlying requests and urllib3 modules
hold them for keepalive purposes)
'''
self.session.close()
def post(self, api, **kargs) -> requests.Response:
"""
This method POSTs to the Spectre API
>>> import spectreapi
>>> s = spectreapi.UsernameServer("server", "username", "password")
>>> data = '''
... [{
... "@class":"zone",
... "name":"Twilight",
... "description": "Zone to Test Scanning",
... "organization":{"id":1, "name":"Test Organization"}
... }]
... '''
>>> r = s.post("zone", data=data)
>>> r.json()['status']
'SUCCESS'
"""
if 'headers' not in kargs:
kargs['headers'] = {'Accept': 'json:pretty', 'Content-Type': 'application/json'}
result = self.session.post(self.url + api, **kargs)
if not result.ok:
raise APIException(result)
return result
def raw_post(self, api, **kargs) -> requests.Response:
"""
This method POSTs to the Spectre API but _doesn't_ set any headers.
        This is so we can make things like file uploads work (because they seem to
        require XML for reasons as yet mysterious). For example, this is currently the only
        way to set up the SNMP server:
data='''<set_snmpd_request>
<SNMPDaemonConfig>
<readOnlyCommunity>
<community>public</community>
</readOnlyCommunity>
</SNMPDaemonConfig>
</set_snmpd_request>
'''
server.raw_post('management/snmpd',data=data,headers={'Content-Type':'application/xml'})
"""
result = self.session.post(self.url + api, **kargs)
if not result.ok:
raise APIException(result)
return result
def put(self, api, **kargs):
'''Method PUTs through to the server'''
if 'headers' not in kargs:
kargs['headers'] = {'Accept': 'json:pretty', 'Content-Type': 'application/json'}
result = self.session.put(self.url + api, **kargs)
if not result.ok:
raise APIException(result)
return result
def delete(self, api, **kargs) -> requests.Response:
'''Method sends DELETEs through to server'''
if 'headers' not in kargs:
kargs['headers'] = {'Accept': 'json:pretty', 'Content-Type': 'application/json'}
result = self.session.delete(self.url + api, **kargs)
if not result.ok:
raise APIException(result)
return result
def getpage(self, api, params=None, page=0, headers=None) -> requests.Response:
"""
This private method is in place to handle the actual
fetching of GET API calls
"""
if params is None:
params = {"query.pagesize": self.page_size}
if headers is None:
headers = {'Accept': 'json:pretty', 'Content-Type': 'application/json'}
params["query.pagesize"] = self.page_size
params["query.page"] = page
results = self.session.get(self.url+api, params=params, timeout=120, headers=headers)
if not results.ok:
print(results.text)
raise APIException(results)
return results
def get(self, api, params=None) -> Iterable['spectreapi.Response']:
"""
Use this method to GET results from an API call and produce
an iterable response
>>> import spectreapi
>>> s=spectreapi.UsernameServer('server','username','password')
>>> s.get('zone').results.json()
{'@class': 'apiresponse', 'status': 'SUCCESS', 'method': 'ZoneManagement.getZones', 'total': 2, 'results': [{'@class': 'zone', 'id': 2, 'name': 'Twilight', 'description': 'Zone to Test Scanning'}, {'@class': 'zone', 'id': 1, 'name': 'Zone1', 'description': 'Default Zone'}]}
>>> r = s.get('zone')
>>> for z in r:
... print(z)
...
{'@class': 'zone', 'id': 2, 'name': 'Twilight', 'description': 'Zone to Test Scanning'}
{'@class': 'zone', 'id': 1, 'name': 'Zone1', 'description': 'Default Zone'}
>>>
"""
return spectreapi.Response(self, api, params)
def query(self, api="zonedata/devices"):
"""
>>> import spectreapi
>>> s=spectreapi.UsernameServer('server','username','password')
>>> q=s.query().filter('zone.id',2).detail('Attributes')
>>> for d in q.run():
... print(d)
"""
return Query(self, api)
def get_zones(self) -> List['spectreapi.Zone']:
'''Returns all the Zones configured on the server'''
zones = []
result = self.get('zone')
for zone in result:
zones.append(spectreapi.Zone(zone['id'], zone['name'], zone['description'], server=self))
return zones
def get_zone_by_name(self, name) -> Optional['spectreapi.Zone']:
'''Returns the Zone configured on the server named <name> (if present)'''
results = self.get('zone')
for zone in results:
if zone['name'] == name:
return spectreapi.Zone(zone['id'], zone['name'], zone['description'], server=self)
return None
def get_or_create_zone(self, name, description="Test Zone", organization={"id": 1, "name": "Test Organization"}):
zone = self.get_zone_by_name(name)
if zone:
return zone
data = [{ "@class" : "zone",
"name" : name,
"description" : description,
"organization": organization
}]
r = self.post("zone", data=json.dumps(data))
zone = self.get_zone_by_name(name)
return zone
def get_collectors(self) -> List['spectreapi.Collector']:
'''Returns the Collectors configured on the server'''
collectors = []
results = self.get('zone/collector')
for collector in results:
collectors.append(spectreapi.Collector(
collector['id'],
collector['uuid'],
collector['name'],
spectreapi.Zone(collector['zone']['id'], collector['zone']['name']),
server=self,
))
return collectors
def get_collector_by_name(self, name) -> Optional['spectreapi.Collector']:
'''Returns the Collector configured on the server named <name> (if present)'''
results = self.get('zone/collector')
for collector in results:
if collector['name'] == name:
return spectreapi.Collector(
collector['id'],
collector['uuid'],
collector['name'],
spectreapi.Zone(collector['zone']['id'], collector['zone']['name']),
server=self,
)
return None
class Response():
"""
This class is used to present the results of a "GET" API call
It handles iterating through the results and fetching pages as
needed from the server
"""
def __init__(self, server, api, params):
self.server = server
self.api = api
self.params = params
self.page = 0
self.page_line = 0
self.results = self.server.getpage(api, params, page=self.page)
if "total" in self.results.json():
self.total = self.results.json()['total']
else:
self.total = 1
def rewind(self):
'''Used to reset state after iterating over results'''
self.page = 0
self.page_line = 0
self.results = self.server.getpage(
self.api, self.params, page=self.page)
def __iter__(self):
return self
def __next__(self):
'''This facilitates being able to iterate over the results of a GET'''
if self.page * self.server.page_size + self.page_line == self.total:
self.rewind()
raise StopIteration
if self.page_line < self.server.page_size:
self.page_line += 1
try:
return self.results.json()['results'][self.page_line - 1]
except IndexError:
self.rewind()
raise StopIteration # This could happen if the underlying query shrinks under us
else:
self.page_line = 1
self.page += 1
self.results = self.server.getpage(
self.api, self.params, page=self.page)
try:
return self.results.json()['results'][0]
except IndexError:
self.rewind()
raise StopIteration # This could happen if the underlying query shrinks under us
@property
def result(self):
"""Return result 0 (the only result for singletons"""
return self.values()[0]
def value(self):
"""Return value 0 (the only value for singletons (replaces result())"""
return self.results.json()['results'][0]
def values(self):
"""Return the values from the API call"""
return self.results.json()['results']
class APIKeyServer(Server):
"""
An APIKeyServer is a Server that uses authentication via API key.
You get an API key from the CLI via the "user key new <username>" command
"""
def __init__(self, server, api_key, page_size=500, verify_cert=False):
"""
APIKeyServer(server,api_key) where
server is the Spectre server you're connecting to and
api_key is the API key you've generated
>>> import spectreapi
>>> s = APIKeyServer("i3", api_key="<KEY> \
<KEY>"+ \
"<KEY>")
>>> r = s.getpage("system/information")
>>> r.json()['status']
'SUCCESS'
>>> r.json()['results'][0]['name']
'i3'
"""
super().__init__(server, page_size=page_size, verify_cert=verify_cert)
self.session.headers['Authorization'] = "Bearer " + api_key
results = self.get("system/information")
self._version = results.result['version']
self._name = results.result['name']
class UsernameServer(Server):
"""
This Server uses username and password authentication for the initial
request, and then uses a session cookie from there out
"""
def __init__(self, server, username, password, page_size=500, verify_cert=False):
super().__init__(server, page_size=page_size, verify_cert=verify_cert)
auth = requests.auth.HTTPBasicAuth(username, password)
headers = {'Accept': 'json:pretty', 'Content-Type': 'application/json'}
results = requests.get(self.url + "system/information", headers=headers, verify=False, auth=auth)
self._version = results.json()['results'][0]['version']
self._name = results.json()['results'][0]['name']
self.session.cookies = results.cookies
class Query:
"""
Class to store the state around a GET request from a server
>>> import spectreapi
>>> s=spectreapi.UsernameServer('server','username','password')
>>> q=s.query().filter('zone.id',2).detail('Attributes')
>>> for d in q.run():
... print(d)
"""
    def __init__(self, server, api):
        """
        Set up a query for a server with api call <api>
        """
self.server = server
self.api = api
self.params = {}
def run(self) -> Iterable['spectreapi.Response']:
"""
Go ahead and execute the query, return the results
"""
return self.server.get(self.api, self.params)
def filter(self, name, value=True) -> 'spectreapi.Query':
"""
Add a filter to the query
"""
self.params['filter.' + name] = value
return self
def detail(self, name) -> 'spectreapi.Query':
"""
Add a detail to the query
"""
self.params['detail.' + name] = True
return self
class SpectreException(Exception):
'''General Base Spectre exception'''
pass
class NoServerException(SpectreException):
    '''Spectre exception for when we call a
    method that needs a server but we don't have one'''
pass
class InvalidArgument(SpectreException):
'''Invalid argument'''
pass
class APIException(SpectreException):
'''We got an exception back from the Spectre API call'''
def __init__(self, request):
super().__init__()
self.request = request
def __str__(self):
return self.request.text
|
StarcoderdataPython
|
35002
|
from src.uint8 import uint8
def test_constructor1():
assert int(uint8(20)) == 20
def test_constructor2():
assert uint8(256) == uint8(0)
assert uint8(260) == uint8(4)
assert uint8(-1) == uint8(255)
assert uint8(-5) == uint8(251)
assert uint8(-5) != uint8(252)
def test_add_other():
assert (uint8(50), 0) == uint8(20) + uint8(30)
assert (uint8(5), 1) == uint8(250) + uint8(11)
def test_add_int():
assert (uint8(50), 0) == uint8(20) + 30
assert (uint8(5), 1) == uint8(250) + 11
assert (uint8(251), 1) == uint8(1) + -6
def test_sub_other():
assert (uint8(246), 1) == uint8(20) - uint8(30)
assert (uint8(239), 0) == uint8(250) - uint8(11)
def test_sub_int():
assert (uint8(246), 1) == uint8(20) - 30
assert (uint8(239), 0) == uint8(250) - 11
assert (uint8(7), 0) == uint8(1) - -6
def test_and_other():
assert uint8(24) == uint8(31) & uint8(24)
assert uint8(0) == uint8(17) & uint8(12)
val = uint8(31)
val &= uint8(24)
assert val == uint8(24)
def test_and_int():
assert uint8(24) == uint8(31) & 24
assert uint8(0) == uint8(17) & 12
val = uint8(31)
val &= 24
assert val == uint8(24)
def test_eq_int():
assert uint8(42) == 42
assert uint8(256) == 0
assert uint8(-1) == 255
def test_mod():
assert uint8(32) % 2 == 0
assert uint8(5) % uint8(2) == 1
def test_lshift():
assert (uint8(0), 1) == uint8(128) << 1
assert (uint8(128), 1) == uint8(192) << 1
assert (uint8(64), 0) == uint8(32) << 1
def test_getitem():
assert uint8(16)[4] == 1
assert uint8(16)[5] == 0
for i in range(8):
assert uint8(255)[i] == 1
assert uint8(0)[i] == 0
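# --- Illustrative sketch (not part of the original test file) ---------------
# The tests above imply an interface for src/uint8.py roughly like the class
# below: arithmetic wraps modulo 256 and returns a (value, carry/borrow)
# tuple, while bitwise ops return plain values.  This is an assumption
# reconstructed from the assertions, not the project's actual implementation.
class _Uint8Sketch:
    def __init__(self, value):
        # Wrap into the 0..255 range on construction.
        self.value = value % 256

    def __int__(self):
        return self.value

    def __eq__(self, other):
        if isinstance(other, _Uint8Sketch):
            return self.value == other.value
        return self.value == other % 256

    def __add__(self, other):
        raw = self.value + int(other)
        return _Uint8Sketch(raw), 0 if 0 <= raw < 256 else 1

    def __sub__(self, other):
        raw = self.value - int(other)
        return _Uint8Sketch(raw), 0 if 0 <= raw < 256 else 1

    def __and__(self, other):
        return _Uint8Sketch(self.value & int(other))

    def __mod__(self, other):
        return _Uint8Sketch(self.value % int(other))

    def __lshift__(self, bits):
        raw = self.value << bits
        return _Uint8Sketch(raw), 0 if raw < 256 else 1

    def __getitem__(self, bit):
        # Return the value of the requested bit (bit 0 is the LSB).
        return (self.value >> bit) & 1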
|
StarcoderdataPython
|
1661254
|
# -*- coding: utf-8 -*-
from app.common.http_methods import post_request, get_request
from app.common.target_urls import GENERIC_MISSION_PAGE, MISSION_STOPOVER
from app.missions.missionparser import parse_all_missions_in_page, get_country_list, parse_stopover
def list_missions(mission_type, countries_list):
result = []
for country_id in countries_list:
page = post_request(GENERIC_MISSION_PAGE.format(mission_type=mission_type), {u'id_pays': country_id})
countries_missions = parse_all_missions_in_page(page, country_id)
for mission in countries_missions:
if mission['stopover']:
stopover_html = get_request(MISSION_STOPOVER.format(mission_id=mission['mission_nb']))
mission['stopover'] = parse_stopover(stopover_html)
result += countries_missions
return result
def list_dest_countries_id_by_mission_type(missions_type):
""" returns the list of countries available for each mission_type """
result = {}
for i in missions_type:
html_page = get_request(GENERIC_MISSION_PAGE.format(mission_type=i))
result[i] = get_country_list(html_page)
import itertools
print("{} requests to be sent".format(len(list(itertools.chain.from_iterable(result.values())))))
return result
|
StarcoderdataPython
|
1628202
|
<reponame>kislam01/skelebot
import argparse
import unittest
from unittest import mock
import skelebot as sb
class TestPrime(unittest.TestCase):
def test_addParsers(self):
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
subparsers = parser.add_subparsers(dest="prime")
prime = sb.components.prime.Prime()
subparsers = prime.addParsers(subparsers)
self.assertNotEqual(subparsers.choices["prime"], None)
@mock.patch('skelebot.components.prime.docker')
def test_execute(self, mock_docker):
mock_docker.build.return_value = 0
mock_docker.save.return_value = 0
config = sb.objects.config.Config()
args = argparse.Namespace(output=None)
prime = sb.components.prime.Prime()
prime.execute(config, args)
mock_docker.build.assert_called_with(config)
@mock.patch('skelebot.components.prime.docker')
def test_execute_output(self, mock_docker):
mock_docker.build.return_value = 0
mock_docker.save.return_value = 0
config = sb.objects.config.Config()
args = argparse.Namespace(output="my-image.img")
prime = sb.components.prime.Prime()
prime.execute(config, args)
mock_docker.build.assert_called_with(config)
mock_docker.save.assert_called_with(config, "my-image.img")
@mock.patch('skelebot.components.prime.docker')
def test_execute_exception(self, mock_docker):
mock_docker.build.return_value = 0
mock_docker.save.return_value = 1
config = sb.objects.config.Config()
args = argparse.Namespace(output="my-image.img")
prime = sb.components.prime.Prime()
try:
prime.execute(config, args)
self.fail("Exception Expected")
except Exception as exc:
self.assertEqual(str(exc), "Priming Failed")
mock_docker.build.assert_called_with(config)
mock_docker.save.assert_called_with(config, "my-image.img")
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
1750876
|
<reponame>fmitch/incubator-tvm<gh_stars>0
import logging
import time
import sys
import os
import numpy as np
from multiprocessing import Pool, cpu_count
import random
import string
from tensors import *
import pickle
import tvm
import topi
from topi.testing import conv2d_nchw_python
from tvm import te
from tvm import autotvm
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner, DataVolumeTuner
import tvm.contrib.graph_runtime as runtime
#from tvm.autotvm.task.topi_integration import deserialize_args
from collections import namedtuple
from itertools import permutations
import argparse
#import logging
#logging.getLogger('autotvm').setLevel(logging.DEBUG)
#logging.getLogger('autotvm').addHandler(logging.StreamHandler(sys.stdout))
global num_threads
num_threads = 32
os.environ["TVM_NUM_THREADS"] = str(num_threads)
letters = string.digits + string.ascii_letters
def get_matmul_dv(ind):
config = task.config_space.get(ind)
d_foot, d_vol = autotvm.tuner.data_volume_estimator.estimate_dv(*get_matmul_extents_info(M,N,K,config,matmul_index))
return -1*(d_vol[2][:,:,-1].sum(axis=0) * np.array([64/100e9, 64/44e9, 64/25e9])).sum()
def concurrency_ratio(ind):
config = task.config_space.get(ind)
mo_value = np.ceil(M / config['tile_m'].size[-1])
no_value = np.ceil(N / config['tile_n'].size[-1])
concurrency = mo_value * no_value
return np.floor(concurrency/num_threads) / np.ceil(concurrency/num_threads)
def get_dv(ind):
config = task.config_space.get(ind)
d_foot, d_vol = autotvm.tuner.data_volume_estimator.estimate_dv(*get_extents_info(config))
return -1*(d_vol[2][:,:,-1].sum(axis=0) * np.array([64/100e9, 64/44e9, 64/25e9])).sum()
def limited_test(ind):
tic = time.time()
lower_llvm_limit = 1
upper_llvm_limit = 2
lower_asm_limit = 0.5
upper_asm_limit = 2
results = []
config = task.config_space.get(ind)
with autotvm.ApplyConfig(config):
with tvm.target.create("llvm -mcpu=core-avx2"):
s, arg_bufs = task.func(*task.args)
op_func = tvm.build(s, arg_bufs)
build_time = time.time() - tic
ll_source = op_func.get_source()
funcs = ll_source.split('\n\n')
llvm_opint = 0
asm_opint = 0
length = 0
for func in funcs:
if 'fmuladd.v' in func and len(func) > length:
length = len(func)
longest = func
loads = 0
stores = 0
fmas = 0
if length > 0:
lines = longest.split('\n')
for line in lines:
if 'load <' in line:
loads += 1
elif 'store <' in line:
stores += 1
elif 'fmuladd.v8' in line:
fmas += 1
if loads+stores > 0:
llvm_opint = fmas / (loads+stores)
if llvm_opint >= lower_llvm_limit and llvm_opint <= upper_llvm_limit:
tic = time.time()
asm_source = op_func.get_source('asm')
asm_time = time.time() - tic
funcs = asm_source.split(':\n')
length = 0
for func in funcs:
if 'vfmadd' in func and len(func) > length:
length = len(func)
longest = func
moves = 0
fmas = 0
if length > 0:
lines = longest.split('\n')
for line in lines:
if 'vmov' in line and 'ymm' in line:
moves += 1
elif 'vfmadd' in line and 'ymm' in line:
fmas += 1
if '(%r' in line:
moves += 1
if moves > 0:
asm_opint = fmas / moves
if asm_opint >= lower_asm_limit and asm_opint <= upper_asm_limit:
module_file = os.path.join('/tmp/', ''.join(random.choice(letters) for i in range(10)) + '.o')
op_func.save(module_file)
return module_file, llvm_opint, asm_opint, ind,build_time, asm_time
return '', llvm_opint, asm_opint, ind, build_time, 0
def eval_time(ind, module_file):
    config = task.config_space.get(ind)
    with autotvm.ApplyConfig(config):
        with tvm.target.create("llvm -mcpu=core-avx2"):
            s, arg_bufs = task.func(*task.args)
    func = tvm.runtime.load_module(module_file)
    # Random square inputs: this script always tunes with M == N == K, so (N, N)
    # shapes match the generated kernel.
    a_np = np.random.uniform(size=(N, N))
    b_np = np.random.uniform(size=(N, N))
    c_np = np.zeros((N, N))
    ctx = tvm.cpu()
    a_tvm = tvm.nd.array(a_np.astype(np.float32), ctx=ctx)
    b_tvm = tvm.nd.array(b_np.astype(np.float32), ctx=ctx)
    c_tvm = tvm.nd.array(c_np.astype(np.float32), ctx=ctx)
    evaluator = func.time_evaluator(func.entry_name, ctx, repeat=10, number=4)
    # Keep the 5 fastest of 10 repeats and re-measure until their variation is < 5%.
    variation = 1
    while variation > 0.05:
        res = np.array(sorted(evaluator(a_tvm, b_tvm, c_tvm).results)[:-5])
        variation = res.std() / res.mean()
    #if tuple(arg_bufs[1].shape) == b_tvm.shape:
    #    res = evaluator(c_tvm, b_tvm, a_tvm)
    #else:
    #    res = evaluator(c_tvm, a_tvm, b_tvm)
    # Return the trimmed per-repeat timings (not just their mean) so the summary
    # branch in tune_kernels() can average them with mean(axis=1).
    return res, ind
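# tune_kernels() drives the search: it prunes the schedule space with the
# concurrency-ratio and data-volume models, builds the survivors, filters them by
# operational intensity, and times the remainder on hardware.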
def tune_kernels(args, trials, cr_limit):
    func_create = 'template/matmul'
    global task
    task = autotvm.task.create(func_create,
                               args=(M, N, K, matmul_index, 'float32'),
                               target='llvm -mcpu=core-avx2')
    print(task.config_space)
    outer_trials = min(int(1e9), len(task.config_space))
    trials = min(trials, len(task.config_space))
    pickle_file = 'data/matmul/perm%.2f_timed_asm_matmul%i_%s_%icore_%i.pkl' % (cr_limit, matmul_index, N, num_threads, trials)
    # If results already exist for this configuration, print a summary of the ten
    # best-timed schedules and return instead of re-tuning.
    if os.path.exists(pickle_file):
        print('File exists', pickle_file)
        with open(pickle_file, 'rb') as fi:
            inds, res, dv, res_times, asm, llvm = pickle.load(fi)
        best = np.array(res).mean(axis=1).argsort()
        inds = np.array(inds)
        cr = np.array([concurrency_ratio(ind) for ind in inds])
        res = np.array(res).mean(axis=1)
        print(res[best[:10]])
        print(np.array(asm)[best[:10]])
        print(np.array(llvm)[best[:10]])
        print(cr[best[:10]])
        #for ind in inds[best[:10]]:
        #    print(task.config_space.get(ind))
        return
    pool_threads = 80  # cpu_count()
    #configs = np.random.choice(len(task.config_space), size=outer_trials, replace=False)
    configs = range(outer_trials)
    print('Running Data Volume model...')
    tic = time.time()
    # Drop configurations whose outer-tile concurrency would leave cores idle.
    with Pool(pool_threads) as p:
        cr = p.map(concurrency_ratio, configs)
    print('CR for %i configs: %f' % (len(configs), time.time() - tic))
    cr = np.array(cr)
    keep = cr > cr_limit
    configs = np.array(configs)[keep]
    cr = cr[keep]
    # Rank the remaining configurations by the data-volume cost model.
    with Pool(pool_threads) as p:
        dv = p.map(get_matmul_dv, configs)
    print('DV for %i configs: %f' % (len(configs), time.time() - tic))
    dv = -1 * np.array(dv)
    dv_order = dv.argsort()
    configs = configs[dv_order]
    dv = dv[dv_order]
    num_configs = len(configs)
    dv_dict = dict(zip(configs, dv))
    best_flops = 0.0
    flops = 0.0
    counter = 0
    print('Running on hardware...')
    sorted_order = np.array(dv).argsort()
    vec_counter = 0
    to_try = np.array(configs)[sorted_order]
    build_counter = 0
    inds = []
    results = []
    dv = []
    asm_opints = []
    llvm_opints = []
    result_times = []
    asm_times = 0
    while len(results) < trials and build_counter < num_configs:
        inds_to_test = []
        module_files = []
        start_index = build_counter
        # Build the next batch in parallel; limited_test() returns an empty path for
        # kernels that fail the LLVM/assembly operational-intensity filters.
        with Pool(pool_threads) as p:
            for module_file, llvm, asm, ind, build_time, asm_time in p.map(limited_test, to_try[start_index:start_index + 100 * pool_threads]):
                #for ind in to_try:
                #    should_test, ind = limited_test(ind)
                build_counter += 1
                if len(module_file) > 0:
                    llvm_opints.append(llvm)
                    asm_opints.append(asm)
                    inds_to_test.append(ind)
                    module_files.append(module_file)
                    vec_counter += 1
                #print('Prepping tests: %.2f/%.2f GFLOPS %i/%i (%i), %.1f s \r' %
                #      (flops, best_flops, counter, num_configs,
                #       build_counter, time.time()-tic), end='')
        #finished_index = np.where(to_try == inds_to_test[-1])[0][0]
        #to_try = to_try[finished_index+1:]
        #with Pool(6) as p:
        #    for x, ind in p.imap(limited_test, to_try):
        inds_to_test = np.array(inds_to_test)
        # Time the accepted kernels one by one on the hardware.
        for ind, module_file in zip(inds_to_test, module_files):
            x, ind = eval_time(ind, module_file)
            result_times.append(time.time() - tic)
            counter += 1
            mean_time = np.array(x).mean()
            flops = task.flop / (mean_time * 1e9)
            best_flops = max(flops, best_flops)
            if best_flops == flops:
                best_ind = ind
            inds.append(ind)
            results.append(x)
            dv.append(dv_dict[ind])
            #print('Testing: %.2f/%.2f GFLOPS %i/%i (%i), %.1f s \r' %
            #      (flops, best_flops, counter, num_configs,
            #       build_counter, time.time()-tic), end='')
            # tvm.runtime.load_module() compiles a temporary .so next to the saved .o;
            # remove both artifacts.
            os.remove(module_file)
            os.remove(module_file + '.so')
    print()
    print('Best config:', task.config_space.get(best_ind))
    print('Saving %s' % pickle_file)
    with open(pickle_file, 'wb') as output:
        pickle.dump([inds, results, dv, result_times, asm_opints, llvm_opints],
                    output, pickle.HIGHEST_PROTOCOL)
    return
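# Command-line entry point: picks the benchmark index and trial budget, then tunes
# square matmuls (M = N = K) of size 1000 and 4000.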
def tune_and_evaluate():
    parser = argparse.ArgumentParser(description='Run TC benchmarks in TVM')
    parser.add_argument('-t', '--trials', help="Int. Number of trials to sample", default=2000, type=int)
    parser.add_argument('-b', '--benchmark', help="Int. Number of Tensor Contraction benchmark (1-4)", default=1, type=int)
    global M, N, K
    global matmul_index
    args = parser.parse_args()
    trials = args.trials
    ind = args.benchmark
    cr_limit = 0.9
    for size in [1000, 4000]:
        matmul_index = ind
        print("Tuning TC %i..." % matmul_index)
        #key = list(benchmarks.keys())[args.benchmark]
        M, N, K = [size, size, size]
        print("M, N, K")
        print(M, N, K)
        tune_kernels(args, trials, cr_limit)

if __name__ == "__main__":
    tune_and_evaluate()
|
StarcoderdataPython
|
1607280
|
<filename>source/index.py
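# Fetch the K-samsok (SOCH) service index from kulturarvsdata.se and render the
# institutions and their services into a single RDF file (output/soch.rdf) using
# the agent/dataset/root templates in source/.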
import os
import io
import requests
from lxml import etree
service_index_source = 'https://www.kulturarvsdata.se/ksamsok/api?method=getServiceOrganization&value=all'
r = requests.get(service_index_source)
xml = etree.XML(r.content)
institutions = list()
services = list()
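# Walk every institution in the index; institutions without a web page and
# services without a description are skipped.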
for institution_node in xml.xpath('/result/institution'):
    institution = {}
    institution['name_sv'] = institution_node.xpath('.//namnswe')[0].text or ''
    institution['name_en'] = institution_node.xpath('.//namneng')[0].text or ''
    institution['webpage'] = institution_node.xpath('.//websida')[0].text or ''
    institution['contact_email'] = institution_node.xpath('.//epostkontaktperson')[0].text or ''
    if institution['webpage'] == '':
        del institution
        continue
    institutions.append(institution)
    for service_node in institution_node.xpath('.//services/service'):
        service = {}
        service['title'] = service_node.xpath('.//namn')[0].text or ''
        service['description'] = service_node.xpath('.//beskrivning')[0].text or ''
        service['contact_email'] = institution['contact_email']
        service['publisher_url'] = institution['webpage']
        if service['description'] == '':
            del service
            continue
        services.append(service)
        del service
    del institution
agent_template_file = open('source/agent-template.xml', 'r')
agent_template = agent_template_file.read()
agent_template_file.close()
dataset_template_file = open('source/dataset-template.xml', 'r')
dataset_template = dataset_template_file.read()
dataset_template_file.close()
root_template_file = open('source/root-template.rdf', 'r')
root_template = root_template_file.read()
root_template_file.close()
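# Render one agent snippet per institution and one dataset snippet per service by
# straight string substitution into the templates.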
agents_output = ''
datasets_output = ''
for agent in institutions:
    agents_output += (agent_template
                      .replace('{ webpage }', agent['webpage'])
                      .replace('{ email }', agent['contact_email'])
                      .replace('{ name_en }', agent['name_en'])
                      .replace('{ name_sv }', agent['name_sv']))
for dataset in services:
    datasets_output += (dataset_template
                        .replace('{ description }', dataset['description'])
                        .replace('{ publisher }', dataset['publisher_url'])
                        .replace('{ title }', dataset['title'])
                        .replace('{ email }', dataset['contact_email']))
final_data = root_template.replace('{ datasets }', datasets_output).replace('{ agents }', agents_output)
os.makedirs(os.path.dirname('output/soch.rdf'), exist_ok=True)
with open('output/soch.rdf', 'w', encoding='utf-8') as f:
    f.write(final_data)
print('DONE')
|
StarcoderdataPython
|
174158
|
<gh_stars>0
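# Small path and filename helpers, mostly for coping with Windows long-path
# ("\\?\") prefixes and characters that are illegal in Windows file names.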
import os
from urllib.parse import urljoin, urlparse
import urllib
import ntpath
is_win32 = os.name == "nt"
def createDirectory(base, new_dir):
    if is_win32:
        new_dir = cleanName(new_dir, ".")
        if not base.startswith("\\\\?\\"): base = "\\\\?\\" + base
    path_new_dir = os.path.join(base, new_dir)
    if not os.path.exists(path_new_dir): os.mkdir(path_new_dir)
    return path_new_dir

def longPath(path):
    if is_win32 and not path.startswith("\\\\?\\"):
        return "\\\\?\\" + path
    return path
def try_get(src, getter, expected_type=None):
    if not isinstance(getter, (list, tuple)):
        getter = [getter]
    for get in getter:
        try:
            v = get(src)
        except (AttributeError, KeyError, TypeError, IndexError):
            pass
        else:
            if expected_type is None or isinstance(v, expected_type):
                return v
    return None
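# Example: try_get(meta, lambda x: x['items'][0]['id'], str) returns the id when the
# nested keys exist and the value has the expected type, and None otherwise.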
def cleanName(value, deletechars='<>:"/\\|?*\r\n'):
    value = str(value)
    for c in deletechars:
        value = value.replace(c, '')
    return value

def GetFileNameFromUrl(url):
    urlParsed = urlparse(urllib.parse.unquote(url))
    # Note: the original encoded the basename to UTF-8 bytes, which cleanName()
    # would stringify into a b'...' literal; keep it as str instead.
    fileName = os.path.basename(urlParsed.path)
    return cleanName(fileName)
def pathLeaf(path):
    '''
    Name..........: pathLeaf
    Description...: get file name from full path
    Parameters....: path - string. Full path
    Return values.: string file name
    Author........: None
    '''
    head, tail = ntpath.split(path)
    return tail or ntpath.basename(head)

def path_join(*args):
    new_path = os.path.join(*args)
    if os.path.altsep:
        return new_path.replace(os.path.sep, os.path.altsep)
    return new_path
|
StarcoderdataPython
|