id | text | dataset_id
---|---|---
stringlengths 1–7 | stringlengths 6–1.03M | stringclasses: 1 value
3373357
|
<reponame>Retraces/UkraineBot
/home/runner/.cache/pip/pool/dc/44/72/482de660ef3e35f01f4c398c39dd327cfb98b3c91c7aac535ca15ba590
|
StarcoderdataPython
|
96206
|
<gh_stars>10-100
'''
Copyright 2019 Trustees of the University of Pennsylvania
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import argparse
import getpass
import functools
from pennprov.connection.mprov_connection import MProvConnection
from ieeg.auth import Session
from ieeg.dataset import Annotation
from ieeg.mprov_listener import MProvListener
def dataset_required(func):
"""
Obtains dataset for func and calls it, passing dataset as first argument.
"""
@functools.wraps(func)
def pass_dataset(args):
if not args.password:
args.password = getpass.getpass('IEEG Password: ')
if args.mprov_user and not args.mprov_password:
args.mprov_password = getpass.getpass('MProv Password: ')
if args.host:
Session.host = args.host
if args.port:
Session.port = args.port
Session.method = 'http' if args.no_ssl else 'https'
mprov_listener = None
if args.mprov_user:
mprov_url = 'http://localhost:8088' if args.mprov_url is None else args.mprov_url
if args.mprov_graph:
MProvConnection.graph_name = args.mprov_graph
mprov_connection = MProvConnection(
args.mprov_user, args.mprov_password, mprov_url)
mprov_listener = MProvListener(mprov_connection)
with Session(args.user, args.password, mprov_listener=mprov_listener) as session:
dataset = session.open_dataset(args.dataset)
func(dataset, args)
session.close_dataset(dataset)
return pass_dataset
@dataset_required
def read(dataset, args):
"""
Reads annotations from dataset.
"""
layer_name = args.layer
layer_to_count = dataset.get_annotation_layers()
if not layer_name:
print(layer_to_count)
else:
expected_count = layer_to_count.get(layer_name)
if not expected_count:
print('Layer', layer_name, 'does not exist')
return
actual_count = 0
max_results = None if expected_count < 100 else 100
call_number = 0
while actual_count < expected_count:
annotations = dataset.get_annotations(
layer_name, first_result=actual_count, max_results=max_results)
call_number += 1
actual_count += len(annotations)
first = annotations[0].start_time_offset_usec
last = annotations[-1].end_time_offset_usec
print("got", len(annotations), "annotations on call #",
call_number, "covering", first, "usec to", last, "usec")
print("got", actual_count, "annotations in total")
@dataset_required
def add(dataset, args):
"""
Adds two annotations to the given dataset layer.
"""
layer_name = args.layer
if not layer_name:
layer_to_count = dataset.get_annotation_layers()
print(layer_to_count)
else:
annotated_labels = [dataset.ch_labels[0], dataset.ch_labels[-1]]
annotations = [Annotation(dataset, args.user,
'Test', 'A test annotation', layer_name, 100000, 200100, annotated_labels=annotated_labels),
Annotation(dataset, args.user,
'Test 2', 'A test annotation', layer_name, 200000, 300200, annotated_labels=annotated_labels)]
dataset.add_annotations(annotations)
layer_to_count = dataset.get_annotation_layers()
print(layer_to_count)
@dataset_required
def move(dataset, args):
"""
Move annotations from one layer to another.
"""
from_layer = args.from_layer
to_layer = args.to_layer
layer_to_count = dataset.get_annotation_layers()
if not from_layer:
print(layer_to_count)
else:
count = layer_to_count.get(from_layer)
if not count:
print(from_layer, 'contains no annotations')
else:
print('Moving', count,
'annotations from', from_layer, 'to', to_layer)
moved = dataset.move_annotation_layer(from_layer, to_layer)
print('Moved', moved, 'annotations')
print(dataset.get_annotation_layers())
@dataset_required
def delete(dataset, args):
"""
Delete annotations from the given layer.
"""
layer_to_count = dataset.get_annotation_layers()
layer_name = args.layer
if not layer_name:
print(layer_to_count)
else:
print('Deleting', layer_to_count[layer_name],
'annotations from', layer_name)
deleted = dataset.delete_annotation_layer(layer_name)
print('Deleted', deleted, 'annotations')
print(dataset.get_annotation_layers())
def fail_no_command(args):
"""
Reports failure when no subcommand was given.
"""
args.parser.error('A subcommand is required.')
def validate(args):
"""
Do any validation of args that argparse does not provide.
"""
if hasattr(args, 'from_layer'):
# Must be a move
if (args.from_layer and not args.to_layer or args.to_layer and not args.from_layer):
args.parser.error('Both from_layer and to_layer must be provided.')
def main():
"""
Parses the command line and dispatches subcommand.
"""
# create the top-level parser
parser = argparse.ArgumentParser(
epilog='<subcommand> -h for subcommand help')
parser.add_argument('-u', '--user', required=True, help='username')
parser.add_argument('-p', '--password',
help='password (will be prompted if missing)')
parser.add_argument('--mprov_user', help='MProv username')
parser.add_argument('--mprov_password',
help='MProv password (will be prompted if missing)')
parser.add_argument('--mprov_url',
help='MProv URL')
parser.add_argument('--mprov_graph',
help='MProv graph name')
parser.add_argument('--host', help='the host')
parser.add_argument('--no_ssl', action='store_true', default=False,
help="Do not use https. Ignored unless --host is set.")
parser.add_argument(
'--port', help='The port. Ignored unless --host is set.')
parser.set_defaults(func=fail_no_command, parser=parser)
subparsers = parser.add_subparsers(title='subcommands',
description='valid subcommands')
dataset_parser = argparse.ArgumentParser(add_help=False)
dataset_parser.add_argument('dataset', help='dataset name')
layer_parser = argparse.ArgumentParser(add_help=False)
layer_parser.add_argument(
'layer', nargs='?', help='Layer name. If missing, print layers in dataset.')
# The "read" command
parser_read = subparsers.add_parser('read',
parents=[dataset_parser, layer_parser],
help='Read annotations from the given dataset layer.')
parser_read.set_defaults(func=read, parser=parser_read)
# The "add" command
parser_add = subparsers.add_parser('add',
parents=[dataset_parser, layer_parser],
help='Add two test annotations to the given dataset layer.')
parser_add.set_defaults(func=add, parser=parser_add)
# The "delete" command
parser_delete = subparsers.add_parser('delete',
parents=[
dataset_parser, layer_parser],
help='Delete the given annotation layer.')
parser_delete.set_defaults(func=delete, parser=parser_delete)
# The "move" command
parser_move = subparsers.add_parser('move',
parents=[dataset_parser],
help="""Move annotations from the source layer
to the destination layer.""")
parser_move.add_argument(
'from_layer', nargs='?', help='source layer')
parser_move.add_argument(
'to_layer', nargs='?', help='destination layer')
parser_move.set_defaults(func=move, parser=parser_move)
args = parser.parse_args()
validate(args)
args.func(args)
if __name__ == "__main__":
main()
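# Hypothetical usage sketch (script, dataset and layer names are made up for
# illustration): the global options select the IEEG/MProv credentials and host,
# and the subcommand dispatches to one of the @dataset_required handlers above.
#
#   python annotations.py -u someuser read "Study 001" my_layer
#   python annotations.py -u someuser add "Study 001" test_layer
#   python annotations.py -u someuser move "Study 001" old_layer new_layer
#   python annotations.py -u someuser delete "Study 001" test_layer
#
# Leaving the layer argument off prints the dataset's {layer name: count} map.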
|
StarcoderdataPython
|
3347643
|
<gh_stars>0
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing abstract class for reliable resources.
The Resource class wraps unreliable create and delete commands in retry loops
and checks for resource existence so that resources can be created and deleted
reliably.
"""
import abc
import logging
import time
from typing import List
from absl import flags
from perfkitbenchmarker import errors
from perfkitbenchmarker import vm_util
FLAGS = flags.FLAGS
_RESOURCE_REGISTRY = {}
def GetResourceClass(base_class, **kwargs):
"""Returns the subclass with the corresponding attributes.
Args:
base_class: The base class of the resource to return
(e.g. BaseVirtualMachine).
**kwargs: Every attribute/value of the subclass's REQUIRED_ATTRS that were
used to register the subclass.
Raises:
Exception: If no class could be found with matching attributes.
"""
key = [base_class.__name__]
key += sorted(kwargs.items())
if tuple(key) not in _RESOURCE_REGISTRY:
raise errors.Resource.SubclassNotFoundError(
'No %s subclass defined with the attributes: %s' %
(base_class.__name__, kwargs))
return _RESOURCE_REGISTRY.get(tuple(key))
class AutoRegisterResourceMeta(abc.ABCMeta):
"""Metaclass which allows resources to automatically be registered."""
# See BaseResource
RESOURCE_TYPE: str
REQUIRED_ATTRS: List[str]
def __init__(cls, name, bases, dct):
if (all(hasattr(cls, attr) for attr in cls.REQUIRED_ATTRS) and
cls.RESOURCE_TYPE):
unset_attrs = [
attr for attr in cls.REQUIRED_ATTRS if getattr(cls, attr) is None]
# Raise exception if subclass with unset attributes.
if unset_attrs and cls.RESOURCE_TYPE != cls.__name__:
raise Exception(
'Subclasses of %s must have the following attrs set: %s. For %s '
'the following attrs were not set: %s.' %
(cls.RESOURCE_TYPE, cls.REQUIRED_ATTRS, cls.__name__, unset_attrs))
key = [cls.RESOURCE_TYPE]
key += sorted([(attr, getattr(cls, attr)) for attr in cls.REQUIRED_ATTRS])
_RESOURCE_REGISTRY[tuple(key)] = cls
super(AutoRegisterResourceMeta, cls).__init__(name, bases, dct)
class BaseResource(metaclass=AutoRegisterResourceMeta):
"""An object representing a cloud resource.
Attributes:
created: True if the resource has been created.
deleted: True if the resource has been deleted.
user_managed: Whether Create() and Delete() should be skipped.
frozen: Whether the resource is currently in a frozen state.
enable_freeze_restore: Whether the resource should use freeze/restore when
the option is specified on the command line. Different benchmarks may want
different resources to have freeze/restore enabled.
create_on_restore_error: Whether to create the resource if there is an issue
while restoring.
delete_on_freeze_error: Whether to delete the resource if there is an issue
while freezing.
create_start_time: The start time of the last create.
delete_start_time: The start time of the last delete.
create_end_time: The end time of the last create.
delete_end_time: The end time of the last delete.
resource_ready_time: The time when the resource last became ready.
metadata: Dictionary of resource metadata.
"""
# The name of the base class (e.g. BaseVirtualMachine) that will be extended
# with auto-registered subclasses.
RESOURCE_TYPE = None
# A list of attributes that are used to register Resource subclasses
# (e.g. CLOUD).
REQUIRED_ATTRS = ['CLOUD']
# Timeout in seconds for resource to be ready.
READY_TIMEOUT = None
# Time between retries.
POLL_INTERVAL = 5
def __init__(
self,
user_managed=False,
enable_freeze_restore=False,
create_on_restore_error=False,
delete_on_freeze_error=False,
):
super(BaseResource, self).__init__()
self.created = user_managed
self.deleted = user_managed
self.user_managed = user_managed
self.frozen: bool = False
self.enable_freeze_restore = enable_freeze_restore
self.create_on_restore_error = create_on_restore_error
self.delete_on_freeze_error = delete_on_freeze_error
# Creation and deletion time information
# that we may make use of later.
self.create_start_time = None
self.delete_start_time = None
self.create_end_time = None
self.delete_end_time = None
self.resource_ready_time = None
self.metadata = dict()
def GetResourceMetadata(self):
"""Returns a dictionary of metadata about the resource."""
return self.metadata.copy()
@abc.abstractmethod
def _Create(self):
"""Creates the underlying resource."""
raise NotImplementedError()
def _Restore(self) -> None:
"""Restores the underlying resource from a file.
This method is required if using Restore() with a resource.
"""
raise NotImplementedError()
def _Freeze(self) -> None:
"""Freezes the underlying resource to a long-term, sustainable state.
This method is required if using Restore() with a resource.
"""
raise NotImplementedError()
def _UpdateTimeout(self, timeout_minutes: int) -> None:
"""Updates the underlying resource's timeout after a successful freeze.
This method is required if using Freeze()/Restore() with a resource.
Args:
timeout_minutes: The number of minutes past the current time at which the
resource should be considered expired.
"""
raise NotImplementedError()
@abc.abstractmethod
def _Delete(self):
"""Deletes the underlying resource.
Implementations of this method should be idempotent since it may
be called multiple times, even if the resource has already been
deleted.
"""
raise NotImplementedError()
def _Exists(self):
"""Returns true if the underlying resource exists.
Supplying this method is optional. If it is not implemented then the
default is to assume success when _Create and _Delete do not raise
exceptions.
"""
raise NotImplementedError()
def _IsReady(self):
"""Return true if the underlying resource is ready.
Supplying this method is optional. Use it when a resource can exist
without being ready. If the subclass does not implement
it then it just returns true.
Returns:
True if the resource was ready in time, False if the wait timed out.
"""
return True
def _IsDeleting(self):
"""Return true if the underlying resource is getting deleted.
Supplying this method is optional. Potentially use when the resource has an
asynchronous deletion operation to avoid rerunning the deletion command and
track the deletion time correctly. If the subclass does not implement it
then it just returns false.
Returns:
True if the resource was being deleted, False if the resource was in a non
deleting state.
"""
return False
def _PreDelete(self):
"""Method that will be called once before _DeleteResource() is called.
Supplying this method is optional. If it is supplied, it will be called
once, before attempting to delete the resource. It is intended to allow
data about the resource to be collected right before it is deleted.
"""
pass
def _PostCreate(self):
"""Method that will be called once after _CreateResource() is called.
Supplying this method is optional. If it is supplied, it will be called
once, after the resource is confirmed to exist. It is intended to allow
data about the resource to be collected or for the resource to be tagged.
"""
pass
def _CreateDependencies(self):
"""Method that will be called once before _CreateResource() is called.
Supplying this method is optional. It is intended to allow additional
flexibility in creating resource dependencies separately from _Create().
"""
pass
def _DeleteDependencies(self):
"""Method that will be called once after _DeleteResource() is called.
Supplying this method is optional. It is intended to allow additional
flexibility in deleting resource dependencies separately from _Delete().
"""
pass
@vm_util.Retry(retryable_exceptions=(errors.Resource.RetryableCreationError,))
def _CreateResource(self):
"""Reliably creates the underlying resource."""
if self.created:
return
# Overwrite create_start_time each time this is called,
# with the assumption that multple calls to Create() imply
# that the resource was not actually being created on the
# backend during previous failed attempts.
self.create_start_time = time.time()
self._Create()
try:
if not self._Exists():
raise errors.Resource.RetryableCreationError(
'Creation of %s failed.' % type(self).__name__)
except NotImplementedError:
pass
self.created = True
self.create_end_time = time.time()
@vm_util.Retry(retryable_exceptions=(errors.Resource.RetryableDeletionError,),
timeout=3600)
def _DeleteResource(self):
"""Reliably deletes the underlying resource."""
# Retryable method which allows waiting for deletion of the resource.
@vm_util.Retry(poll_interval=self.POLL_INTERVAL, fuzz=0, timeout=3600,
retryable_exceptions=(
errors.Resource.RetryableDeletionError,))
def WaitUntilDeleted():
if self._IsDeleting():
raise errors.Resource.RetryableDeletionError('Not yet deleted')
if self.deleted:
return
if not self.delete_start_time:
self.delete_start_time = time.time()
self._Delete()
WaitUntilDeleted()
try:
if self._Exists():
raise errors.Resource.RetryableDeletionError(
'Deletion of %s failed.' % type(self).__name__)
except NotImplementedError:
pass
def Restore(self) -> None:
"""Restores a resource instead of creating it.
Raises:
RestoreError: Generic error encompassing restore failures.
"""
logging.info('Restoring resource %s.', repr(self))
try:
self._Restore()
except NotImplementedError as e:
raise errors.Resource.RestoreError(
f'Class {self.__class__} does not have _Restore() implemented but a '
'restore file was provided.') from e
except Exception as e:
raise errors.Resource.RestoreError('Error restoring resource '
f'{repr(self)}') from e
self.frozen = False
self.UpdateTimeout(FLAGS.timeout_minutes)
def Create(self, restore: bool = False) -> None:
"""Creates a resource and its dependencies.
Args:
restore: Whether to restore the resource instead of creating. If
enable_freeze_restore is false, this proceeds with creation.
Raises:
RestoreError: If there is an error while restoring.
"""
@vm_util.Retry(poll_interval=self.POLL_INTERVAL, fuzz=0,
timeout=self.READY_TIMEOUT,
retryable_exceptions=(
errors.Resource.RetryableCreationError,))
def WaitUntilReady():
if not self._IsReady():
raise errors.Resource.RetryableCreationError('Not yet ready')
if self.user_managed:
return
if restore and self.enable_freeze_restore:
try:
self.Restore()
return
except errors.Resource.RestoreError:
logging.exception(
'Encountered an exception while attempting to Restore(). '
'Creating: %s', self.create_on_restore_error)
if not self.create_on_restore_error:
raise
self._CreateDependencies()
self._CreateResource()
WaitUntilReady()
if not self.resource_ready_time:
self.resource_ready_time = time.time()
self._PostCreate()
def Freeze(self) -> None:
"""Freezes a resource instead of deleting it.
Raises:
FreezeError: Generic error encompassing freeze failures.
"""
logging.info('Freezing resource %s.', repr(self))
# Attempt to call freeze, failing if unimplemented.
try:
self._Freeze()
except NotImplementedError as e:
raise errors.Resource.FreezeError(
f'Class {self.__class__} does not have _Freeze() implemented but '
'Freeze() was called.') from e
except Exception as e:
raise errors.Resource.FreezeError(
f'Error freezing resource {repr(self)}') from e
# If frozen successfully, attempt to update the timeout.
self.frozen = True
self.UpdateTimeout(FLAGS.persistent_timeout_minutes)
def Delete(self, freeze: bool = False) -> None:
"""Deletes a resource and its dependencies.
Args:
freeze: Whether to freeze the resource instead of deleting. If
enable_freeze_restore is false, this proceeds with deletion.
Raises:
FreezeError: If there is an error while freezing.
"""
if self.user_managed:
return
if freeze and self.enable_freeze_restore:
try:
self.Freeze()
return
except errors.Resource.FreezeError:
logging.exception(
'Encountered an exception while attempting to Freeze(). '
'Deleting: %s', self.delete_on_freeze_error)
if not self.delete_on_freeze_error:
raise
self._PreDelete()
self._DeleteResource()
self.deleted = True
self.delete_end_time = time.time()
self._DeleteDependencies()
def UpdateTimeout(self, timeout_minutes: int) -> None:
"""Updates the timeout of the underlying resource.
Args:
timeout_minutes: The number of minutes past the current time at which the
resource should be considered expired.
Raises:
NotImplementedError: If the resource has not implemented _UpdateTimeout().
"""
logging.info('Updating timeout for %s.', repr(self))
try:
self._UpdateTimeout(timeout_minutes)
except NotImplementedError:
logging.exception(
'Class %s does not have _UpdateTimeout() implemented, which is '
'needed for Freeze(). Please add an implementation.', self.__class__)
raise
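# Illustrative sketch, not part of the original module: how the auto-registration
# in AutoRegisterResourceMeta combines with GetResourceClass(). The class names
# and the CLOUD value below are hypothetical.
class _BaseWidget(BaseResource):
    """Hypothetical resource base class; subclasses are registered by CLOUD."""
    RESOURCE_TYPE = '_BaseWidget'
    CLOUD = None
    def _Create(self):
        pass  # no-op for the sketch
    def _Delete(self):
        pass  # no-op for the sketch
class _GcpWidget(_BaseWidget):
    CLOUD = 'GCP'
# The metaclass registered _GcpWidget under the key ('_BaseWidget', ('CLOUD', 'GCP')),
# so GetResourceClass(_BaseWidget, CLOUD='GCP') returns _GcpWidget.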
|
StarcoderdataPython
|
149527
|
<gh_stars>0
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__version__ = '0.1.0'
|
StarcoderdataPython
|
3294077
|
<reponame>jdpdev/birbcam
from .adjust import Adjust
import numpy as np
import logging
class AdjustUp(Adjust):
def setup(self):
logging.info(f"[AdjustUp] take_over")
def do_adjust(self, camera):
if self._shutterFlipper.is_at_end:
self.finish()
return
camera.shutter_speed = self._shutterFlipper.next()
def check_exposure(self, exposure):
delta = exposure - self._targetLevel
logging.info(f"[AdjustUp] {exposure}, {delta} < {self._levelMargin}, {self._lastExposure}")
if self._lastExposure is not None:
lastDelta = self._lastExposure - self._targetLevel
# stop if crossed line
if np.sign(delta) != np.sign(lastDelta):
return True
# stop if close enough
if abs(delta) < self._levelMargin:
return True
return False
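# Worked illustration of the stopping rule above (numbers are hypothetical):
# with _targetLevel=120 and _levelMargin=5, a previous exposure of 110
# (delta=-10) followed by a reading of 126 (delta=+6) stops because the delta
# changed sign, and a reading of 118 (delta=-2) stops because |delta| < 5.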
|
StarcoderdataPython
|
1772849
|
<reponame>ashleyjsands/fishpic
import requests
import re
import json
import os
from bs4 import BeautifulSoup
from scrap_urls import download_image
from os.path import join
protocol_and_domain = "http://www.fish.gov.au"
def scrape_urls(urls):
species_urls = get_species_urls(urls)
print("Number of species", len(species_urls))
return list(map(lambda a: get_species(a), species_urls))
def get_species_urls(jurisdiction_urls):
species_urls = set()
for url in jurisdiction_urls:
content_div = get_content(url)
report = single(content_div.find_all('div', {"class": "sustainability-report"}))
anchors = single(report.find_all("ul", {"class": "clearfix"})).find_all("a")
species_urls.update(map(get_page_url, anchors))
return list(species_urls)
def get_page_url(anchor):
href = anchor.get("href")
return protocol_and_domain + re.sub(r"\?jurisdictionId=\d+", "", href)
def get_species(url):
content_div = get_content(url)
common_name = single(content_div.find_all('h2', {"class": "maintitle"})).get_text()
scientific_name = single(content_div.find_all('p', {"class": "subtitle"})).get_text()
gallery = single_or_none(content_div.find_all('div', {"class": "gallery"}))
image_urls = []
if gallery != None:
image_urls = list(map(lambda a: protocol_and_domain + a.get("href"), gallery.find_all("a", {"class": "image"})))
return {
"CommonName": common_name,
"ScientificName": scientific_name,
"ImageUrls": image_urls,
"Url": url
}
def get_content(url):
print("Scraping", url)
r = requests.get(url)
data = r.text
soup = BeautifulSoup(data, 'html5lib')
content_div = single(soup.find_all('div', {"class": "content"}))
return content_div
def find_one(soup, element, attributes=None):
results = soup.find_all(element, attributes)
return single(results)
def find_one_of(element, selector_tuples):
for element_type, attributes in selector_tuples:
results = element.find_all(element_type, attributes)
if len(results) == 1:
return single(results)
return None
def single(iterable):
assert len(iterable) == 1
return iterable[0]
def single_or_none(iterable):
return iterable[0] if len(iterable) == 1 else None
def get_class_name(common_name):
class_name = common_name.replace(" ", "_").lower()
corrections = { "golden_snapper": "golden_snapper_fingermark" }
if class_name in corrections:
return corrections[class_name]
else:
return class_name
if __name__ == "__main__":
urls = [
"http://www.fish.gov.au/Jurisdiction/Commonwealth",
"http://www.fish.gov.au/Jurisdiction/New-South-Wales",
"http://www.fish.gov.au/Jurisdiction/Queensland",
"http://www.fish.gov.au/Jurisdiction/South-Australia",
"http://www.fish.gov.au/Jurisdiction/Tasmania",
"http://www.fish.gov.au/Jurisdiction/Victoria",
"http://www.fish.gov.au/Jurisdiction/Western-Australia",
"http://www.fish.gov.au/Jurisdiction/Northern-Territory",
]
test_urls = [
]
species = scrape_urls(urls)
dataset_path = os.path.join(os.environ['FISHPIC_DATASETS_PATH'], "fish.gov.au")
json_file_path = join(dataset_path, "species.json")
with open(json_file_path, "w") as text_file:
json.dump(species, text_file)
print("Created", json_file_path)
print("Downloading images")
for s in species:
class_name = get_class_name(s["CommonName"])
species_dir = join(dataset_path, class_name)
if not os.path.isdir(species_dir):
os.makedirs(species_dir)
for image_url in s["ImageUrls"]:
download_image(image_url, species_dir)
print("Finished")
|
StarcoderdataPython
|
3371491
|
<filename>yocto/poky/meta/lib/oe/packagedata.py
import codecs
import os
def packaged(pkg, d):
return os.access(get_subpkgedata_fn(pkg, d) + '.packaged', os.R_OK)
def read_pkgdatafile(fn):
pkgdata = {}
def decode(str):
c = codecs.getdecoder("string_escape")
return c(str)[0]
if os.access(fn, os.R_OK):
import re
f = open(fn, 'r')
lines = f.readlines()
f.close()
r = re.compile(r"([^:]+):\s*(.*)")
for l in lines:
m = r.match(l)
if m:
pkgdata[m.group(1)] = decode(m.group(2))
return pkgdata
def get_subpkgedata_fn(pkg, d):
return d.expand('${PKGDATA_DIR}/runtime/%s' % pkg)
def has_subpkgdata(pkg, d):
return os.access(get_subpkgedata_fn(pkg, d), os.R_OK)
def read_subpkgdata(pkg, d):
return read_pkgdatafile(get_subpkgedata_fn(pkg, d))
def has_pkgdata(pn, d):
fn = d.expand('${PKGDATA_DIR}/%s' % pn)
return os.access(fn, os.R_OK)
def read_pkgdata(pn, d):
fn = d.expand('${PKGDATA_DIR}/%s' % pn)
return read_pkgdatafile(fn)
#
# Collapse FOO_pkg variables into FOO
#
def read_subpkgdata_dict(pkg, d):
ret = {}
subd = read_pkgdatafile(get_subpkgedata_fn(pkg, d))
for var in subd:
newvar = var.replace("_" + pkg, "")
if newvar == var and var + "_" + pkg in subd:
continue
ret[newvar] = subd[var]
return ret
def _pkgmap(d):
"""Return a dictionary mapping package to recipe name."""
pkgdatadir = d.getVar("PKGDATA_DIR", True)
pkgmap = {}
try:
files = os.listdir(pkgdatadir)
except OSError:
bb.warn("No files in %s?" % pkgdatadir)
files = []
for pn in filter(lambda f: not os.path.isdir(os.path.join(pkgdatadir, f)), files):
try:
pkgdata = read_pkgdatafile(os.path.join(pkgdatadir, pn))
except OSError:
continue
packages = pkgdata.get("PACKAGES") or ""
for pkg in packages.split():
pkgmap[pkg] = pn
return pkgmap
def pkgmap(d):
"""Return a dictionary mapping package to recipe name.
Cache the mapping in the metadata"""
pkgmap_data = d.getVar("__pkgmap_data", False)
if pkgmap_data is None:
pkgmap_data = _pkgmap(d)
d.setVar("__pkgmap_data", pkgmap_data)
return pkgmap_data
def recipename(pkg, d):
"""Return the recipe name for the given binary package name."""
return pkgmap(d).get(pkg)
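# Illustrative sketch (hypothetical package name and field values): a pkgdata
# file under ${PKGDATA_DIR}/runtime/ is a series of "KEY: value" lines, e.g.
#   PN: busybox
#   PKG_busybox: busybox
#   RDEPENDS_busybox: libc6
# read_pkgdatafile() parses that into a plain dict, and read_subpkgdata_dict()
# then strips the "_busybox" suffixes so callers see {'PN': ..., 'PKG': ...,
# 'RDEPENDS': ...} regardless of the package name.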
|
StarcoderdataPython
|
1782885
|
from should_be.core import BaseMixin
try:
from collections.abc import Container, Iterable
except ImportError:
# python < 3.3
from collections import Container, Iterable
class ContainerMixin(BaseMixin):
target_class = Container
def should_include(self, target):
if isinstance(target, Iterable):
msg = ('{txt} should have included {val}, but did not have '
'items {items}')
missing_items = []
for item in target:
if item not in self:
missing_items.append(item)
self.should_follow(len(missing_items) == 0, msg,
val=target,
items=missing_items)
else:
msg = '{txt} should have included {val}, but did not'
self.should_follow(target in self, msg,
val=target)
def shouldnt_include(self, target):
if isinstance(target, Iterable):
msg = '{txt} should not have included {val}, but did anyway'
missing_items = []
for item in target:
if item not in self:
missing_items.append(item)
self.should_follow(len(missing_items) > 0, msg,
val=target,
items=missing_items)
else:
msg = '{txt} should not have included {val}, but did anyway'
self.should_follow(target not in self, msg,
val=target)
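# Hypothetical usage sketch, assuming the library has bound these mixin methods
# onto the built-in container types:
#   [1, 2, 3].should_include(2)        # passes
#   [1, 2, 3].should_include([2, 4])   # fails, reporting the missing item 4
#   [1, 2, 3].shouldnt_include(5)      # passes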
|
StarcoderdataPython
|
1760058
|
<filename>clearml_agent/version.py<gh_stars>0
__version__ = '1.2.0rc3'
|
StarcoderdataPython
|
53660
|
from datasets.__local__ import implemented_datasets
from datasets.mnist import MNIST_DataLoader
from datasets.cifar10 import CIFAR_10_DataLoader
from datasets.bedroom import Bedroom_DataLoader
from datasets.toy import ToySeq_DataLoader
from datasets.normal import Normal_DataLoader
from datasets.adult import Adult_DataLoader
def load_dataset(learner, dataset_name, pretrain=False):
assert dataset_name in implemented_datasets
if dataset_name == "mnist":
data_loader = MNIST_DataLoader
if dataset_name == "cifar10":
data_loader = CIFAR_10_DataLoader
if dataset_name == "bedroom":
data_loader = Bedroom_DataLoader
if dataset_name == "toyseq":
data_loader = ToySeq_DataLoader
if dataset_name == "normal":
data_loader = Normal_DataLoader
if dataset_name == "adult":
data_loader = Adult_DataLoader
# load data with data loader
learner.load_data(data_loader=data_loader, pretrain=pretrain)
# check all parameters have been attributed
learner.data.check_all()
|
StarcoderdataPython
|
3354352
|
<reponame>anbo225/docklet
from intra.system import system_manager
from intra.billing import billing_manager
from intra.cgroup import cgroup_manager
from policy.quota import *
from intra.smart import smart_controller
class case_handler:
# [Order-by] lexicographic order
# curl -L -X POST -F uuid=docklet-1-0 http://0.0.0.0:1729/v1/billing/increment
def billing_increment(form, args):
return billing_manager.fetch_increment_and_clean(form['uuid'])
# curl -L -X POST http://0.0.0.0:1729/v1/cgroup/container/list
def cgroup_container_list(form, args):
return cgroup_manager.get_cgroup_containers()
# curl -L -X POST -F policy=etime_rev_policy http://0.0.0.0:1729/v1/smart/quota/policy
def smart_quota_policy(form, args):
msg = 'success'
try:
smart_controller.set_policy(eval(form['policy']))
except Exception as e:
msg = e
return {'message': msg}
# curl -L -X POST -F uuid=n1 http://0.0.0.0:1729/v1/cgroup/container/limit
def cgroup_container_limit(form, args):
return cgroup_manager.get_container_limit(form['uuid'])
# curl -L -X POST -F uuid=n1 http://0.0.0.0:1729/v1/cgroup/container/sample
def cgroup_container_sample(form, args):
return cgroup_manager.get_container_sample(form['uuid'])
# curl -L -X POST http://0.0.0.0:1729/v1/system/loads
def system_loads(form, args):
return system_manager.get_system_loads()
# curl -L -X POST http://0.0.0.0:1729/v1/system/memsw/available
def system_memsw_available(form, args):
return system_manager.get_available_memsw()
# curl -L -X POST -F size=16 http://0.0.0.0:1729/v1/system/swap/extend
def system_swap_extend(form, args):
return system_manager.extend_swap(int(form['size']))
# curl -L -X POST http://0.0.0.0:1729/v1/system/swap/clear
def system_swap_clear(form, args):
return system_manager.clear_all_swaps()
# curl -L -X POST http://0.0.0.0:1729/v1/system/total/physical/memory
def system_total_physical_memory(form, args):
return system_manager.get_total_physical_memory_for_containers()
'''
# curl -X POST -F uuid=n1 http://0.0.0.0:1729/v1/blacklist/add
def blacklist_add(form):
exists = form['uuid'] in smart_controller.blacklist
if not exists:
smart_controller.blacklist.add(form['uuid'])
return {"changed": not exists}
# curl -X POST -F uuid=n1 http://0.0.0.0:1729/v1/blacklist/remove
def blacklist_remove(form):
exists = form['uuid'] in smart_controller.blacklist
if exists:
smart_controller.blacklist.remove(form['uuid'])
return {"changed": exists}
# curl -X POST http://0.0.0.0:1729/v1/blacklist/show
def blacklist_show(form):
blacklist = []
for item in smart_controller.blacklist:
blacklist.append(item)
return blacklist
'''
|
StarcoderdataPython
|
1771390
|
<filename>ACM-Solution/SPCQ.py
from sys import stdin,stdout
def digit(n):
s=0
while n: n,r=divmod(n,10);s+=r
return s
a,*b=map(int,stdin.buffer.readlines())
out=[]
for i in b:
while(i%digit(i)!=0):i+=1
out.append("%d"%i)
print("\n".join(out))
|
StarcoderdataPython
|
1777740
|
<gh_stars>0
import json
import yaml
import os
def json_to_yaml(file_path: str):
with open(file_path,) as f:
output_file_content = {}
output_file = open(os.path.basename(file_path).replace(".json","")+'.yaml','w')
output_file_content = json.load(f)
yaml.dump(output_file_content, output_file)
output_file.close()
json_to_yaml('donuts.json')
json_to_yaml('emojis.json')
|
StarcoderdataPython
|
137540
|
import os
import re
import traceback
from typing import List
import pytz
import tweepy
from django.core.management.base import BaseCommand
from django.db import transaction
from slacker import Slacker
from tweepy.cursor import ItemIterator
from apps.cultivar.apple import Apple
from apps.tweets.models import Tweets, LastSearch
LOCAL_TIMEZONE = pytz.timezone('Asia/Tokyo')
# There shouldn't be more than 200 tweets in a single day...
TWEET_COUNT = 200
class Command(BaseCommand):
def handle(self, *args, **options):
""" manage.pyで使うときのエントリポイント """
self.cultivars = Apple().cultivars
self.last_search = self.get_last_search()
statuses = self.gather_tweets()
if statuses:
self.save_with_transaction(statuses)
print('finish')
def get_last_search(self) -> LastSearch:
""" 前回検索情報を取得する """
return LastSearch.objects.first()
def gather_tweets(self) -> List:
""" ツイートを取得し、idの降順にソートする
tweepyにて関連するツイートを取得
"""
try:
statuses = self._get_statuses_from_api()
return sorted(statuses, key=lambda s: s.id, reverse=True)
except Exception:
self.log(traceback.format_exc())
def _get_statuses_from_api(self) -> ItemIterator:
""" Twitter APIよりツイートを取得する
テストしやすいよう、プライベートメソッドとして切り出した
"""
auth = tweepy.AppAuthHandler(
os.environ['TWITTER_CONSUMER_KEY'],
os.environ['TWITTER_CONSUMER_SECRET'])
api = tweepy.API(auth)
options = self.get_api_options(self.last_search)
return tweepy.Cursor(api.user_timeline, **options).items(TWEET_COUNT)
def get_api_options(self, last_search: LastSearch or None) -> dict:
""" Twitter APIで使うオプションの内容を取得 """
if last_search:
return {
'id': os.environ['USER_ID'],
'since_id': last_search.prev_since_id
}
else:
return {'id': os.environ['USER_ID']}
def save_with_transaction(self, statuses: List):
""" トランザクションで各種テーブルを更新する """
try:
with transaction.atomic():
# Save only the tweets that contain apple (リンゴ) information
tweets = self.delete_unrelated_tweets(statuses)
if tweets:
# This way the rows end up in cultivar order rather than created_at order,
# but that causes no problems at the DB level, so it is fine as is
for c in self.cultivars:
# A list comprehension is used to process only the elements that satisfy the condition
# The text wrapped in backticks (`) is treated as the cultivar name
[self.save_tweets(t, c) for t in tweets if "`" + c['Name'] + "`" in t.text]
# Save the latest searched id
self.save_last_search(self.last_search, statuses[0].id)
print('commit')
# If an exception occurs, Django rolls the transaction back automatically
except Exception:
self.log(traceback.format_exc())
print('rollback')
def delete_unrelated_tweets(self, statuses: List) -> List:
""" ツイートのうち、[リンゴ]で始まるもの以外を削除 """
pattern = re.compile(r'\[リンゴ\]')
return [x for x in statuses if pattern.match(x.text)]
def save_tweets(self, twitter_status, cultivar: dict):
""" ツイートの保存 """
arg = {
'name': cultivar['Name'],
'tweet_id': twitter_status.id,
'tweet': twitter_status.text,
'tweeted_at': LOCAL_TIMEZONE.localize(twitter_status.created_at)
}
t = Tweets(**arg)
t.save()
def save_last_search(self, last_searched: LastSearch, prev_since_id: int):
""" 検索済のうち、最新のIDを保存 """
if last_searched:
last_searched.prev_since_id = prev_since_id
last_searched.save()
else:
arg = {
'prev_since_id': prev_since_id
}
obj = LastSearch(**arg)
obj.save()
def log(self, log_message: str):
""" ログを出力し、設定されていればSlackへも通知する """
print(log_message)
if os.environ['SLACK_TOKEN']:
slack = Slacker(os.environ['SLACK_TOKEN'])
slack.chat.post_message(os.environ['SLACK_CHANNEL'], log_message)
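# Illustrative note (the tweet text below is hypothetical): a tweet like
#   "[リンゴ] Today's apple is `ふじ`"
# passes delete_unrelated_tweets() because it starts with [リンゴ], and
# save_with_transaction() stores it under the cultivar whose Name appears
# between backticks; tweets without the [リンゴ] prefix are discarded.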
|
StarcoderdataPython
|
73365
|
<reponame>Alexhuszagh/XLDiscoverer
'''
XlPy/Tools/Xic_Picking/weighting
________________________________
Tools to weight peak-picking algorithms by biophysical properties,
such as correlations between different isotope patterns.
:copyright: (c) 2015 The Regents of the University of California.
:license: GNU GPL, see licenses/GNU GPLv3.txt for more details.
'''
# load modules
import numpy as np
from xldlib.definitions import ZIP
from xldlib.utils import logger
from xldlib.utils.xictools import scoring
# CORRELATIONS
# ------------
def get_cluster_dotps(clusters):
return np.array([cluster.get_dotps() for cluster in clusters])
def get_cluster_correlation(clusters):
return np.array([cluster.get_masscorrelations() for cluster in clusters])
# EUCLIDEAN DISTANCE
# ------------------
def get_nonoverlapping_distances(clusters):
'''Returns the euclidean distance from each anchor to the cluster'''
distances = []
for cluster in clusters:
distances.append(list(cluster.yield_distance()))
# need to transpose so it is by anchor and not by cluster
return np.array(distances).T
# WEIGHTING
# ---------
@logger.call('peakpicking', 'debug')
def get_isotope_weight(clusters, dotp_weight=0.35, size_weight=0.2,
mass_weight=0.45, **kwds):
'''
Returns a weighted matrix for each isotope cluster
'''
# calculate our size, dotp and mass_correlation coefficients
weights = (scoring.get_size_weight(i.start, i.end) for i in clusters)
size = np.fromiter(weights, dtype=int)
# transpose since we want grouped by cluster, not by charge
dotp = get_cluster_dotps(clusters).T
mass = get_cluster_correlation(clusters).T
return (size*size_weight) + (dotp*dotp_weight) + (mass*mass_weight)
@logger.call('peakpicking', 'debug')
def get_anchor_weight(clusters):
'''
Weights all the anchor points using a nearest non-overlapping
feature approach (1D).
If the element overlaps, the value is 1.
If the element does not overlap but is the nearest, then the value is 2.
The returned weight is 1 / sqrt(value)
'''
# get the euclidean distances sorted
distance_matrix = get_nonoverlapping_distances(clusters)
sortedargs = np.argsort(distance_matrix)
# weight each distance
weight_matrix = np.zeros(sortedargs.shape)
zipped = ZIP(sortedargs, distance_matrix)
for row, (indexes, distances) in enumerate(zipped):
counter = 2
for index in indexes:
if distances[index] == 0:
weight_matrix[row][index] = 1
else:
weight_matrix[row][index] = counter
counter += 1
return np.prod((1 / np.sqrt(weight_matrix)), axis=0)
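# Numeric sketch of the scheme above (hypothetical distances): for a single
# anchor row, a cluster that overlaps it (distance 0) gets value 1, the nearest
# non-overlapping cluster gets 2, the next gets 3, so after 1/sqrt() their
# weights are 1.0, ~0.707 and ~0.577; np.prod(..., axis=0) then multiplies the
# rows from all anchors into one weight per cluster.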
|
StarcoderdataPython
|
3399713
|
<filename>scripts/word2vec.py
import sys
from gensim.models import Word2Vec
from gensim.models import KeyedVectors
print("Loading model...")
model = KeyedVectors.load_word2vec_format(sys.argv[1], binary=True)
print("Model loaded.")
k = 3
input = sys.stdin.read().splitlines()
for line in input:
words = line.split(" ")
# Remove words not in the dictionary (filter into a new list rather than
# removing items from the list while iterating over it)
words = [word for word in words if word in model.vocab]
if not words:
continue
similar = model.most_similar(words, topn=k)
for result in similar:
print("{} => {}".format(line, result[0]))
|
StarcoderdataPython
|
3346735
|
from office365.runtime.client_value import ClientValue
class FileCreationInformation(ClientValue):
"""Represents properties that can be set when creating a file by using the FileCollection.Add method."""
def __init__(self, url=None, overwrite=False, content=None):
"""
:type url: str
"""
super(FileCreationInformation, self).__init__()
self._url = url
self._overwrite = overwrite
self._content = content
def to_json(self):
return {
"overwrite": self.overwrite,
"url": self.url
}
@property
def content(self):
"""Gets the binary content of the file."""
return self._content
@content.setter
def content(self, value):
"""Sets the binary content of the file."""
self._content = value
@property
def overwrite(self):
"""Indicates whether to overwrite an existing file with the same name and in the same location
as the one being added."""
return self._overwrite
@property
def url(self):
"""The URL of the file."""
return self._url
@url.setter
def url(self, value):
self._url = value
@overwrite.setter
def overwrite(self, value):
self._overwrite = value
|
StarcoderdataPython
|
1615662
|
import logging
import traceback
from unittest import mock
from .common import BuiltinTest
from bfg9000.builtins import core # noqa
from bfg9000 import exceptions
from bfg9000.path import Path, Root
from bfg9000.safe_str import safe_str, safe_format
class TestCore(BuiltinTest):
def test_warning(self):
with mock.patch('warnings.warn') as warn:
self.context['warning']('message')
warn.assert_called_once_with('message')
with mock.patch('warnings.warn') as warn:
self.context['warning']('message', 1, Path('path'), 'bar')
warn.assert_called_once_with(
'message 1 ' + repr(Path('path')) + ' bar'
)
def test_info(self):
with mock.patch('logging.log') as log:
self.context['info']('message')
tb = traceback.extract_stack()[1:]
tb[-1].lineno -= 1
log.assert_called_once_with(logging.INFO, 'message', extra={
'full_stack': tb, 'show_stack': False
})
with mock.patch('logging.log') as log:
self.context['info']('message', 1, Path('path'), 'bar')
tb = traceback.extract_stack()[1:]
tb[-1].lineno -= 1
log.assert_called_once_with(
logging.INFO, 'message 1 ' + repr(Path('path')) + ' bar',
extra={
'full_stack': tb, 'show_stack': False
}
)
with mock.patch('logging.log') as log:
self.context['info']('message', show_stack=True)
tb = traceback.extract_stack()[1:]
tb[-1].lineno -= 1
log.assert_called_once_with(logging.INFO, 'message', extra={
'full_stack': tb, 'show_stack': True
})
def test_debug(self):
with mock.patch('logging.log') as log:
self.context['debug']('message')
tb = traceback.extract_stack()[1:]
tb[-1].lineno -= 1
log.assert_called_once_with(logging.DEBUG, 'message', extra={
'full_stack': tb, 'show_stack': True
})
with mock.patch('logging.log') as log:
self.context['debug']('message', 1, Path('path'), 'bar')
tb = traceback.extract_stack()[1:]
tb[-1].lineno -= 1
log.assert_called_once_with(
logging.DEBUG, 'message 1 ' + repr(Path('path')) + ' bar',
extra={
'full_stack': tb, 'show_stack': True
}
)
with mock.patch('logging.log') as log:
self.context['debug']('message', show_stack=False)
tb = traceback.extract_stack()[1:]
tb[-1].lineno -= 1
log.assert_called_once_with(logging.DEBUG, 'message', extra={
'full_stack': tb, 'show_stack': False
})
def test_exceptions(self):
for name in dir(exceptions):
t = getattr(exceptions, name)
if isinstance(t, type):
self.assertIs(self.context[name], t)
def test_safe_str(self):
self.assertIs(self.context['safe_str'], safe_str)
self.assertIs(self.context['safe_format'], safe_format)
def test_submodule(self):
def mock_execute(context, path):
return context.PathEntry(path)
with mock.patch('bfg9000.build.execute_file',
mock.MagicMock(wraps=mock_execute)) as m:
self.assertEqual(self.context['submodule']('dir'), {})
m.assert_called_once_with(self.context,
Path('dir/build.bfg', Root.srcdir))
with self.context.push_path(Path('dir/build.bfg', Root.srcdir)), \
mock.patch('bfg9000.build.execute_file',
mock.MagicMock(wraps=mock_execute)) as m: # noqa
self.assertEqual(self.context['submodule']('sub'), {})
m.assert_called_once_with(self.context,
Path('dir/sub/build.bfg', Root.srcdir))
def test_export(self):
with self.context.push_path(Path('foo/build.bfg', Root.srcdir)) as p:
self.context['export'](foo='foo')
self.assertEqual(p.exports, {'foo': 'foo'})
self.assertRaises(ValueError, self.context['export'], bar='bar')
|
StarcoderdataPython
|
13895
|
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import test_plan
import settings
class Module(test_plan.Testplan):
runScript = settings.KMD_RUNSCRIPT
deviceTargets = ['sim', 'ufpga']
def __init__(self):
super(Module, self).__init__(__name__)
# Convenience globals
kmd = Module.runScript
devices = Module.deviceTargets
ces = ["Core Engine Scheduler"]
nn = ["Neural Network"]
convd = ["CONV HW - Direct"]
convi = ["CONV HW - Image"]
convw = ["CONV HW - Winograd"]
convp = ["CONV HW - Pipeline"]
sdpx1 = ["SDP X1 HW"]
sdpx2 = ["SDP X2 HW"]
sdpy = ["SDP Y HW"]
sdpf = ["SDP HW - Full"]
cdp = ["CDP HW"]
pdp = ["PDP HW"]
def registerNvSmallTests(self, testplan):
testplan.append(
[0, "Written", kmd, "CONV_D_L0_0_small", None, convd, devices, "Convolution test - Sanity test direct convolution",
"Direct convolution, 8x8x128 input cube, 3x3x128 kernel cube and 32 kernels input and weight read from DRAM, no mean and bias data, output written to DRAM through SDP."])
testplan.append(
[0, "Written", kmd, "SDP_X1_L0_0_small", None, sdpx1, devices,
"SDP test - Sanity test for SDP, only X1 enabled with ALU, X2 and Y disable. No DMA used",
"Element wise sum operation in X1, 8x8x32 input cube and 8x8x32 bias cube. Activation function as ReLU"])
testplan.append(
[0, "Written", kmd, "CDP_L0_0_small", None, cdp, devices, "CDP test - Sanity test for CDP",
"Use only linear table with LUT configured with all 1. 8x8x32 input cube and 8x8x32 output cube."])
testplan.append(
[0, "Written", kmd, "PDP_L0_0_small", None, pdp, devices, "PDP test - Sanity test for PDP with max pooling",
"Max pooling, 8x8x32 input cube, 8x8x32 output cube, no padding, 1x1 kernel size. No need to compare data. It is enough if task succeeds to pass this test."])
testplan.append(
[0, "Written", kmd, "NN_L0_1_small", None, nn, devices, "AlexNet", "AlexNet"])
def registerFirmwareSmallTests(self):
testplan = []
registerNvSmallTests(self, testplan)
for item in testplan:
test = test_plan.Test()
test.level = item[0]
test.status = item[1]
test.runscript = item[2]
test.name = item[3]
test.options = item[4]
test.features = item[5]
test.targets = item[6]
test.description = item[7]
test.dependencies = None
self.add_test(test)
def registerTests(self):
registerFirmwareSmallTests(self)
Module.register_tests = registerTests
|
StarcoderdataPython
|
3343809
|
import mysql.connector
import pandas as pd
import pyodbc;
mydb = pyodbc.connect(driver='{SQL Server}', host='rods-data-server-01.database.windows.net', database='Data-Rod-Input', user='admin-rods', password='<PASSWORD>')
mycursor1 = mydb.cursor()
mycursor1.execute("TRUNCATE TABLE [data_input_test]")
mydb.commit()
print("Bien")
|
StarcoderdataPython
|
1680726
|
<gh_stars>1-10
"""
compat
======
Cross-compatible functions for Python 2 and 3.
Key items to import for 2/3 compatible code:
* iterators: range(), map(), zip(), filter(), reduce()
* lists: lrange(), lmap(), lzip(), lfilter()
* unicode: u() [u"" is a syntax error in Python 3.0-3.2]
* longs: long (int in Python 3)
* callable
* iterable method compatibility: iteritems, iterkeys, itervalues
* Uses the original method if available, otherwise uses items, keys, values.
* types:
* text_type: unicode in Python 2, str in Python 3
* binary_type: str in Python 2, bytes in Python 3
* string_types: basestring in Python 2, str in Python 3
* bind_method: binds functions to classes
Python 2.6 compatibility:
* OrderedDict
* Counter
Other items:
* OrderedDefaultDict
"""
# pylint disable=W0611
import functools
import itertools
from distutils.version import LooseVersion
from itertools import product
import sys
import types
PY3 = (sys.version_info[0] >= 3)
# import iterator versions of these functions
try:
import __builtin__ as builtins
# not writeable when instantiated with string, doesn't handle unicode well
from cStringIO import StringIO as cStringIO
# always writeable
from StringIO import StringIO
BytesIO = StringIO
import cPickle
except ImportError:
import builtins
from io import StringIO, BytesIO
cStringIO = StringIO
import pickle as cPickle
if PY3:
def isidentifier(s):
return s.isidentifier()
def str_to_bytes(s, encoding='ascii'):
return s.encode(encoding)
def bytes_to_str(b, encoding='utf-8'):
return b.decode(encoding)
# have to explicitly put builtins into the namespace
range = range
map = map
zip = zip
filter = filter
reduce = functools.reduce
long = int
unichr = chr
# list-producing versions of the major Python iterating functions
def lrange(*args, **kwargs):
return list(range(*args, **kwargs))
def lzip(*args, **kwargs):
return list(zip(*args, **kwargs))
def lmap(*args, **kwargs):
return list(map(*args, **kwargs))
def lfilter(*args, **kwargs):
return list(filter(*args, **kwargs))
else:
# Python 2
import re
_name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$")
def isidentifier(s, dotted=False):
return bool(_name_re.match(s))
def str_to_bytes(s, encoding='ascii'):
return s
def bytes_to_str(b, encoding='ascii'):
return b
range = xrange
zip = itertools.izip
filter = itertools.ifilter
map = itertools.imap
reduce = reduce
long = long
unichr = unichr
# Python 2-builtin ranges produce lists
lrange = builtins.range
lzip = builtins.zip
lmap = builtins.map
lfilter = builtins.filter
def iteritems(obj, **kwargs):
"""replacement for six's iteritems for Python2/3 compat
uses 'iteritems' if available and otherwise uses 'items'.
Passes kwargs to method."""
func = getattr(obj, "iteritems", None)
if not func:
func = obj.items
return func(**kwargs)
def iterkeys(obj, **kwargs):
func = getattr(obj, "iterkeys", None)
if not func:
func = obj.keys
return func(**kwargs)
def itervalues(obj, **kwargs):
func = getattr(obj, "itervalues", None)
if not func:
func = obj.values
return func(**kwargs)
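# Usage sketch: these helpers give a single spelling across interpreters, e.g.
#   d = {'a': 1, 'b': 2}
#   for k, v in iteritems(d):   # dict.iteritems() on Python 2, dict.items() on Python 3
#       print(k, v)
# and iterkeys(d) / itervalues(d) fall back to keys() / values() the same way.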
def bind_method(cls, name, func):
"""Bind a method to class, python 2 and python 3 compatible.
Parameters
----------
cls : type
class to receive bound method
name : basestring
name of method on class instance
func : function
function to be bound as method
Returns
-------
None
"""
# only python 2 has bound/unbound method issue
if not PY3:
setattr(cls, name, types.MethodType(func, None, cls))
else:
setattr(cls, name, func)
# ----------------------------------------------------------------------------
# functions largely based / taken from the six module
# Much of the code in this module comes from <NAME>'s six library.
# The license for this library can be found in LICENSES/SIX and the code can be
# found at https://bitbucket.org/gutworth/six
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
def u(s):
return s
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
def u(s):
return unicode(s, "unicode_escape")
string_and_binary_types = string_types + (binary_type,)
try:
# callable reintroduced in later versions of Python
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
# ----------------------------------------------------------------------------
# Python 2.6 compatibility shims
#
# OrderedDict Shim from <NAME>, python core dev
# http://code.activestate.com/recipes/576693-ordered-dictionary-for-py24/
# here to support versions before 2.6
if not PY3:
# don't need this except in 2.6
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class _OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular
# dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked
# list. The circular doubly linked list starts and ends with a sentinel
# element. The sentinel element never gets deleted (this simplifies the
# algorithm). Each link is stored as a list of length three: [PREV, NEXT,
# KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the
# linked list, and the inherited dictionary is updated with the new
# key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor
# nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in itervalues(self.__map):
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if
false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does:for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
# let subclasses override update without breaking __init__
__update = update
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the\
corresponding value. If key is not found, d is returned if given,
otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self.items()))
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S and
values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is
order-sensitive while comparison to a regular mapping is
order-insensitive.
'''
if isinstance(other, OrderedDict):
return (len(self) == len(other) and
list(self.items()) == list(other.items()))
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
# {{{ http://code.activestate.com/recipes/576611/ (r11)
try:
from operator import itemgetter
from heapq import nlargest
except ImportError:
pass
class _Counter(dict):
'''Dict subclass for counting hashable objects. Sometimes called a bag
or multiset. Elements are stored as dictionary keys and their counts
are stored as dictionary values.
>>> Counter('zyzygy')
Counter({'y': 3, 'z': 2, 'g': 1})
'''
def __init__(self, iterable=None, **kwds):
'''Create a new, empty Counter object. And if given, count elements
from an input iterable. Or, initialize the count from another mapping
of elements to their counts.
>>> c = Counter() # a new, empty counter
>>> c = Counter('gallahad') # a new counter from an iterable
>>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
>>> c = Counter(a=4, b=2) # a new counter from keyword args
'''
self.update(iterable, **kwds)
def __missing__(self, key):
return 0
def most_common(self, n=None):
'''List the n most common elements and their counts from the most
common to the least. If n is None, then list all element counts.
>>> Counter('abracadabra').most_common(3)
[('a', 5), ('r', 2), ('b', 2)]
'''
if n is None:
return sorted(iteritems(self), key=itemgetter(1), reverse=True)
return nlargest(n, iteritems(self), key=itemgetter(1))
def elements(self):
'''Iterator over elements repeating each as many times as its count.
>>> c = Counter('ABCABC')
>>> sorted(c.elements())
['A', 'A', 'B', 'B', 'C', 'C']
If an element's count has been set to zero or is a negative number,
elements() will ignore it.
'''
for elem, count in iteritems(self):
for _ in range(count):
yield elem
# Override dict methods where the meaning changes for Counter objects.
@classmethod
def fromkeys(cls, iterable, v=None):
raise NotImplementedError(
'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
def update(self, iterable=None, **kwds):
'''Like dict.update() but add counts instead of replacing them.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.update('witch') # add elements from another iterable
>>> d = Counter('watch')
>>> c.update(d) # add elements from another counter
>>> c['h'] # four 'h' in which, witch, and watch
4
'''
if iterable is not None:
if hasattr(iterable, 'iteritems'):
if self:
self_get = self.get
for elem, count in iteritems(iterable):
self[elem] = self_get(elem, 0) + count
else:
dict.update(
self, iterable) # fast path when counter is empty
else:
self_get = self.get
for elem in iterable:
self[elem] = self_get(elem, 0) + 1
if kwds:
self.update(kwds)
def copy(self):
'Like dict.copy() but returns a Counter instance instead of a dict.'
return Counter(self)
def __delitem__(self, elem):
'''Like dict.__delitem__() but does not raise KeyError for missing
values.'''
if elem in self:
dict.__delitem__(self, elem)
def __repr__(self):
if not self:
return '%s()' % self.__class__.__name__
items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
return '%s({%s})' % (self.__class__.__name__, items)
# Multiset-style mathematical operations discussed in:
# Knuth TAOCP Volume II section 4.6.3 exercise 19
# and at http://en.wikipedia.org/wiki/Multiset
#
# Outputs guaranteed to only include positive counts.
#
# To strip negative and zero counts, add-in an empty counter:
# c += Counter()
def __add__(self, other):
'''Add counts from two counters.
>>> Counter('abbb') + Counter('bcc')
Counter({'b': 4, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem in set(self) | set(other):
newcount = self[elem] + other[elem]
if newcount > 0:
result[elem] = newcount
return result
def __sub__(self, other):
''' Subtract count, but keep only results with positive counts.
>>> Counter('abbbc') - Counter('bccd')
Counter({'b': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem in set(self) | set(other):
newcount = self[elem] - other[elem]
if newcount > 0:
result[elem] = newcount
return result
def __or__(self, other):
'''Union is the maximum of value in either of the input counters.
>>> Counter('abbb') | Counter('bcc')
Counter({'b': 3, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
_max = max
result = Counter()
for elem in set(self) | set(other):
newcount = _max(self[elem], other[elem])
if newcount > 0:
result[elem] = newcount
return result
def __and__(self, other):
''' Intersection is the minimum of corresponding counts.
>>> Counter('abbb') & Counter('bcc')
Counter({'b': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
_min = min
result = Counter()
if len(self) < len(other):
self, other = other, self
for elem in filter(self.__contains__, other):
newcount = _min(self[elem], other[elem])
if newcount > 0:
result[elem] = newcount
return result
if sys.version_info[:2] < (2, 7):
OrderedDict = _OrderedDict
Counter = _Counter
else:
from collections import OrderedDict, Counter
# http://stackoverflow.com/questions/4126348
# Thanks to @martineau at SO
from dateutil import parser as _date_parser
import dateutil
if LooseVersion(dateutil.__version__) < '2.0':
@functools.wraps(_date_parser.parse)
def parse_date(timestr, *args, **kwargs):
timestr = bytes(timestr)
return _date_parser.parse(timestr, *args, **kwargs)
else:
parse_date = _date_parser.parse
class OrderedDefaultdict(OrderedDict):
def __init__(self, *args, **kwargs):
newdefault = None
newargs = ()
if args:
newdefault = args[0]
if not (newdefault is None or callable(newdefault)):
raise TypeError('first argument must be callable or None')
newargs = args[1:]
self.default_factory = newdefault
        super(OrderedDefaultdict, self).__init__(*newargs, **kwargs)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self): # optional, for pickle support
args = self.default_factory if self.default_factory else tuple()
return type(self), args, None, None, list(self.items())
|
StarcoderdataPython
|
3208469
|
<reponame>ajdillhoff/3dhpe-udd<gh_stars>1-10
import torch
import torch.nn.functional as F
from utils.quaternion import qmul
class HandModel(torch.nn.Module):
""" Layer that converts model parameters into transformation matrices and
3D joint locations."""
def __init__(self, positions, rotations, skeleton, trainable_idxs):
super(HandModel, self).__init__()
self.positions = positions.to(torch.cuda.current_device())
self.base_rotations = rotations.to(torch.cuda.current_device())
self.skeleton = skeleton
self.trainable_idxs = trainable_idxs
self.skeleton.inherit_scale = False
def forward(self, x):
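        # x arrives as a flat parameter vector per sample; the view() below
        # reinterprets it as (batch, n_trainable_joints, 4) quaternion offsets,
        # which are then normalized and composed with the base rotations via qmul().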
batch_size = x.shape[0]
x = x.view(batch_size, -1, 4)
pos_pred = x[:, :, :4]
# shape_pred = x[:, :, 4:]
rot_offset = F.normalize(pos_pred, dim=-1)
# scale_pred = torch.min(torch.max(torch.ones_like(shape_pred) * -0.5,
# shape_pred),
# torch.ones_like(shape_pred) * 0.5)
# Position
positions = self.positions.to(x.device).unsqueeze(0).repeat(batch_size, 1, 1)
# Rotation
rotations = self.base_rotations.to(torch.cuda.current_device()).repeat(batch_size, 1, 1)
new_rotations = qmul(rotations[:, self.trainable_idxs].view(-1, 4), rot_offset.view(-1, 4))
new_rotations = new_rotations.view(batch_size, -1, 4)
rotations[:, self.trainable_idxs] = new_rotations
# Scale parameter
scale_params = torch.ones_like(positions)
# if self.predict_shape is True:
# scale_params[:, self.trainable_idxs] += scale_pred
# Forward kinematics
local_transforms = self.skeleton.to_matrix(rotations, positions, scale_params).cuda()
coord_pred = self.skeleton.forward_kinematics2(rotations, positions, scale_params).cuda()
coord_pred = coord_pred.permute((1, 0, 2))
return local_transforms, coord_pred, rot_offset
|
StarcoderdataPython
|
1698692
|
#!/usr/bin/env python
r'''
https://www.hackerrank.com/challenges/two-strings/problem
'''
import math
import os
import random
import re
import sys
# Complete the twoStrings function below.
def twoStrings_v1(s1, s2):
    if len(s1) == 0 or len(s2) == 0:
        return 'NO'
    cset1 = set(s1)
    cset2 = set(s2)
    # The strings share a common substring iff they share at least one
    # character, i.e. the union of their character sets is smaller than the
    # two sets combined.
    return 'YES' if len(cset1) + len(cset2) != len(cset1 | cset2) else 'NO'
def twoStrings(s1, s2):
if len(s1) >= len(s2):
ls = s1
ss = s2
else:
ls = s2
ss = s1
cset = set()
for c in ls:
cset.add(c)
for c in ss:
if c in cset:
return 'YES'
return 'NO'
import unittest
class FAT(unittest.TestCase):
def setUp(self):
pass
def test_01(self):
tdatas = [
('hello', 'world', 'YES'),
('hi', 'world', 'NO')
]
for s1, s2, a in tdatas:
r = twoStrings(s1, s2)
self.assertEqual(a, r, 'Exp={}; Real={}'.format(a, r))
|
StarcoderdataPython
|
1754273
|
<filename>trmmlib/products.py
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: products
# Purpose:
#
# Authors: <NAME>
#
# Created: 2015-11-6
# Copyright: (c) <NAME>
# Licence: The MIT License
#-------------------------------------------------------------------------------
#!/usr/bin/env python
import wradlib
import numpy as np
import gc
import sys
def read_trmm(f):
"""Read TRMM data that comes on NetCDF.
Parameters
----------
f : string (TRMM file path)
Returns
-------
out : X, Y, R
Two dimensional arrays of longitudes, latitudes, and rainfall
"""
data = wradlib.io.read_generic_netcdf(f)
x = data["variables"]["longitude"]["data"]
y = data["variables"]["latitude"]["data"]
X, Y = np.meshgrid(x,y)
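    # Shift longitudes from the [0, 360) convention to [-180, 180) and roll the
    # rain field by half of its 1440 columns so it stays aligned with the
    # shifted coordinates.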
X = X - 180.
R = data["variables"]["r"]["data"][0]
R = np.roll(R,720,axis=1)
return X, Y, R
def read_trmm_bin(f):
"""Read TRMM data that comes as binary data (bin).
Parameters
----------
f : string (TRMM file path)
Returns
-------
out : X, Y, R
Two dimensional arrays of longitudes, latitudes, and rainfall
"""
nlat = 480
## nlon = 1440
# Read data
R = np.fromfile(f, dtype="f4")
if sys.byteorder=="little":
R = R.byteswap()
R = np.reshape(R, (1440,480), order="F")
R = np.rot90(R)
R = np.roll(R, 720, axis=1)
# Make grid
y = np.arange(59.875, 59.875-nlat*0.25, -0.25)
x = np.arange(0, 360, 0.25) - (180.-0.25/2)
X, Y = np.meshgrid(x,y)
# R = np.roll(R,720,axis=1)
return X, Y, R
def read_imerg(f, var = "Grid/precipitationCal", meshgrid=True):
"""Read IMERG data that comes on HDF5.
Parameters
----------
f : string (IMERG file path)
var : string
The variable to be extracted from the HDF5 file
Returns
-------
out : X, Y, R
Two dimensional arrays of longitudes, latitudes, and rainfall
"""
data = wradlib.io.read_generic_hdf5(f)
y = data["Grid/lat"]["data"]
x = data["Grid/lon"]["data"]
if meshgrid:
x, y = np.meshgrid(x,y)
# X = X - 180.
var = data[var]["data"].T
# var = np.roll(var,len(x)/2,axis=1)
return x, y, var
def read_imerg_custom_h5(f, meshgrid=True):
"""Read our own custom daily product.
"""
data, meta = wradlib.io.from_hdf5(f)
y = meta["y"]
x = meta["x"]
if meshgrid:
x, y = np.meshgrid(x,y)
return x, y, data
if __name__ == '__main__':
X, Y, R = read_imerg(r"X:\gpm\imerg\2014\06\09\3B-HHR.MS.MRG.3IMERG.20140609-S000000-E002959.0000.V03D.HDF5")
|
StarcoderdataPython
|
1664899
|
<filename>locuspocus/locus/attrs.py
class LocusAttrs:
# a restricted dict interface to attributes
def __init__(self, attrs=None):
self._attrs = attrs
def __len__(self):
if self.empty:
return 0
else:
return len(self._attrs)
def __eq__(self, other):
if self.empty and other.empty:
return True
elif len(self) != len(other):
# Short circuit on length
return False
else:
return sorted(self.items()) == sorted(other.items())
@property
def empty(self):
if self._attrs is None:
return True
else:
return False
def keys(self):
if self.empty:
return []
else:
return self._attrs.keys()
def values(self):
if self.empty:
return []
else:
return self._attrs.values()
def items(self):
if self.empty:
return {}
else:
return self._attrs.items()
def __contains__(self, key):
if self.empty:
return False
return key in self._attrs
def __getitem__(self, key):
if self.empty:
raise KeyError()
return self._attrs[key]
def __setitem__(self, key, val):
if self.empty:
self._attrs = {}
self._attrs[key] = val
def __repr__(self):
if self.empty:
return repr({})
return repr(self._attrs)
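
# A minimal usage sketch of the lazily created backing dict:
#
#   attrs = LocusAttrs()        # empty: len(attrs) == 0, keys() == []
#   attrs['color'] = 'purple'   # first assignment creates the dict
#   'color' in attrs            # -> True
#   attrs['missing']            # -> raises KeyError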
|
StarcoderdataPython
|
3277406
|
# -*- coding: utf-8 -*-
from __future__ import division, unicode_literals
import collections
import re
from . import config
from . import lexers
from .messages import *
from .htmlhelpers import *
from .widlparser.widlparser import parser
ColoredText = collections.namedtuple('ColoredText', ['text', 'color'])
class IDLUI(object):
def warn(self, msg):
die("{0}", msg.rstrip())
class HighlightMarker(object):
# Just applies highlighting classes to IDL stuff.
def markupTypeName(self, text, construct):
return ('<span class=n>', '</span>')
def markupName(self, text, construct):
return ('<span class=nv>', '</span>')
def markupKeyword(self, text, construct):
return ('<span class=kt>', '</span>')
def markupEnumValue(self, text, construct):
return ('<span class=s>', '</span>')
def addSyntaxHighlighting(doc):
try:
import pygments as pyg
from pygments.lexers import get_lexer_by_name
from pygments import formatters
except ImportError:
die("Bikeshed now uses Pygments for syntax highlighting.\nPlease run `$ sudo pip install pygments` from your command line.")
return
customLexers = {
"css": lexers.CSSLexer()
}
def highlight(el, lang):
text = textContent(el)
if lang in ["idl", "webidl"]:
widl = parser.Parser(text, IDLUI())
marker = HighlightMarker()
nested = parseHTML(unicode(widl.markup(marker)))
coloredText = collections.deque()
for n in childNodes(flattenHighlighting(nested)):
if isElement(n):
coloredText.append(ColoredText(textContent(n), n.get('class')))
else:
coloredText.append(ColoredText(n, None))
else:
if lang in customLexers:
lexer = customLexers[lang]
else:
try:
lexer = get_lexer_by_name(lang, encoding="utf-8", stripAll=True)
except pyg.util.ClassNotFound:
die("'{0}' isn't a known syntax-highlighting language. See http://pygments.org/docs/lexers/. Seen on:\n{1}", lang, outerHTML(el), el=el)
return
coloredText = parsePygments(pyg.highlight(text, lexer, formatters.RawTokenFormatter()))
# empty span at beginning
# extra linebreak at the end
mergeHighlighting(el, coloredText)
addClass(el, "highlight")
highlightingOccurred = False
if find("pre.idl, xmp.idl", doc) is not None:
highlightingOccurred = True
def translateLang(lang):
# Translates some names to ones Pygment understands
if lang == "aspnet":
return "aspx-cs"
if lang in ["markup", "svg"]:
return "html"
return lang
# Translate Prism-style highlighting into Pygment-style
for el in findAll("[class*=language-], [class*=lang-]", doc):
match = re.search("(?:lang|language)-(\w+)", el.get("class"))
if match:
el.set("highlight", match.group(1))
# Highlight all the appropriate elements
for el in findAll("xmp, pre, code", doc):
attr, lang = closestAttr(el, "nohighlight", "highlight")
if attr == "nohighlight":
continue
if attr is None:
if el.tag in ["pre", "xmp"] and hasClass(el, "idl"):
if isNormative(el):
# Already processed/highlighted.
continue
lang = "idl"
elif doc.md.defaultHighlight is None:
continue
else:
lang = doc.md.defaultHighlight
highlight(el, translateLang(lang))
highlightingOccurred = True
if highlightingOccurred:
# To regen the styles, edit and run the below
#from pygments import token
#from pygments import style
#class PrismStyle(style.Style):
# default_style = "#000000"
# styles = {
# token.Name: "#0077aa",
# token.Name.Tag: "#669900",
# token.Name.Builtin: "noinherit",
# token.Name.Variable: "#222222",
# token.Name.Other: "noinherit",
# token.Operator: "#999999",
# token.Punctuation: "#999999",
# token.Keyword: "#990055",
# token.Literal: "#000000",
# token.Literal.Number: "#000000",
# token.Literal.String: "#a67f59",
# token.Comment: "#708090"
# }
#print formatters.HtmlFormatter(style=PrismStyle).get_style_defs('.highlight')
doc.extraStyles['style-syntax-highlighting'] += '''
.highlight:not(.idl) { background: hsl(24, 20%, 95%); }
code.highlight { padding: .1em; border-radius: .3em; }
pre.highlight, pre > code.highlight { display: block; padding: 1em; margin: .5em 0; overflow: auto; border-radius: 0; }
.highlight .c { color: #708090 } /* Comment */
.highlight .k { color: #990055 } /* Keyword */
.highlight .l { color: #000000 } /* Literal */
.highlight .n { color: #0077aa } /* Name */
.highlight .o { color: #999999 } /* Operator */
.highlight .p { color: #999999 } /* Punctuation */
.highlight .cm { color: #708090 } /* Comment.Multiline */
.highlight .cp { color: #708090 } /* Comment.Preproc */
.highlight .c1 { color: #708090 } /* Comment.Single */
.highlight .cs { color: #708090 } /* Comment.Special */
.highlight .kc { color: #990055 } /* Keyword.Constant */
.highlight .kd { color: #990055 } /* Keyword.Declaration */
.highlight .kn { color: #990055 } /* Keyword.Namespace */
.highlight .kp { color: #990055 } /* Keyword.Pseudo */
.highlight .kr { color: #990055 } /* Keyword.Reserved */
.highlight .kt { color: #990055 } /* Keyword.Type */
.highlight .ld { color: #000000 } /* Literal.Date */
.highlight .m { color: #000000 } /* Literal.Number */
.highlight .s { color: #a67f59 } /* Literal.String */
.highlight .na { color: #0077aa } /* Name.Attribute */
.highlight .nc { color: #0077aa } /* Name.Class */
.highlight .no { color: #0077aa } /* Name.Constant */
.highlight .nd { color: #0077aa } /* Name.Decorator */
.highlight .ni { color: #0077aa } /* Name.Entity */
.highlight .ne { color: #0077aa } /* Name.Exception */
.highlight .nf { color: #0077aa } /* Name.Function */
.highlight .nl { color: #0077aa } /* Name.Label */
.highlight .nn { color: #0077aa } /* Name.Namespace */
.highlight .py { color: #0077aa } /* Name.Property */
.highlight .nt { color: #669900 } /* Name.Tag */
.highlight .nv { color: #222222 } /* Name.Variable */
.highlight .ow { color: #999999 } /* Operator.Word */
.highlight .mb { color: #000000 } /* Literal.Number.Bin */
.highlight .mf { color: #000000 } /* Literal.Number.Float */
.highlight .mh { color: #000000 } /* Literal.Number.Hex */
.highlight .mi { color: #000000 } /* Literal.Number.Integer */
.highlight .mo { color: #000000 } /* Literal.Number.Oct */
.highlight .sb { color: #a67f59 } /* Literal.String.Backtick */
.highlight .sc { color: #a67f59 } /* Literal.String.Char */
.highlight .sd { color: #a67f59 } /* Literal.String.Doc */
.highlight .s2 { color: #a67f59 } /* Literal.String.Double */
.highlight .se { color: #a67f59 } /* Literal.String.Escape */
.highlight .sh { color: #a67f59 } /* Literal.String.Heredoc */
.highlight .si { color: #a67f59 } /* Literal.String.Interpol */
.highlight .sx { color: #a67f59 } /* Literal.String.Other */
.highlight .sr { color: #a67f59 } /* Literal.String.Regex */
.highlight .s1 { color: #a67f59 } /* Literal.String.Single */
.highlight .ss { color: #a67f59 } /* Literal.String.Symbol */
.highlight .vc { color: #0077aa } /* Name.Variable.Class */
.highlight .vg { color: #0077aa } /* Name.Variable.Global */
.highlight .vi { color: #0077aa } /* Name.Variable.Instance */
.highlight .il { color: #000000 } /* Literal.Number.Integer.Long */
'''
def mergeHighlighting(el, coloredText):
# Merges a tree of Pygment-highlighted HTML
# into the original element's markup.
# This works because Pygment effectively colors each character with a highlight class,
# merging them together into runs of text for convenience/efficiency only;
# the markup structure is a flat list of sibling elements containing raw text
# (and maybe some un-highlighted raw text between them).
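    # For example, if the element's raw text is "foo(" and coloredText holds
    # ColoredText("foo", "nf") followed by ColoredText("(", "p"), colorizeText()
    # re-emits "foo" inside <span class=nf> and "(" inside <span class=p>.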
def colorizeEl(el, coloredText):
for node in childNodes(el, clear=True):
if isElement(node):
appendChild(el, colorizeEl(node, coloredText))
else:
appendChild(el, *colorizeText(node, coloredText))
return el
def colorizeText(text, coloredText):
nodes = []
while text and coloredText:
nextColor = coloredText.popleft()
if len(nextColor.text) <= len(text):
if nextColor.color is None:
nodes.append(nextColor.text)
else:
nodes.append(E.span({"class":nextColor.color}, nextColor.text))
text = text[len(nextColor.text):]
else: # Need to use only part of the nextColor node
if nextColor.color is None:
nodes.append(text)
else:
nodes.append(E.span({"class":nextColor.color}, text))
# Truncate the nextColor text to what's unconsumed,
# and put it back into the deque
nextColor = ColoredText(nextColor.text[len(text):], nextColor.color)
coloredText.appendleft(nextColor)
text = ''
return nodes
colorizeEl(el, coloredText)
def flattenHighlighting(el):
# Given a highlighted chunk of markup that is "nested",
# flattens it into a sequence of text and els with just text,
# by merging classes upward.
container = E.div()
for node in childNodes(el):
if not isElement(node):
# raw text
appendChild(container, node)
elif not hasChildElements(node):
# el with just text
appendChild(container, node)
else:
# el with internal structure
overclass = el.get("class", "")
flattened = flattenHighlighting(node)
for subnode in childNodes(flattened):
if isElement(subnode):
addClass(subnode, overclass)
appendChild(container, subnode)
else:
appendChild(container, E.span({"class":overclass},subnode))
return container
def parsePygments(text):
tokenClassFromName = {
"Token.Comment": "c",
"Token.Keyword": "k",
"Token.Literal": "l",
"Token.Name": "n",
"Token.Operator": "o",
"Token.Punctuation": "p",
"Token.Comment.Multiline": "cm",
"Token.Comment.Preproc": "cp",
"Token.Comment.Single": "c1",
"Token.Comment.Special": "cs",
"Token.Keyword.Constant": "kc",
"Token.Keyword.Declaration": "kd",
"Token.Keyword.Namespace": "kn",
"Token.Keyword.Pseudo": "kp",
"Token.Keyword.Reserved": "kr",
"Token.Keyword.Type": "kt",
"Token.Literal.Date": "ld",
"Token.Literal.Number": "m",
"Token.Literal.String": "s",
"Token.Name.Attribute": "na",
"Token.Name.Class": "nc",
"Token.Name.Constant": "no",
"Token.Name.Decorator": "nd",
"Token.Name.Entity": "ni",
"Token.Name.Exception": "ne",
"Token.Name.Function": "nf",
"Token.Name.Label": "nl",
"Token.Name.Namespace": "nn",
"Token.Name.Property": "py",
"Token.Name.Tag": "nt",
"Token.Name.Variable": "nv",
"Token.Operator.Word": "ow",
"Token.Literal.Number.Bin": "mb",
"Token.Literal.Number.Float": "mf",
"Token.Literal.Number.Hex": "mh",
"Token.Literal.Number.Integer": "mi",
"Token.Literal.Number.Oct": "mo",
"Token.Literal.String.Backtick": "sb",
"Token.Literal.String.Char": "sc",
"Token.Literal.String.Doc": "sd",
"Token.Literal.String.Double": "s2",
"Token.Literal.String.Escape": "se",
"Token.Literal.String.Heredoc": "sh",
"Token.Literal.String.Interpol": "si",
"Token.Literal.String.Other": "sx",
"Token.Literal.String.Regex": "sr",
"Token.Literal.String.Single": "s1",
"Token.Literal.String.Symbol": "ss",
"Token.Name.Variable.Class": "vc",
"Token.Name.Variable.Global": "vg",
"Token.Name.Variable.Instance": "vi",
"Token.Literal.Number.Integer.Long": "il"
}
coloredText = collections.deque()
for line in text.split("\n"):
if not line:
continue
tokenName,_,tokenTextRepr = line.partition("\t")
tokenText = eval(tokenTextRepr)
if not tokenText:
continue
if tokenName == "Token.Text":
tokenClass = None
else:
tokenClass = tokenClassFromName.get(tokenName, None)
coloredText.append(ColoredText(tokenText, tokenClass))
return coloredText
|
StarcoderdataPython
|
47333
|
<filename>clone_scanner.py<gh_stars>0
# -*- coding: utf-8 -*-
#
# Clone Scanner, version 1.3 for WeeChat version 0.3
# Latest development version: https://github.com/FiXato/weechat_scripts
#
# A Clone Scanner that can manually scan channels and
# automatically scans joins for users on the channel
# with multiple nicknames from the same host.
#
# Upon join by a user, the user's host is compared to the infolist of
# already connected users to see if they are already online from
# another nickname. If the user is a clone, it will report it.
# With the '/clone_scanner scan' command you can manually scan a chan.
#
# See /set plugins.var.python.clone_scanner.* for all possible options
# Use the brilliant iset.pl plugin (/weeget install iset) to see what they do
# Or check the sourcecode below.
#
# Example output for an on-join scan result:
# 21:32:46 ▬▬▶ FiXato_Odie (<EMAIL>) has joined #lounge
# 21:32:46 FiXato_Odie is already on the channel as FiXato!<EMAIL> and FiX!<EMAIL>
#
# Example output for a manual scan:
# 21:34:44 fixato.net is online from 3 nicks:
# 21:34:44 - FiXato!<EMAIL>
# 21:34:44 - FiX!<EMAIL>
# 21:34:44 - FiX<EMAIL>_Odie!<EMAIL>
#
## History:
### 2011-09-11: FiXato:
#
# * version 0.1: initial release.
# * Added an on-join clone scan. Any user that joins a channel will be
# matched against users already on the channel.
#
# * version 0.2: manual clone scan
# * Added a manual clone scan via /clone_scanner scan
# you can specify a target channel with:
# /clone_scanner scan #myChannelOnCurrentServer
# or:
# /clone_scanner scan Freenode.#myChanOnSpecifiedNetwork
# * Added completion
#
### 2011-09-12: FiXato:
#
# * version 0.3: Refactor galore
# * Refactored some code. Codebase should be DRYer and clearer now.
# * Manual scan report lists by host instead of nick now.
# * Case-insensitive host-matching
# * Bugfixed the infolist memleak.
# * on-join scanner works again
# * Output examples added to the comments
#
### 2011-09-19
# * version 0.4: Option galore
# * Case-insensitive buffer lookup fix.
# * Made most messages optional through settings.
# * Made on-join alert and clone report key a bit more configurable.
# * Added formatting options for on-join alerts.
# * Added format_message helper method that accepts multiple whitespace-separated weechat.color() options.
# * Added formatting options for join messages
# * Added formatting options for clone reports
# * Added format_from_config helper method that reads the given formatting key from the config
#
# * version 0.5: cs_buffer refactoring
# * dropping the manual cs_create_buffer call in favor for a cs_get_buffer() method
#
### 2012-02-10: FiXato:
#
# * version 0.6: Stop shoving that buffer in my face!
# * The clone_scanner buffer should no longer pop up by itself when you load the script.
# It should only pop up now when you actually a line needs to show up in the buffer.
#
# * version 0.7: .. but please pop it up in my current window when I ask for it
# * Added setting plugins.var.python.clone_scanner.autofocus
# This will autofocus the clone_scanner buffer in the current window if another window isn't
# already showing it, and of course only when the clone_scanner buffer is triggered
#
### 2012-02-10: FiXato:
#
# * version 0.8: .. and only when it is first created..
# * Prevents the buffer from being focused every time there is activity in it and not being shown in a window.
#
### 2012-04-01: FiXato:
#
# * version 0.9: Hurrah for bouncers...
# * Added the option plugins.var.python.clone_scanner.compare_idents
# Set it to 'on' if you don't want people with different idents to be marked as clones.
# Useful on channels with bouncers.
#
### 2012-04-02: FiXato:
#
# * version 1.0: Bugfix
# * Fixed the on-join scanner bug introduced by the 0.9 release.
# I was not properly comparing the new [email protected] key in all places yet.
# Should really have tested this better ><
#
### 2012-04-03: FiXato:
#
# * version 1.1: Stop being so sensitive!
# * Continuing to fix the on-join scanner bugs introduced by the 0.9 release.
# The [email protected] dict key wasn't being lowercased for comparison in the on-join scan.
#
# * version 1.2: So shameless!
# * Added shameless advertising for my script through /clone_scanner advertise
#
### 2013-04-09: FiXato:
# * version 1.3: Such a killer rabbit
# * Thanks to <NAME> aka killerrabbit clone_scanner.py now supports:
# * local channels (&-prefixed)
# * nameless channels (just # or &)
#
## Acknowledgements:
# * Sebastien "Flashcode" Helleu, for developing the kick-ass chat/IRC
# client WeeChat
# * ArZa, whose kickban.pl script helped me get started with using the
# infolist results.
# * LayBot, for requesting the ident comparison
# * Curtis "killerrabbit" Sorensen, for sending in two pull-requests,
# adding support for local and nameless channels.
#
## TODO:
# - Add option to enable/disable public clone reporting aka msg channels
# - Add option to enable/disable scanning on certain channels/networks
# - Add cross-channel clone scan
# - Add cross-server clone scan
#
## Copyright (c) 2011-2012 <NAME>. "FiXato" Slagter,
# <FiXato [at] Gmail [dot] com>
# http://google.com/profiles/FiXato
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
SCRIPT_NAME = "clone_scanner"
SCRIPT_AUTHOR = "<NAME>. 'FiXato' Slagter <fixato [at] gmail [dot] com>"
SCRIPT_VERSION = "1.3"
SCRIPT_LICENSE = "MIT"
SCRIPT_DESC = "A Clone Scanner that can manually scan channels and automatically scans joins for users on the channel with multiple nicknames from the same host."
SCRIPT_COMMAND = "clone_scanner"
SCRIPT_CLOSE_CB = "cs_close_cb"
import_ok = True
try:
import weechat
except ImportError:
print "This script must be run under WeeChat."
import_ok = False
import re
cs_buffer = None
cs_settings = (
("autofocus", "on", "Focus the clone_scanner buffer in the current window if it isn't already displayed by a window."),
("compare_idents", "off", "Match against <EMAIL> instead of just the hostname. Useful if you don't want different people from bouncers marked as clones"),
("display_join_messages", "off", "Display all joins in the clone_scanner buffer"),
("display_onjoin_alert_clone_buffer", "on", "Display an on-join clone alert in the clone_scanner buffer"),
("display_onjoin_alert_target_buffer", "on", "Display an on-join clone alert in the buffer where the clone was detected"),
("display_onjoin_alert_current_buffer", "off", "Display an on-join clone alert in the current buffer"),
("display_scan_report_clone_buffer", "on", "Display manual scan reports in the clone buffer"),
("display_scan_report_target_buffer", "off", "Display manual scan reports in the buffer of the scanned channel"),
("display_scan_report_current_buffer", "on", "Display manual scan reports in the current buffer"),
("clone_report_key", "mask", "Which 'key' to display in the clone report: 'mask' for full hostmasks, or 'nick' for nicks"),
("clone_onjoin_alert_key", "mask", "Which 'key' to display in the on-join alerts: 'mask' for full hostmasks, or 'nick' for nicks"),
("colors.onjoin_alert.message", "red", "The on-join clone alert's message colour. Formats are space separated."),
("colors.onjoin_alert.nick", "bold red", "The on-join clone alert's nick colour. Formats are space separated. Note: if you have colorize_nicks, this option might not work as expected."),
("colors.onjoin_alert.channel", "red", "The on-join clone alert's channel colour. Formats are space separated."),
("colors.onjoin_alert.matches", "bold red", "The on-join clone alert's matches (masks or nicks) colour. Formats are space separated. Note: if you have colorize_nicks, this option might not work as expected."),
("colors.join_messages.message", "chat", "The base colour for the join messages."),
("colors.join_messages.nick", "bold", "The colour for the 'nick'-part of the join messages. Note: if you have colorize_nicks, this option might not always work as expected."),
("colors.join_messages.identhost", "chat", "The colour for the 'ident@host'-part of the join messages."),
("colors.join_messages.channel", "bold", "The colour for the 'channel'-part of the join messages."),
("colors.clone_report.header.message", "chat", "The colour of the clone report header."),
("colors.clone_report.header.number_of_hosts", "bold", "The colour of the number of hosts in the clone report header."),
("colors.clone_report.header.channel", "bold", "The colour of the channel name in the clone report header."),
("colors.clone_report.subheader.message", "chat", "The colour of the clone report subheader."),
("colors.clone_report.subheader.host", "bold", "The colour of the host in the clone report subheader."),
("colors.clone_report.subheader.number_of_clones", "bold", "The colour of the number of clones in the clone report subheader."),
("colors.clone_report.clone.message", "chat", "The colour of the clone hit in the clone report message."),
("colors.clone_report.clone.match", "chat", "The colour of the match details (masks or nicks) in the clone report."),
("colors.mask.nick", "bold", "The formatting of the nick in the match mask."),
("colors.mask.identhost", "", "The formatting of the identhost in the match mask."),
)
def get_validated_key_from_config(setting):
key = weechat.config_get_plugin(setting)
if key != 'mask' and key != 'nick':
weechat.prnt("", "Key %s not found. Valid settings are 'nick' and 'mask'. Reverted the setting to 'mask'" % key)
weechat.config_set_plugin("clone_report_key", "mask")
key = "mask"
return key
def format_message(msg, formats, reset_color='chat'):
if type(formats) == str:
formats = formats.split()
formatted_message = msg
needs_color_reset = False
for format in formats:
if format in ['bold', 'reverse', 'italic', 'underline']:
end_format = '-%s' % format
else:
needs_color_reset = True
end_format = ""
formatted_message = "%s%s%s" % (weechat.color(format), formatted_message, weechat.color(end_format))
if needs_color_reset:
formatted_message += weechat.color(reset_color)
return formatted_message
def format_from_config(msg, config_option):
return format_message(msg, weechat.config_get_plugin(config_option))
def on_join_scan_cb(data, signal, signal_data):
network = signal.split(',')[0]
joined_nick = weechat.info_get("irc_nick_from_host", signal_data)
join_match_data = re.match(':[^!]+!([^@]+@(\S+)) JOIN :?([#&]\S*)', signal_data)
parsed_ident_host = join_match_data.group(1).lower()
parsed_host = join_match_data.group(2).lower()
if weechat.config_get_plugin("compare_idents") == "on":
hostkey = parsed_ident_host
else:
hostkey = parsed_host
chan_name = join_match_data.group(3)
network_chan_name = "%s.%s" % (network, chan_name)
chan_buffer = weechat.info_get("irc_buffer", "%s,%s" % (network, chan_name))
if not chan_buffer:
print "No IRC channel buffer found for %s" % network_chan_name
return weechat.WEECHAT_RC_OK
if weechat.config_get_plugin("display_join_messages") == "on":
message = "%s%s%s%s%s" % (
format_from_config(joined_nick, "colors.join_messages.nick"),
format_from_config("!", "colors.join_messages.message"),
format_from_config(parsed_ident_host, "colors.join_messages.identhost"),
format_from_config(" JOINed ", "colors.join_messages.message"),
format_from_config(network_chan_name, "colors.join_messages.channel"),
)
#Make sure message format is also applied if no formatting is given for nick
message = format_from_config(message, "colors.join_messages.message")
weechat.prnt(cs_get_buffer(), message)
clones = get_clones_for_buffer("%s,%s" % (network, chan_name), hostkey)
if clones:
key = get_validated_key_from_config("clone_onjoin_alert_key")
filtered_clones = filter(lambda clone: clone['nick'] != joined_nick, clones[hostkey])
match_strings = map(lambda m: format_from_config(m[key], "colors.onjoin_alert.matches"), filtered_clones)
join_string = format_from_config(' and ',"colors.onjoin_alert.message")
masks = join_string.join(match_strings)
message = "%s %s %s %s %s" % (
format_from_config(joined_nick, "colors.onjoin_alert.nick"),
format_from_config("is already on", "colors.onjoin_alert.message"),
format_from_config(network_chan_name, "colors.onjoin_alert.channel"),
format_from_config("as", "colors.onjoin_alert.message"),
masks
)
message = format_from_config(message, weechat.config_get_plugin("colors.onjoin_alert.message"))
if weechat.config_get_plugin("display_onjoin_alert_clone_buffer") == "on":
weechat.prnt(cs_get_buffer(),message)
if weechat.config_get_plugin("display_onjoin_alert_target_buffer") == "on":
weechat.prnt(chan_buffer, message)
if weechat.config_get_plugin("display_onjoin_alert_current_buffer") == "on":
weechat.prnt(weechat.current_buffer(),message)
return weechat.WEECHAT_RC_OK
def cs_get_buffer():
global cs_buffer
if not cs_buffer:
# Sets notify to 0 as this buffer does not need to be in hotlist.
cs_buffer = weechat.buffer_new("clone_scanner", "", \
"", SCRIPT_CLOSE_CB, "")
weechat.buffer_set(cs_buffer, "title", "Clone Scanner")
weechat.buffer_set(cs_buffer, "notify", "0")
weechat.buffer_set(cs_buffer, "nicklist", "0")
if weechat.config_get_plugin("autofocus") == "on":
if not weechat.window_search_with_buffer(cs_buffer):
weechat.command("", "/buffer " + weechat.buffer_get_string(cs_buffer,"name"))
return cs_buffer
def cs_close_cb(*kwargs):
""" A callback for buffer closing. """
global cs_buffer
#TODO: Ensure the clone_scanner buffer gets closed if its option is set and the script unloads
cs_buffer = None
return weechat.WEECHAT_RC_OK
def get_channel_from_buffer_args(buffer, args):
server_name = weechat.buffer_get_string(buffer, "localvar_server")
channel_name = args
if not channel_name:
channel_name = weechat.buffer_get_string(buffer, "localvar_channel")
match_data = re.match('\A(irc.)?([^.]+)\.([#&]\S*)\Z', channel_name)
if match_data:
channel_name = match_data.group(3)
server_name = match_data.group(2)
return server_name, channel_name
def get_clones_for_buffer(infolist_buffer_name, hostname_to_match=None):
matches = {}
infolist = weechat.infolist_get("irc_nick", "", infolist_buffer_name)
while(weechat.infolist_next(infolist)):
ident_hostname = weechat.infolist_string(infolist, "host")
host_matchdata = re.match('([^@]+)@(\S+)', ident_hostname)
if not host_matchdata:
continue
hostname = host_matchdata.group(2).lower()
ident = host_matchdata.group(1).lower()
if weechat.config_get_plugin("compare_idents") == "on":
hostkey = ident_hostname.lower()
else:
hostkey = hostname
if hostname_to_match and hostname_to_match.lower() != hostkey:
continue
nick = weechat.infolist_string(infolist, "name")
matches.setdefault(hostkey,[]).append({
'nick': nick,
'mask': "%s!%s" % (
format_from_config(nick, "colors.mask.nick"),
format_from_config(ident_hostname, "colors.mask.identhost")),
'ident': ident,
'ident_hostname': ident_hostname,
'hostname': hostname,
})
weechat.infolist_free(infolist)
#Select only the results that have more than 1 match for a host
return dict((k, v) for (k, v) in matches.iteritems() if len(v) > 1)
def report_clones(clones, scanned_buffer_name, target_buffer=None):
# Default to clone_scanner buffer
if not target_buffer:
target_buffer = cs_get_buffer()
if clones:
clone_report_header = "%s %s %s%s" % (
format_from_config(len(clones), "colors.clone_report.header.number_of_hosts"),
format_from_config("hosts with clones were found on", "colors.clone_report.header.message"),
format_from_config(scanned_buffer_name, "colors.clone_report.header.channel"),
format_from_config(":", "colors.clone_report.header.message"),
)
clone_report_header = format_from_config(clone_report_header, "colors.clone_report.header.message")
weechat.prnt(target_buffer, clone_report_header)
for (host, clones) in clones.iteritems():
host_message = "%s %s %s %s" % (
format_from_config(host, "colors.clone_report.subheader.host"),
format_from_config("is online from", "colors.clone_report.subheader.message"),
format_from_config(len(clones), "colors.clone_report.subheader.number_of_clones"),
format_from_config("nicks:", "colors.clone_report.subheader.message"),
)
host_message = format_from_config(host_message, "colors.clone_report.subheader.message")
weechat.prnt(target_buffer, host_message)
for user in clones:
key = get_validated_key_from_config("clone_report_key")
clone_message = "%s%s" % (" - ", format_from_config(user[key], "colors.clone_report.clone.match"))
clone_message = format_from_config(clone_message,"colors.clone_report.clone.message")
weechat.prnt(target_buffer, clone_message)
else:
weechat.prnt(target_buffer, "No clones found on %s" % scanned_buffer_name)
def cs_command_main(data, buffer, args):
if args[0:4] == 'scan':
server_name, channel_name = get_channel_from_buffer_args(buffer, args[5:])
clones = get_clones_for_buffer('%s,%s' % (server_name, channel_name))
if weechat.config_get_plugin("display_scan_report_target_buffer") == "on":
target_buffer = weechat.info_get("irc_buffer", "%s,%s" % (server_name, channel_name))
report_clones(clones, '%s.%s' % (server_name, channel_name), target_buffer)
if weechat.config_get_plugin("display_scan_report_clone_buffer") == "on":
report_clones(clones, '%s.%s' % (server_name, channel_name))
if weechat.config_get_plugin("display_scan_report_current_buffer") == "on":
report_clones(clones, '%s.%s' % (server_name, channel_name), weechat.current_buffer())
elif args[0:9] == 'advertise':
weechat.command("", "/input insert /me is using FiXato's CloneScanner v%s for WeeChat. Get the latest version from: https://github.com/FiXato/weechat_scripts/blob/master/clone_scanner.py" % SCRIPT_VERSION)
return weechat.WEECHAT_RC_OK
def cs_set_default_settings():
global cs_settings
# Set default settings
for option, default_value, description in cs_settings:
if not weechat.config_is_set_plugin(option):
weechat.config_set_plugin(option, default_value)
version = weechat.info_get("version_number", "") or 0
if int(version) >= 0x00030500:
weechat.config_set_desc_plugin(option, description)
if __name__ == "__main__" and import_ok:
if weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION,
SCRIPT_LICENSE, SCRIPT_DESC, SCRIPT_CLOSE_CB, ""):
cs_set_default_settings()
cs_buffer = weechat.buffer_search("python", "clone_scanner")
weechat.hook_signal("*,irc_in2_join", "on_join_scan_cb", "")
weechat.hook_command(SCRIPT_COMMAND,
SCRIPT_DESC,
"[scan] [[plugin.][network.]channel] | [advertise] | [help]",
"the target_buffer can be: \n"
"- left out, so the current channel buffer will be scanned.\n"
"- a plain channel name, such as #weechat, in which case it will prefixed with the current network name\n"
"- a channel name prefixed with network name, such as Freenode.#weechat\n"
"- a channel name prefixed with plugin and network name, such as irc.freenode.#weechat\n"
"See /set plugins.var.python.clone_scanner.* for all possible configuration options",
" || scan %(buffers_names)"
" || advertise"
" || help",
"cs_command_main", "")
|
StarcoderdataPython
|
172321
|
<reponame>cheery/lever<filename>runtime/space/interface.py
import re
from rpython.rlib.objectmodel import compute_hash, specialize, always_inline
from rpython.rlib import jit, rgc
import space, weakref
class Object:
_immutable_fields_ = ['interface', 'custom_interface', 'flag', 'number', 'value', 'contents', 'data', 'string', 'iterator', 'arity', 'methods', 'default', 'cells']
__slots__ = []
__attrs__ = []
# The metaclass here takes care every object will get an interface.
# So programmer doesn't need to do that.
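    # For example, a subclass declared as BoundMethod is exposed with an
    # interface named u"bound_method", while L-prefixed classes such as
    # LTypeError drop the prefix and keep the name u"TypeError".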
class __metaclass__(type):
def __init__(cls, name, bases, dict):
if name not in ('Object', 'Interface', 'Null') and 'interface' not in dict:
cls.interface = Interface(
parent = cls.__bases__[0].interface,
name = re.sub("(.)([A-Z]+)", r"\1_\2", name).lower().decode('utf-8'),
methods = {})
if re.match("^L[A-Z]", name):
cls.interface.name = name[1:].decode('utf-8')
if name not in ('BoundMethod', 'Builtin'):
expose_internal_methods(cls.interface, dict)
def call(self, argv):
raise space.unwind(space.LTypeError(u"cannot call " + self.repr()))
def getitem(self, index):
raise space.unwind(space.LKeyError(self, index))
def setitem(self, index, value):
raise space.unwind(space.LKeyError(self, index))
def iter(self):
raise space.unwind(space.LTypeError(u"cannot iterate " + self.repr()))
def listattr(self):
listing = []
for name in self.__class__.interface.methods.keys():
listing.append(space.String(name))
return listing
def getattr(self, index):
method = self.__class__.interface.lookup_method(index)
if method is not None:
return BoundMethod(self, index, method)
else:
raise space.unwind(space.LAttributeError(self, index))
def setattr(self, index, value):
raise space.unwind(space.LAttributeError(self, index))
def callattr(self, name, argv):
return self.getattr(name).call(argv)
def getattr_or(self, index, default):
try:
return self.getattr(index)
except space.Unwinder as w:
if isinstance(w.exception, space.LAttributeError):
return default
raise
def contains(self, obj):
raise space.unwind(space.LTypeError(u"%s cannot contain" % self.repr()))
def repr(self):
return u"<%s>" % space.get_interface(self).name
def hash(self):
return compute_hash(self)
def eq(self, other):
return self is other
@classmethod
def instantiator(cls, fn):
def _instantiate_b_(interface, argv):
return fn(argv)
cls.interface.instantiate = _instantiate_b_
register_instantiator(cls.interface, fn)
return fn
@classmethod
def instantiator2(cls, decorator):
def _decorator_(fn):
fn = decorator(fn)
def _instantiate_wrapper_(interface, argv):
return fn(argv)
cls.interface.instantiate = _instantiate_wrapper_
register_instantiator(cls.interface, fn)
return fn
return _decorator_
@classmethod
def builtin_method(cls, fn):
from builtin import Builtin
builtin = Builtin(fn)
cls.interface.methods[builtin.name] = builtin
@classmethod
def method(cls, name, decorator):
def _decarotar_(fn):
from builtin import Builtin
builtin = Builtin(decorator(fn), name)
cls.interface.methods[builtin.name] = builtin
return fn
return _decarotar_
class Interface(Object):
_immutable_fields_ = ['instantiate?', 'methods', 'parent']
# Should add possibility to freeze the interface?
def __init__(self, parent, name, methods, instantiate=None):
assert isinstance(name, unicode)
self.parent = parent
self.name = name
self.instantiate = instantiate
self.methods = methods
self.doc = None
self.multimethods = {} # Describes which multimethods are defined for
# this interface. The record is described in the
# runtime/space/multimethod.py
self.multimethod_index = {}
self.weakref = WeakInterface(self)
def call(self, argv):
if self.instantiate is None:
if self.name == u'null':
raise space.unwind(space.LTypeError(u"cannot call null"))
raise space.unwind(space.LTypeError(u"cannot instantiate " + self.name))
return self.instantiate(self, argv)
def repr(self):
return self.name
def getattr(self, name):
if name == u"doc":
return null if self.doc is None else self.doc
method = self.lookup_method(name)
if method is not None:
return method
method = self.__class__.interface.lookup_method(name)
if method is not None:
return BoundMethod(self, name, method)
return Object.getattr(self, name)
@jit.elidable
def lookup_method(self, name):
this = self
method = this.methods.get(name, None)
while method is None and (this.parent not in (null, Interface.interface)):
this = this.parent
method = this.methods.get(name, None)
return method
def setattr(self, name, value):
if name == u"doc":
self.doc = value
return null
else:
return Object.setattr(self, name, value)
def listattr(self):
listing = []
listing.append(space.String(u"doc"))
for methodname in self.methods.keys():
listing.append(space.String(methodname))
return listing
class WeakInterface(object):
def __init__(self, interface):
self.weakref = weakref.ref(interface)
Interface.interface = Interface(None, u"interface", {})
Interface.interface.parent = Interface.interface
# TODO: explain myself, why parent of an interface is an interface?
# ... I forgot.. that happens.
# This approach ensures that we have Null.__class__.interface that points to Null.
# It allows the null to behave like an interface, except that null is its own interface.
class Null(Interface):
pass # The class was a late addition.. Apparently the behavior relied on a bug
# that was fixed somewhere on the way in the PyPy.
null = Null(None, u"null", {})
null.parent = null
Null.interface = null
Object.interface = Interface(null, u"object", {})
class BoundMethod(Object):
_immutable_fields_ = ['obj', 'name', 'methodfn']
def __init__(self, obj, name, methodfn):
self.obj = obj
self.name = name
self.methodfn = methodfn
def call(self, argv):
return self.methodfn.call([self.obj] + argv)
def getattr(self, name):
return self.methodfn.getattr(name)
def setattr(self, name, value):
return self.methodfn.setattr(name, value)
def listattr(self):
return self.methodfn.listattr()
def repr(self):
return u"%s.%s" % (self.obj.repr(), self.name)
# Notice that cast != instantiation.
# The distinction is very important.
cast_methods = {}
def cast_for(cls):
def _cast_decorator_(x):
cast_methods[cls] = x
return x
return _cast_decorator_
# Cast didn't appear to handle well as a class method, so I made this
# convenient table construct that uses default handling when conversion
# is not available.
# User objects will not have access to implement this method of casting.
# Userspace casting will be treated as separate problem.
# TODO: frame entry association could be "cool" here. So you would know
# where a cast attempt failed.
@specialize.arg(1, 2)
def cast(x, cls, info=u"something"):
if isinstance(x, cls): # This here means that cast won't change object
return x # if it is already correct type.
try:
fn = cast_methods[cls]
except KeyError as _:
raise space.unwind(space.LTypeError(u"expected %s is %s, got %s" % (
info, cls.interface.name, x.repr())))
res = fn(x)
if isinstance(res, cls):
return res
# TODO: Consider alternative ways to say it. :)
raise space.unwind(space.LTypeError(u"implicit conversion of %s at %s into %s returned %s" % (
x.repr(), info, cls.interface.name, res.repr())))
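# For example, cast(x, space.String, u"name") returns x untouched when it is
# already a String; otherwise it consults the converter registered with
# @cast_for(space.String), if any, and unwinds with LTypeError when there is none.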
# Variation of cast that accepts a null value and translates it to None.
@always_inline
@specialize.arg(1, 2)
def cast_n(x, cls, info=u"something"):
if x is null:
return None
return cast(x, cls, info)
# Yes, this is a hacky hack.
import builtin
def expose_internal_methods(interface, methods):
for name in methods:
if name in internal_methods:
interface.methods[u"+" + name.decode('utf-8')] = builtin.Builtin(
hate_them,
spec=internal_methods[name],
source_location=builtin.get_source_location(methods[name]))
internal_methods = {
u"call": (0, 0, True, ['argv'], None),
u"getitem": (0, 0, False, ['index'], None),
u"setitem": (0, 0, False, ['index', 'value'], None),
u"iter": (0, 0, False, [], None),
#u"listattr": (0, 0, False, []), # TODO: figure out what to do with these.
#u"getattr": (0, 0, False, ['name'], None), # these all are usually
#u"setattr": (0, 0, False, ['name', 'value'], None), # overloaded to handle attributes.
u"contains": (0, 0, False, ['value'], None),
u"repr": (0, 0, False, [], None),
u"hash": (0, 0, False, [], None),
}
def register_instantiator(interface, fn):
# You should not be able to call the true instantiator of an object.
# But calling a fake shouldn't harm either.
interface.methods[u"+init"] = builtin.Builtin(
(lambda argv: None),
spec=builtin.get_spec(fn),
source_location=builtin.get_source_location(fn))
# Internal methods help at documenting the system.
# TODO: rethink about lifting this eventually?
def hate_them(argv):
raise space.unwind(space.LError(u"hate them"))
#expose_internal_methods(Interface)
#expose_internal_methods(Object) # if I do this,
# every method will have internal_methods
# Besides, Object methods are placeholders.
# I doubt we miss these.
#expose_internal_methods(BoundMethod)
#expose_internal_methods(builtin.Builtin)
# When your good names are your best.
@Interface.instantiator2(builtin.signature(Object))
def Interface_init_is_cast(obj):
return space.get_interface(obj)
# Only active with the user-defined interfaces that may be 'lost'.
@Interface.method(u"+finalize", builtin.signature(Interface))
def Interface_finalize(self):
for record in self.multimethods:
record.multimethod.unregister_record(record)
|
StarcoderdataPython
|
157215
|
import lists, multiples
print "The LCM of all numbers below 10 is:\n" + str(1 * 2 * 2 * 2 * 3 * 3 * 5 * 7)
print "The LCM of all the numbers below 20 is:\n" + str(1 * 2 * 2 * 2 * 2 * 3 * 3 * 5 * 7 * 11 * 13 * 17 * 19)
|
StarcoderdataPython
|
25340
|
# build.py
import os
import platform
import sys
from distutils.core import setup
from torch.utils.ffi import create_extension
extra_compile_args = ['-std=c++11', '-fPIC']
warp_ctc_path = "../build"
if platform.system() == 'Darwin':
lib_ext = ".dylib"
else:
lib_ext = ".so"
if "WARP_CTC_PATH" in os.environ:
warp_ctc_path = os.environ["WARP_CTC_PATH"]
if not os.path.exists(os.path.join(warp_ctc_path, "libwarpctc" + lib_ext)):
print(("Could not find libwarpctc.so in {}.\n"
"Build warp-ctc and set WARP_CTC_PATH to the location of"
" libwarpctc.so (default is '../build')").format(warp_ctc_path))
sys.exit(1)
include_dirs = [os.path.realpath('../include')]
ffi = create_extension(
name='warp_ctc',
language='c++',
headers=['src/binding.h'],
sources=['src/binding.cpp'],
with_cuda=True,
include_dirs=include_dirs,
library_dirs=[os.path.realpath(warp_ctc_path)],
runtime_library_dirs=[os.path.realpath(warp_ctc_path)],
libraries=['warpctc'],
extra_compile_args=extra_compile_args)
ffi = ffi.distutils_extension()
ffi.name = 'warpctc_pytorch._warp_ctc'
setup(
name="warpctc_pytorch",
version="0.1",
packages=["warpctc_pytorch"],
ext_modules=[ffi],
)
|
StarcoderdataPython
|
1648660
|
import json
import boto3
from util.utils import get_recursively
from sizer.regression_sizer import RegressionSizer
from sizer.workflow_sizer import WorkflowSizer
class State:
def __init__(self, name, arn, state_dict, start, end):
self.name = name
self.arn = arn
self.state_dict = state_dict
self.start = start
self.end = end
self.next = []
def to(self, other):
self.next.append(other)
def parse_state_machine(definition, next=None):
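    # Walks an Amazon States Language definition (the dict loaded from the
    # workflow JSON). States are visited in reverse so that each 'Next' target
    # already exists in name_to_state when it is wired up; Parallel states
    # recurse into every branch and re-join at the parallel state's own 'Next'.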
states = []
items = reversed(list(definition['States'].items()))
for item in items:
state_name, state_dict = item[0], item[1]
end = state_dict.get('End', False)
state = State(name=state_name, arn=state_dict.get('Resource', ''), state_dict=state_dict, start=state_name == definition['StartAt'], end=end)
name_to_state[state_name] = state
if state_dict['Type'] == 'Parallel':
branch_next = name_to_state[state_dict['Next']]
for branch in state_dict['Branches']:
states += parse_state_machine(branch, branch_next)
for branch in state_dict['Branches']:
state.to(name_to_state[branch['StartAt']])
else:
if next and end:
state.to(next)
elif 'Next' in state_dict:
state.to(name_to_state[state_dict['Next']])
states.append(state)
return states
if __name__ == '__main__':
import sys
argv = sys.argv[1:]
#defaults
file = None
arn = None
payloads = None
elat_constraint=2000
sizes = [128,256,512,1024,2048,3096]
name_to_state = {}
if len(argv) <= 1:
print("Usage: <workflow-arn> <workflow.json> <elat_constraint> <payloads> <sizes>")
exit(0)
#TODO: needs content validation ;)
arn = argv[0]
file = argv[1]
if len(argv) > 2:
elat_constraint=argv[2]
if len(argv) > 3:
with open(argv[3]) as f:
payloads = json.load(f)
if len(argv) > 4:
sizes = argv[4:]
with open(file) as f:
json_content = json.load(f)
print(get_recursively(json_content, 'Resource'))
states = list(reversed(parse_state_machine(json_content)))
lambda_client = boto3.client('lambda')
lambdas = []
transitions = []
states_list = []
for state in states:
states_list.append(
{'name': state.name, 'arn':state.arn }
)
lambdas.append(state.arn)
for next in state.next:
transitions.append({
'from': state.name,
'to': next.name
})
initial_state = name_to_state[json_content['StartAt']]
lambdas = set(lambdas)
#TODO: force user interaction to halt if we do not want to sample
total_cost = 0
total_duration = 0
    # generate individual models
for f in lambdas:
p = {}
if payloads is not None and f in payloads:
p = payloads[f]
sizer = RegressionSizer(lambda_arn=f,payload=p,balanced_weight=0.5,sample_runs=5,memory_sizes=sizes)
result, logs, popt, cost = sizer.configure_function()
res = {
'arn':f,
'memorySize': result.memory_size,
'cost': result.cost,
'duration': result.duration,
'total_cost':cost,
}
print(json.dumps(res, indent=4))
        total_cost += cost
total_duration += result.duration
wfs = WorkflowSizer(arn,elat_constraint)
sizes,elat,cost = wfs.run()
res = {
'arn':arn,
'cost':cost,
'elat':elat,
'total_cost':total_cost,
'total_duration':total_duration,
'sizes':sizes,
}
print(json.dumps(res, indent=4))
|
StarcoderdataPython
|
4835498
|
# Generated by Django 2.0.3 on 2018-05-16 13:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0010_auto_20180509_2126'),
]
operations = [
migrations.CreateModel(
name='Mail',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(db_index=True, max_length=254, unique=True)),
],
),
]
|
StarcoderdataPython
|
4826050
|
# -*- coding: utf-8 -*-
# Coded in Python 3.8.7 64-Bit
import random
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.style.use('seaborn')
POPULATION_SIZE = 10
GENERATIONS = 300
CROSSOVER_RATE = 0.8
MUTATION_RATE = 0.6
NO_OF_GENES = 10 # This var is overwritten for FITNESS_CHOICE 3 & 4
NO_OF_PARENTS = 3
DEBUG = False
# Elitism - Keep the best 10 percent of any population to go to the next generation
# Fancied coding this as I hadn't done this in Task 1.
ELITISM = True
# The size of the random population sample when selecting parents.
# Must be less than (POPULATION_SIZE - NO_OF_PARENTS); see check_vars().
TOURNAMENT_SELECTION_NUMBER = 6
# The Combinatorial Optimisation problem the GA needs to solve.
FITNESS_CHOICE = 6
# Choice list:
# 1. Sum ones in a binary string - Minimization (constrained set being 0,1 (Base 2))
# 2. Sum ones in a binary string - Maximization (constrained set being 0,1 (Base 2))
# 3. Try to reach a target sentence (constrained set being the lowercase alphabet + some grammar)
# 4. Reaching a target number in Hex (constrained set being Base 16)
# 5. Knapsack problem (constrained by the randomly generated 'items')
# 6. Travelling Salesman (task 3a) (constrained by the randomly generated 'cities')
# Used for nice output
FITNESS_DICTIONARY = {
1: "Sum Ones (Minimisation)",
2: "Sum Ones (Maximisation)",
3: "Reaching a Target String",
4: "Reaching a Target Number (in Hexadecimal)",
5: "Knapsack Problem",
6: "Travelling Salesman"
}
# This is used to represent the different discrete domains, dependent on FITNESS_CHOICE
GENES = ''
# For FITNESS_CHOICE = 3
TARGET_SENTENCE = "Lorem ipsum dolor sit amet" # This needs to be larger than a single
# For FITNESS_CHOICE = 4
TARGET_NUMBER = 1286058684673 # This needs to be above 16 (as NO_OF_GENES must be >1)
# For FITNESS_CHOICE = 5
KNAPSACK_ITEMS = {} # Random every run. Key : <ITEM_ID>, Value: (<WEIGHT>,<VALUE>)
KNAPSACK_THRESHOLD = 20 # Max weight the knapsack can take
# For FITNESS_CHOICE = 6 (i.e. Travelling Salesman)
import City
BOARD_SIZE = 100 # The max x and y of where the cities can be
NO_OF_CITIES = NO_OF_GENES # Reads better in the code
def generate_population(size):
# For Populations that are constrained by binary
population = [ [ random.randint(0,1) for _ in range(NO_OF_GENES)] for _ in range(POPULATION_SIZE)]
if FITNESS_CHOICE == 3:
# For the population constrained by the characters in the "GENES" variable
population = [ [ random.choice(GENES) for _ in range(NO_OF_GENES)] for _ in range(POPULATION_SIZE)]
if FITNESS_CHOICE == 6:
# For the travelling salesman problem - create NO_OF_CITIES Cities, at random points.
population = [ [ City.City(x,random.randrange(0,BOARD_SIZE),random.randrange(0,BOARD_SIZE)) for x in range(NO_OF_CITIES)] for _ in range(POPULATION_SIZE) ]
return population
def compute_fitness(individual):
# A cleaner way of separating the different problems the GA can solve
switcher = {
1: sum_ones,
2: sum_ones,
3: match_sentence,
4: match_number,
5: knapsack,
6: travelling_salesman
}
func = switcher.get(FITNESS_CHOICE)
return func(individual)
def sum_ones(individual):
# Steps through individual and if it's equal to the digit it's supposed to be, add 1 to fitness
fitness = 0
# Reduce code base by having both the minimization and maximization problem use this function
digit = 0 if FITNESS_CHOICE == 1 else 1
for x in individual:
if x == digit:
fitness += 1
return fitness
def match_sentence(individual):
# Add one to fitness if the character in the individual is the same, and in same position, as target sentence
fitness = 0
for index in range(len(individual)):
if individual[index] == TARGET_SENTENCE[index]:
fitness += 1
return fitness
def match_number(individual):
fitness = 0
for x in range(len(individual)):
if individual[x] == TARGET_NUMBER[x]:
fitness += 1
return fitness
def knapsack(individual):
sackValue = 0
sackWeight = 0
# Find total value and total weight of knapsack
for x in range(len(individual)):
if individual[x] == 1:
sackWeight += KNAPSACK_ITEMS[x][0]
sackValue += KNAPSACK_ITEMS[x][1]
# If the weight is above the threshold, this isn't a viable solution
if sackWeight > KNAPSACK_THRESHOLD:
return 0
else:
return sackValue
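# Worked example (hypothetical items): with KNAPSACK_ITEMS = {0: (2, 100), 1: (3, 50)} and
# individual [1, 1], the sack weighs 5 <= KNAPSACK_THRESHOLD, so the fitness is 150; if the
# weight exceeded the threshold the fitness would be 0.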
def travelling_salesman(individual):
fitness = 0
# For each of the cities
for x in range(len(individual)):
try:
city1 = individual[x]
city2 = individual[x+1]
except IndexError:
city2 = individual[0]
# Add to the fitness, the cost from city 1 to city 2
fitness += city1.cost_to_city(city2)
# A higher fitness is better - therefore we need to inverse this
# (The '* 10000' is to make the numbers more meaningful)
fitness = abs(1/fitness) * 10000
return fitness
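# Worked example (hypothetical costs): for a 2-city tour whose two legs cost 10 each, the raw
# round-trip length is 20, so the fitness is abs(1/20) * 10000 = 500 -- shorter tours score higher.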
# Tournament Selection, as I did Roulette Wheel selection in Task 1.
def selection(population, fitness):
parents = []
# For each parent
for _ in range(NO_OF_PARENTS):
# Get TOURNAMENT_SELECTION_NUMBER random indexes of the population and their fitness
indicies = random.sample(range(0,len(population)),TOURNAMENT_SELECTION_NUMBER)
random_fitness = [ fitness[x] for x in indicies]
        # Add to parents the individual with the highest fitness of the randomly selected group
parents.append(population[indicies[random_fitness.index(max(random_fitness))]])
# And delete from the population
del population[indicies[random_fitness.index(max(random_fitness))]]
return parents
# Crossover selection depends on the problem the GA needs to solve.
def crossover(parents, no_of_offspring):
offspring = []
for k in range(no_of_offspring):
# Cyclic <-- That's a good word
p1_indx = k%NO_OF_PARENTS
p2_indx = (k+1)%NO_OF_PARENTS
if FITNESS_CHOICE == 6:
# Need ordered crossover for Travelling Salesman.
if random.random() < CROSSOVER_RATE:
indiv_offspring = [0 for _ in range(NO_OF_CITIES)] # Initialise it so I can change variables at specific places
# Get 2 points, crspt_1 is always going to be less than crspt_2
crspt_1 = random.randint(0,NO_OF_GENES-2)
crspt_2 = 0
while crspt_2 <= crspt_1:
crspt_2 = random.randint(1,NO_OF_GENES-1)
# Set the new offspring to have the cities between the crosspoints of parent 1
indiv_offspring[crspt_1:crspt_2] = parents[p1_indx][crspt_1:crspt_2]
# (Just trust me, this works)
# Start at Parent 2's 2nd cross point, add city if it's ID doesn't already appear in the new offspring
off_count = 0
par_count = 0
# Repeat until the new offspring has the required amount of cities.
while len([x for x in indiv_offspring if type(x) == City.City]) != NO_OF_CITIES:
# Next position of parent 2 to check
parent_index = (crspt_2+par_count)%NO_OF_CITIES
city_ids = [ x.id for x in indiv_offspring if type(x) == City.City]
# If parent 2's city ID at index 'parent_index' is not already in the new offspring
if not parents[p2_indx][parent_index].id in city_ids:
# Add the City in parent 2's parent_index, to the next available space in the new offspring
offspring_index = (crspt_2+off_count)%NO_OF_CITIES
indiv_offspring[offspring_index] = parents[p2_indx][parent_index]
off_count += 1
par_count += 1
# Useful Debug for confirming the crossover selection works.
#print(crspt_1)
#print(crspt_2)
#print([x.id for x in parents[p1_indx]])
#print([x.id for x in parents[p2_indx]])
#print([x.id for x in indiv_offspring])
else:
# The new offspring is the same as the parent, if the crossover rate comparison fails
indiv_offspring = parents[p1_indx]
offspring.append(indiv_offspring)
else:
# For non travelling-salesman problems, simple single-point crossover
cross_point = random.randint(1,NO_OF_GENES-1)
if random.random() < CROSSOVER_RATE:
offspring.append(list(parents[p1_indx][0:cross_point]) + list(parents[p2_indx][cross_point::]))
else:
offspring.append(parents[p1_indx])
return offspring
# Various mutation methods, depending on the fitness choice.
def mutation(individual):
if random.random() < MUTATION_RATE:
affected_gene = random.randint(0,NO_OF_GENES-1)
if FITNESS_CHOICE == 3 or FITNESS_CHOICE == 4:
# Set the affected gene to a randomly selected character
individual[affected_gene] = random.choice(GENES)
elif FITNESS_CHOICE == 6:
# Swap mutation - select another random gene and swap the two - required for Travelling salesman.
second_affected_gene = random.randint(0,NO_OF_GENES-1)
while second_affected_gene == affected_gene:
second_affected_gene = random.randint(0,NO_OF_GENES-1)
temp = individual[affected_gene]
individual[affected_gene] = individual[second_affected_gene]
individual[second_affected_gene] = temp
else:
# Bit-flip mutation for the problems where the set constraint is binary digits
individual[affected_gene] = 0 if individual[affected_gene] == 1 else 1
return individual
def check_solution(population):
ideal_individual = []
if FITNESS_CHOICE == 1:
ideal_individual = [0 for _ in range(NO_OF_GENES)]
elif FITNESS_CHOICE == 2:
ideal_individual = [1 for _ in range(NO_OF_GENES)]
elif FITNESS_CHOICE == 3:
ideal_individual = TARGET_SENTENCE
elif FITNESS_CHOICE == 4:
ideal_individual = list(TARGET_NUMBER)
elif FITNESS_CHOICE == 5 or FITNESS_CHOICE == 6:
# No algorithmic way of finding the ideal individual, especially when individuals are randomised
return False
for x in population:
if x == ideal_individual:
return True
return False
SOLUTION_FOUND = False
def check_vars():
global NO_OF_GENES
global TARGET_SENTENCE
global TARGET_NUMBER
global NO_OF_PARENTS
global POPULATION_SIZE
global GENES
if POPULATION_SIZE <= NO_OF_PARENTS:
print("NO_OF_PARENTS must be less than the POPULATION_SIZE")
return False
if TOURNAMENT_SELECTION_NUMBER >= POPULATION_SIZE-NO_OF_PARENTS:
print("The TOURNAMENT_SELECTION_NUMBER must be more than POPULATION_SIZE - NO_OF_PARENTS")
return False
if FITNESS_CHOICE == 3:
print("Target Sentence: ", TARGET_SENTENCE)
TARGET_SENTENCE = list(TARGET_SENTENCE.lower())
NO_OF_GENES = len(TARGET_SENTENCE)
GENES = '''abcdefghijklmnopqrstuvwxyz '_[]()?!<>.,'''
if FITNESS_CHOICE == 4:
print("Target Number in Denary: ", TARGET_NUMBER)
# The '[2::]' removes the '0x' from the beginning if the hex() output
TARGET_NUMBER = hex(TARGET_NUMBER)[2::]
print("Target Number in Hex: ", TARGET_NUMBER)
NO_OF_GENES = len(TARGET_NUMBER)
GENES = '''1234567890abcdef'''
if FITNESS_CHOICE == 5:
for x in range(NO_OF_GENES):
# Create NO_OF_GENES random items, and assign them random weights and values
KNAPSACK_ITEMS[x] = (random.randrange(1,10),random.randrange(0,500))
if NO_OF_GENES <= 1:
print("NO_OF_GENES must be <1")
if FITNESS_CHOICE == 3:
print("Please input a larger string")
if FITNESS_CHOICE == 4:
print("Please input a larger number")
return False
return True
def main():
global POPULATION_SIZE
global GENERATIONS
global SOLUTION_FOUND
global FITNESS_CHOICE
# Check for valid vars
if not check_vars(): exit(127)
# Debug Output
print("Running with following Variables:", \
"\nFITNESS_CHOICE: ", FITNESS_DICTIONARY[FITNESS_CHOICE], \
"\nELITISM: ", ELITISM, \
"\nDEBUG: ", DEBUG, \
"\nGENERATIONS: ", GENERATIONS, \
"\nNO_OF_GENES: ", NO_OF_GENES, \
"\nNO_OF_PARENTS: ", NO_OF_PARENTS, \
"\nMUTATION_RATE: ", MUTATION_RATE, \
"\nCROSSOVER_RATE: ", CROSSOVER_RATE, \
"\n")
if FITNESS_CHOICE == 5:
print("Randomly Generated Knapsack Items: ")
for x in KNAPSACK_ITEMS:
print("ID: ", x, " Weight: ", KNAPSACK_ITEMS[x][0], " Value: ", KNAPSACK_ITEMS[x][1])
print("")
# Initial Population
gen_count = 0
population = generate_population(POPULATION_SIZE)
    # Compute initial pop fitness
fitness = [compute_fitness(x) for x in population]
# Check solution
if DEBUG:
print("POPULATION: ", population)
print("FITNESS: ", fitness, "\n")
if check_solution(population):
print("Ideal Individual found in ", gen_count, " generations")
SOLUTION_FOUND = True
else:
gen_count += 1
while (gen_count <= GENERATIONS and SOLUTION_FOUND != True):
next_generation = []
if ELITISM:
N = int((10*POPULATION_SIZE)/100)
# Get the top N population, by fitness ( This helped: https://www.geeksforgeeks.org/python-indices-of-n-largest-elements-in-list/ )
res = sorted(range(len(fitness)), key = lambda sub: fitness[sub])[-N:]
next_generation += [ population[x] for x in res ]
parents = selection(population,fitness)
        # If Elitism, we need more offspring than without Elitism
offspring = crossover(parents,POPULATION_SIZE-len(next_generation))
offspring = [ mutation(x) for x in offspring ]
next_generation += offspring
population = next_generation
fitness = [compute_fitness(x) for x in population]
fitness_index = fitness.index(max(fitness))
best_individual = population[fitness_index] if FITNESS_CHOICE != 3 else ''.join(population[fitness_index])
if FITNESS_CHOICE == 6:
best_individual = [ "ID: " + str(x.id) for x in best_individual ]
print("Generation: ", gen_count, " Max Fitness: ", max(fitness), " Best Individual: ", best_individual)
if DEBUG:
print("POPULATION: ", population)
print("FITNESS: ", fitness, "\n")
if check_solution(population):
print("Ideal Individual found in ", gen_count, " generations")
SOLUTION_FOUND = True
else:
gen_count += 1
# Visualise the Travelling Salesman Problem
if FITNESS_CHOICE == 6:
# Plot lines for each 2 coords
for x in range(len(population[fitness_index])):
pt1 = population[fitness_index][x]
try:
pt2 = population[fitness_index][x+1]
except IndexError:
pt2 = population[fitness_index][0]
plt.plot([pt1.pos[0],pt2.pos[0]],[pt1.pos[1],pt2.pos[1]])
# Plot individual points on the 'board'
points = [ x.pos for x in population[fitness_index] ]
x,y = zip(*points)
plt.scatter(x,y,s=40)
for x in population[fitness_index]:
# Annotate the City IDs
plt.annotate(x.id, x.pos)
plt.show()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1620764
|
import logging
import re
log = logging.getLogger('AppArgumentsParser')
log.setLevel(logging.DEBUG)
class AppArgumentsParser:
def get_apps_list(self, input: str):
return_list = []
apps_list = input.split(",")
for app_range in apps_list:
if "-" in app_range:
if re.match("(\d+)\-(\d+)", app_range):
match = re.match("(\d+)\-(\d+)", app_range)
for i in range(int(match.groups()[0]), int(match.groups()[1]) + 1):
return_list.append("ftdapp{}".format(i))
else:
log.error("The range {} is not valid. We support only digits.".format(app_range))
raise Exception("The range you specified is not valid")
else:
if re.match("^\d+$", app_range):
return_list.append("ftdapp{}".format(app_range))
else:
return_list.append(app_range)
return return_list
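    # Usage sketch (hypothetical input): get_apps_list("1-3,ftdapp9") expands the numeric range
    # and keeps named apps as-is, returning ['ftdapp1', 'ftdapp2', 'ftdapp3', 'ftdapp9'].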
def parse_cpu_core_count_argument(self, cpu_core_count: str):
log.info("Parse cpu_core_count argument")
resource_profiles = {}
count_per_slot = {}
i = 1
for slot_cpu_info in cpu_core_count.split("|"):
app_cpu_info = slot_cpu_info.split(",")
for app_cpu in app_cpu_info:
if app_cpu != "0" and app_cpu != "":
resource_profiles[app_cpu] = {}
resource_profiles[app_cpu]['name'] = 'stauto_{}'.format(app_cpu)
resource_profiles[app_cpu]['cpu_core_count'] = app_cpu
else:
log.info("Cannot use {} for resource profile! Skipping...".format(app_cpu))
count_per_slot[i] = app_cpu_info
i = i + 1
return {'count_per_slot': count_per_slot, 'resource_profiles': resource_profiles}
def build_app_template(self, chassis_software, app_list):
instances = chassis_software['applications']
for i in instances:
if (len(app_list) > 0 and instances[i].get("application_identifier") in app_list) or len(app_list) == 0:
# need to initialize logical_device
if not instances[i].get('logical_device'):
chassis_software['applications'][i]['logical_device'] = {}
# if device_mode is not specified under logical_device it will take the value under chassis_software
if not chassis_software['applications'][i]['logical_device'].get("device_mode"):
chassis_software['applications'][i]['logical_device']['device_mode'] = chassis_software.get(
'device_mode')
if chassis_software.get('application_generic') is None:
return chassis_software
instance_template = chassis_software['application_generic']
for i in instances:
if (len(app_list) > 0 and instances[i].get("application_identifier") in app_list) or len(app_list) == 0:
slot_id = None
if not instances[i].get("slot"):
chassis_software['applications'][i]['slot'] = instance_template.get('slot')
slot_id = chassis_software['applications'][i]['slot']
id = None
if re.match("\D+(\d+)$", instances[i].get("application_identifier")):
match = re.match("\D+(\d+)$", instances[i].get("application_identifier"))
id = match.groups()[0]
if not instances[i].get("application_name"):
chassis_software['applications'][i]['application_name'] = instance_template.get('application_name')
if not instances[i].get("deploy_type"):
chassis_software['applications'][i]['deploy_type'] = instance_template.get('deploy_type')
if not instances[i].get("resource_profile"):
chassis_software['applications'][i]['resource_profile'] = instance_template.get('resource_profile')
# LOGICAL DEVICE
if not instances[i].get('logical_device'):
chassis_software['applications'][i]['logical_device'] = {}
if not instances[i]['logical_device'].get("name"):
chassis_software['applications'][i]['logical_device']['name'] = self._convert_name_with_id(
instance_template['logical_device'].get('name'), id, slot_id)
# EXTERNAL PORT LINKS
if not instances[i]['logical_device'].get('external_port_links'):
interface_list = []
for interface in instance_template['logical_device'].get("external_port_links"):
interface_list.append(self._convert_name_with_id(interface, id, slot_id))
chassis_software['applications'][i]['logical_device']['external_port_links'] = interface_list
# BOOTSTRAP KEYS
if not instances[i]['logical_device'].get('bootstrap_keys'):
chassis_software['applications'][i]['logical_device']['bootstrap_keys'] = {}
if not instances[i]['logical_device']['bootstrap_keys'].get("firewall_mode"):
chassis_software['applications'][i]['logical_device']['bootstrap_keys']['firewall_mode'] = \
instance_template['logical_device']['bootstrap_keys'].get('firewall_mode')
if not instances[i]['logical_device']['bootstrap_keys'].get("fqdn"):
chassis_software['applications'][i]['logical_device']['bootstrap_keys']['fqdn'] = \
self._convert_name_with_id(instance_template['logical_device']['bootstrap_keys'].get('fqdn'),
id, slot_id)
if not instances[i]['logical_device']['bootstrap_keys'].get("dns_servers"):
chassis_software['applications'][i]['logical_device']['bootstrap_keys']['dns_servers'] = \
instance_template['logical_device']['bootstrap_keys'].get('dns_servers')
if not instances[i]['logical_device']['bootstrap_keys'].get("search_domains"):
chassis_software['applications'][i]['logical_device']['bootstrap_keys']['search_domains'] = \
instance_template['logical_device']['bootstrap_keys'].get('search_domains')
if not instances[i]['logical_device']['bootstrap_keys'].get("permit_expert_mode"):
chassis_software['applications'][i]['logical_device']['bootstrap_keys']['permit_expert_mode'] = \
instance_template['logical_device']['bootstrap_keys'].get('permit_expert_mode')
if not instances[i]['logical_device']['bootstrap_keys'].get("firepower_manager_ip"):
chassis_software['applications'][i]['logical_device']['bootstrap_keys']['firepower_manager_ip'] = \
instance_template['logical_device']['bootstrap_keys'].get('firepower_manager_ip')
if not instances[i]['logical_device']['bootstrap_keys'].get("management_type"):
chassis_software['applications'][i]['logical_device']['bootstrap_keys']['management_type'] = \
instance_template['logical_device']['bootstrap_keys'].get('management_type')
# BOOTSTRAP KEYS SECRET
if not instances[i]['logical_device'].get('bootstrap_keys_secret'):
chassis_software['applications'][i]['logical_device']['bootstrap_keys_secret'] = {}
if not instances[i]['logical_device']['bootstrap_keys_secret'].get("password"):
chassis_software['applications'][i]['logical_device']['bootstrap_keys_secret']['password'] = \
instance_template['logical_device']['bootstrap_keys_secret'].get('password')
# IPv4
if not instances[i]['logical_device'].get('ipv4'):
chassis_software['applications'][i]['logical_device']['ipv4'] = {}
if not instances[i]['logical_device']['ipv4'].get("ip"):
chassis_software['applications'][i]['logical_device']['ipv4']['ip'] = \
self._compute_ipv4_address(instance_template['logical_device']['ipv4'].get('ip'), id)
if not instances[i]['logical_device']['ipv4'].get("netmask"):
chassis_software['applications'][i]['logical_device']['ipv4']['netmask'] = \
instance_template['logical_device']['ipv4'].get('netmask')
if not instances[i]['logical_device']['ipv4'].get("gateway"):
chassis_software['applications'][i]['logical_device']['ipv4']['gateway'] = \
instance_template['logical_device']['ipv4'].get('gateway')
# IPv6
if not instances[i]['logical_device'].get('ipv6'):
chassis_software['applications'][i]['logical_device']['ipv6'] = {}
if not instances[i]['logical_device']['ipv6'].get("ip"):
chassis_software['applications'][i]['logical_device']['ipv6']['ip'] = \
self._compute_ipv6_address(instance_template['logical_device']['ipv6'].get('ip'), id)
if not instances[i]['logical_device']['ipv6'].get("prefix"):
chassis_software['applications'][i]['logical_device']['ipv6']['prefix'] = \
instance_template['logical_device']['ipv6'].get('prefix')
if not instances[i]['logical_device']['ipv6'].get("gateway"):
chassis_software['applications'][i]['logical_device']['ipv6']['gateway'] = \
instance_template['logical_device']['ipv6'].get('gateway')
# Update data if deploy_type == native
if chassis_software['applications'][i]['deploy_type'] == 'native':
chassis_software['applications'][i]['logical_device']['bootstrap_keys']['permit_expert_mode'] = None
chassis_software['applications'][i]['resource_profile'] = None
return chassis_software
def _convert_name_with_id(self, name, id, slot_id):
if "{id}" in name:
name = name.replace("{id}", id)
if "{slot_id}" in name:
name = name.replace("{slot_id}", str(slot_id))
return name
def _compute_ipv4_address(self, start_address, id):
match = re.match("(\d+\.\d+\.\d+\.)(\d+)", start_address)
address = int(match.groups()[1]) + int(id) - 1
return "{}{}".format(match.groups()[0], address)
def _compute_ipv6_address(self, start_address, id):
match = re.match("([\w+\:]+\:)(\d+)$", start_address)
address = int(match.groups()[1]) + int(id) - 1
return "{}{}".format(match.groups()[0], address)
|
StarcoderdataPython
|
4836275
|
<filename>install_scripts/images.py
from __future__ import print_function
def get_images():
file = open("Manifest", "r")
images = {}
for line in file:
values = line.split()
if len(values) < 2:
continue
key = values[0].lower()
images[key] = {
'name': values[1],
'id': values[2] if len(values) > 2 else '',
}
file.close()
return images
def get_default_images():
file = open("Manifest", "r")
images = []
for line in file:
values = line.split()
if len(values) < 4:
continue
if values[3] != 'true':
continue
image = {}
image['image'] = values[1]
images.append(image)
file.close()
return {'images': images}
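# Usage sketch: each Manifest line is whitespace-separated as "<key> <image-name> [<id>] [true]";
# e.g. a hypothetical line "alpine alpine:3.12 sha256:abc true" yields
# images['alpine'] == {'name': 'alpine:3.12', 'id': 'sha256:abc'} and marks it as a default image.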
|
StarcoderdataPython
|
3226232
|
from commando.conf import AutoProp, ConfigDict
class TestClass(AutoProp):
@AutoProp.default
def source(self):
return 'source'
def test_auto():
t = TestClass()
assert t.source == 'source'
def test_override():
t = TestClass()
t.source = 'source1'
assert t.source == 'source1'
t.source = 'source2'
assert t.source == 'source2'
t.source = None
assert t.source == 'source'
def test_init():
c = ConfigDict({"a": 1})
assert c.a == 1
assert c["a"] == 1
def test_change():
c = ConfigDict({"a": 1})
assert c.a == 1
c.a = 2
assert c["a"] == 2
def test_two_levels():
c = ConfigDict({"a": 1, "b": {"c": 3}})
assert c.b.c == 3
def test_two_levels_assignment():
c = ConfigDict({"a": 1, "b": {"c": 3}})
d = {"d": 5}
c.b = d
assert c.b.d == 5
assert c.b == d
def test_two_levels_patch():
c = ConfigDict({"a": 1, "b": {"c": 3}})
d = {"d": 5}
c.b.d = d
assert c.b.c == 3
assert c.b.d == d
def test_copy():
c = ConfigDict({"a": 1, "b": {"c": 3}})
d = c.copy()
assert c == d
c.b.c = 4
assert c != d
def test_list():
c = ConfigDict({"a": 1, "b": {"c": 3}})
c.d = [dict(e=1), dict(f=2)]
assert c.d[0].e == 1
assert c.d[1].f == 2
def test_operator():
c = ConfigDict({"a": 1, "b": {"c": 3}})
from operator import attrgetter
assert attrgetter('b.c')(c) == 3
def test_patch_simple():
c = ConfigDict({"a": 1, "b": {"c": 3, "e": 4}})
d = {"b": {"e": 5}}
c.patch(d)
assert c.b.c == 3
assert c.b.e == 5
def test_patch_complex():
c = ConfigDict({
"a": 1,
"b": {"x": 3, "y": 4},
"c": {"x": 5, "y": 6},
"d": {"x": 7, "y": 8}
})
d = {"a": 2, "b": {"z": 5}, "c": [1, 2], "d": {"y": 9}}
c.patch(d)
assert c.a == 2
assert c.b.x == 3
assert c.b.y == 4
assert c.b.z == 5
assert c.c == [1, 2]
assert c.d.x == 7
assert c.d.y == 9
|
StarcoderdataPython
|
1726828
|
<filename>timingByPERank/TimingByPERank.py<gh_stars>100-1000
import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def get_drawdown(p):
"""
    Compute the drawdown series of a net-value curve.
"""
T = len(p)
hmax = [p[0]]
for t in range(1, T):
hmax.append(np.nanmax([p[t], hmax[t - 1]]))
dd = [p[t] / hmax[t] - 1 for t in range(T)]
return dd
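# Worked example (hypothetical values): for p = [1.0, 1.2, 0.9, 1.1] the running high-water mark
# is [1.0, 1.2, 1.2, 1.2], so get_drawdown(p) returns [0.0, 0.0, -0.25, -0.0833...].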
def cal_period_perf_indicator(adjnav):
"""
    Compute period performance indicators; the input must be daily-frequency net values.
"""
if type(adjnav) == pd.DataFrame:
res = pd.DataFrame(index=adjnav.columns, columns=['AnnRet', 'AnnVol', 'SR', 'MaxDD', 'Calmar'])
for col in adjnav:
res.loc[col] = cal_period_perf_indicator(adjnav[col])
return res
ret = adjnav.pct_change()
    #annret = np.nanmean(ret) * 242 # simple interest
    annret = (adjnav[-1] / adjnav[0]) ** (242/len(adjnav)) - 1 # compound interest
annvol = np.nanstd(ret) * np.sqrt(242)
sr = annret / annvol
dd = get_drawdown(adjnav)
mdd = np.nanmin(dd)
calmar = annret / -mdd
return [annret, annvol, sr, mdd, calmar]
def datestr2dtdate(datestr):
    # Date format conversion: 'yyyy-mm-dd' string to datetime.date
return datetime.datetime.strptime(datestr, '%Y-%m-%d').date()
# Define utility functions
pctrank = lambda x: x.rank(pct=True).iloc[-1]
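# Example: for a window pd.Series([1, 3, 2]), rank(pct=True) gives [1/3, 1, 2/3], so
# pctrank returns 2/3 -- the percentile rank of the most recent value within the window.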
# Load index price and valuation data from the csv file
index_data = pd.read_csv('timingByPERank/指数估值历史数据.csv').set_index('datetime')
index_data.index = [datestr2dtdate(e) for e in index_data.index]
# Set backtest parameters
index_id = 'hs300' # which index to use
pe_rank_window = 242*5 # length of the rolling window for the PE percentile (in trading days)
# Backtest procedure
df = index_data.loc[:,[index_id]]
df['index_pe'] = index_data[index_id+'_pe']
df['index_ret'] = df[index_id].pct_change()
df['index'] = (1+df['index_ret']).cumprod().fillna(1)
df['pe_rank'] = df['index_pe'].rolling(window=pe_rank_window).apply(pctrank)
df['pos'] = np.nan
for t in range(pe_rank_window,len(df.index)):
    # Strategy: go flat when the PE percentile is above 0.8, return to a full position when it falls below 0.5, otherwise keep yesterday's position
prev_pe_rank = df.loc[df.index[t-1],'pe_rank']
prev_pos = df.loc[df.index[t-1], 'pos']
prev_pos = 1 if np.isnan(prev_pos) else prev_pos
df.loc[df.index[t],'pos'] = 0 if prev_pe_rank>0.8 else 1 if prev_pe_rank<0.5 else prev_pos
# Compute backtest performance
backtest_start_date = index_data.index[pe_rank_window]
df['stgy_ret'] = df['pos'] * df['index_ret']
df['stgy'] = (1+df['stgy_ret']).cumprod() * df.loc[backtest_start_date,'index']
# Plot the backtest result curves
fig = plt.figure(figsize=(20,15))
ax1 = fig.add_subplot(4,1,1)
df.loc[backtest_start_date:,['index', 'stgy']].plot(ax=ax1, grid=True, title='price')
ax2 = fig.add_subplot(4,1,2)
df.loc[backtest_start_date:,'pos'].plot(ax=ax2, grid=True, title='pos')
ax3 = fig.add_subplot(4,1,3)
df.loc[backtest_start_date:,'pe_rank'].plot(ax=ax3, grid=True, title='pe_rank')
ax4 = fig.add_subplot(4,1,4)
df.loc[backtest_start_date:,'index_pe'].plot(ax=ax4, grid=True, title='index_pe')
# Compute the table of backtest performance indicators
res = cal_period_perf_indicator(df.loc[backtest_start_date:,['index','stgy']])
print(res)
|
StarcoderdataPython
|
157292
|
<filename>001-100/10/10-sieve.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from math import sqrt, ceil
num = 2000000
sieve = [True] * num  # 0, 1, 2, 3, ..., 1999999
sieve[0] = False
sieve[1] = False
for i in range(3, num):
    if i & 1 == 0:
        sieve[i] = False
# every composite number has a prime factor no greater than its square root
sqr = int(ceil(sqrt(num)))
for i in range(3, sqr):
    if sieve[i]:
        # start from the prime multiple 3i with step 2i (even multiples are already marked)
        for j in range(3 * i, num, 2 * i):
            sieve[j] = False
total = 0
for idx, val in enumerate(sieve):
    if val:
        total += idx
print(total)
|
StarcoderdataPython
|
198027
|
<filename>fastreid/data/transforms/__init__.py<gh_stars>0
# encoding: utf-8
"""
@author: sherlock
@contact: <EMAIL>
"""
from .autoaugment import AutoAugment
from .build import build_transforms
from .transforms import *
from .mosaic import *
__all__ = [k for k in globals().keys() if not k.startswith("_")]
|
StarcoderdataPython
|
3255741
|
<filename>superset_patchup/version.py
"""Version goes here - to avoid cyclic dependencies :-("""
VERSION = (0, 2, 1)
__version__ = ".".join(str(v) for v in VERSION)
|
StarcoderdataPython
|
5336
|
<reponame>LaudateCorpus1/oci-ansible-collection
#!/usr/bin/python
# Copyright (c) 2020, 2022 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_database_management_object_privilege_facts
short_description: Fetches details about one or multiple ObjectPrivilege resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple ObjectPrivilege resources in Oracle Cloud Infrastructure
- Gets the list of Object Privileges granted for the specified user.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
managed_database_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the Managed Database.
type: str
required: true
user_name:
description:
- The name of the user whose details are to be viewed.
type: str
required: true
name:
description:
- A filter to return only resources that match the entire name.
type: str
sort_by:
description:
- The field to sort information by. Only one sortOrder can be used. The default sort order
for 'NAME' is ascending. The 'NAME' sort order is case-sensitive.
type: str
choices:
- "NAME"
sort_order:
description:
- The option to sort information in ascending ('ASC') or descending ('DESC') order. Ascending order is the default order.
type: str
choices:
- "ASC"
- "DESC"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: List object_privileges
oci_database_management_object_privilege_facts:
# required
managed_database_id: "ocid1.manageddatabase.oc1..xxxxxxEXAMPLExxxxxx"
user_name: user_name_example
# optional
name: name_example
sort_by: NAME
sort_order: ASC
"""
RETURN = """
object_privileges:
description:
- List of ObjectPrivilege resources
returned: on success
type: complex
contains:
name:
description:
- The name of the privilege on the object.
returned: on success
type: str
sample: name_example
schema_type:
description:
- The type of the object.
returned: on success
type: str
sample: schema_type_example
owner:
description:
- The owner of the object.
returned: on success
type: str
sample: owner_example
grantor:
description:
- The name of the user who performed the grant
returned: on success
type: str
sample: grantor_example
hierarchy:
description:
- Indicates whether the privilege was granted with the HIERARCHY OPTION (YES) or not (NO)
returned: on success
type: str
sample: YES
object:
description:
- The name of the object. The object can be any object, including tables, packages, indexes, sequences, and so on.
returned: on success
type: str
sample: object_example
grant_option:
description:
- Indicates whether the privilege was granted with the GRANT OPTION (YES) or not (NO)
returned: on success
type: str
sample: YES
common:
description:
- "Indicates how the grant was made. Possible values:
YES if the role was granted commonly (CONTAINER=ALL was used)
NO if the role was granted locally (CONTAINER=ALL was not used)"
returned: on success
type: str
sample: YES
inherited:
description:
- Indicates whether the role grant was inherited from another container (YES) or not (NO)
returned: on success
type: str
sample: YES
sample: [{
"name": "name_example",
"schema_type": "schema_type_example",
"owner": "owner_example",
"grantor": "grantor_example",
"hierarchy": "YES",
"object": "object_example",
"grant_option": "YES",
"common": "YES",
"inherited": "YES"
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.database_management import DbManagementClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class ObjectPrivilegeFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: list"""
def get_required_params_for_list(self):
return [
"managed_database_id",
"user_name",
]
def list_resources(self):
optional_list_method_params = [
"name",
"sort_by",
"sort_order",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.list_all_resources(
self.client.list_object_privileges,
managed_database_id=self.module.params.get("managed_database_id"),
user_name=self.module.params.get("user_name"),
**optional_kwargs
)
ObjectPrivilegeFactsHelperCustom = get_custom_class("ObjectPrivilegeFactsHelperCustom")
class ResourceFactsHelper(
ObjectPrivilegeFactsHelperCustom, ObjectPrivilegeFactsHelperGen
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(
managed_database_id=dict(type="str", required=True),
user_name=dict(type="str", required=True),
name=dict(type="str"),
sort_by=dict(type="str", choices=["NAME"]),
sort_order=dict(type="str", choices=["ASC", "DESC"]),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="object_privilege",
service_client_class=DbManagementClient,
namespace="database_management",
)
result = []
if resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(object_privileges=result)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
85152
|
<reponame>hackerwins/polyaxon
import workers
from db.redis.ttl import RedisTTL
from events import event_subjects
from events.registry import job
from events.registry.job import JOB_FAILED, JOB_SUCCEEDED
from executor.handlers.base import BaseHandler
from polyaxon.settings import SchedulerCeleryTasks
class JobHandler(BaseHandler):
SUBJECT = event_subjects.JOB
@classmethod
def _handle_job_created(cls, event: 'Event') -> None:
if not event.data['is_managed']:
return
if not event.data['has_specification']:
return
# Start building the job and then Schedule it to be picked by the spawners
workers.send(
SchedulerCeleryTasks.JOBS_BUILD,
kwargs={'job_id': event.data['id']})
@classmethod
def _handle_job_cleaned_triggered(cls, event: 'Event') -> None:
instance = event.instance
if not instance or not instance.has_specification or not instance.is_stoppable:
return
workers.send(
SchedulerCeleryTasks.JOBS_STOP,
kwargs={
'project_name': instance.project.unique_name,
'project_uuid': instance.project.uuid.hex,
'job_name': instance.unique_name,
'job_uuid': instance.uuid.hex,
'update_status': False,
'collect_logs': False,
'is_managed': instance.is_managed,
})
@classmethod
def _handle_job_post_run(cls, event: 'Event') -> None:
instance = event.instance
if not instance or not instance.has_specification:
return
workers.send(
SchedulerCeleryTasks.JOBS_STOP,
kwargs={
'project_name': instance.project.unique_name,
'project_uuid': instance.project.uuid.hex,
'job_name': instance.unique_name,
'job_uuid': instance.uuid.hex,
'update_status': False,
'collect_logs': True,
'is_managed': instance.is_managed,
},
countdown=RedisTTL.get_for_job(job_id=instance.id))
@classmethod
def record_event(cls, event: 'Event') -> None:
if event.event_type == job.JOB_CREATED:
cls._handle_job_created(event=event)
elif event.event_type == job.JOB_CLEANED_TRIGGERED:
cls._handle_job_cleaned_triggered(event=event)
elif event.event_type in {JOB_FAILED, JOB_SUCCEEDED}:
cls._handle_job_post_run(event=event)
|
StarcoderdataPython
|
123571
|
<reponame>JMU-CIME/CPR-Music-Backend<gh_stars>1-10
import pytest
from django.test import RequestFactory
from teleband.assignments.api.views import AssignmentViewSet
from teleband.courses.models import Enrollment
pytestmark = pytest.mark.django_db
class TestAssignmentViewSet:
def test_get_queryset_student(self, enrollment: Enrollment, rf: RequestFactory):
view = AssignmentViewSet()
enrollment.role.name = "Student"
enrollment.role.save()
request = rf.get("/fake-url/")
request.user = enrollment.user
view.request = request
setattr(view, "kwargs", {"course_slug_slug": enrollment.course.slug})
queryset = view.get_queryset()
# actually there is nothing in the queryset, need
# to populate it with some assignments for this student
# and some other students to actually check this
# Make sure every assignment is assigned to me and only me
for assignment in queryset:
assert enrollment.user == assignment.enrollment.user
|
StarcoderdataPython
|
4842454
|
import numpy as np
from scipy.signal import argrelextrema
import matplotlib.pyplot as plt
from scipy import mean
import scikits.bootstrap as bootstrap
def moving_average(x, win=10):
w = np.blackman(win)
s = np.r_[2 * x[0] - x[win:1:-1], x, 2 * x[-1] - x[-1:-win:-1]]
return np.convolve(w / w.sum(), s, mode='same')[win - 1: -win + 1]
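# Note: moving_average point-reflects the signal at both ends, convolves with a normalized
# Blackman window of length `win`, and trims the padding so the output length matches the input.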
def perpendicular(a):
"""
gets perpendicular vector
:rtype: array like
:param a:
:return:
"""
b = np.empty_like(a)
b[:, 0] = -a[:, 1]
b[:, 1] = a[:, 0]
return b
def line(x, m, b):
return m * x + b
def zero_crossings(data):
pos = data > 0
npos = ~pos
return ((pos[:-1] & npos[1:]) | (npos[:-1] & pos[1:])).nonzero()[0]
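# Worked example: zero_crossings(np.array([1, -1, 2, 3])) returns array([0, 1]) -- each index
# marks the left element of a consecutive pair whose signs differ.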
def tribocycle_finder(y_disp):
"""
finds tribo-cycles using the y displacement curve
:param y_disp:
:return:
"""
y_disp = moving_average(y_disp)
maxima = argrelextrema(y_disp, np.greater, order=1000)
minima = argrelextrema(y_disp, np.less, order=500)
if maxima[0].size > minima[0].size:
cycle_ends = maxima
cycle_mid = minima
elif minima[0].size > maxima[0].size:
cycle_ends = minima
cycle_mid = maxima
else:
print 'Error in tribocycle finder, y displacement waveform incorrect'
plt.plot(y_disp)
plt.show()
cycle_ends = np.nan
cycle_mid = np.nan
return cycle_ends, cycle_mid
def prony_min_hold(params, *args):
"""
Description: Objective function for the 1-term prony analysis
:param params:
t = time vector
F = axial force
t1 = initial time
e1 = initial displacement
E = instantaneous modulus
"""
t = args[0]
F = args[1]
t1 = args[2]
e1 = args[3]
E = args[4]
p, tau = params
F_model = (E * e1 / t1) * (t1 - p * t1 + p * tau * np.exp(-1 * (t - t1) / tau)
- p * tau * np.exp(-1 * t / tau))
error = F - F_model
return np.sum(error ** 2)
def prony_hold(t, E, p, tau, e1, t1):
out = (E * e1 / t1) * (t1 - p * t1 + p * tau * np.exp(-1 * (t - t1) / tau) -
p * tau * np.exp(-1 * t / tau))
return out
def loo_regression(x, y):
"""
Tests for an outlier in a regression
:param x: X values for regression
:param y: Y values for regression
:return: object numpy.ndarray -- reg_coeffs
"""
# check that x and y are vectors and the same length
if not x.shape[0] == y.shape[0]:
print("x and y are not the same size")
x = np.array(x)
y = np.array(y)
vlen = x.shape[0]
reg_coeffs = np.zeros(vlen, dtype=float)
mask = np.ones(vlen, dtype=bool)
for i in xrange(vlen):
mask[i] = False
ss_x = x[mask]
ss_y = y[mask]
ss_mu = np.polyfit(ss_x, ss_y, deg=1)
mask[i] = True
reg_coeffs[i] = ss_mu[0]
return reg_coeffs
def flag_outlier(in_vec, thresh_percentage=95):
"""
Flags an outlier according to a percent difference threshold
:param thresh_percentage: percent confidence interval
:param in_vec:
:return: outlier_ind
"""
in_vec = np.array(in_vec)
# find largest outlier
outlier_ind = 0
l2_resid_old = 0
mask = np.ones(len(in_vec), dtype=bool)
for i in xrange(in_vec.shape[0]):
mask[i] = False
l2_resid = (in_vec[i] - np.mean(in_vec[mask]))**2
if l2_resid > l2_resid_old:
outlier_ind = i
l2_resid_old = l2_resid
mask[i] = True
# check if outlier is outside threshold percentage
# bootstrap a 95% ci from data
a_lvl = 1 - (thresh_percentage / 100.)
CIs = bootstrap.ci(data=in_vec, statfunction=mean, alpha=a_lvl)
if in_vec[outlier_ind] < CIs[0] or in_vec[outlier_ind] > CIs[1]:
return outlier_ind
else:
return None
def remove_outlier_friction(x, y, thresh=95):
mu_list = loo_regression(x, y)
oli = flag_outlier(mu_list, thresh_percentage=thresh)
if oli:
mask = np.ones(len(mu_list), dtype=bool)
mask[oli] = False
mu = np.mean(mu_list[mask])
else:
mu = np.mean(mu_list)
return mu, oli
|
StarcoderdataPython
|
73268
|
<filename>problem_repair/tests.py
from django.test import TestCase
#!/usr/bin/env python
'''
clara CLI interface
'''
# Python imports
import json
import os
import pprint
import sys
import traceback
from ast import literal_eval
# clara imports
from claraa.common import parseargs, debug
from claraa.feedback import Feedback, FeedGen
from claraa.feedback_repair import RepairFeedback
from claraa.feedback_simple import SimpleFeedback
from claraa.feedback_python import PythonFeedback
from claraa.interpreter import getlanginter
from claraa.matching import Matching
from claraa.model import dict_to_expr
from claraa.parser import getlangparser
from claraa.repair import Repair
VERBOSE=0
class Clara(object):
USAGE = '''
%s COMMAND [OPTIONS*] SOURCE_FILE_1 SOURCE_FILE_2 ...
Commands are:
help print this usage message
model prints a model of a program
match run the matching procedure on two source files
repair run the repair algorithm on two source files
feedback generates feedback on multiple specifications
Options are:
--verbose [0|1] controls the amount of output information printed
(default is 0)
--lang LANG language of the source files
(default is to guess from the first source file extension)
--args ARGS arguments for matching or repair
--argsfile FILE file with arguments for matching or repair
--ins INS inputs for matching or repair
--insfile FILE file with arguments for matching or repair
--entryfnc FNC entry function for matching or repair
(default is 'main')
--ignoreio [0|1] whether to ignore IO in matching or repair
--ignoreret [0|1] whether to ignore return value in matching or repair
--bijective [0|1] whether to perform bijective matching (default is 0)
--cleanstrings [0|1]
whether to clean (trim) strings when checking correctness
in the repair algorithm (default is 0)
--timeout INT timeout in seconds (for the repair)
(default is 60)
--suboptimal [0|1] allow sub-optimal repairs (default is 1)
--poolsize INT number of (parallel) processes to use for feedback
(default is the number of CPUs)
--feedtype FEED type of feedback to generate ('repair', 'simple')
(default is 'repair')
--maxfeedcost N maximum cost of a repair for feedback; if cost is larger
than N, an error occurs
(default is 0, which means that there is no limit)
    '''

    def __init__(self, language, feedtype, correct, wrong):
        # Default option values (overridable before calling main())
        self.verbose = 0
        self.timeout = 60
        self.ignoreio = 0
        self.ignoreret = 0
        self.bijective = 0
        self.cleanstrings = 0
        self.entryfnc = 'main'
        self.suboptimal = 1
        self.maxfeedcost = 0
        # Command and options are expected to be filled in by argument parsing
        self.cmd = None
        self.opts = {}
        self.lang = language
        self.feedtype = feedtype
        self.sources = [correct, wrong]

    def set_templates(self, submission_id):
        self.sources[0] = submission_id
def usage(self):
'''
Prints usage information (to stderr).
'''
        print(self.USAGE % (sys.argv[0],), file=sys.stderr)
def debug(self, msg, *args):
'''
Prints debug message if verbose mode on.
'''
if self.verbose:
debug(msg, *args)
def error(self, msg, *args):
'''
Prints error message and exits
'''
if args:
msg %= args
        print('Error: %s\n' % (msg,), file=sys.stderr)
sys.exit(1)
def main(self):
global VERBOSE
VERBOSE = self.verbose
self.lang = self.opts.pop('lang', None)
self.timeout = int(self.opts.pop('timeout', 60))
self.ignoreio = int(self.opts.pop('ignoreio', 0))
self.ignoreret = int(self.opts.pop('ignoreret', 0))
self.bijective = int(self.opts.pop('bijective', 0))
self.cleanstrings = int(self.opts.pop('cleanstrings', 0))
self.entryfnc = self.opts.pop('entryfnc', 'main')
self.suboptimal = int(self.opts.pop('suboptimal', 1))
self.maxfeedcost = int(self.opts.pop('maxfeedcost', 0))
self.poolsize = self.opts.pop('poolsize', None)
if self.poolsize is not None:
self.poolsize = int(self.poolsize)
self.feedtype = self.opts.pop('feedtype', 'repair')
if self.feedtype == 'repair':
self.feedtype = RepairFeedback
elif self.feedtype == 'simple':
self.feedtype = SimpleFeedback
elif self.feedtype == 'python':
self.feedtype = PythonFeedback
else:
self.error("Unknown feedback type: '%s'", self.feedtype)
self.ins = self.opts.pop('ins', None)
self.args = self.opts.pop('args', None)
self.insfile = self.opts.pop('insfile', None)
self.argsfile = self.opts.pop('argsfile', None)
if self.ins is not None and self.insfile is not None:
self.error('Got both inputs and file with inputs: which to use?')
if self.args is not None and self.argsfile is not None:
self.error('Got both args and file with args: which to use?')
if self.ins is not None:
self.ins = literal_eval(self.ins)
if self.args is not None:
self.args = literal_eval(self.args)
if self.insfile is not None:
with open(self.insfile, 'r') as f:
self.ins = literal_eval(f.read())
if self.argsfile is not None:
with open(self.argsfile, 'r') as f:
self.args = literal_eval(f.read())
if self.lang is None:
self.guess_lang()
self.parser = getlangparser(self.lang)
self.inter = getlanginter(self.lang)
self.process_sources()
if self.cmd == 'match':
self.match()
elif self.cmd == 'model':
self.model()
elif self.cmd == 'repair':
self.repair()
elif self.cmd == 'feedback':
self.feedback()
elif self.cmd == 'eval':
self.eval()
else:
self.usage()
self.error("Unknown command: '%s'", self.cmd)
def model(self):
if len(self.models) != 1:
self.error('Model requires one program!')
print (self.models[0].tostring())
def match(self):
if len(self.models) < 2:
self.error('Match requires two programs!')
elif len(self.models) > 2:
self.debug('Match requires two programs, ignoring the rest!')
M = Matching(ignoreio=self.ignoreio, ignoreret=self.ignoreret,
verbose=self.verbose, bijective=self.bijective)
m = M.match_programs(self.models[0], self.models[1], self.inter,
ins=self.ins, args=self.args,
entryfnc=self.entryfnc)
if m:
self.debug('Match: %s', pprint.pformat(m[1]))
print ('Match!')
else:
print ('No match!')
def eval(self):
if len(self.models) != 1:
self.error('Eval requires exactly one program!')
print (self.models[0])
        print()
inter = self.inter(entryfnc=self.entryfnc)
trace = inter.run(self.models[0], args=self.args, ins=self.ins)
print (trace)
def repair(self):
if len(self.models) < 2:
self.error('Repair requires two programs!')
elif len(self.models) > 2:
self.debug('Repair requires two programs, ignoring the rest!')
R = Repair(timeout=self.timeout, verbose=self.verbose,
allowsuboptimal=self.suboptimal,
cleanstrings=self.cleanstrings)
r = R.repair(self.models[0], self.models[1], self.inter,
ins=self.ins, args=self.args, ignoreio=self.ignoreio,
ignoreret=self.ignoreret, entryfnc=self.entryfnc)
if r:
txt = RepairFeedback(self.models[1], self.models[0], r)
txt.genfeedback()
print ('Repairs:')
print ('\n'.join(map(lambda x: ' * %s' % (x,), txt.feedback)))
else:
print ('No repair!')
def feedback(self):
if len(self.models) < 2:
self.error('Feedback requires at least two programs!')
F = FeedGen(verbose=self.verbose, timeout=self.timeout,
poolsize=self.poolsize, allowsuboptimal=self.suboptimal,
feedmod=self.feedtype)
impl = self.models[-1]
specs = self.models[:-1]
feed = F.generate(
impl, specs, self.inter, ins=self.ins, args=self.args,
ignoreio=self.ignoreio, ignoreret=self.ignoreret,
cleanstrings=self.cleanstrings,
entryfnc=self.entryfnc)
if feed.status == Feedback.STATUS_REPAIRED:
if self.maxfeedcost > 0 and feed.cost > self.maxfeedcost:
self.error('max cost exceeded (%d > %d)',
feed.cost, self.maxfeedcost)
for f in feed.feedback:
print ('*', f)
elif feed.status == Feedback.STATUS_ERROR:
self.error(feed.error)
else:
self.error(feed.statusstr())
def guess_lang(self):
'''
Sets lang options from the first source file extension.
'''
if not len(self.sources):
self.error('Cannot guess the language - no source files!')
return
file_parts = self.sources[0].rsplit('.', 1)
if len(file_parts) < 2:
self.error('Cannot guess the language - no file extension!')
self.lang = file_parts[1]
self.debug('Guessed language: %s', self.lang)
def extract_exprs(self, model):
'''
Loads additional expressions for the specification.
'''
ext = '.' + self.lang
exprs_filename = model.name.replace(ext, '-exprs.json')
if not os.path.isfile(exprs_filename): return
with open(exprs_filename, 'r') as f:
exprs = json.load(f)
for expr_entry in exprs:
fname = expr_entry['fnc']
loc = expr_entry['loc']
var = expr_entry['var']
expr = dict_to_expr(expr_entry['expr'])
fnc = model.fncs[fname]
if not hasattr(fnc, 'repair_exprs'):
fnc.repair_exprs = {}
rex = fnc.repair_exprs
if loc not in rex:
rex[loc] = {}
if var not in rex[loc]:
rex[loc][var] = []
rex[loc][var].append((expr, None))
def process_sources_db(self):
'''
Reads and parses source files (sets models field). from db
'''
self.models = []
for src in self.sources:
def process_sources(self):
'''
Reads and parses source files (sets models field).
'''
self.models = []
for src in self.sources:
self.debug("Reading and parsing source file '%s'", src)
            with open(src, 'r') as f:
                code = f.read()
model = self.parser.parse_code(code)
model.name = src
self.extract_exprs(model)
self.models.append(model)
if __name__ == '__main__':
try:
clara = Clara()
clara.main()
sys.exit(0)
    except Exception as err:
        print('Error occurred: %s' % (err,), file=sys.stderr)
        if VERBOSE:
            traceback.print_exc()
        sys.exit(1)
|
StarcoderdataPython
|
162334
|
# Calculating the intragroup average distance, showing that it significantly increases with age,
# and that TRF is lower than 24-month-old AL. Make a figure.
# Note about scaling: the metabolome matrix has the metabolites in rows (animals in columns),
# and scaling normalizes the columns, so we need to scale the transpose of the metabolome matrix.
# Checking old-young groups; the code still needs to be expanded to all 6 pairs!
# Due to the orders-of-magnitude differences in reads between metabolites, we need to scale the
# data: each metabolite is normalized to have mean 0, std 1.
# This program calculates the average distance within a group and between groups by computing the
# L1 metric between all possible pairs (of samples) across all metabolites. We then have a matrix
# of distances where the diagonal is 0, from which we can calculate the average intra- and
# inter-group distances. Next we keep the distances but permute the labels, and test the
# probability of obtaining an intra-group average distance higher than the intergroup one;
# specifically, for each pair (a, b) we compare a to ab and b to ab, requiring that the permuted
# distance is >= the original distance.
# The data are presented on a bar graph, with the p-value obtained from the permutations.
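# Minimal sketch of the permutation test described above (hypothetical 2+2 toy example):
# with groups A = {0, 1} and B = {2, 3} and distance matrix D, the observed statistic is
# mean of the off-diagonal entries of D[A][:, A] versus mean(D[A][:, B]); each permutation
# shuffles the four labels, recomputes both means from the same D, and the p-value is the
# fraction of permutations whose intra-group mean is at least as large as the observed one.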
def caldistance(lst1, lst2): # calculates the L1 distance between lst1 and lst2; they represent two samples (animals) with their lists of metabolomic values
dist0 = 0
for ii in range(0, len(lst1)):
tmp = abs(lst1[ii] - lst2[ii])
dist0 = dist0 + tmp
return dist0
def strtofloat0(lst): # returns a list of floats, given a list of numbers of str type
ans = []
for ii in lst:
tmp = [float(kk) for kk in ii]
ans.append(tmp)
return ans
def getvalues0(polarval0, start1, len1, start2, len2): # returns lists with the values of the groups that we want to compare
grp1 = polarval0[:, start1:(start1+len1)]
grp2 = polarval0[:, start2:(start2+len2)]
grp1lst = grp1.tolist()
grp2lst = grp2.tolist()
return grp1lst, grp2lst
def averageingroupdis(dim0, mtrx0): # calculates the average distance within the same group (array) - sum of all elements, divided by (number of elements minus diagonal)
# dim0 is the number of rows/columns in the array.
# This is a symmetrical matrix with diagonal = 0. element ij is the distance between animal i and j (in the same group)
sm0 = np.sum(mtrx0)
numbrofelmnts = ((dim0*dim0) - dim0)
ans = sm0/numbrofelmnts
return ans
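# Worked example (hypothetical 2x2 case): for mtrx0 = np.array([[0, 3], [3, 0]]) and dim0 = 2,
# the sum is 6 and there are 2*2 - 2 = 2 off-diagonal elements, so the average distance is 3.0.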
def averageoutgroupdis(dim0, mtrx0): # calculates the average distance between two groups (array) - sum of all elements, divided by number of elements
# dim0 is the number of rows/columns in the array, here the diagonal has no meaning - each row is one group and each column is a second group.
# element ij is the distance between animal i and j (in the different groups!)
sm0 = np.sum(mtrx0)
numbrofelmnts = ((dim0*dim0))
ans = sm0/numbrofelmnts
return ans
def buildidsmatrx(distarr, perm0): # receives the original distance matrix/array and the permutation vector, builds the permutated matrix
permdist0 = []
for ii in perm0:
permtmp = []
for jj in perm0:
tmp = distarr[ii, jj] # need to have the indices starting from 0!
permtmp.append(tmp)
# print('permtmp', permtmp)
permdist0.append(permtmp)
return permdist0
def originaldistmtrx(distarry): # receives the two-group metabolomics data, generates the distance matrix(list)
distlstot0 = []
for ii in range(0, len(distarry)):
rowdist = []
for jj in range(0, len(distarry)):
tmpdist = caldistance(distarry[ii], distarry[jj])
rowdist.append(tmpdist)
distlstot0.append(rowdist)
return distlstot0
def generatepairgroup(group01, group02): # generates the distance matrix (array) for the group01-group02 pair
group01arr = np.array(group01)
group01arrt = group01arr.transpose()
print(len(group01arrt), len(group01arrt[0])) #
group01lst0 = group01arrt.tolist()
group02arr = np.array(group02)
group02arrt = group02arr.transpose()
print(len(group02arrt), len(group02arrt[0])) #
group02lst0 = group02arrt.tolist()
group0102lst0 = group01lst0 + group02lst0
print(len(group0102lst0), len(group0102lst0[0])) #
distlst0 = originaldistmtrx(group0102lst0) # generating the distance matrix (array)
print(len(distlst0), len(distlst0[0]), distlst0[0][0], distlst0[0][1], distlst0[1][1], distlst0[1][0])
return distlst0
def ingpdis(gpnum, gpsize, distmtrx): # receives the distance matrix(list), returns the intragroup distance of gpnum
distmtrxarr = np.array(distmtrx)
if gpnum == 1: # always size 15
tmpdistmtrxarr = distmtrxarr[0:gpsize, 0:gpsize]
sm0 = np.sum(tmpdistmtrxarr)
numbrofelmnts = ((gpsize * gpsize) - gpsize)
ans = sm0 / numbrofelmnts
if gpnum == 2: # should work for size 15 as well as 14
tmpdistmtrxarr = distmtrxarr[15:, 15:] # starts with No. 15 always
sm0 = np.sum(tmpdistmtrxarr)
        numbrofelmnts = ((gpsize * gpsize) - gpsize) # works for size 15 as well as 14 - this is the matrix size
ans = sm0 / numbrofelmnts
return ans
def outgpdis(gset, gpsize, distmtrx): # receives the distance matrix(list), returns the intergroup distance of gset
distmtrxarr = np.array(distmtrx)
if gset[1] != 3:
tmpdistmtrxarr = distmtrxarr[0:gpsize, gpsize:]
sm0 = np.sum(tmpdistmtrxarr)
numbrofelmnts = (gpsize * gpsize)
ans = sm0 / numbrofelmnts
elif gset[1] == 3:
tmpdistmtrxarr = distmtrxarr[0:gpsize, gpsize:]
sm0 = np.sum(tmpdistmtrxarr)
numbrofelmnts = (gpsize * (gpsize-1))
ans = sm0 / numbrofelmnts
return ans
def ingspdistance(distmtrx): # receives the distance matrix(list), returns the intragroup distances all 4 groups; O/Y/AL/CR
distmtrxarr = np.array(distmtrx)
permdistances = []
for ii in range(0, 4):
if ii != 3: # always size 15
tmpdistmtrxarr = distmtrxarr[(ii*15):((ii+1)*15), (ii*15):((ii+1)*15)]
sm0 = np.sum(tmpdistmtrxarr)
numbrofelmnts = (210) # 15*15 - 15
permdistances.append(sm0 / numbrofelmnts)
elif ii == 3:
tmpdistmtrxarr = distmtrxarr[45:59, 45:59]
sm0 = np.sum(tmpdistmtrxarr)
numbrofelmnts = (182) # 14*14 - 14
permdistances.append(sm0 / numbrofelmnts)
return permdistances
def calcsum0(old, young, al): # receives three group distances, checks for a monotonic trend across young/old/al. If monotonic, returns the sum of abs(differences).
if (young < old < al):
ans = abs(old - young) + abs(old - al)
else:
ans = 0
return ans
def getstderror(in_out, distarr): # calculates the population std (pstdev) of the distance matrix distarr; in_out 0 means intragroup (exclude the 0s on the diagonal), 1 means intergroup (count all elements)
distlst0 = distarr.tolist()
# print('dist', distlst0)
if in_out == 0: # distarr represents intragroup distances
elements0 = []
for ii in distlst0:
for jj in ii:
if jj != 0:
elements0.append(jj)
    elif in_out == 1: # distarr represents intergroup distances - count all elements
        elements0 = []
        for ii in distlst0:
            for jj in ii:
                elements0.append(jj)
std0 = statistics.pstdev(elements0)
# print(len(elements0)) # - yey
return std0
def make1dlist(array): # receives a 2-d array, returns a 1-d list of all elements
onedlist = []
for ii in array.tolist():
onedlist = onedlist + ii
return onedlist
from scipy.stats import hypergeom
import statsmodels
import statsmodels.stats.multitest
import statistics
from scipy.stats import mannwhitneyu
from scipy.stats import ttest_ind
from scipy.stats.stats import pearsonr
from scipy.stats.stats import spearmanr
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from scipy.stats import stats
# need to do standard scaling (mean 0, std 1) because of the 10 orders of magnitude difference between the polar and lipid values.
print('intergroup distance')
merge0 = pd.read_csv('mergedpolarlipid', header = None) # the metabolomics data for the merged polar & lipid file
print(merge0.head())
merge0val0 = merge0.iloc[:, :].values #
# Feature Scaling
scaler = StandardScaler()
merge0valt = scaler.fit_transform(merge0val0.transpose()) # the scaling is for the columns, so we scale the transpose matrix (col = metabolites)
merge0val = merge0valt.transpose()
valuesoldyoung = getvalues0(merge0val, 0, 15, 15, 15) # returns lists with the values of the groups that we want to compare
oldtmp1 = valuesoldyoung[0]
oldval = strtofloat0(oldtmp1)
youngtmp1 = valuesoldyoung[1]
youngval = strtofloat0(youngtmp1)
valuesALCR = getvalues0(merge0val, 30, 15, 45, 14)
altmp1 = valuesALCR[0]
alval = strtofloat0(altmp1)
crtmp1 = valuesALCR[1]
crval = strtofloat0(crtmp1)
print(len(oldval), len(oldval[0]), oldval[0][0]) # 434 15 v (not scaled 0.000390325) - yey
oldvalarr = np.array(oldval)
oldvalarrt = oldvalarr.transpose()
print(len(oldvalarrt), len(oldvalarrt[0])) # 15 434
oldvallst0 = oldvalarrt.tolist()
youngvalarr = np.array(youngval)
youngvalarrt = youngvalarr.transpose()
print(len(youngvalarrt), len(youngvalarrt[0])) # 15 434
youngvallst0 = youngvalarrt.tolist()
oldyoungvallst0 = oldvallst0 + youngvallst0
print(len(oldyoungvallst0), len(oldyoungvallst0[0])) # 30 434 - yey
# dist1_2 = caldistance([1,2,3], [1,2,4])
# print(dist1_2) # 1 - yey
dist1_1 = caldistance(oldyoungvallst0[0], oldyoungvallst0[0])
dist1_2 = caldistance(oldyoungvallst0[0], oldyoungvallst0[1])
print(dist1_1, dist1_2) # 0.0 360.7584529430399 (not scaled 0.0, 239666801.601786) - looking good!
# going over all possible pairs
distlstot = []
for ii in range(0, len(oldyoungvallst0)):
rowdist = []
for jj in range(0, len(oldyoungvallst0)):
tmpdist = caldistance(oldyoungvallst0[ii], oldyoungvallst0[jj])
rowdist.append(tmpdist)
distlstot.append(rowdist)
print(len(distlstot), len(distlstot[0]), distlstot[0][0], distlstot[0][1], distlstot[1][1], distlstot[1][0]) # 30 30 0.0 360.7584529430399 0.0 360.7584529430399 (not scaled 30 30 0.0 239666801.601786 0.0 239666801.601786)
# distlstot is the matrix/list that contains all the distances between the old/young groups!
# intragroup average distance - find all intragroup pairs, find their distance, average over them
# the first group is 'old', has 15 members
distmpmtrx = [[0,3,3], [3,0,3], [3,3,0]] # average distance 3
averpairdist = averageingroupdis(3, distmpmtrx)
print(averpairdist) # 3.0 - yey
distlstotarr = np.array(distlstot)
olddistlst = distlstotarr[0:15, 0:15]
print(len(olddistlst), len(olddistlst[0])) # 15 15 - yey
oldaverdist = averageingroupdis(15, olddistlst)
print(oldaverdist) # 392.91898409453125 (not scaled 246372927.80372265) - no errors, verify
youngdistlst = distlstotarr[15:, 15:]
print(len(youngdistlst), len(youngdistlst[0])) # 15 15 - yey
print(youngdistlst) # looking good - symmetrical with 0's in the diagonal
youngaverdist = averageingroupdis(15, youngdistlst)
print(youngaverdist) # 319.49663046450587 (not scaled 198695619.0538811)
# # now permuting the labels
# permuting the labels, then picking the corresponding distances from the original matrix. For example, if no. 2 is now no. 14,
# then all the distances between no. i and no. 2 are replaced by the distances between i and 14 (which is the new 2)
# looking at the ingroup average distances, and getting p-value for the difference
getdistmtrx = originaldistmtrx(merge0val.transpose().tolist()) # calculates the distance matrix for all 59 animals
print(len(getdistmtrx), len(getdistmtrx[-1]), getdistmtrx[0][0], getdistmtrx[0][1], getdistmtrx[0][2], getdistmtrx[1][0], getdistmtrx[1][1], getdistmtrx[1][2], getdistmtrx[2][0], getdistmtrx[2][1], getdistmtrx[2][2]) #
# 59 59 0.0 360.7584529430399 385.0680196051907 360.7584529430399 0.0 270.8677751129098 385.0680196051907 270.8677751129098 0.0 - yey, same as mergedscaleddistances
distances = [392.91898409453125, 319.49663046450587, 464.5228587656814, 366.00724671998097] # O,Y,AL,TRF
difftot = abs(distances[0] - distances[1]) + abs(distances[2] - distances[0]) # difftot = (O-Y) + (AL-O)
# running permutations
range0 = np.arange(59)
perm0 = np.random.permutation(range0)
permdisttmp = buildidsmatrx(np.array(getdistmtrx), perm0)
# calculating the permuted-ingroup distances
qcingpsdist = ingspdistance(getdistmtrx) # QC ingroup distances
print(qcingpsdist) # [392.91898409453125, 319.49663046450587, 464.5228587656814, 366.00724671998097] - yey
permingpdist = ingspdistance(permdisttmp)
print(permingpdist) # for one permutation - [555.9641151107118, 405.52211994358356, 424.97273571968947, 399.20275047630844] - not monotonic, good
diffsum0 = calcsum0(permingpdist[0], permingpdist[1], permingpdist[2])
print(diffsum0) # 0 - yey
# lets run 1000 permutations
# ind0 = 0
# for ii in range(0, 1): # 100, 1000
# perm0 = np.random.permutation(range0)
# permdisttmp = buildidsmatrx(np.array(getdistmtrx), perm0)
# permingpdist = ingspdistance(permdisttmp)
# diffsum0 = calcsum0(permingpdist[0], permingpdist[1], permingpdist[2])
# if (diffsum0 > difftot) and (permingpdist[3] < permingpdist[2]):
# ind0 = ind0 + 1
# print(ind0) # 1 perm - 0, 10 perm - 0, 100 perm - 0, 1000 perm - 5, 1000 perm - 5 (p_value = 0.005) - yey
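# Added sketch (not part of the original script): the commented-out loop above,
# wrapped in a reusable function. Calling run_permutation_test(getdistmtrx, difftot)
# with n_perm=1000 should reproduce the ~5 extreme permutations (p ~ 0.005) noted
# above, up to random variation. It is left uncalled so the script's runtime is unchanged.
def run_permutation_test(distmtrx, observed_diff, n_perm=1000):
    distarr0 = np.array(distmtrx)
    extreme_count = 0
    for _ in range(n_perm):
        perm = np.random.permutation(np.arange(59))  # 59 animals in total
        permdist = buildidsmatrx(distarr0, perm)
        ingp = ingspdistance(permdist)
        diffsum = calcsum0(ingp[0], ingp[1], ingp[2])
        # a permutation counts as "extreme" if its monotonic O/Y/AL trend is at least
        # as large as the observed one and the CR/TRF group stays below the AL group
        if (diffsum > observed_diff) and (ingp[3] < ingp[2]):
            extreme_count += 1
    return extreme_count / n_perm  # empirical p-value
# p_value = run_permutation_test(getdistmtrx, difftot)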
# barplot
groups00 = ['8 months', '20 months', '24 months AL', '24 months TRF']
x_pos = np.arange(len(groups00))
distances0 = [distances[1], distances[0], distances[2], distances[3]]
# Build the plot
fig, ax = plt.subplots()
ax.bar(x_pos, distances0, align='center', color='blue', capsize=10)
# ax.set_ylabel('Coefficient of Thermal Expansion ($\degree C^{-1}$)')
# ax.set_ylabel('Average Ingroup Distances [AU]')
ax.set_xticks(x_pos)
ax.set_xticklabels(groups00) #, size = 0)
ax.set_title('Average Ingroup Distance With Age') # ax.set_title('Average Ingroup Distance With Age')
# ax.yaxis.grid(True)
# ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
# plt.savefig('intra group distance.pdf')
plt.show()
|
StarcoderdataPython
|
3302345
|
<reponame>utkarshayachit/seacas<gh_stars>0
# Copyright(C) 1999-2020 National Technology & Engineering Solutions
# of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with
# NTESS, the U.S. Government retains certain rights in this software.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of NTESS nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from phactori import *
import unittest
from Operation.PhactoriSampledCellInfo import *
from Operation.PhactoriGeometricCellSampler1 import *
from paraview.simple import *
import os
class TestPhactoriGeometricCellSampler1(unittest.TestCase):
def test_PointIsInsideBoundingBox(self):
pgcs1_inst = PhactoriGeometricCellSampler1()
testbb = [-1.25,1.25,-1.5,2.0,1.75,4.25]
self.assertFalse(pgcs1_inst.PointIsInsideBoundingBox([-1.26, 0.0, 2.0], testbb))
self.assertFalse(pgcs1_inst.PointIsInsideBoundingBox([1.26, 0.0, 2.0], testbb))
self.assertTrue(pgcs1_inst.PointIsInsideBoundingBox([-1.25, 0.0, 2.0], testbb))
self.assertTrue(pgcs1_inst.PointIsInsideBoundingBox([1.25, 0.0, 2.0], testbb))
self.assertTrue(pgcs1_inst.PointIsInsideBoundingBox([-1.24, 0.0, 2.0], testbb))
self.assertTrue(pgcs1_inst.PointIsInsideBoundingBox([1.24, 0.0, 2.0], testbb))
self.assertFalse(pgcs1_inst.PointIsInsideBoundingBox([0.0, -1.6, 2.0], testbb))
self.assertFalse(pgcs1_inst.PointIsInsideBoundingBox([0.0, 2.1, 2.0], testbb))
self.assertTrue(pgcs1_inst.PointIsInsideBoundingBox([0.0, -1.5, 2.0], testbb))
self.assertTrue(pgcs1_inst.PointIsInsideBoundingBox([0.0, 1.9, 2.0], testbb))
self.assertTrue(pgcs1_inst.PointIsInsideBoundingBox([0.0, -1.4, 2.0], testbb))
self.assertTrue(pgcs1_inst.PointIsInsideBoundingBox([0.0, 1.9, 2.0], testbb))
self.assertFalse(pgcs1_inst.PointIsInsideBoundingBox([0.0, 0.0, 1.74], testbb))
self.assertFalse(pgcs1_inst.PointIsInsideBoundingBox([0.0, 0.0, 4.26], testbb))
self.assertTrue(pgcs1_inst.PointIsInsideBoundingBox([0.0, 0.0, 1.75], testbb))
self.assertTrue(pgcs1_inst.PointIsInsideBoundingBox([0.0, 0.0, 4.25], testbb))
self.assertTrue(pgcs1_inst.PointIsInsideBoundingBox([0.0, 0.0, 1.76], testbb))
self.assertTrue(pgcs1_inst.PointIsInsideBoundingBox([0.0, 0.0, 4.24], testbb))
def test_ParseParametersFromJson(self):
newOperationBlock = PhactoriOperationBlock()
newOperationBlock.mName = "phactorigeometriccellsampler1"
badOperationParams = {
"type":"geometriccellsampler1",
"cell data array names":["RTData"],
"cell data array tuple size": 1,
"do programmable filter": False,
"data controlled sampling method":"cells with ratio of min/max data value",
"data controlled ratio of min/max":0.75,
"sampling geometry bounding box":[-1.5, -4.5, -2.5, 3.5, 3.25, 7.25]
}
with self.assertRaises(Exception):
ParseOneFilterTypeFromViewMapOperation(newOperationBlock,
'geometriccellsampler1',
PhactoriGeometricCellSampler1,
badOperationParams)
def test_CreateInternalListOfGeometricallySampledCellsOnThisProcess(self):
testWavelet2 = Wavelet()
testWavelet2.UpdatePipeline()
testWavelet = PointDatatoCellData(Input=testWavelet2)
testWavelet.UpdatePipeline()
newOperationBlock = PhactoriOperationBlock()
newOperationBlock.mName = "phactorigeometriccellsampler1"
testOutFileBasename = "test_WriteAllDataFromOneProcessUsingMPI_output_cells_"
operationParams = {
"type":"geometriccellsampler1",
"cell data array names":["RTData"],
"cell data array tuple size": 1,
"do programmable filter": False,
"data controlled sampling method":"cells with ratio of min/max data value",
"data controlled ratio of min/max":0.75,
"sampling geometry bounding box":[-4.5, -1.5, -2.5, 3.5, 3.25, 7.25]
}
ParseOneFilterTypeFromViewMapOperation(newOperationBlock,
'geometriccellsampler1',
PhactoriGeometricCellSampler1,
operationParams)
PhactoriGeometricCellSampler1Instance = newOperationBlock.mOperationSpecifics
PhactoriGeometricCellSampler1Instance.myCopyOfInputFilter = testWavelet
PhactoriGeometricCellSampler1Instance.CreateInternalListOfGeometricallySampledCellsOnThisProcess()
PhactoriGeometricCellSampler1Instance.WriteCellListToFile("test_PhactoriGeometricCellSampler1_1.json",
PhactoriGeometricCellSampler1Instance.GeometricallySampledCellsForThisProcess)
goldTestStringJson = """
{
"format for cells in lists": {"PhactoriSampledCellInfo output format 1 info":[
" [cellTestPoint, ijk, dataTuple, pid, index, segmentIndex, collectionAxis]",
" cellTestPoint is [X, Y, Z], ijk is [i, j, k], dataTuple is [c1, c2, ... cN]"]},
"sampling geometry bounding box": [-4.5, -1.5, -2.5, 3.5, 3.25, 7.25],
"number of cells": 112,
"cell variable names": ["RTData"],
"data tuple size": 1,
"sum variable values": [23509.975616455078],
"average variable values": [209.91049657549178],
"list of sampled cells": [
[[-4.5, -2.5, 3.5], [-1, -1, -1], [204.47789001464844], 0, 1, 5345, -1, -1],
[[-3.5, -2.5, 3.5], [-1, -1, -1], [211.81105041503906], 0, 1, 5346, -1, -1],
[[-2.5, -2.5, 3.5], [-1, -1, -1], [219.71676635742188], 0, 1, 5347, -1, -1],
[[-1.5, -2.5, 3.5], [-1, -1, -1], [222.8864288330078], 0, 1, 5348, -1, -1],
[[-4.5, -1.5, 3.5], [-1, -1, -1], [226.4726104736328], 0, 1, 5365, -1, -1],
[[-3.5, -1.5, 3.5], [-1, -1, -1], [233.97752380371094], 0, 1, 5366, -1, -1],
[[-2.5, -1.5, 3.5], [-1, -1, -1], [242.0166473388672], 0, 1, 5367, -1, -1],
[[-1.5, -1.5, 3.5], [-1, -1, -1], [245.2774658203125], 0, 1, 5368, -1, -1],
[[-4.5, -0.5, 3.5], [-1, -1, -1], [227.3440399169922], 0, 1, 5385, -1, -1],
[[-3.5, -0.5, 3.5], [-1, -1, -1], [234.93612670898438], 0, 1, 5386, -1, -1],
[[-2.5, -0.5, 3.5], [-1, -1, -1], [243.04296875], 0, 1, 5387, -1, -1],
[[-1.5, -0.5, 3.5], [-1, -1, -1], [246.3500518798828], 0, 1, 5388, -1, -1],
[[-4.5, 0.5, 3.5], [-1, -1, -1], [209.38912963867188], 0, 1, 5405, -1, -1],
[[-3.5, 0.5, 3.5], [-1, -1, -1], [216.98121643066406], 0, 1, 5406, -1, -1],
[[-2.5, 0.5, 3.5], [-1, -1, -1], [225.08804321289062], 0, 1, 5407, -1, -1],
[[-1.5, 0.5, 3.5], [-1, -1, -1], [228.3951416015625], 0, 1, 5408, -1, -1],
[[-4.5, 1.5, 3.5], [-1, -1, -1], [205.9775390625], 0, 1, 5425, -1, -1],
[[-3.5, 1.5, 3.5], [-1, -1, -1], [213.48245239257812], 0, 1, 5426, -1, -1],
[[-2.5, 1.5, 3.5], [-1, -1, -1], [221.52157592773438], 0, 1, 5427, -1, -1],
[[-1.5, 1.5, 3.5], [-1, -1, -1], [224.7823944091797], 0, 1, 5428, -1, -1],
[[-4.5, 2.5, 3.5], [-1, -1, -1], [219.5332794189453], 0, 1, 5445, -1, -1],
[[-3.5, 2.5, 3.5], [-1, -1, -1], [226.86642456054688], 0, 1, 5446, -1, -1],
[[-2.5, 2.5, 3.5], [-1, -1, -1], [234.77215576171875], 0, 1, 5447, -1, -1],
[[-1.5, 2.5, 3.5], [-1, -1, -1], [237.94180297851562], 0, 1, 5448, -1, -1],
[[-4.5, 3.5, 3.5], [-1, -1, -1], [217.14462280273438], 0, 1, 5465, -1, -1],
[[-3.5, 3.5, 3.5], [-1, -1, -1], [224.22647094726562], 0, 1, 5466, -1, -1],
[[-2.5, 3.5, 3.5], [-1, -1, -1], [231.93704223632812], 0, 1, 5467, -1, -1],
[[-1.5, 3.5, 3.5], [-1, -1, -1], [234.97329711914062], 0, 1, 5468, -1, -1],
[[-4.5, -2.5, 4.5], [-1, -1, -1], [191.78919982910156], 0, 1, 5745, -1, -1],
[[-3.5, -2.5, 4.5], [-1, -1, -1], [198.7889404296875], 0, 1, 5746, -1, -1],
[[-2.5, -2.5, 4.5], [-1, -1, -1], [206.43572998046875], 0, 1, 5747, -1, -1],
[[-1.5, -2.5, 4.5], [-1, -1, -1], [209.42840576171875], 0, 1, 5748, -1, -1],
[[-4.5, -1.5, 4.5], [-1, -1, -1], [213.61886596679688], 0, 1, 5765, -1, -1],
[[-3.5, -1.5, 4.5], [-1, -1, -1], [220.7836456298828], 0, 1, 5766, -1, -1],
[[-2.5, -1.5, 4.5], [-1, -1, -1], [228.55862426757812], 0, 1, 5767, -1, -1],
[[-1.5, -1.5, 4.5], [-1, -1, -1], [231.63890075683594], 0, 1, 5768, -1, -1],
[[-4.5, -0.5, 4.5], [-1, -1, -1], [214.4065399169922], 0, 1, 5785, -1, -1],
[[-3.5, -0.5, 4.5], [-1, -1, -1], [221.65509033203125], 0, 1, 5786, -1, -1],
[[-2.5, -0.5, 4.5], [-1, -1, -1], [229.4951171875], 0, 1, 5787, -1, -1],
[[-1.5, -0.5, 4.5], [-1, -1, -1], [232.61985778808594], 0, 1, 5788, -1, -1],
[[-4.5, 0.5, 4.5], [-1, -1, -1], [196.45162963867188], 0, 1, 5805, -1, -1],
[[-3.5, 0.5, 4.5], [-1, -1, -1], [203.70018005371094], 0, 1, 5806, -1, -1],
[[-2.5, 0.5, 4.5], [-1, -1, -1], [211.5402069091797], 0, 1, 5807, -1, -1],
[[-1.5, 0.5, 4.5], [-1, -1, -1], [214.66494750976562], 0, 1, 5808, -1, -1],
[[-4.5, 1.5, 4.5], [-1, -1, -1], [193.12379455566406], 0, 1, 5825, -1, -1],
[[-3.5, 1.5, 4.5], [-1, -1, -1], [200.28857421875], 0, 1, 5826, -1, -1],
[[-2.5, 1.5, 4.5], [-1, -1, -1], [208.0635528564453], 0, 1, 5827, -1, -1],
[[-1.5, 1.5, 4.5], [-1, -1, -1], [211.14382934570312], 0, 1, 5828, -1, -1],
[[-4.5, 2.5, 4.5], [-1, -1, -1], [206.84458923339844], 0, 1, 5845, -1, -1],
[[-3.5, 2.5, 4.5], [-1, -1, -1], [213.84432983398438], 0, 1, 5846, -1, -1],
[[-2.5, 2.5, 4.5], [-1, -1, -1], [221.49111938476562], 0, 1, 5847, -1, -1],
[[-1.5, 2.5, 4.5], [-1, -1, -1], [224.48379516601562], 0, 1, 5848, -1, -1],
[[-4.5, 3.5, 4.5], [-1, -1, -1], [204.69740295410156], 0, 1, 5865, -1, -1],
[[-3.5, 3.5, 4.5], [-1, -1, -1], [211.45567321777344], 0, 1, 5866, -1, -1],
[[-2.5, 3.5, 4.5], [-1, -1, -1], [218.91493225097656], 0, 1, 5867, -1, -1],
[[-1.5, 3.5, 4.5], [-1, -1, -1], [221.7794189453125], 0, 1, 5868, -1, -1],
[[-4.5, -2.5, 5.5], [-1, -1, -1], [184.473388671875], 0, 1, 6145, -1, -1],
[[-3.5, -2.5, 5.5], [-1, -1, -1], [191.07464599609375], 0, 1, 6146, -1, -1],
[[-2.5, -2.5, 5.5], [-1, -1, -1], [198.41197204589844], 0, 1, 6147, -1, -1],
[[-1.5, -2.5, 5.5], [-1, -1, -1], [201.193115234375], 0, 1, 6148, -1, -1],
[[-4.5, -1.5, 5.5], [-1, -1, -1], [206.10580444335938], 0, 1, 6165, -1, -1],
[[-3.5, -1.5, 5.5], [-1, -1, -1], [212.86407470703125], 0, 1, 6166, -1, -1],
[[-2.5, -1.5, 5.5], [-1, -1, -1], [220.32333374023438], 0, 1, 6167, -1, -1],
[[-1.5, -1.5, 5.5], [-1, -1, -1], [223.18783569335938], 0, 1, 6168, -1, -1],
[[-4.5, -0.5, 5.5], [-1, -1, -1], [206.79336547851562], 0, 1, 6185, -1, -1],
[[-3.5, -0.5, 5.5], [-1, -1, -1], [213.63131713867188], 0, 1, 6186, -1, -1],
[[-2.5, -0.5, 5.5], [-1, -1, -1], [221.15248107910156], 0, 1, 6187, -1, -1],
[[-1.5, -0.5, 5.5], [-1, -1, -1], [224.05926513671875], 0, 1, 6188, -1, -1],
[[-4.5, 0.5, 5.5], [-1, -1, -1], [188.8384552001953], 0, 1, 6205, -1, -1],
[[-3.5, 0.5, 5.5], [-1, -1, -1], [195.67642211914062], 0, 1, 6206, -1, -1],
[[-2.5, 0.5, 5.5], [-1, -1, -1], [203.19757080078125], 0, 1, 6207, -1, -1],
[[-1.5, 0.5, 5.5], [-1, -1, -1], [206.10435485839844], 0, 1, 6208, -1, -1],
[[-4.5, 1.5, 5.5], [-1, -1, -1], [185.61073303222656], 0, 1, 6225, -1, -1],
[[-3.5, 1.5, 5.5], [-1, -1, -1], [192.36900329589844], 0, 1, 6226, -1, -1],
[[-2.5, 1.5, 5.5], [-1, -1, -1], [199.82826232910156], 0, 1, 6227, -1, -1],
[[-1.5, 1.5, 5.5], [-1, -1, -1], [202.69276428222656], 0, 1, 6228, -1, -1],
[[-4.5, 2.5, 5.5], [-1, -1, -1], [199.52877807617188], 0, 1, 6245, -1, -1],
[[-3.5, 2.5, 5.5], [-1, -1, -1], [206.13003540039062], 0, 1, 6246, -1, -1],
[[-2.5, 2.5, 5.5], [-1, -1, -1], [213.46734619140625], 0, 1, 6247, -1, -1],
[[-1.5, 2.5, 5.5], [-1, -1, -1], [216.24850463867188], 0, 1, 6248, -1, -1],
[[-4.5, 3.5, 5.5], [-1, -1, -1], [197.67019653320312], 0, 1, 6265, -1, -1],
[[-3.5, 3.5, 5.5], [-1, -1, -1], [204.04171752929688], 0, 1, 6266, -1, -1],
[[-2.5, 3.5, 5.5], [-1, -1, -1], [211.2006378173828], 0, 1, 6267, -1, -1],
[[-1.5, 3.5, 5.5], [-1, -1, -1], [213.85984802246094], 0, 1, 6268, -1, -1],
[[-4.5, -2.5, 6.5], [-1, -1, -1], [175.79248046875], 0, 1, 6545, -1, -1],
[[-3.5, -2.5, 6.5], [-1, -1, -1], [181.94105529785156], 0, 1, 6546, -1, -1],
[[-2.5, -2.5, 6.5], [-1, -1, -1], [188.92681884765625], 0, 1, 6547, -1, -1],
[[-1.5, -2.5, 6.5], [-1, -1, -1], [191.46768188476562], 0, 1, 6548, -1, -1],
[[-4.5, -1.5, 6.5], [-1, -1, -1], [197.20082092285156], 0, 1, 6565, -1, -1],
[[-3.5, -1.5, 6.5], [-1, -1, -1], [203.49728393554688], 0, 1, 6566, -1, -1],
[[-2.5, -1.5, 6.5], [-1, -1, -1], [210.597900390625], 0, 1, 6567, -1, -1],
[[-1.5, -1.5, 6.5], [-1, -1, -1], [213.21726989746094], 0, 1, 6568, -1, -1],
[[-4.5, -0.5, 6.5], [-1, -1, -1], [197.774658203125], 0, 1, 6585, -1, -1],
[[-3.5, -0.5, 6.5], [-1, -1, -1], [204.14617919921875], 0, 1, 6586, -1, -1],
[[-2.5, -0.5, 6.5], [-1, -1, -1], [211.3050994873047], 0, 1, 6587, -1, -1],
[[-1.5, -0.5, 6.5], [-1, -1, -1], [213.9643096923828], 0, 1, 6588, -1, -1],
[[-4.5, 0.5, 6.5], [-1, -1, -1], [179.8197479248047], 0, 1, 6605, -1, -1],
[[-3.5, 0.5, 6.5], [-1, -1, -1], [186.19126892089844], 0, 1, 6606, -1, -1],
[[-2.5, 0.5, 6.5], [-1, -1, -1], [193.35018920898438], 0, 1, 6607, -1, -1],
[[-1.5, 0.5, 6.5], [-1, -1, -1], [196.0093994140625], 0, 1, 6608, -1, -1],
[[-4.5, 1.5, 6.5], [-1, -1, -1], [176.70574951171875], 0, 1, 6625, -1, -1],
[[-3.5, 1.5, 6.5], [-1, -1, -1], [183.00221252441406], 0, 1, 6626, -1, -1],
[[-2.5, 1.5, 6.5], [-1, -1, -1], [190.10284423828125], 0, 1, 6627, -1, -1],
[[-1.5, 1.5, 6.5], [-1, -1, -1], [192.72219848632812], 0, 1, 6628, -1, -1],
[[-4.5, 2.5, 6.5], [-1, -1, -1], [190.84786987304688], 0, 1, 6645, -1, -1],
[[-3.5, 2.5, 6.5], [-1, -1, -1], [196.99644470214844], 0, 1, 6646, -1, -1],
[[-2.5, 2.5, 6.5], [-1, -1, -1], [203.98220825195312], 0, 1, 6647, -1, -1],
[[-1.5, 2.5, 6.5], [-1, -1, -1], [206.5230712890625], 0, 1, 6648, -1, -1],
[[-4.5, 3.5, 6.5], [-1, -1, -1], [189.317138671875], 0, 1, 6665, -1, -1],
[[-3.5, 3.5, 6.5], [-1, -1, -1], [195.24932861328125], 0, 1, 6666, -1, -1],
[[-2.5, 3.5, 6.5], [-1, -1, -1], [202.06704711914062], 0, 1, 6667, -1, -1],
[[-1.5, 3.5, 6.5], [-1, -1, -1], [204.49305725097656], 0, 1, 6668, -1, -1]]
}
"""
goldJson = json.loads(goldTestStringJson)
ff = open("test_PhactoriGeometricCellSampler1_1.json", "r")
testJson = json.load(ff)
ff.close()
self.assertEqual(goldJson, testJson)
#remove file that got made during test
os.remove("test_PhactoriGeometricCellSampler1_1.json")
def test_CreateInternalListOfDataControlledSampledCellsOnThisProcess_ratio_1(self):
testWavelet2 = Wavelet()
testWavelet2.UpdatePipeline()
testWavelet = PointDatatoCellData(Input=testWavelet2)
testWavelet.UpdatePipeline()
newOperationBlock = PhactoriOperationBlock()
newOperationBlock.mName = "phactorigeometriccellsampler1"
testOutFileBasename = "test_WriteAllDataFromOneProcessUsingMPI_output_cells_"
operationParams = {
"type":"geometriccellsampler1",
"cell data array names":["RTData"],
"cell data array tuple size": 1,
"do programmable filter": False,
"data controlled sampling method":"cells with ratio of min/max data value",
"data controlled ratio of min/max":0.95,
"sampling geometry bounding box":[-7.75, 7.75, -8.25, 9.75, -9.25, 8.215]
}
ParseOneFilterTypeFromViewMapOperation(newOperationBlock,
'geometriccellsampler1',
PhactoriGeometricCellSampler1,
operationParams)
PhactoriGeometricCellSampler1Instance = newOperationBlock.mOperationSpecifics
PhactoriGeometricCellSampler1Instance.myCopyOfInputFilter = testWavelet
PhactoriGeometricCellSampler1Instance.CreateInternalListOfGeometricallySampledCellsOnThisProcess()
PhactoriGeometricCellSampler1Instance.CreateInternalListOfDataControlledSampledCellsOnThisProcess()
#PhactoriGeometricCellSampler1Instance.WriteCellListToFile("test_PhactoriGeometricCellSampler1_2.json",
# PhactoriGeometricCellSampler1Instance.DataControlledSampledCellsForThisProcess)
self.assertEqual(len(PhactoriGeometricCellSampler1Instance.GeometricallySampledCellsForThisProcess), 4896)
self.assertEqual(len(PhactoriGeometricCellSampler1Instance.DataControlledSampledCellsForThisProcess), 66)
firstCell = PhactoriGeometricCellSampler1Instance.DataControlledSampledCellsForThisProcess[0]
firstCellString = firstCell.ToStrTerseOneLineList()
lastCell = PhactoriGeometricCellSampler1Instance.DataControlledSampledCellsForThisProcess[-1]
lastCellString = lastCell.ToStrTerseOneLineList()
goldFirstCellString = "[[-1.5, -1.5, -2.5], [-1, -1, -1], [251.10580444335938], 0, 1, 2968, -1, -1]"
goldLastCellString = "[[1.5, -0.5, 2.5], [-1, -1, -1], [253.63250732421875], 0, 1, 4991, -1, -1]"
self.assertEqual(firstCellString, goldFirstCellString)
self.assertEqual(lastCellString, goldLastCellString)
def test_CreateInternalListOfDataControlledSampledCellsOnThisProcess_ratio_2(self):
testWavelet2 = Wavelet()
testWavelet2.UpdatePipeline()
testWavelet = PointDatatoCellData(Input=testWavelet2)
testWavelet.UpdatePipeline()
newOperationBlock = PhactoriOperationBlock()
newOperationBlock.mName = "phactorigeometriccellsampler1"
testOutFileBasename = "test_WriteAllDataFromOneProcessUsingMPI_output_cells_"
operationParams = {
"type":"geometriccellsampler1",
"cell data array names": ["RTData"],
"cell data array tuple size": 1,
"do programmable filter": False,
"data controlled sampling method":"cells with ratio of min/max data value",
"data controlled ratio basis":"ratio is from data minimum to data maximum",
"data controlled ratio of min/max": 0.95,
"sampling geometry bounding box": [-7.75, 7.75, -8.25, 9.75, -9.25, 8.215]
}
ParseOneFilterTypeFromViewMapOperation(newOperationBlock,
'geometriccellsampler1',
PhactoriGeometricCellSampler1,
operationParams)
PhactoriGeometricCellSampler1Instance = newOperationBlock.mOperationSpecifics
PhactoriGeometricCellSampler1Instance.myCopyOfInputFilter = testWavelet
PhactoriGeometricCellSampler1Instance.CreateInternalListOfGeometricallySampledCellsOnThisProcess()
PhactoriGeometricCellSampler1Instance.CreateInternalListOfDataControlledSampledCellsOnThisProcess()
#PhactoriGeometricCellSampler1Instance.WriteCellListToFile("test_PhactoriGeometricCellSampler1_7.json",
# PhactoriGeometricCellSampler1Instance.DataControlledSampledCellsForThisProcess)
self.assertEqual(len(PhactoriGeometricCellSampler1Instance.GeometricallySampledCellsForThisProcess), 4896)
self.assertEqual(len(PhactoriGeometricCellSampler1Instance.DataControlledSampledCellsForThisProcess), 40)
firstCell = PhactoriGeometricCellSampler1Instance.DataControlledSampledCellsForThisProcess[0]
firstCellString = firstCell.ToStrTerseOneLineList()
lastCell = PhactoriGeometricCellSampler1Instance.DataControlledSampledCellsForThisProcess[-1]
lastCellString = lastCell.ToStrTerseOneLineList()
goldFirstCellString = "[[-0.5, -1.5, -2.5], [-1, -1, -1], [254.91671752929688], 0, 1, 2969, -1, -1]"
goldLastCellString = "[[0.5, -0.5, 2.5], [-1, -1, -1], [254.67347717285156], 0, 1, 4990, -1, -1]"
self.assertEqual(firstCellString, goldFirstCellString)
self.assertEqual(lastCellString, goldLastCellString)
def test_CreateInternalListOfDataControlledSampledCellsOnThisProcess_ratio_3(self):
testWavelet2 = Wavelet()
testWavelet2.UpdatePipeline()
testWavelet = PointDatatoCellData(Input=testWavelet2)
testWavelet.UpdatePipeline()
newOperationBlock = PhactoriOperationBlock()
newOperationBlock.mName = "phactorigeometriccellsampler1"
testOutFileBasename = "test_WriteAllDataFromOneProcessUsingMPI_output_cells_"
operationParams = {
"type":"geometriccellsampler1",
"cell data array names":["RTData"],
"cell data array tuple size": 1,
"do programmable filter": False,
"data controlled sampling method":"cells with ratio of min/max data value",
"data controlled ratio basis":"ratio is from data minimum to data maximum",
"data controlled ratio of min/max": 0.05,
"collect cells relative to ratio": "cells less/equal",
"sampling geometry bounding box":[-7.75, 7.75, -8.25, 9.75, -9.25, 8.215]
}
ParseOneFilterTypeFromViewMapOperation(newOperationBlock,
'geometriccellsampler1',
PhactoriGeometricCellSampler1,
operationParams)
PhactoriGeometricCellSampler1Instance = newOperationBlock.mOperationSpecifics
PhactoriGeometricCellSampler1Instance.myCopyOfInputFilter = testWavelet
PhactoriGeometricCellSampler1Instance.CreateInternalListOfGeometricallySampledCellsOnThisProcess()
PhactoriGeometricCellSampler1Instance.CreateInternalListOfDataControlledSampledCellsOnThisProcess()
#PhactoriGeometricCellSampler1Instance.WriteCellListToFile("test_PhactoriGeometricCellSampler1_8.json",
# PhactoriGeometricCellSampler1Instance.DataControlledSampledCellsForThisProcess)
self.assertEqual(len(PhactoriGeometricCellSampler1Instance.GeometricallySampledCellsForThisProcess), 4896)
self.assertEqual(len(PhactoriGeometricCellSampler1Instance.DataControlledSampledCellsForThisProcess), 8)
firstCell = PhactoriGeometricCellSampler1Instance.DataControlledSampledCellsForThisProcess[0]
firstCellString = firstCell.ToStrTerseOneLineList()
lastCell = PhactoriGeometricCellSampler1Instance.DataControlledSampledCellsForThisProcess[-1]
lastCellString = lastCell.ToStrTerseOneLineList()
goldFirstCellString = "[[-7.5, 9.5, -8.5], [-1, -1, -1], [71.15290069580078], 0, 1, 782, -1, -1]"
goldLastCellString = "[[7.5, 9.5, 7.5], [-1, -1, -1], [77.62138366699219], 0, 1, 7197, -1, -1]"
self.assertEqual(firstCellString, goldFirstCellString)
self.assertEqual(lastCellString, goldLastCellString)
def test_CreateInternalListOfDataControlledSampledCellsOnThisProcess_distance(self):
testWavelet2 = Wavelet()
testWavelet2.UpdatePipeline()
testWavelet = PointDatatoCellData(Input=testWavelet2)
testWavelet.UpdatePipeline()
newOperationBlock = PhactoriOperationBlock()
newOperationBlock.mName = "phactorigeometriccellsampler1"
testOutFileBasename = "test_WriteAllDataFromOneProcessUsingMPI_output_cells_"
operationParams = {
"type":"geometriccellsampler1",
"cell data array names":["RTData"],
"cell data array tuple size": 1,
"do programmable filter": False,
"data controlled sampling method":"cells within distance of min/max highest data value cell",
"data controlled distance":1.25,
"data controlled sampling use min or max": "max",
"sampling geometry bounding box":[-7.75, 7.75, -8.25, 9.75, -9.25, 8.215]
}
ParseOneFilterTypeFromViewMapOperation(newOperationBlock,
'geometriccellsampler1',
PhactoriGeometricCellSampler1,
operationParams)
PhactoriGeometricCellSampler1Instance = newOperationBlock.mOperationSpecifics
PhactoriGeometricCellSampler1Instance.myCopyOfInputFilter = testWavelet
PhactoriGeometricCellSampler1Instance.CreateInternalListOfGeometricallySampledCellsOnThisProcess()
PhactoriGeometricCellSampler1Instance.CreateInternalListOfDataControlledSampledCellsOnThisProcess()
#PhactoriGeometricCellSampler1Instance.WriteCellListToFile("test_PhactoriGeometricCellSampler1_3.json",
# PhactoriGeometricCellSampler1Instance.DataControlledSampledCellsForThisProcess)
self.assertEqual(len(PhactoriGeometricCellSampler1Instance.DataControlledSampledCellsForThisProcess), 7)
firstCell = PhactoriGeometricCellSampler1Instance.DataControlledSampledCellsForThisProcess[0]
firstCellString = firstCell.ToStrTerseOneLineList()
lastCell = PhactoriGeometricCellSampler1Instance.DataControlledSampledCellsForThisProcess[-1]
lastCellString = lastCell.ToStrTerseOneLineList()
goldFirstCellString = "[[-0.5, -0.5, -1.5], [-1, -1, -1], [257.593505859375], 0, 1, 3389, -1, -1]"
goldLastCellString = "[[-0.5, -0.5, 0.5], [-1, -1, -1], [264.2397155761719], 0, 1, 4189, -1, -1]"
self.assertEqual(firstCellString, goldFirstCellString)
self.assertEqual(lastCellString, goldLastCellString)
operationParams = {
"type":"geometriccellsampler1",
"cell data array names":["RTData"],
"cell data array tuple size": 1,
"do programmable filter": False,
"data controlled sampling method":"cells within distance of min/max highest data value cell",
"data controlled distance":2.25,
"data controlled sampling use min or max": "min",
"sampling geometry bounding box":[-7.75, 7.75, -8.25, 9.75, -9.25, 8.215]
}
PhactoriGeometricCellSampler1Instance.ParseParametersFromJson(operationParams)
PhactoriGeometricCellSampler1Instance.CreateInternalListOfDataControlledSampledCellsOnThisProcess()
#PhactoriGeometricCellSampler1Instance.WriteCellListToFile("test_PhactoriGeometricCellSampler1_4.json",
# PhactoriGeometricCellSampler1Instance.DataControlledSampledCellsForThisProcess)
self.assertEqual(len(PhactoriGeometricCellSampler1Instance.DataControlledSampledCellsForThisProcess), 17)
firstCell = PhactoriGeometricCellSampler1Instance.DataControlledSampledCellsForThisProcess[0]
firstCellString = firstCell.ToStrTerseOneLineList()
lastCell = PhactoriGeometricCellSampler1Instance.DataControlledSampledCellsForThisProcess[-1]
lastCellString = lastCell.ToStrTerseOneLineList()
goldFirstCellString = "[[-7.5, 7.5, -8.5], [-1, -1, -1], [112.75462341308594], 0, 1, 742, -1, -1]"
goldLastCellString = "[[-6.5, 9.5, -6.5], [-1, -1, -1], [96.05937194824219], 0, 1, 1583, -1, -1]"
self.assertEqual(firstCellString, goldFirstCellString)
self.assertEqual(lastCellString, goldLastCellString)
def test_CreateInternalListOfDataControlledSampledCellsOnThisProcess_boundingbox(self):
testWavelet2 = Wavelet()
testWavelet2.UpdatePipeline()
testWavelet = PointDatatoCellData(Input=testWavelet2)
testWavelet.UpdatePipeline()
newOperationBlock = PhactoriOperationBlock()
newOperationBlock.mName = "phactorigeometriccellsampler1"
testOutFileBasename = "test_WriteAllDataFromOneProcessUsingMPI_output_cells_"
operationParams = {
"type":"geometriccellsampler1",
"cell data array names":["RTData"],
"cell data array tuple size": 1,
"do programmable filter": False,
"data controlled sampling method":"cells within bounding box around min/max highest data value cell",
"data controlled bounding box": [-1.25, 1.25, -2.25, 1.25, -2.25, 2.25],
"data controlled sampling use min or max": "max",
"sampling geometry bounding box":[-7.75, 7.75, -8.25, 9.75, -9.25, 8.215]
}
ParseOneFilterTypeFromViewMapOperation(newOperationBlock,
'geometriccellsampler1',
PhactoriGeometricCellSampler1,
operationParams)
PhactoriGeometricCellSampler1Instance = newOperationBlock.mOperationSpecifics
PhactoriGeometricCellSampler1Instance.myCopyOfInputFilter = testWavelet
PhactoriGeometricCellSampler1Instance.CreateInternalListOfGeometricallySampledCellsOnThisProcess()
PhactoriGeometricCellSampler1Instance.CreateInternalListOfDataControlledSampledCellsOnThisProcess()
#PhactoriGeometricCellSampler1Instance.WriteCellListToFile("test_PhactoriGeometricCellSampler1_5.json",
# PhactoriGeometricCellSampler1Instance.DataControlledSampledCellsForThisProcess)
operationParams = {
"type":"geometriccellsampler1",
"cell data array names":["RTData"],
"cell data array tuple size": 1,
"do programmable filter": False,
"data controlled sampling method":"cells within bounding box around min/max highest data value cell",
"data controlled bounding box": [-1.25, 1.25, -2.25, 1.25, -2.25, 2.25],
"data controlled sampling use min or max": "min",
"sampling geometry bounding box":[-7.75, 7.75, -8.25, 9.75, -9.25, 8.215]
}
PhactoriGeometricCellSampler1Instance.ParseParametersFromJson(operationParams)
PhactoriGeometricCellSampler1Instance.CreateInternalListOfDataControlledSampledCellsOnThisProcess()
#PhactoriGeometricCellSampler1Instance.WriteCellListToFile("test_PhactoriGeometricCellSampler1_6.json",
# PhactoriGeometricCellSampler1Instance.DataControlledSampledCellsForThisProcess)
self.assertEqual(len(PhactoriGeometricCellSampler1Instance.DataControlledSampledCellsForThisProcess), 18)
firstCell = PhactoriGeometricCellSampler1Instance.DataControlledSampledCellsForThisProcess[0]
firstCellString = firstCell.ToStrTerseOneLineList()
lastCell = PhactoriGeometricCellSampler1Instance.DataControlledSampledCellsForThisProcess[-1]
lastCellString = lastCell.ToStrTerseOneLineList()
goldFirstCellString = "[[-7.5, 7.5, -8.5], [-1, -1, -1], [112.75462341308594], 0, 1, 742, -1, -1]"
goldLastCellString = "[[-6.5, 9.5, -6.5], [-1, -1, -1], [96.05937194824219], 0, 1, 1583, -1, -1]"
self.assertEqual(firstCellString, goldFirstCellString)
self.assertEqual(lastCellString, goldLastCellString)
def test_CollectDataOnSampledCellsOnThisProcess(self):
testWavelet2 = Wavelet()
testWavelet2.UpdatePipeline()
testWavelet = PointDatatoCellData(Input=testWavelet2)
testWavelet.UpdatePipeline()
newOperationBlock = PhactoriOperationBlock()
newOperationBlock.mName = "phactorigeometriccellsampler1"
testOutFileBasename = "test_WriteAllDataFromOneProcessUsingMPI_output_cells_"
operationParams = {
"type":"geometriccellsampler1",
"cell data array names":["RTData"],
"cell data array tuple size": 1,
"do programmable filter": False,
"data controlled sampling method":"cells within bounding box around min/max highest data value cell",
"data controlled bounding box": [-1.25, 1.25, -2.25, 1.25, -2.25, 2.25],
"data controlled sampling use min or max": "max",
"sampling geometry bounding box":[-7.75, 7.75, -8.25, 9.75, -9.25, 8.215]
}
ParseOneFilterTypeFromViewMapOperation(newOperationBlock,
'geometriccellsampler1',
PhactoriGeometricCellSampler1,
operationParams)
PhactoriGeometricCellSampler1Instance = newOperationBlock.mOperationSpecifics
PhactoriGeometricCellSampler1Instance.myCopyOfInputFilter = testWavelet
PhactoriGeometricCellSampler1Instance.CreateInternalListOfGeometricallySampledCellsOnThisProcess()
#PhactoriGeometricCellSampler1Instance.WriteCellListToFile("test_PhactoriGeometricCellSampler1_9a.json",
# PhactoriGeometricCellSampler1Instance.GeometricallySampledCellsForThisProcess)
#change data on some cells then recollect it to see if it is done properly
testData1 = []
for oneCell in PhactoriGeometricCellSampler1Instance.GeometricallySampledCellsForThisProcess:
testData1.append(oneCell.dataTuple[0])
oneCell.dataTuple[0] = -1.0
PhactoriGeometricCellSampler1Instance.CollectDataOnSampledCellsOnThisProcess()
testData2 = []
for oneCell in PhactoriGeometricCellSampler1Instance.GeometricallySampledCellsForThisProcess:
testData2.append(oneCell.dataTuple[0])
self.assertEqual(testData1, testData2)
#PhactoriGeometricCellSampler1Instance.WriteCellListToFile("test_PhactoriGeometricCellSampler1_9b.json",
# PhactoriGeometricCellSampler1Instance.GeometricallySampledCellsForThisProcess)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
3246383
|
from engine.metric_artifact import Metric as MetricArtifact
from aim.web.api.utils import unsupported_float_type
def separate_select_statement(select: list) -> tuple:
aim_select = []
tf_select = []
for s in select:
if s.startswith('tf:'):
adapter, _, name = s.partition(':')
tf_select.append(name)
else:
aim_select.append(s)
return aim_select, tf_select
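# Illustration (added, not from the original module): the 'tf:' prefix routes a
# metric name to the TensorFlow adapter list, everything else stays with aim, e.g.
#   separate_select_statement(['loss', 'tf:accuracy', 'val_loss'])
#   -> (['loss', 'val_loss'], ['accuracy'])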
def process_trace_record(r, trace, x_axis_trace, x_idx):
base, metric_record = MetricArtifact.deserialize_pb(r)
if unsupported_float_type(metric_record.value):
return
if x_axis_trace is not None:
# try to initialize new value for x_axis from already available sources
if x_axis_trace.metric == trace.metric:
new_x_axis_value = metric_record.value
else:
new_x_axis_value = x_axis_trace.tmp_data.get(x_idx)
if new_x_axis_value:
if x_axis_trace.current_x_axis_value and new_x_axis_value < x_axis_trace.current_x_axis_value:
trace.alignment['is_asc'] = False
x_axis_trace.current_x_axis_value = new_x_axis_value
else:
            # if there was no value available for index x_idx from the available sources, read it from storage
try:
x_r = next(x_axis_trace.read_records(x_idx))
_, x_axis_metric_record = MetricArtifact.deserialize_pb(x_r)
if not unsupported_float_type(x_axis_metric_record.value):
new_x_axis_value = x_axis_metric_record.value
if x_axis_trace.current_x_axis_value and \
new_x_axis_value < x_axis_trace.current_x_axis_value:
trace.alignment['is_asc'] = False
x_axis_trace.current_x_axis_value = new_x_axis_value
x_axis_trace.tmp_data[x_idx] = x_axis_trace.current_x_axis_value
else:
trace.alignment['skipped_steps'] += 1
except StopIteration:
trace.alignment['is_synced'] = False
trace.append((
metric_record.value,
base.step,
base.epoch if base.has_epoch else None,
base.timestamp,
x_axis_trace.current_x_axis_value if x_axis_trace else None
))
|
StarcoderdataPython
|
1702682
|
#!/usr/bin/env python3
"""This python starts the Hillview service on the machines
specified in the configuration file."""
# pylint: disable=invalid-name
from argparse import ArgumentParser
from hillviewCommon import RemoteHost, RemoteAggregator, ClusterConfiguration, get_config
def start_webserver(config):
"""Starts the Hillview web server"""
assert isinstance(config, ClusterConfiguration)
rh = config.get_webserver()
print("Starting web server", rh)
rh.run_remote_shell_command(
"export WEB_CLUSTER_DESCRIPTOR=serverlist; cd " + config.service_folder + "; nohup " + \
config.tomcat + "/bin/startup.sh &")
def start_worker(config, rh):
"""Starts the Hillview worker on a remote machine"""
assert isinstance(rh, RemoteHost)
assert isinstance(config, ClusterConfiguration)
print("Starting worker", rh)
gclog = config.service_folder + "/hillview/gc.log"
rh.run_remote_shell_command(
"cd " + config.service_folder + "/hillview; " + \
"nohup java -Dlog4j.configurationFile=./log4j.properties -server -Xms" + rh.heapsize + \
" -Xmx" + rh.heapsize + " -Xloggc:" + gclog + \
" -jar " + config.service_folder + \
"/hillview/hillview-server-jar-with-dependencies.jar " + rh.host + ":" + \
str(config.worker_port) + " >nohup.out 2>&1 &")
def start_aggregator(config, agg):
"""Starts a Hillview aggregator"""
assert isinstance(agg, RemoteAggregator)
assert isinstance(config, ClusterConfiguration)
print("Starting aggregator", agg)
agg.run_remote_shell_command(
"cd " + config.service_folder + "/hillview; " + \
"nohup java -Dlog4j.configurationFile=./log4j.properties -server " + \
" -jar " + config.service_folder + \
"/hillview/hillview-server-jar-with-dependencies.jar " + \
config.service_folder + "/workers " + agg.host + ":" + \
str(config.aggregator_port) + " >nohup.agg 2>&1 &")
def start_aggregators(config):
"""Starts all Hillview aggregators"""
assert isinstance(config, ClusterConfiguration)
config.run_on_all_aggregators(lambda rh: start_aggregator(config, rh))
def start_workers(config):
"""Starts all Hillview workers"""
assert isinstance(config, ClusterConfiguration)
config.run_on_all_workers(lambda rh: start_worker(config, rh))
def main():
"""Main function"""
parser = ArgumentParser()
parser.add_argument("config", help="json cluster configuration file")
args = parser.parse_args()
config = get_config(parser, args)
start_webserver(config)
start_workers(config)
start_aggregators(config)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1733388
|
# Import to make requests to get the news from website
import requests
# Import to parse the HTML
from bs4 import BeautifulSoup
# Import to interact with database
from database_connector import DBManager
# Define a function to parse the news page website and get title and description
def get_title_and_description_tuple_list(page=0):
    # Challenge: you can make this data extraction much faster and lighter on resources by other means
    # Hint: Regex (a regex-based sketch is added after this function)
# Declare the news page url
news_page_url = "https://www.indiatoday.in/india?page={page_number}"
# Hit the news page site and get the HTML
html = requests.get(news_page_url.format(page_number=page))
# Create a beautiful soup object
soup = BeautifulSoup(html.text, 'html.parser')
# Parse the HTML and create a list of title and description
title_description_tuple_list = []
for details_div in soup.find_all('div', {'class': 'detail'}):
title_description_tuple_list.append((details_div.h2.text, details_div.p.text))
return title_description_tuple_list
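# Added sketch of the regex-based alternative hinted at above. It assumes the page
# keeps the same <div class="detail"> / <h2> / <p> structure that the BeautifulSoup
# version relies on; real markup with extra attributes or nesting may need a more
# robust pattern, so treat this as illustrative only.
import re
def get_title_and_description_tuple_list_regex(page=0):
    news_page_url = "https://www.indiatoday.in/india?page={page_number}"
    html = requests.get(news_page_url.format(page_number=page))
    # non-greedy match of each detail block, capturing the h2 title and p description
    pattern = re.compile(
        r'<div class="detail">.*?<h2[^>]*>(.*?)</h2>.*?<p[^>]*>(.*?)</p>',
        re.S)
    return pattern.findall(html.text)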
# Define a function to get the news page as json from db if it already exists else parse it from the website
def get_news_page_as_json(page_no=0):
# Check if the specific page number has already been extracted
result_dict = DBManager.get_entries_from_db_for_page_no(page_no=page_no)
if not result_dict:
# Get all the title, description and page_no
title_description_tuple_list = get_title_and_description_tuple_list(page=page_no)
no_of_rows_inserted = DBManager.insert_all_title_description_into_db(title_description_tuple_list, page_no=page_no)
result_dict = DBManager.get_entries_from_db_for_page_no(page_no=page_no)
return result_dict
|
StarcoderdataPython
|
3219128
|
# -*- coding: utf-8 -*-
# (c) 2009-2021 <NAME> and contributors; see WsgiDAV https://github.com/mar10/wsgidav
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
Implement the FileLikeQueue helper class.
This helper class is intended to handle use cases where an incoming PUT
request should be directly streamed to a remote target.
Usage: return an instance of this class from `begin_write` and pass it to the
consumer at the same time::
def begin_write(self, contentType=None):
queue = FileLikeQueue(max_size=1)
requests.post(..., data=queue)
return queue
"""
import queue
from wsgidav import util
__docformat__ = "reStructuredText"
_logger = util.get_module_logger(__name__)
# ============================================================================
# FileLikeQueue
# ============================================================================
class FileLikeQueue:
"""A queue for chunks that behaves like a file-like.
read() and write() are typically called from different threads.
This helper class is intended to handle use cases where an incoming PUT
request should be directly streamed to a remote target:
def begin_write(self, contentType=None):
# Create a proxy buffer
queue = FileLikeQueue(max_size=1)
# ... and use it as source for the consumer:
requests.post(..., data=queue)
# pass it to the PUT handler as target
return queue
"""
def __init__(self, max_size=0):
self.is_closed = False
self.queue = queue.Queue(max_size)
self.unread = b""
def read(self, size=0):
"""Read a chunk of bytes from queue.
size = 0: Read next chunk (arbitrary length)
> 0: Read one chunk of `size` bytes (or less if stream was closed)
< 0: Read all bytes as single chunk (i.e. blocks until stream is closed)
        This method blocks until the requested size becomes available.
However, if close() was called, '' is returned immediately.
"""
res = self.unread
self.unread = b""
# Get next chunk, cumulating requested size as needed
while res == b"" or size < 0 or (size > 0 and len(res) < size):
try:
                # Read pending data, blocking if necessary
# (but handle the case that close() is called while waiting)
res += self.queue.get(True, 0.1)
except queue.Empty:
# There was no pending data: wait for more, unless close() was called
if self.is_closed:
break
# Deliver `size` bytes from buffer
if size > 0 and len(res) > size:
self.unread = res[size:]
res = res[:size]
# print("FileLikeQueue.read({}) => {} bytes".format(size, len(res)))
assert type(res) is bytes
return res
def write(self, chunk):
"""Put a chunk of bytes (or an iterable) to the queue.
May block if max_size number of chunks is reached.
"""
assert type(chunk) is bytes
if self.is_closed:
raise ValueError("Cannot write to closed object")
# print("FileLikeQueue.write(), n={}".format(len(chunk)))
# Add chunk to queue (blocks if queue is full)
if util.is_basestring(chunk):
self.queue.put(chunk)
else: # if not a string, assume an iterable
for o in chunk:
self.queue.put(o)
def close(self):
# print("FileLikeQueue.close()")
self.is_closed = True
# TODO: we may also implement iterator functionality, but this should be
# optional, since the consumer may behave differently.
# For example the `requests` library produces chunked transfer encoding if
# the `data` argument is a generator instead of a file-like.
# def __iter__(self):
# return self
# def __next__(self):
# result = self.read(self.block_size)
# if not result:
# raise StopIteration
# return result
# next = __next__ # Python 2.x
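# Usage sketch (added illustration, not part of the original module): a writer
# thread streams chunks into a FileLikeQueue while a reader consumes them,
# mirroring the PUT-to-remote-target scenario described in the class docstring.
#
#     import threading
#
#     def _producer(q):
#         for chunk in (b"part1", b"part2", b"part3"):
#             q.write(chunk)
#         q.close()
#
#     fq = FileLikeQueue(max_size=1)
#     threading.Thread(target=_producer, args=(fq,)).start()
#     body = fq.read(-1)  # blocks until close(); returns b"part1part2part3"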
# ============================================================================
# StreamingFile
# ============================================================================
class StreamingFile:
"""A file object wrapped around an iterator / data stream."""
def __init__(self, data_stream):
"""Intialise the object with the data stream."""
self.data_stream = data_stream
self.buffer = ""
def read(self, size=None):
"""Read bytes from an iterator."""
while size is None or len(self.buffer) < size:
try:
self.buffer += next(self.data_stream)
except StopIteration:
break
sized_chunk = self.buffer[:size]
if size is None:
self.buffer = ""
else:
self.buffer = self.buffer[size:]
return sized_chunk
|
StarcoderdataPython
|
152664
|
#!/usr/local/bin/python
import re
import os
refsbib = open('refs.bib', 'r').read()
p = re.compile('@.+{(.*),')
g = p.findall(refsbib)
for f in g:
name = f + '.pdf'
if os.path.isfile(os.path.join('./live/files/', name)):
print("[OK] %s ready" % f)
else:
print("[--] %s not found" % f)
|
StarcoderdataPython
|
8801
|
<filename>read_delphin_data.py
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 6 14:51:24 2021
@author: laukkara
This script is run first to fetch the results data from the university's network drive
"""
import os
import pickle
input_folder_for_Delphin_data = r'S:\91202_Rakfys_Mallinnus\RAMI\simulations'
output_folder = os.path.join(r'C:\Local\laukkara\Data\github\mry-cluster2\input')
output_pickle_file_name = 'S_RAMI.pickle'
## Preparations
if not os.path.exists(output_folder):
os.makedirs(output_folder)
output_pickle_file_path = os.path.join(output_folder,
output_pickle_file_name)
## Read in results data from pickle files
cases = {}
data = {}
cases = os.listdir(input_folder_for_Delphin_data)
cases.remove('olds')
cases.remove('RAMI_simulated_cases.xlsx')
data = {}
for case in cases:
print('Reading:', case)
fname = os.path.join(input_folder_for_Delphin_data, case, 'd.pickle')
with open(fname, 'rb') as f:
try:
df = pickle.load(f)
if df.shape[0] == 1200:
data[case] = df
else:
print('ERROR AT:', case)
        except Exception as err:
            print('Error when reading case:', case, err)
print(data[cases[0]].columns)
with open(output_pickle_file_path, 'wb') as f:
pickle.dump(data, f)
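# Added sketch: how the combined pickle written above could be read back later;
# the path variable mirrors the one defined in this script.
# with open(output_pickle_file_path, 'rb') as f:
#     data_loaded = pickle.load(f)
# for case_name, df in data_loaded.items():
#     print(case_name, df.shape)  # each df is expected to have 1200 rows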
|
StarcoderdataPython
|
1764469
|
"""
Here are a VAE and GAN
"""
from pl_bolts.models.autoencoders.basic_ae.basic_ae_module import AE
from pl_bolts.models.autoencoders.basic_vae.basic_vae_module import VAE
from pl_bolts.models.autoencoders.components import resnet18_encoder, resnet18_decoder
from pl_bolts.models.autoencoders.components import resnet50_encoder, resnet50_decoder
|
StarcoderdataPython
|
1706633
|
import time
import logging
import fire
import numpy as np
from tqdm import tqdm
from torch.utils.data import DataLoader
import models
import utils
from dataset import ImageDataset
logging.getLogger().setLevel(logging.INFO)
def run(model_name, output_dir, dataname, data_dir='./data', batch_size=16, test_run=-1):
data_path = '%s/%s' % (data_dir, dataname)
logging.info('Load data from %s' % data_path)
logging.info('Using model=%s' % model_name)
ds = ImageDataset(data_path)
model = models.get_model(model_name)
data_loader = DataLoader(ds, batch_size=batch_size)
features_list = []
count = 0
iterator = tqdm(data_loader)
for batch in iterator:
output = model.forward_pass(batch.to(utils.torch_device()))
features_list.append(output.cpu().detach().numpy())
if test_run != -1 and count > test_run:
iterator.close()
break
count = count + 1
features = np.vstack(features_list)
logging.info(features.shape)
output_path = '%s/%s-%s--%s' % (output_dir, model_name, dataname, time.strftime('%Y-%m-%d-%H-%M-%S'))
np.save(output_path, features)
logging.info('save data at %s' % output_path)
if __name__ == "__main__":
fire.Fire(run)
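# Example invocation (illustrative; the script's filename is not given in the source
# and the available model names depend on the project-specific models.get_model):
#   python <this-script>.py resnet18 ./features my_images --batch_size 32 --test_run 10
# python-fire maps the positional and flag arguments onto run()'s parameters.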
|
StarcoderdataPython
|
2990
|
<gh_stars>1-10
import numpy as np
import random
from time import time, sleep
import h5py
import torch
import torch.nn as nn
import torch.optim as optimizer
import glob
import os
#from scipy.stats import rankdata
from lstm import Model, initialize
from Optim import ScheduledOptim
# import _pickle as cPickle
# np.set_printoptions(threshold=np.nan)
def start(config):
model = Model(config)
model = model.to(config.device)
#optim = optimizer.SGD(model.parameters(), lr=2e-4, momentum=0.9, weight_decay=config.c)
#lr_scheduler = torch.optim.lr_scheduler.StepLR(optim, step_size=200, gamma=0.1) # 20M iters
optim = ScheduledOptim(
optimizer.Adam(
filter(lambda p: p.requires_grad, model.parameters()), lr=config.lr,
betas=(0.9, 0.98), eps=1e-09),
config.hidden_dim, 2000)
list_of_files = glob.glob(config.model_path + '/*')
latest_file = None
if list_of_files:
latest_file = max(list_of_files, key=os.path.getctime)
model_ckpt = latest_file
# model_ckpt = config.model_path + '/model-454.pth'
print(model_ckpt)
if model_ckpt:
checkpoint = torch.load(model_ckpt)
model.load_state_dict(checkpoint['state_dict'])
optim.optimizer.load_state_dict(checkpoint['optimizer'])
start_iter = model_ckpt.split('-')[-1].split('.')[0]
start_iter = int(start_iter)
else:
model.apply(initialize)
start_iter = 0
count = 0
for iter in range(start_iter, config.total_iterations):
print('iteration: %s' % iter)
#if (iter + 1) % 100000 == 0:
# lr_scheduler.step()
start_time = time()
optim.update_learning_rate(iter)
# reads the randomly sampled (s,pi,z)'s from the buffer
# ~ 0.1s
# TODO: if error, set a lock
# translate, _ = cPickle.load(open('save/vocab_cotra.pkl', 'rb'))
with h5py.File("buffer", "r") as f:
cur_row = int(f['/cur_row'][0])
s_buffer = f['/s']
pi_buffer = f['/pi']
z_buffer = f['/z']
s_tmp = []
pi_tmp = []
z_tmp = []
df = cur_row - count
'''x = np.bincount(s_buffer[:,1].astype(int)) / 500000
for i in range(len(x)):
if x[i] > 0.01:
print(i, x[i], translate[i])
break'''
if count == 0:
count = cur_row
t_inf = time()
if count != 0 and df >= 1000:
print('time required for 32 self-play games: ', 32 * (time() - t_inf) / df)
t_inf = time()
count = cur_row
if cur_row >= config.buffer_size:
r = np.sort(
np.random.choice(list(range(0, config.buffer_size)), (config.batch_size // 2), replace=False))
else:
r = np.sort(
np.random.choice(list(range(0, cur_row)), (config.batch_size // 2), replace=False))
tmp = []
# randomly sample rows 8 times for a dramatic speedup.
num_segments = 8
for i in range(num_segments):
tmp.append(
r[(config.batch_size // 2) // num_segments * i:(config.batch_size // 2) // num_segments * (i + 1)])
for i in range(num_segments):
s_tmp.append(s_buffer[tmp[i], :config.max_length])
pi_tmp.append(pi_buffer[tmp[i], :config.max_length, ...])
z_tmp.append(z_buffer[tmp[i], ...])
s = np.concatenate(s_tmp, 0)
pi = np.concatenate(pi_tmp, 0)
z = np.concatenate(z_tmp, 0)
# print('io time: ',time() - start_time)
# decompresses sampled pi's
# takes about 0.005s
new_pi = np.zeros(((config.batch_size // 2), config.max_length, config.vocab_size))
for i in range((config.batch_size // 2)):
for j in range(config.max_length):
if pi[i, j, 0] == -1: # meaning the terminal state; pi=0
new_pi[i, j, :] = 0
elif pi[i, j, 0] == -2 or sum(pi[i, j, :]) == 0: # meaning the padding; place -1 padding
new_pi[i, j, :] = -1
else:
# Beware that np.bincount's bin is [0,1,...min_length-1]
new_pi[i, j, :] = np.bincount(pi[i, j, :].astype(int),
minlength=config.vocab_size) / config.simulation_num_per_move
pi = new_pi
# creating a mask for loss function and preparing a minibatch
def generate_mask(array):
new_array = np.zeros_like(array)
for i in range(len(array)):
for j in range(len(array[i])):
if j == len(array[i]) - 1:
new_array[i, :] = 1
elif array[i, j] == config.period_token:
new_array[i, :j + 1] = 1
break
elif array[i, j] == config.blank_token:
new_array[i, :j] = 1
break
return new_array
def pi_mask(array):
array = array[:, 1:]
array = np.pad(array, ((0, 0), (0, 1)), 'constant')
return generate_mask(array)
# pi_tmp isn't modified here, since the mask will be modified appropriately
_, pi_mask = pi_mask(s)
z_mask = generate_mask(s)
z_batch = np.concatenate(
[np.ones([(config.batch_size // 2), config.max_length]) * (-1),
np.ones([(config.batch_size // 2), config.max_length])])
def convert(x):
return torch.tensor(x.astype(np.float32), device=config.device)
t2 = time()
# gradient update
model.train()
cache = []
for i in range(config.depth // config.unit_depth):
cache += [torch.zeros(config.batch_size, config.hidden_dim,device=config.device),
torch.zeros(config.batch_size, config.hidden_dim,device=config.device)]
s_batch = convert(np.array(s)).long()
policy, v, cache = model(s_batch, tuple(cache))
def loss_policy(y_true, y_pred):
return torch.sum(-y_true * torch.log(y_pred + 1.0e-8), 2)
def loss_value(y_true, y_pred):
return (y_true - y_pred) ** 2
pi_mask = convert(pi_mask)
z_mask = convert(z_mask)
z = convert(z)
pi = convert(pi)
loss = torch.mean(torch.sum(loss_policy(pi, policy) * pi_mask +
loss_value(z, v) * z_mask
, 1) / torch.sum(z_mask, 1))
loss.backward()
        # clip_grad_norm_ is the in-place, non-deprecated spelling
        gn = nn.utils.clip_grad_norm_(model.parameters(), config.clip)
        print('grad norm:', gn)
optim.step()
optim.zero_grad()
print("grad update: %s seconds" % (time() - t2))
print("iteration: %s seconds" % (time() - start_time))
checkpoint = {'state_dict': model.state_dict(),
'optimizer': optim.optimizer.state_dict()}
sleep(config.training_sleep_time)
torch.save(checkpoint, config.model_path + '/model' + '-' + str(iter + 1) + '.pth')
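        # Resuming from this checkpoint later follows the standard pattern (sketch):
        #   checkpoint = torch.load(path_to_checkpoint)
        #   model.load_state_dict(checkpoint['state_dict'])
        #   optim.optimizer.load_state_dict(checkpoint['optimizer'])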
|
StarcoderdataPython
|
3303357
|
<reponame>He-Ze/Distributed-System-SYSU
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import os,sys,imp,types,re
from waflib import Utils,Configure,Options,Logs,Errors
from waflib.Tools import fc
fc_compiler={'win32':['gfortran','ifort'],'darwin':['gfortran','g95','ifort'],'linux':['gfortran','g95','ifort'],'java':['gfortran','g95','ifort'],'default':['gfortran'],'aix':['gfortran']}
def default_compilers():
build_platform=Utils.unversioned_sys_platform()
possible_compiler_list=fc_compiler.get(build_platform,fc_compiler['default'])
return' '.join(possible_compiler_list)
def configure(conf):
try:test_for_compiler=conf.options.check_fortran_compiler or default_compilers()
except AttributeError:conf.fatal("Add options(opt): opt.load('compiler_fc')")
for compiler in re.split('[ ,]+',test_for_compiler):
conf.env.stash()
conf.start_msg('Checking for %r (Fortran compiler)'%compiler)
try:
conf.load(compiler)
except conf.errors.ConfigurationError as e:
conf.env.revert()
conf.end_msg(False)
Logs.debug('compiler_fortran: %r'%e)
else:
if conf.env['FC']:
conf.end_msg(conf.env.get_flat('FC'))
conf.env.COMPILER_FORTRAN=compiler
break
conf.end_msg(False)
else:
conf.fatal('could not configure a Fortran compiler!')
def options(opt):
test_for_compiler=default_compilers()
opt.load_special_tools('fc_*.py')
fortran_compiler_opts=opt.add_option_group('Configuration options')
fortran_compiler_opts.add_option('--check-fortran-compiler',default=None,help='list of Fortran compiler to try [%s]'%test_for_compiler,dest="check_fortran_compiler")
for x in test_for_compiler.split():
opt.load('%s'%x)
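# Typical wscript usage of this tool (illustrative sketch, mirroring the error
# message above):
#   def options(opt):
#       opt.load('compiler_fc')
#   def configure(conf):
#       conf.load('compiler_fc')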
|
StarcoderdataPython
|
1693742
|
<gh_stars>0
import random, string
def otp():
    """Return a random six-digit one-time password as a string."""
    # string.digits already yields str characters, so no str() conversion is needed
    return ''.join(random.choice(string.digits) for _ in range(6))
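# Illustrative usage (not part of the original snippet):
if __name__ == "__main__":
    print(otp())  # e.g. "482913"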
|
StarcoderdataPython
|
22254
|
<reponame>zhou3968322/pytorch-CycleGAN-and-pix2pix<filename>generator/constant_aug.py
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# -*- coding:utf-8 -*-
# email:<EMAIL>
# create: 2020/11/25
from imgaug import augmenters as iaa
seq_cir = iaa.Sequential(
[
iaa.AdditiveGaussianNoise(scale=0.01 * 255),
# iaa.MultiplyElementwise((0.8, 0.99)),
iaa.Dropout(p=(0, 0.05)),
# iaa.JpegCompression(compression=(80, 99)),
iaa.Affine(rotate=(-90, 90), scale=(0.4, 0.7), fit_output=True)
],
random_order=True)
seq_cir_big = iaa.Sequential(
[
iaa.AdditiveGaussianNoise(scale=0.01 * 255),
# iaa.MultiplyElementwise((0.8, 0.99)),
iaa.Dropout(p=(0, 0.05)),
# iaa.JpegCompression(compression=(80, 99)),
iaa.Affine(rotate=(-90, 90), scale=(0.9, 1.5), fit_output=True)
],
random_order=True)
seq_ell = iaa.Sequential(
[
iaa.AdditiveGaussianNoise(scale=0.01 * 255),
# iaa.MultiplyElementwise((0.8, 0.99)),
iaa.Dropout(p=(0, 0.05)),
# iaa.JpegCompression(compression=(80, 99)),
iaa.Affine(rotate=(-20, 20), scale=(0.4, 0.9), fit_output=True)
],
random_order=True)
seq_squ = iaa.Sequential(
[
iaa.AdditiveGaussianNoise(scale=0.01 * 255),
# iaa.MultiplyElementwise((0.8, 0.99)),
iaa.Dropout(p=(0, 0.05)),
# iaa.JpegCompression(compression=(80, 99)),
iaa.Affine(rotate=(-90, 90), scale=(0.18, 0.35), fit_output=True)
# iaa.Affine(rotate=(-90, 90), scale=(0.8, 1.4), fit_output=True)
],
random_order=True)
seq_rec = iaa.Sequential(
[
iaa.AdditiveGaussianNoise(scale=0.01 * 255),
# iaa.MultiplyElementwise((0.8, 0.99)),
iaa.Dropout(p=(0, 0.05)),
# iaa.JpegCompression(compression=(80, 99)),
iaa.Affine(rotate=(-90, 90), scale=(0.15, 0.25), fit_output=True)
# iaa.Affine(rotate=(-90, 90), scale=(0.2, 0.4), fit_output=True)
],
random_order=True)
seq_doc_noise = iaa.Sequential(
[
iaa.Sometimes(
0.6,
iaa.OneOf(iaa.Sequential([iaa.GaussianBlur(sigma=(0, 1.0))])
# iaa.AverageBlur(k=(2, 5)),
# iaa.MedianBlur(k=(3, 7))])
)
),
iaa.Sometimes(
0.5,
iaa.LinearContrast((0.8, 1.2), per_channel=0.5),
),
iaa.Sometimes(
0.3,
iaa.Multiply((0.8, 1.2), per_channel=0.5),
),
iaa.Sometimes(
0.3,
iaa.WithBrightnessChannels(iaa.Add((-40, 40))),
),
# iaa.Sometimes(
# 0.3,
# iaa.OneOf(iaa.Sequential([
# iaa.AdditiveGaussianNoise(scale=(0, 0.01*255), per_channel=0.5),
# iaa.SaltAndPepper(0.01)]))
# ),
iaa.Sometimes(
0.5,
iaa.Add((-10, 10), per_channel=0.5),
),
# iaa.Sometimes(
# 0.5,
# iaa.Dropout(p=(0, 0.05))
# ),
# iaa.JpegCompression(compression=(80, 99))
],
random_order=True)
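# Illustrative usage sketch (assumes an RGB uint8 numpy image; the names and
# shapes below are made up, not part of this module):
#   import numpy as np
#   img = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)
#   noisy = seq_doc_noise.augment_image(img)   # document-style degradation
#   stamp = seq_cir.augment_image(img)         # rotated / rescaled variant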
|
StarcoderdataPython
|
56196
|
"""Integration tests for the sync CLI command."""
import os.path
import fixture
class SyncTests(fixture.IntegrationFixture):
def _assert_exists(self, output_path, exists=True, i=1):
if exists:
self.assertTrue(os.path.exists(os.path.join(self.output_dir,
output_path)), "%s does not exist on loop %s" % (output_path, i))
else:
self.assertFalse(os.path.exists(os.path.join(self.output_dir,
output_path)), "%s exists on loop %s" % (output_path, i))
def test_file_from_tag(self):
manifest = self.build_manifest_str('v0.2', [('playbooks/playbook1.yml', 'playbook1.yml')])
result = self._run_sync(manifest)
self.assertEqual(0, result.exit_code)
self._assert_exists('playbook1.yml')
def test_file_to_dir(self):
manifest = self.build_manifest_str('master', [('playbooks/playbook1.yml', 'playbooks/')])
result = self._run_sync(manifest)
self.assertEqual(0, result.exit_code)
self._assert_exists('playbooks/playbook1.yml')
def test_file_to_top_lvl_dir(self):
manifest = self.build_manifest_str('master', [('playbooks/playbook1.yml', '')])
result = self._run_sync(manifest)
self.assertEqual(0, result.exit_code)
self._assert_exists('playbook1.yml')
def test_file_glob_to_dir(self):
manifest = self.build_manifest_str('v0.2', [('playbooks/*.yml', 'playbooks/')])
result = self._run_sync(manifest)
self.assertEqual(0, result.exit_code)
self._assert_exists('playbooks/playbook1.yml')
def test_dir_from_tag(self):
manifest = self.build_manifest_str('v0.2', [('roles/', 'roles')])
result = self._run_sync(manifest)
self.assertEqual(0, result.exit_code)
self._assert_exists('roles/dummyrole1/tasks/main.yml')
self._assert_exists('roles/dummyrole2/tasks/main.yml')
# Doesn't exist in v0.2 tag.
self._assert_exists('roles/dummyrole3/tasks/main.yml', False)
def test_dir_from_branch(self):
manifest = self.build_manifest_str('master', [('roles/', 'roles')])
for i in range(2):
result = self._run_sync(manifest)
self.assertEqual(0, result.exit_code)
self._assert_exists('roles/dummyrole1/tasks/main.yml', i=i)
self._assert_exists('roles/dummyrole2/tasks/main.yml', i=i)
self._assert_exists('roles/dummyrole3/tasks/main.yml', i=i)
self._assert_exists('roles/roles/dummyrole1/tasks/main.yml', False, i=i)
def test_dir_from_branch_trailing_dst_slash(self):
manifest = self.build_manifest_str('master', [('roles/', 'roles/')])
for i in range(2):
result = self._run_sync(manifest)
self.assertEqual(0, result.exit_code)
self._assert_exists('roles/dummyrole1/tasks/main.yml', i=i)
self._assert_exists('roles/dummyrole2/tasks/main.yml', i=i)
def test_dir_top_level_dst(self):
manifest = self.build_manifest_str('master', [('roles', '')])
result = self._run_sync(manifest)
self.assertEqual(0, result.exit_code)
self._assert_exists('dummyrole1/tasks/main.yml')
self._assert_exists('dummyrole2/tasks/main.yml')
self._assert_exists('roles/dummyrole1/tasks/main.yml', False)
self._assert_exists('roles/dummyrole2/tasks/main.yml', False)
def test_glob_dir(self):
manifest = self.build_manifest_str('master', [('roles/*', 'roles')])
for i in range(2):
result = self._run_sync(manifest)
self.assertEqual(0, result.exit_code)
self._assert_exists('roles/dummyrole1/tasks/main.yml', i=i)
self._assert_exists('roles/dummyrole2/tasks/main.yml', i=i)
self._assert_exists('roles/roles/dummyrole1/tasks/main.yml', False, i=i)
self._assert_exists('dummyrole1/tasks/main.yml', False, i=i)
def test_glob_dir_dst_slash(self):
manifest = self.build_manifest_str('v0.2', [('roles/*', 'roles/')])
result = self._run_sync(manifest)
self.assertEqual(0, result.exit_code)
self._assert_exists('roles/dummyrole1/tasks/main.yml')
self._assert_exists('roles/dummyrole2/tasks/main.yml')
def test_subdir(self):
manifest = self.build_manifest_str('master', [('roles/dummyrole1', 'roles/dummyrole1')])
result = self._run_sync(manifest)
self.assertEqual(0, result.exit_code)
self._assert_exists('roles/dummyrole1/tasks/main.yml')
self._assert_exists('roles/dummyrole2/tasks/main.yml', False)
def test_top_level_dir(self):
manifest = self.build_manifest_str('master', [('./', 'vendor/output')])
result = self._run_sync(manifest)
self.assertEqual(0, result.exit_code)
self._assert_exists('vendor/output/roles/dummyrole1/tasks/main.yml')
self._assert_exists('vendor/output/roles/dummyrole2/tasks/main.yml')
self._assert_exists('vendor/output/.git', False)
def test_subdir_dst_slash(self):
manifest = self.build_manifest_str('master', [('roles/dummyrole1', 'roles/dummyrole1/')])
result = self._run_sync(manifest)
for i in range(2):
self.assertEqual(0, result.exit_code)
self._assert_exists('roles/dummyrole1/tasks/main.yml', i=i)
self._assert_exists('roles/dummyrole2/tasks/main.yml', False, i=i)
self._assert_exists('roles/dummyrole1/dummyrole1/tasks/main.yml', False, i=i)
self._assert_exists('roles/dummyrole1/roles/dummyrole1/tasks/main.yml', False, i=i)
def test_dir_rename_dst_exists(self):
m1 = self.build_manifest_str('master', [('roles', 'roles2')])
m2 = self.build_manifest_str('master', [('roles', 'roles2/')])
for manifest in [m1, m2]:
for i in range(2):
result = self._run_sync(manifest)
self.assertEqual(0, result.exit_code)
self._assert_exists('roles2/dummyrole1/tasks/main.yml', i=i)
self._assert_exists('roles2/dummyrole2/tasks/main.yml', i=i)
self._assert_exists('roles2/roles/dummyrole1/tasks/main.yml', False, i=i)
self._assert_exists('roles2/roles2/dummyrole1/tasks/main.yml', False, i=i)
self._assert_exists('roles2/roles/dummyrole2/tasks/main.yml', False, i=i)
# If we run again, make sure we don't nest:
result = self._run_sync(manifest)
self.assertEqual(0, result.exit_code)
self._assert_exists('roles2/dummyrole1/tasks/main.yml', i=i)
self._assert_exists('roles2/dummyrole2/tasks/main.yml', i=i)
self._assert_exists('roles2/roles/dummyrole1/tasks/main.yml', False, i=i)
self._assert_exists('roles2/roles/dummyrole2/tasks/main.yml', False, i=i)
def test_merge_two_dirs(self):
manifest = self.build_manifest_str('master', [
('roles/', 'merged/'),
('playbooks/*', 'merged/'),
])
result = self._run_sync(manifest)
self.assertEqual(0, result.exit_code)
self._assert_exists('merged/dummyrole1/tasks/main.yml')
self._assert_exists('merged/dummyrole2/tasks/main.yml')
self._assert_exists('merged/playbook1.yml')
def test_dir_clobber(self):
        # Regression test for a bug where files already placed in roles/ were
        # clobbered by a later copy of the entire source roles directory.
manifest = self.build_manifest_str('master', [('roles/dummyrole2/tasks/main.yml', 'roles/main.yml'),
('roles/*', 'roles/')])
result = self._run_sync(manifest)
self.assertEqual(0, result.exit_code)
self._assert_exists('roles/dummyrole1/tasks/main.yml')
self._assert_exists('roles/dummyrole2/tasks/main.yml')
self._assert_exists('roles/main.yml')
# Re-run to trigger cleanup of previous dirs:
|
StarcoderdataPython
|
3382677
|
# elements_constraints_discovery.py
from __future__ import print_function
import pandas as pd
from tdda.constraints.pd.constraints import discover_df
df = pd.read_csv('testdata/elements92.csv')
constraints = discover_df(df)
with open('elements92.tdda', 'w') as f:
f.write(constraints.to_json())
print('Written elements92.tdda')
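# A natural follow-up is to verify data against the saved constraints (sketch;
# assumes verify_df is available alongside discover_df):
#   from tdda.constraints.pd.constraints import verify_df
#   verification = verify_df(df, 'elements92.tdda')
#   print(verification)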
|
StarcoderdataPython
|
49268
|
<filename>design.py
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'design.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(685, 643)
MainWindow.setMinimumSize(QtCore.QSize(685, 0))
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.toolButton_6 = QtWidgets.QToolButton(self.centralwidget)
self.toolButton_6.setObjectName("toolButton_6")
self.gridLayout.addWidget(self.toolButton_6, 10, 2, 1, 1)
self.toolButton_2 = QtWidgets.QToolButton(self.centralwidget)
self.toolButton_2.setObjectName("toolButton_2")
self.gridLayout.addWidget(self.toolButton_2, 2, 2, 1, 1)
self.label_4 = QtWidgets.QLabel(self.centralwidget)
self.label_4.setAlignment(QtCore.Qt.AlignCenter)
self.label_4.setObjectName("label_4")
self.gridLayout.addWidget(self.label_4, 23, 1, 1, 1)
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.gridLayout.addLayout(self.verticalLayout, 17, 0, 1, 1)
self.toolButton = QtWidgets.QToolButton(self.centralwidget)
self.toolButton.setObjectName("toolButton")
self.gridLayout.addWidget(self.toolButton, 1, 2, 1, 1)
self.pushButton_5 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_5.setObjectName("pushButton_5")
self.gridLayout.addWidget(self.pushButton_5, 26, 0, 1, 4)
self.label_8 = QtWidgets.QLabel(self.centralwidget)
self.label_8.setObjectName("label_8")
self.gridLayout.addWidget(self.label_8, 6, 0, 1, 1)
self.label_22 = QtWidgets.QLabel(self.centralwidget)
self.label_22.setObjectName("label_22")
self.gridLayout.addWidget(self.label_22, 15, 0, 1, 1)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label_24 = QtWidgets.QLabel(self.centralwidget)
self.label_24.setObjectName("label_24")
self.horizontalLayout_2.addWidget(self.label_24)
self.spinBox = QtWidgets.QSpinBox(self.centralwidget)
self.spinBox.setObjectName("spinBox")
self.horizontalLayout_2.addWidget(self.spinBox)
self.label_23 = QtWidgets.QLabel(self.centralwidget)
self.label_23.setObjectName("label_23")
self.horizontalLayout_2.addWidget(self.label_23, 0, QtCore.Qt.AlignRight)
self.spinBox_2 = QtWidgets.QSpinBox(self.centralwidget)
self.spinBox_2.setObjectName("spinBox_2")
self.horizontalLayout_2.addWidget(self.spinBox_2)
self.gridLayout.addLayout(self.horizontalLayout_2, 20, 0, 1, 4)
self.label_5 = QtWidgets.QLabel(self.centralwidget)
self.label_5.setObjectName("label_5")
self.gridLayout.addWidget(self.label_5, 1, 0, 1, 1)
self.comboBox = QtWidgets.QComboBox(self.centralwidget)
self.comboBox.setObjectName("comboBox")
self.comboBox.addItem("")
self.comboBox.addItem("")
self.gridLayout.addWidget(self.comboBox, 15, 1, 1, 1)
self.toolButton_5 = QtWidgets.QToolButton(self.centralwidget)
self.toolButton_5.setObjectName("toolButton_5")
self.gridLayout.addWidget(self.toolButton_5, 9, 2, 1, 1)
self.lineEdit_2 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_2.setObjectName("lineEdit_2")
self.gridLayout.addWidget(self.lineEdit_2, 2, 1, 1, 1)
self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_2.setObjectName("pushButton_2")
self.gridLayout.addWidget(self.pushButton_2, 6, 3, 1, 1)
self.toolButton_7 = QtWidgets.QToolButton(self.centralwidget)
self.toolButton_7.setObjectName("toolButton_7")
self.gridLayout.addWidget(self.toolButton_7, 11, 2, 1, 1)
self.lineEdit_6 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_6.setObjectName("lineEdit_6")
self.gridLayout.addWidget(self.lineEdit_6, 10, 1, 1, 1)
self.label_18 = QtWidgets.QLabel(self.centralwidget)
self.label_18.setAlignment(QtCore.Qt.AlignCenter)
self.label_18.setObjectName("label_18")
self.gridLayout.addWidget(self.label_18, 17, 1, 1, 1)
self.toolButton_10 = QtWidgets.QToolButton(self.centralwidget)
self.toolButton_10.setObjectName("toolButton_10")
self.gridLayout.addWidget(self.toolButton_10, 18, 2, 1, 1)
self.toolButton_3 = QtWidgets.QToolButton(self.centralwidget)
self.toolButton_3.setObjectName("toolButton_3")
self.gridLayout.addWidget(self.toolButton_3, 5, 2, 1, 1)
self.toolButton_8 = QtWidgets.QToolButton(self.centralwidget)
self.toolButton_8.setObjectName("toolButton_8")
self.gridLayout.addWidget(self.toolButton_8, 12, 2, 1, 1)
self.lineEdit_5 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_5.setObjectName("lineEdit_5")
self.gridLayout.addWidget(self.lineEdit_5, 9, 1, 1, 1)
self.label_15 = QtWidgets.QLabel(self.centralwidget)
self.label_15.setObjectName("label_15")
self.gridLayout.addWidget(self.label_15, 3, 0, 1, 4)
self.label_17 = QtWidgets.QLabel(self.centralwidget)
self.label_17.setObjectName("label_17")
self.gridLayout.addWidget(self.label_17, 21, 0, 1, 4)
self.lineEdit_11 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_11.setObjectName("lineEdit_11")
self.gridLayout.addWidget(self.lineEdit_11, 18, 1, 1, 1)
self.lineEdit_10 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_10.setObjectName("lineEdit_10")
self.gridLayout.addWidget(self.lineEdit_10, 14, 1, 1, 1)
self.label_16 = QtWidgets.QLabel(self.centralwidget)
self.label_16.setObjectName("label_16")
self.gridLayout.addWidget(self.label_16, 7, 0, 1, 4)
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 1, 1, 1)
self.label_6 = QtWidgets.QLabel(self.centralwidget)
self.label_6.setObjectName("label_6")
self.gridLayout.addWidget(self.label_6, 2, 0, 1, 1)
self.label_9 = QtWidgets.QLabel(self.centralwidget)
self.label_9.setObjectName("label_9")
self.gridLayout.addWidget(self.label_9, 9, 0, 1, 1)
self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit.setObjectName("lineEdit")
self.gridLayout.addWidget(self.lineEdit, 1, 1, 1, 1)
self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_3.setObjectName("pushButton_3")
self.gridLayout.addWidget(self.pushButton_3, 15, 3, 1, 1)
self.label_14 = QtWidgets.QLabel(self.centralwidget)
self.label_14.setObjectName("label_14")
self.gridLayout.addWidget(self.label_14, 25, 0, 1, 4)
self.pushButton_4 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_4.setObjectName("pushButton_4")
self.gridLayout.addWidget(self.pushButton_4, 24, 0, 1, 4)
self.lineEdit_8 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_8.setObjectName("lineEdit_8")
self.gridLayout.addWidget(self.lineEdit_8, 12, 1, 1, 1)
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 4, 1, 1, 1)
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setAlignment(QtCore.Qt.AlignCenter)
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 8, 1, 1, 1)
self.label_7 = QtWidgets.QLabel(self.centralwidget)
self.label_7.setObjectName("label_7")
self.gridLayout.addWidget(self.label_7, 5, 0, 1, 1)
self.label_20 = QtWidgets.QLabel(self.centralwidget)
self.label_20.setObjectName("label_20")
self.gridLayout.addWidget(self.label_20, 16, 0, 1, 4)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.label_19 = QtWidgets.QLabel(self.centralwidget)
self.label_19.setObjectName("label_19")
self.verticalLayout_2.addWidget(self.label_19)
self.gridLayout.addLayout(self.verticalLayout_2, 18, 0, 1, 1)
self.pushButton_7 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_7.setObjectName("pushButton_7")
self.gridLayout.addWidget(self.pushButton_7, 14, 3, 1, 1)
self.lineEdit_3 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_3.setObjectName("lineEdit_3")
self.gridLayout.addWidget(self.lineEdit_3, 5, 1, 1, 1)
self.toolButton_4 = QtWidgets.QToolButton(self.centralwidget)
self.toolButton_4.setObjectName("toolButton_4")
self.gridLayout.addWidget(self.toolButton_4, 6, 2, 1, 1)
self.label_10 = QtWidgets.QLabel(self.centralwidget)
self.label_10.setObjectName("label_10")
self.gridLayout.addWidget(self.label_10, 10, 0, 1, 1)
self.lineEdit_7 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_7.setObjectName("lineEdit_7")
self.gridLayout.addWidget(self.lineEdit_7, 11, 1, 1, 1)
self.label_11 = QtWidgets.QLabel(self.centralwidget)
self.label_11.setObjectName("label_11")
self.gridLayout.addWidget(self.label_11, 11, 0, 1, 1)
self.lineEdit_4 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_4.setObjectName("lineEdit_4")
self.gridLayout.addWidget(self.lineEdit_4, 6, 1, 1, 1)
self.pushButton_6 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_6.setObjectName("pushButton_6")
self.gridLayout.addWidget(self.pushButton_6, 18, 3, 1, 1)
self.label_12 = QtWidgets.QLabel(self.centralwidget)
self.label_12.setObjectName("label_12")
self.gridLayout.addWidget(self.label_12, 12, 0, 1, 1)
self.label_21 = QtWidgets.QLabel(self.centralwidget)
self.label_21.setObjectName("label_21")
self.gridLayout.addWidget(self.label_21, 14, 0, 1, 1)
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setObjectName("pushButton")
self.gridLayout.addWidget(self.pushButton, 2, 3, 1, 1)
self.label_13 = QtWidgets.QLabel(self.centralwidget)
self.label_13.setAlignment(QtCore.Qt.AlignCenter)
self.label_13.setObjectName("label_13")
self.gridLayout.addWidget(self.label_13, 11, 3, 1, 1)
self.doubleSpinBox = QtWidgets.QDoubleSpinBox(self.centralwidget)
self.doubleSpinBox.setObjectName("doubleSpinBox")
self.gridLayout.addWidget(self.doubleSpinBox, 12, 3, 1, 1)
self.verticalLayout_3.addLayout(self.gridLayout)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 685, 22))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.toolButton_6.setText(_translate("MainWindow", "..."))
self.toolButton_2.setText(_translate("MainWindow", "..."))
self.label_4.setText(_translate("MainWindow", "QDPT result processing"))
self.toolButton.setText(_translate("MainWindow", "..."))
self.pushButton_5.setText(_translate("MainWindow", "Instruction"))
self.label_8.setText(_translate("MainWindow", "New method file"))
self.label_22.setText(_translate("MainWindow", "dimer/monomer"))
self.label_24.setText(_translate("MainWindow", "Minimum atoms in one molecule"))
self.label_23.setText(_translate("MainWindow", "Maximum contact length"))
self.label_5.setText(_translate("MainWindow", "Main file"))
self.comboBox.setItemText(0, _translate("MainWindow", "Dimer"))
self.comboBox.setItemText(1, _translate("MainWindow", "Monomer"))
self.toolButton_5.setText(_translate("MainWindow", "..."))
self.pushButton_2.setText(_translate("MainWindow", "Method Changer"))
self.toolButton_7.setText(_translate("MainWindow", "..."))
self.label_18.setText(_translate("MainWindow", "Molecular Delimiter"))
self.toolButton_10.setText(_translate("MainWindow", "..."))
self.toolButton_3.setText(_translate("MainWindow", "..."))
self.toolButton_8.setText(_translate("MainWindow", "..."))
self.label_15.setText(_translate("MainWindow", "-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------"))
self.label_17.setText(_translate("MainWindow", "--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------"))
self.label_16.setText(_translate("MainWindow", "--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------"))
self.label.setText(_translate("MainWindow", "VEC Changer"))
self.label_6.setText(_translate("MainWindow", "VEC file"))
self.label_9.setText(_translate("MainWindow", "Settings"))
self.pushButton_3.setText(_translate("MainWindow", "Files generator"))
self.label_14.setText(_translate("MainWindow", "--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------"))
self.pushButton_4.setText(_translate("MainWindow", "Plot Editor"))
self.label_2.setText(_translate("MainWindow", "Method Changer"))
self.label_3.setText(_translate("MainWindow", "Inp files generator"))
self.label_7.setText(_translate("MainWindow", "Directory with files to change"))
self.label_20.setText(_translate("MainWindow", "--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------"))
self.label_19.setText(_translate("MainWindow", "Pack file"))
self.pushButton_7.setText(_translate("MainWindow", "Open Folder"))
self.toolButton_4.setText(_translate("MainWindow", "..."))
self.label_10.setText(_translate("MainWindow", "Geometry 1"))
self.label_11.setText(_translate("MainWindow", "Geometry 2"))
self.pushButton_6.setText(_translate("MainWindow", "Start"))
self.label_12.setText(_translate("MainWindow", "Directory for generated files"))
self.label_21.setText(_translate("MainWindow", "inp filenames mask"))
self.pushButton.setText(_translate("MainWindow", "VEC Changer"))
self.label_13.setText(_translate("MainWindow", "Step"))
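# Illustrative launcher sketch (kept as a comment: pyuic-generated files are
# overwritten on regeneration, so this normally lives in a separate module):
#   import sys
#   app = QtWidgets.QApplication(sys.argv)
#   window = QtWidgets.QMainWindow()
#   ui = Ui_MainWindow()
#   ui.setupUi(window)
#   window.show()
#   sys.exit(app.exec_())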
|
StarcoderdataPython
|
3293905
|
from typing import List
from talon import actions, Module, speech_system
mod = Module()
macro = []
recording = False
@mod.action_class
class Actions:
def macro_record():
"""Begin recording a new voice command macro."""
global macro
global recording
macro = []
recording = True
def macro_stop():
"""Stop recording the macro."""
global recording
recording = False
def macro_play():
"""Execute the commands in the last recorded macro."""
actions.user.macro_stop()
# :-1 because we don't want to replay `macro stop`/`macro play`
for words in macro[:-1]:
print(words)
actions.mimic(words)
def macro_append_command(words: List[str]):
"""Appends a command to the current macro; called when a voice command is uttered while recording a macro."""
assert recording, "Not currently recording a macro"
macro.append(words)
def fn(d):
if not recording or "parsed" not in d: return
actions.user.macro_append_command(d["parsed"]._unmapped)
speech_system.register("pre:phrase", fn)
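# A matching .talon file would typically bind these actions to voice commands,
# for example (sketch):
#   macro record: user.macro_record()
#   macro stop: user.macro_stop()
#   macro play: user.macro_play()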
|
StarcoderdataPython
|
1746240
|
from setuptools import setup
version = "0.1.1"
url = "https://github.com/JIC-CSB/dserve"
readme = open('README.rst').read()
setup(
name='dserve',
packages=['dserve'],
version=version,
description="Tool to serve a dataset over HTTP",
long_description=readme,
include_package_data=True,
author="<NAME>",
author_email="<EMAIL>",
url=url,
download_url="{}/tarball/{}".format(url, version),
install_requires=[
'dtoolcore>=0.15.0',
'flask',
'flask_cors',
],
entry_points={
'console_scripts': ['dserve=dserve.cli:main']
},
license="MIT")
|
StarcoderdataPython
|
4827011
|
<filename>compyle/cuda.py
"""Common CUDA related functionality.
"""
from __future__ import print_function
from pytools import Record, RecordWithoutPickling
import logging
from pytools.persistent_dict import KeyBuilder as KeyBuilderBase
from pytools.persistent_dict import WriteOncePersistentDict
from pycuda._cluda import CLUDA_PREAMBLE
import pycuda._mymako as mako
from pycuda.tools import (dtype_to_ctype, bitlog2,
context_dependent_memoize, ScalarArg, VectorArg)
import pycuda.gpuarray as gpuarray
from compyle.thrust.sort import argsort
import pycuda.driver as drv
from pycuda.compiler import SourceModule as _SourceModule
from pycuda.tools import dtype_to_ctype
from pytools import memoize
import numpy as np
import six
_cuda_ctx = False
def set_context():
global _cuda_ctx
if not _cuda_ctx:
import pycuda.autoinit
_cuda_ctx = True
# The following code is taken from pyopencl for struct mapping.
# it should be ported over to pycuda eventually.
import pycuda.gpuarray as gpuarray # noqa
class SourceModule(_SourceModule):
def __getattr__(self, name):
def kernel(*args, **kwargs):
f = self.get_function(name)
return f(*args, **kwargs)
kernel.function_name = name
return kernel
class _CDeclList:
def __init__(self, device):
self.device = device
self.declared_dtypes = set()
self.declarations = []
self.saw_complex = False
def add_dtype(self, dtype):
dtype = np.dtype(dtype)
if dtype.kind == "c":
self.saw_complex = True
if dtype.kind != "V":
return
if dtype in self.declared_dtypes:
return
for name, field_data in sorted(six.iteritems(dtype.fields)):
field_dtype, offset = field_data[:2]
self.add_dtype(field_dtype)
_, cdecl = match_dtype_to_c_struct(
self.device, dtype_to_ctype(dtype), dtype)
self.declarations.append(cdecl)
self.declared_dtypes.add(dtype)
def visit_arguments(self, arguments):
for arg in arguments:
dtype = arg.dtype
if dtype.kind == "c":
self.saw_complex = True
def get_declarations(self):
result = "\n\n".join(self.declarations)
if self.saw_complex:
result = (
"#include <pycuda-complex.h>\n\n"
+ result)
return result
@memoize
def match_dtype_to_c_struct(device, name, dtype, context=None, use_typedef=False):
"""Return a tuple `(dtype, c_decl)` such that the C struct declaration
in `c_decl` and the structure :class:`numpy.dtype` instance `dtype`
have the same memory layout.
Note that *dtype* may be modified from the value that was passed in,
for example to insert padding.
(As a remark on implementation, this routine runs a small kernel on
the given *device* to ensure that :mod:`numpy` and C offsets and
sizes match.)
This example explains the use of this function::
>>> import numpy as np
>>> import pyopencl as cl
>>> import pyopencl.tools
>>> ctx = cl.create_some_context()
>>> dtype = np.dtype([("id", np.uint32), ("value", np.float32)])
>>> dtype, c_decl = pyopencl.tools.match_dtype_to_c_struct(
... ctx.devices[0], 'id_val', dtype)
    >>> print(c_decl)
typedef struct {
unsigned id;
float value;
} id_val;
    >>> print(dtype)
[('id', '<u4'), ('value', '<f4')]
>>> cl.tools.get_or_register_dtype('id_val', dtype)
As this example shows, it is important to call
:func:`get_or_register_dtype` on the modified `dtype` returned by this
function, not the original one.
"""
fields = sorted(
six.iteritems(dtype.fields),
key=lambda name_dtype_offset: name_dtype_offset[1][1]
)
c_fields = []
for field_name, dtype_and_offset in fields:
field_dtype, offset = dtype_and_offset[:2]
c_fields.append(" %s %s;" % (dtype_to_ctype(field_dtype), field_name))
if use_typedef:
c_decl = "typedef struct {\n%s\n} %s;\n\n" % (
"\n".join(c_fields), name
)
else:
c_decl = "struct %s {\n%s\n};\n\n" % (
name, "\n".join(c_fields)
)
cdl = _CDeclList(device)
for field_name, dtype_and_offset in fields:
field_dtype, offset = dtype_and_offset[:2]
cdl.add_dtype(field_dtype)
pre_decls = cdl.get_declarations()
offset_code = "\n".join(
"result[%d] = pycuda_offsetof(%s, %s);" % (i + 1, name, field_name)
for i, (field_name, _) in enumerate(fields))
src = r"""
#define pycuda_offsetof(st, m) \
((uint) ((char *) &(dummy_pycuda.m) \
- (char *)&dummy_pycuda ))
%(pre_decls)s
%(my_decl)s
extern "C" __global__ void get_size_and_offsets(uint *result)
{
result[0] = sizeof(%(my_type)s);
%(my_type)s dummy_pycuda;
%(offset_code)s
}
""" % dict(
pre_decls=pre_decls,
my_decl=c_decl,
my_type=name,
offset_code=offset_code)
prg = SourceModule(src)
knl = prg.get_size_and_offsets
result_buf = gpuarray.empty(1 + len(fields), np.uint32)
e = drv.Event()
knl(result_buf.gpudata, block=(1, 1, 1))
e.record()
e.synchronize()
size_and_offsets = result_buf.get()
size = int(size_and_offsets[0])
from pytools import any
offsets = size_and_offsets[1:]
if any(ofs >= size for ofs in offsets):
# offsets not plausible
if dtype.itemsize == size:
# If sizes match, use numpy's idea of the offsets.
offsets = [dtype_and_offset[1]
for field_name, dtype_and_offset in fields]
else:
raise RuntimeError(
"OpenCL compiler reported offsetof() past sizeof() "
"for struct layout on '%s'. "
"This makes no sense, and it's usually indicates a "
"compiler bug. "
"Refusing to discover struct layout." % device)
del knl
del prg
del context
try:
dtype_arg_dict = {
'names': [field_name
for field_name, (field_dtype, offset) in fields],
'formats': [field_dtype
for field_name, (field_dtype, offset) in fields],
'offsets': [int(x) for x in offsets],
'itemsize': int(size_and_offsets[0]),
}
dtype = np.dtype(dtype_arg_dict)
if dtype.itemsize != size_and_offsets[0]:
# "Old" versions of numpy (1.6.x?) silently ignore "itemsize". Boo.
dtype_arg_dict["names"].append("_pycl_size_fixer")
dtype_arg_dict["formats"].append(np.uint8)
dtype_arg_dict["offsets"].append(int(size_and_offsets[0]) - 1)
dtype = np.dtype(dtype_arg_dict)
except NotImplementedError:
def calc_field_type():
total_size = 0
padding_count = 0
for offset, (field_name, (field_dtype, _)) in zip(offsets, fields):
if offset > total_size:
padding_count += 1
                    yield ('__pycuda_padding%d' % padding_count,
                           'V%d' % (offset - total_size))
yield field_name, field_dtype
total_size = field_dtype.itemsize + offset
dtype = np.dtype(list(calc_field_type()))
assert dtype.itemsize == size_and_offsets[0]
return dtype, c_decl
@memoize
def dtype_to_c_struct(device, dtype):
if dtype.fields is None:
return ""
import pyopencl.cltypes
if dtype in pyopencl.cltypes.vec_type_to_scalar_and_count:
# Vector types are built-in. Don't try to redeclare those.
return ""
matched_dtype, c_decl = match_dtype_to_c_struct(
device, dtype_to_ctype(dtype), dtype)
def dtypes_match():
result = len(dtype.fields) == len(matched_dtype.fields)
for name, val in six.iteritems(dtype.fields):
result = result and matched_dtype.fields[name] == val
return result
assert dtypes_match()
return c_decl
#####################################################################
# The GenericScanKernel is added here temporarily until the following
# PR is merged into PyCUDA
# https://github.com/inducer/pycuda/pull/188
#####################################################################
logger = logging.getLogger(__name__)
def parse_arg_list(arguments):
"""Parse a list of kernel arguments. *arguments* may be a comma-separate
list of C declarators in a string, a list of strings representing C
declarators, or :class:`Argument` objects.
"""
if isinstance(arguments, str):
arguments = arguments.split(",")
def parse_single_arg(obj):
if isinstance(obj, str):
from pycuda.tools import parse_c_arg
return parse_c_arg(obj)
else:
return obj
return [parse_single_arg(arg) for arg in arguments]
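# Example (illustrative): parse_arg_list("int *ary, float factor") yields a
# VectorArg for "ary" and a ScalarArg for "factor", via pycuda.tools.parse_c_arg.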
def get_arg_list_scalar_arg_dtypes(arg_types):
result = []
for arg_type in arg_types:
if isinstance(arg_type, ScalarArg):
result.append(arg_type.dtype)
elif isinstance(arg_type, VectorArg):
result.append(None)
else:
raise RuntimeError("arg type not understood: %s" % type(arg_type))
return result
def _process_code_for_macro(code):
if "//" in code:
raise RuntimeError("end-of-line comments ('//') may not be used in "
"code snippets")
return code.replace("\n", " \\\n")
class _NumpyTypesKeyBuilder(KeyBuilderBase):
def update_for_type(self, key_hash, key):
if issubclass(key, np.generic):
self.update_for_str(key_hash, key.__name__)
return
raise TypeError("unsupported type for persistent hash keying: %s"
% type(key))
# {{{ preamble
SHARED_PREAMBLE = CLUDA_PREAMBLE + """
#define WG_SIZE ${wg_size}
#define SCAN_EXPR(a, b, across_seg_boundary) ${scan_expr}
#define INPUT_EXPR(i) (${input_expr})
%if is_segmented:
#define IS_SEG_START(i, a) (${is_segment_start_expr})
%endif
${preamble}
typedef ${dtype_to_ctype(scan_dtype)} scan_type;
typedef ${dtype_to_ctype(index_dtype)} index_type;
// NO_SEG_BOUNDARY is the largest representable integer in index_type.
// This assumption is used in code below.
#define NO_SEG_BOUNDARY ${str(np.iinfo(index_dtype).max)}
"""
# }}}
# {{{ main scan code
# Algorithm: Each work group is responsible for one contiguous
# 'interval'. There are just enough intervals to fill all compute
# units. Intervals are split into 'units'. A unit is what gets
# worked on in parallel by one work group.
#
# in index space:
# interval > unit > local-parallel > k-group
#
# (Note that there is also a transpose in here: The data is read
# with local ids along linear index order.)
#
# Each unit has two axes--the local-id axis and the k axis.
#
# unit 0:
# | | | | | | | | | | ----> lid
# | | | | | | | | | |
# | | | | | | | | | |
# | | | | | | | | | |
# | | | | | | | | | |
#
# |
# v k (fastest-moving in linear index)
#
# unit 1:
# | | | | | | | | | | ----> lid
# | | | | | | | | | |
# | | | | | | | | | |
# | | | | | | | | | |
# | | | | | | | | | |
#
# |
# v k (fastest-moving in linear index)
#
# ...
#
# At a device-global level, this is a three-phase algorithm, in
# which first each interval does its local scan, then a scan
# across intervals exchanges data globally, and the final update
# adds the exchanged sums to each interval.
#
# Exclusive scan is realized by allowing look-behind (access to the
# preceding item) in the final update, by means of a local shift.
#
# NOTE: All segment_start_in_X indices are relative to the start
# of the array.
SCAN_INTERVALS_SOURCE = SHARED_PREAMBLE + r"""
#define K ${k_group_size}
// #define DEBUG
#ifdef DEBUG
#define pycu_printf(ARGS) printf ARGS
#else
#define pycu_printf(ARGS) /* */
#endif
KERNEL
REQD_WG_SIZE(WG_SIZE, 1, 1)
void ${kernel_name}(
${argument_signature},
GLOBAL_MEM scan_type* __restrict__ partial_scan_buffer,
const index_type N,
const index_type interval_size
%if is_first_level:
, GLOBAL_MEM scan_type* __restrict__ interval_results
%endif
%if is_segmented and is_first_level:
// NO_SEG_BOUNDARY if no segment boundary in interval.
, GLOBAL_MEM index_type* __restrict__ g_first_segment_start_in_interval
%endif
%if store_segment_start_flags:
, GLOBAL_MEM char* __restrict__ g_segment_start_flags
%endif
)
{
// index K in first dimension used for carry storage
%if use_bank_conflict_avoidance:
// Avoid bank conflicts by adding a single 32-bit value to the size of
// the scan type.
struct __attribute__ ((__packed__)) wrapped_scan_type
{
scan_type value;
int dummy;
};
%else:
struct wrapped_scan_type
{
scan_type value;
};
%endif
// padded in WG_SIZE to avoid bank conflicts
LOCAL_MEM struct wrapped_scan_type ldata[K + 1][WG_SIZE + 1];
%if is_segmented:
LOCAL_MEM char l_segment_start_flags[K][WG_SIZE];
LOCAL_MEM index_type l_first_segment_start_in_subtree[WG_SIZE];
// only relevant/populated for local id 0
index_type first_segment_start_in_interval = NO_SEG_BOUNDARY;
index_type first_segment_start_in_k_group, first_segment_start_in_subtree;
%endif
// {{{ declare local data for input_fetch_exprs if any of them are stenciled
<%
fetch_expr_offsets = {}
for name, arg_name, ife_offset in input_fetch_exprs:
fetch_expr_offsets.setdefault(arg_name, set()).add(ife_offset)
local_fetch_expr_args = set(
arg_name
for arg_name, ife_offsets in fetch_expr_offsets.items()
if -1 in ife_offsets or len(ife_offsets) > 1)
%>
%for arg_name in local_fetch_expr_args:
LOCAL_MEM ${arg_ctypes[arg_name]} l_${arg_name}[WG_SIZE*K];
%endfor
// }}}
const index_type interval_begin = interval_size * GID_0;
const index_type interval_end = min(interval_begin + interval_size, N);
const index_type unit_size = K * WG_SIZE;
index_type unit_base = interval_begin;
%for is_tail in [False, True]:
%if not is_tail:
for(; unit_base + unit_size <= interval_end; unit_base += unit_size)
%else:
if (unit_base < interval_end)
%endif
{
// {{{ carry out input_fetch_exprs
// (if there are ones that need to be fetched into local)
%if local_fetch_expr_args:
for(index_type k = 0; k < K; k++)
{
const index_type offset = k*WG_SIZE + LID_0;
const index_type read_i = unit_base + offset;
%for arg_name in local_fetch_expr_args:
%if is_tail:
if (read_i < interval_end)
%endif
{
l_${arg_name}[offset] = ${arg_name}[read_i];
}
%endfor
}
local_barrier();
%endif
pycu_printf(("after input_fetch_exprs\n"));
// }}}
// {{{ read a unit's worth of data from global
for(index_type k = 0; k < K; k++)
{
const index_type offset = k*WG_SIZE + LID_0;
const index_type read_i = unit_base + offset;
%if is_tail:
if (read_i < interval_end)
%endif
{
%for name, arg_name, ife_offset in input_fetch_exprs:
${arg_ctypes[arg_name]} ${name};
%if arg_name in local_fetch_expr_args:
if (offset + ${ife_offset} >= 0)
${name} = l_${arg_name}[offset + ${ife_offset}];
else if (read_i + ${ife_offset} >= 0)
${name} = ${arg_name}[read_i + ${ife_offset}];
/*
else
if out of bounds, name is left undefined */
%else:
// ${arg_name} gets fetched directly from global
${name} = ${arg_name}[read_i];
%endif
%endfor
scan_type scan_value = INPUT_EXPR(read_i);
const index_type o_mod_k = offset % K;
const index_type o_div_k = offset / K;
ldata[o_mod_k][o_div_k].value = scan_value;
%if is_segmented:
bool is_seg_start = IS_SEG_START(read_i, scan_value);
l_segment_start_flags[o_mod_k][o_div_k] = is_seg_start;
%endif
%if store_segment_start_flags:
g_segment_start_flags[read_i] = is_seg_start;
%endif
}
}
pycu_printf(("after read from global\n"));
// }}}
// {{{ carry in from previous unit, if applicable
%if is_segmented:
local_barrier();
first_segment_start_in_k_group = NO_SEG_BOUNDARY;
if (l_segment_start_flags[0][LID_0])
first_segment_start_in_k_group = unit_base + K*LID_0;
%endif
if (LID_0 == 0 && unit_base != interval_begin)
{
scan_type tmp = ldata[K][WG_SIZE - 1].value;
scan_type tmp_aux = ldata[0][0].value;
ldata[0][0].value = SCAN_EXPR(
tmp, tmp_aux,
%if is_segmented:
(l_segment_start_flags[0][0])
%else:
false
%endif
);
}
pycu_printf(("after carry-in\n"));
// }}}
local_barrier();
// {{{ scan along k (sequentially in each work item)
scan_type sum = ldata[0][LID_0].value;
%if is_tail:
const index_type offset_end = interval_end - unit_base;
%endif
for(index_type k = 1; k < K; k++)
{
%if is_tail:
if (K * LID_0 + k < offset_end)
%endif
{
scan_type tmp = ldata[k][LID_0].value;
%if is_segmented:
index_type seq_i = unit_base + K*LID_0 + k;
if (l_segment_start_flags[k][LID_0])
{
first_segment_start_in_k_group = min(
first_segment_start_in_k_group,
seq_i);
}
%endif
sum = SCAN_EXPR(sum, tmp,
%if is_segmented:
(l_segment_start_flags[k][LID_0])
%else:
false
%endif
);
ldata[k][LID_0].value = sum;
}
}
pycu_printf(("after scan along k\n"));
// }}}
// store carry in out-of-bounds (padding) array entry (index K) in
// the K direction
ldata[K][LID_0].value = sum;
%if is_segmented:
l_first_segment_start_in_subtree[LID_0] =
first_segment_start_in_k_group;
%endif
local_barrier();
// {{{ tree-based local parallel scan
// This tree-based scan works as follows:
// - Each work item adds the previous item to its current state
// - barrier
// - Each work item adds in the item from two positions to the left
// - barrier
// - Each work item adds in the item from four positions to the left
// ...
// At the end, each item has summed all prior items.
// across k groups, along local id
// (uses out-of-bounds k=K array entry for storage)
scan_type val = ldata[K][LID_0].value;
<% scan_offset = 1 %>
% while scan_offset <= wg_size:
// {{{ reads from local allowed, writes to local not allowed
if (LID_0 >= ${scan_offset})
{
scan_type tmp = ldata[K][LID_0 - ${scan_offset}].value;
% if is_tail:
if (K*LID_0 < offset_end)
% endif
{
val = SCAN_EXPR(tmp, val,
%if is_segmented:
(l_first_segment_start_in_subtree[LID_0]
!= NO_SEG_BOUNDARY)
%else:
false
%endif
);
}
%if is_segmented:
// Prepare for l_first_segment_start_in_subtree, below.
// Note that this update must take place *even* if we're
// out of bounds.
first_segment_start_in_subtree = min(
l_first_segment_start_in_subtree[LID_0],
l_first_segment_start_in_subtree
[LID_0 - ${scan_offset}]);
%endif
}
%if is_segmented:
else
{
first_segment_start_in_subtree =
l_first_segment_start_in_subtree[LID_0];
}
%endif
// }}}
local_barrier();
// {{{ writes to local allowed, reads from local not allowed
ldata[K][LID_0].value = val;
%if is_segmented:
l_first_segment_start_in_subtree[LID_0] =
first_segment_start_in_subtree;
%endif
// }}}
local_barrier();
%if 0:
if (LID_0 == 0)
{
printf("${scan_offset}: ");
for (int i = 0; i < WG_SIZE; ++i)
{
if (l_first_segment_start_in_subtree[i] == NO_SEG_BOUNDARY)
printf("- ");
else
printf("%d ", l_first_segment_start_in_subtree[i]);
}
printf("\n");
}
%endif
<% scan_offset *= 2 %>
% endwhile
pycu_printf(("after tree scan\n"));
// }}}
// {{{ update local values
if (LID_0 > 0)
{
sum = ldata[K][LID_0 - 1].value;
for(index_type k = 0; k < K; k++)
{
%if is_tail:
if (K * LID_0 + k < offset_end)
%endif
{
scan_type tmp = ldata[k][LID_0].value;
ldata[k][LID_0].value = SCAN_EXPR(sum, tmp,
%if is_segmented:
(unit_base + K * LID_0 + k
>= first_segment_start_in_k_group)
%else:
false
%endif
);
}
}
}
%if is_segmented:
if (LID_0 == 0)
{
// update interval-wide first-seg variable from current unit
first_segment_start_in_interval = min(
first_segment_start_in_interval,
l_first_segment_start_in_subtree[WG_SIZE-1]);
}
%endif
pycu_printf(("after local update\n"));
// }}}
local_barrier();
// {{{ write data
{
// work hard with index math to achieve contiguous 32-bit stores
GLOBAL_MEM int *dest =
(GLOBAL_MEM int *) (partial_scan_buffer + unit_base);
<%
assert scan_dtype.itemsize % 4 == 0
ints_per_wg = wg_size
ints_to_store = scan_dtype.itemsize*wg_size*k_group_size // 4
%>
const index_type scan_types_per_int = ${scan_dtype.itemsize//4};
%for store_base in range(0, ints_to_store, ints_per_wg):
<%
# Observe that ints_to_store is divisible by the work group
# size already, so we won't go out of bounds that way.
assert store_base + ints_per_wg <= ints_to_store
%>
%if is_tail:
if (${store_base} + LID_0 <
scan_types_per_int*(interval_end - unit_base))
%endif
{
index_type linear_index = ${store_base} + LID_0;
index_type linear_scan_data_idx =
linear_index / scan_types_per_int;
index_type remainder =
linear_index - linear_scan_data_idx * scan_types_per_int;
int* src = (int*) &(ldata
[linear_scan_data_idx % K]
[linear_scan_data_idx / K].value);
dest[linear_index] = src[remainder];
}
%endfor
}
pycu_printf(("after write\n"));
// }}}
local_barrier();
}
% endfor
// write interval sum
%if is_first_level:
if (LID_0 == 0)
{
interval_results[GID_0] = partial_scan_buffer[interval_end - 1];
%if is_segmented:
g_first_segment_start_in_interval[GID_0] =
first_segment_start_in_interval;
%endif
}
%endif
}
"""
# }}}
# {{{ update
UPDATE_SOURCE = SHARED_PREAMBLE + r"""
KERNEL
REQD_WG_SIZE(WG_SIZE, 1, 1)
void ${name_prefix}_final_update(
${argument_signature},
const index_type N,
const index_type interval_size,
GLOBAL_MEM scan_type* __restrict__ interval_results,
GLOBAL_MEM scan_type* __restrict__ partial_scan_buffer
%if is_segmented:
, GLOBAL_MEM index_type* __restrict__ g_first_segment_start_in_interval
%endif
%if is_segmented and use_lookbehind_update:
, GLOBAL_MEM char* __restrict__ g_segment_start_flags
%endif
)
{
%if use_lookbehind_update:
LOCAL_MEM scan_type ldata[WG_SIZE];
%endif
%if is_segmented and use_lookbehind_update:
LOCAL_MEM char l_segment_start_flags[WG_SIZE];
%endif
const index_type interval_begin = interval_size * GID_0;
const index_type interval_end = min(interval_begin + interval_size, N);
// carry from last interval
scan_type carry = ${neutral};
if (GID_0 != 0)
carry = interval_results[GID_0 - 1];
%if is_segmented:
const index_type first_seg_start_in_interval =
g_first_segment_start_in_interval[GID_0];
%endif
%if not is_segmented and 'last_item' in output_statement:
scan_type last_item = interval_results[GDIM_0-1];
%endif
%if not use_lookbehind_update:
// {{{ no look-behind ('prev_item' not in output_statement -> simpler)
index_type update_i = interval_begin+LID_0;
%if is_segmented:
index_type seg_end = min(first_seg_start_in_interval, interval_end);
%endif
for(; update_i < interval_end; update_i += WG_SIZE)
{
scan_type partial_val = partial_scan_buffer[update_i];
scan_type item = SCAN_EXPR(carry, partial_val,
%if is_segmented:
(update_i >= seg_end)
%else:
false
%endif
);
index_type i = update_i;
{ ${output_statement}; }
}
// }}}
%else:
// {{{ allow look-behind ('prev_item' in output_statement -> complicated)
// We are not allowed to branch across barriers at a granularity smaller
// than the whole workgroup. Therefore, the for loop is group-global,
// and there are lots of local ifs.
index_type group_base = interval_begin;
scan_type prev_item = carry; // (A)
for(; group_base < interval_end; group_base += WG_SIZE)
{
index_type update_i = group_base+LID_0;
// load a work group's worth of data
if (update_i < interval_end)
{
scan_type tmp = partial_scan_buffer[update_i];
tmp = SCAN_EXPR(carry, tmp,
%if is_segmented:
(update_i >= first_seg_start_in_interval)
%else:
false
%endif
);
ldata[LID_0] = tmp;
%if is_segmented:
l_segment_start_flags[LID_0] = g_segment_start_flags[update_i];
%endif
}
local_barrier();
// find prev_item
if (LID_0 != 0)
prev_item = ldata[LID_0 - 1];
/*
else
prev_item = carry (see (A)) OR last tail (see (B));
*/
if (update_i < interval_end)
{
%if is_segmented:
if (l_segment_start_flags[LID_0])
prev_item = ${neutral};
%endif
scan_type item = ldata[LID_0];
index_type i = update_i;
{ ${output_statement}; }
}
if (LID_0 == 0)
prev_item = ldata[WG_SIZE - 1]; // (B)
local_barrier();
}
// }}}
%endif
}
"""
# }}}
# {{{ driver
# {{{ helpers
def _round_down_to_power_of_2(val):
result = 2**bitlog2(val)
if result > val:
result >>= 1
assert result <= val
return result
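# e.g. _round_down_to_power_of_2(48) == 32; exact powers of two map to themselves.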
_PREFIX_WORDS = set("""
ldata partial_scan_buffer global scan_offset
segment_start_in_k_group carry
g_first_segment_start_in_interval IS_SEG_START tmp Z
val l_first_segment_start_in_subtree unit_size
index_type interval_begin interval_size offset_end K
SCAN_EXPR do_update WG_SIZE
first_segment_start_in_k_group scan_type
segment_start_in_subtree offset interval_results interval_end
first_segment_start_in_subtree unit_base
first_segment_start_in_interval k INPUT_EXPR
prev_group_sum prev pv value partial_val pgs
is_seg_start update_i scan_item_at_i seq_i read_i
l_ o_mod_k o_div_k l_segment_start_flags scan_value sum
first_seg_start_in_interval g_segment_start_flags
group_base seg_end my_val DEBUG ARGS
ints_to_store ints_per_wg scan_types_per_int linear_index
linear_scan_data_idx dest src store_base wrapped_scan_type
dummy scan_tmp tmp_aux
LID_2 LID_1 LID_0
LDIM_0 LDIM_1 LDIM_2
GDIM_0 GDIM_1 GDIM_2
GID_0 GID_1 GID_2
""".split())
_IGNORED_WORDS = set("""
4 8 32
typedef for endfor if void while endwhile endfor endif else const printf
None return bool n char true false ifdef pycu_printf str range assert
np iinfo max itemsize __packed__ struct __restrict__ extern C
set iteritems len setdefault
GLOBAL_MEM LOCAL_MEM_ARG WITHIN_KERNEL LOCAL_MEM KERNEL REQD_WG_SIZE
local_barrier
__syncthreads
pragma __attribute__ __global__ __device__ __shared__ __launch_bounds__
threadIdx blockIdx blockDim gridDim x y z
barrier
_final_update _debug_scan kernel_name
positions all padded integer its previous write based writes 0
has local worth scan_expr to read cannot not X items False bank
four beginning follows applicable item min each indices works side
scanning right summed relative used id out index avoid current state
boundary True across be This reads groups along Otherwise undetermined
store of times prior s update first regardless Each number because
array unit from segment conflicts two parallel 2 empty define direction
CL padding work tree bounds values and adds
scan is allowed thus it an as enable at in occur sequentially end no
storage data 1 largest may representable uses entry Y meaningful
computations interval At the left dimension know d
A load B group perform shift tail see last OR
this add fetched into are directly need
gets them stenciled that undefined
there up any ones or name only relevant populated
even wide we Prepare int seg Note re below place take variable must
intra Therefore find code assumption
branch workgroup complicated granularity phase remainder than simpler
We smaller look ifs lots self behind allow barriers whole loop
after already Observe achieve contiguous stores hard go with by math
size won t way divisible bit so Avoid declare adding single type
is_tail is_first_level input_expr argument_signature preamble
double_support neutral output_statement
k_group_size name_prefix is_segmented index_dtype scan_dtype
wg_size is_segment_start_expr fetch_expr_offsets
arg_ctypes ife_offsets input_fetch_exprs def
ife_offset arg_name local_fetch_expr_args update_body
update_loop_lookbehind update_loop_plain update_loop
use_lookbehind_update store_segment_start_flags
update_loop first_seg scan_dtype dtype_to_ctype
use_bank_conflict_avoidance
a b prev_item i last_item prev_value
N NO_SEG_BOUNDARY across_seg_boundary
""".split())
def _make_template(s):
leftovers = set()
def replace_id(match):
# avoid name clashes with user code by adding 'psc_' prefix to
# identifiers.
word = match.group(1)
if word in _IGNORED_WORDS:
return word
elif word in _PREFIX_WORDS:
return "psc_" + word
else:
leftovers.add(word)
return word
import re
s = re.sub(r"\b([a-zA-Z0-9_]+)\b", replace_id, s)
if leftovers:
from warnings import warn
warn("leftover words in identifier prefixing: " + " ".join(leftovers))
return mako.template.Template(s, strict_undefined=True)
class _GeneratedScanKernelInfo(Record):
__slots__ = [
"scan_src",
"kernel_name",
"scalar_arg_dtypes",
"wg_size",
"k_group_size"]
def __init__(self, scan_src, kernel_name, scalar_arg_dtypes, wg_size,
k_group_size):
Record.__init__(self,
scan_src=scan_src,
kernel_name=kernel_name,
scalar_arg_dtypes=scalar_arg_dtypes,
wg_size=wg_size,
k_group_size=k_group_size)
def build(self, options):
program = SourceModule(self.scan_src, options=options)
kernel = program.get_function(self.kernel_name)
kernel.prepare(self.scalar_arg_dtypes)
return _BuiltScanKernelInfo(
kernel=kernel,
wg_size=self.wg_size,
k_group_size=self.k_group_size)
class _BuiltScanKernelInfo(RecordWithoutPickling):
__slots__ = ["kernel", "wg_size", "k_group_size"]
def __init__(self, kernel, wg_size, k_group_size):
RecordWithoutPickling.__init__(self,
kernel=kernel,
wg_size=wg_size,
k_group_size=k_group_size)
class _GeneratedFinalUpdateKernelInfo(Record):
def __init__(self, source, kernel_name, scalar_arg_dtypes, update_wg_size):
Record.__init__(self,
source=source,
kernel_name=kernel_name,
scalar_arg_dtypes=scalar_arg_dtypes,
update_wg_size=update_wg_size)
def build(self, options):
program = SourceModule(self.source, options=options)
kernel = program.get_function(self.kernel_name)
kernel.prepare(self.scalar_arg_dtypes)
return _BuiltFinalUpdateKernelInfo(
kernel=kernel,
update_wg_size=self.update_wg_size
)
class _BuiltFinalUpdateKernelInfo(RecordWithoutPickling):
__slots__ = ["kernel", "update_wg_size"]
def __init__(self, kernel, update_wg_size):
RecordWithoutPickling.__init__(self,
kernel=kernel,
update_wg_size=update_wg_size)
# }}}
class ScanPerformanceWarning(UserWarning):
pass
class _GenericScanKernelBase(object):
# {{{ constructor, argument processing
def __init__(self, dtype,
arguments, input_expr, scan_expr, neutral, output_statement,
is_segment_start_expr=None, input_fetch_exprs=[],
index_dtype=np.int32,
name_prefix="scan", options=None, preamble=""):
"""
:arg dtype: the :class:`numpy.dtype` with which the scan will
be performed. May be a structured type if that type was registered
through :func:`pycuda.tools.get_or_register_dtype`.
:arg arguments: A string of comma-separated C argument declarations.
If *arguments* is specified, then *input_expr* must also be
specified. All types used here must be known to PyCUDA.
(see :func:`pycuda.tools.get_or_register_dtype`).
:arg scan_expr: The associative, binary operation carrying out the scan,
represented as a C string. Its two arguments are available as `a`
and `b` when it is evaluated. `b` is guaranteed to be the
'element being updated', and `a` is the increment. Thus,
if some data is supposed to just propagate along without being
modified by the scan, it should live in `b`.
This expression may call functions given in the *preamble*.
Another value available to this expression is `across_seg_boundary`,
a C `bool` indicating whether this scan update is crossing a
segment boundary, as defined by `is_segment_start_expr`.
The scan routine does not implement segmentation
semantics on its own. It relies on `scan_expr` to do this.
This value is available (but always `false`) even for a
non-segmented scan.
.. note::
In early pre-releases of the segmented scan,
segmentation semantics were implemented *without*
relying on `scan_expr`.
:arg input_expr: A C expression, encoded as a string, resulting
in the values to which the scan is applied. This may be used
to apply a mapping to values stored in *arguments* before being
scanned. The result of this expression must match *dtype*.
The index intended to be mapped is available as `i` in this
expression. This expression may also use the variables defined
by *input_fetch_expr*.
This expression may also call functions given in the *preamble*.
:arg output_statement: a C statement that writes
the output of the scan. It has access to the scan result as `item`,
the preceding scan result item as `prev_item`, and the current index
as `i`. `prev_item` in a segmented scan will be the neutral element
at a segment boundary, not the immediately preceding item.
Using *prev_item* in output statement has a small run-time cost.
`prev_item` enables the construction of an exclusive scan.
For non-segmented scans, *output_statement* may also reference
`last_item`, which evaluates to the scan result of the last
array entry.
:arg is_segment_start_expr: A C expression, encoded as a string,
resulting in a C `bool` value that determines whether a new
scan segments starts at index *i*. If given, makes the scan a
segmented scan. Has access to the current index `i`, the result
of *input_expr* as a, and in addition may use *arguments* and
*input_fetch_expr* variables just like *input_expr*.
If it returns true, then previous sums will not spill over into the
item with index *i* or subsequent items.
:arg input_fetch_exprs: a list of tuples *(NAME, ARG_NAME, OFFSET)*.
An entry here has the effect of doing the equivalent of the following
before input_expr::
ARG_NAME_TYPE NAME = ARG_NAME[i+OFFSET];
`OFFSET` is allowed to be 0 or -1, and `ARG_NAME_TYPE` is the type
of `ARG_NAME`.
:arg preamble: |preamble|
The first array in the argument list determines the size of the index
space over which the scan is carried out, and thus the values over
which the index *i* occurring in a number of code fragments in
arguments above will vary.
All code fragments further have access to N, the number of elements
being processed in the scan.
"""
dtype = self.dtype = np.dtype(dtype)
if neutral is None:
from warnings import warn
warn("not specifying 'neutral' is deprecated and will lead to "
"wrong results if your scan is not in-place or your "
"'output_statement' does something otherwise non-trivial",
stacklevel=2)
if dtype.itemsize % 4 != 0:
raise TypeError(
"scan value type must have size divisible by 4 bytes")
self.index_dtype = np.dtype(index_dtype)
if np.iinfo(self.index_dtype).min >= 0:
raise TypeError("index_dtype must be signed")
self.options = options
self.parsed_args = parse_arg_list(arguments)
from pycuda.tools import VectorArg
vector_args_indices = [i for i, arg in enumerate(self.parsed_args)
if isinstance(arg, VectorArg)]
self.first_array_idx = vector_args_indices[0]
self.input_expr = input_expr
self.is_segment_start_expr = is_segment_start_expr
self.is_segmented = is_segment_start_expr is not None
if self.is_segmented:
is_segment_start_expr = _process_code_for_macro(
is_segment_start_expr)
self.output_statement = output_statement
for name, arg_name, ife_offset in input_fetch_exprs:
if ife_offset not in [0, -1]:
raise RuntimeError(
"input_fetch_expr offsets must either be 0 or -1")
self.input_fetch_exprs = input_fetch_exprs
arg_dtypes = {}
arg_ctypes = {}
for arg in self.parsed_args:
arg_dtypes[arg.name] = arg.dtype
arg_ctypes[arg.name] = dtype_to_ctype(arg.dtype)
self.options = options
self.name_prefix = name_prefix
# {{{ set up shared code dict
from pytools import all
from pycuda.characterize import has_double_support
self.code_variables = dict(
np=np,
dtype_to_ctype=dtype_to_ctype,
preamble=preamble,
name_prefix=name_prefix,
index_dtype=self.index_dtype,
scan_dtype=dtype,
is_segmented=self.is_segmented,
arg_dtypes=arg_dtypes,
arg_ctypes=arg_ctypes,
scan_expr=_process_code_for_macro(scan_expr),
neutral=_process_code_for_macro(neutral),
double_support=has_double_support(),
)
index_typename = dtype_to_ctype(self.index_dtype)
scan_typename = dtype_to_ctype(dtype)
# This key is meant to uniquely identify the non-device parameters for
# the scan kernel.
self.kernel_key = (
self.dtype,
tuple(arg.declarator() for arg in self.parsed_args),
self.input_expr,
scan_expr,
neutral,
output_statement,
is_segment_start_expr,
tuple(input_fetch_exprs),
index_dtype,
name_prefix,
preamble,
# These depend on dtype_to_ctype(), so their value is independent of
# the other variables.
index_typename,
scan_typename,
)
# }}}
self.use_lookbehind_update = "prev_item" in self.output_statement
self.store_segment_start_flags = (
self.is_segmented and self.use_lookbehind_update)
self.finish_setup()
# }}}
generic_scan_kernel_cache = WriteOncePersistentDict(
"pycuda-generated-scan-kernel-cache-v1",
key_builder=_NumpyTypesKeyBuilder())
class GenericScanKernel(_GenericScanKernelBase):
"""Generates and executes code that performs prefix sums ("scans") on
arbitrary types, with many possible tweaks.
Usage example::
import pycuda.gpuarray as gpuarray
from compyle.cuda import GenericScanKernel
knl = GenericScanKernel(
np.int32,
arguments="int *ary",
input_expr="ary[i]",
scan_expr="a+b", neutral="0",
output_statement="ary[i] = item;")
a = gpuarray.arange(10000, dtype=np.int32)
knl(a)
"""
def finish_setup(self):
# Before generating the kernel, see if it's cached.
cache_key = (self.kernel_key,)
from_cache = False
try:
result = generic_scan_kernel_cache[cache_key]
from_cache = True
logger.debug(
"cache hit for generated scan kernel '%s'" % self.name_prefix)
(
self.first_level_scan_gen_info,
self.second_level_scan_gen_info,
self.final_update_gen_info) = result
except KeyError:
pass
if not from_cache:
logger.debug(
"cache miss for generated scan kernel '%s'" % self.name_prefix)
self._finish_setup_impl()
result = (self.first_level_scan_gen_info,
self.second_level_scan_gen_info,
self.final_update_gen_info)
generic_scan_kernel_cache.store_if_not_present(cache_key, result)
# Build the kernels.
self.first_level_scan_info = self.first_level_scan_gen_info.build(
self.options)
del self.first_level_scan_gen_info
self.second_level_scan_info = self.second_level_scan_gen_info.build(
self.options)
del self.second_level_scan_gen_info
self.final_update_info = self.final_update_gen_info.build(
self.options)
del self.final_update_gen_info
def _finish_setup_impl(self):
# {{{ find usable workgroup/k-group size, build first-level scan
trip_count = 0
dev = drv.Context.get_device()
avail_local_mem = dev.get_attribute(
drv.device_attribute.MAX_SHARED_MEMORY_PER_BLOCK)
# not sure where these go, but roughly this much seems unavailable.
avail_local_mem -= 0x400
max_scan_wg_size = dev.get_attribute(
drv.device_attribute.MAX_THREADS_PER_BLOCK)
wg_size_multiples = 64
use_bank_conflict_avoidance = (
self.dtype.itemsize > 4 and self.dtype.itemsize % 8 == 0)
# k_group_size should be a power of two because of in-kernel
# division by that number.
solutions = []
for k_exp in range(0, 9):
for wg_size in range(wg_size_multiples, max_scan_wg_size + 1,
wg_size_multiples):
k_group_size = 2**k_exp
lmem_use = self.get_local_mem_use(wg_size, k_group_size,
use_bank_conflict_avoidance)
if lmem_use <= avail_local_mem:
solutions.append(
(wg_size * k_group_size, k_group_size, wg_size))
from pytools import any
for wg_size_floor in [256, 192, 128]:
have_sol_above_floor = any(wg_size >= wg_size_floor
for _, _, wg_size in solutions)
if have_sol_above_floor:
# delete all solutions not meeting the wg size floor
solutions = [(total, try_k_group_size, try_wg_size)
for total, try_k_group_size, try_wg_size in solutions
if try_wg_size >= wg_size_floor]
break
_, k_group_size, max_scan_wg_size = max(solutions)
while True:
candidate_scan_gen_info = self.generate_scan_kernel(
max_scan_wg_size, self.parsed_args,
_process_code_for_macro(self.input_expr),
self.is_segment_start_expr,
input_fetch_exprs=self.input_fetch_exprs,
is_first_level=True,
store_segment_start_flags=self.store_segment_start_flags,
k_group_size=k_group_size,
use_bank_conflict_avoidance=use_bank_conflict_avoidance)
candidate_scan_info = candidate_scan_gen_info.build(
self.options)
# Will this device actually let us execute this kernel
# at the desired work group size? Building it is the
# only way to find out.
kernel_max_wg_size = candidate_scan_info.kernel.get_attribute(
drv.function_attribute.MAX_THREADS_PER_BLOCK)
if candidate_scan_info.wg_size <= kernel_max_wg_size:
break
else:
max_scan_wg_size = min(kernel_max_wg_size, max_scan_wg_size)
trip_count += 1
assert trip_count <= 20
self.first_level_scan_gen_info = candidate_scan_gen_info
assert (_round_down_to_power_of_2(candidate_scan_info.wg_size)
== candidate_scan_info.wg_size)
# }}}
# {{{ build second-level scan
from pycuda.tools import VectorArg
second_level_arguments = self.parsed_args + [
VectorArg(self.dtype, "interval_sums")]
second_level_build_kwargs = {}
if self.is_segmented:
second_level_arguments.append(
VectorArg(self.index_dtype,
"g_first_segment_start_in_interval_input"))
# is_segment_start_expr answers the question "should previous sums
# spill over into this item". And since
# g_first_segment_start_in_interval_input answers the question if a
# segment boundary was found in an interval of data, then if not,
# it's ok to spill over.
second_level_build_kwargs["is_segment_start_expr"] = \
"g_first_segment_start_in_interval_input[i] != NO_SEG_BOUNDARY"
else:
second_level_build_kwargs["is_segment_start_expr"] = None
self.second_level_scan_gen_info = self.generate_scan_kernel(
max_scan_wg_size,
arguments=second_level_arguments,
input_expr="interval_sums[i]",
input_fetch_exprs=[],
is_first_level=False,
store_segment_start_flags=False,
k_group_size=k_group_size,
use_bank_conflict_avoidance=use_bank_conflict_avoidance,
**second_level_build_kwargs)
# }}}
# {{{ generate final update kernel
update_wg_size = min(max_scan_wg_size, 256)
final_update_tpl = _make_template(UPDATE_SOURCE)
final_update_src = str(final_update_tpl.render(
wg_size=update_wg_size,
output_statement=self.output_statement,
argument_signature=", ".join(
arg.declarator() for arg in self.parsed_args),
is_segment_start_expr=self.is_segment_start_expr,
input_expr=_process_code_for_macro(self.input_expr),
use_lookbehind_update=self.use_lookbehind_update,
**self.code_variables))
update_scalar_arg_dtypes = (
get_arg_list_scalar_arg_dtypes(self.parsed_args)
+ [self.index_dtype, self.index_dtype, None, None])
if self.is_segmented:
# g_first_segment_start_in_interval
update_scalar_arg_dtypes.append(None)
if self.store_segment_start_flags:
update_scalar_arg_dtypes.append(None) # g_segment_start_flags
self.final_update_gen_info = _GeneratedFinalUpdateKernelInfo(
final_update_src,
self.name_prefix + "_final_update",
update_scalar_arg_dtypes,
update_wg_size)
# }}}
# {{{ scan kernel build/properties
def get_local_mem_use(self, k_group_size, wg_size,
use_bank_conflict_avoidance):
arg_dtypes = {}
for arg in self.parsed_args:
arg_dtypes[arg.name] = arg.dtype
fetch_expr_offsets = {}
for name, arg_name, ife_offset in self.input_fetch_exprs:
fetch_expr_offsets.setdefault(arg_name, set()).add(ife_offset)
itemsize = self.dtype.itemsize
if use_bank_conflict_avoidance:
itemsize += 4
return (
# ldata
itemsize * (k_group_size + 1) * (wg_size + 1)
# l_segment_start_flags
+ k_group_size * wg_size
# l_first_segment_start_in_subtree
+ self.index_dtype.itemsize * wg_size
+ k_group_size * wg_size * sum(
arg_dtypes[arg_name].itemsize
for arg_name, ife_offsets in list(fetch_expr_offsets.items())
if -1 in ife_offsets or len(ife_offsets) > 1))
def generate_scan_kernel(
self,
max_wg_size,
arguments,
input_expr,
is_segment_start_expr,
input_fetch_exprs,
is_first_level,
store_segment_start_flags,
k_group_size,
use_bank_conflict_avoidance):
scalar_arg_dtypes = get_arg_list_scalar_arg_dtypes(arguments)
# Empirically found on Nv hardware: no need to be bigger than this size
wg_size = _round_down_to_power_of_2(
min(max_wg_size, 256))
kernel_name = self.code_variables["name_prefix"]
if is_first_level:
kernel_name += "_lev1"
else:
kernel_name += "_lev2"
scan_tpl = _make_template(SCAN_INTERVALS_SOURCE)
scan_src = str(
scan_tpl.render(
wg_size=wg_size,
input_expr=input_expr,
k_group_size=k_group_size,
argument_signature=", ".join(
arg.declarator() for arg in arguments),
is_segment_start_expr=is_segment_start_expr,
input_fetch_exprs=input_fetch_exprs,
is_first_level=is_first_level,
store_segment_start_flags=store_segment_start_flags,
use_bank_conflict_avoidance=use_bank_conflict_avoidance,
kernel_name=kernel_name,
**self.code_variables))
scalar_arg_dtypes.extend(
(None, self.index_dtype, self.index_dtype))
if is_first_level:
scalar_arg_dtypes.append(None) # interval_results
if self.is_segmented and is_first_level:
scalar_arg_dtypes.append(None) # g_first_segment_start_in_interval
if store_segment_start_flags:
scalar_arg_dtypes.append(None) # g_segment_start_flags
return _GeneratedScanKernelInfo(
scan_src=scan_src,
kernel_name=kernel_name,
scalar_arg_dtypes=scalar_arg_dtypes,
wg_size=wg_size,
k_group_size=k_group_size)
# }}}
def __call__(self, *args, **kwargs):
# {{{ argument processing
allocator = kwargs.get("allocator")
n = kwargs.get("size")
stream = kwargs.get("stream")
if len(args) != len(self.parsed_args):
raise TypeError("expected %d arguments, got %d" %
(len(self.parsed_args), len(args)))
first_array = args[self.first_array_idx]
allocator = allocator or first_array.allocator
if n is None:
n, = first_array.shape
if n == 0:
return
data_args = []
from pycuda.tools import VectorArg
for arg_descr, arg_val in zip(self.parsed_args, args):
if isinstance(arg_descr, VectorArg):
data_args.append(arg_val.gpudata)
else:
data_args.append(arg_val)
# }}}
l1_info = self.first_level_scan_info
l2_info = self.second_level_scan_info
unit_size = l1_info.wg_size * l1_info.k_group_size
dev = drv.Context.get_device()
max_intervals = 3 * dev.get_attribute(
drv.device_attribute.MULTIPROCESSOR_COUNT)
from pytools import uniform_interval_splitting
interval_size, num_intervals = uniform_interval_splitting(
n, unit_size, max_intervals)
# {{{ allocate some buffers
interval_results = gpuarray.empty(
num_intervals, dtype=self.dtype,
allocator=allocator)
partial_scan_buffer = gpuarray.empty(
n, dtype=self.dtype,
allocator=allocator)
if self.store_segment_start_flags:
segment_start_flags = gpuarray.empty(
n, dtype=np.bool_,
allocator=allocator)
# }}}
# {{{ first level scan of interval (one interval per block)
scan1_args = data_args + [
partial_scan_buffer.gpudata, n, interval_size,
interval_results.gpudata,
]
if self.is_segmented:
first_segment_start_in_interval = gpuarray.empty(
num_intervals, dtype=self.index_dtype,
allocator=allocator)
scan1_args.append(first_segment_start_in_interval.gpudata)
if self.store_segment_start_flags:
scan1_args.append(segment_start_flags.gpudata)
l1_evt = l1_info.kernel.prepared_async_call(
(num_intervals, 1), (l1_info.wg_size, 1, 1), stream,
*scan1_args)
# }}}
# {{{ second level scan of per-interval results
# can scan at most one interval
assert interval_size >= num_intervals
scan2_args = data_args + [
interval_results.gpudata, # interval_sums
]
if self.is_segmented:
scan2_args.append(first_segment_start_in_interval.gpudata)
scan2_args = scan2_args + [
interval_results.gpudata, # partial_scan_buffer
num_intervals, interval_size]
l2_evt = l2_info.kernel.prepared_async_call(
(1, 1), (l1_info.wg_size, 1, 1), stream,
*scan2_args)
# }}}
# {{{ update intervals with result of interval scan
upd_args = data_args + [n,
interval_size,
interval_results.gpudata,
partial_scan_buffer.gpudata]
if self.is_segmented:
upd_args.append(first_segment_start_in_interval.gpudata)
if self.store_segment_start_flags:
upd_args.append(segment_start_flags.gpudata)
return self.final_update_info.kernel.prepared_async_call(
(num_intervals, 1),
(self.final_update_info.update_wg_size, 1, 1), stream,
*upd_args)
# }}}
# }}}
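# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): builds an exclusive
# prefix sum on top of GenericScanKernel, following the constructor docstring's
# note that referencing `prev_item` in the output statement yields an exclusive
# scan. Assumes a CUDA context exists (created here via pycuda.autoinit); the
# names `ary` and `out` are illustrative only.
if __name__ == "__main__":
    import pycuda.autoinit  # noqa: F401 -- creates a default CUDA context
    import pycuda.gpuarray as gpuarray

    excl_scan = GenericScanKernel(
        np.int32,
        arguments="int *ary, int *out",
        input_expr="ary[i]",
        scan_expr="a+b", neutral="0",
        # prev_item is the preceding scan result (neutral at i=0), which turns
        # the inclusive scan into an exclusive one.
        output_statement="out[i] = prev_item;")

    ary = gpuarray.arange(16, dtype=np.int32)
    out = gpuarray.empty_like(ary)
    excl_scan(ary, out)
    # for input 0..15 the exclusive sums start 0, 0, 1, 3, 6, 10, ...
    print(out.get())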
|
StarcoderdataPython
|
3382835
|
#
# Copyright (C) 2019 <NAME> (<EMAIL>)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# python 2 version
import base64
from Crypto.PublicKey import RSA
from Crypto import Random
import ast
from Crypto.Cipher import PKCS1_OAEP
from Crypto.Cipher import PKCS1_v1_5
from Crypto.Hash import SHA
# https://github.com/jashandeep-sohi/python-blowfish
# https://pycryptodome.readthedocs.io/en/latest/src/cipher/oaep.html
# old https://gitlab.com/m2crypto/m2crypto
# old https://stackoverflow.com/questions/43664751/python-to-java-encryption-rsa
# https://daniellimws.github.io/rsa-java-to-python
# RSA/ECB/PKCS1Padding
# pip install crypto rsa
# pip install pycryptodome
encrypted = 'jZm1l82bdMU8+nc6pt/C/E+isFlJyKvZtwcNR3aLAp7Y8xT3hqKzrtsjhn85KfaN8dR5hgFRTHaEzd/xRqQYC3xQu7U0jwl/LJCPMMC1BBdth/PvfuF5Qr6u/TLz1vl2ZkGXf6aoBx4LWhBONT/OkbGwjGrjdHVtzx1meSHJyGjY0rsc8+s2sMQsbAKwMA8ZCGDRrT9277R36nTaTOIbb+z9wJY7wh3kTZj4KpkXBQOIziB0BAQwXOIHXvGx95Nh31A8pCNS8voBz8wuIsOPKMKvYq7l+X5QthxySY3LXw/l2F5eH7d2sT/JFiljwtJzouhxfdAC5crfieMHYfN5frm0d+1cB1TMjkbVW505GZpauLXLQs9WA3CRK8t8vQDZJKPvr1CgYZZoylEPYwqF7W00rtbhBMX3+YtWE8qNUfo6OD3Cj23GYQnXVTLCJhxAizYX7sSLnkYNiH4HHkW187PNYda/fIHoybJ5jPzZ3nSVvKh/lNzetUoxAUHM0noqMTXGEhnQLN+dq/LTo7uJ5czasWu6nx2hEsmGTyRNvSsFbjLiqVlee4BbjgJVel/LFYIaWeLYtDZZwGBTS81pCTxU8gF3ksIQtiDesvyzBFBP9bHyu/aS8RvqGg2IeHZY0GZHtBygZMs++HnmJQ2QfGbQs6OuRnVEMgV5rI3WcAwrcz38/g4/4IR27TwISMubzRT1zFVaocklJ4TEucFgz0OU1euETa0xbStc/HPUBKrZt3+9w0sGmNzvXUnKzAoNtMMq7TIxJ4ik6jzYURWV7qKfyN+0Wk27nOaXlBPNPitvUYoSaygysK1CjaC2+PnCkpPaZA7Q0RQAZvboBynqEw=='
plain_key = 'MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBAM/NIqP7cvUpBnT67AsbEdInIF9KlFiklgzEF4UP1vN1wTSnHuVzQD/DNYBtYRvQOg6sr+usGV2DrnsAn1lgatwlNV3ethTOSPsLfv8HA//LofTW2ZGZ0D4CsQZBWgjmHRppVYb+2DUiVZG2IPo4SWAhtfmwNuVMDWHq5oKFxauVAgMBAAECgYBSbhA4tk0ivRRnoQWzXhiSoZyw0DfKdfdjtwGRcfgVeXebuFOEN1ScNoZpnHnx+4acPZpHRWyGcO7sshGD9cBNPqP2hvp9d+YvH3JOczO+D3xnSlfnMii0XR7eTaF32+T73rB4G/cQ8+Gp9IeoZwrj60sa4WZUrOuvUeH4NQEIIQJBAOgi0iM973ZntKbeJBoEeIRX0nYIz5qGytXyeZJPFegUhX0Ljf9wQD9x8Zwm+8AhHmGyFasb1Cw/u4j7ATOnl90CQQDlKeRg0KOZ9W6h+4o2XlDcL5aUJcEZulWGvIbUXcKUWBdQbrwMbCb/6bPpjScQFpTR6tZla4S9IULKkHJGPUMZAkEA42sBra8Gw1xUGkp02dxZaWZUdHirUnsNik6TlafPEV/RazD/uylwd/ecOVvjtVV82z9JhSmtUnBZvJgTlFRzLQJBALej2HWU/GWV/nAkCOAEuLuaDwrtLk8VuQ/d6BYqhJEn/pbgBiXWTXJqr1gLWzBTSDLoA6MGhDqjesik9E5BLZECQFDVDPjE10MbqVvkFMRPcPJvECBn44TFeg2MseEAkQHVgbfuvVgZ3eX2nc3uzqbflCfgi1F1lINBeoJQIb4eexQ='
encrypted_text = """T<KEY>""".replace("\n","")
def decode_msg_v1_5(ciphertext, privateKey):
""" Should consider using the more robust PKCS1 OAEP. """
sentinel = Random.new().read(256) # data length is 256
cipher = PKCS1_v1_5.new(privateKey)
messagereceived = cipher.decrypt(ciphertext[0:128], sentinel)
return messagereceived
private_key = "-----BEGIN RSA PRIVATE KEY-----\n" + plain_key + "\n-----END RSA PRIVATE KEY-----"
rsa_private_key = RSA.importKey(private_key)
encryptedBytes = base64.b64decode(encrypted_text)
decodedfile = decode_msg_v1_5(encryptedBytes, rsa_private_key)
print(decodedfile)
# decrypted = key.decrypt(ast.literal_eval(str(encrypted)))
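# Hedged sketch (not part of the original script): the docstring of
# decode_msg_v1_5 suggests the more robust PKCS1 OAEP scheme; with the
# already-imported PKCS1_OAEP module that would look roughly like the helper
# below. Note it only applies to data that was encrypted with OAEP in the
# first place, so it would not decrypt the v1.5 ciphertext used above.
def decode_msg_oaep(ciphertext, private_key):
    """Decrypt with RSAES-OAEP (raises ValueError on failure instead of
    returning a sentinel)."""
    cipher = PKCS1_OAEP.new(private_key)
    return cipher.decrypt(ciphertext)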
|
StarcoderdataPython
|
83320
|
<reponame>kashewnuts/pipenv<filename>pipenv/vendor/vistir/compat.py
# -*- coding=utf-8 -*-
from __future__ import absolute_import, unicode_literals
import errno
import os
import sys
import warnings
from tempfile import mkdtemp
import six
__all__ = [
"Path",
"get_terminal_size",
"finalize",
"partialmethod",
"JSONDecodeError",
"FileNotFoundError",
"ResourceWarning",
"FileNotFoundError",
"PermissionError",
"IsADirectoryError",
"fs_str",
"lru_cache",
"TemporaryDirectory",
"NamedTemporaryFile",
"to_native_string",
]
if sys.version_info >= (3, 5):
from pathlib import Path
from functools import lru_cache
else:
from pathlib2 import Path
from pipenv.vendor.backports.functools_lru_cache import lru_cache
from .backports.tempfile import NamedTemporaryFile as _NamedTemporaryFile
if sys.version_info < (3, 3):
from pipenv.vendor.backports.shutil_get_terminal_size import get_terminal_size
NamedTemporaryFile = _NamedTemporaryFile
else:
from tempfile import NamedTemporaryFile
from shutil import get_terminal_size
try:
from weakref import finalize
except ImportError:
from pipenv.vendor.backports.weakref import finalize
try:
from functools import partialmethod
except Exception:
from .backports.functools import partialmethod
try:
from json import JSONDecodeError
except ImportError: # Old Pythons.
JSONDecodeError = ValueError
if six.PY2:
class ResourceWarning(Warning):
pass
class FileNotFoundError(IOError):
"""No such file or directory"""
def __init__(self, *args, **kwargs):
self.errno = errno.ENOENT
super(FileNotFoundError, self).__init__(*args, **kwargs)
class PermissionError(OSError):
def __init__(self, *args, **kwargs):
self.errno = errno.EACCES
super(PermissionError, self).__init__(*args, **kwargs)
class IsADirectoryError(OSError):
"""The command does not work on directories"""
pass
else:
from builtins import ResourceWarning, FileNotFoundError, PermissionError, IsADirectoryError
if not sys.warnoptions:
warnings.simplefilter("default", ResourceWarning)
class TemporaryDirectory(object):
"""Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
example:
with TemporaryDirectory() as tmpdir:
...
Upon exiting the context, the directory and everything contained
in it are removed.
"""
def __init__(self, suffix="", prefix=None, dir=None):
if "RAM_DISK" in os.environ:
import uuid
name = uuid.uuid4().hex
dir_name = os.path.join(os.environ["RAM_DISK"].strip(), name)
os.mkdir(dir_name)
self.name = dir_name
else:
suffix = suffix if suffix else ""
if not prefix:
self.name = mkdtemp(suffix=suffix, dir=dir)
else:
self.name = mkdtemp(suffix, prefix, dir)
self._finalizer = finalize(
self,
self._cleanup,
self.name,
warn_message="Implicitly cleaning up {!r}".format(self),
)
@classmethod
def _rmtree(cls, name):
from .path import rmtree
def onerror(func, path, exc_info):
if issubclass(exc_info[0], (PermissionError, OSError)):
try:
try:
if path != name:
os.chflags(os.path.dirname(path), 0)
os.chflags(path, 0)
except AttributeError:
pass
if path != name:
os.chmod(os.path.dirname(path), 0o700)
os.chmod(path, 0o700)
try:
os.unlink(path)
# PermissionError is raised on FreeBSD for directories
except (IsADirectoryError, PermissionError, OSError):
cls._rmtree(path)
except FileNotFoundError:
pass
elif issubclass(exc_info[0], FileNotFoundError):
pass
else:
raise
rmtree(name, onerror=onerror)
@classmethod
def _cleanup(cls, name, warn_message):
cls._rmtree(name)
warnings.warn(warn_message, ResourceWarning)
def __repr__(self):
return "<{} {!r}>".format(self.__class__.__name__, self.name)
def __enter__(self):
return self
def __exit__(self, exc, value, tb):
self.cleanup()
def cleanup(self):
if self._finalizer.detach():
self._rmtree(self.name)
def fs_str(string):
"""Encodes a string into the proper filesystem encoding
Borrowed from pip-tools
"""
if isinstance(string, str):
return string
assert not isinstance(string, bytes)
return string.encode(_fs_encoding)
_fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
def to_native_string(string):
from .misc import to_text, to_bytes
if six.PY2:
return to_bytes(string)
return to_text(string)
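# Hedged usage sketch (not part of the original module): TemporaryDirectory is
# usable as a context manager on both Python 2 and 3, and fs_str() coerces the
# resulting path to the local filesystem encoding. The prefix below is
# illustrative only.
if __name__ == "__main__":
    with TemporaryDirectory(prefix="vistir-demo-") as tmpdir:
        print("created " + fs_str(tmpdir.name))
    # the directory (and anything created inside it) is removed once the
    # context manager exits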
|
StarcoderdataPython
|
126109
|
<filename>lib/parse.py
#
# This module holds our parsing code for DNS messages.
#
import binascii
import logging
import struct
import lib.parse_answer
logger = logging.getLogger()
def parseHeaderText(header):
"""
parseHeaderText(): Go through our parsed headers, and create text descriptions based on them.
"""
retval = {}
if header["qr"] == 0:
retval["qr"] = "Question"
elif header["qr"] == 1:
retval["qr"] = "Response"
else:
retval["qr"] = "Unknown! (%s)" % header["qr"]
if header["opcode"] == 0:
retval["opcode_text"] = "Standard query"
elif header["opcode"] == 1:
retval["opcode_text"] = "Inverse query"
elif header["opcode"] == 2:
retval["opcode_text"] = "Server status request"
else:
retval["opcode_text"] = "Unknown! (%s)" % header["opcode"]
if header["aa"] == 0:
retval["aa"] = "Server isn't an authority"
elif header["aa"] == 1:
retval["aa"] = "Server is an authority"
else:
retval["aa"] = "Unknown! (%s)" % header["aa"]
if header["tc"] == 0:
retval["tc"] = "Message not truncated"
elif header["tc"] == 1:
retval["tc"] = "Message truncated"
else:
retval["tc"] = "Unknown! (%s)" % header["tc"]
if header["rd"] == 0:
retval["rd"] = "Recursion not requested"
elif header["rd"] == 1:
retval["rd"] = "Recursion requested"
else:
retval["rd"] = "Unknown! (%s)" % header["rd"]
if header["ra"] == 0:
retval["ra"] = "Recursion not available"
elif header["ra"] == 1:
retval["ra"] = "Recursion available!"
else:
retval["ra"] = "Unknown! (%s)" % header["ra"]
if header["rcode"] == 0:
retval["rcode_text"] = "No errors reported"
elif header["rcode"] == 1:
retval["rcode_text"] = "Format error (nameserver couldn't interpret this query)"
elif header["rcode"] == 2:
retval["rcode_text"] = "Server failure"
elif header["rcode"] == 3:
retval["rcode_text"] = "Name error (name does not exist!)"
elif header["rcode"] == 4:
retval["rcode_text"] = "Not implemented (nameserver doesn't support this type of query)"
elif header["rcode"] == 5:
retval["rcode_text"] = "Refused (the server refused to answer our question!)"
else:
retval["rcode_text"] = "Error code %s" % header["rcode"]
return(retval)
#
# Extract request ID from the header
#
def getRequestId(data):
retval = binascii.hexlify(data[0:2])
return(retval)
def parseHeader(data):
"""
parseHeader(): Extracts the various fields of our header
Returns a dictionary.
"""
retval = {}
request_id = data[0:2]
retval["request_id"] = binascii.hexlify(request_id).decode("utf-8")
#
# Header flag bits:
#
# 0 - QR: 0 if query, 1 if answer
# 1-4 - Opcode: 0 is standard query, 1 is reverse query, 2 is server status request
# 5 - AA: Is the answer authoritative?
# 6 - TC: Has the message been truncated?
# 7 - RD: Set to 1 when recursion is desired
# 8 - RA: Is Recursion available on this DNS server?
# 9-11 - Z: These reserved bits are always set to zero.
# 12-15 - RCODE: Result Code. 0 for no errors.
#
header = data[2:4]
logger.debug("Header Flags: %s: %s %s" % (binascii.hexlify(header), data[2], data[3]))
retval["header"] = {}
retval["header"]["qr"] = (data[2] & 0b10000000) >> 7
retval["header"]["opcode"] = (data[2] & 0b01111000) >> 3
retval["header"]["aa"] = (data[2] & 0b00000100) >> 2
retval["header"]["tc"] = (data[2] & 0b00000010) >> 1
retval["header"]["rd"] = (data[2] & 0b00000001)
retval["header"]["ra"] = (data[3] & 0b10000000) >> 7
retval["header"]["z"] = (data[3] & 0b01110000) >> 4
retval["header"]["rcode"] = (data[3] & 0b00001111)
#
# Create text versions of our header fields
#
retval["header_text"] = parseHeaderText(retval["header"])
retval["num_questions"] = struct.unpack(">H", data[4:6])[0]
retval["num_answers"] = struct.unpack(">H", data[6:8])[0]
retval["num_authority_records"] = struct.unpack(">H", data[8:10])[0]
retval["num_additional_records"] = struct.unpack(">H", data[10:12])[0]
return(retval)
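# Hedged usage sketch (illustrative only, not part of the original module):
# a minimal 12-byte header for a standard query with recursion desired
# (ID 0x1234, flags 0x0100, one question) exercises the bit fields above.
if __name__ == "__main__":
    sample = bytes.fromhex("123401000001000000000000")
    parsed = parseHeader(sample)
    print(parsed["header_text"]["qr"])   # Question
    print(parsed["header_text"]["rd"])   # Recursion requested
    print(parsed["num_questions"])       # 1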
|
StarcoderdataPython
|
86569
|
<reponame>amaork/raspi-peri<filename>raspi_peri/version.py
version = '0.01'
|
StarcoderdataPython
|
1736432
|
<reponame>fujy/ROS-Project<filename>src/rbx2/rbx2_ar_tags/nodes/ar_follower.py<gh_stars>1-10
#!/usr/bin/env python
"""
ar_follower.py - Version 1.0 2013-08-25
Follow an AR tag published on the /ar_pose_marker topic. The /ar_pose_marker topic
is published by the ar_track_alvar package
Created for the Pi Robot Project: http://www.pirobot.org
Copyright (c) 2013 <NAME>. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details at:
http://www.gnu.org/licenses/gpl.html
"""
import rospy
from ar_track_alvar.msg import AlvarMarkers
from geometry_msgs.msg import Twist
from math import copysign
class ARFollower():
def __init__(self):
rospy.init_node("ar_follower")
# Set the shutdown function (stop the robot)
rospy.on_shutdown(self.shutdown)
# How often should we update the robot's motion?
self.rate = rospy.get_param("~rate", 10)
r = rospy.Rate(self.rate)
# The maximum rotation speed in radians per second
self.max_angular_speed = rospy.get_param("~max_angular_speed", 2.0)
# The minimum rotation speed in radians per second
self.min_angular_speed = rospy.get_param("~min_angular_speed", 0.5)
# The maximum distance a target can be from the robot for us to track
self.max_x = rospy.get_param("~max_x", 20.0)
# The goal distance (in meters) to keep between the robot and the marker
self.goal_x = rospy.get_param("~goal_x", 0.6)
# How far away from the goal distance (in meters) before the robot reacts
self.x_threshold = rospy.get_param("~x_threshold", 0.05)
# How far away from being centered (y displacement) on the AR marker
# before the robot reacts (units are meters)
self.y_threshold = rospy.get_param("~y_threshold", 0.05)
# How much do we weight the goal distance (x) when making a movement
self.x_scale = rospy.get_param("~x_scale", 0.5)
# How much do we weight y-displacement when making a movement
self.y_scale = rospy.get_param("~y_scale", 1.0)
# The max linear speed in meters per second
self.max_linear_speed = rospy.get_param("~max_linear_speed", 0.3)
# The minimum linear speed in meters per second
self.min_linear_speed = rospy.get_param("~min_linear_speed", 0.1)
# Publisher to control the robot's movement
self.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist, queue_size=5)
# Initialize the movement command
self.move_cmd = Twist()
# Set flag to indicate when the AR marker is visible
self.target_visible = False
# Wait for the ar_pose_marker topic to become available
rospy.loginfo("Waiting for ar_pose_marker topic...")
rospy.wait_for_message('ar_pose_marker', AlvarMarkers)
# Subscribe to the ar_pose_marker topic to get the image width and height
rospy.Subscriber('ar_pose_marker', AlvarMarkers, self.set_cmd_vel)
rospy.loginfo("Marker messages detected. Starting follower...")
# Begin the cmd_vel publishing loop
while not rospy.is_shutdown():
# Send the Twist command to the robot
self.cmd_vel_pub.publish(self.move_cmd)
# Sleep for 1/self.rate seconds
r.sleep()
def set_cmd_vel(self, msg):
# Pick off the first marker (in case there is more than one)
try:
marker = msg.markers[0]
if not self.target_visible:
rospy.loginfo("FOLLOWER is Tracking Target!")
self.target_visible = True
except:
# If the target is lost, stop the robot by slowing it incrementally
self.move_cmd.linear.x /= 1.5
self.move_cmd.angular.z /= 1.5
if self.target_visible:
rospy.loginfo("FOLLOWER LOST Target!")
self.target_visible = False
return
# Get the displacement of the marker relative to the base
target_offset_y = marker.pose.pose.position.y
# Get the distance of the marker from the base
target_offset_x = marker.pose.pose.position.x
# Rotate the robot only if the displacement of the target exceeds the threshold
if abs(target_offset_y) > self.y_threshold:
# Set the rotation speed proportional to the displacement of the target
speed = target_offset_y * self.y_scale
self.move_cmd.angular.z = copysign(max(self.min_angular_speed,
min(self.max_angular_speed, abs(speed))), speed)
else:
self.move_cmd.angular.z = 0.0
# Now get the linear speed
if abs(target_offset_x - self.goal_x) > self.x_threshold:
speed = (target_offset_x - self.goal_x) * self.x_scale
if speed < 0:
speed *= 1.5
self.move_cmd.linear.x = copysign(min(self.max_linear_speed, max(self.min_linear_speed, abs(speed))), speed)
else:
self.move_cmd.linear.x = 0.0
def shutdown(self):
rospy.loginfo("Stopping the robot...")
self.cmd_vel_pub.publish(Twist())
rospy.sleep(1)
if __name__ == '__main__':
try:
ARFollower()
rospy.spin()
except rospy.ROSInterruptException:
rospy.loginfo("AR follower node terminated.")
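# Worked example (illustrative, not part of the original node): with the
# default parameters, a marker 1.0 m ahead and 0.2 m to the side gives
#   angular = copysign(max(0.5, min(2.0, |0.2 * 1.0|)), +)        = 0.5 rad/s
#   linear  = copysign(min(0.3, max(0.1, |(1.0 - 0.6) * 0.5|)), +) = 0.2 m/s
# i.e. even small lateral offsets are clamped up to min_angular_speed, which
# is what makes the robot turn decisively toward the tag.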
|
StarcoderdataPython
|
147387
|
import pandas as pd
import os
import re
import numpy as np
from datetime import datetime
from sklearn.decomposition import PCA
# Plotting Packages
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.cbook as cbook
import numpy as np
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from matplotlib import rcParams
rcParams['font.family'] = "Times New Roman"
colors=['#033C5A','#AA9868','#0190DB','#FFC72C','#A75523','#008364','#78BE20','#C9102F',
'#033C5A','#AA9868','#0190DB','#FFC72C','#A75523','#008364','#78BE20','#C9102F']
#-----------------------------------------------------------------------------------------------------------------------
#----------------------------------------------------Import Data--------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------
# Import monthly data
monthlyIndex=pd.read_csv(r'Data\RegRelevant_MonthlySentimentIndex_Jan2021.csv')
print(monthlyIndex.info())
monthlyIndex['Year-Month']=monthlyIndex['Year'].map(str)+'-'+monthlyIndex['Month'].map(str)
monthlyIndex['date']=monthlyIndex['Year-Month'].astype('datetime64[ns]').dt.date
for dname in ['GI','LM','LSD']:
monthlyIndex[dname+'index_standardized']=(monthlyIndex[dname+'index']-np.mean(monthlyIndex[dname+'index']))/np.std(monthlyIndex[dname+'index'])
monthlyIndex['UncertaintyIndex_standardized']=(monthlyIndex['UncertaintyIndex']-np.mean(monthlyIndex['UncertaintyIndex']))/np.std(monthlyIndex['UncertaintyIndex'])
# PCA of monthly sentiment indexes
features = ['GIindex', 'LMindex', 'LSDindex']
x = monthlyIndex.loc[:, features].values
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(x)
print("Variance explained by PC1 and PC2:", pca.explained_variance_ratio_)
print("PC1 feature weights:", pca.components_[0])
principalComponents_neg=principalComponents*(-1)
principalDf = pd.DataFrame(data = principalComponents_neg, columns = ['SentimentPC1', 'SentimentPC2'])
monthlyIndex = pd.concat([monthlyIndex, principalDf], axis = 1)
monthlyIndex['SentimentMax']=monthlyIndex[['GIindex','LMindex','LSDindex']].max(axis=1)
monthlyIndex['SentimentMin']=monthlyIndex[['GIindex','LMindex','LSDindex']].min(axis=1)
# Import weekly data
weeklyIndex=pd.read_csv(r'Data\RegRelevant_WeeklySentimentIndex_Jan2021.csv')
print(weeklyIndex.info())
weeklyIndex['date']=weeklyIndex['StartDate'].astype('datetime64[ns]').dt.date
for dname in ['GI','LM','LSD']:
weeklyIndex[dname+'index_standardized']=(weeklyIndex[dname+'index']-np.mean(weeklyIndex[dname+'index']))/np.std(weeklyIndex[dname+'index'])
weeklyIndex['UncertaintyIndex_standardized']=(weeklyIndex['UncertaintyIndex']-np.mean(weeklyIndex['UncertaintyIndex']))/np.std(weeklyIndex['UncertaintyIndex'])
# PCA of weekly sentiment indexes
features = ['GIindex', 'LMindex', 'LSDindex']
x = weeklyIndex.loc[:, features].values
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(x)
print("Variance explained by PC1 and PC2:", pca.explained_variance_ratio_)
print("PC1 feature weights:", pca.components_[0])
principalComponents_neg=principalComponents*(-1)
principalDf = pd.DataFrame(data = principalComponents_neg, columns = ['SentimentPC1', 'SentimentPC2'])
weeklyIndex = pd.concat([weeklyIndex, principalDf], axis = 1)
#-----------------------------------------------------------------------------------------------------------------------
#---------------------------------------Plot Monthly Sentiment & Uncertainty Indexes--------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------
# Plot monthly uncertainty index under Trump with weekly inset
x=monthlyIndex['date'][-49:]
y=monthlyIndex['UncertaintyIndex'][-49:]
fig, ax = plt.subplots(1, figsize=(15,8))
ax.plot(x,y,color=colors[0],marker='D',markersize=8)
# Events
ax.text(datetime(2016,12,1), 0.73, 'Transition\nof power', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2020,4,1), 0.8, 'Coronavirus\noutbreak', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2020,11,1), 0.77, '2020 presidential\nelection', fontsize=13, color=colors[4],horizontalalignment='center')
# format the ticks
years = mdates.YearLocator() # every year
months = mdates.MonthLocator() # every month
years_fmt = mdates.DateFormatter('%Y-%m')
#
# ax.xaxis.set_major_locator(years)
# ax.xaxis.set_major_formatter(years_fmt)
# ax.xaxis.set_minor_locator(months)
#
# # round to nearest years.
# datemin = np.datetime64(min(x), 'Y')
# datemax = np.datetime64(max(x), 'Y') + np.timedelta64(1, 'Y')
# ax.set_xlim(datemin, datemax)
# format the coords message box
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.format_ydata = lambda x: '$%1.2f' % x
fig.autofmt_xdate()
# Set tick and label format
ax.tick_params(axis='both',which='major',labelsize=14,color='#d3d3d3')
ax.tick_params(axis='both',which='minor',color='#d3d3d3')
ax.set_ylabel('Monthly Uncertainty Index',fontsize=16)
ax.set_yticks(np.arange(round(min(y),1)-0.1,round(max(y),1)+0.2,0.1))
#ax.set_ylim(bottom=round(min(y),1))
ax.grid(color='#d3d3d3', which='major', axis='y')
# Borders
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_color('#d3d3d3')
ax.spines['bottom'].set_color('#d3d3d3')
# Title
fig.suptitle('Figure 1: Uncertainty about Regulatory Policy',
x=0.72, y=0.95,fontsize=20)
ax.set_title('(January 2017 - January 2021)',fontsize=18,position=(0.85,1.1))
# Inset plot
xins=weeklyIndex['date'][-52:]
yins=weeklyIndex['UncertaintyIndex'][-52:]
axins=inset_axes(ax, width=5, height=2.5, bbox_to_anchor=(.05, .69, .5, .5),
bbox_transform=ax.transAxes,loc=2)
axins.plot(xins,yins,color='#033C5A',linewidth=2,marker='D',markersize=5)
axins.format_xdata = mdates.DateFormatter('%Y-%m')
axins.set_yticks(np.arange(round(min(yins),1)-0.1, round(max(yins),1)+0.2, 0.1))
axins.grid(color='gray', which='major', axis='y', linestyle='dotted')
axins.tick_params(axis='both',which='major',labelsize=10)
axins.set_facecolor('#d3d3d3')
axins.set_alpha(0.2)
axins.set_title('Weekly Index over the Past 12 Months',fontsize=14,position=(0.5,0.85))
# Adjust plot position
plt.subplots_adjust(top=0.81, bottom=0.15)
#Notes
fig.text(0.12, 0.02,'Notes: The uncertainty index was estimated using a dictionary-based sentiment analysis'
' approach applied to newspaper text and fixed effects\nregressions. '
'For details on the methodology, refer to the latest draft of the Sinclair and Xie paper'
' on "Sentiment and Uncertainty about Regulation".',
fontsize=14,style='italic')
plt.savefig('Figures/Figure1.jpg', bbox_inches='tight')
plt.show()
#-----------------------------------------------------------------------------------------------------------------------
# Plot monthly uncertainty index with events by presidential year
x=monthlyIndex['date']
y=monthlyIndex['UncertaintyIndex']
fig, ax = plt.subplots(1, figsize=(15,9))
ax.plot(x,y,color='black')
# Presidential year
ax.axvspan(datetime(1985,1,1),datetime(1989,2,1),alpha=0.1, color=colors[7])
ax.text(datetime(1987,1,1), 0.91, 'Ronald\nReagan', fontsize=13, color=colors[7],horizontalalignment='center')
ax.axvspan(datetime(1989,2,1),datetime(1993,2,1),alpha=0.1, color=colors[7])
ax.text(datetime(1991,1,1), 0.91, '<NAME>', fontsize=13, color=colors[7],horizontalalignment='center')
ax.axvspan(datetime(1993,2,1),datetime(2001,2,1),alpha=0.1, color=colors[0])
ax.text(datetime(1997,1,1), 0.91, 'Bill\nClinton', fontsize=13, color=colors[0],horizontalalignment='center')
ax.axvspan(datetime(2001,2,1),datetime(2009,2,1),alpha=0.1, color=colors[7])
ax.text(datetime(2005,1,1), 0.91, '<NAME>', fontsize=13, color=colors[7],horizontalalignment='center')
ax.axvspan(datetime(2009,2,1),datetime(2017,2,1),alpha=0.1, color=colors[0])
ax.text(datetime(2013,1,1), 0.91, 'Barack\nObama', fontsize=13, color=colors[0],horizontalalignment='center')
ax.axvspan(datetime(2017,2,1),datetime(2021,2,1),alpha=0.1, color=colors[7])
ax.text(datetime(2019,1,1),0.91, 'Donald\nTrump', fontsize=13, color=colors[7],horizontalalignment='center')
# events
ax.text(datetime(2008,9,1), 0.8, 'Lehman\nBrothers', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2010,3,1), 0.855, 'Obamacare', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2010,10,1), 0.87, 'Deepwater Horizon\noil spill', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2010,7,1), 0.84, 'Dodd-Frank', fontsize=13, color=colors[4],horizontalalignment='left')
ax.text(datetime(2016,11,1),0.83 , '2016 presidential\nelection', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2020,1,1), 0.79, 'Coronavirus\noutbreak', fontsize=13, color=colors[4],horizontalalignment='center')
# format the ticks
years = mdates.YearLocator(2) # every year
months = mdates.MonthLocator() # every month
years_fmt = mdates.DateFormatter('%Y')
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(years_fmt)
ax.xaxis.set_minor_locator(months)
# round to nearest years.
datemin = np.datetime64(monthlyIndex['date'].iloc[0], 'Y')
datemax = np.datetime64(monthlyIndex['date'].iloc[-1], 'Y')
ax.set_xlim(datemin, datemax)
# format the coords message box
ax.format_xdata = mdates.DateFormatter('%Y')
ax.format_ydata = lambda x: '$%1.2f' % x
fig.autofmt_xdate()
# Set tick and label format
ax.tick_params(axis='both',which='major',labelsize=14,color='#d3d3d3')
ax.tick_params(axis='both',which='minor',color='#d3d3d3')
ax.set_ylabel('Monthly Uncertainty Index',fontsize=16)
ax.set_yticks(np.arange(round(min(y),1),round(max(y),1)+0.1,0.1))
ax.set_ylim(bottom=round(min(y),1))
ax.grid(color='#d3d3d3', which='major', axis='y')
# Borders
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_color('#d3d3d3')
ax.spines['bottom'].set_color('#d3d3d3')
# Title
fig.suptitle('Figure 3: Uncertainty about Regulation by Presidential Year',
y=0.95,fontsize=20)
ax.set_title('(January 1985 - January 2021)',fontsize=18,position=(0.5,1.12))
#Notes
fig.text(0.12, 0.03,'Notes: The uncertainty index was estimated using a dictionary-based sentiment analysis'
' approach applied to newspaper text and fixed effects\nregressions. '
'For details on the methodology, refer to the latest draft of the Sinclair and Xie paper'
' on "Sentiment and Uncertainty about Regulation".',
fontsize=14,style='italic')
# Adjust plot position
plt.subplots_adjust(top=0.81, bottom=0.15)
plt.savefig('Figures/Figure3.jpg', bbox_inches='tight')
plt.show()
#-----------------------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------
# Plot PC1 under Trump with weekly inset
x = monthlyIndex['date'][-49:]
y = monthlyIndex['SentimentPC1'][-49:]
fig, ax = plt.subplots(1, figsize=(15, 8))
ax.plot(x,y,color=colors[0],marker='D',markersize=8)
# Events
#ax.text(datetime(2016,12,1), 0.73, 'Transition\nof Power', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2018,12,1), -0.45, 'Trump midterm\nelection', fontsize=13, color=colors[4],horizontalalignment='center')
#ax.text(datetime(2020,3,1), -0.15, 'Coronavirus\noutbreak', fontsize=13, color=colors[4],horizontalalignment='center')
#ax.text(datetime(2020,12,1), 0.77, '2020 Presidential Election', fontsize=13, color=colors[4],horizontalalignment='center')
# format the ticks
years = mdates.YearLocator() # every year
months = mdates.MonthLocator() # every month
years_fmt = mdates.DateFormatter('%Y-%m')
#
# ax.xaxis.set_major_locator(years)
# ax.xaxis.set_major_formatter(years_fmt)
# ax.xaxis.set_minor_locator(months)
#
# # round to nearest years.
# datemin = np.datetime64(min(x), 'Y')
# datemax = np.datetime64(max(x), 'Y') + np.timedelta64(1, 'Y')
# ax.set_xlim(datemin, datemax)
# format the coords message box
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.format_ydata = lambda x: '$%1.2f' % x
fig.autofmt_xdate()
# Set tick and label format
ax.tick_params(axis='both',which='major',labelsize=14,color='#d3d3d3')
ax.tick_params(axis='both',which='minor',color='#d3d3d3')
ax.set_ylabel('Monthly Sentiment Index',fontsize=16)
ax.set_yticks(np.arange(-0.8,1.4,0.4))
#ax.set_ylim(bottom=round(min(y),1))
ax.grid(color='#d3d3d3', which='major', axis='y')
# Borders
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_color('#d3d3d3')
ax.spines['bottom'].set_color('#d3d3d3')
# Title
fig.suptitle('Figure 2: Sentiment about Regulatory Policy',
x=0.26, y=0.95,fontsize=20)
ax.set_title('(January 2017 - January 2021)',fontsize=18,position=(0.1,1.13))
# Inset plot
xins=weeklyIndex['date'][-52:]
yins=weeklyIndex['SentimentPC1'][-52:]
axins=inset_axes(ax, width=5, height=2.5, bbox_to_anchor=(.52, .75, .5, .5),
bbox_transform=ax.transAxes,loc=2)
axins.plot(xins,yins,color='#033C5A',linewidth=2,marker='D',markersize=5)
axins.format_xdata = mdates.DateFormatter('%Y-%m')
axins.set_yticks(np.arange(-2, 3, 1))
axins.grid(color='gray', which='major', axis='y', linestyle='dotted')
axins.tick_params(axis='both',which='major',labelsize=10)
axins.set_facecolor('#d3d3d3')
axins.set_alpha(0.1)
axins.set_title('Weekly Index over the Past 12 Months',fontsize=14,position=(0.5,0.85))
# Adjust plot position
plt.subplots_adjust(top=0.79, bottom=0.15)
#Notes
fig.text(0.12, 0.02,'Notes: The sentiment index was estimated using a dictionary-based sentiment analysis'
' approach applied to newspaper text and fixed effects\nregressions. '
'For details on the methodology, refer to the latest draft of the Sinclair and Xie paper'
' on "Sentiment and Uncertainty about Regulation".',
fontsize=14,style='italic')
plt.savefig("Figures/Figure2.jpg", bbox_inches='tight')
plt.show()
#-----------------------------------------------------------------------------------------------------------------------
# Plot PC1 with events by presidential year
x = monthlyIndex['date']
y = monthlyIndex['SentimentPC1']
fig, ax = plt.subplots(1, figsize=(15, 9))
ax.plot(x, y, color='black')
# Presidential year
ax.axvspan(datetime(1985,1,1),datetime(1989,2,1),alpha=0.1, color=colors[7])
ax.text(datetime(1987,1,1), 1.6, 'Ronald\nReagan', fontsize=13, color=colors[7],horizontalalignment='center')
ax.axvspan(datetime(1989,2,1),datetime(1993,2,1),alpha=0.1, color=colors[7])
ax.text(datetime(1991,1,1), 1.6, '<NAME>', fontsize=13, color=colors[7],horizontalalignment='center')
ax.axvspan(datetime(1993,2,1),datetime(2001,2,1),alpha=0.1, color=colors[0])
ax.text(datetime(1997,1,1), 1.6, 'Bill\nClinton', fontsize=13, color=colors[0],horizontalalignment='center')
ax.axvspan(datetime(2001,2,1),datetime(2009,2,1),alpha=0.1, color=colors[7])
ax.text(datetime(2005,1,1), 1.6, '<NAME>', fontsize=13, color=colors[7],horizontalalignment='center')
ax.axvspan(datetime(2009,2,1),datetime(2017,2,1),alpha=0.1, color=colors[0])
ax.text(datetime(2013,1,1), 1.6, 'Barack\nObama', fontsize=13, color=colors[0],horizontalalignment='center')
ax.axvspan(datetime(2017,2,1),datetime(2021,2,1),alpha=0.1, color=colors[7])
ax.text(datetime(2019,1,1),1.6, 'Donald\nTrump', fontsize=13, color=colors[7],horizontalalignment='center')
# events
ax.text(datetime(1993,9,1), 0.75, 'Clinton\nhealth care plan', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2001,9,1), -0.75, '9/11', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2006,11,1), 0.73, 'Bush midterm\nelection', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2008,9,1), -0.6, 'Lehman\nBrothers', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2010,3,1), -1, 'Obamacare', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2010,10,1),-1.25, 'Deepwater Horizon\noil spill', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2010,12,1), -1.4, 'Dodd-Frank', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2012,6,1), -1, 'Libor\nscandal', fontsize=13, color=colors[4],horizontalalignment='left')
ax.text(datetime(2016,11,1), 0.8 , '2016 presidential\nelection', fontsize=13, color=colors[4],horizontalalignment='center')
#ax.text(datetime(2020,1,1), -0.5, 'Coronavirus\noutbreak', fontsize=13, color=colors[4],horizontalalignment='center')
# format the ticks
years = mdates.YearLocator(2) # every year
months = mdates.MonthLocator() # every month
years_fmt = mdates.DateFormatter('%Y')
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(years_fmt)
ax.xaxis.set_minor_locator(months)
# round to nearest years.
datemin = np.datetime64(x.iloc[0], 'Y')
datemax = np.datetime64(x.iloc[-1], 'Y')
ax.set_xlim(datemin, datemax)
# format the coords message box
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.format_ydata = lambda x: '$%1.2f' % x
fig.autofmt_xdate()
# Set tick and label format
ax.tick_params(axis='both',which='major',labelsize=14,color='#d3d3d3')
ax.tick_params(axis='both',which='minor',color='#d3d3d3')
ax.set_ylabel('Monthly Sentiment Index', fontsize=16)
ax.set_yticks(np.arange(round(min(y), 0) - 0.5, round(max(y), 0) + 1, 0.5))
ax.grid(color='#d3d3d3', which='major', axis='y')
# Borders
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_color('#d3d3d3')
ax.spines['bottom'].set_color('#d3d3d3')
# Title
fig.suptitle("Figure 4: Sentiment about Regulation by Presidential Year",
y=0.95, fontsize=20)
ax.set_title('(January 1985 - January 2021)', fontsize=18,position=(0.5,1.12))
# Notes
fig.text(0.12, 0.03, 'Notes: The sentiment index was estimated using a dictionary-based sentiment analysis'
' approach applied to newspaper text and fixed effects\nregressions. '
'For details on the methodology, refer to the latest draft of the Sinclair and Xie paper'
' on "Sentiment and Uncertainty about Regulation".',
fontsize=14, style='italic')
# Adjust plot position
plt.subplots_adjust(top=0.81, bottom=0.15)
plt.savefig("Figures/Figure4.jpg", bbox_inches='tight')
plt.show()
|
StarcoderdataPython
|
4830149
|
<reponame>juforg/cookiecutter-flask-restful
# -*- coding: utf-8 -*-
# @author: songjie
# @email: <EMAIL>
# @date: 2020/08/25
# SJ coding conventions
# Naming:
# 1. Names should be self-explanatory: a variable's name must accurately reflect its meaning and content
# 2. Follow the naming rules of the language in use
# 3. Do not reuse one variable name for different purposes
# 4. Do not refer to the same thing by different names within one project
# 5. Functions/methods use verb+noun combinations; everything else uses noun combinations
# Design principles:
# 1. KISS principle: Keep it simple and stupid!
# 2. SOLID principles: S: single responsibility  O: open/closed  L: Liskov substitution  I: interface segregation  D: dependency inversion
#
from faker import Faker
def factoryboy_gen(text):
fake = Faker(locale='zh_CN')
text = text.upper()
if text in ["BIGINT", "INT", "INTEGER"]:
return fake.pyint(min_value=0, max_value=9999, step=1)
elif text in ["SMALLINT"]:
return fake.pyint(min_value=0, max_value=9999, step=1)
elif text in ["DECIMAL"]:
return fake.pydecimal(left_digits=None, right_digits=None, positive=False, min_value=None, max_value=None)
elif text in ["FLOAT"]:
return fake.pyfloat(left_digits=None, right_digits=None, positive=False, min_value=None, max_value=None)  # Python float
elif text in ["TIMESTAMP", "DATETIME"]:
return fake.date(pattern="%Y-%m-%d %H:%M:%S", end_datetime=None)
elif text in ["DATE"]:
return fake.date(pattern="%Y-%m-%d", end_datetime=None)
elif text in ["TIME"]:
return fake.date(pattern="%H:%M:%S", end_datetime=None)
elif text in ["TEXT", "VARCHAR", "NVARCHAR", "NCHAR", "CHAR"]:
return fake.pystr(min_chars=None, max_chars=20)
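# Hedged usage sketch (not part of the original module): the helper maps SQL
# column type names, case-insensitively, to Faker-generated sample values.
if __name__ == "__main__":
    for col_type in ["INT", "decimal", "DATETIME", "varchar"]:
        print(col_type, "->", factoryboy_gen(col_type))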
|
StarcoderdataPython
|
3343898
|
<reponame>robertblincoe/repo_test<gh_stars>1-10
"""card_full_width"""
from dash import html
def card_full_width(children):
"""
Apply CSS classes and styles to create a card with a grey background
that fits the full width of its parent container using CSS flexbox.
See https://developer.mozilla.org/en-US/docs/Learn/CSS/CSS_layout for more information.
"""
return html.Li(children, className="mini-card", style={"flex": "1 1 100%"})
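# Hedged usage sketch (assumption, not from the original repo): the card is
# intended to sit inside an html.Ul so the flexbox layout applies, e.g.
#
#   from dash import Dash, html
#   app = Dash(__name__)
#   app.layout = html.Ul(
#       [card_full_width(html.P("Total sales: 42"))],
#       style={"display": "flex", "listStyle": "none"},
#   )
#
# The inline style dict stands in for whatever the project's CSS defines for
# the parent container.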
|
StarcoderdataPython
|
156049
|
<filename>setup.py
#!/usr/bin/env python
"""SRE regex tools."""
"""
Copyright 2020 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from setuptools import find_packages, setup
__version__ = "0.0.1"
classifiers = """\
Environment :: Console
Intended Audience :: Developers
Intended Audience :: Science/Research
Intended Audience :: System Administrators
License :: OSI Approved :: Apache Software License
Operating System :: OS Independent
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: Implementation :: CPython
Topic :: Security
Development Status :: 4 - Beta
"""
setup(
name="sre-tools",
version=__version__,
description="Tools for manipulating sre_parse data structures",
license="Apache-2.0",
author_email="<EMAIL>",
url="https://github.com/jayvdb/sre-tools",
packages=find_packages(exclude=["tests", "tests.*"]),
python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
classifiers=classifiers.splitlines(),
tests_require=[],
)
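# Hedged usage note (assumption, not part of the original file): with this
# setup.py the package would typically be installed from a checkout with
# `pip install .` or packaged with `python setup.py sdist bdist_wheel`.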
|
StarcoderdataPython
|
1602409
|
<gh_stars>0
import pyowm
owm = pyowm.OWM('797153f746aae22307499da4ad723468')
observation = owm.weather_at_place('Almere,nl')
w = observation.get_weather()
print(w)
wind = w.get_wind()
temp = w.get_temperature('celsius')
print(wind)
print(temp)
observation_list = owm.weather_around_coords(52.371353, 5.222124)
|
StarcoderdataPython
|
1607744
|
output = []
with open('input.txt') as file:
bank = {}
for line in file.readlines():
operations = list(line.split())
if len(operations) == 2:
procedure, percent = operations
if procedure == "INCOME":
for client in bank:
if bank[client] > 0:
bank[client] = bank[client] +int( bank[client] * int(percent)/100)
if procedure == "BALANCE":
if percent not in bank:
output.append("ERROR")
else:
output.append(f"{bank[percent]}")
if len(operations) == 4:
procedure, client_1, client_2, sumt = operations
if procedure == "TRANSFER":
if client_1 not in bank:
bank[client_1] = 0
if client_2 not in bank:
bank[client_2] = 0
bank[client_1] -= int(sumt)
bank[client_2] += int(sumt)
if len(operations) == 3:
procedure, client, sumt = operations
if client not in bank:
bank[client] = 0
if procedure == "DEPOSIT":
bank[client] += int(sumt)
if procedure == "WITHDRAW":
bank[client] -= int(sumt)
with open('output.txt', 'w') as file:
for key in output:
file.write(f"{key}\n")
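# Worked example (assumption about the intended format, inferred from the
# parsing above). Given this input.txt:
#
#   DEPOSIT alice 100
#   TRANSFER alice bob 30
#   INCOME 10
#   BALANCE alice
#   BALANCE carol
#
# alice ends at 70 + 7 = 77 after the 10% income accrual, bob at 33, and the
# unknown client carol yields ERROR, so output.txt contains:
#
#   77
#   ERROR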
|
StarcoderdataPython
|
1741912
|
<gh_stars>0
import pdb
import time
import os
import subprocess
import re
import random
import json
import numpy as np
import glob
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
import socket
import argparse
import threading
import _thread
import signal
from datetime import datetime
import csv
import opt_dash
parser = argparse.ArgumentParser(description='TCP client')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='select testcase')
args = parser.parse_args()
with open('../job_trace/job_queue_sc_50.json', 'r') as fp: #TODO
queue = json.load(fp)
queue_dict = {}
arrival_time = 0
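# simulate job arrivals with Poisson-distributed inter-arrival gaps (mean 30s)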
for item in queue:
arrival_time += np.random.poisson(30)
queue_dict[item] = arrival_time
queue_timer = time.time()
queue_delay = {}
for item in queue:
queue_delay[str(item)] = 0
# predict batch time simulated
with open('batch_times/K80_batch_time_sc.json', 'r') as fp: #TODO
K80_batch_pred = json.load(fp)
with open('batch_times/V100_batch_time_sc.json', 'r') as fp:
V100_batch_pred = json.load(fp)
for key,value in K80_batch_pred.items():
    # add a fixed-magnitude prediction error with random sign
    pred_error = value * 0.035 # 3.5% error
direction = 1 if random.random() < 0.5 else -1
K80_batch_pred[key] = round(value + direction*pred_error,3)
for key,value in V100_batch_pred.items():
    # add a fixed-magnitude prediction error with random sign
    pred_error = value * 0.023 # 2.3% error
direction = 1 if random.random() < 0.5 else -1
V100_batch_pred[key] = round(value + direction*pred_error,3)
multigpu_list = []#['1', '2', '3']#, '4', '5', '6', '7'] #TODO
job_start = {} #{'49': time1, '15': time2...}
JCT = {}
for item in queue:
JCT[str(item)] = 0
completion = {}
for item in queue:
completion[str(item)] = 0
overhead = {} # initialize so that every job starts with 0s overhead time
for item in queue:
overhead[str(item)] = 0
ovhd_start = {} # initialize this to 0 as well
for item in queue:
ovhd_start[str(item)] = 0
b_start = {} # initialize this to 0 as well
for item in queue:
b_start[str(item)] = 0
c_start = {} # initialize this to 0 as well
for item in queue:
c_start[str(item)] = 0
d_start = {} # initialize this to 0 as well
for item in queue:
d_start[str(item)] = 0
ovhd_a = {} # {1: [10, 12, ...], 2: [xx]}
for item in queue:
ovhd_a[str(item)] = []
ovhd_b = {} # {1: [10, 12, ...], 2: [xx]}
for item in queue:
ovhd_b[str(item)] = []
ovhd_c = {} # {1: [10, 12, ...], 2: [xx]}
for item in queue:
ovhd_c[str(item)] = []
ovhd_d = {} # {1: [10, 12, ...], 2: [xx]}
for item in queue:
ovhd_d[str(item)] = []
ovhd_total = {} # {1: [10, 12, ...], 2: [xx]}
for item in queue:
ovhd_total[str(item)] = []
k80_1st = {}
for item in queue:
k80_1st[str(item)] = []
v100_1st = {}
for item in queue:
v100_1st[str(item)] = []
num_mig = {} # initialize migration time to 0
for item in queue:
num_mig[str(item)] = 0
queue_start = {} # initialize this to 0 as well
for item in queue:
queue_start[str(item)] = 0
queue_time = {} # initialize this to 0 as well
for item in queue:
queue_time[str(item)] = 0
V100_batch_time = {}
for item in queue:
V100_batch_time[str(item)] = 0
K80_batch_time = {}
for item in queue:
K80_batch_time[str(item)] = 0
V100_1st_ovhd = {}
for item in queue:
V100_1st_ovhd[str(item)] = 0
K80_1st_ovhd = {}
for item in queue:
K80_1st_ovhd[str(item)] = 0
K80_start_time = {}
for item in queue:
K80_start_time[str(item)] = 0
V100_start_time = {}
for item in queue:
V100_start_time[str(item)] = 0
promote_start_time = {}
for item in queue:
promote_start_time[str(item)] = 0
demote_list = []
job_remaining_batch = {}
for item in queue:
job_remaining_batch[str(item)] = 0
K80_time = {}
for item in queue:
K80_time[str(item)] = 0
V100_time = {}
for item in queue:
V100_time[str(item)] = 0
gpu_usage_time = [] # don't initialize this
gpu_usage = []
gpu_usage_completion = []
speedup_dict = {}
for item in queue:
speedup_dict[str(item)] = 0
birthplace = {}
for item in queue:
birthplace[str(item)] = 'none'
index = 0
K80_cap = 8 #TODO
V100_cap = 4
K80_used = 0
V100_used = 0
K80_per_node = 8
V100_per_node = 4
K80_job = {}
for i in range(K80_cap):
K80_job[str(i)] = 'idle'
V100_job = {}
for i in range(V100_cap):
V100_job[str(i)] = 'idle'
step1_job = []
step2_job = []
pc_job = []
K80_node = ['c2178']#, 'c2182']
V100_node = ['d1003']#, 'd1015']
host_node = 'c0145'
testcase = args.tc
### also, change .h5 file folder in jobs ###
INTERVAL = 30 # make decision every 30s
run_log = open('run.log','w')
# function to detect if there are two free or reserved GPUs in a node
# returns an empty list if there is none, otherwise returns list with gpu id in V100/K80_jobs
def detect_2_gpus(gpu_dict, gpu_per_node, status='idle'):
job_list = list(gpu_dict.values())
num_nodes = int(len(job_list) / gpu_per_node)
for i in range(num_nodes):
start = i * gpu_per_node
end = start + gpu_per_node
sliced_list = job_list[start:end]
occurence = sliced_list.count(status)
if occurence >= 2:
            # return every matching gpu id in this node; callers slice off the first two
indexs = [j for j, e in enumerate(sliced_list) if e == status]
return [str(j + start) for j in indexs]
return []
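# e.g. with gpu_per_node=8 and only GPUs '1' and '5' of the first node idle,
# detect_2_gpus(K80_job, 8) returns ['1', '5']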
def K80_LUT(gpu):
quotient = int(gpu) // 8
remainder = int(gpu) % 8
real_node = K80_node[quotient]
real_gpu = str(remainder)
return real_node, real_gpu
def V100_LUT(gpu):
quotient = int(gpu) // 4
remainder = int(gpu) % 4
real_node = V100_node[quotient]
real_gpu = str(remainder)
return real_node, real_gpu
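# e.g. with the single-node lists above, K80_LUT('3') returns ('c2178', '3') and V100_LUT('2') returns ('d1003', '2')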
def send_signal(node, cmd):
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
port = 10000
# Connect the socket to the port where the server is listening
server_address = (node, int(port))
print('connecting to {} port {}'.format(*server_address), file=run_log, flush=True)
sock.connect(server_address)
try:
# Send data
message = cmd.encode('utf-8') #b'save 35' #b'start 35 gpu 6'#b'save 35'
print('sending {!r}'.format(message), file=run_log, flush=True)
sock.sendall(message)
while True:
data = sock.recv(32)
if 'success' in data.decode('utf-8'):
# print('received {!r}'.format(data))
break
else:
print('waiting for success signal', file=run_log, flush=True)
time.sleep(1)
finally:
#print('closing socket')
sock.close()
def get_avail_id(gpu_dict):
# input is K80_job or V100_job (dict)
key_list = list(gpu_dict.keys())
value_list = list(gpu_dict.values())
indexs = [j for j, e in enumerate(value_list) if e == 'idle']
return [key_list[j] for j in indexs]
# 2-gpu jobs in new_pool have duplicated items
# returns mapping of jobs in "new_pool" to GPUs
def GPU_placement(GPU_avail, new_pool, gpu_type='K80', raise_error=True):
mapping = {}
skip = False
res_group = [] # group reserved GPU together
for i in range(len(GPU_avail)):
if skip:
skip = False
continue
else:
# two gpus from the same node
if gpu_type == 'K80':
GPU_per_node = K80_per_node
elif gpu_type == 'V100':
GPU_per_node = V100_per_node
if i!=len(GPU_avail)-1 and int(GPU_avail[i])//GPU_per_node==int(GPU_avail[i+1])//GPU_per_node:
skip = True
res_group.append([GPU_avail[i], GPU_avail[i+1]])
else:
res_group.append([GPU_avail[i]])
group_1gpu = [i for i in res_group if len(i) == 1] # 1gpu id
group_2gpu = [i for i in res_group if len(i) == 2] # 2gpu id [['1','2'],['4','7']]
pool_1gpu = [i for i in new_pool if i not in multigpu_list] # 1gpu job
pool_2gpu = [i for i in new_pool if i in multigpu_list] # 2gpu job
if len(GPU_avail) < len(new_pool) or 2*len(group_2gpu) < len(pool_2gpu):
if raise_error:
if gpu_type == 'K80':
raise ValueError('Bug with K80 placement for new jobs, more jobs than free gpus')
elif gpu_type == 'V100':
raise ValueError('Bug with V100 placement for new jobs, more jobs than free gpus')
else:
return mapping
# if there is no 2-gpu job
if set(new_pool).isdisjoint(multigpu_list):
for i in range(len(new_pool)):
mapping[new_pool[i]] = GPU_avail[i]
else:
# first, fill in all 1gpu slots with 1-gpu jobs as much as possible
for i in group_1gpu[:]:
if len(pool_1gpu) > 0:
mapping[pool_1gpu[0]] = i[0]
pool_1gpu.pop(0)
for i in group_2gpu[:]:
if len(pool_2gpu) > 1:
mapping[pool_2gpu[0]] = ','.join(i)
pool_2gpu = [i for i in pool_2gpu if i != pool_2gpu[0]]
elif len(pool_1gpu) > 0:
mapping[pool_1gpu[0]] = i[0]
if len(pool_1gpu) > 1:
mapping[pool_1gpu[1]] = i[1]
pool_1gpu.pop(1)
pool_1gpu.pop(0)
return mapping
#aa = GPU_placement(['0','1','2','3','4'], ['3','3','1','1','50'])
# checks if 2-GPU jobs can be promoted/demoted without locality issue
# if cannot, remove 2-GPU job and corresponding 1-GPU job until all jobs can fit
# then returns new_K80_avail, new_V100_avail, new_promoted, new_demoted
def locality_check(K80_avail, V100_avail, promoted, demoted):
'''
K80/V100_avail: ['1', '2', '5']
promoted/demoted: ['7','7','50','70']
'''
for item in range(2):#[K80_avail, V100_avail]:
skip = False
res_group = [] # group reserved GPU together
GPU_avail = [K80_avail,V100_avail][item]
for i in range(len(GPU_avail)):
if skip:
skip = False
continue
else:
# two gpus from the same node
if item == 0:
GPU_per_node = K80_per_node
elif item == 1:
GPU_per_node = V100_per_node
if i!=len(GPU_avail)-1 and int(GPU_avail[i])//GPU_per_node==int(GPU_avail[i+1])//GPU_per_node:
skip = True
res_group.append([GPU_avail[i], GPU_avail[i+1]])
else:
res_group.append([GPU_avail[i]])
if item == 0:
K80_1gpu = [i for i in res_group if len(i) == 1] # 1gpu id
K80_2gpu = [i for i in res_group if len(i) == 2] # 2gpu id [['1','2'],['4','7']]
elif item == 1:
V100_1gpu = [i for i in res_group if len(i) == 1] # 1gpu id
V100_2gpu = [i for i in res_group if len(i) == 2] # 2gpu id
promoted_1gpu = [i for i in promoted if i not in multigpu_list] # 1gpu job
promoted_2gpu = [i for i in promoted if i in multigpu_list] # 2gpu job ['3','3','4','4','10']
demoted_1gpu = [i for i in demoted if i not in multigpu_list] # 1gpu job
demoted_2gpu = [i for i in demoted if i in multigpu_list] # 2gpu job
condition1 = len(K80_avail) >= len(demoted) and 2*len(K80_2gpu) >= len(demoted_2gpu)
condition2 = len(V100_avail) >= len(promoted) and 2*len(V100_2gpu) >= len(promoted_2gpu)
if condition1 and condition2:
return None
else:
print('Notice: promoted/demoted jobs cannot fit in their destination due to locality', file=run_log, flush=True)
print('Remove all 2-gpu jobs from this migration decision', file=run_log, flush=True) # meaning they stay wherever they were before
for job in promoted_2gpu:
promoted.remove(job)
for job in demoted_2gpu:
demoted.remove(job)
for gpu_pair in K80_2gpu:
for gpu in gpu_pair:
K80_avail.remove(gpu)
for gpu_pair in V100_2gpu:
for gpu in gpu_pair:
V100_avail.remove(gpu)
# check if need to remove 1-gpu jobs as well
if len(K80_avail) < len(demoted_1gpu):
diff = len(demoted_1gpu) - len(K80_avail)
for i in range(diff):
removed_1gpu = demoted[0]
demoted.remove(removed_1gpu)
# also need to remove its corresponding GPU
V100_avail.remove(demoted_V100_map_1gpu[removed_1gpu])
elif len(V100_avail) < len(promoted_1gpu):
diff = len(promoted_1gpu) - len(V100_avail)
for i in range(diff):
removed_1gpu = promoted[0]
promoted.remove(removed_1gpu)
# also need to remove its corresponding GPU
K80_avail.remove(promoted_K80_map_1gpu[removed_1gpu])
return K80_avail, V100_avail, promoted, demoted
#locality_check(['2'],['2'],['44'],['48'])
# input: a list of jobs
# output: a dict of jobs with their remaining time on K80 and V100
# the remaining time on the other GPU type need to include migration overhead
# 1. ovhd_total: the mean is average migration overhead once
# 2. 1st_ovhd: extra time spent on 1st epoch after migration
# the returned dict looks like this {'50': [300, 150], '78': [1000, 300]}
# if a job can't be migrated yet (not in step1_job list) it shouldn't be in the input list
# elif a job can be migrated but has not been migrated yet, or has been migrated but has no measured speedup yet,
# then its remaining time on the other gpu type should include the migration overhead
def get_remaining_time(job_list):
result_dict = {}
for job in job_list:
if job not in step1_job:
            raise ValueError('job ' + job + ' is not in step1_job; its remaining time cannot be estimated yet')
# use prediction for remaining time on non-birth GPU
# also use a general migration overhead
elif job in step1_job and job not in step2_job:
mig_overhead = 40
K80_remain = job_remaining_batch[job] * K80_batch_time[job]
V100_remain = job_remaining_batch[job] * V100_batch_time[job]
K80_pred = job_remaining_batch[job] * K80_batch_pred[job]
V100_pred = job_remaining_batch[job] * V100_batch_pred[job]
# this is not accurate, but just to force job to run on the other GPU type not profiled
if birthplace[job] in K80_node:
result_dict[job] = [K80_remain, V100_pred + mig_overhead]
elif birthplace[job] in V100_node:
result_dict[job] = [K80_pred + mig_overhead, V100_remain]
else: # job has its K80_batch_time and V100_batch_time profiled
K80_remain = job_remaining_batch[job] * K80_batch_time[job]
V100_remain = job_remaining_batch[job] * V100_batch_time[job]
K80_mig_ovhd = np.mean(ovhd_total[job]) + K80_1st_ovhd[job]
V100_mig_ovhd = np.mean(ovhd_total[job]) + V100_1st_ovhd[job]
if job in list(K80_job.values()):
result_dict[job] = [K80_remain, V100_remain + V100_mig_ovhd]
elif job in list(V100_job.values()):
result_dict[job] = [K80_remain + K80_mig_ovhd, V100_remain]
return result_dict
#d, e, f = random_promotion(['0','1','4','8'], ['3','3','1','1'], [])
def save_job(node, job): # save_job('c2176', '50')
# first wait for the job to be qualified for checkpointing
while True: # wait for ckpt_qual to be available
global ckpt_qual_dict
if ckpt_qual_dict['job'+job] == 1:
ckpt_qual_dict['job'+job] = 0
break
time.sleep(5)
global pid_dict
pid = pid_dict['job'+job]
send_signal(node, 'save ' + job + ' pid ' + pid) # 'save 50 pid 10000'
global ovhd_start
ovhd_start[job] = time.time()
    time.sleep(3) # in case epoch_waste is communicated too frequently
# resume job
def resume_job(node, gpu, job): # resume_job('c2176', '3', '50')
cmd = 'resume ' + job + ' gpu ' + gpu
send_signal(node, cmd)
# start job
def start_job(node, gpu, job):
cmd = 'start ' + job + ' gpu ' + gpu
send_signal(node, cmd)
############### first clear finish status of all jobs ####################
pid_dict = {}
for i in range(len(queue)):
job_name = 'job' + str(i + 1)
pid_dict[job_name] = 0
checkpoint_dict = {}
for i in range(len(queue)):
job_name = 'job' + str(i + 1)
checkpoint_dict[job_name] = 0
ckpt_qual_dict = {}
for i in range(len(queue)):
job_name = 'job' + str(i + 1)
ckpt_qual_dict[job_name] = 0
finish_dict = {}
for i in range(len(queue)):
job_name = 'job' + str(i + 1)
finish_dict[job_name] = 0
epoch_waste_dict = {}
for i in range(len(queue)):
job_name = 'job' + str(i + 1)
epoch_waste_dict[job_name] = 0
#################### background thread running TCP socket ########################
def thread_function():
# here listen on the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (host_node, 10002)
print('starting up on {} port {}'.format(*server_address), file=run_log, flush=True)
sock.bind(server_address)
sock.listen(5)
while True:
# Wait for a connection
connection, client_address = sock.accept()
try:
while True:
data = connection.recv(32)
if data:
data_str = data.decode('utf-8')
global K80_start_time
global V100_start_time, promote_start_time
global K80_job
global V100_job
global K80_time
global V100_time
global ovhd_a, ovhd_b, ovhd_c, ovhd_d, ovhd_start, overhead, ovhd_total, v100_1st, k80_1st
global b_start, c_start, d_start, completion
global step1_job, step2_job
global V100_batch_time, K80_batch_time, job_remaining_batch, speedup_dict
global K80_1st_ovhd, V100_1st_ovhd
if 'ckpt_qual' in data_str:
global ckpt_qual_dict
job_name = data_str.split(' ')[0]
ckpt_qual_dict[job_name] = 1
elif 'finish' in data_str:
global finish_dict
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
finish_dict[job_name] = 1
JCT[job] = int(time.time() - job_start[job])
if job in list(K80_job.values()):
K80_time[job] += int(time.time() - K80_start_time[job])
elif job in list(V100_job.values()):
V100_time[job] += int(time.time() - V100_start_time[job])
elif 'pid' in data_str:
global pid_dict
job_name = data_str.split(' ')[0]
pid = data_str.split(' ')[2]
pid_dict[job_name] = pid
elif 'checkpoint' in data_str: # can only be received after save signal is sent
global checkpoint_dict
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
checkpoint_dict[job_name] = 1
ovhd_a[job].append(int(time.time() - ovhd_start[job]))
b_start[job] = time.time()
elif 'waste' in data_str:
global epoch_waste_dict
job_name = data_str.split(' ')[0]
epoch_waste_time = data_str.split(' ')[2]
epoch_waste_dict[job_name] += int(epoch_waste_time)
elif 'b_end' in data_str:
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
ovhd_b[job].append(int(time.time() - b_start[job]))
c_start[job] = time.time()
elif 'c_end' in data_str:
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
ovhd_c[job].append(int(time.time() - c_start[job]))
d_start[job] = time.time()
elif 'd_end' in data_str:
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
ovhd_d[job].append(int(time.time() - d_start[job]))
ovhd_total[job].append(int(time.time() - ovhd_start[job]))
if ovhd_start[job] != 0:
overhead[job] += int(time.time() - ovhd_start[job])
ovhd_start[job] = 0
if job in list(K80_job.values()):
K80_start_time[job] = time.time()
elif job in list(V100_job.values()):
V100_start_time[job] = time.time()
promote_start_time[job] = time.time()
elif '1st_epoch' in data_str: # 'job50 1st_epoch 35'
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
epoch_time = int(data_str.split(' ')[2])
if job in list(K80_job.values()):
k80_1st[job].append(epoch_time)
elif job in list(V100_job.values()):
v100_1st[job].append(epoch_time)
elif 'completion' in data_str: # 'job50 completion 0.33'
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
completion_portion = float(data_str.split(' ')[2])
completion[job] = completion_portion
elif 'batch_time' in data_str: # 'job50 batch_time 0.042'
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
batch_time = float(data_str.split(' ')[2])
# also step1_job and step2_job
# if job birthplace is K80, K80_batch_time is collected, then step1 complete
if job in list(K80_job.values()) and K80_batch_time[job] == 0:
K80_batch_time[job] = batch_time
if birthplace[job] in K80_node:
step1_job.append(job)
elif birthplace[job] in V100_node:
step2_job.append(job)
speedup_dict[job] = round(K80_batch_time[job] / V100_batch_time[job], 3)
elif job in list(V100_job.values()) and V100_batch_time[job] == 0:
V100_batch_time[job] = batch_time
if birthplace[job] in V100_node:
step1_job.append(job)
elif birthplace[job] in K80_node:
step2_job.append(job)
speedup_dict[job] = round(K80_batch_time[job] / V100_batch_time[job], 3)
elif 'remain_batch' in data_str: # 'job50 remain_batch 156300'
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
remaining_batch = int(data_str.split(' ')[2])
job_remaining_batch[job] = remaining_batch
elif '1st_ovhd' in data_str: # 'job50 1st_ovhd 4.99'
job_name = data_str.split(' ')[0]
job = job_name.replace('job','')
ovhd_time = float(data_str.split(' ')[2])
if job in list(K80_job.values()) and K80_1st_ovhd[job] == 0:
K80_1st_ovhd[job] = ovhd_time
elif job in list(V100_job.values()) and V100_1st_ovhd[job] == 0:
V100_1st_ovhd[job] = ovhd_time
# if 'ckpt_qual' in data_str or 'finish' in data_str or 'checkpoint' in data_str:
# print('received ' + data_str)
connection.sendall(b'success')
#time.sleep(5)
else:
break
finally:
connection.close()
x = threading.Thread(target=thread_function, daemon=True)
x.start()
###############################################################################
######################################################################
while True:
# termination condition:
# all the jobs have finished
################### check for finished jobs on K80 and V100 ##############################
for gpu, job in K80_job.items():
if job != 'idle':
if finish_dict['job'+job] == 1:
K80_used -= 1
K80_job[gpu] = 'idle'
print('K80 finished job: ' + job, file=run_log, flush=True)
for gpu, job in V100_job.items():
if job != 'idle':
if finish_dict['job'+job] == 1:
V100_used -= 1
V100_job[gpu] = 'idle'
print('V100 finished job: ' + job, file=run_log, flush=True)
if job in demote_list:
demote_list.remove(job)
################ submit new jobs to vacant K80 GPUs ############################
# first fill in vacant V100s
if V100_used < V100_cap:
V100_free = V100_cap - V100_used
for i in range(V100_free):
time_passed = int(time.time() - queue_timer)
if index < len(queue) and queue_dict[queue[index]] < time_passed: # make sure job has arrived in the queue
job_new = str(queue[index])
if job_new in multigpu_list:
# find 2 gpus in the same node to schedule it
idle_gpus = detect_2_gpus(V100_job, V100_per_node)[:2]
if len(idle_gpus) > 0:
node_string = ''
for gpu in idle_gpus:
real_node, real_gpu = V100_LUT(gpu)
if gpu == idle_gpus[1]:
gpu_str += real_gpu
node_string = real_node
job_start[job_new] = time.time()
queue_delay[job_new] = int(time_passed - queue_dict[queue[index]])
V100_start_time[job_new] = time.time()
index += 1
else:
gpu_str = real_gpu + ','
V100_job[gpu] = job_new
V100_used += 1
start_job(node_string, gpu_str, job_new)
birthplace[job_new] = node_string
time.sleep(5) # don't communicate too often
else:
for gpu, job in V100_job.items():
if job == 'idle': # schedule new job here if idle
real_node, real_gpu = V100_LUT(gpu)
start_job(real_node, real_gpu, job_new)
birthplace[job_new] = real_node
V100_job[gpu] = job_new
job_start[job_new] = time.time()
queue_delay[job_new] = int(time_passed - queue_dict[queue[index]])
V100_start_time[job_new] = time.time()
index += 1
V100_used += 1
time.sleep(5) # don't communicate too often
break
# first fill in vacant K80s
if K80_used < K80_cap:
K80_free = K80_cap - K80_used
for i in range(K80_free):
time_passed = int(time.time() - queue_timer)
if index < len(queue) and queue_dict[queue[index]] < time_passed: # make sure job has arrived in the queue
job_new = str(queue[index])
if job_new in multigpu_list:
# find 2 gpus in the same node to schedule it
idle_gpus = detect_2_gpus(K80_job, K80_per_node)[:2]
if len(idle_gpus) > 0:
node_string = ''
for gpu in idle_gpus:
real_node, real_gpu = K80_LUT(gpu)
if gpu == idle_gpus[1]:
gpu_str += real_gpu
node_string = real_node
job_start[job_new] = time.time()
queue_delay[job_new] = int(time_passed - queue_dict[queue[index]])
K80_start_time[job_new] = time.time()
index += 1
else:
gpu_str = real_gpu + ','
K80_job[gpu] = job_new
K80_used += 1
start_job(node_string, gpu_str, job_new)
birthplace[job_new] = node_string
time.sleep(5) # don't communicate too often
else:
for gpu, job in K80_job.items():
if job == 'idle': # schedule new job here if idle
real_node, real_gpu = K80_LUT(gpu)
start_job(real_node, real_gpu, job_new)
birthplace[job_new] = real_node
K80_job[gpu] = job_new
job_start[job_new] = time.time()
queue_delay[job_new] = int(time_passed - queue_dict[queue[index]])
K80_start_time[job_new] = time.time()
index += 1
K80_used += 1
time.sleep(5) # don't communicate too often
break
################## make promotion decisions ################
# figure out which job enters the pool and which GPUs enter the pool
# job must be in step1_job, and if it's on V100, it must have passed demote_qualify_time
# the selected job's current GPU also enters GPU pool. And if GPU is idle, it gets added into the pool as well
# look at demote list
for gpu, job in V100_job.items():
if job != 'idle':
            # for jobs that have finished profiling, add the job
if job not in demote_list and job in step2_job and len(ovhd_total[job]) > 0:
job_speedup = 1 - (1/speedup_dict[job]) # this is different from original DASH speedup
job_ovhd = np.mean(ovhd_total[job]) # 100
k80_1st_ovhd = K80_1st_ovhd[job]
v100_1st_ovhd = V100_1st_ovhd[job]
demote_qualify_time = (2 * job_ovhd + k80_1st_ovhd + v100_1st_ovhd) / job_speedup
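                # e.g. job_ovhd=100s, K80/V100 1st-epoch overheads of 20s/10s and a 2x speedup (job_speedup=0.5) give (200+20+10)/0.5 = 460s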
if job_speedup == 0 or speedup_dict[job] == 0:
pdb.set_trace()
if len(v100_1st[job]) > 0:
v100_1st_epoch = max(v100_1st[job])
else:
v100_1st_epoch = 0
if int(time.time() - promote_start_time[job]) > max(demote_qualify_time, v100_1st_epoch):
demote_list.append(job)
                    print('job' + job + ' qualified for demote for passing demote qualify time ' +
str(int(demote_qualify_time)), file=run_log, flush=True)
# for jobs who have not finished profiling, add the job if it's qualified and it started on V100
elif job not in demote_list and job not in step2_job and job in step1_job and birthplace[job] in V100_node:
demote_list.append(job)
                print('job' + job + ' qualified for demote for profiling', file=run_log, flush=True)
job_pool = []
K80_pool = []
V100_pool = []
for gpu, job in K80_job.items():
if job in step1_job:
if job not in job_pool: # for 2-gpu jobs, add the job once, but add both gpus
job_pool.append(job)
K80_pool.append(gpu)
elif job == 'idle':
K80_pool.append(gpu)
for gpu, job in V100_job.items():
if job in demote_list:
if job not in job_pool: # for 2-gpu jobs, add the job once, but add both gpus
job_pool.append(job)
V100_pool.append(gpu)
elif job == 'idle':
V100_pool.append(gpu)
# prepare inputs and perform optimization
num_GPUs = [len(K80_pool), len(V100_pool)]
job_num_GPUs = {}
for job in job_pool:
if job in multigpu_list:
job_num_GPUs[job] = 2
else:
job_num_GPUs[job] = 1
job_remaining_time = get_remaining_time(job_pool)
promoted = [] # jobs to be placed in V100. 2-gpu jobs are duplicated
demoted = [] # jobs to be placed in K80
# perform 1st optimization
if len(job_num_GPUs) > 0 and len(job_remaining_time) > 0:
opt_decision = opt_dash.optimize_promotion(num_GPUs, job_num_GPUs, job_remaining_time)
print('job_pool:',job_pool,'K80_pool:',K80_pool,'V100_pool:',V100_pool,'remain_time',job_remaining_time,'decision:',opt_decision, file=run_log, flush=True)
# check if placement of promo/demo 2-gpu jobs are viable
# if not viable: remove jobs that benefit least from promo/hurt least from demo
for job in opt_decision:
placement = opt_decision[job]
if placement == 1 and job in list(K80_job.values()):
promoted.append(job)
# duplicate the job if it's 2-gpu job
if job in multigpu_list:
promoted.append(job)
elif placement == 0 and job in list(V100_job.values()):
demoted.append(job)
# duplicate the job if it's 2-gpu job
if job in multigpu_list:
demoted.append(job)
if len(promoted) > 0:
print('original promotion (2-gpu dup)', promoted, file=run_log, flush=True)
if len(demoted) > 0:
print('original demotion (2-gpu dup)', demoted, file=run_log, flush=True)
if len(demoted) > 0 or len(promoted) > 0:
# generate K80/V100 GPU list that are either idle or have job in promoted/demoted
# to be used by placement function
K80_avail = []
V100_avail = []
promoted_K80_map_1gpu = {} # original mapping for 1-gpu jobs, used in "locality check"
demoted_V100_map_1gpu = {}
for gpu, job in K80_job.items():
if job == 'idle':
K80_avail.append(gpu)
elif job in promoted:
K80_avail.append(gpu)
if job not in multigpu_list:
promoted_K80_map_1gpu[job] = gpu
for gpu, job in V100_job.items():
if job == 'idle':
V100_avail.append(gpu)
elif job in demoted:
V100_avail.append(gpu)
if job not in multigpu_list:
demoted_V100_map_1gpu[job] = gpu
# use these information: K80_avail, V100_avail, promoted, demoted
check_result = locality_check(K80_avail, V100_avail, promoted, demoted)
if check_result is not None:
K80_avail, V100_avail, promoted, demoted = check_result
# now place promoted jobs on V100_avail and demoted jobs on K80_avail
K80_mapping = GPU_placement(K80_avail, demoted, gpu_type='K80')
V100_mapping = GPU_placement(V100_avail, promoted, gpu_type='V100')
# make promotion decisions
if len(promoted) > 0 or len(demoted) > 0:
# remove duplicated 2-gpu jobs from promoted and demoted
promoted = list(dict.fromkeys(promoted))
demoted = list(dict.fromkeys(demoted))
# stop all promoted jobs on K80
checkpoint_finish_check = []
for job in promoted[:]:
if job not in multigpu_list:
# need to find its current gpu on K80
current_gpu = ''
for gpu, job_K in K80_job.items():
if job_K == job:
current_gpu = gpu
break
real_node, real_gpu = K80_LUT(current_gpu)
K80_job[current_gpu] = 'idle'
K80_used -= 1
else:
current_gpu = []
for gpu, job_K in K80_job.items():
if job_K == job:
current_gpu.append(gpu)
real_node, real_gpu = K80_LUT(current_gpu[0])
for item in current_gpu:
K80_job[item] = 'idle'
K80_used -= 1
save_job(real_node, job)
if finish_dict['job'+job] != 1:
K80_time[job] += int(time.time() - K80_start_time[job])
checkpoint_finish_check.append(job)
# stop all demoted jobs on V100
for job in demoted[:]:
if job not in multigpu_list:
# need to find its current gpu on V100
current_gpu = ''
for gpu, job_K in V100_job.items():
if job_K == job:
current_gpu = gpu
break
real_node, real_gpu = V100_LUT(current_gpu)
V100_job[current_gpu] = 'idle'
V100_used -= 1
else:
current_gpu = []
for gpu, job_K in V100_job.items():
if job_K == job:
current_gpu.append(gpu)
real_node, real_gpu = V100_LUT(current_gpu[0])
for item in current_gpu:
V100_job[item] = 'idle'
V100_used -= 1
save_job(real_node, job)
if finish_dict['job'+job] != 1:
V100_time[job] += int(time.time() - V100_start_time[job])
checkpoint_finish_check.append(job)
demote_list.remove(job)
# wait for all GPUs to be available
if len(checkpoint_finish_check) > 0:
while True:
time.sleep(5)
for job in checkpoint_finish_check[:]:
if checkpoint_dict['job'+job] == 1: # checkpoint has finished, gpu is free
print(job + ' checkpointed successfully', file=run_log, flush=True)
checkpoint_dict['job'+job] = 0 # reset it
checkpoint_finish_check.remove(job)
# also check if job already finished before sending checkpoint signal
elif finish_dict['job'+job] == 1:
print(job + ' finished before receiving checkpoint signal', file=run_log, flush=True)
checkpoint_finish_check.remove(job)
if len(checkpoint_finish_check) == 0:
break
# give it some time to cleanup old checkpointed jobs
time.sleep(3)
# resume promoted jobs on V100
for job in promoted[:]:
if finish_dict['job'+job] != 1:
gpu = V100_mapping[job]
if job not in multigpu_list:
real_node, real_gpu = V100_LUT(gpu)
resume_job(real_node, real_gpu, job)
V100_job[gpu] = job
V100_used += 1
else:
gpu_split = gpu.split(',')
node_string = ''
for g in gpu_split:
real_node, real_gpu = V100_LUT(g)
if g == gpu_split[1]:
gpu_str += real_gpu
node_string = real_node
else:
gpu_str = real_gpu + ','
V100_job[g] = job
V100_used += 1
resume_job(node_string, gpu_str, job)
promoted.remove(job)
num_mig[job] += 1
else: # job finished before checkpointing
                print('job' + job + ' has finished before checkpointing', file=run_log, flush=True)
promoted.remove(job)
# resume demoted jobs on K80
for job in demoted[:]:
if finish_dict['job'+job] != 1:
gpu = K80_mapping[job]
if job not in multigpu_list:
real_node, real_gpu = K80_LUT(gpu)
resume_job(real_node, real_gpu, job)
K80_job[gpu] = job
K80_used += 1
else:
gpu_split = gpu.split(',')
node_string = ''
for g in gpu_split:
real_node, real_gpu = K80_LUT(g)
if g == gpu_split[1]:
gpu_str += real_gpu
node_string = real_node
else:
gpu_str = real_gpu + ','
K80_job[g] = job
K80_used += 1
resume_job(node_string, gpu_str, job)
demoted.remove(job)
num_mig[job] += 1
else: # job finished before checkpointing
                print('job' + job + ' has finished before checkpointing', file=run_log, flush=True)
demoted.remove(job)
# perform a check, make sure all promoted/demoted jobs are scheduled
if len(promoted) > 0 or len(demoted) > 0:
raise ValueError('Bug with promotion scheme, more jobs than free gpus')
############## monitor GPU usage ############
usage = K80_used + V100_used
time_stamp = int(time.time() - queue_timer)
gpu_usage_time.append(time_stamp)
gpu_usage.append(usage)
total_completion = np.sum(list(completion.values()))
gpu_usage_completion.append(total_completion)
############### wait for next iteration
time.sleep(INTERVAL)
################ check if termination condition is met ################
K80_idle_num = sum(value == 'idle' for value in K80_job.values())
V100_idle_num = sum(value == 'idle' for value in V100_job.values())
if K80_idle_num == K80_cap and V100_idle_num == V100_cap and index == len(queue):
print('all jobs are finished!', file=run_log, flush=True)
break
# get average JCT
average_JCT = np.average(list(JCT.values()))
JCT['average'] = average_JCT
average_overhead = np.average(list(overhead.values()))
overhead['average'] = average_overhead
average_queue_delay = np.average(list(queue_delay.values()))
queue_delay['average'] = average_queue_delay
# after everything is finished
print('finished all runs', file=run_log, flush=True)
JCT_name = testcase + '_JCT.json'
overhead_name = testcase + '_overhead.json'
num_mig_name = testcase + '_num_mig.json'
epoch_waste_name = testcase + '_epoch_waste.json'
ckpt_qual_name = 'ckpt_qual.json'
finish_name = 'finish.json'
K80_time_name = testcase + '_K80_time.json'
V100_time_name = testcase + '_V100_time.json'
gpu_usage_name = testcase + '_gpu_usage.csv'
completion_name = 'completion.json'
ovhd_a_name = testcase + '_ovhd_a.json'
ovhd_b_name = testcase + '_ovhd_b.json'
ovhd_c_name = testcase + '_ovhd_c.json'
ovhd_d_name = testcase + '_ovhd_d.json'
ovhd_total_name = testcase + '_ovhd_total.json'
K80_1st_ovhd_name = testcase + '_K80_1st_ovhd.json'
V100_1st_ovhd_name = testcase + '_V100_1st_ovhd.json'
queue_delay_name = testcase + '_queue_delay.json'
K80_batch_time_name = testcase + '_K80_batch_time.json'
V100_batch_time_name = testcase + '_V100_batch_time.json'
birthplace_name = testcase + '_birthplace.json'
speedup_name = testcase + '_speedup.json'
job_remaining_batch_name = 'job_remaining_batch.json'
demote_list_name = 'demote_list.json'
with open(JCT_name, 'w') as fp1:
json.dump(JCT, fp1, sort_keys=True, indent=4)
with open(overhead_name, 'w') as fp3:
json.dump(overhead, fp3, sort_keys=True, indent=4)
with open(num_mig_name, 'w') as fp3:
json.dump(num_mig, fp3, sort_keys=True, indent=4)
with open(epoch_waste_name, 'w') as fp3:
json.dump(epoch_waste_dict, fp3, sort_keys=True, indent=4)
with open(ckpt_qual_name, 'w') as fp1:
json.dump(ckpt_qual_dict, fp1, sort_keys=True, indent=4)
with open(finish_name, 'w') as fp1:
json.dump(finish_dict, fp1, sort_keys=True, indent=4)
with open(K80_time_name, 'w') as fp3:
json.dump(K80_time, fp3, sort_keys=True, indent=4)
with open(V100_time_name, 'w') as fp3:
json.dump(V100_time, fp3, sort_keys=True, indent=4)
with open(ovhd_a_name, 'w') as fp3:
json.dump(ovhd_a, fp3, sort_keys=True, indent=4)
with open(ovhd_b_name, 'w') as fp3:
json.dump(ovhd_b, fp3, sort_keys=True, indent=4)
with open(ovhd_c_name, 'w') as fp3:
json.dump(ovhd_c, fp3, sort_keys=True, indent=4)
with open(ovhd_d_name, 'w') as fp3:
json.dump(ovhd_d, fp3, sort_keys=True, indent=4)
with open(ovhd_total_name, 'w') as fp3:
json.dump(ovhd_total, fp3, sort_keys=True, indent=4)
with open(K80_1st_ovhd_name, 'w') as fp3:
json.dump(K80_1st_ovhd, fp3, sort_keys=True, indent=4)
with open(V100_1st_ovhd_name, 'w') as fp3:
json.dump(V100_1st_ovhd, fp3, sort_keys=True, indent=4)
with open(demote_list_name, 'w') as fp1:
json.dump(demote_list, fp1, sort_keys=True, indent=4)
with open(completion_name, 'w') as fp1:
json.dump(completion, fp1, sort_keys=True, indent=4)
with open(queue_delay_name, 'w') as fp1:
json.dump(queue_delay, fp1, sort_keys=True, indent=4)
with open(K80_batch_time_name, 'w') as fp3:
json.dump(K80_batch_time, fp3, sort_keys=True, indent=4)
with open(V100_batch_time_name, 'w') as fp3:
json.dump(V100_batch_time, fp3, sort_keys=True, indent=4)
with open(birthplace_name, 'w') as fp1:
json.dump(birthplace, fp1, sort_keys=True, indent=4)
with open(speedup_name, 'w') as fp1:
json.dump(speedup_dict, fp1, sort_keys=True, indent=4)
with open(job_remaining_batch_name, 'w') as fp1:
json.dump(job_remaining_batch, fp1, sort_keys=True, indent=4)
gpu_usage_time = np.asarray(gpu_usage_time)
gpu_usage = np.asarray(gpu_usage)
gpu_usage_completion = np.asarray(gpu_usage_completion)
rows = zip(gpu_usage_time, gpu_usage, gpu_usage_completion)
with open(gpu_usage_name, 'w') as f:
writer = csv.writer(f)
for row in rows:
writer.writerow(row)
|
StarcoderdataPython
|
1609588
|
"""
Basic building blocks for generic class based views.
We don't bind behaviour to http method handlers yet,
which allows mixin classes to be composed in interesting ways.
"""
from __future__ import unicode_literals
from rest_framework import status
from rest_framework.response import Response
from rest_framework.settings import api_settings
class CreateDataFrameMixin(object):
"""
Adds a row to the dataframe.
"""
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
def perform_create(self, serializer):
dataframe = self.get_dataframe()
return self.update_dataframe(dataframe.append(serializer.validated_data))
def get_success_headers(self, data):
try:
return {'Location': data[api_settings.URL_FIELD_NAME]}
except (TypeError, KeyError):
return {}
class ListDataFrameMixin(object):
"""
List the contents of a dataframe.
"""
def list(self, request, *args, **kwargs):
dataframe = self.filter_dataframe(self.get_dataframe())
page = self.paginate_dataframe(dataframe)
if page is not None:
serializer = self.get_serializer(page)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(dataframe)
return Response(serializer.data)
class RetrieveDataFrameMixin(object):
"""
Retrieve a dataframe row.
"""
def retrieve(self, request, *args, **kwargs):
instance = self.get_object()
serializer = self.get_serializer(instance)
return Response(serializer.data)
class UpdateDataFrameMixin(object):
"""
Update a dataframe row.
"""
def update(self, request, *args, **kwargs):
partial = kwargs.pop('partial', False)
instance = self.get_object()
serializer = self.get_serializer(data=request.data, partial=partial)
serializer.is_valid(raise_exception=True)
self.perform_update(instance, serializer)
return Response(serializer.data)
def perform_update(self, instance, serializer):
validated_data = serializer.validated_data
        # .loc replaces the pandas .ix indexer, which has since been removed
        instance.loc[validated_data.index, validated_data.columns] = validated_data[:]
        dataframe = self.get_dataframe()
        dataframe.loc[instance.index] = instance
return self.update_dataframe(dataframe)
def partial_update(self, request, *args, **kwargs):
kwargs['partial'] = True
return self.update(request, *args, **kwargs)
class DestroyDataFrameMixin(object):
"""
Destroy a dataframe row.
"""
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
self.perform_destroy(instance)
return Response(status=status.HTTP_204_NO_CONTENT)
def perform_destroy(self, instance):
dataframe = self.get_dataframe()
return self.update_dataframe(dataframe.drop(instance.index))
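# A minimal composition sketch (not part of this module): these mixins are meant to be
# combined with a generic viewset-style base that supplies get_dataframe(),
# update_dataframe(), filter_dataframe(), paginate_dataframe(), get_object() and
# get_serializer(). The base class and serializer names below are hypothetical.
#
#   class ProductFrameViewSet(ListDataFrameMixin,
#                             CreateDataFrameMixin,
#                             RetrieveDataFrameMixin,
#                             UpdateDataFrameMixin,
#                             DestroyDataFrameMixin,
#                             GenericDataFrameViewSet):
#       serializer_class = ProductFrameSerializer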
|
StarcoderdataPython
|
1724240
|
<reponame>Bluenix2/easyrpc
from traceback import format_exc
from easyrpc.proxy import EasyRpcProxy
class EasyRpcProxyLogger(EasyRpcProxy):
def __init__(self, *args, **kwargs):
args = list(args)
# override - default expects_results=True -> False -
# logs do not expect return values
args[8] = False
super().__init__(*args, **kwargs)
async def info(self, message):
await self['info'](message)
async def warning(self, message):
await self['warning'](message)
async def error(self, message):
await self['error'](message)
async def debugger(self, message):
await self['debug'](message)
async def exception(self, message):
stack_trace = format_exc()
await self['exception'](message, stack_trace)
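# Usage sketch (assumes an already-constructed EasyRpcProxyLogger instance named
# rpc_logger, built with the usual EasyRpcProxy arguments, and a placeholder
# risky_operation callable):
#
#   await rpc_logger.info('service started')
#   try:
#       risky_operation()
#   except Exception:
#       await rpc_logger.exception('risky_operation failed')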
|
StarcoderdataPython
|
3202351
|
<gh_stars>1-10
broker_url = 'amqp://guest@localhost//'
timezone = 'Europe/Moscow'
worker_max_tasks_per_child = 1
|
StarcoderdataPython
|
5042
|
import pygame
import random
pygame.init()
clock = pygame.time.Clock()
fps = 60
#game window
bottom_panel = 150
screen_width = 800
screen_height = 400 + bottom_panel
screen = pygame.display.set_mode((screen_width, screen_height))
pygame.display.set_caption('Battle')
#define game variables
current_fighter = 1
total_fighters = 3
action_cooldown = 0
action_wait_time = 90
attack = False
potion = False
clicked = False
#define fonts
font = pygame.font.SysFont('Times New Roman', 26)
#define colours
red = (255, 0, 0)
green = (0, 255, 0)
#load images
#background image
background_img = pygame.image.load('img/Background/background.png').convert_alpha()
#panel image
panel_img = pygame.image.load('img/Icons/panel.png').convert_alpha()
#sword image
sword_img = pygame.image.load('img/Icons/sword.png').convert_alpha()
#create function for drawing text
def draw_text(text, font, text_col, x, y):
img = font.render(text, True, text_col)
screen.blit(img, (x, y))
#function for drawing background
def draw_bg():
screen.blit(background_img, (0, 0))
#function for drawing panel
def draw_panel():
#draw panel rectangle
screen.blit(panel_img, (0, screen_height - bottom_panel))
#show knight stats
draw_text(f'{knight.name} HP: {knight.hp}', font, red, 100, screen_height - bottom_panel + 10)
for count, i in enumerate(bandit_list):
#show name and health
draw_text(f'{i.name} HP: {i.hp}', font, red, 550, (screen_height - bottom_panel + 10) + count * 60)
#fighter class
class Fighter():
def __init__(self, x, y, name, max_hp, strength, potions):
self.name = name
self.max_hp = max_hp
self.hp = max_hp
self.strength = strength
self.start_potions = potions
self.potions = potions
self.alive = True
self.animation_list = []
self.frame_index = 0
self.action = 0#0:idle, 1:attack, 2:hurt, 3:dead
self.update_time = pygame.time.get_ticks()
#load idle images
temp_list = []
for i in range(8):
img = pygame.image.load(f'img/{self.name}/Idle/{i}.png')
img = pygame.transform.scale(img, (img.get_width() * 3, img.get_height() * 3))
temp_list.append(img)
self.animation_list.append(temp_list)
#load attack images
temp_list = []
for i in range(8):
img = pygame.image.load(f'img/{self.name}/Attack/{i}.png')
img = pygame.transform.scale(img, (img.get_width() * 3, img.get_height() * 3))
temp_list.append(img)
self.animation_list.append(temp_list)
self.image = self.animation_list[self.action][self.frame_index]
self.rect = self.image.get_rect()
self.rect.center = (x, y)
def update(self):
animation_cooldown = 100
#handle animation
#update image
self.image = self.animation_list[self.action][self.frame_index]
#check if enough time has passed since the last update
if pygame.time.get_ticks() - self.update_time > animation_cooldown:
self.update_time = pygame.time.get_ticks()
self.frame_index += 1
#if the animation has run out then reset back to the start
if self.frame_index >= len(self.animation_list[self.action]):
self.idle()
def idle(self):
        #set variables to idle animation
self.action = 0
self.frame_index = 0
self.update_time = pygame.time.get_ticks()
def attack(self, target):
#deal damage to enemy
rand = random.randint(-5, 5)
damage = self.strength + rand
target.hp -= damage
#check if target has died
if target.hp < 1:
target.hp = 0
target.alive = False
#set variables to attack animation
self.action = 1
self.frame_index = 0
self.update_time = pygame.time.get_ticks()
def draw(self):
screen.blit(self.image, self.rect)
class HealthBar():
def __init__(self, x, y, hp, max_hp):
self.x = x
self.y = y
self.hp = hp
self.max_hp = max_hp
def draw(self, hp):
#update with new health
self.hp = hp
#calculate health ratio
ratio = self.hp / self.max_hp
pygame.draw.rect(screen, red, (self.x, self.y, 150, 20))
pygame.draw.rect(screen, green, (self.x, self.y, 150 * ratio, 20))
knight = Fighter(200, 260, 'Knight', 30, 10, 3)
bandit1 = Fighter(550, 270, 'Bandit', 20, 6, 1)
bandit2 = Fighter(700, 270, 'Bandit', 20, 6, 1)
bandit_list = []
bandit_list.append(bandit1)
bandit_list.append(bandit2)
knight_health_bar = HealthBar(100, screen_height - bottom_panel + 40, knight.hp, knight.max_hp)
bandit1_health_bar = HealthBar(550, screen_height - bottom_panel + 40, bandit1.hp, bandit1.max_hp)
bandit2_health_bar = HealthBar(550, screen_height - bottom_panel + 100, bandit2.hp, bandit2.max_hp)
run = True
while run:
clock.tick(fps)
#draw background
draw_bg()
#draw panel
draw_panel()
knight_health_bar.draw(knight.hp)
bandit1_health_bar.draw(bandit1.hp)
bandit2_health_bar.draw(bandit2.hp)
#draw fighters
knight.update()
knight.draw()
for bandit in bandit_list:
bandit.update()
bandit.draw()
#control player actions
#reset action variables
attack = False
potion = False
target = None
#make sure mouse is visible
pygame.mouse.set_visible(True)
pos = pygame.mouse.get_pos()
for count, bandit in enumerate(bandit_list):
if bandit.rect.collidepoint(pos):
#hide mouse
pygame.mouse.set_visible(False)
#show sword in place of mouse cursor
screen.blit(sword_img, pos)
if clicked == True:
attack = True
target = bandit_list[count]
#player action
if knight.alive == True:
if current_fighter == 1:
action_cooldown += 1
if action_cooldown >= action_wait_time:
#look for player action
#attack
if attack == True and target != None:
knight.attack(target)
current_fighter += 1
action_cooldown = 0
#enemy action
for count, bandit in enumerate(bandit_list):
if current_fighter == 2 + count:
if bandit.alive == True:
action_cooldown += 1
if action_cooldown >= action_wait_time:
#attack
bandit.attack(knight)
current_fighter += 1
action_cooldown = 0
else:
current_fighter += 1
#if all fighters have had a turn then reset
if current_fighter > total_fighters:
current_fighter = 1
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
if event.type == pygame.MOUSEBUTTONDOWN:
clicked = True
else:
clicked = False
pygame.display.update()
pygame.quit()
|
StarcoderdataPython
|
87696
|
import bpy
import addon_utils
import importlib
import sys
from . utils import current_addon_exists, get_addon_name
class RunAddon(bpy.types.Operator):
bl_idname = "code_autocomplete.run_addon"
bl_label = "Run Addon"
bl_description = "Unregister, reload and register it again."
bl_options = {"REGISTER"}
@classmethod
def poll(cls, context):
return current_addon_exists()
def execute(self, context):
bpy.ops.code_autocomplete.save_files()
addon_name = get_addon_name()
module = sys.modules.get(addon_name)
if module:
addon_utils.disable(addon_name)
importlib.reload(module)
addon_utils.enable(addon_name)
return {"FINISHED"}
|
StarcoderdataPython
|
4834775
|
<filename>build/dev/zero_conf_print_service_info.py
#!/usr/bin/env python3
""" Example of resolving a service with a known name """
import logging
import sys
from zeroconf import Zeroconf
TYPE = '_http._tcp.local.'
NAME = '_activity_assist'
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
if len(sys.argv) > 1:
assert sys.argv[1:] == ['--debug']
logging.getLogger('zeroconf').setLevel(logging.DEBUG)
zeroconf = Zeroconf()
try:
tmp = zeroconf.get_service_info(TYPE, NAME + '.' + TYPE)
print('port: ', tmp.port)
print('server: ', tmp.server)
props = {}
for key, value in tmp.properties.items():
props[key.decode("utf-8")] = value.decode("utf-8")
print(props)
finally:
zeroconf.close()
|
StarcoderdataPython
|
128835
|
<gh_stars>1-10
#1st method
sq1 = []
for x in range(10):
sq1.append(x**2)
print("sq1 = ", sq1)
# 2nd method
sq2 = [x**2 for x in range(10)]
print("sq2 = ", sq2)
sq3 = [(x,y) for x in [1,2,3] for y in [3,1,4] if x!=y]
print("sq3 = ", sq3)
vec = [-4, -2, 0, 2, 4]
print("x*2", [x*2 for x in vec])
print("x if x>0", [x for x in vec if x>=0])
print("abs(x) = ", [abs(x) for x in vec])
freshfruit = [' banana', ' loganberry ', 'passion fruit ']
print("weapon.strip() = ", [weapon.strip() for weapon in freshfruit])
print("(x, x**2) = ", [(x, x**2) for x in range(6)])
vec2 = [[1,2,3], [4,5,6], [7,8,9]]
print("num = ", [num for elem in vec2 for num in elem])
|
StarcoderdataPython
|
1637495
|
import copy
class Prototype:
def __init__(self):
self._objects = {}
def register_object(self, name, obj):
self._objects[name] = obj
def unregister_object(self, name):
del self._objects[name]
def clone(self, name, **attr):
obj = copy.deepcopy(self._objects.get(name))
obj.__dict__.update(attr)
return obj
class Coord:
x: int
y: int
z: int
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
def __str__(self):
return f'{self.x} {self.y} {self.z}'
if __name__ == '__main__':
a = Coord(2, 1, 5)
prototype = Prototype()
prototype.register_object('point_a', a)
b = prototype.clone('point_a')
c = prototype.clone('point_a', x=1, y=2, comment='point_c')
print([str(i) for i in (a, b, c)])
print(c.comment)
|
StarcoderdataPython
|
141304
|
import os
import json
import time
import threading
import logging
import torch
import glob
import shutil
from .trial_local import TrialConnector
from .thread_manager import ThreadManager
from dataloop_services.plugin_utils import get_dataset_obj
import dtlpy as dl
from logging_utils import logginger
from copy import deepcopy
logger = logginger(__name__)
class Launcher:
def __init__(self, optimal_model, ongoing_trials=None):
self.optimal_model = optimal_model
self.ongoing_trials = ongoing_trials
self.num_available_devices = torch.cuda.device_count()
self.home_path = optimal_model.data['home_path']
self.dataset_name = optimal_model.data['dataset_name']
def launch_trial(self, hp_values):
model_specs = self.optimal_model.unwrap()
inputs = {
'devices': {'gpu_index': 0},
'hp_values': hp_values,
'model_specs': model_specs,
}
meta_checkpoint = self._run_trial_demo_execution(inputs)
return {'metrics': meta_checkpoint['metrics'],
'meta_checkpoint': meta_checkpoint}
def launch_trials(self):
if self.ongoing_trials is None:
raise Exception('for this method ongoing_trials object must be passed during the init')
if self.ongoing_trials.num_trials > 0:
self._launch_local_trials()
def _launch_local_trials(self):
self.trial_connector = TrialConnector()
threads = ThreadManager()
model_specs = self.optimal_model.unwrap()
logger.info('launching new set of trials')
device = 0
for trial_id, trial in self.ongoing_trials.trials.items():
logger.info('launching trial_' + trial_id + ': ' + str(trial))
inputs = {
'devices': {'gpu_index': device},
'hp_values': trial['hp_values'],
'model_specs': model_specs
}
threads.new_thread(target=self._collect_metrics,
inputs=inputs,
trial_id=trial_id)
device = device + 1
threads.wait()
ongoing_trials_results = threads.results
for trial_id, metrics_and_checkpoint_dict in ongoing_trials_results.items():
self.ongoing_trials.update_metrics(trial_id, metrics_and_checkpoint_dict)
def _collect_metrics(self, inputs_dict, trial_id, results_dict):
thread_name = threading.currentThread().getName()
logger.info('starting thread: ' + thread_name)
meta_checkpoint = self.trial_connector.run(inputs_dict)
results_dict[trial_id] = {'metrics': meta_checkpoint['metrics'],
'meta_checkpoint': meta_checkpoint}
logger.info('finished thread: ' + thread_name)
|
StarcoderdataPython
|
1768324
|
import turtle
def draw_square(some_turtle):
for i in range(1,5):
some_turtle.forward(100)
some_turtle.right(90)
def draw_art():
window = turtle.Screen()
window.bgcolor("red")
#Create the turtle Brad - Draws a square
brad = turtle.Turtle()
brad.shape("turtle")
brad.color("yellow")
brad.speed(2)
for i in range(1,37):
draw_square(brad)
brad.right(10)
    #Create the turtle Angie - Draws a circle
angie = turtle.Turtle()
angie.shape("arrow")
angie.color("blue")
angie.circle(100)
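    #Create the turtle Charlie - Draws a triangle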
charlie = turtle.Turtle()
charlie.shape("arrow")
charlie.color("green")
    for i in range(3):
charlie.forward(100)
charlie.left(120)
window.exitonclick()
draw_art()
|
StarcoderdataPython
|
113510
|
import argparse
import os
import sys
from PyQt5 import QtWidgets
from .rama_analyzer import RamaAnalyzerMain
def run():
p = argparse.ArgumentParser(description='Analyze Ramachandran plots of Gromacs trajectories')
p.add_argument('-f', action='store', dest='XVGFILE', type=str, help='.xvg file produced by gmx rama',
default='rama.xvg')
args = vars(p.parse_args())
app = QtWidgets.QApplication([])
mainwin = RamaAnalyzerMain()
filename = os.path.expanduser(args['XVGFILE'])
if os.path.exists(filename):
mainwin.load(filename)
mainwin.show()
sys.exit(app.exec_())
if __name__ == '__main__':
run()
|
StarcoderdataPython
|
1692142
|
<filename>multitest_transport/api/build_channel_api_test.py
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for build_channel_api."""
import tradefed_cluster.util.google_import_fixer
import datetime
from absl.testing import absltest
import mock
from protorpc import protojson
from six.moves import urllib
from google.oauth2 import credentials as authorized_user
from google.oauth2 import service_account
from multitest_transport.api import api_test_util
from multitest_transport.api import build_channel_api
from multitest_transport.models import build
from multitest_transport.models import messages
from multitest_transport.models import ndb_models
from multitest_transport.plugins import base as plugins
from multitest_transport.util import file_util
from multitest_transport.util import oauth2_util
class AndroidBuildProvider(plugins.BuildProvider):
"""Dummy build provider for testing."""
name = 'Android'
mock = mock.MagicMock()
def __init__(self):
super(AndroidBuildProvider, self).__init__()
self.AddOptionDef('mock_option')
def GetBuildItem(self, path):
return self.mock.GetBuildItem(path)
class GoogleDriveBuildProvider(plugins.BuildProvider):
"""Dummy build provider for testing."""
name = 'Google Drive'
class BuildChannelApiTest(api_test_util.TestCase):
def setUp(self):
super(BuildChannelApiTest, self).setUp(build_channel_api.BuildChannelApi)
def _CreateMockBuildChannel(self, name='android', provider='Android'):
return build.AddBuildChannel(name, provider, {})
def testList(self):
self._CreateMockBuildChannel(name='android', provider='Android')
self._CreateMockBuildChannel(name='drive', provider='Google Drive')
res = self.app.get('/_ah/api/mtt/v1/build_channels')
build_channel_list = protojson.decode_message(
messages.BuildChannelList, res.body)
# verify that the right channels are returned
build_channels = build_channel_list.build_channels
self.assertEqual(2, len(build_channels))
self.assertItemsEqual(['android', 'drive'], [
channel.name for channel in build_channels])
self.assertItemsEqual(['Android', 'Google Drive'], [
channel.provider_name for channel in build_channels])
def testGet(self):
config = self._CreateMockBuildChannel()
res = self.app.get('/_ah/api/mtt/v1/build_channels/%s' % config.key.id())
build_channel_config = protojson.decode_message(messages.BuildChannelConfig,
res.body)
# verify that the right channel is returned
self.assertEqual(config.key.id(), build_channel_config.id)
self.assertEqual('android', build_channel_config.name)
def testGet_notFound(self):
# unknown build channel ID
res = self.app.get('/_ah/api/mtt/v1/build_channels/%s' % 666,
expect_errors=True)
# throws if channel not found
self.assertEqual('404 Not Found', res.status)
@mock.patch.object(build.BuildChannel, 'ListBuildItems')
def testListBuildItems(self, mock_channel):
# create placeholders build items to return from channel
build_items = [
plugins.BuildItem(name='item1', path=u'path1', is_file=True),
plugins.BuildItem(name='item2', path=u'path2', is_file=True)
]
mock_channel.return_value = build_items, 'next_page_token'
config = self._CreateMockBuildChannel()
res = self.app.get(
'/_ah/api/mtt/v1/build_channels/%s/build_items' % config.key.id())
build_item_list = protojson.decode_message(messages.BuildItemList, res.body)
# verify same build items are returned
self.assertEqual('next_page_token', build_item_list.next_page_token)
self.assertItemsEqual(['item1', 'item2'], [
item.name for item in build_item_list.build_items])
def testListBuildItems_notFound(self):
# unknown build channel ID
res = self.app.get('/_ah/api/mtt/v1/build_channels/%s/build_items' % 666,
expect_errors=True)
# throws if channel not found
self.assertEqual('404 Not Found', res.status)
def testLookupBuildItem(self):
config = self._CreateMockBuildChannel(name='android', provider='Android')
url = 'mtt:///%s/path/to/file.ext' % config.key.id()
build_item = plugins.BuildItem(
name='foo',
path='zzz/bar/foo',
is_file=True,
size=1234,
timestamp=datetime.datetime.utcnow())
AndroidBuildProvider.mock.GetBuildItem.return_value = build_item
res = self.app.get(
'/_ah/api/mtt/v1/build_channels/build_item_lookup?url=%s' % (
urllib.parse.quote(url)))
build_item_msg = protojson.decode_message(messages.BuildItem, res.body)
AndroidBuildProvider.mock.GetBuildItem.assert_called_once_with(
'path/to/file.ext')
self.assertEqual(
messages.Convert(build_item, messages.BuildItem), build_item_msg)
@mock.patch.object(file_util.FileHandle, 'Get')
def testLookupBuildItem_withHttpUrl(self, mock_find_handle_get):
url = 'http://foo.com/bar/zzz/file.ext?foo=bar'
mock_file_handle = mock.MagicMock()
mock_find_handle_get.return_value = mock_file_handle
mock_file_info = file_util.FileInfo(
url=url,
is_file=True,
total_size=1234,
timestamp=datetime.datetime.utcnow())
mock_file_handle.Info.return_value = mock_file_info
res = self.app.get(
'/_ah/api/mtt/v1/build_channels/build_item_lookup?url=%s' % (
urllib.parse.quote(url)))
build_item_msg = protojson.decode_message(messages.BuildItem, res.body)
mock_find_handle_get.assert_called_once_with(url)
mock_file_handle.Info.assert_called_once()
self.assertEqual('file.ext', build_item_msg.name)
self.assertEqual(mock_file_info.is_file, build_item_msg.is_file)
self.assertEqual(mock_file_info.total_size, build_item_msg.size)
self.assertEqual(mock_file_info.timestamp, build_item_msg.timestamp)
def testDelete(self):
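    """Tests that a build channel can be deleted."""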
# create build channel and confirm it exists
config = self._CreateMockBuildChannel()
self.assertIsNotNone(config.key.get())
# delete channel and verify that it no longer exists
self.app.delete('/_ah/api/mtt/v1/build_channels/%s' % config.key.id())
self.assertIsNone(config.key.get())
def testCreate(self):
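    """Tests that a build channel can be created."""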
data = {
'name': 'testName',
'id': '',
'provider_name': 'Android',
'options': [{
'name': 'mock_option',
'value': '123123123'
}]
}
res = self.app.post_json('/_ah/api/mtt/v1/build_channels', data)
msg = protojson.decode_message(messages.BuildChannelConfig, res.body)
build_channel_config = messages.ConvertToKey(ndb_models.BuildChannelConfig,
msg.id).get()
self.assertIsNotNone(build_channel_config)
self.assertEqual(data['name'], build_channel_config.name)
self.assertEqual(data['provider_name'], build_channel_config.provider_name)
self.assertEqual(data['options'][0]['name'],
build_channel_config.options[0].name)
def testUpdate(self):
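    """Tests that a build channel can be updated."""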
build_channel_config = self._CreateMockBuildChannel()
build_channel_config_id = str(build_channel_config.key.id())
data = {
'id': build_channel_config_id,
'name': 'bar',
'provider_name': 'Android'
}
res = self.app.put_json(
'/_ah/api/mtt/v1/build_channels/%s' % build_channel_config_id, data)
build_channel_config = build_channel_config.key.get()
msg = protojson.decode_message(messages.BuildChannelConfig, res.body)
self.assertEqual(
messages.Convert(build_channel_config, messages.BuildChannelConfig),
msg)
self.assertEqual(data['name'], build_channel_config.name)
@mock.patch.object(oauth2_util, 'GetOAuth2Flow')
@mock.patch.object(oauth2_util, 'GetRedirectUri')
def testGetAuthorizationInfo(self, mock_get_redirect, mock_get_flow):
"""Tests that authorization info can be retrieved."""
config = self._CreateMockBuildChannel(name='android', provider='Android')
# Mock getting URIs from OAuth2 utilities
mock_get_redirect.return_value = 'redirect_uri', True
oauth2_flow = mock.MagicMock()
oauth2_flow.authorization_url.return_value = 'auth_uri', None
mock_get_flow.return_value = oauth2_flow
# Verify authorization info
response = self.app.get(
'/_ah/api/mtt/v1/build_channels/%s/auth?redirect_uri=%s' %
(config.key.id(), 'redirect_uri'))
authorization_info = protojson.decode_message(messages.AuthorizationInfo,
response.body)
self.assertEqual(authorization_info.url, 'auth_uri')
self.assertEqual(authorization_info.is_manual, True)
def testGetAuthorizationInfo_notFound(self):
"""Tests that an error occurs when a build channel is not found."""
response = self.app.get(
'/_ah/api/mtt/v1/build_channels/%s/auth?redirect_uri=%s' %
('unknown', 'redirect_uri'),
expect_errors=True)
self.assertEqual('404 Not Found', response.status)
@mock.patch.object(oauth2_util, 'GetOAuth2Flow')
def testAuthorizeConfig(self, mock_get_flow):
"""Tests that a build channel can be authorized."""
config = self._CreateMockBuildChannel(name='android', provider='Android')
# Mock getting credentials from OAuth2 utilities
oauth2_flow = mock.MagicMock(credentials=authorized_user.Credentials(None))
mock_get_flow.return_value = oauth2_flow
# Verify that credentials were obtained and stored
self.app.post(
'/_ah/api/mtt/v1/build_channels/%s/auth?redirect_uri=%s&code=%s'
% (config.key.id(), 'redirect_uri', 'code'))
oauth2_flow.fetch_token.assert_called_once_with(code='code')
config = ndb_models.BuildChannelConfig.get_by_id(config.key.id())
self.assertIsNotNone(config.credentials)
def testAuthorizeConfig_notFound(self):
"""Tests that an error occurs when a build channel is not found."""
response = self.app.post(
'/_ah/api/mtt/v1/build_channels/%s/auth_return?redirect_uri=%s&code=%s'
% ('unknown', 'redirect_uri', 'code'), expect_errors=True)
self.assertEqual('404 Not Found', response.status)
@mock.patch.object(service_account.Credentials, 'from_service_account_info')
def testAuthorizeConfigWithServiceAccount(self, mock_parse_key):
"""Tests that a build channel can be authorized with a service account."""
config = self._CreateMockBuildChannel(name='android', provider='Android')
# Mock parsing service account JSON key
mock_parse_key.return_value = service_account.Credentials(None, None, None)
# Verify that credentials were obtained and stored
self.app.put_json(
'/_ah/api/mtt/v1/build_channels/%s/auth' % config.key.id(),
{'value': '{}'})
config = ndb_models.BuildChannelConfig.get_by_id(config.key.id())
self.assertIsNotNone(config.credentials)
def testAuthorizeWithServiceAccount_notFound(self):
"""Tests that an error occurs when a build channel is not found."""
response = self.app.put_json(
'/_ah/api/mtt/v1/build_channels/%s/auth' % 'unknown', {'value': '{}'},
expect_errors=True)
self.assertEqual('404 Not Found', response.status)
def testUnauthorize(self):
"""Tests that a build channel can be unauthorized."""
config = self._CreateMockBuildChannel(name='android', provider='Android')
config.credentials = authorized_user.Credentials(None)
config.put()
# Verify that credentials were removed
self.app.delete('/_ah/api/mtt/v1/build_channels/%s/auth' % config.key.id())
config = ndb_models.BuildChannelConfig.get_by_id(config.key.id())
self.assertIsNone(config.credentials)
def testUnauthorize_notFound(self):
"""Tests that an error occurs when a build channel is not found."""
response = self.app.delete(
'/_ah/api/mtt/v1/build_channels/%s/auth' % 'unknown',
expect_errors=True)
self.assertEqual('404 Not Found', response.status)
if __name__ == '__main__':
absltest.main()
|
StarcoderdataPython
|
164632
|
<gh_stars>0
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\conditional_layers\conditional_layer_handlers.py
# Compiled at: 2018-05-11 22:46:41
# Size of source mod 2**32: 5273 bytes
from gsi_handlers.gameplay_archiver import GameplayArchiver
from sims4.gsi.dispatcher import GsiHandler
from sims4.gsi.schema import GsiGridSchema, GsiFieldVisualizers
import date_and_time, enum, services
conditional_layer_service_schema = GsiGridSchema(label='Conditional Layers/Conditional Layer Service')
conditional_layer_service_schema.add_field('conditional_layer', label='Class Name', width=1, unique_field=True)
conditional_layer_service_schema.add_field('layer_hash', label='Layer Name', width=1)
conditional_layer_service_schema.add_field('objects_created', label='Objects Created', width=1)
conditional_layer_service_schema.add_field('requests_waiting', label='Requests Waiting', width=1)
conditional_layer_service_schema.add_field('last_request', label='Last Request', width=1)
with conditional_layer_service_schema.add_has_many('Objects', GsiGridSchema) as (sub_schema):
sub_schema.add_field('object_id', label='Object Id')
sub_schema.add_field('object', label='Object')
with conditional_layer_service_schema.add_has_many('Requests', GsiGridSchema) as (sub_schema):
sub_schema.add_field('request', label='Request')
sub_schema.add_field('speed', label='Speed')
sub_schema.add_field('timer_interval', label='Timer Interval')
sub_schema.add_field('timer_object_count', label='Timer Object Count')
@GsiHandler('conditional_layer_service', conditional_layer_service_schema)
def generate_conditional_layer_service_data(zone_id: int=None):
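    """Builds the GSI grid rows describing each conditional layer's loaded objects and pending requests."""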
layer_data = []
conditional_layer_service = services.conditional_layer_service()
if conditional_layer_service is None:
return layer_data
object_manager = services.object_manager()
for conditional_layer, layer_info in conditional_layer_service._layer_infos.items():
object_data = []
for object_id in layer_info.objects_loaded:
obj = object_manager.get(object_id)
object_data.append({'object_id':str(object_id),
'object':str(obj)})
request_data = []
for request in conditional_layer_service.requests:
if request.conditional_layer is conditional_layer:
request_data.append({'request':str(request),
'speed':request.speed.name,
'timer_interval':str(request.timer_interval),
'timer_object_count':str(request.timer_object_count)})
layer_data.append({'layer_hash':str(conditional_layer.layer_name),
'conditional_layer':str(conditional_layer),
'objects_created':str(len(layer_info.objects_loaded)),
'requests_waiting':str(len(request_data)),
'last_request':str(layer_info.last_request_type),
'Objects':object_data,
'Requests':request_data})
return layer_data
class LayerRequestAction(enum.Int, export=False):
SUBMITTED = ...
EXECUTING = ...
COMPLETED = ...
conditional_layer_request_archive_schema = GsiGridSchema(label='Conditional Layers/Conditional Layer Request Archive', sim_specific=False)
conditional_layer_request_archive_schema.add_field('game_time', label='Game/Sim Time', type=GsiFieldVisualizers.TIME)
conditional_layer_request_archive_schema.add_field('request', label='Request')
conditional_layer_request_archive_schema.add_field('action', label='Action')
conditional_layer_request_archive_schema.add_field('layer_hash', label='Layer Hash')
conditional_layer_request_archive_schema.add_field('speed', label='Speed')
conditional_layer_request_archive_schema.add_field('timer_interval', label='Timer Interval')
conditional_layer_request_archive_schema.add_field('timer_object_count', label='Timer Object Count')
conditional_layer_request_archive_schema.add_field('objects_in_layer_count', label='Object Count')
archiver = GameplayArchiver('conditional_layer_requests', conditional_layer_request_archive_schema,
add_to_archive_enable_functions=True)
def is_archive_enabled():
return archiver.enabled
def archive_layer_request_culling(request, action, objects_in_layer_count=None):
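    """Writes a layer request state change (and optional object count) to the GSI archive."""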
time_service = services.time_service()
if time_service.sim_timeline is None:
time = 'zone not running'
else:
time = time_service.sim_now
    entry = {'game_time': str(time),
             'request': str(request),
             'action': action.name,
             'layer_hash': str(hex(request.conditional_layer.layer_name)),
             'speed': request.speed.name,
             'timer_interval': str(request.timer_interval),
             'timer_object_count': str(request.timer_object_count),
             'objects_in_layer_count': str(objects_in_layer_count) if objects_in_layer_count is not None else ''}
archiver.archive(entry)
|
StarcoderdataPython
|
150417
|
<reponame>LSanselme/kerod
import tensorflow as tf
from kerod.model.backbone.fpn import FPN
def test_build_fpn():
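    """Checks that the FPN outputs one extra pyramid level and preserves the input spatial sizes."""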
shapes = [160, 80, 40, 20]
features = [tf.zeros((1, shape, shape, 3)) for shape in shapes]
pyramid = FPN()(features)
assert len(pyramid) == len(shapes) + 1
for p, shape in zip(pyramid[:-1], shapes):
assert p.shape[1] == shape
assert p.shape[2] == shape
assert pyramid[-1].shape[1] == 10
assert pyramid[-1].shape[2] == 10
|
StarcoderdataPython
|
73674
|
<reponame>davemus/flake8-custom-trailing-commas
<gh_stars>1-10
yield (a, b)
yield a, b
|
StarcoderdataPython
|
3315667
|
<reponame>mtasic85/routingtable
__all__ = ['ProtocolCommand']
class ProtocolCommand(object):
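    """Base class for a versioned protocol command.

    Subclasses implement the request/response lifecycle hooks (req, on_req, res, on_res).
    """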
# protocol version 1.0
DEFAULT_PROTOCOL_VERSION_MAJOR = 1
DEFAULT_PROTOCOL_VERSION_MINOR = 0
# protocol message types
PROTOCOL_REQ = 0
PROTOCOL_RES = 1
def __init__(self, node, protocol_major_version, protocol_minor_version, protocol_command_code):
self.node = node
if protocol_major_version is None:
protocol_major_version = self.DEFAULT_PROTOCOL_VERSION_MAJOR
if protocol_minor_version is None:
protocol_minor_version = self.DEFAULT_PROTOCOL_VERSION_MINOR
self.protocol_major_version = protocol_major_version
self.protocol_minor_version = protocol_minor_version
self.protocol_command_code = protocol_command_code
def start(self):
raise NotImplementedError
def stop(self):
raise NotImplementedError
def req(self):
raise NotImplementedError
def on_req(self, remote_host, remote_port, *args, **kwargs):
raise NotImplementedError
def res(self, remote_host, remote_port, *args, **kwargs):
raise NotImplementedError
def on_res(self, remote_host, remote_port, res):
raise NotImplementedError
|
StarcoderdataPython
|
67065
|
#!/usr/bin/env python
class Queue:
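    """A fixed-capacity FIFO queue backed by a Python list."""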
def __init__(self, size=16):
self.queue = []
self.size = size
self.front = 0
self.rear = 0
def is_empty(self):
return self.rear == 0
    def is_full(self):
        # the original (front - rear + 1) check could never equal size; compare the element count instead
        return (self.rear - self.front) == self.size
def first(self):
if self.is_empty():
raise Exception("QueueIsEmpty")
else:
return self.queue[self.front]
def last(self):
if self.is_empty():
raise Exception("QueueIsEmpty")
else:
            return self.queue[self.rear - 1]  # items occupy indices 0..rear-1
def add(self, obj):
if self.is_full():
raise Exception("QueueOverFlow")
else:
self.queue.append(obj)
self.rear += 1
def delete(self):
if self.is_empty():
raise Exception("QueueIsEmpty")
else:
self.rear -= 1
return self.queue.pop(0)
def show(self):
print(self.queue)
if __name__ == "__main__":
q = Queue(3)
q.add(1)
q.add(2)
q.show()
q.delete()
q.show()
|
StarcoderdataPython
|
4818383
|
from logger import LogConfig
from dim_endpoints import register_with_controller
from server import flask_thread, enq
from config import Config
import signal
class ProgramStop:
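    """Sets a stop flag when SIGINT or SIGTERM is received so the main loop can exit gracefully."""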
stop = False
def __init__(self):
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
def exit_gracefully(self, signum, frame):
self.stop = True
if __name__ == "__main__":
LogConfig()
config = Config()
config.load()
configured = config.is_configured()
register_with_controller(configured)
flask_thread()
if configured:
enq()
stop_me = ProgramStop()
while not stop_me.stop:
signal.pause()
|
StarcoderdataPython
|
1680717
|
import time
from django.core.management.base import BaseCommand, CommandError
from app.management import mail, tweet
from app.models import Account
from multiprocessing import Pool
class Command(BaseCommand):
help = 'Starts a process to get messages and route them to twitter.'
def handle(self, *args, **options):
self.stdout.write("Starting to churn messages...")
pool = Pool(10)
while True:
accounts = list(Account.objects.all())
print "update"
pool.map(mail.update, accounts)
print "process_new_messages"
pool.map(tweet.process_new_messages, accounts)
print "done."
time.sleep(60 * 5) # Every 5 minutes
|
StarcoderdataPython
|
173317
|
<filename>tests/conftest.py
from pytest import fixture
from datetime import datetime
from dateutil.tz import tzutc
@fixture()
def fake_profile():
return "[test]\n" + "aws_access_key_id = AKEXAMPLE\n" + "aws_secret_access_key = SKEXAMPLE"
@fixture()
def fake_config():
return {"test": {"aws_access_key_id": "", "aws_secret_access_key": "", "aws_default_region": ""}}
@fixture()
def sts_get_session_response():
return {"Credentials": {"AccessKeyId": "accesskey", "SecretAccessKey": "secretkey", "SessionToken": "sessiontoken"}}
@fixture()
def sts_get_caller_id_response():
return {"UserId": "string", "Account": "000000000000", "Arn": "string"}
@fixture()
def iam_list_ak_response():
return {"AccessKeyMetadata": [{"AccessKeyId": "AKIA111111111EXAMPLE"}, {"AccessKeyId": "AKIA222222222EXAMPLE"}]}
@fixture()
def iam_create_access_key_return():
return {"AccessKey": {"AccessKeyId": "accesskey", "SecretAccessKey": "secretkey"}}
@fixture()
def boto_standard_error():
return {"Error": {"Code": "WhatEver", "Message": "Error"}}
@fixture()
def sts_get_session_error():
return {"Error": {"Code": "WhatEver", "Message": "Error"}}
@fixture()
def iam_limit_exceeded_exception():
return {"Error": {"Code": "LimitExceededException", "Message": "Error"}}
@fixture()
def current_mocked_date():
return datetime(2020, 7, 1, 10, 00, 00, tzinfo=tzutc())
@fixture()
def creation_mocked_date():
return datetime(2020, 5, 1, 10, 00, 00, tzinfo=tzutc())
|
StarcoderdataPython
|
1740855
|
import os
from shutil import rmtree
from PIL import Image
from .functions_web import download_image
def convert_to_pdf(img_list, download_path):
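    """Saves a list of PIL images as a single multi-page PDF at download_path."""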
if len(img_list) > 0:
image = img_list.pop(0)
image.save(download_path, save_all=True, append_images=img_list)
def download_and_convert_to_pdf(srcs, download_path, chp_nb, referer=''):
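    """Downloads a chapter's images into a temp folder, merges them into one PDF, then removes the temp folder."""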
img_list = []
for j in range(len(srcs)):
ext = srcs[j].split('.')[-1]
download_image(srcs[j], download_path + 'temp' + os.path.sep, 'chp_' + chp_nb + '_' + str(j) + '.' + ext, referer=referer + chp_nb)
img_list.append(Image.open(download_path + 'temp' + os.path.sep + 'chp_' + chp_nb + '_' + str(j) + '.' + ext).convert('RGB'))
convert_to_pdf(img_list, download_path + 'chp_' + chp_nb + '.pdf')
rmtree(download_path + 'temp' + os.path.sep)
|
StarcoderdataPython
|
3383316
|
<filename>krogon/load_cli_modules.py
import sys
import pkgutil
def load_krogon_plugin_click_commands(root_click_group):
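    """Scans sys.path for installed krogon_* plugin modules and attaches their Click CLI groups to the root group."""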
for (_, name, _) in pkgutil.iter_modules(sys.path):
if not name.startswith('krogon_'):
continue
plugin_name = name.replace('krogon_', '')
cli_module_name = name + '.' + plugin_name + '_cli'
try:
module = __import__(cli_module_name)
click_cli_group = module \
.__getattribute__(plugin_name + '_cli') \
.__getattribute__(plugin_name)
root_click_group.add_command(click_cli_group)
except ModuleNotFoundError:
continue
except AttributeError:
continue
|
StarcoderdataPython
|
3281349
|
<gh_stars>0
#!/usr/bin/env python3
"""
SteamCMD API global configuration.
"""
# allowed HTTP methods
ALLOWED_METHODS = ["GET", "HEAD", "OPTIONS"]
# allowed api version
ALLOWED_VERSIONS = ["v1"]
# available endpoints
AVAILABLE_ENDPOINTS = ["info", "version"]
# file containing version
VERSION_FILE = ".version"
|
StarcoderdataPython
|