| id (stringlengths 1–7) | text (stringlengths 6–1.03M) | dataset_id (stringclasses, 1 value) |
| --- | --- | --- |
114001
|
<reponame>Slavkata/Forecast-Report
# Code in this file is taken from this site
# http://www.codiply.com/blog/hyperparameter-grid-search-across-multiple-models-in-scikit-learn/
# and modified to fit my program.
import pandas as pd
from sklearn.grid_search import GridSearchCV
import numpy as np
class EstimatorSelectionHelper:
def __init__(self, models, params):
self.models = models
self.params = params
self.keys = models.keys()
self.grid_searches = {}
def fit(self, X, y, cv=3, n_jobs=1, verbose=1, scoring=None, refit=False):
for key in self.keys:
print("Running GridSearchCV for %s." % key)
model = self.models[key]
params = self.params[key]
gs = GridSearchCV(model, params, cv=cv, n_jobs=n_jobs,
verbose=verbose, scoring=scoring, refit=refit)
gs.fit(X,y)
self.grid_searches[key] = gs
def score_summary(self, sort_by='mean_score'):
def row(key, scores, params):
d = {
'estimator': key,
'min_score': min(scores),
'max_score': max(scores),
'mean_score': np.mean(scores),
'std_score': np.std(scores),
}
return pd.Series({**d, **params})
rows = [row(k, gsc.cv_validation_scores, gsc.parameters)
for k in self.keys
for gsc in self.grid_searches[k].grid_scores_]
df = pd.concat(rows, axis=1).T.sort_values([sort_by], ascending=False)
columns = ['estimator', 'min_score', 'mean_score', 'max_score', 'std_score']
columns = columns + [c for c in df.columns if c not in columns]
return df[columns]
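# -----------------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): shows how the helper above is
# typically driven. It assumes an older scikit-learn release where sklearn.grid_search
# and grid_scores_ still exist, exactly as the class itself requires; the demo data,
# model names and parameter grids are arbitrary choices for illustration.
if __name__ == '__main__':
    from sklearn.datasets import make_classification
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.svm import SVC

    # Small synthetic binary-classification problem for the demo
    X_demo, y_demo = make_classification(n_samples=200, n_features=5, random_state=0)

    models = {
        'RandomForestClassifier': RandomForestClassifier(),
        'SVC': SVC(),
    }
    params = {
        'RandomForestClassifier': {'n_estimators': [16, 32]},
        'SVC': {'C': [1, 10], 'kernel': ['linear', 'rbf']},
    }

    helper = EstimatorSelectionHelper(models, params)
    helper.fit(X_demo, y_demo, scoring='f1', n_jobs=1)
    print(helper.score_summary(sort_by='mean_score'))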
|
StarcoderdataPython
|
21962
|
<filename>school/lecture1/isi_cv_02_task.py
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 19 20:41:09 2017
@author: pd
"""
#from IPython import get_ipython
#get_ipython().magic('reset -sf')
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
X, Y = datasets.make_classification(n_samples=1000,
n_features=3, n_redundant=0)
# print(X, Y)
clf = DecisionTreeClassifier()
clf = clf.fit(X*10, Y*10)
x,y,z = clf.predict([[-2, 2, 0],[-131, -123, -435],[-22, 100, 53]])
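# Note: x, y and z are the predicted class labels for the three sample points above,
# not coordinates; the commented-out scatter calls below would need 2-D feature data
# to produce a meaningful plot.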
#### initial visualization
plt.xlim(0.0, 20.0)
plt.ylim(0.0, 20.0)
# plt.scatter(X, Y, color="b", label="fast")
# plt.scatter(x, y, color="r", label="slow")
# plt.legend()
# plt.xlabel("bumpiness")
# plt.ylabel("grade")
plt.show()
|
StarcoderdataPython
|
92443
|
<reponame>entityoneuk/lusid-python-tools
import lusid
import lusid.models as models
import logging
logger = logging.getLogger()
def create_transaction_type_configuration(api_factory, alias, movements):
"""
This function creates a transaction type configuration if it doesn't already exist.
Parameters
----------
api_factory : lusid.utilities.ClientApiFactory
The LUSID api factory to use
alias : lusid.models.TransactionConfigurationTypeAlias
An alias with a type and group
movements : list[lusid.models.TransactionConfigurationMovementDataRequest]
The movements to use for the transaction type
Returns
-------
response : (lusid.models.createtransactiontyperesponse)
The response from creating the transaction type
"""
# Call LUSID to get your transaction type configuration
response = api_factory.build(
lusid.api.SystemConfigurationApi
).list_configuration_transaction_types()
aliases_current = [
(alias.type, alias.transaction_group)
for transaction_grouping in response.transaction_configs
for alias in transaction_grouping.aliases
]
logger.info(
f"The LUSID enviornment currently has {len(aliases_current)} transaction aliases"
)
if (alias.type, alias.transaction_group) in aliases_current:
logger.warning(
"The following alias already exists: "
+ f"Type of {alias.type} with source {alias.transaction_group}"
)
return response
logger.info(f"Creating a new transaction aliases called: {alias.type}")
response = api_factory.build(
lusid.api.SystemConfigurationApi
).create_configuration_transaction_type(
type=models.TransactionConfigurationDataRequest(
aliases=[alias], movements=movements
)
)
return response
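# Hedged usage sketch (comments only, not part of the original module). The alias and
# movements arguments are assumed to be built elsewhere using the models named in the
# docstring above, and api_factory is the factory described there:
#
#   response = create_transaction_type_configuration(api_factory, alias, movements)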
|
StarcoderdataPython
|
1606850
|
<reponame>janbernloehr/watering
#!/usr/bin/python
# wiring stuff
from time import sleep
import wiringpi as wiringpi
# web stuff
import json
import falcon
# data stuff
import dataset
from datetime import date, datetime, timedelta
# constants
WATERING = 1
INPUT = 0
OUTPUT = 1
PWM_OUTPUT = 2
GPIO_CLOCK = 3
SOFT_PWM_OUTPUT = 4
SOFT_TONE_OUTPUT = 5
PWM_TONE_OUTPUT = 6
LOW = 0
HIGH = 1
s_to_ml_factor = 250.0/20.0 # 20s == 250ml
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, datetime):
serial = obj.isoformat()
return serial
raise TypeError('Not sure how to serialize %s' % (obj,))
def water_all(volume, username):
duration = int(float(volume) / s_to_ml_factor)
wiringpi.digitalWrite(WATERING, HIGH)
try:
sleep(duration)
db = dataset.connect('sqlite:///mydatabase.db')
db_waterings = db['waterings']
db_waterings.insert(dict(waterdate=datetime.utcnow(), user=username, quantity=volume))
finally:
wiringpi.digitalWrite(WATERING, LOW)
return duration
def record_filling(volume, username):
db = dataset.connect('sqlite:///mydatabase.db')
db_fillings = db['fillings']
db_fillings.insert(dict(filldate=datetime.utcnow(), user=username, quantity=volume))
def get_history():
db = dataset.connect('sqlite:///mydatabase.db')
db_fillings = db['fillings']
last_filling = db_fillings.find_one(order_by='-filldate', _limit=1)
db_waterings = db['waterings']
recent_waterings = db_waterings.find(db_waterings.table.columns.waterdate >= last_filling['filldate'], order_by='-waterdate')
lst = []
total = last_filling['quantity']
taken = 0
for x in recent_waterings:
taken += x['quantity']
lst.append({
'waterdate': x['waterdate'],
'user': x['user'],
'quantity': x['quantity']
})
return json.dumps({
'last_filling': last_filling,
'remaining' : total-taken,
'history': lst
}, default=json_serial)
def start_this_app():
# we use the wiring pi schema
wiringpi.wiringPiSetup()
# set the watering port to output
wiringpi.pinMode(WATERING, OUTPUT)
# to start with a clean state
wiringpi.digitalWrite(WATERING, LOW)
class DoWatering:
def on_get(self, req, resp, volume):
duration = water_all(int(volume), 'Jan')
origin = req.get_header('Origin')
resp.set_header('Access-Control-Allow-Origin', origin)
resp.body = json.dumps({
'action': 'water',
'volume': volume,
'duration': duration
})
resp.status = falcon.HTTP_200
class RecordFilling:
def on_get(self, req, resp, volume):
record_filling(int(volume), 'Jan')
origin = req.get_header('Origin')
resp.set_header('Access-Control-Allow-Origin', origin)
resp.body = json.dumps({
'action': 'record_filling',
'volume': volume
})
resp.status = falcon.HTTP_200
class GetHistory:
def on_get(self, req, resp):
origin = req.get_header('Origin')
resp.set_header('Access-Control-Allow-Origin', origin)
resp.body = get_history()
resp.status = falcon.HTTP_200
# Initialize Wiring etc.
start_this_app()
# Start Web
app = falcon.API()
app.add_route('/watering.api/water/{volume}', DoWatering())
app.add_route('/watering.api/fill/{volume}', RecordFilling())
app.add_route('/watering.api/history', GetHistory())
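# -----------------------------------------------------------------------------------
# Hedged run sketch (comments only, not part of the original file): falcon.API()
# returns a standard WSGI callable, so the app can be served by any WSGI server,
# e.g. "gunicorn <module>:app". For a quick local test the stdlib reference server
# also works (port 8000 is an arbitrary choice for this sketch):
#
#   from wsgiref.simple_server import make_server
#   httpd = make_server('0.0.0.0', 8000, app)
#   httpd.serve_forever()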
|
StarcoderdataPython
|
3286040
|
# pretty printing for stage 2.
# put "source /path/to/stage2_gdb_pretty_printers.py" in ~/.gdbinit to load it automatically.
import re
import gdb.printing
class TypePrinter:
no_payload_count = 4096
# Keep in sync with src/type.zig
# Types which have no payload do not need to be entered here.
payload_type_names = {
'array_u8': 'Len',
'array_u8_sentinel_0': 'Len',
'single_const_pointer': 'ElemType',
'single_mut_pointer': 'ElemType',
'many_const_pointer': 'ElemType',
'many_mut_pointer': 'ElemType',
'c_const_pointer': 'ElemType',
'c_mut_pointer': 'ElemType',
'const_slice': 'ElemType',
'mut_slice': 'ElemType',
'optional': 'ElemType',
'optional_single_mut_pointer': 'ElemType',
'optional_single_const_pointer': 'ElemType',
'anyframe_T': 'ElemType',
'int_signed': 'Bits',
'int_unsigned': 'Bits',
'error_set': 'ErrorSet',
'error_set_inferred': 'ErrorSetInferred',
'error_set_merged': 'ErrorSetMerged',
'array': 'Array',
'vector': 'Array',
'array_sentinel': 'ArraySentinel',
'pointer': 'Pointer',
'function': 'Function',
'error_union': 'ErrorUnion',
'error_set_single': 'Name',
'opaque': 'Opaque',
'struct': 'Struct',
'union': 'Union',
'union_tagged': 'Union',
'enum_full': 'EnumFull',
'enum_nonexhaustive': 'EnumFull',
'enum_simple': 'EnumSimple',
'enum_numbered': 'EnumNumbered',
'empty_struct': 'ContainerScope',
'tuple': 'Tuple',
'anon_struct': 'AnonStruct',
}
def __init__(self, val):
self.val = val
def tag(self):
tag_if_small_enough = self.val['tag_if_small_enough']
tag_type = tag_if_small_enough.type
if tag_if_small_enough < TypePrinter.no_payload_count:
return tag_if_small_enough
else:
return self.val['ptr_otherwise'].dereference()['tag']
def payload_type(self):
tag = self.tag()
if tag is None:
return None
type_name = TypePrinter.payload_type_names.get(str(tag))
if type_name is None:
return None
return gdb.lookup_type('struct type.%s' % type_name)
def to_string(self):
tag = self.tag()
if tag is None:
return '(invalid type)'
if self.val['tag_if_small_enough'] < TypePrinter.no_payload_count:
return '.%s' % str(tag)
return None
def children(self):
if self.val['tag_if_small_enough'] < TypePrinter.no_payload_count:
return
yield ('tag', '.%s' % str(self.tag()))
payload_type = self.payload_type()
if payload_type is not None:
yield ('payload', self.val['ptr_otherwise'].cast(payload_type.pointer()).dereference()['data'])
class ValuePrinter:
no_payload_count = 4096
# Keep in sync with src/value.zig
# Values which have no payload do not need to be entered here.
payload_type_names = {
'big_int_positive': 'BigInt',
'big_int_negative': 'BigInt',
'extern_fn': 'ExternFn',
'decl_ref': 'Decl',
'repeated': 'SubValue',
'eu_payload': 'SubValue',
'opt_payload': 'SubValue',
'empty_array_sentinel': 'SubValue',
'eu_payload_ptr': 'PayloadPtr',
'opt_payload_ptr': 'PayloadPtr',
'bytes': 'Bytes',
'enum_literal': 'Bytes',
'slice': 'Slice',
'enum_field_index': 'U32',
'ty': 'Ty',
'int_type': 'IntType',
'int_u64': 'U64',
'int_i64': 'I64',
'function': 'Function',
'variable': 'Variable',
'decl_ref_mut': 'DeclRefMut',
'elem_ptr': 'ElemPtr',
'field_ptr': 'FieldPtr',
'float_16': 'Float_16',
'float_32': 'Float_32',
'float_64': 'Float_64',
'float_80': 'Float_80',
'float_128': 'Float_128',
'error': 'Error',
'inferred_alloc': 'InferredAlloc',
'inferred_alloc_comptime': 'InferredAllocComptime',
'aggregate': 'Aggregate',
'union': 'Union',
'bound_fn': 'BoundFn',
}
def __init__(self, val):
self.val = val
def tag(self):
tag_if_small_enough = self.val['tag_if_small_enough']
tag_type = tag_if_small_enough.type
if tag_if_small_enough < ValuePrinter.no_payload_count:
return tag_if_small_enough
else:
return self.val['ptr_otherwise'].dereference()['tag']
def payload_type(self):
tag = self.tag()
if tag is None:
return None
type_name = ValuePrinter.payload_type_names.get(str(tag))
if type_name is None:
return None
return gdb.lookup_type('struct value.%s' % type_name)
def to_string(self):
tag = self.tag()
if tag is None:
return '(invalid value)'
if self.val['tag_if_small_enough'] < ValuePrinter.no_payload_count:
return '.%s' % str(tag)
return None
def children(self):
if self.val['tag_if_small_enough'] < ValuePrinter.no_payload_count:
return
yield ('tag', '.%s' % str(self.tag()))
payload_type = self.payload_type()
if payload_type is not None:
yield ('payload', self.val['ptr_otherwise'].cast(payload_type.pointer()).dereference()['data'])
pp = gdb.printing.RegexpCollectionPrettyPrinter('Zig stage2 compiler')
pp.add_printer('Type', r'^type\.Type$', TypePrinter)
pp.add_printer('Value', r'^value\.Value$', ValuePrinter)
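# Note: when this script is sourced from ~/.gdbinit (as the header suggests),
# gdb.current_objfile() returns None, so the printers are registered globally
# rather than for a single objfile.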
gdb.printing.register_pretty_printer(gdb.current_objfile(), pp)
|
StarcoderdataPython
|
20950
|
<filename>pkg/agents/team4/trainingAgent/findBestConfigs.py
# TODO: automate finding best agents
|
StarcoderdataPython
|
3364621
|
<gh_stars>1-10
# Generated by Django 3.2 on 2021-04-29 14:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('school_management_app', '0013_alter_tcomment_created_on'),
]
operations = [
migrations.AlterField(
model_name='news',
name='ndate',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='tnews',
name='ndate',
field=models.DateTimeField(auto_now_add=True),
),
]
|
StarcoderdataPython
|
90607
|
from enum import Enum
import logging
import dbus
try:
from gi.repository import GObject
except ImportError:
import gobject as GObject
from .dbus_bluez_interfaces import Characteristic, Service, string_to_dbus_array
logger = logging.getLogger(__name__)
class BluenetUuids(object):
SERVICE = 'FBE51523-B3E6-4F68-B6DA-410C0BBA1A78'
AVAILABLE_NETWORKS = 'FBE51524-B3E6-4F68-B6DA-410C0BBA1A78'
CONNECTION_STATE = 'FBE51525-B3E6-4F68-B6DA-410C0BBA1A78'
HOST_NAME = 'FBE51526-B3E6-4F68-B6DA-410C0BBA1A78'
VERSION = 'FBE51527-B3E6-4F68-B6DA-410C0BBA1A78'
SSID = 'FBE51528-B3E6-4F68-B6DA-410C0BBA1A78'
CREDENTIALS = 'FBE51529-B3E6-4F68-B6DA-410C0BBA1A78'
class WifiConnectionState(Enum):
DOWN = 0
DISCONNECTED = 1
CONNECTING = 2
CONNECTED = 3
class BluenetService(Service):
"""
Concrete implementation of a GATT service that can be used for Wifi onboarding.
"""
def __init__(self, bus, index, host_name, version):
super().__init__(bus, index, BluenetUuids.SERVICE, True)
self._credentials_received_callback = None
self._available_networks_characteristic = AvailableNetworksCharacteristic(bus, 0, self)
self._connection_state_characteristic = ConnectionStateCharacteristic(bus, 1, self)
self._host_name_characteristic = HostNameCharacteristic(bus, 2, self, host_name)
self._version_characteristic = VersionCharacteristic(bus, 3, self, version)
self._ssid_characteristic = SsidCharacteristic(bus, 4, self)
self._credentials_characteristic = CredentialsCharacteristic(bus, 5, self)
self.add_characteristic(self._available_networks_characteristic)
self.add_characteristic(self._connection_state_characteristic)
self.add_characteristic(self._host_name_characteristic)
self.add_characteristic(self._version_characteristic)
self.add_characteristic(self._ssid_characteristic)
self.add_characteristic(self._credentials_characteristic)
def set_available_networks(self, ssids):
self._available_networks_characteristic.ssids = ssids
def set_connection_state(self, state, current_ssid):
self._connection_state_characteristic.set_connection_state(state, current_ssid)
def set_hostname(self, hostname):
self._host_name_characteristic.set_hostname(hostname)
def set_credentials_received_callback(self, cb):
"""
The provided callback will be called when the credentials for a network have been received.
It must have the signature callback(ssid, credentials).
"""
self._credentials_received_callback = cb
def _credentials_received(self):
if not callable(self._credentials_received_callback):
return
self._credentials_received_callback(
self._ssid_characteristic.ssid,
self._credentials_characteristic.credentials)
class AvailableNetworksCharacteristic(Characteristic):
"""
GATT characteristic sending a list of network names.
Possible operations: Notify
Sends one SSID at a time with each notify signal.
After all SSIDs have been sent it waits for 3s and starts from begin.
"""
def __init__(self, bus, index, service):
super().__init__(bus, index, BluenetUuids.AVAILABLE_NETWORKS, ['notify'], service)
self.ssids = []
self._ssids_sent = []
self._ssid_last_sent = ''
self._update_interval = 300 # ms, time between single SSIDs
self._wait_time = 3000 # ms, time to wait between sending the complete set of SSIDs
def _send_next_ssid(self):
if not self.ssids:
logger.debug("No SSIDs available.")
return self.is_notifying
next_ssid = ''
for ssid in self.ssids:
if ssid not in self._ssids_sent:
next_ssid = ssid
break
if not next_ssid:
# all SSIDs have been sent at least once, repeat:
self._ssids_sent = []
GObject.timeout_add(self._wait_time, self._start_send_ssids)
return False
logger.debug("Sending next SSID: %s" % next_ssid)
self.value_update(string_to_dbus_array(next_ssid))
self._ssid_last_sent = next_ssid
self._ssids_sent.append(next_ssid)
return self.is_notifying
def _start_send_ssids(self):
GObject.timeout_add(self._update_interval, self._send_next_ssid)
# return False to stop timeout:
return False
def _read_value(self, options):
logger.info("Sending AvailableNetworks Value")
return string_to_dbus_array(self._ssid_last_sent)
def _on_start_notifying(self):
logger.info("Remote device is connected")
logger.info("Start notifying about available networks")
self._ssids_sent = []
self._start_send_ssids()
class ConnectionStateCharacteristic(Characteristic):
"""
GATT characteristic sending the current Wifi connection status.
Possible operations: Read + Notify
First byte is the connection status (0: Down 1: Disconnected, 2: Connecting, 3: Connected)
In case of Connecting or Connected the remaining bytes are the currently used SSID.
"""
def __init__(self, bus, index, service):
super().__init__(bus, index, BluenetUuids.CONNECTION_STATE, ['read', 'notify'], service)
self.state = WifiConnectionState.DISCONNECTED
self.current_ssid = None
def set_connection_state(self, state, current_ssid):
self.state = state
self.current_ssid = current_ssid
if self.is_notifying:
logger.info("Sending updated connection state")
if self.state != WifiConnectionState.DISCONNECTED and self.current_ssid:
self.value_update([dbus.Byte(self.state.value)] + string_to_dbus_array(self.current_ssid))
else:
self.value_update([dbus.Byte(self.state.value)])
def _read_value(self, options):
logger.info("Read Connection State Value")
if self.state != WifiConnectionState.DISCONNECTED and self.current_ssid:
return [dbus.Byte(self.state.value)] + string_to_dbus_array(self.current_ssid)
else:
return [dbus.Byte(self.state.value)]
class HostNameCharacteristic(Characteristic):
"""
GATT characteristic providing the host name of the server.
Possible operations: Read + Notify
Content: host name as array of characters
"""
def __init__(self, bus, index, service, hostname):
super().__init__(bus, index, BluenetUuids.HOST_NAME, ['read', 'notify'], service)
self.hostname = hostname
def set_hostname(self, hostname):
self.hostname = hostname
if self.is_notifying:
logger.info("Sending updated hostname")
self.value_update(string_to_dbus_array(self.hostname))
def _read_value(self, options):
logger.info("Sending HostName Value")
return string_to_dbus_array(self.hostname)
class VersionCharacteristic(Characteristic):
"""
GATT characteristic providing the version of this GATT service.
Possible operations: Read
Content: Version as a string (array of characters)
"""
def __init__(self, bus, index, service, version):
super().__init__(bus, index, BluenetUuids.VERSION, ['read'], service)
self.version = version
def _read_value(self, options):
logger.info("Sending Version Value")
return string_to_dbus_array(self.version)
class SsidCharacteristic(Characteristic):
"""
GATT characteristic for setting the SSID to connect with.
An attempt to join the network is made when the credentials are received.
Possible operations: Write
Content: SSID as array of characters
"""
def __init__(self, bus, index, service):
super().__init__(bus, index, BluenetUuids.SSID, ['write'], service)
self.ssid = None
def _write_value(self, value, options):
self.ssid = bytes(value).decode()
logger.info("Received SSID: %s" % self.ssid)
class CredentialsCharacteristic(Characteristic):
"""
GATT characteristic for providing the credentials needed to join a network.
When this characteristic is written an attempt is made to join the specified network.
Possible operations: Write
Content: Credentials (i.e. Wifi password) as array of characters
"""
def __init__(self, bus, index, service):
super().__init__(bus, index, BluenetUuids.CREDENTIALS, ['write'], service)
self.credentials = None
def _write_value(self, value, options):
self.credentials = bytes(value).decode()
logger.info("Received password")
self.service._credentials_received()
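# -----------------------------------------------------------------------------------
# Hedged usage sketch (comments only, not part of the original module). It assumes
# the surrounding BlueZ GATT application/advertising setup from dbus_bluez_interfaces
# is done elsewhere, and my_join_network_callback is a hypothetical callback:
#
#   bus = dbus.SystemBus()
#   service = BluenetService(bus, 0, host_name='my-device', version='1.0')
#   service.set_available_networks(['HomeWifi', 'GuestWifi'])
#   service.set_credentials_received_callback(my_join_network_callback)
#   service.set_connection_state(WifiConnectionState.DISCONNECTED, None)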
|
StarcoderdataPython
|
110023
|
<reponame>vtta2008/pipelineTool
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Script Name: toolBoxII
Author: <NAME>/Jimmy - TD artist
Warning: This is the most complex code structure I have built; it uses more advanced Maya features alongside
more advanced Python features than before.
Description:
It builds a UI with which you can quickly create nurbs controllers for Maya, and you can also save your own. All
the qssPths you save will be stored in the 'userLibrary' folder inside the DAMGpipelinetool folder.
"""
# -------------------------------------------------------------------------------------------------------------
""" Import """
import json # to read and write info & qssPths
import logging
import os # used below for os.path.join / os.getenv but missing from the original imports
from functools import partial # partial module can store variables to method
import maya.OpenMayaUI as omui # the extent of the internal Maya_tk API
import maya.app.renderSetup.views.renderSetupButton as marv # very nice symbol okButton
import pymel.core as pm # Pymel command for maya
# -------------------------------------------------------------------------------------------------------------
# IMPORT MAYA PYTHON MODULES
# -------------------------------------------------------------------------------------------------------------
from maya import cmds, mel # Maya_tk Python command
# ------------------------------------------------------
# VARIABLES USED BY ALL CLASSES
# ------------------------------------------------------
from tankers.pMaya.plt_modules import MayaVariables as var
from tankers.pMaya.plt_modules import ToolBoxIIfuncs
NAMES = var.MAINVAR
VERSION = var.MAYAVERSION
DIRECTORY = os.path.join(os.getenv(__root__), 'maya', 'userLibrary')
CHANNELBOX_ID = 'ChannelBoxID'
# -------------------------------------------------------------------------------------------------------------
# IMPORT QT MODULES
# -------------------------------------------------------------------------------------------------------------
from tankers.pMaya.QtPlugins import Qt # plugin module go with DAMGtool to make UI
from tankers.pMaya.QtPlugins.Qt import QtWidgets, QtCore, QtGui
# -------------------------------------------------------------------------------------------------------------
# MAKE MAYA UNDERSTAND QT UI AS MAYA WINDOW, FIX PLM_VERSION CONVENTION
# -------------------------------------------------------------------------------------------------------------
# We can configure the current level to make it disable certain logs when we don't want it.
logging.basicConfig()
logger = logging.getLogger(__file__)
logger.setLevel(logging.DEBUG)
# -------------------------------------------------------------------------------------------------------------
# CHECK WHICH QT BINDING IS BEING USED UNDER QT.PY
# -------------------------------------------------------------------------------------------------------------
# While Qt.py lets us abstract the actual Qt library, there are a few things it cannot do yet
# and a few support libraries we need that we have to import manually.
if Qt.__qtBinding__ == 'PySide':
logger.debug('Using PySide with shiboken')
from shiboken import wrapInstance
from appPackages.maya.plugins.Qt.QtCore import Signal
elif Qt.__qtBinding__.startswith('PyQt'):
logger.debug('Using PyQt with sip')
from sip import wrapinstance as wrapInstance
from appPackages.maya.plugins.Qt.QtCore import pyqtSignal as Signal
else:
logger.debug('Using PySide2 with shiboken2')
from shiboken2 import wrapInstance
from appPackages.maya.plugins.Qt.QtCore import Signal
# -------------------------------------------------------------------------------------------------------------
# SHOW UI - MAKE UI IS DOCKABLE INSIDE MAYA
# -------------------------------------------------------------------------------------------------------------
def deleteDock(name=NAMES['id'][9], version=VERSION):
"""
A simple function to delete the given dock
Args:
name: the name of the dock
"""
if version >= 2017:
if cmds.workspaceControl(name, query=True, exists=True):
cmds.deleteUI(name)
else:
if cmds.dockControl(name, query=True, exists=True):
cmds.deleteUI(name)
def getMayaMainWindow():
"""
Since Maya is Qt-based, we can parent our UIs to it.
This means that we don't have to manage our UI and can leave it to maya.
Returns:
QtWidgets.QMainWindow: The maya MainWindow
"""
# Use the OpenMayaUI API to get a reference to Maya_tk's MainWindow
win = omui.MQtUtil_mainWindow()
# Use the wrapInstance method to convert it to something python can understand (QMainWindow)
ptr = wrapInstance(long(win), QtWidgets.QMainWindow)
# Return this to whoever wants it
return ptr
def getDock(name='DAMGtoolBoxIIDock', version=VERSION):
"""
This function creates a dock with the given name.
It's an example of how we can mix maya's UI elements with Qt elements
Args:
name: The name of the dock to create
Returns:
QtWidget.QWidget: The dock's widget
"""
# Delete any conflicting docks
deleteDock(name)
# Create a workspaceControl dock using Maya_tk's UI tools
if version >= 2017:
ctrl = cmds.workspaceControl(name, label=NAMES['mayaLabel'][9])
else:
ctrl = cmds.dockControl(name, label=NAMES['mayaLabel'][9])
# Use the OpenMayaUI API to get the actual Qt widget associated with the name
qtCtrl = omui.MQtUtil_findControl(ctrl)
# Use wrapInstance to convert it to something Python can understand (QWidget)
ptr = wrapInstance(long(qtCtrl), QtWidgets.QWidget)
return ptr
# ------------------------------------- #
# SUB CLASSES FUNCTIONS AND UI ELEMENTS #
# ------------------------------------- #
# Controller Manager UI when create something
# ------------------------------------------------------
def geticon(icon):
return os.path.join(os.getenv(__root__), 'imgs', 'maya.icon', icon)
class ControllerManager(QtWidgets.QWidget):
onSolo = Signal(bool)
tfW = 200
def __init__(self, nurbs):
super(ControllerManager, self).__init__()
if isinstance(nurbs, basestring):
nurbs = pm.PyNode(nurbs)
self.nurbs = nurbs
self.buildUI()
def buildUI(self):
btnW = 75
layout = QtWidgets.QGridLayout(self)
self.name = QtWidgets.QCheckBox()
self.name.setChecked(self.nurbs.visibility.get())
self.name.toggled.connect(lambda val: self.nurbs.getTransform().visibility.set(val))
layout.addWidget(self.name, 0, 0)
name = str(self.nurbs.getTransform())
self.textFldName = QtWidgets.QLineEdit(name)
self.textFldName.setMinimumWidth(self.tfW)
self.textFldName.returnPressed.connect(self.renameController)
layout.addWidget(self.textFldName, 0, 1)
self.colorPlate = QtWidgets.QLabel()
self.colorPlate.setMinimumWidth(30)
self.nurbs.overrideRGBColors.set(0)
self.nurbs.overrideEnabled.set(1)
self.setColorPlate()
layout.addWidget(self.colorPlate, 0, 2)
soloBtn = QtWidgets.QPushButton('Isolate')
soloBtn.setMinimumWidth(75)
soloBtn.setCheckable(True)
soloBtn.toggled.connect(lambda val: self.onSolo.emit(val))
layout.addWidget(soloBtn, 0, 3)
self.color_slider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
self.color_slider.setMinimumWidth(int(2.5 * btnW))  # setMinimumWidth expects an int
self.color_slider.setMinimum(1)
self.color_slider.setMaximum(31)
self.color_slider.setValue(self.nurbs.overrideColor.get())  # QSlider uses setValue
self.color_slider.valueChanged.connect(self.sliderSetColor)
layout.addWidget(self.color_slider, 0, 4)
deleteBtn = QtWidgets.QPushButton('Delete')
deleteBtn.setMinimumWidth(btnW)
deleteBtn.clicked.connect(self.deleteController)
layout.addWidget(deleteBtn, 0, 5)
def renameController(self):
oldName = str(self.nurbs.getTransform())
newName = self.textFldName.text()
cmds.rename(oldName, newName)
def sliderSetColor(self):
index = self.color_slider.value()
self.nurbs.overrideColor.set(index)
color = cmds.colorIndex(index, q=True)
self.setColorPlate(color)
def setColorPlate(self, color=None):
if not color:
indexColor = self.nurbs.overrideColor.get()
if indexColor == 0:
color = (.4, .4, .4)
else:
color = cmds.colorIndex(indexColor, query=True)
assert len(color) == 3, "You must provide a list of 3 colors"
r, g, b = [c * 255 for c in color]
self.colorPlate.setStyleSheet('background-color: rgba(%s,%s,%s,1.0)' % (r, g, b))  # QLabel uses setStyleSheet
def setColor(self):
nurbsIndexColor = self.nurbs.overrideColor.get()
if nurbsIndexColor == 0:
nurbsColor = (.4, .4, .4)
else:
nurbsColor = cmds.colorIndex(nurbsIndexColor, q=True)
color = pm.colorEditor(rgbValue=nurbsColor)
r, g, b, a = [float(c) for c in color.split()]
color = (r, g, b)
self.nurbs.overrideColorRGB.set(color)
self.setColorPlate(color)
def disableNurbs(self, value):
self.name.setChecked(not value)
def deleteController(self):
self.setParent(None)
self.setVisible(False)
self.deleteLater()
pm.delete(self.nurbs.getTransform())
# User Library Functions
# ------------------------------------------------------
class ControllerLibrary(dict):
def createDirectory(self, directory=DIRECTORY):
"""
Creates the given directory if it doesn't exist.
:param directory (str): the directory to create
:return:
"""
if not os.path.exists(directory):
os.mkdir(directory)
def save(self, name, screenshot=True, directory=DIRECTORY, **info):
self.createDirectory(directory)
path = os.path.join(directory, '%s.ma' % name)
infoFile = os.path.join(directory, '%s.json' % name)
info['name'] = name
info['path'] = path
cmds.file(rename=path)
if cmds.ls(sl=True):
cmds.file(force=True, type='mayaAscii', exportSelected=True)
else:
cmds.file(save=True, type='mayaAscii', force=True)
if screenshot:
info['screenshot'] = self.saveScreenshot(name, directory=directory)
with open(infoFile, 'w') as f:
json.dump(info, f, indent=4)
self[name] = info
def remove(self, name, directory=DIRECTORY):
mayapath = os.path.join(directory, '%s.ma' % name)
jsonpath = os.path.join(directory, '%s.json' % name)
imagepath = os.path.join(directory, '%s.jpg' % name)
items = [mayapath, jsonpath, imagepath]
for item in items:
cmds.sysFile(item, delete=True)
def reference(self, name, directory=DIRECTORY):
mayapath = os.path.join(directory, '%s.ma' % name)
cmds.file(mayapath, reference=True, usingNamespaces=False)
def find(self, directory=DIRECTORY):
self.clear()
if not os.path.exists(directory):
return
files = os.listdir(directory)
mayafiles = [f for f in files if f.endswith('.ma')]
for ma in mayafiles:
name, ext = os.path.splitext(ma)
path = os.path.join(directory, ma)
infoFile = '%s.json' % name
if infoFile in files:
infoFile = os.path.join(directory, infoFile)
with open(infoFile, 'r') as f:
info = json.load(f)
else:
info = {}
screenshot = '%s.jpg' % name
if screenshot in files:
info['screenshot'] = os.path.join(directory, screenshot)  # join with the .jpg file name, not the bare name
info['name'] = name
info['path'] = path
self[name] = info
def load(self, name):
path = self[name]['path']
cmds.file(path, i=True, usingNamespaces=False)
def saveScreenshot(self, name, directory=DIRECTORY):
cmds.viewFit()
path = os.path.join(directory, '%s.jpg' % name)
cmds.setAttr('defaultRenderGlobals.imageFormat', 8)
cf = cmds.currentTime(q=True)
cmds.playblast(completeFilename=path, forceOverwrite=True, format='image', width=200, height=200,
showOrnaments=False, startTime=cf, endTime=cf, viewer=False)
return path
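# Hedged usage sketch (comments only, not part of the original file): the library is a
# dict keyed by controller name, populated from the userLibrary folder. The controller
# name used here is arbitrary.
#
#   lib = ControllerLibrary()
#   lib.save('myController_v1')   # exports the current selection (or scene) plus a screenshot
#   lib.find()                    # rescans DIRECTORY and repopulates the dict
#   lib.load('myController_v1')   # imports the saved controller back into the scene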
# A Maya_tk channel box UI with a few modifications
# ------------------------------------------------------
class ChanelBox(QtWidgets.QWidget):
channelBoxID = CHANNELBOX_ID
def __init__(self):
# _parent = QtWidgets.QWidget(_parent=getMayaMainWindow())
# super(ChanelBox, self).__init__(_parent)
super(ChanelBox, self).__init__()
self.buildUI()
def buildUI(self):
layout = QtWidgets.QGridLayout(self)
self.cb1 = cmds.channelBox(CHANNELBOX_ID)
self.menuChannelBoxWhenRightClick()
ctrl = omui.MQtUtil.findControl(CHANNELBOX_ID)
channelBoxWidget = wrapInstance(long(ctrl), QtWidgets.QWidget)
layout.addWidget(channelBoxWidget)
# Menu popup when right click in channel box:
def menuChannelBoxWhenRightClick(self):
cb1_popup = cmds.popupMenu(p=self.cb1, ctl=False, button=3)
cmds.menuItem(l="Channels", c=partial(self.channelBoxCommand, "-channelEditor"))
cmds.menuItem(d=True)
cmds.menuItem(d=True)
cmds.menuItem(parent=cb1_popup, l="Reset All Channels", c=partial(self.channelBoxCommand, "-setAllToZero"))
cb1_menu_03 = cmds.menuItem(parent=cb1_popup, l="Channel Name", subMenu=True)
cmds.setParent(cb1_menu_03, m=True)
cmds.radioMenuItemCollection()
cmds.menuItem('niceNameItem', l="Nice", rb=True, c=self.niceNameSet)
cmds.menuItem('longNameItem', l="Long", rb=True, c=self.longNameSet)
cmds.menuItem('shortNameItem', l="Short", rb=True, c=self.shortNameSet)
cmds.setParent('..', m=True)
cmds.menuItem(d=True)
cmds.menuItem(l="Key Selected", c=partial(self.channelBoxCommand, "-keySelected"))
cmds.menuItem(l="Key All", c=partial(self.channelBoxCommand, "-keyAll"))
cmds.menuItem(l="Breakdown Selected", c=partial(self.channelBoxCommand, "-breakDownSelected"))
cmds.menuItem(l="Breakdown All", c=partial(self.channelBoxCommand, "-breakDownAll"))
cmds.menuItem(d=True)
cmds.menuItem(l="Cut Selected", c=partial(self.channelBoxCommand, "-cutSelected"))
cmds.menuItem(l="Copy Selected", c=partial(self.channelBoxCommand, "-copySelected"))
cmds.menuItem(l="Paste Selected", c=partial(self.channelBoxCommand, "-pasteSelected"))
cmds.menuItem(l="Delete Selected", c=partial(self.channelBoxCommand, "-deleteSelected"))
cmds.menuItem(d=True)
cmds.menuItem(l="Break Connections", c=partial(self.channelBoxCommand, "-breakConnection"))
cmds.menuItem(d=True)
cmds.menuItem(l="Lock Selected", c=partial(self.channelBoxCommand, "-lockSelected"))
cmds.menuItem(l="Unlock Selected", c=partial(self.channelBoxCommand, "-unlockSelected"))
cmds.menuItem(l="Hide Selected", c=partial(self.channelBoxCommand, "-hideSelected"))
cmds.menuItem(l="Lock and Hide Selected", c=partial(self.channelBoxCommand, "-lockAndHideSelected"))
cmds.menuItem(l="Show Hidden Channels", c=partial(self.channelBoxCommand, "-unhideHided"))
cmds.menuItem(d=True)
cmds.menuItem(l="Expressions...", c=partial(self.channelBoxCommand, "-expression"))
cmds.menuItem(l="Set Driven Key", c=partial(self.channelBoxCommand, "-setDrivenKey"))
cmds.menuItem(d=True)
cmds.menuItem(l="Delete Attribute", c=partial(self.channelBoxCommand, "-deleteAttribute"))
cmds.menuItem(d=True)
cmds.menuItem(l="Setting", subMenu=True)
cmds.setParent(m=True)
cmds.menuItem(l="Slow", rb=True, c=self.speedSlowSet)
cmds.menuItem(l="Normal", rb=True, c=self.speedNormalSet)
cmds.menuItem(l="Fast", rb=True, c=self.speedFastSet)
cmds.menuItem(d=True)
cmds.menuItem('hyperCheckBox', l="Hyperbolic", checkBox=True, c=self.hyperbolicSet)
cmds.menuItem(d=True)
cmds.menuItem(l="Precision", c=self.precisionNumberUI)
cmds.menuItem(d=True)
cmds.menuItem(l="No Manips", rb=True, c="cmds.channelBox(self.myChannelBox, query=True, mnp=0)")
cmds.menuItem(l="Invisible Manips", rb=True, c="cmds.channelBox(self.myChannelBox, query=True, mnp=1)")
cmds.menuItem(l="Standard Manips", rb=True, c="cmds.channelBox(self.myChannelBox, query=True, mnp=2)")
def warningPopup(self, message):
cmds.confirmDialog(t='Warning', m=message, b='OK')
cmds.warning(message)
# Menu popup functions
def precisionNumberUI(self, *args):
if cmds.window('setPrecisionNumber', exists=True):
cmds.deleteUI('setPrecisionNumber')
cmds.window('setPrecisionNumber')
cmds.columnLayout()
cmds.intField('precisionNumber', w=195)
cmds.text(l="", h=10)
cmds.rowColumnLayout(nc=2, cw=[(1, 90), (2, 100)])
cmds.button(l="Ok", w=90, c=self.setPreNum)
cmds.button(l="Close", w=90, c="cmds.deleteUI('setPrecisionNumber')")
cmds.showWindow('setPrecisionNumber')
def setPreNum(self, *args):
newPreNum = cmds.intField('precisionNumber', query=True, value=True)
if newPreNum <= 3:
newWidth = 65
elif newPreNum <= 6:
newWidth = 95
elif newPreNum <= 9:
newWidth = 115
elif newPreNum <= 12:
newWidth = 130
else:
newWidth = 155
cmds.channelBox(self.channelBoxID, edit=True, pre=newPreNum, fieldWidth=newWidth)
cmds.deleteUI('setPrecisionNumber')
def hyperbolicSet(self, *args):
hyperbolicCheck = cmds.menuItem('hyperCheckBox', query=True, checkBox=True)
if hyperbolicCheck == True:
cmds.channelBox(self.channelBoxID, e=True, hyp=True)
if hyperbolicCheck == False:
cmds.channelBox(self.channelBoxID, e=True, hyp=False)
def speedSlowSet(self, *args):
cmds.channelBox(self.channelBoxID, e=True, spd=0.1)
def speedNormalSet(self, *args):
cmds.channelBox(self.channelBoxID, e=True, spd=1)
def speedFastSet(self, *args):
cmds.channelBox(self.channelBoxID, e=True, spd=10)
def niceNameSet(self, *args):
cmds.channelBox(self.channelBoxID, e=True, nn=True, ln=False)
def longNameSet(self, *args):
cmds.channelBox(self.channelBoxID, e=True, nn=False, ln=True)
def shortNameSet(self, *args):
cmds.channelBox(self.channelBoxID, e=True, nn=False, ln=False)
def channelBoxCommand(self, operation, *args):
channelSel = cmds.channelBox(self.channelBoxID, query=True, sma=True)
objSel = cmds.ls(sl=True)
# reset default channels
transformChannels = ["translateX", "translateY", "translateZ", "rotateX", "rotateY", "rotateZ"]
scaleChannels = ["scaleX", "scaleY", "scaleZ", "visibility"]
if (operation == "-channelEditor"):
mel.eval("lockingKeyableWnd;")
elif (operation == "-setAllToZero"):
for obj in objSel:
for channel in transformChannels:
cmds.setAttr(obj + "." + channel, 0)
for channel in scaleChannels:
cmds.setAttr(obj + "." + channel, 1)
# reset created channels
for obj in objSel:
createdChannels = []
allChannels = cmds.listAnimatable(obj)
for channel in allChannels:
attrName = channel.split(".")[-1]
createdChannels.append(attrName)
channels = list(set(createdChannels) - set(transformChannels) - set(scaleChannels))
for channel in channels:
defaultValue = cmds.addAttr(obj + "." + channel, query=True, dv=True)  # query the attribute's default value
cmds.setAttr(obj + "." + channel, defaultValue)
elif (operation == "-keySelected"):
for obj in objSel:
for channel in channelSel:
cmds.setKeyframe(obj + "." + channel)
elif (operation == "-keyAll"):
for obj in objSel:
allChannels = cmds.listAnimatable(obj)
cmds.select(obj)
for channel in allChannels:
cmds.setKeyframe(channel)
elif (operation == "-breakDownSelected"):
for obj in objSel:
for channel in channelSel:
cmds.setKeyframe(obj + "." + channel, breakdown=True)
elif (operation == "-breakDownAll"):
for obj in objSel:
allChannels = cmds.listAnimatable(obj)
cmds.select(obj)
for channel in allChannels:
cmds.setKeyframe(channel, breakdown=True)
elif (operation == "-cutSelected") or (operation == "-deleteSelected"):
for obj in objSel:
for channel in channelSel:
cmds.cutKey(obj, at=channel)
elif (operation == "-copySelected"):
for obj in objSel:
for channel in channelSel:
cmds.copyKey(obj, at=channel)
elif (operation == "-pasteSelected"):
for obj in objSel:
for channel in channelSel:
cmds.pasteKey(obj, connect=True, at=channel)
elif (operation == "-breakConnection"):
for obj in objSel:
for channel in channelSel:
attr = obj + "." + channel
mel.eval("source channelBoxCommand; CBdeleteConnection \"%s\"" % attr)
elif (operation == "-lockSelected"):
for obj in objSel:
for channel in channelSel:
cmds.setAttr(obj + "." + channel, lock=True)
elif (operation == "-unlockSelected"):
for obj in objSel:
for channel in channelSel:
cmds.setAttr(obj + "." + channel, lock=False)
elif (operation == "-hideSelected"):
for obj in objSel:
for channel in channelSel:
cmds.setAttr(obj + "." + channel, keyable=False, channelBox=False)
elif (operation == "-lockAndHideSelected"):
for obj in objSel:
for channel in channelSel:
cmds.setAttr(obj + "." + channel, lock=True)
cmds.setAttr(obj + "." + channel, keyable=False, channelBox=False)
elif (operation == "-unhideHided"):
# channelBoxChannels = transformChannels + scaleChannels
for obj in objSel:
# for channel in channelBoxChannels:
# cmds.setAttr( obj + "." + channel, l=False, k=True )
# get locked channel
lockChannels = cmds.listAttr(obj, locked=True)
if lockChannels == None:
message = "nothing is locked"
self.warningPopup(message)
break
else:
for channel in lockChannels:
cmds.setAttr(obj + "." + channel, keyable=True, channelBox=True)
elif (operation == "-showDefault"):
for obj in objSel:
defaultChannel = ["tx", "ty", "tz", "rx", "ry", "rz", "sx", "sy", "sz"]
for channel in defaultChannel:
cmds.setAttr(obj + "." + channel, k=True, cb=True)
elif (operation == "-expression"):
mel.eval('expressionEditor EE "" "";')
elif (operation == "-unhideHided"):
mel.eval('SetDrivenKeyOptions;')
elif (operation == "-deleteAttribute"):
for obj in objSel:
for channel in channelSel:
cmds.deleteAttr(obj, at=channel)
elif (operation == "-about"):
cmds.confirmDialog(t="About DAMG Controller Maker",
m=("Thank you for using my script :D\n"
"Made by <NAME> - JimJim\n"
"Please feel free to give your feedback\n"
"Email me: <EMAIL>\n"),
b="Close")
# ----------------------------------------------------------------------------------------------------------- #
""" MAIN CLASS: DAMG TOOL BOX II - ALL ABOUT CONTROLLER UI """
# ----------------------------------------------------------------------------------------------------------- #
"""
A DRAFT PREVIS FOR UI
WARNING: Change the previs here before changing code, or at least update it after changing the UI.
It makes it easier to calculate and keep track of all the measurements and variables,
as well as to iterate on the UI quickly and accurately. It really does save time.
(w) 4 3 4 1
| | | |
W |---------------------||-----------------||----------------------------||---------------------|
H Y
X 1 2 | 3 4 || 5 6 7 || 8 9 10 11 || 12
(h) - ---------------------------------------------------------------------------------------------
1----| 1 || USER ASSETS CONTROLLER MANAGER CHANNEL BOX ||
- | --------------------- ----------------------------------------------- --------------------- |
1----| 2 || txtFld | btn1 || txt | mnOp | btn2 | txt | mnOp | btn3 | btn4 || ||
_ ||---------------------||-----------------------------------------------|| ||
| || || || ||
| || || || ||
1----| 3 || QListWiget || || ||
| || || QGidWidget || ||
| || || || ||
- ||-------------------- || || ||
1----| 4 || btn5 btn6 btn7 || || ||
- | -------------------- ------------------------------------------------ | ||
1----| 5 || txt txt || txt || txt || ||
- | -------------------- ------------------ -------- ------------------- | ||
| || iconbtn | iconbtn || btn || btn8 optionMenu textField || ||
| || | || ||----------------------------|| ||
| || | || || cb btn9 btn10 btn11 || ||
| || | || ||----------------------------|| ||
| || | || || txt txtFld || ||
1----| 6 || | || || txt txtFld || ||
| || | || ||----------------------------|| ||
| || | || || txt txt txt txt || ||
| || | || || txtFld txtFld txtFld mnOp || ||
| || | || || btn13 || ||
| || | || btn12 || btn14 btn15 btn16 btn17 || ||
- | --------------------- ----------------- ---------------------------- --------------------- |
1----| 7 | btn18 btn19 btn20 || |
- -----------------------------------------------------------------------------------------------
|| | || || || ||
btn| 1 2 3 4 5 6 7 8 9 10 11 12 13 14
| Save |Create|Create|Refresh|Import|Refresh|Remove|Create| X | Y | Z |autoColor|AddA|leftHand|
| 15 16 17 18 19 20
|leftFoot|RightHand|RightFoot|Group|Cen.Piv|Frez.T|
[x, y, height, width] = [X, Y, H, W]
"""
class ToolBoxII(QtWidgets.QWidget):
"""
The DAMGtoolBoxII is a dialog that lets us save and import controllers,
and also has functions that make it easier for the user to modify or model nurbs.
"""
# --------------------------------------------------------------------------------------------------------
# DICTIONARY TO STORE BINDATA TO MAKE CONTROLLERS SHOW IN DAMG CONTROLLER LIBRARY SECTION
# --------------------------------------------------------------------------------------------------------
# 2D nurbs types
nurbsType2D = {
'Arrow Curve': 'arrowCurve.icon.png', 'Plus Nurbs 2': 'boldPlusNurbs.icon.png',
'Clock Arrow Up': 'clockArrowUp.icon.png', 'Clock Arrow Down': 'clockArrowDown.icon.png',
'Female Symbol': 'femaleSymbol.icon.png', 'Male Symbol': 'maleSymbol.icon.png',
'Two directions': 'twoDirections.icon.png', 'Style Arrow 2D': 'twoDstyleArrow.icon.png',
'Lip Control': 'lipControl.icon.png', 'Upper Lip Control': 'upperLipControl.icon.png',
'Eyes Control': 'eyeControl.icon.png', 'Circle Plus': 'circlePlus.icon.png',
'Bold Circle 2D': 'twoDboldCircle.icon.png', 'Bear Foot Control': 'bearFootControl.icon.png',
'Fist Curve': "fistCurve.icon.png", 'Hand Nurbs': 'handNurbs.icon.png',
'Foot Control 1': "footControl1.icon.png", 'Foot Control 2': 'footControl2.icon.png',
'Circle Arrow 2D': 'twoDcircleArrow.icon.png', 'Slider Control': "sliderControl.icon.png",
'Master Control': 'masterControl.icon.png', 'Fan 5 Wings': 'fiveWingsFan.icon.png',
'Move Control 2': "moveControl1.icon.png", 'Cross Control': "crossControl.icon.png",
'Move Control 1': 'moveControl2.icon.png', 'Plus Nurbs 1': 'plusNurbs.icon.png'
}
# 3D nurbs types
nurbsType3D = {
'Crown Curve': 'crownCurve.icon.png', 'Cube Nurbs': 'cubeCurve.icon.png',
'Cube Nurbs on base': "cubeOnBase.icon.png", 'Nail Arrow Up': 'nailArrowUp.icon.png',
'Rotation Control 1': "rotationControl.icon.png", 'Nail Arrow Down': 'nailArrowDown.icon.png',
'Diamond Control': "diamond.icon.png", 'Single Rotation': "singleRotateControl.icon.png",
'Sphere Control': "sphereControl.icon.png", 'Spike Cross Control': "spikeCrossControl.icon.png",
'Pyramid': 'pyramid.icon.png', 'Four Sides Arrow': 'fourSidesArrow.icon.png',
'Origin Control': 'orginControl.icon.png', 'Circle Arrow 3D': 'threeDcircleArrow.icon.png',
'Arrow Both Sides': 'arrowBothSide.icon.png', 'Style Arrow 3D': 'threeDstyleArrow.icon.png',
'Jaw Control': 'headJawControl.icon.png', 'Two Way Arrow': 'twoWayArrow.icon.png',
'Locator Control': 'locatorControl.icon.png', 'Sphere Square': 'sphereSquare.icon.png',
'Ear Control': 'earControl.icon.png', 'Half Sphere': 'halfSphere.icon.png',
'Rotation Control 2': 'twoAxisRotation.icon.png', 'Fish Nail': 'fishNail.icon.png',
'Cylinder Nurbs': 'cylinderCurve.icon.png', 'Point Mark': 'pointNote.icon.png',
'Tongue Control': 'tongueControl.icon.png', 'Zig Zag Circle': 'zigZagCircle.icon.png'
}
# get the paths of plt.maya.icon folder
scrIcons = os.path.join(os.getenv(__root__), 'imgs', 'maya.icon')
def __init__(self, dock=True):
if dock:
parent = getDock()
else:
deleteDock()
try:
cmds.deleteUI('DAMGtoolBoxII')
except:
logger.debug('No previous UI exists')
parent = QtWidgets.QDialog(parent=getMayaMainWindow())
parent.setObjectName('DAMGtoolBoxII')
parent.setWindowTitle('DAMG Tool Box II - Nurbs/Curver/Controller AIO')
self.layout = QtWidgets.QVBoxLayout(parent)
super(ToolBoxII, self).__init__(parent=parent)
# the library variable points to an instance of our controller library
self.library = ControllerLibrary()
# every time we create a new instance, we will automatically build our UI and populate it
self.buildUI()
self.populateLibrarySection()
self.populateManagerSection()
self.parent().layout().addWidget(self)
if not dock:
parent.show()
# -------------------------------------------
# BUILD UI
# -------------------------------------------
def buildUI(self):
"""
This is the main method that executes every UI element and shows the UI
:return: DAMG tool box II - All about controller
"""
# ---------------------------------------------------------------------------------------------------------
# Variables to manage UI elements
btnW = [0, 60, 50]
txtW = [0, 20, 75]
txfW = [0, 150]
cbbW = [0, 100, 150]
size = [0, 50, 37.5, 50, 25]
t1W = 4
top1X = [0, 0, 1, 2, 3]
top1Y = [0, 0, 0, 0, 0]
top1H = [0, 1, 1, 1, 1]
top1W = [0, t1W, t1W, t1W, t1W]
top1L = ['', '2D Style ', '3D Style']
top1B = ['', 'Save', 'Import', 'Remove', 'Refresh']
t2Y = top1W[4]
t2W = 7
top2X = top1X
top2Y = [0, t2Y, t2Y, t2Y, t2Y]
top2H = [0, 1, 1, 2, 1]
top2W = [0, t2W, t2W, t2W, t2W]
top2L = ['', ' 2D: ', ' 3D: ']
top2B = ['', 'Create', 'Create', 'Refresh']
m1W = 4
mid1X = [0, 4, 5, 6, 7]
mid1Y = [0, 0, 0, 0, 0]
mid1H = [0, 1, 1, 1, 1]
mid1W = [0, m1W, m1W / 2, m1W, m1W]
mid1L = ['', 'Preset 2D name Controller', 'Preset 3D name Controller']
mid1B = ['', 'Group', 'Center Pivot', 'Freeze Transform']
m2Y = mid1W[4]
m2W = 3
mid2X = mid1X
mid2Y = [0, m2Y, m2Y, m2Y, m2Y]
mid2H = [0, 1, 1, 1, 1]
mid2W = [0, m2W, m2W, m2W, m2W]
mid2L = ['', 'Change color selected controller']
mid2B = ['', 'Left->Red & Right->Blue']
m3Y = mid2W[4] + mid1W[4]
m3W = 4
mid3X = mid2X
mid3Y = [0, m3Y, m3Y, m3Y, m3Y, m3Y]
mid3H = [0, 1, 1, 1, 1]
mid3W = [0, m3W, m3W, m3W, m3W, m3W]
mid3L = [0, 'Text', 'Fonts', 'CurveText', 'MIRROR', 'ADD ATTRIBUTE', 'Long Name', 'Short Name',
'Extra Functions',
'TEXT CURVE', 'MIRROR']
mid3B = [0, 'Create', 'X', 'Y', 'Z', 'Add Attribute', 'Left Hand', 'Left Foot', 'Right Hand', 'Right Foot']
top1 = {'title': 'USER ASSETS', 'X': top1X, 'Y': top1Y, 'H': top1H, 'W': top1W,
'btnW': btnW, 'btn': top1B, 'txtW': txtW, 'tfW': txfW, 'ccbW': cbbW, 'label': top1L, 'size': size}
top2 = {'title': 'CONTROLLER MANAGER', 'X': top2X, 'Y': top2Y, 'H': top2H, 'W': top2W,
'btnW': btnW, 'btn': top2B, 'txtW': txtW, 'tfW': txfW, 'ccbW': cbbW, 'label': top2L, 'size': size}
mid1 = {'title': 'CONTROLLER ASSETS', 'X': mid1X, 'Y': mid1Y, 'H': mid1H, 'W': mid1W,
'btnW': btnW, 'btn': mid1B, 'txtW': txtW, 'tfW': txfW, 'ccbW': cbbW, 'label': mid1L, 'size': size}
mid2 = {'title': 'COLOR', 'X': mid2X, 'Y': mid2Y, 'H': mid2H, 'W': mid2W,
'btnW': btnW, 'btn': mid2B, 'txtW': txtW, 'tfW': txfW, 'ccbW': cbbW, 'label': mid2L, 'size': size}
mid3 = {'title': 'FUNCTIONS', 'X': mid3X, 'Y': mid3Y, 'H': mid3H, 'W': mid3W,
'btnW': btnW, 'btn': mid3B, 'txtW': txtW, 'tfW': txfW, 'ccbW': cbbW, 'label': mid3L, 'size': size}
# --------------------------------------------------------------------------------------------------------
# MAIN LAYOUT STRUCTURE
# --------------------------------------------------------------------------------------------------------
# Main Layout
self.layout = QtWidgets.QGridLayout(self)
# self.layout.setContentsMargins(QtCore.QMargins(5,5,5,5))
# --------------------------------------------------------------------------------------------------------
# TOP SECTION
# Controller Library section (TOP1)
self.controllerLibraryUI(top1)
# Controller Manager section (TOP2)
self.controllerManagerUI(top2)
# Channel Box section (TOP3)
# self.channelbox()
# --------------------------------------------------------------------------------------------------------
# MID SECTION
# Quick Access section (MID1)
self.controllerQuickAssetUI(mid1)
# Color Pallet section (MID2)
self.colorPalletUI(mid2)
# Extra Function section (MID3)
self.extraFunctions(mid3)
# --------------------------------------------------------------------------------------------------------
# BOT SECTION
# UI ELEMENTS
# -------------------------------------------
# TOP
# Top1 Layout
def controllerLibraryUI(self, top1):
"""
This will define the layout of the first (left) column.
return:
A library UI from which you can load/save your own controllers.
"""
# ---------------------------------------------------------------------------------------------------------
# LIBRARY SECTION
# ---------------------------------------------------------------------------------------------------------
# Title
# ---------------------------------------------------------------------------------------------------------
# Create QLabel (text)
libraryLabel = QtWidgets.QLabel(top1['title'])
libraryLabel.setAlignment(QtCore.Qt.AlignCenter)
self.layout.addWidget(libraryLabel, top1['X'][1], top1['Y'][1], top1['H'][1], top1['W'][1])
# Header
# ---------------------------------------------------------------------------------------------------------
# Create QHBoxLayout Widget (text)
libHeaderWidget = QtWidgets.QWidget()
# libHeaderWidget.setSizePolicy( QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum )
libHeaderLayout = QtWidgets.QHBoxLayout(libHeaderWidget)
# libHeaderLayout.setContentsMargins(QtCore.QMargins(2,2,2,2))
libHeaderScrollArea = QtWidgets.QScrollArea()
libHeaderScrollArea.setWidget(libHeaderWidget)
libHeaderScrollArea.setWidgetResizable(True)
libHeaderScrollArea.setMaximumHeight(45)
# Create QLineEdit
self.layout.addWidget(libHeaderScrollArea, top1['X'][2], top1['Y'][2], top1['H'][2], top1['W'][2])
self.saveNameField = QtWidgets.QLineEdit()
self.saveNameField.setMinimumWidth(top1['btnW'][1])
libHeaderLayout.addWidget(self.saveNameField)
# Create QPlushButton
saveBtn = QtWidgets.QPushButton(top1['btn'][1])
saveBtn.setMinimumWidth(top1['btnW'][1])
saveBtn.clicked.connect(self.saveItem)
libHeaderLayout.addWidget(saveBtn)
# Body - listWidget, load library from local computer
# ---------------------------------------------------------------------------------------------------------
# Create QListWidget
buf = 12
self.listLibWidget = QtWidgets.QListWidget()
self.listLibWidget.setViewMode(QtWidgets.QListWidget.IconMode)
self.listLibWidget.setIconSize(QtCore.QSize(top1['size'][1], top1['size'][1]))
self.listLibWidget.setResizeMode(QtWidgets.QListWidget.Adjust)
self.listLibWidget.setGridSize(QtCore.QSize(top1['size'][1] + buf, top1['size'][1] + buf))
self.layout.addWidget(self.listLibWidget, top1['X'][3], top1['Y'][3], top1['H'][3], top1['W'][3])
# Library footer - 3 buttons: import, refresh, close
# ---------------------------------------------------------------------------------------------------------
# Create QGridLayout Widget
libFooterWidget = QtWidgets.QWidget()
# libFooterWidget.setSizePolicy( QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum )
self.libFooterLayout = QtWidgets.QGridLayout(libFooterWidget)
self.libFooterLayout.setContentsMargins(QtCore.QMargins(2, 2, 2, 2))
# Create QScrollArea
scrollLibArea = QtWidgets.QScrollArea()
scrollLibArea.setWidget(libFooterWidget)
scrollLibArea.setWidgetResizable(True)
scrollLibArea.setMaximumHeight(45)
self.layout.addWidget(scrollLibArea, top1['X'][4], top1['Y'][4], top1['H'][4], top1['W'][4])
# Create QPlushButton
importLibBtn = QtWidgets.QPushButton(top1['btn'][2])
importLibBtn.setMinimumWidth(top1['btnW'][1])
importLibBtn.clicked.connect(self.loadItem)
self.libFooterLayout.addWidget(importLibBtn, 0, 0)
# # Create QPlushButton
referenceBtn = QtWidgets.QPushButton(top1['btn'][4])
referenceBtn.setMinimumWidth(top1['btnW'][1])
referenceBtn.clicked.connect(self.referenceItem)
self.libFooterLayout.addWidget(referenceBtn, 0, 1)
#
# Create QPlushButton
removeBtn = QtWidgets.QPushButton(top1['btn'][3])
removeBtn.setMinimumWidth(top1['btnW'][1])
removeBtn.clicked.connect(self.removeItem)
self.libFooterLayout.addWidget(removeBtn, 0, 2)
# Top2 Layout
def controllerManagerUI(self, top2):
# ---------------------------------------------------------------------------------------------------------
# CONTROLLER MANAGER SECTION
# ---------------------------------------------------------------------------------------------------------
# Manager section title
# ---------------------------------------------------------------------------------------------------------
# Create QLabel
managerLabel = QtWidgets.QLabel(top2['title'])
managerLabel.setAlignment(QtCore.Qt.AlignCenter)
self.layout.addWidget(managerLabel, top2['X'][1], top2['Y'][1], top2['H'][1], top2['W'][1])
# Header
# ---------------------------------------------------------------------------------------------------------
# Create QHBoxLayout Widget
controllerManagerHeaderWidget = QtWidgets.QWidget()
controllerManagerHeaderWidget.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
controllerManagerHeaderLayout = QtWidgets.QHBoxLayout(controllerManagerHeaderWidget)
# controllerManagerHeaderLayout.setContentsMargins(QtCore.QMargins(2,2,2,2))
controlManagerHeaderScrollArea = QtWidgets.QScrollArea()
controlManagerHeaderScrollArea.setWidget(controllerManagerHeaderWidget)
controlManagerHeaderScrollArea.setWidgetResizable(True)
controlManagerHeaderScrollArea.setMaximumHeight(45)
self.layout.addWidget(controlManagerHeaderScrollArea, top2['X'][2], top2['Y'][2], top2['H'][2], top2['W'][2])
# Create QLabel
text2D = QtWidgets.QLabel(top2['label'][1])
text2D.setMinimumWidth(top2['txtW'][1])
text2D.setMaximumWidth(top2['txtW'][1])
controllerManagerHeaderLayout.addWidget(text2D)
# Create QComboBox
self.nurbsType2DCB = QtWidgets.QComboBox()
for nurbsType in sorted(self.nurbsType2D):
self.nurbsType2DCB.addItem(nurbsType)
controllerManagerHeaderLayout.addWidget(self.nurbsType2DCB)
# Create QPushButton
create2DBtn = QtWidgets.QPushButton(top2['btn'][1])
create2DBtn.setMinimumWidth(top2['btnW'][1])
create2DBtn.clicked.connect(self.create2DController)
controllerManagerHeaderLayout.addWidget(create2DBtn)
# Create QLabel
text3D = QtWidgets.QLabel(top2['label'][1])
text3D.setMinimumWidth(top2['txtW'][1])
text3D.setMaximumWidth(top2['txtW'][1])
controllerManagerHeaderLayout.addWidget(text3D)
# Create QComboBox
self.nurbsType3DCB = QtWidgets.QComboBox()
for nurbsType in sorted(self.nurbsType3D):
self.nurbsType3DCB.addItem(nurbsType)
controllerManagerHeaderLayout.addWidget(self.nurbsType3DCB)
# Create QPushButton
create3DBtn = QtWidgets.QPushButton(top2['btn'][2])
create3DBtn.setMinimumWidth(top2['btnW'][1])
create3DBtn.clicked.connect(self.create3DController)
controllerManagerHeaderLayout.addWidget(create3DBtn)
refreshBtn = QtWidgets.QPushButton(top2['btn'][3])
refreshBtn.setMinimumWidth(top2['btnW'][1])
refreshBtn.clicked.connect(self.populateAll)
controllerManagerHeaderLayout.addWidget(refreshBtn)
# Manager Body - scrollWidget
# ---------------------------------------------------------------------------------------------------------
# Create QWidget
scrollManagerWidget = QtWidgets.QWidget()
scrollManagerWidget.setSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum)
self.scrollLayout = QtWidgets.QVBoxLayout(scrollManagerWidget)
# self.scrollLayout.setContentsMargins(QtCore.QMargins(2,2,2,2))
# Create QScrollArea
scrollManagerArea = QtWidgets.QScrollArea()
scrollManagerArea.setWidgetResizable(True)
scrollManagerArea.setWidget(scrollManagerWidget)
self.layout.addWidget(scrollManagerArea, top2['X'][3], top2['Y'][3], top2['H'][3], top2['W'][3])
# Top 3 Layout
def channelbox(self):
title = QtWidgets.QLabel('CHANNEL BOX')
title.setAlignment(QtCore.Qt.AlignCenter)
self.layout.addWidget(title, 0, 11)
cbHeaderWidget = QtWidgets.QWidget()
cbHeaderWidget.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
cbHeaderLayout = QtWidgets.QVBoxLayout(cbHeaderWidget)
cbHeaderScrollArea = QtWidgets.QScrollArea()
cbHeaderScrollArea.setWidget(cbHeaderWidget)
cbHeaderScrollArea.setWidgetResizable(True)
self.layout.addWidget(cbHeaderScrollArea, 1, 11, 5, 1)
channelBoxWidget = ChanelBox()
cbHeaderLayout.addWidget(channelBoxWidget)
# -------------------------------------------
# MID
# Mid1 Layout
def controllerQuickAssetUI(self, mid1):
"""
This will define a layout in first top column
return:
A DAMG CONTROLLER LIBRARY UI that you can create controllers
"""
# ---------------------------------------------------------------------------------------------------------
# DAMG CONTROLLER LIBRARY SECTION
# ---------------------------------------------------------------------------------------------------------
# Title
# ---------------------------------------------------------------------------------------------------------
# quickAccessLabel = QtWidgets.QLabel( mid1['title'] )
# quickAccessLabel.setAlignment( QtCore.Qt.AlignCenter )
# self.layout.addWidget( quickAccessLabel, mid1['X'][1], mid1['Y'][1], mid1['H'][1], mid1['W'][1])
# Header
# ---------------------------------------------------------------------------------------------------------
# Create QHBoxLayout Widget
quickAccessHeaderWidget = QtWidgets.QWidget()
quickAccessLayout = QtWidgets.QGridLayout(quickAccessHeaderWidget)
self.layout.addWidget(quickAccessHeaderWidget, mid1['X'][1], mid1['Y'][1], mid1['H'][1], mid1['W'][1])
# Create QLabel (text)
label2D = QtWidgets.QLabel(mid1['label'][1])
label2D.setAlignment(QtCore.Qt.AlignCenter)
quickAccessLayout.addWidget(label2D, 0, 0, 1, 4)
# Create QLabel (text)
label3D = QtWidgets.QLabel(mid1['label'][2])
label3D.setAlignment(QtCore.Qt.AlignCenter)
quickAccessLayout.addWidget(label3D, 0, 4, 1, 4)
# Body
# ---------------------------------------------------------------------------------------------------------
# Create QWidget (2D nurbs)
scrollNurbs2DWidget = QtWidgets.QWidget()
# scrollNurbs2DWidget.setSizePolicy( QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum )
        self.quickAccess2DLayout = QtWidgets.QGridLayout(scrollNurbs2DWidget)
# Create QScrollArea
scrollNurbs2DArea = QtWidgets.QScrollArea()
scrollNurbs2DArea.setWidgetResizable(True)
# scrollNurbs2DArea.setSizePolicy( QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum )
scrollNurbs2DArea.setWidget(scrollNurbs2DWidget)
self.layout.addWidget(scrollNurbs2DArea, mid1['X'][2], mid1['Y'][2], mid1['H'][2], mid1['W'][2])
# Create icon okButton
nurbs2Dnames = [key for key in self.nurbsType2D]
count2D = []
z = mid1['W'][2] + 2
        for x in range(len(nurbs2Dnames) // z + 1):
for y in range(z):
if len(count2D) >= len(nurbs2Dnames):
break
else:
index = len(count2D)
count2D.append('%s,%s' % (x, y))
nurbsType = nurbs2Dnames[index]
iconPth = os.path.join(self.scrIcons, self.nurbsType2D[nurbsType])
# print iconPth
# print os.path.exists(iconPth)
icon = QtGui.QIcon(iconPth)
toolTip = "Create a showLayout_new " + nurbsType
button = marv.RenderSetupButton(self, icon, mid1['size'][2])
button.setMinimumSize(mid1['size'][2], mid1['size'][2])
button.setMaximumSize(mid1['size'][2], mid1['size'][2])
button.setToolTip(toolTip)
button.clicked.connect(partial(self.create2DController, nurbsType))
                    self.quickAccess2DLayout.addWidget(button, x, y)
y += 1
x += 1
# Create QWidget (3D nurbs)
scrollNurbs3DWidget = QtWidgets.QWidget()
# scrollNurbs3DWidget.setSizePolicy( QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum )
self.quickAccess3DLayout = QtWidgets.QGridLayout(scrollNurbs3DWidget)
# Create QScrollArea
scrollNurbs3DArea = QtWidgets.QScrollArea()
scrollNurbs3DArea.setWidgetResizable(True)
# scrollNurbs3DWidget.setSizePolicy( QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum )
scrollNurbs3DArea.setWidget(scrollNurbs3DWidget)
self.layout.addWidget(scrollNurbs3DArea, mid1['X'][2], mid1['Y'][2] + mid1['W'][2], mid1['H'][2], mid1['W'][2])
# Create icon okButton
nurbs3Dnames = [key for key in self.nurbsType3D]
count3D = []
        for x in range(len(nurbs3Dnames) // z + 1):
for y in range(z):
if len(count3D) >= len(nurbs3Dnames):
break
else:
index = len(count3D)
count3D.append('%s,%s' % (x, y))
nurbsType = nurbs3Dnames[index]
iconPth = os.path.join(self.scrIcons, self.nurbsType3D[nurbsType])
# print iconPth
# print os.path.exists(iconPth)
icon = QtGui.QIcon(iconPth)
toolTip = "Create a showLayout_new " + nurbsType
button = marv.RenderSetupButton(self, icon, mid1['size'][2])
button.setMinimumSize(mid1['size'][2], mid1['size'][2])
button.setMaximumSize(mid1['size'][2], mid1['size'][2])
button.setToolTip(toolTip)
button.clicked.connect(partial(self.create3DController, nurbsType))
self.quickAccess3DLayout.addWidget(button, x, y)
y += 1
x += 1
# Footer
# ---------------------------------------------------------------------------------------------------------
# Create QHBoxLayout Widget
quickAccessFooterWidget = QtWidgets.QWidget()
quickAccessFooterLayout = QtWidgets.QHBoxLayout(quickAccessFooterWidget)
self.layout.addWidget(quickAccessFooterWidget, mid1['X'][3], mid1['Y'][3], mid1['H'][3], mid1['W'][3])
# Create QPushButton
quickAccessBtn1 = QtWidgets.QPushButton(mid1['btn'][1])
quickAccessBtn1.clicked.connect(self.groupCenter)
quickAccessBtn1.setMinimumWidth(mid1['btnW'][1])
quickAccessFooterLayout.addWidget(quickAccessBtn1)
# Create QPushButton
quickAccessBtn2 = QtWidgets.QPushButton(mid1['btn'][2])
quickAccessBtn2.clicked.connect(self.centerPivot)
quickAccessBtn2.setMinimumWidth(mid1['btnW'][1])
quickAccessFooterLayout.addWidget(quickAccessBtn2)
# Create QPushButton
quickAccessBtn3 = QtWidgets.QPushButton(mid1['btn'][3])
quickAccessBtn3.clicked.connect(self.freezeTransformation)
quickAccessBtn3.setMinimumWidth(mid1['btnW'][1])
quickAccessFooterLayout.addWidget(quickAccessBtn3)
# Mid2 Layout
def colorPalletUI(self, mid2):
# ---------------------------------------------------------------------------------------------------------
# DAMG COLOR PALLET SECTION
# ---------------------------------------------------------------------------------------------------------
# Title
# ---------------------------------------------------------------------------------------------------------
# # Create QLabel
# colorPalletTitle = QtWidgets.QLabel( mid2['title'] )
# colorPalletTitle.setAlignment( QtCore.Qt.AlignCenter )
# self.layout.addWidget( colorPalletTitle, mid2['X'][1], mid2['Y'][1], mid2['H'][1], mid2['W'][1])
# Header
# ---------------------------------------------------------------------------------------------------------
# Create QHBoxlayout Widget
colorPalletHeaderWidget = QtWidgets.QWidget()
colorPalletHeaderLayout = QtWidgets.QHBoxLayout(colorPalletHeaderWidget)
self.layout.addWidget(colorPalletHeaderWidget, mid2['X'][1], mid2['Y'][1], mid2['H'][1], mid2['W'][1])
# Create QLabel
colorPalletLabel = QtWidgets.QLabel(mid2['label'][1])
colorPalletLabel.setAlignment(QtCore.Qt.AlignCenter)
colorPalletHeaderLayout.addWidget(colorPalletLabel)
# Body
# ---------------------------------------------------------------------------------------------------------
# Create QWidget
# color index to RGB for okButton color
rgb = {0: (.4, .4, .4), 16: (1, 1, 1), 3: (.75, .75, .75), 2: (.5, .5, .5), 1: (0, 0, 0), 18: (0, .7, 1),
28: (0, .5, .5),
29: (0, .2, .5), 15: (0, .2, .7), 6: (0, 0, 1), 5: (0, 0, 0.4), 19: (0, 1, .4), 14: (0, 1, 0),
23: (0, .7, .1),
26: (.4, .6, 0), 27: (0, .5, .2), 7: (0, 0.2, 0), 13: (1, 0, 0), 24: (.7, .4, .1), 10: (.7, 0.2, 0),
4: (.5, 0, 0.02), 11: (.3, 0.1, 0.1), 12: (.3, 0, 0), 20: (1, .6, .6), 21: (1, .6, .4), 9: (.7, 0, .7),
30: (.4, .2, .6), 31: (.5, .1, .3), 8: (.15, 0, .15), 22: (1, 1, .4), 17: (1, 1, 0), 25: (.6, .6, .2), }
# Create QWidget
scrollColorWidget = QtWidgets.QWidget()
# scrollColorWidget.setSizePolicy( QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum )
self.scrollColorLayout = QtWidgets.QGridLayout(scrollColorWidget)
# Create QScrollArea
scrollColorArea = QtWidgets.QScrollArea()
scrollColorArea.setWidgetResizable(True)
# scrollColorArea.setSizePolicy( QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum )
scrollColorArea.setWidget(scrollColorWidget)
self.layout.addWidget(scrollColorArea, mid2['X'][2], mid2['Y'][2], mid2['H'][2], mid2['W'][2])
# Create icon okButton
rgbKeys = [key for key in rgb]
rgbKeys = sorted(rgbKeys)
countColor = []
z = mid2['W'][2] + 1
        for x in range(len(rgbKeys) // z + 1):
for y in range(z):
key = len(countColor)
countColor.append('%s,%s' % (x, y))
if key >= len(rgbKeys):
break
else:
index = rgbKeys[key]
r, g, b = [c * 255 for c in rgb[index]]
button = QtWidgets.QPushButton()
button.setMinimumSize(mid2['size'][3], mid2['size'][4])
button.setMaximumSize(mid2['size'][3], mid2['size'][4])
                    button.setStyleSheet('background-color: rgba(%s,%s,%s,1.0)' % (r, g, b))
button.clicked.connect(partial(self.setColor, index))
self.scrollColorLayout.addWidget(button, x, y)
y += 1
x += 1
autoSideColorBtn = QtWidgets.QPushButton(mid2['btn'][1])
# autoSideColorBtn.setSizePolicy( QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum )
autoSideColorBtn.clicked.connect(self.changeColorBySides)
self.scrollColorLayout.addWidget(autoSideColorBtn, x + 1, 0, mid2['H'][2], mid2['W'][2] + 1)
# Mid3 Layout
def extraFunctions(self, mid3):
fontFull = cmds.fontDialog(fl=True)
fontMain = []
for i in range(len(fontFull)):
fontMain.append(fontFull[i].split(" - ")[0])
fontList = sorted(list(set(fontMain)))
# title
extraFunctionsLabel = QtWidgets.QLabel(mid3['label'][8])
extraFunctionsLabel.setAlignment(QtCore.Qt.AlignCenter)
self.layout.addWidget(extraFunctionsLabel, mid3['X'][1], mid3["Y"][1], mid3['H'][1],
mid3['W'][1])
# Body
bodyWidget = QtWidgets.QWidget()
# bodyWidget.setSizePolicy( QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum )
bodyWidget.setContentsMargins(2, 2, 2, 2)
bodyLayout = QtWidgets.QVBoxLayout(bodyWidget)
self.layout.addWidget(bodyWidget, mid3['X'][2], mid3["Y"][2], mid3['H'][2],
mid3['W'][2])
curveTextWidget = QtWidgets.QWidget()
# curveTextWidget.setSizePolicy( QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum )
curveTextLayout = QtWidgets.QHBoxLayout(curveTextWidget)
# curveTextLayout.setContentsMargins( QtCore.QMargins( 2, 2, 2, 2 ) )
curveTextScrollArea = QtWidgets.QScrollArea()
curveTextScrollArea.setWidget(curveTextWidget)
curveTextScrollArea.setWidgetResizable(True)
# curveTextScrollArea.setContentsMargins( QtCore.QMargins( 2, 2, 2, 2 ) )
curveTextScrollArea.setMaximumHeight(45)
bodyLayout.addWidget(curveTextScrollArea)
# textCurveLabel = QtWidgets.QLabel(mid3['label'][9])
# textCurveLabel.setAlignment( QtCore.Qt.AlignCenter )
# curveTextLayout.addWidget(textCurveLabel, 0,0,1,4)
createBtn = QtWidgets.QPushButton(mid3['btn'][1])
createBtn.clicked.connect(self.createTextCurve)
createBtn.setMinimumWidth(mid3['btnW'][1])
curveTextLayout.addWidget(createBtn)
self.fontList = QtWidgets.QComboBox()
self.fontList.setMaximumWidth(mid3['ccbW'][2])
for font in fontList:
self.fontList.addItem(font)
curveTextLayout.addWidget(self.fontList)
self.functionsNameField = QtWidgets.QLineEdit()
self.functionsNameField.setMinimumWidth(mid3['tfW'][1])
curveTextLayout.addWidget(self.functionsNameField)
mirrorWidget = QtWidgets.QWidget()
# mirrorWidget.setSizePolicy( QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum )
mirrorLayout = QtWidgets.QHBoxLayout(mirrorWidget)
# mirrorLayout.setContentsMargins( QtCore.QMargins( 5, 5, 5, 5 ) )
mirrorScrollArea = QtWidgets.QScrollArea()
mirrorScrollArea.setWidget(mirrorWidget)
mirrorScrollArea.setWidgetResizable(True)
# mirrorScrollArea.setContentsMargins( QtCore.QMargins( 5, 5, 5, 5 ) )
mirrorScrollArea.setMaximumHeight(45)
bodyLayout.addWidget(mirrorScrollArea)
# mirrorLabel = QtWidgets.QLabel(mid3['label'][10])
# mirrorLabel.setAlignment(QtCore.Qt.AlignCenter)
# mirrorLayout.addWidget(mirrorLabel, 0,0,1,4)
self.copyCheck = QtWidgets.QCheckBox('Copy')
mirrorLayout.addWidget(self.copyCheck)
xbtn = QtWidgets.QPushButton(mid3['btn'][2])
xbtn.clicked.connect(self.mirrorX)
xbtn.setMinimumWidth(mid3['btnW'][1])
mirrorLayout.addWidget(xbtn)
ybtn = QtWidgets.QPushButton(mid3['btn'][3])
ybtn.clicked.connect(self.mirrorY)
ybtn.setMinimumWidth(mid3['btnW'][1])
mirrorLayout.addWidget(ybtn)
zbtn = QtWidgets.QPushButton(mid3['btn'][4])
zbtn.clicked.connect(self.mirrorZ)
zbtn.setMinimumWidth(mid3['btnW'][1])
mirrorLayout.addWidget(zbtn)
addAttrWidget = QtWidgets.QWidget()
# addAttrWidget.setSizePolicy( QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum )
addAttrLayout = QtWidgets.QGridLayout(addAttrWidget)
addAttrScrollArea = QtWidgets.QScrollArea()
addAttrScrollArea.setWidget(addAttrWidget)
addAttrScrollArea.setWidgetResizable(True)
# addAttrScrollArea.setContentsMargins( QtCore.QMargins( 2, 2, 2, 2 ) )
addAttrScrollArea.setMaximumHeight(180)
bodyLayout.addWidget(addAttrScrollArea)
titleLongName = QtWidgets.QLabel(mid3['label'][6])
titleLongName.setAlignment(QtCore.Qt.AlignCenter)
addAttrLayout.addWidget(titleLongName, 0, 0)
self.longNameField = QtWidgets.QLineEdit()
self.longNameField.setMinimumWidth(mid3['tfW'][1])
addAttrLayout.addWidget(self.longNameField, 0, 1, 1, 3)
titleShortName = QtWidgets.QLabel(mid3['label'][7])
titleShortName.setAlignment(QtCore.Qt.AlignCenter)
addAttrLayout.addWidget(titleShortName, 1, 0)
self.shortNameField = QtWidgets.QLineEdit()
self.shortNameField.setMinimumWidth(mid3['tfW'][1])
addAttrLayout.addWidget(self.shortNameField, 1, 1, 1, 3)
minLabel = QtWidgets.QLabel('Min')
minLabel.setAlignment(QtCore.Qt.AlignCenter)
addAttrLayout.addWidget(minLabel, 2, 0)
defaultLabel = QtWidgets.QLabel('Default')
defaultLabel.setAlignment(QtCore.Qt.AlignCenter)
addAttrLayout.addWidget(defaultLabel, 2, 1)
maxLabel = QtWidgets.QLabel('Max')
maxLabel.setAlignment(QtCore.Qt.AlignCenter)
addAttrLayout.addWidget(maxLabel, 2, 2)
fbLabel = QtWidgets.QLabel('F/B')
fbLabel.setAlignment(QtCore.Qt.AlignCenter)
addAttrLayout.addWidget(fbLabel, 2, 3)
self.minField = QtWidgets.QLineEdit()
addAttrLayout.addWidget(self.minField, 3, 0)
self.defaultField = QtWidgets.QLineEdit()
addAttrLayout.addWidget(self.defaultField, 3, 1)
self.maxField = QtWidgets.QLineEdit()
addAttrLayout.addWidget(self.maxField, 3, 2)
self.fbComboBox = QtWidgets.QComboBox()
self.fbComboBox.addItem('Float')
self.fbComboBox.addItem('Boolean')
addAttrLayout.addWidget(self.fbComboBox, 3, 3)
addAttrBtn = QtWidgets.QPushButton(mid3['btn'][5])
addAttrBtn.clicked.connect(self.addAttr)
addAttrLayout.addWidget(addAttrBtn, 4, 0, 1, 4)
leftHandBtn = QtWidgets.QPushButton(mid3['btn'][6])
leftHandBtn.setMinimumWidth(mid3['btnW'][1])
leftHandBtn.clicked.connect(self.leftHandPreset)
addAttrLayout.addWidget(leftHandBtn, 5, 0)
leftFootBtn = QtWidgets.QPushButton(mid3['btn'][7])
leftFootBtn.setMinimumWidth(mid3['btnW'][1])
leftFootBtn.clicked.connect(self.leftFootPreset)
addAttrLayout.addWidget(leftFootBtn, 5, 1)
rightHandBtn = QtWidgets.QPushButton(mid3['btn'][8])
rightHandBtn.setMinimumWidth(mid3['btnW'][1])
rightHandBtn.clicked.connect(self.rightHandPreset)
addAttrLayout.addWidget(rightHandBtn, 5, 2)
rightFootBtn = QtWidgets.QPushButton(mid3['btn'][9])
rightFootBtn.setMinimumWidth(mid3['btnW'][1])
rightFootBtn.clicked.connect(self.rightFootPreset)
addAttrLayout.addWidget(rightFootBtn, 5, 3)
# -------------------------------------------
# BOT
def thisSectionWillUpdateLater(self):
pass
# ****************************************** #
# -------------------------------------------
# Main Class Functions
# -------------------------------------------
# ****************************************** #
# -------------------------------------------
# Functions required in common
def warningFunction(self, message):
cmds.confirmDialog(t='Warning', m=message, b='OK')
cmds.warning(message)
def DAMGtoolBoxIIHelp(self, *args):
if cmds.window('helpDAMGToolBoxII', exists=True):
cmds.deleteUI('helpDAMGToolBoxII')
cmds.window('helpDAMGToolBoxII', t="Help")
cmds.rowColumnLayout(nc=3, cw=[(1, 10), (2, 400), (3, 10)])
cmds.columnLayout()
cmds.text(l="")
cmds.setParent('..')
cmds.columnLayout()
cmds.text(l="")
        cmds.text(l='This tool has nurbs controllers that you can use in rigging')
        cmds.text(l='You can create any nurbs in the QUICK ASSETS section and color them as you want')
        cmds.text(l='You can make text nurbs with whatever fonts are installed on your computer')
        cmds.text(l='You can add attributes in CREATE NEW ATTRIBUTE or delete attributes in CHANNEL BOX')
        cmds.text(l='You can join all the shapes of nurbs into one in ADJUSTMENT')
        cmds.text(l='The group button groups the selection but keeps the pivot on the object itself, not at the center of the grid')
cmds.text(l="")
cmds.text('Have fun.')
cmds.text(l="")
cmds.setParent('..')
cmds.text(l="")
cmds.showWindow('helpDAMGToolBoxII')
def populateAll(self):
self.populateLibrarySection()
self.populateManagerSection()
# -------------------------------------------
# Top1 - Functions for user library sections
def loadItem(self):
"""load the currently selected controller"""
currentItem = self.listLibWidget.currentItem()
        if not currentItem:
            self.warningFunction('You must select an item')
            return
name = currentItem.text()
self.library.load(name)
self.populateAll()
def saveItem(self):
"""This saves the controller with the given file name"""
name = self.saveNameField.text()
if not name.strip():
cmds.warning("You must give a name")
cmds.confirmDialog(t='Warning', m='You must give a name', b='OK')
return
files = [f for f in os.listdir(DIRECTORY)]
for file in files:
if name in file:
                answer = cmds.confirmDialog(t='Confirm', m='File %s already exists, override?' % name,
                                            b=['Yes', 'No'], db='Yes', cb='No', dismissString='No')
                if answer == 'No':
                    return
self.library.save(name)
self.saveNameField.setText('')
self.populateAll()
def removeItem(self):
currentItem = self.listLibWidget.currentItem()
if not currentItem:
self.warningFunction('You must select something')
return
        self.library.remove(currentItem.text())
self.populateAll()
def referenceItem(self):
        currentItem = self.listLibWidget.currentItem()
        if not currentItem:
            self.warningFunction('You must select something')
            return
        name = currentItem.text()
self.library.reference(name)
self.populateAll()
def populateLibrarySection(self):
self.listLibWidget.clear()
self.library.find()
for name, info in self.library.items():
item = QtWidgets.QListWidgetItem(name)
self.listLibWidget.addItem(item)
screenshot = info.get('screenshot')
if screenshot:
icon = QtGui.QIcon(screenshot)
item.setIcon(icon)
# -------------------------------------------
# Top2 - Functions in controller manager
def addNurbs(self, nurbs):
widget = ControllerManager(nurbs)
self.scrollLayout.addWidget(widget)
widget.onSolo.connect(self.onSolo)
def onSolo(self, value):
nurbsWidgets = self.findChildren(ControllerManager)
for widget in nurbsWidgets:
if widget != self.sender():
widget.disableNurbs(value)
def populateManagerSection(self):
while self.scrollLayout.count():
widget = self.scrollLayout.takeAt(0).widget()
if widget:
widget.setVisible(False)
widget.deleteLater()
for nurbs in pm.ls(type=['nurbsCurve', 'nurbsSurface']):
self.addNurbs(nurbs)
# -------------------------------------------
# Mid1 - Functions in quick _assets
def create2DController(self, nurbsType=None):
if not nurbsType:
nurbsType = self.nurbsType2DCB.currentText()
func = ToolBoxIIfuncs.ToolBoxIIfuncs
func(nurbsType)
nurbs = cmds.ls(sl=True)[0]
cmds.rename(nurbs, nurbsType)
self.populateAll()
def create3DController(self, nurbsType=None, add=True):
if not nurbsType:
nurbsType = self.nurbsType3DCB.currentText()
func = ToolBoxIIfuncs.ToolBoxIIfuncs
func(nurbsType)
nurbs = cmds.ls(sl=True)[0]
cmds.rename(nurbs, nurbsType)
self.populateAll()
# -------------------------------------------
# Mid2 - Functions in color pallet
def setColor(self, index, *args):
selection = cmds.ls(sl=True)
for select in selection:
            shapes = cmds.listRelatives(select, ad=True, s=True, f=True) or []
for node in shapes:
cmds.setAttr(node + ".overrideRGBColors", 0)
cmds.setAttr(node + ".overrideEnabled", 1)
cmds.setAttr(node + ".overrideColor", index)
self.populateAll()
def changeColorBySides(self, *args):
a = cmds.ls(sl=True) or []
if len(a) == 0:
a = cmds.ls(type='nurbsCurve')
LNurbs = []
RNurbs = []
letterL = ["_L", "left", "Left"]
letterR = ["_R", "right", "Right"]
for nurbs in a:
for left in letterL:
if left in nurbs:
LNurbs.append(nurbs)
for right in letterR:
if right in nurbs:
RNurbs.append(nurbs)
for nurbs in LNurbs:
cmds.setAttr(nurbs + '.overrideEnabled', 1)
cmds.setAttr(nurbs + '.overrideColor', 13)
for nurbs in RNurbs:
cmds.setAttr(nurbs + '.overrideEnabled', 1)
cmds.setAttr(nurbs + '.overrideColor', 6)
self.populateAll()
# -------------------------------------------
# Mid3 - Functions in extra functions
def createTextCurve(self, *args):
list = []
getText = self.functionsNameField.text()
font = self.fontList.currentText()
if (len(getText) == 0):
message = "Text field is empty, can not create text curve"
self.warningFunction(message)
return
if (len(getText) > 0):
list.append(cmds.textCurves(f=font, t=getText))
for x in range(len(list) - 1):
cmds.makeIdentity(list[x + 1], apply=True, t=1, r=1, s=1, n=0)
shapeNode = cmds.listRelatives(list[x + 1], shapes=True)
                # re-parent the shapes under the first text transform before deleting the extra transform
                cmds.parent(shapeNode, list[0][0], r=True, s=True)
cmds.delete(list[x + 1])
            cmds.select(list[0])
            cmds.rename(list[0][0], getText)
self.populateAll()
def groupCenter(self, *args):
        a = cmds.ls(sl=True)
        if not a:
            self.warningFunction('You must select something')
            return
        cmds.group(n=a[0] + "_groups")
self.populateAll()
def centerPivot(self, *args):
a = cmds.ls(sl=True)
if (len(a) > 0):
cmds.xform(cp=True)
self.populateAll()
def freezeTransformation(self, *args):
a = cmds.ls(sl=True)
if (len(a) > 0):
cmds.makeIdentity(apply=True)
self.populateAll()
def mirrorY(self, *args):
copyValue = self.copyCheck.checkState()
curSel = cmds.ls(sl=True)
if len(curSel) > 0:
if copyValue:
cmds.duplicate()
cmds.group(n="controllers_mirror_group")
cmds.xform(os=True, piv=[0, 0, 0])
cmds.scale(1, -1, 1)
cmds.ungroup('controllers_mirror_group')
else:
cmds.warning("nothing selected")
self.populateAll()
def mirrorZ(self, *args):
copyValue = self.copyCheck.checkState()
curSel = cmds.ls(sl=True)
if len(curSel) > 0:
if copyValue:
cmds.duplicate()
cmds.group(n="controllers_mirror_group")
cmds.xform(os=True, piv=[0, 0, 0])
cmds.scale(1, 1, -1)
cmds.ungroup('controllers_mirror_group')
else:
cmds.warning("nothing selected")
self.populateAll()
def mirrorX(self, *args):
copyValue = self.copyCheck.checkState()
curSel = cmds.ls(sl=True)
if len(curSel) > 0:
if copyValue:
cmds.duplicate()
cmds.group(n="controllers_mirror_group")
cmds.xform(os=True, piv=[0, 0, 0])
cmds.scale(-1, 1, 1)
cmds.ungroup('controllers_mirror_group')
else:
cmds.warning("nothing selected")
self.populateAll()
def addAttr(self, *args):
objSel = cmds.ls(sl=True) or []
longName = self.longNameField.text()
shortName = self.shortNameField.text()
minNum = self.minField.text()
defNum = self.defaultField.text()
maxNum = self.maxField.text()
ForB = self.fbComboBox.currentText()
if objSel == []:
message = 'You must select something'
self.warningFunction(message)
return
if (len(longName) == 0):
message = "Long name can not be blank"
self.warningFunction(message)
return
if (len(longName.split(" ")) > 1):
message = "Long name contains unavailable character"
self.warningFunction(message)
return
        if len(shortName) == 0:
            shortName = longName
        # addAttr expects numbers for min/max/default, so convert the text-field values
        for obj in objSel:
            cmds.select(obj)
            if ForB == 'Boolean':
                cmds.addAttr(ln=longName, nn=shortName, at='bool', dv=1, k=True)
            elif ForB == 'Float':
                cmds.addAttr(ln=longName, nn=shortName, at='float', min=float(minNum),
                             max=float(maxNum), dv=float(defNum), k=True)
def leftHandPreset(self, *args):
objSel = cmds.ls(sl=True)
hand = ["thumb", "index", "middle", "ring", "little"]
for i in objSel:
cmds.select(i, r=True)
for item in hand:
longName = "L_" + item + "Finger_Curl"
niceName = "L_" + item + "F_Curl"
                cmds.addAttr(ln=longName, nn=niceName, at='float', min=-5, dv=0, max=15, k=True)
def rightHandPreset(self, *args):
objSel = cmds.ls(sl=True)
hand = ["thumb", "index", "middle", "ring", "little"]
for i in objSel:
cmds.select(i, r=True)
for item in hand:
longName = "R_" + item + "Finger_Curl"
niceName = "R_" + item + "F_Curl"
                cmds.addAttr(ln=longName, nn=niceName, at='float', min=-5, dv=0, max=15, k=True)
def leftFootPreset(self, *args):
objSel = cmds.ls(sl=True)
foot = ["big", "long", "middle", "ring", "pinky"]
for i in objSel:
cmds.select(i, r=True)
for item in foot:
longName = "L_" + item + "Toe_Curl"
niceName = "L_" + item + "T_Curl"
                cmds.addAttr(ln=longName, nn=niceName, at='float', min=-5, dv=0, max=15, k=True)
def rightFootPreset(self, *args):
objSel = cmds.ls(sl=True)
foot = ["big", "long", "middle", "ring", "pinky"]
for i in objSel:
cmds.select(i, r=True)
for item in foot:
longName = "R_" + item + "Toe_Curl"
niceName = "R_" + item + "T_Curl"
                cmds.addAttr(ln=longName, nn=niceName, at='float', min=-5, dv=0, max=15, k=True)
# --------------------------------------------------------------------------------------------------------
# END OF CODE
# --------------------------------------------------------------------------------------------------------
|
StarcoderdataPython
|
3963
|
_all__ = ["db_handler","coin_value_handler"]
|
StarcoderdataPython
|
3341467
|
from .common_algos import bin_to_decimal, hex_to_decimal
|
StarcoderdataPython
|
186942
|
<filename>main.py
import os
import requests
from urllib.parse import urlparse
from dotenv import load_dotenv
import argparse
def configure_parser():
    parser = argparse.ArgumentParser(description="""
    A tool for shortening a URL into a bitlink and for getting the number of clicks on a bitlink.
    The program talks to the Bitly service through an API key.
    Pass in a regular link and you get back a bitlink.
    Pass in a bitlink and you get back the number of clicks on it.
    """)
    parser.add_argument("user_link", nargs="+", help="Link to shorten, or a bitlink")
return parser
def check_link(api_key, user_link):
link_check = "https://api-ssl.bitly.com/v4/bitlinks/{bitlink}"
parsed_user_link = urlparse(user_link)
headers = {"Authorization": f"Bearer {api_key}"}
response = requests.get(
link_check.format(bitlink=f"{parsed_user_link.netloc}{parsed_user_link.path}"),
headers=headers,
)
check = response.ok
return check
def shorten_link(api_key, user_link):
bitlink_url = "https://api-ssl.bitly.com/v4/shorten"
headers = {"Authorization": f"Bearer {api_key}"}
payload = {"long_url": user_link}
response = requests.post(bitlink_url, headers=headers, json=payload)
response.raise_for_status()
bitlink = response.json()
return bitlink["id"]
def count_clicks(api_key, user_link):
click_url = "https://api-ssl.bitly.com/v4/bitlinks/{bitlink}/clicks/summary"
parsed_bitlink = urlparse(user_link)
headers = {"Authorization": f"Bearer {api_key}"}
payload = {
"unit": "day",
"units": -1,
}
response = requests.get(
click_url.format(bitlink=f"{parsed_bitlink.netloc}{parsed_bitlink.path}"),
params=payload,
headers=headers,
)
response.raise_for_status()
count_clicks = response.json()
return count_clicks["total_clicks"]
if __name__ == "__main__":
load_dotenv()
api_key = os.getenv("BITLY_API_KEY")
parser = configure_parser()
args = parser.parse_args()
for user_link in args.user_link:
        check = check_link(api_key, user_link)
        try:
            if not check:
                print("Bitlink: ", shorten_link(api_key, user_link))
            else:
                print(f"Your link was followed {count_clicks(api_key, user_link)} time(s)")
        except requests.exceptions.HTTPError:
            print("The link you entered is invalid")
|
StarcoderdataPython
|
1744744
|
# This file used by autocompletion module, don't use it in other purposes
class ScAddr:
def IsValid(self, other):
pass
def ToInt(self, other):
pass
def __eq__(self, other):
pass
def __ne__(self, other):
pass
def __rshift__(self, other):
pass
def rshift(self, other):
pass
class ScType:
def __eq__(self, other):
pass
def __ne__(self, other):
pass
def __or__(self, other):
pass
def __and__(self, other):
pass
def __rshift__(self, other):
pass
def rshift(self, other):
pass
def IsLink(self):
pass
def IsEdge(self):
pass
def IsNode(self):
pass
def IsValid(self):
pass
def IsConst(self):
pass
def IsVar(self):
pass
def ToInt(self):
pass
Unknown = 0
Const = 1
Var = 2
Node = 3
Link = 4
EdgeUCommon = 5
EdgeDCommon = 6
EdgeUCommonConst = 7
EdgeDCommonConst = 8
EdgeAccess = 9
EdgeAccessConstPosPerm = 10
EdgeAccessConstNegPerm = 11
EdgeAccessConstFuzPerm = 12
EdgeAccessConstPosTemp = 13
EdgeAccessConstNegTemp = 14
EdgeAccessConstFuzTemp = 15
EdgeUCommonVar = 16
EdgeDCommonVar = 17
EdgeAccessVarPosPerm = 18
EdgeAccessVarNegPerm = 19
EdgeAccessVarFuzPerm = 20
EdgeAccessVarPosTemp = 21
EdgeAccessVarNegTemp = 22
EdgeAccessVarFuzTemp = 23
NodeConst = 24
NodeVar = 25
LinkConst = 26
LinkVar = 27
NodeConstStruct = 28
NodeConstTuple = 29
NodeConstRole = 30
NodeConstNoRole = 31
NodeConstClass = 32
NodeConstAbstract = 33
NodeConstMaterial = 34
NodeVarStruct = 35
NodeVarTuple = 36
NodeVarRole = 37
NodeVarNoRole = 38
NodeVarClass = 39
NodeVarAbstract = 40
NodeVarMaterial = 41
class ScMemoryContext:
def CreateNode(self, nodeType):
return ScAddr()
def CreateEdge(self, edgeType, src, trg):
return ScAddr()
def CreateLink(self):
return ScAddr()
def DeleteElement(self, elAddr):
return False
def GetName(self):
return ''
def IsElement(self, addr):
        return False
def GetElementType(self, addr):
return ScType()
def GetEdgeInfo(self, addr):
return ()
def SetLinkContent(self, addr, content):
return False
def GetLinkContent(self, addr):
return ScLinkContent()
def Iterator3(self, src, edge, trg):
return ScIterator3()
def Iterator5(self, src, edge, trg, attrEdge, attrEl):
return ScIterator5()
def HelperResolveSystemIdtf(self, idtf, elType=ScType.Unknown):
return ScAddr()
def HelperSetSystemIdtf(self, idtf, addr):
return False
def HelperGetSystemIdtf(self, addr):
return ''
def HelperCheckEdge(self, src, trg, edgeType):
return False
def HelperGenTemplate(self, templ, params):
return ScTemplateGenResult()
def HelperSearchTemplate(self, templ):
return ScTemplateSearchResult()
def HelperBuildTemplate(self, addr):
return ScTemplate()
class ScIterator3:
def Next(self):
return False
def IsValid(self):
return False
def Get(self, idx):
return ScAddr()
class ScIterator5:
def Next(self):
return False
def IsValid(self):
return False
def Get(self, idx):
return ScAddr()
class ScLinkContent:
def AsString(self):
return ''
def AsInt(self):
return 0
def AsFloat(self):
return 0.0
class ScTemplateGenResult:
def Size(self):
return 0
def __getitem__(self, name):
return ScAddr()
class ScTemplateSearchResultItem:
def Size(self):
return 0
def __getitem__(self, name):
return ScAddr()
class ScTemplateSearchResult:
def Size(self):
return 0
def __getitem__(self, idx):
return ScTemplateSearchResultItem()
class ScTemplateGenParams:
def Add(self, paramName, value):
pass
def Get(self, paramName):
return ScAddr()
def IsEmpty(self):
return False
class ScTemplate:
def Triple(self, src, edge, trg):
return None
def TripleWithRelation(self, src, edge, trg, attrEdge, attrEl):
return None
def createScMemoryContext():
return ScMemoryContext()
def ScAddrFromHash(hash):
return ScAddr()
|
StarcoderdataPython
|
3234691
|
<reponame>Nik-Menendez/PyCudaAnalyzer<gh_stars>0
import os
from hep.cms.Dataset.CMSDataset import CMSDataset
from hep.root.TFile import TFile
# ______________________________________________________________________ ||
skim_dir = "/cmsuf/data/store/user/t2/users/klo/HToZaToLLGG/UFHZZLiteAnalyzer/HToZA_MC17_bkg/"
input_dir = "/cmsuf/data/store/user/t2/users/klo/HToZaToLLGG/HToZA_MC17_bkg/"
#tree_path_in_file = "Ana/passedEvents"
tree_path_in_file = "passedEvents"
hist_path_in_file = "Ana/sumWeights"
# ______________________________________________________________________ ||
DYJetsToLL = CMSDataset(
"DYLLJets",
[TFile(os.path.join(skim_dir,"DYJetsToLL_M-50_TuneCP5_13TeV-madgraphMLM-pythia8_RunIIFall17MiniAODv2.root"),tree_path_in_file,),],
xs = 6104.0,
plot_name = "DYJets",
)
DYJetsToLL.read_sumw_by_hist(os.path.join(input_dir,"DYJetsToLL_M-50_TuneCP5_13TeV-madgraphMLM-pythia8_RunIIFall17MiniAODv2.root"),hist_path_in_file)
# ______________________________________________________________________ ||
TTJets = CMSDataset(
"TTJets",
[TFile(os.path.join(skim_dir,"TTTo2L2Nu_TuneCP5_PSweights_13TeV-powheg-pythia8.root"),tree_path_in_file,),],
xs = 687.1,
plot_name = "TTJets",
)
TTJets.read_sumw_by_hist(os.path.join(input_dir,"TTTo2L2Nu_TuneCP5_PSweights_13TeV-powheg-pythia8.root"),hist_path_in_file)
# ______________________________________________________________________ ||
mc_bkg_samples = [
DYJetsToLL,
TTJets,
]
for b in mc_bkg_samples:
b.branches = [
"genWeight",
"dataMCWeight",
"pileupWeight",
"lepFSR_pt",
"lepFSR_eta",
"lepFSR_phi",
"lepFSR_mass",
"lep_pt",
"lep_eta",
"lep_phi",
"lep_id",
"lep_tightId",
"lep_RelIsoNoFSR",
"pho_pt",
"pho_eta",
"pho_phi",
"pho_sigmaEtaEta",
"pho_chargedHadronIso",
"pho_neutralHadronIso",
"pho_hadronicOverEm",
"pho_EleVote",
"pho_hasPixelSeed",
]
|
StarcoderdataPython
|
34700
|
<gh_stars>1-10
import boto3
import json
import urllib.request
import os
from . import reflect
def publish(name, payload):
if os.environ.get("NODE_ENV") == "testing":
try:
dump = json.dumps({"name": name, "payload": payload})
data = bytes(dump.encode())
handler = urllib.request.urlopen("http://localhost:3334/queues", data)
return handler.read().decode("utf-8")
except Exception as e:
print("arc.queues.publish to sandbox failed: " + str(e))
return data
else:
arc = reflect()
arn = arc["queues"][name]
sqs = boto3.client("sqs")
return sqs.send_message(
QueueUrl=arn, MessageBody=json.dumps(payload), DelaySeconds=0
)
|
StarcoderdataPython
|
1789124
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# */AIPND-revision/intropyproject-classify-pet-images/get_pet_labels.py
#
# PROGRAMMER: <NAME>
# DATE CREATED: 9/4/19
# REVISED DATE: 9/4/19
# PURPOSE: Create the function get_pet_labels that creates the pet labels from
# the image's filename. This function inputs:
# - The Image Folder as image_dir within get_pet_labels function and
# as in_arg.dir for the function call within the main function.
# This function creates and returns the results dictionary as results_dic
# within get_pet_labels function and as results within main.
# The results_dic dictionary has a 'key' that's the image filename and
# a 'value' that's a list. This list will contain the following item
# at index 0 : pet image label (string).
#
##
# Imports python modules
from os import listdir
def get_pet_labels(image_dir):
"""
Creates a dictionary of pet labels (results_dic) based upon the filenames
of the image files. These pet image labels are used to check the accuracy
of the labels that are returned by the classifier function, since the
filenames of the images contain the true identity of the pet in the image.
Be sure to format the pet labels so that they are in all lower case letters
and with leading and trailing whitespace characters stripped from them.
(ex. filename = 'Boston_terrier_02259.jpg' Pet label = 'boston terrier')
Parameters:
image_dir - The (full) path to the folder of images that are to be
classified by the classifier function (string)
Returns:
results_dic - Dictionary with 'key' as image filename and 'value' as a
List. The list contains for following item:
index 0 = pet image label (string)
"""
#Retrieve the filenames from the folder image_dir using listdir()
filename_list = listdir(image_dir)
#print(filename_list) #DEBUG
#Creates an empty dictionary, that will hold Pet Image filename (keys),
#filenames labels (as values)
results_dic = {}
#Processes through each file in the directory, extracting only the words
#of the file that contain the pet image label
for idx in range(0, len(filename_list), 1):
#Skips files that start with "." since its not a pet image file
if filename_list[0] != ".":
#Sets string to lower case letters
low_pet_label = filename_list[idx].lower()
#Splits lower case string by _ to break into words
word_list_pet_label = low_pet_label.split("_")
#Creates temporary label variable to hold pet label name
pet_label = ""
#Loops to check if word in pet name is only alphabetic characters
#if true append word to pet_label seperated by trailing space
for word in word_list_pet_label:
if word.isalpha():
pet_label += word + " "
#Strip off starting/trailing whitespace characters
pet_label = pet_label.strip()
#If filename doesn\'t already exist in dictionary add it and it\'s
#pet_label, otherwise print an erro message indicating duplicate files
#(filename)
if filename_list[idx] not in results_dic:
results_dic[filename_list[idx]] = [pet_label]
else:
print("** Warning: Duplicate file exist in directory", filename_list[idx])
#DEBUG iterating through a dictionary printing all key & their associated values
#print("\nPrinting all key-value pairs in dictionary results_dic:")
#for key in results_dic:
#print("Filename=", key, " Pet Label=", results_dic[key][0])
return results_dic
|
StarcoderdataPython
|
1649462
|
<gh_stars>100-1000
from vit.formatter.description_count import DescriptionCount
class DescriptionTruncatedCount(DescriptionCount):
def format(self, description, task):
if not description:
return self.empty()
truncated_description = self.format_description_truncated(description)
width = len(truncated_description)
colorized_description = self.colorize_description(truncated_description)
if not task['annotations']:
return (width, colorized_description)
else:
count_width, colorized_description = self.format_count(colorized_description, task)
return (width + count_width, colorized_description)
|
StarcoderdataPython
|
99594
|
from wsgiref.simple_server import make_server
from pyramid.config import Configurator
from pyramid.response import Response
from pyramid.view import view_config
import pyramid.httpexceptions as exc
import re
from pyramid.request import Request
from _pybgpstream import BGPStream, BGPRecord, BGPElem
from datetime import time, timedelta
import json
#@view_config(renderer='json')
def data(request):
    collector = start = end = None
    if "route_collector" in request.GET:
        collector = request.GET["route_collector"]
    if "timespan_start" in request.GET:
        start = request.GET["timespan_start"]
    if "timespan_end" in request.GET:
        end = request.GET["timespan_end"]
# create a new bgpstream instance
stream = BGPStream()
# select the data source
# on the CLI it works this way $ bgpreader -d sqlite -o db-file,ULG.sqlite -w 1477000000,1777360000
stream.set_data_interface("sqlite")
print("collector from URL is: " + collector)
# FIXME Use the collector name
stream.set_data_interface_option('db-file', collector +'.sqlite')
# create a reusable bgprecord instance
rec = BGPRecord()
    # Minimal sketch (assumption): read the records with the low-level pybgpstream API
    # and collect the elements so the JSON renderer has something to return.
    if start and end:
        stream.add_interval_filter(int(start), int(end))
    stream.start()
    response = []
    while stream.get_next_record(rec):
        if rec.status != "valid":
            continue
        elem = rec.get_next_elem()
        while elem:
            response.append({"type": elem.type, "peer_asn": elem.peer_asn, "fields": elem.fields})
            elem = rec.get_next_elem()
    return response
if __name__ == '__main__':
config = Configurator()
config.add_route('bgpread', '/peers/')
config.add_view(data, route_name='bgpread', renderer='json')
app = config.make_wsgi_app()
server = make_server('0.0.0.0', 8080, app)
server.serve_forever()
|
StarcoderdataPython
|
3268991
|
# -*- coding: utf-8 -*-
"""rackio/logger/logdict.py
This module implements a dictionary-based class to hold
the tags to be logged.
"""
class LogTable(dict):
def __init__(self):
pass
def validate(self, period, tag):
if not type(period) in [int, float]:
return False
if type(tag) != str:
return False
return True
def add_tag(self, tag, period):
if not self.validate(period, tag):
return
for key, value in self.items():
if tag in value:
self[key].remove(tag)
if period in self.keys():
self[period].append(tag)
else:
self[period] = [tag]
def get_groups(self):
return list(self.keys())
def get_tags(self, group):
return self[group]
def get_all_tags(self):
result = list()
for group in self.get_groups():
result += self.get_tags(group)
return result
def get_period(self, tag):
for key, value in self.items():
if tag in value:
return key
def serialize(self):
return self
|
StarcoderdataPython
|
1708559
|
<reponame>BGTCapital/hummingbot
#!/usr/bin/env python
from os.path import join, realpath
import sys; sys.path.insert(0, realpath(join(__file__, "../../../../../")))
from hummingbot.connector.exchange.kraken.kraken_user_stream_tracker import KrakenUserStreamTracker
from hummingbot.connector.exchange.kraken.kraken_auth import KrakenAuth
from hummingbot.core.utils.async_utils import safe_ensure_future
import asyncio
import logging
import unittest
import conf
class KrakenUserStreamTrackerUnitTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.ev_loop: asyncio.BaseEventLoop = asyncio.get_event_loop()
cls.kraken_auth = KrakenAuth(conf.kraken_api_key, conf.kraken_secret_key)
cls.user_stream_tracker: KrakenUserStreamTracker = KrakenUserStreamTracker(kraken_auth=cls.kraken_auth)
cls.user_stream_tracker_task: asyncio.Task = safe_ensure_future(cls.user_stream_tracker.start())
def run_async(self, task):
return self.ev_loop.run_until_complete(task)
def test_user_stream(self):
self.ev_loop.run_until_complete(asyncio.sleep(20.0))
print(self.user_stream_tracker.user_stream)
def main():
logging.basicConfig(level=logging.INFO)
unittest.main()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
134285
|
from tests.cli_client.CLI import CLI
def main():
cli: CLI = CLI()
cli.mock_run()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3246823
|
import turtle
import math
bob = turtle.Turtle()
def square(t, length):
for i in range(4):
t.fd(length)
t.lt(90)
def polygon(t, length, n):
degrees = 360/n
for i in range(n):
t.fd(length)
t.lt(degrees)
def circle(t, r):
    circumference = 2 * math.pi * r
length = circumference / 30
polygon(t, length, 30)
def arccircle(t, r, arc):
percentage = arc/360
    circumference = 2 * math.pi * r
length = circumference / 30
polygon(t, length, int(30*percentage))
def drawpie(t, length, n):
""" Draws an n-sided regular polygon pie
t: which turtle
r: radius
n: number of sides
"""
sinevalue = math.sin(math.pi/n)
r = abs(length/(2*sinevalue))
print(r)
interior_angle = 180*(n-2)/n
print(interior_angle)
turning_angle = 360 / n
# First draw a polygon
polygon(t, length, n)
# Then go to the centre
t.lt(interior_angle/2)
t.fd(r)
for spoke in range(n-1):
t.rt(180-(360/n))
t.fd(r)
t.rt(180)
t.fd(r)
drawpie(bob, 100, 5)
bob.fd(200)
drawpie(bob, 69, 8)
turtle.mainloop()
|
StarcoderdataPython
|
3227569
|
<reponame>anglebinbin/Barista-tool<filename>gui/network_manager/history_manager.py<gh_stars>1-10
class HistoryManager():
def __init__(self, maxSize=100):
""" Initialize HistoryManager which saved maximal maxSize states.
The maxSize+1 insertion removes the first
"""
self.history = []
# Position is a value contains the index of the state in a continues way.
# That means even when the first entries get deleted because of maxSize
# self.position do not get decreased by this.
self.position = -1
# the index of the current position in history list
self.__idx = -1
# self.listLen contains maxSize
self.listLen = maxSize
# If this gets true, no other entry can be make in history
self.__lockHistory = False
def canUndo(self):
return self.__idx >0
def canRedo(self):
return self.__idx < len(self.history)-1
def undo(self):
if self.canUndo():
self.__idx -=1
self.position -= 1
def redo(self):
if self.canRedo():
self.__idx +=1
self.position += 1
def currentState(self):
""" Get latest state saved in history"""
if len(self.history) > 0:
return self.history[self.__idx]
else:
return None
def _insert(self,element):
""" Insert element at the current position """
# remove newer elements
del self.history[self.__idx + 1:]
# Remove the oldest element if there are too many elements
if self.__idx == self.listLen:
del self.history[0]
else:
self.__idx += 1
self.history.append(element)
self.position += 1
def lockHistory(self, fun):
""" Lock history for the whole method fun.
        As a result no history can be inserted while fun is executed.
"""
if self.__lockHistory:
return
self.__lockHistory = True
fun()
self.__lockHistory = False
def insertFunc(self, fun):
""" Insert an element in history using the function fun.
While fun is working insertFunc is locked, so no other
element can be added in history.
        As a result, recursive insertion of history is stopped.
        The function fun will be called with an insertion function,
        which can be called to insert an element into history.
        E.g.:
            def createNewElement(text):
                # Nothing happens here, because insertFunc is locked
                historymanager.insertFunc(lambda insertF: insertF(text))
                return text
            historymanager.insertFunc(lambda insertF: insertF(createNewElement("bla")))
            # Only one string will be inserted
When inserting a new state old state gets removed if the limit of
entries is reached.
Also states newer than the current one (e.g. by using undo())
get removed (so you can't do a redo() anymore)
"""
if self.__lockHistory:
return
self.__lockHistory = True
fun(self._insert)
self.__lockHistory = False
def clear(self):
self.position = -1
self.__idx = -1
self.history = []
|
StarcoderdataPython
|
183889
|
<reponame>DonDzundza/Berkeley-AI-Course-Projects
# shopSmart.py
# ------------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# <NAME> (<EMAIL>) and <NAME> (<EMAIL>).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
"""
Here's the intended output of this script, once you fill it in:
Welcome to shop1 fruit shop
Welcome to shop2 fruit shop
For orders: [('apples', 1.0), ('oranges', 3.0)] best shop is shop1
For orders: [('apples', 3.0)] best shop is shop2
"""
import shop
def shopSmart(orderList, fruitShops):
"""
orderList: List of (fruit, numPound) tuples
fruitShops: List of FruitShops
"""
"*** YOUR CODE HERE ***"
bestShop = ('',9999999999999999)
for shop in fruitShops:
priceList = shop.fruitPrices
curCost = shop.getPriceOfOrder(orderList)
if curCost < bestShop[1]:
bestShop = (shop, curCost)
return bestShop[0]
if __name__ == '__main__':
"This code runs when you invoke the script from the command line"
order1 = [('apples',1.0), ('oranges',3.0)]
dir1 = {'apples': 2.0, 'oranges':1.0}
shop1 = shop.FruitShop('shop1',dir1)
dir2 = {'apples': 1.0, 'oranges': 5.0}
shop2 = shop.FruitShop('shop2',dir2)
shops = [shop1, shop2]
print "For orders ", order1, ", the best shop is", shopSmart(order1, shops).getName()
order2 = [('apples',3.0)]
print "For orders: ", order2, ", the best shop is", shopSmart(order2, shops).getName()
|
StarcoderdataPython
|
1702774
|
<reponame>tobiasraabe/locus-of-control
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from bld.project_paths import project_paths_join as ppj
LOC_MAP = {
"LOC_LUCK": "Item 3",
"LOC_ACHIEVED_DESERVE": "Item 2",
"LOC_POSSIBILITIES": "Item 8",
"LOC_LIFES_COURSE": "Item 1",
"LOC_DOUBT": "Item 7",
"LOC_OTHERS": "Item 5",
"LOC_LITTLE_CONTROL": "Item 10",
}
def plot_loadings():
fa = pd.read_pickle(ppj("OUT_DATA", "fa_loadings.pkl"))
fig, ax = plt.subplots()
ax.axhline(y=0, color="grey", alpha=0.7)
ax.axvline(x=0, color="grey", alpha=0.7)
ax.plot(fa.FIRST_FACTOR, fa.SECOND_FACTOR, "o")
for i, [x, y] in fa.iterrows():
if i in ["LOC_LITTLE_CONTROL", "LOC_OTHERS"]:
y -= 0.06
elif i in ["LOC_ACHIEVED_DESERVE"]:
y += 0.01
elif i in ["LOC_POSSIBILITIES"]:
y -= 0.09
ax.annotate(LOC_MAP[i], xy=(x - 0.02, y), ha="right")
ax.set_xlabel("First Factor")
ax.set_ylabel("Second Factor")
ax.set_xticks(np.arange(-1, 1.1, 0.5))
ax.set_yticks(np.arange(-1, 1.1, 0.5))
ax.set_xlim(-1.1, 1.1)
ax.set_ylim(-1.1, 1.1)
plt.savefig(ppj("OUT_FIGURES", "fig-fa-factor-loadings.png"))
if __name__ == "__main__":
plot_loadings()
|
StarcoderdataPython
|
3203314
|
<filename>skill/quotes.py
quotes = [
{
"headline": "<NAME> ΓΌber schlechte Chancen",
"content": "Wenn etwas wichtig genug ist, dann mach es, auch wenn alle Chancen gegen dich stehen."
},
{
"headline": "<NAME> ΓΌber den Aufbau einer Firma",
"content": "Eine Firma aufzubauen ist wie Kuchen backen. Man braucht von allen Zutaten genau die richtige Menge."
},
{
"headline": "<NAME> ΓΌber Geduld",
"content": "Geduld ist eine Tugend und ich erlerne sie gerade. Es ist eine harte Lehre."
},
{
"headline": "<NAME> ΓΌber Ziele",
"content": "Menschen arbeiten besser, wenn sie wissen fΓΌr welches Ziel und warum. Es ist wichtig, dass die Leute sich darauf freuen, morgens in die Arbeit zu kommen, und ihnen das Arbeiten SpaΓ macht."
},
{
"headline": "<NAME> ΓΌber groΓartige Unternehmen",
"content": "GroΓartige Unternehmen sind auf groΓartigen Produkten aufgebaut."
},
{
"headline": "<NAME> ΓΌber Innovation",
"content": "Wie entsteht innovatives Denken? Es ist eine Geisteshaltung, fΓΌr die man sich entscheiden muss."
},
{
"headline": "<NAME> ΓΌber komplizierte Aufgaben",
"content": "Es ist ein Fehler, eine groΓe Anzahl an Leuten einzustellen, um eine komplizierte Aufgabe zu lΓΆsen. Viele kΓΆnnen niemals Talent wettmachen, wenn es darum geht, die richtige LΓΆsung zu finden (zwei Menschen, die etwas nicht wissen, sind nicht besser als einer). Sie werden den Fortschritt aufhalten und unglaublich teuer machen."
},
{
"headline": "<NAME> ΓΌber Unternehmertum",
"content": "Unternehmer zu sein ist wie Glas zu essen und in den Abgrund des Todes zu starren."
},
{
"headline": "<NAME> ΓΌber Selbstzufriedenheit",
"content": "Denk immer darΓΌber nach, wie du Dinge besser machen kannst, und hinterfrage dich."
},
{
"headline": "<NAME> ΓΌber seinen grΓΆΓten Fehler",
"content": "Mein grΓΆΓter Fehler ist vermutlich, zu viel Wert auf das Talent von jemanden zu legen und nicht auf seine PersΓΆnlichkeit. Ich denke es ist wichtig, dass jemand ein gutes Herz hat."
},
{
"headline": "<NAME> ΓΌber die Vergangenheit",
"content": "Wenn irgendwer lieber in der Vergangenheit leben will, dann kennt er sich mit Geschichte nicht besonders gut aus. Das Leben in frΓΌheren Zeiten war zum Kotzen. Die Menschen wussten sehr wenig und man wΓ€re wahrscheinlich in einem jungen Alter an einer furchtbaren Krankheit gestorben. Man hΓ€tte jetzt wahrscheinlich keine ZΓ€hne mehr. Als Frau wΓ€re es besonders schlimm."
},
{
"headline": "<NAME> ΓΌber die Zukunft",
"content": "Wenn du morgens aufwachst und denkst, dass die Zukunft besser sein wird, ist das ein schΓΆner Tag. Ansonsten ist er es nicht."
}
]
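
# --- Example usage (added for illustration; not part of the original skill data) ---
# A minimal sketch: pick a random quote and format it the way a speech response might read it.
if __name__ == "__main__":
    import random
    quote = random.choice(quotes)
    print(quote["headline"] + ": " + quote["content"])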
|
StarcoderdataPython
|
92797
|
<reponame>Time-xg/bookmanager_django<filename>user/models.py
from django.db import models
# Create your models here.
class UserInfo(models.Model):
# USER_CHOICES = (
# ('reader', 'Reader'),
# ('admin', 'Administrator'),
# )
# GENDER_CHOICES = (
# ('male', 'male'),
# ('female', 'female'),
# )
name = models.CharField(unique=True, max_length=50)
password = models.CharField(max_length=25)
permission = models.CharField(max_length=20, default='reader')
gender = models.CharField(max_length=20, default='male')
def __str__(self):
return self.name
|
StarcoderdataPython
|
1656670
|
# ================
# User Mixin
# ================
#
import json
from django.http import Http404
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from rest_framework import serializers
from .models import User as UserModel
class UserValidationMixin(object):
"""
Mixin to add few helper methods to User
"""
def validate_username(self, value):
"""
Validates the username. You can hook into this if you want to
(dynamically) restrict what usernames can be chosen.
"""
username = value.strip()
from django.contrib.auth.forms import UserCreationForm
USERNAME_REGEX = UserCreationForm().fields['username'].regex
if not USERNAME_REGEX.match(username):
raise serializers.ValidationError(_("Usernames can only contain letters, digits and @/./+/-/_."))
# TODO: Add regexp support to USERNAME_BLACKLIST
if username in settings.USERNAME_BLACKLIST:
raise serializers.ValidationError(_("Username can not be used. "
"Please use another username."))
if UserModel.query(UserModel.username==username).get():
raise serializers.ValidationError(_("This username is already taken. Please choose another."))
value = username
return value
def validate_email(self, value):
email = value.strip()
if UserModel.query(UserModel.email==email).get():
raise serializers.ValidationError(_("This email is already registered."))
value = email
return value
class UserMixin(object):
"""
Mixin to add few helper methods to User
"""
def get_user_obj(self, username=None, email=None):
'''
Get User obj
'''
if username:
obj = UserModel.query(UserModel.username == username).get()
elif email:
obj = UserModel.query(UserModel.email == email).get()
else:
raise Http404
return obj
|
StarcoderdataPython
|
1696708
|
<filename>src/tests/helpers/wsdl_locations.py
TEXT_CASING_WSDL = "https://www.dataaccess.com/webservicesserver/TextCasing.wso?WSDL"
|
StarcoderdataPython
|
3249532
|
import os
import os.path
import hashlib
import errno
from tqdm import tqdm
import gzip
import tarfile
import time
import zipfile
def gen_bar_updater(pbar):
def bar_update(count, block_size, total_size):
if pbar.total is None and total_size:
pbar.total = total_size
progress_bytes = count * block_size
n = progress_bytes - pbar.n
if n > 0:
pbar.update(n)
return bar_update
def check_integrity(fpath, md5=None):
if md5 is None:
return True
if not os.path.isfile(fpath):
return False
md5o = hashlib.md5()
with open(fpath, 'rb') as f:
# read in 1MB chunks
for chunk in iter(lambda: f.read(1024 * 1024), b''):
md5o.update(chunk)
md5c = md5o.hexdigest()
if md5c != md5:
return False
return True
def makedir_exist_ok(dirpath):
"""
Python2 support for os.makedirs(.., exist_ok=True)
"""
try:
os.makedirs(dirpath)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
def download_url(url, root, filename, md5=''):
# downloads file
from six.moves import urllib
root = os.path.expanduser(root)
fpath = os.path.join(root, filename)
makedir_exist_ok(root)
if os.path.isfile(fpath) and check_integrity(fpath, md5):
print('Using downloaded and verified file: ' + fpath)
else:
# https://developers.google.com/maps/documentation/elevation/web-service-best-practices#exponential-backoff
current_delay = 0.1 # Set the initial retry delay to 100ms.
max_delay = 5 # Set the maximum retry delay to 5 seconds.
while True:
try:
print('Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(
url, fpath,
reporthook=gen_bar_updater(tqdm(unit='B', unit_scale=True)))
break
except urllib.error.URLError:
pass
if current_delay > max_delay:
raise Exception("Can not download " + url)
print("Waiting", current_delay, "seconds before retrying.")
time.sleep(current_delay)
current_delay *= 2 # Increase the delay each time we retry.
def _is_tarxz(filename):
return filename.endswith(".tar.xz")
def _is_tar(filename):
return filename.endswith(".tar")
def _is_targz(filename):
return filename.endswith(".tar.gz")
def _is_gzip(filename):
return filename.endswith(".gz") and not filename.endswith(".tar.gz")
def _is_zip(filename):
return filename.endswith(".zip")
def extract_archive(from_path, to_path=None, remove_finished=False):
if to_path is None:
to_path = os.path.dirname(from_path)
if _is_tar(from_path):
with tarfile.open(from_path, 'r') as tar:
tar.extractall(path=to_path)
elif _is_targz(from_path):
with tarfile.open(from_path, 'r:gz') as tar:
tar.extractall(path=to_path)
elif _is_tarxz(from_path):
# .tar.xz archive only supported in Python 3.x
with tarfile.open(from_path, 'r:xz') as tar:
tar.extractall(path=to_path)
elif _is_gzip(from_path):
to_path = os.path.join(to_path, os.path.splitext(os.path.basename(from_path))[0])
with open(to_path, "wb") as out_f, gzip.GzipFile(from_path) as zip_f:
out_f.write(zip_f.read())
elif _is_zip(from_path):
with zipfile.ZipFile(from_path, 'r') as z:
z.extractall(to_path)
else:
raise ValueError("Extraction of {} not supported".format(from_path))
if remove_finished:
os.remove(from_path)
def download_and_extract_archive(url, download_root, extract_root=None, filename=None,
md5=None, remove_finished=False):
download_root = os.path.expanduser(download_root)
if extract_root is None:
extract_root = download_root
if not filename:
filename = os.path.basename(url)
download_url(url, download_root, filename, md5)
archive = os.path.join(download_root, filename)
print("Extracting {} to {}".format(archive, extract_root))
extract_archive(archive, extract_root, remove_finished)
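# A minimal usage sketch (illustrative only); the URL and target directory below are
# hypothetical placeholders, not values from this project.
if __name__ == "__main__":
    download_and_extract_archive(
        url="https://example.com/dataset.tar.gz",  # hypothetical archive location
        download_root="./data",
        remove_finished=True,
    )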
|
StarcoderdataPython
|
3314197
|
<filename>tests/test_fastq_filter.py
# Copyright (c) 2021 Leiden University Medical Center
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import array
import itertools
import math
import statistics
import sys
from typing import List
from dnaio import Sequence
import fastq_filter
from fastq_filter import max_length_filter, \
mean_quality_filter, median_quality_filter, min_length_filter, \
qualmean, qualmedian
from fastq_filter.optimized_algorithms import DEFAULT_PHRED_SCORE_OFFSET
import pytest # type: ignore
def quallist_to_string(quallist: List[int]):
return array.array(
"B", [qual + DEFAULT_PHRED_SCORE_OFFSET for qual in quallist]
).tobytes().decode("ascii")
QUAL_STRINGS = [
b"I?>DC:>@?IDC9??G?>EH9E@66=9<?@E?DC:@<@BBFG>=FIC@F9>7CG?IC?I;CD9>>>A@C7>>"
b"8>>D9GCB<;?DD>C;9?>5G>?H?=6@>:G6B<?==A7?@???8IF<75C=@A:BEA@A;C89D:=1?=<A"
b">D=>B66C",
b"C:@?;8@=DC???>E>E;98BBB?9D=?@B;D?I:??FD8CH?A7?<H>ABD@C@C?>;;B<><;9@8BAFD"
b"?;:>I3DB<?<B=?A??CI>2E>><BD?A??FCBCE?DAI><B:8D>?C>@BA=F<>7=E=?DC=@9GG=>?"
b"C@><CA;>",
]
@pytest.mark.parametrize("qualstring", QUAL_STRINGS)
def test_qualmean(qualstring):
offset = DEFAULT_PHRED_SCORE_OFFSET
qualities = [qual - offset for qual in array.array("b", qualstring)]
probabilities = [10 ** (qual / -10) for qual in qualities]
average_prob = statistics.mean(probabilities)
phred = - 10 * math.log10(average_prob)
assert phred == pytest.approx(qualmean(qualstring))
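def test_qualmean_worked_example():
    # Illustrative check (not from the original suite): two bases with Q=10 and Q=20
    # have error probabilities 0.1 and 0.01, whose mean is 0.055, i.e. a mean quality
    # of roughly 12.6. Assumes qualmean accepts ASCII-encoded bytes, as in test_qualmean.
    qualstring = quallist_to_string([10, 20]).encode("ascii")
    assert qualmean(qualstring) == pytest.approx(-10 * math.log10(0.055))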
@pytest.mark.parametrize("qualstring", QUAL_STRINGS)
def test_qualmedian(qualstring):
offset = DEFAULT_PHRED_SCORE_OFFSET
qualities = [qual - offset for qual in array.array("b", qualstring)]
median_quality = statistics.median(qualities)
assert median_quality == qualmedian(qualstring)
def test_qualmedian_correct():
    # For an even number of values, qualmedian should return the mean of the two middle qualities.
qualities = b"AACEGG" # Median value should be D. ord("D") == 68
result = qualmedian(qualities, 0)
assert result == 68.0
assert type(result) == float
# Use a list (not a single-use generator) so both parametrized tests below receive the values.
INVALID_PHREDS = [chr(x).encode("latin-1") for x
                  in itertools.chain(range(33), range(127, 256))]
@pytest.mark.parametrize("qualstring", INVALID_PHREDS)
def test_qualmean_invalid_quals(qualstring):
with pytest.raises(ValueError) as error:
qualmean(qualstring)
error.match("Value outside phred range")
@pytest.mark.parametrize("qualstring", INVALID_PHREDS)
def test_qualmedian_invalid_quals(qualstring):
with pytest.raises(ValueError) as error:
        qualmedian(qualstring)
error.match("Value outside phred range")
def test_min_length_filter_pass():
assert min_length_filter(
10, Sequence("", "0123456789A", "???????????")) is True
def test_min_length_filter_fail():
assert min_length_filter(
12, Sequence("", "0123456789A", "???????????")) is False
def test_max_length_filter_pass():
assert max_length_filter(
12, Sequence("", "0123456789A", "???????????")) is True
def test_max_length_filter_fail():
assert max_length_filter(
10, Sequence("", "0123456789A", "???????????")) is False
def test_mean_quality_filter_fail():
assert mean_quality_filter(
10, Sequence("", "AAA", quallist_to_string([9, 9, 9]))) is False
def test_mean_quality_filter_pass():
assert mean_quality_filter(
8, Sequence("", "AAA", quallist_to_string([9, 9, 9]))) is True
def test_median_quality_filter_fail():
assert median_quality_filter(
10, Sequence("", "AAAAA", quallist_to_string([9, 9, 9, 10, 10]))
) is False
def test_median_quality_filter_pass():
assert median_quality_filter(
8-0.001, Sequence(
"", "AAAAAAA", quallist_to_string([1, 1, 1, 8, 9, 9, 9]))) is True
def test_fastq_records_to_file(tmp_path):
records = [Sequence("TEST", "A", "A")] * 3
out = tmp_path / "test.fq"
fastq_filter.fastq_records_to_file(records, str(out))
assert out.read_bytes() == b"@TEST\nA\n+\nA\n" \
b"@TEST\nA\n+\nA\n" \
b"@TEST\nA\n+\nA\n"
def test_file_to_fastq_records(tmp_path):
out = tmp_path / "test.fq"
out.write_bytes(b"@TEST\nA\n+\nA\n@TEST\nA\n+\nA\n@TEST\nA\n+\nA\n")
assert list(fastq_filter.file_to_fastq_records(str(out))) == [
Sequence("TEST", "A", "A")] * 3
def test_wrong_filter():
with pytest.raises(ValueError) as e:
fastq_filter.filter_string_to_filters("nonsense:20")
assert e.match("Unknown filter")
def test_filter_fastq(tmp_path):
in_f = tmp_path / "in.fq"
out_f = tmp_path / "out.fq"
in_f.write_bytes(b"@TEST\nAA\n+\nAA\n@TEST\nA\n+\n-\n@TEST\nA\n+\nA\n")
fastq_filter.filter_fastq(
"mean_quality:20|min_length:2", str(in_f), str(out_f))
# Only one record should survive the filter.
assert out_f.read_bytes() == b"@TEST\nAA\n+\nAA\n"
def test_main(tmp_path):
in_f = tmp_path / "in.fq"
out_f = tmp_path / "out.fq"
in_f.write_bytes(b"@TEST\nAA\n+\nAA\n@TEST\nA\n+\n-\n@TEST\nA\n+\nA\n")
sys.argv = ["", "-o", str(out_f), "mean_quality:20|min_length:2",
str(in_f)]
fastq_filter.main()
assert out_f.read_bytes() == b"@TEST\nAA\n+\nAA\n"
def test_help_filters(capsys):
sys.argv = ["", "--help-filters"]
with pytest.raises(SystemExit):
fastq_filter.main()
result = capsys.readouterr()
# Test if docstrings get printed.
assert "median quality of the FASTQ record" in result.out
assert "The mean quality" in result.out
assert "at least min_length" in result.out
assert "at most max_length" in result.out
@pytest.mark.parametrize("func", [qualmean, qualmedian])
def test_empty_quals_error(func):
with pytest.raises(ValueError) as error:
func(b"")
assert error.match("Empty")
|
StarcoderdataPython
|
68484
|
<filename>core/ai/behaviors/__init__.py
from core.ai.behaviors.base import Behavior
from core.ai.behaviors.meleeattack import MeleeAttack
from core.ai.behaviors.move import Move
from core.ai.behaviors.wait import Wait
|
StarcoderdataPython
|
59877
|
<filename>python/motorModule.py
#!/usr/bin/env python3
import os
import robomodules as rm
from messages import *
import RPi.GPIO as GPIO
import time
import signal
import sys
ADDRESS = os.environ.get("BIND_ADDRESS","localhost")
PORT = int(os.environ.get("BIND_PORT", 11293))
FREQUENCY = 0
LEFT_PWM = 32
LEFT_1 = 36
LEFT_2 = 38
RIGHT_PWM = 33
RIGHT_1 = 35
RIGHT_2 = 37
BACKWARD = 0
FORWARD = 1
LEFT_MOTOR = 0
RIGHT_MOTOR = 1
class MotorModule(rm.ProtoModule):
def __init__(self, addr, port):
print("Initializing Motors...")
self.subscriptions = [MsgType.TWIST]
super().__init__(addr, port, message_buffers, MsgType, FREQUENCY, self.subscriptions)
self.initializeMotors()
self.leftSpeed = 0
self.rightSpeed = 0
self.leftDir = 0
self.rightDir = 0
print("Motors Initialized")
def msg_received(self, msg, msg_type):
# This gets called whenever any message is received
if msg_type == MsgType.TWIST:
self.processTwist(msg.velocity, msg.omega)
def tick(self):
# this function will get called in a loop with FREQUENCY frequency
return
def initializeMotors(self):
GPIO.setmode(GPIO.BOARD)
GPIO.setup(LEFT_PWM, GPIO.OUT)
GPIO.setup(LEFT_1, GPIO.OUT)
GPIO.setup(LEFT_2, GPIO.OUT)
GPIO.setup(RIGHT_PWM, GPIO.OUT)
GPIO.setup(RIGHT_1, GPIO.OUT)
GPIO.setup(RIGHT_2, GPIO.OUT)
self.right_pwm = GPIO.PWM(RIGHT_PWM, 100)
self.left_pwm = GPIO.PWM(LEFT_PWM, 100)
self.right_pwm.start(0)
self.left_pwm.start(0)
self.setDirection(LEFT_MOTOR, FORWARD)
self.setDirection(RIGHT_MOTOR, FORWARD)
time.sleep(1)
def setDirection(self, motor, direction):
if motor == LEFT_MOTOR:
if direction == FORWARD:
GPIO.output(LEFT_1, True)
GPIO.output(LEFT_2, False)
else:
GPIO.output(LEFT_1, False)
GPIO.output(LEFT_2, True)
else:
if direction == FORWARD:
GPIO.output(RIGHT_1, True)
GPIO.output(RIGHT_2, False)
else:
GPIO.output(RIGHT_1, False)
GPIO.output(RIGHT_2, True)
# Takes linear and rotational values and converts into signals for left and right motor
def processTwist(self, linSpeed, rotSpeed):
leftSpeed = linSpeed
rightSpeed = linSpeed
leftSpeed += rotSpeed
rightSpeed -= rotSpeed
if leftSpeed >= 0:
self.setDirection(LEFT_MOTOR, FORWARD)
else:
self.setDirection(LEFT_MOTOR, BACKWARD)
if rightSpeed >= 0:
self.setDirection(RIGHT_MOTOR, FORWARD)
else:
self.setDirection(RIGHT_MOTOR, BACKWARD)
        # If a speed exceeds the 0-100 duty-cycle range, clamp it; otherwise use the calculated speed.
if abs(leftSpeed) > 100:
self.left_pwm.ChangeDutyCycle(100)
else:
self.left_pwm.ChangeDutyCycle(abs(leftSpeed))
if abs(rightSpeed) > 100:
self.right_pwm.ChangeDutyCycle(100)
else:
self.right_pwm.ChangeDutyCycle(abs(rightSpeed))
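# A hypothetical pure helper (not part of the original module) mirroring the
# twist-to-wheel mapping above without touching the GPIO, e.g. for off-robot tests.
def compute_wheel_speeds(lin_speed, rot_speed):
    """Return (left, right) speeds before clamping to the 0-100 duty-cycle range."""
    return lin_speed + rot_speed, lin_speed - rot_speed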
def destroy(*args):
GPIO.cleanup()
print("Motor module safely terminated")
sys.exit()
def main():
signal.signal(signal.SIGINT, destroy)
signal.signal(signal.SIGTERM, destroy)
module = MotorModule(ADDRESS, PORT)
module.run()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
180788
|
<gh_stars>100-1000
from ._optimization import *
__all__ = [name for name in dir() if name[0] != '_']
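# The wildcard import pulls every name out of ._optimization, and __all__ then
# re-exports only those that do not start with an underscore, so the package level
# exposes the public API without the private helpers.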
|
StarcoderdataPython
|
4817553
|
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from numbers import Integral
import numpy as np
import pandas as pd
from pandas.core.dtypes.cast import find_common_type
from ...tensor.core import TENSOR_TYPE
from ...tensor.datasource.empty import empty
from ...tensor.indexing.core import calc_shape, process_index
from ...serialize import AnyField, ListField
from ... import opcodes as OperandDef
from ..operands import DataFrameOperand, DataFrameOperandMixin, ObjectType
from ..utils import indexing_index_value
class DataFrameIloc(object):
def __init__(self, obj):
self._obj = obj
def __getitem__(self, indexes):
op = DataFrameIlocGetItem(indexes=process_index(self._obj.ndim, indexes), object_type=ObjectType.dataframe)
return op(self._obj)
def __setitem__(self, indexes, value):
if not np.isscalar(value):
raise NotImplementedError('Only scalar value is supported to set by iloc')
op = DataFrameIlocSetItem(indexes=process_index(self._obj.ndim, indexes), value=value, object_type=ObjectType.dataframe)
ret = op(self._obj)
self._obj.data = ret.data
class DataFrameIlocGetItem(DataFrameOperand, DataFrameOperandMixin):
_op_type_ = OperandDef.DATAFRAME_ILOC_GETITEM
_indexes = ListField('indexes')
def __init__(self, indexes=None, gpu=False, sparse=False, object_type=None, **kw):
super(DataFrameIlocGetItem, self).__init__(_indexes=indexes,
_gpu=gpu, _sparse=sparse,
_object_type=object_type, **kw)
@property
def indexes(self):
return self._indexes
def __call__(self, df):
# Note [Fancy Index of Numpy and Pandas]
#
        # numpy fancy indexing and pandas .iloc have different semantics when processing a fancy index:
#
# >>> np.ones((3,3))[[1,2],[1,2]]
# array([1., 1.])
#
# >>> pd.DataFrame(np.ones((3,3))).iloc[[1,2],[1,2]]
# 1 2
# 1 1.0 1.0
# 2 1.0 1.0
#
        # Thus, we process the index along the two axes of the DataFrame separately.
if isinstance(self.indexes[0], TENSOR_TYPE) or isinstance(self.indexes[1], TENSOR_TYPE):
raise NotImplementedError('The index value cannot be unexecuted mars tensor')
shape0 = tuple(calc_shape((df.shape[0],), (self.indexes[0],)))
shape1 = tuple(calc_shape((df.shape[1],), (self.indexes[1],)))
        # NB: pandas only compresses the result to a Series when the index on one of the axes is integral
if isinstance(self.indexes[1], Integral):
shape = shape0
dtype = df.dtypes.iloc[self.indexes[1]]
index_value = indexing_index_value(df.index_value, self.indexes[0])
self._object_type = ObjectType.series
return self.new_series([df], shape=shape, dtype=dtype, index_value=index_value)
elif isinstance(self.indexes[0], Integral):
shape = shape1
dtype = find_common_type(df.dtypes.iloc[self.indexes[1]].values)
index_value = indexing_index_value(df.columns, self.indexes[1])
self._object_type = ObjectType.series
return self.new_series([df], shape=shape, dtype=dtype, index_value=index_value)
else:
return self.new_dataframe([df], shape=shape0 + shape1, dtypes=df.dtypes.iloc[self.indexes[1]],
index_value=indexing_index_value(df.index_value, self.indexes[0]),
columns_value=indexing_index_value(df.columns, self.indexes[1], store_data=True))
# FIXME The view behavior of DataFrame.iloc
#
    # pandas's iloc has complicated behavior regarding whether it creates a view or not; it depends
    # on the subsequent operations on the view, as illustrated by the following example:
#
# >>> df = pd.DataFrame([[1,2], [3,4]])
# >>> x = df.iloc[:]
# >>> df
# 0 1
# 0 1 2
# 1 3 4
# >>> x
# 0 1
# 0 1 2
# 1 3 4
#
# >>> x.iloc[:] = 1000
# >>> x
# 0 1
# 0 1000 1000
# 1 1000 1000
    # >>> df
# 0 1
# 0 1000 1000
# 1 1000 1000
#
# >>> x.iloc[:] = 2000.0
# >>> x
# 0 1
# 0 2000.0 2000.0
# 1 2000.0 2000.0
# >>> df
# 0 1
# 0 1000 1000
# 1 1000 1000
@classmethod
def tile(cls, op):
in_df = op.inputs[0]
out_val = op.outputs[0]
# See Note [Fancy Index of Numpy and Pandas]
tensor0 = empty(in_df.shape[0], chunk_size=(in_df.nsplits[0],))[op.indexes[0]].tiles()
tensor1 = empty(in_df.shape[1], chunk_size=(in_df.nsplits[1],))[op.indexes[1]].tiles()
integral_index_on_index = isinstance(op.indexes[0], Integral)
integral_index_on_column = isinstance(op.indexes[1], Integral)
out_chunks = []
for index_chunk, column_chunk in itertools.product(tensor0.chunks, tensor1.chunks):
in_chunk = in_df.cix[index_chunk.inputs[0].index + column_chunk.inputs[0].index]
chunk_op = op.copy().reset_key()
chunk_op._indexes = (index_chunk.op.indexes[0], column_chunk.op.indexes[0])
if integral_index_on_column:
shape = index_chunk.shape
index = index_chunk.index
index_value = indexing_index_value(in_chunk.index_value, index_chunk.op.indexes[0])
out_chunk = chunk_op.new_chunk([in_chunk], shape=shape, index=index,
dtype=out_val.dtype, index_value=index_value)
elif integral_index_on_index:
shape = column_chunk.shape
index = column_chunk.index
index_value = indexing_index_value(in_chunk.columns, column_chunk.op.indexes[0])
out_chunk = chunk_op.new_chunk([in_chunk], shape=shape, index=index,
dtype=out_val.dtype, index_value=index_value)
else:
index_value = indexing_index_value(in_chunk.index_value, index_chunk.op.indexes[0])
columns_value = indexing_index_value(in_chunk.columns, column_chunk.op.indexes[0], store_data=True)
dtypes = in_chunk.dtypes.iloc[column_chunk.op.indexes[0]]
out_chunk = chunk_op.new_chunk([in_chunk],
shape=index_chunk.shape + column_chunk.shape,
index=index_chunk.index + column_chunk.index,
dtypes=dtypes, index_value=index_value, columns_value=columns_value)
out_chunks.append(out_chunk)
new_op = op.copy()
if integral_index_on_column or integral_index_on_index:
if integral_index_on_column:
nsplits = tensor0.nsplits
else:
nsplits = tensor1.nsplits
return new_op.new_seriess(op.inputs, out_val.shape, dtype=out_val.dtype,
index_value=out_val.index_value, chunks=out_chunks, nsplits=nsplits)
else:
nsplits = tensor0.nsplits + tensor1.nsplits
return new_op.new_dataframes(op.inputs, out_val.shape, dtypes=out_val.dtypes,
index_value=out_val.index_value,
columns_value=out_val.columns, chunks=out_chunks, nsplits=nsplits)
@classmethod
def execute(cls, ctx, op):
chunk = op.outputs[0]
r = ctx[op.inputs[0].key].iloc[op.indexes]
if isinstance(r, pd.Series) and r.dtype != chunk.dtype:
r = r.astype(chunk.dtype)
ctx[chunk.key] = r
class DataFrameIlocSetItem(DataFrameOperand, DataFrameOperandMixin):
_op_type_ = OperandDef.DATAFRAME_ILOC_SETITEM
_indexes = ListField('indexes')
_value = AnyField('value')
def __init__(self, indexes=None, value=None, gpu=False, sparse=False, object_type=None, **kw):
super(DataFrameIlocSetItem, self).__init__(_indexes=indexes, _value=value,
_gpu=gpu, _sparse=sparse,
_object_type=object_type, **kw)
@property
def indexes(self):
return self._indexes
@property
def value(self):
return self._value
def __call__(self, df):
if isinstance(self.indexes[0], TENSOR_TYPE) or isinstance(self.indexes[1], TENSOR_TYPE):
raise NotImplementedError('The index value cannot be unexecuted mars tensor')
return self.new_dataframe([df], shape=df.shape, dtypes=df.dtypes,
index_value=df.index_value, columns_value=df.columns)
@classmethod
def tile(cls, op):
in_df = op.inputs[0]
out_df = op.outputs[0]
# See Note [Fancy Index of Numpy and Pandas]
tensor0 = empty(in_df.shape[0], chunk_size=(in_df.nsplits[0],))[op.indexes[0]].tiles()
tensor1 = empty(in_df.shape[1], chunk_size=(in_df.nsplits[1],))[op.indexes[1]].tiles()
chunk_mapping = {c0.inputs[0].index + c1.inputs[0].index: (c0, c1)
for c0, c1 in itertools.product(tensor0.chunks, tensor1.chunks)}
out_chunks = []
for chunk in in_df.chunks:
if chunk.index not in chunk_mapping:
out_chunks.append(chunk)
else:
chunk_op = op.copy().reset_key()
index_chunk, column_chunk = chunk_mapping[chunk.index]
chunk_op._indexes = (index_chunk.op.indexes[0], column_chunk.op.indexes[0])
chunk_op._value = op.value
out_chunk = chunk_op.new_chunk([chunk],
shape=chunk.shape, index=chunk.index, dtypes=chunk.dtypes,
index_value=chunk.index_value, columns_value=chunk.columns)
out_chunks.append(out_chunk)
new_op = op.copy()
return new_op.new_dataframes(op.inputs, shape=out_df.shape, dtypes=out_df.dtypes,
index_value=out_df.index_value, columns_value=out_df.columns,
chunks=out_chunks, nsplits=in_df.nsplits)
@classmethod
def execute(cls, ctx, op):
chunk = op.outputs[0]
r = ctx[op.inputs[0].key].copy(deep=True)
r.iloc[op.indexes] = op.value
ctx[chunk.key] = r
def iloc(df):
return DataFrameIloc(df)
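# A minimal usage sketch (illustrative only), assuming this iloc helper is wired up as
# the mars DataFrame's .iloc accessor, mirroring pandas:
#   df.iloc[[1, 2], 0]       -> a mars Series (integral index on the column axis)
#   df.iloc[[1, 2], [0, 1]]  -> a mars DataFrame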
|
StarcoderdataPython
|
3330723
|
<reponame>adrienlachaize/dezede
from django.test import TestCase
from .utils import HTMLAnnotatedCharList, AnnotatedDiff
class HTMLAnnotatedCharListTestCase(TestCase):
def setUp(self):
self.html_annotated_char_list = HTMLAnnotatedCharList('<p>blabla</p>')
def test_annotate(self):
self.html_annotated_char_list.annotate(0, 1,
'<span class="sc">', '</span>')
self.assertEqual(str(self.html_annotated_char_list),
'<p><span class="sc">b</span>labla</p>')
self.html_annotated_char_list.annotate(0, 1,
'<b>', '</b>')
self.assertEqual(str(self.html_annotated_char_list),
'<p><span class="sc"><b>b</b></span>labla</p>')
self.html_annotated_char_list.annotate(5, 5,
'<i>', '</i>')
self.assertEqual(str(self.html_annotated_char_list),
'<p><span class="sc"><b>b</b></span>labl<i></i>a</p>')
self.html_annotated_char_list.annotate(5, 6,
'<i>', '</i>')
self.assertEqual(
str(self.html_annotated_char_list),
'<p><span class="sc"><b>b</b></span>labl<i></i><i>a</i></p>')
class AnnotatedDiffTestCase(TestCase):
def assertScore(self, a, b, score):
self.assertEqual(AnnotatedDiff(a, b).get_score(), score)
def test_perfect_score(self):
self.assertScore(
'abcdefghijklmnopqrstuvwxyz',
'abcdefghijklmnopqrstuvwxyz',
'20/20')
def test_score_missing_char(self):
self.assertScore(
'bcdefghijklmnopqrstuvwxyz',
'abcdefghijklmnopqrstuvwxyz',
'16/20')
self.assertScore(
'abcdefghijklmnopqrstuvwxy',
'abcdefghijklmnopqrstuvwxyz',
'16/20')
self.assertScore(
'abcdefghijklnopqrstuvwxyz',
'abcdefghijklmnopqrstuvwxyz',
'16/20')
def test_score_missing_diacritic(self):
self.assertScore(
'abcdefghijklmnopqrstuvwxyz',
'Γ’bcdefghijklmnopqrstuvwxyz',
'18/20')
self.assertScore(
'abcdefghijklmnopqrstuvwxyz',
'abcdΓ©fghijklmnopqrstuvwxyz',
'18/20')
self.assertScore(
'abcdefghijklmnopqrstuvwxyz',
'abcdefghΓ―jklmnopqrstuvwxyz',
'18/20')
# Missing several diacritics
self.assertScore(
'abcdefghijklmnopqrstuvwxyz',
'Γ’bcdΓ©fghΓ―jklmnopqrstuvwxyz',
'15/20')
def test_score_extra_diacritic(self):
self.assertScore(
'Γ’bcdefghijklmnopqrstuvwxyz',
'abcdefghijklmnopqrstuvwxyz',
'18/20')
self.assertScore(
'abcdΓ©fghijklmnopqrstuvwxyz',
'abcdefghijklmnopqrstuvwxyz',
'18/20')
self.assertScore(
'abcdefghΓ―jklmnopqrstuvwxyz',
'abcdefghijklmnopqrstuvwxyz',
'18/20')
# Multiple extra diacritics
self.assertScore(
'Γ’bcdΓ©fghΓ―jklmnopqrstuvwxyz',
'abcdefghijklmnopqrstuvwxyz',
'15/20')
|
StarcoderdataPython
|
4801381
|
<gh_stars>10-100
from cisco_sdwan_policy.BaseObject import BaseObject
class Sequence(BaseObject):
def __init__(self,id,name,type,base_action,ip_type,match,actions,**kwargs):
self.id = id
self.name = name
self.type = type
self.baseAction = base_action
self.ip_type=ip_type
self.actions = actions
self.match = match
super().__init__(**kwargs)
@staticmethod
def get_list_obj(obj_id,lists):
for obj in lists:
if obj.id==obj_id:
return obj
return None
@classmethod
def from_json(cls,config,lists,**kwargs):
for action in config["actions"]:
if action.get("parameter"):
if type(action["parameter"])==list:
for para in action["parameter"]:
if para.get("ref"):
resp = cls.get_list_obj(para.get("ref"),lists)
if resp:
action["parameter"][action["parameter"].index(para)]["ref"]=resp
else:
raise Exception("List not found")
elif type(action["parameter"])==dict:
para = action["parameter"]
if para.get("ref"):
resp = cls.get_list_obj(para.get("ref"), lists)
if resp:
action["parameter"]["ref"] = resp
else:
raise Exception("List not found")
else:
# Might be cflowd
pass
new_match=[]
for match in config["match"]["entries"]:
matched=False
if match.get("ref"):
resp = cls.get_list_obj(match.get("ref"), lists)
if resp:
config["match"]["entries"][config["match"]["entries"].index(match)]["ref"]=resp
else:
raise Exception("Undefined List found.")
config["match"]=config["match"]["entries"]
id = config["sequenceId"]
name = config["sequenceName"]
types = config["sequenceType"]
baseAction = config.get("baseAction")
ip_type = config.get("sequenceIpType")
actions = config["actions"]
match = config["match"]
return cls(id,name,types,baseAction,ip_type,match,actions,**kwargs)
def to_json(self):
resp = {
"sequenceId": self.id,
"sequenceName": self.name,
"baseAction": self.baseAction,
"sequenceType": self.type,
"match": {
"entries": []
},
"actions": []
}
if self.ip_type:
resp["sequenceIpType"]=self.ip_type
for match in self.match:
if match.get("ref"):
resp["match"]["entries"].append({
"field":match["field"],
"ref":match["ref"].get_id()
})
else:
resp["match"]["entries"].append(match)
for action in self.actions:
if action.get("parameter"):
if type(action["parameter"]) == list:
new_para = []
for para in action["parameter"]:
if para.get("ref"):
new_para.append({
"field": para["field"],
"ref": para["ref"].get_id()
})
else:
new_para.append(para)
resp["actions"].append({
"type":action["type"],
"parameter":new_para
})
elif type(action["parameter"]) == dict:
if action["parameter"].get("ref"):
action["parameter"]["ref"]=action["parameter"]["ref"].get_id()
resp["actions"].append(action)
else:
resp["actions"].append(action)
else:
#cflowd
resp["actions"].append(action)
return resp
def add_match(self,field,value):
for matches in self.match:
if matches["field"]==field:
raise Exception("Duplicate field in Match.")
if type(value) in [str,int,float]:
self.match.append({
"field":field,
"value":value
})
else:
self.match.append({
"field":field,
"ref":value
})
def add_action(self,action_type,field,value):
def generate_param():
if type(value) in [str, int, float]:
return{
"field": field,
"value": value
}
else:
return{
"field": field,
"ref": value
}
for action in self.actions:
if action["type"]==action_type:
for param in action["parameter"]:
if param["field"]==field:
raise Exception("Duplicate field in Action.")
action["parameter"].append(generate_param())
return True
self.actions.append({
"type":action_type,
"parameter":[generate_param()]
})
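# An illustrative sketch of the two helpers above (not from the original module); the
# field names and values are hypothetical and are not validated against vManage here:
#   seq.add_match("dscp", 46)                       # scalar -> stored as a "value" entry
#   seq.add_match("sourceDataPrefixList", plist)    # list object -> stored as a "ref" entry
#   seq.add_action("set", "nextHop", "10.0.0.1")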
|
StarcoderdataPython
|
156216
|
import unittest
from app.models import Articles
class TestArticle(unittest.TestCase):
'''
Test Class to test the behaviour of the Article class
'''
def setUp(self):
'''
Set up that will run before every Test
'''
self.new_article = Articles("Palestinians evacuate the body of Palestinian journalist <NAME>, 31, who was shot and killed by an Israeli sharpshooter in the Gaza Strip on April 6, 2018 [<NAME>/Reuters]"
"https://www.aljazeera.com/news/","https://www.aljazeera.com/indepth/opinion/media-mass-deception-180409092703608.html/2018/06/1-lead-image.jpg","2018-06-21T11:00:53Z")
def test_instance(self):
self.assertTrue(isinstance(self.new_article,Articles))
if __name__ == '__main__':
unittest.main(verbosity=2)
|
StarcoderdataPython
|
3366827
|
<filename>src/afancontrol/temp/file.py
import glob
import re
from pathlib import Path
from typing import Optional, Tuple
from afancontrol.configparser import ConfigParserSection
from afancontrol.temp.base import Temp, TempCelsius
def _expand_glob(path: str):
matches = glob.glob(path)
if not matches:
return path # a FileNotFoundError will be raised on a first read attempt
if len(matches) == 1:
return matches[0]
raise ValueError("Expected glob to expand to a single path, got %r" % (matches,))
class FileTemp(Temp):
def __init__(
self,
temp_path: str, # /sys/class/hwmon/hwmon0/temp1
*,
min: Optional[TempCelsius],
max: Optional[TempCelsius],
panic: Optional[TempCelsius],
threshold: Optional[TempCelsius]
) -> None:
super().__init__(panic=panic, threshold=threshold)
temp_path = re.sub(r"_input$", "", temp_path)
# Allow paths looking like this (this one is from an nvme drive):
# /sys/devices/pci0000:00/0000:00:01.3/[...]/hwmon/hwmon*/temp1_input
# The `hwmon*` might change after reboot, but it is always a single
# directory within the device.
temp_path = _expand_glob(temp_path + "_input")
temp_path = re.sub(r"_input$", "", temp_path)
self._temp_input = Path(temp_path + "_input")
self._temp_min = Path(temp_path + "_min")
self._temp_max = Path(temp_path + "_max")
self._min = min
self._max = max
@classmethod
def from_configparser(cls, section: ConfigParserSection) -> Temp:
panic = TempCelsius(section.getfloat("panic", fallback=None))
threshold = TempCelsius(section.getfloat("threshold", fallback=None))
min = TempCelsius(section.getfloat("min", fallback=None))
max = TempCelsius(section.getfloat("max", fallback=None))
return cls(section["path"], min=min, max=max, panic=panic, threshold=threshold)
def __eq__(self, other):
if isinstance(other, type(self)):
return (
self._temp_input == other._temp_input
and self._temp_min == other._temp_min
and self._temp_max == other._temp_max
and self._min == other._min
and self._max == other._max
and self._panic == other._panic
and self._threshold == other._threshold
)
return NotImplemented
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return "%s(%r, min=%r, max=%r, panic=%r, threshold=%r)" % (
type(self).__name__,
str(self._temp_input),
self._min,
self._max,
self._panic,
self._threshold,
)
def _get_temp(self) -> Tuple[TempCelsius, TempCelsius, TempCelsius]:
temp = self._read_temp_from_path(self._temp_input)
return temp, self._get_min(), self._get_max()
def _get_min(self) -> TempCelsius:
if self._min is not None:
return self._min
try:
min_t = self._read_temp_from_path(self._temp_min)
except FileNotFoundError:
raise RuntimeError(
"Please specify `min` and `max` temperatures for "
"the %s sensor" % self._temp_input
)
return min_t
def _get_max(self) -> TempCelsius:
if self._max is not None:
return self._max
try:
max_t = self._read_temp_from_path(self._temp_max)
except FileNotFoundError:
raise RuntimeError(
"Please specify `min` and `max` temperatures for "
"the %s sensor" % self._temp_input
)
return max_t
def _read_temp_from_path(self, path: Path) -> TempCelsius:
return TempCelsius(int(path.read_text().strip()) / 1000)
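# A minimal construction sketch (illustrative only); the hwmon path is a hypothetical
# example and the limits are in degrees Celsius.
if __name__ == "__main__":
    sensor = FileTemp(
        "/sys/class/hwmon/hwmon0/temp1_input",  # hypothetical sensor path
        min=TempCelsius(30.0),
        max=TempCelsius(60.0),
        panic=TempCelsius(80.0),
        threshold=None,
    )
    print(sensor)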
|
StarcoderdataPython
|
3268699
|
<filename>datastructure/practice/c1/p_1_34.py
import random
def make_a_mistake(s):
index = random.randint(0, len(s) - 1)
c = s[index]
while not ord('A') <= ord(c) <= ord('z'):
index = random.randint(0, len(s) - 1)
c = s[index]
new_c = chr(random.randint(ord('A'), ord('z')))
chars = list(s)
chars[index] = new_c
return "".join(chars)
def main():
template = "I will never spam my friends again."
error_count = 0
for i in range(100):
make_mistake = bool(random.randint(0, 1)) if error_count < 8 else False
if make_mistake:
error_count += 1
print(make_a_mistake(template))
else:
print(template)
pass
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3304406
|
<reponame>gebeto/python<filename>s3/main.py
import boto3
import botocore
import time
import os
BUCKET_NAME = 'tms-system-docs'
resource = boto3.resource('s3',
aws_access_key_id=os.getenv("AWS_ACCESS_KEY_ID"),
aws_secret_access_key=os.getenv("AWS_SECRET_ACCESS_KEY"),
)
client = resource.meta.client
def get_resource():
return resource
def get_client():
return client
def obj_to_dict(key, client=client):
presigned_url = client.generate_presigned_url(
'get_object',
Params={
'Bucket': BUCKET_NAME,
'Key': key
},
ExpiresIn=3600
)
return {
"title": os.path.split(key)[-1],
"src": presigned_url,
}
def file_exists(bucket, file_key):
try:
bucket.Object(file_key).get()
return True
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'NoSuchKey':
print('NoSuchKey')
return False
def get_file_name_with_timestamp(file_name):
timestamp = str(int(time.time() * 1000))
file_name_split = list(os.path.splitext(file_name))
file_name_split[0] = file_name_split[0] + '-' + str(timestamp)
return "".join(file_name_split)
def upload_file(load_number, file_name, file):
    s3 = get_resource()
bucket = s3.Bucket(BUCKET_NAME)
file_name = get_file_name_with_timestamp(file_name)
file_key = f'load/{load_number}/{file_name}'
obj = bucket.put_object(Key=file_key, Body=file)
return obj_to_dict(file_key)
def all_documents(load_number):
client = get_client()
objs = client.list_objects_v2(Bucket=BUCKET_NAME, Prefix=f"load/{load_number}")["Contents"]
files = [obj_to_dict(obj["Key"]) for obj in objs]
return files
|
StarcoderdataPython
|
3317202
|
<reponame>Plasmakatt/farmerextreme<filename>debug/cameraextreme.py
#!/usr/bin/python -Btt
import sys
import os
import time
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/../lib")
print(os.path.dirname(os.path.realpath(__file__)) + "/../lib")
from cameracontrol import CameraControl
def main():
try:
camera_control = CameraControl()
camera_control.start()
while True:
qrcode = camera_control.get_qr_code()
if qrcode:
                print(qrcode)
time.sleep(1)
except KeyboardInterrupt:
camera_control.stop()
camera_control.join()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
4817378
|
<gh_stars>0
#!/usr/bin/python3
"""
Sensors aggregation and storage.
https://github.com/dimitar-kunchev/NR-VentilationMonitoring
@author: <NAME>
@license: See the LICENSE file
@email: <EMAIL>
"""
import RPi.GPIO as GPIO
import serial
import time
import pymysql
import configparser
import json
import os
import sys
import threading
import queue
db_connection = None # type: pymysql.Connection
# These declarations use some default settings. Adjust with the config file
PIN_S0 = 4
PIN_S1 = 3
PIN_S2 = 17
PIN_EN = 18
SENSORS_COUNT = 6
# Conversion of RPM to airflow for each sensor
RPM_TO_AIRFLOW_COEFFICIENTS = [
120.0 / 6000.0,
120.0 / 6000.0,
120.0 / 6000.0,
120.0 / 6000.0,
120.0 / 6000.0,
120.0 / 6000.0
]
# When set to True the mysql thread should stop
STOP_MYSQL_THREAD_FLAG = False
def set_slave_address(addr: int):
"""Sets the S* pins high/low to conenct to a sensor. Note we use inverted logic. Depends on your wiring!"""
GPIO.output(PIN_S0, GPIO.LOW if (0x01 & addr) else GPIO.HIGH)
GPIO.output(PIN_S1, GPIO.LOW if (0x01 & (addr >> 1)) else GPIO.HIGH)
GPIO.output(PIN_S2, GPIO.LOW if (0x01 & (addr >> 2)) else GPIO.HIGH)
def mysql_thread_func(config: configparser.ConfigParser, q: queue.Queue):
"""The MySQL thread. Push queries in the queue and it executes them. Two while loops so we reconnect when
something goes wrong"""
while not STOP_MYSQL_THREAD_FLAG:
# Connect database
try:
db_con = pymysql.connect(host=config.get('db', 'host'),
user=config.get('db', 'user'),
password=config.get('db', 'pass'),
database=config.get('db', 'db'))
while not STOP_MYSQL_THREAD_FLAG:
if not q.empty():
_query = q.get()
with db_con.cursor() as _c:
_c.execute(_query)
db_con.commit()
else:
time.sleep(1)
except pymysql.err.OperationalError:
time.sleep(2)
if __name__ == "__main__":
# Load the config
if not os.path.exists('./config.ini'):
print('No config file!')
sys.exit(1)
config = configparser.ConfigParser()
config.read('./config.ini')
SENSORS_COUNT = config.getint('sensors', 'count')
_tmp = config.get('sensors', 'rpm_to_airflow')
RPM_TO_AIRFLOW_COEFFICIENTS = json.loads(_tmp)
PIN_S0 = config.getint('gpio', 'address_0')
PIN_S1 = config.getint('gpio', 'address_1')
PIN_S2 = config.getint('gpio', 'address_2')
PIN_EN = config.getint('gpio', 'enable')
# Setup hardware
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(PIN_S0, GPIO.OUT)
GPIO.setup(PIN_S1, GPIO.OUT)
GPIO.setup(PIN_S2, GPIO.OUT)
GPIO.setup(PIN_EN, GPIO.OUT)
# Setup UART
uart = serial.Serial(port=config.get('uart', 'uart'), baudrate=config.getint('uart', 'baudrate'),
xonxoff=False, rtscts=False, timeout=1)
# Enable the multiplexor IC. Inverted logic!
GPIO.output(PIN_EN, GPIO.HIGH)
# Setup a queue to push mysql queries
queries_queue = queue.Queue()
# Start the mysql queries process
mysql_thread = threading.Thread(target=mysql_thread_func, args=(config, queries_queue))
mysql_thread.start()
# Loop reading and saving
try:
while True:
_sql_insert_values = []
for i in range(0, SENSORS_COUNT):
set_slave_address(i)
uart.flushInput()
# time.sleep(0.1)
# Wait for S symbol - frame start
uart.read_until(terminator=b'\x53', size=20)
# Wait for E symbol - frame end
_l = uart.read_until(terminator=b'\x45', size=20)
_parsed = False
_rpm = 0
_temp = 0
if _l and len(_l) > 1:
try:
_str = _l[:-1].decode('ASCII')
if _str and len(_str) > 1:
_ch = _str.split(',')
if len(_ch) is 2:
_rpm = float(_ch[0])
_temp = float(_ch[1])
_parsed = True
except:
_parsed = False
if _parsed:
# print('S%d RPM: %d Temp: %.2f' % (i, _rpm, _temp))
_airflow = RPM_TO_AIRFLOW_COEFFICIENTS[i] * _rpm
# _last_readings[i] = {'temp': _temp, 'rpm': _rpm, 'airflow': _airflow}
if _temp > -127:
_sql_insert_values.append('(now(), %d, %.2f, %.2f, %.2f)' % (i, _temp, _rpm, _airflow))
# else:
# print('S%d ERR' % i)
# with db_connection.cursor() as cr:
# cr.execute('insert into sensors_data (ts, sensor, temperature, rpm, airflow) values ' +
# ','.join(_sql_insert_values))
# db_connection.commit()
if len(_sql_insert_values):
_q = 'insert into sensors_data (ts, sensor, temperature, rpm, airflow) values ' + \
','.join(_sql_insert_values)
queries_queue.put(_q)
except KeyboardInterrupt:
print("Signal received")
print('Shutting down')
STOP_MYSQL_THREAD_FLAG = True
mysql_thread.join(2)
|
StarcoderdataPython
|
3370926
|
import kafka
import time
import redis
from arguments import configs
from helpers import get_absolute_links, normalize_url, to_sha1
if __name__ == "__main__":
producer = kafka.KafkaProducer(bootstrap_servers=configs.kafka_host)
exist_urls = redis.StrictRedis(host=configs.redis_host, port=configs.redis_port,
db=configs.redis_db, password=configs.redis_password)
# load homepages
homepages = open("pages.txt").readlines()
while True:
for homepage in homepages:
homepage = homepage.strip()
            # hash every absolute link from the homepage; store the hash in redis and send unseen URLs to the kafka producer
all_urls = get_absolute_links(homepage)
for url in all_urls:
url = normalize_url(url)
encoded_url = to_sha1(url)
if not exist_urls.exists(encoded_url):
# add new url to redis and add original to kafka producer
exist_urls.set(encoded_url, 0)
producer.send("links", url.encode())
time.sleep(10)
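# Note: deduplication keys redis on the sha1 of each normalized URL (via to_sha1), so
# the keys have a fixed length regardless of URL length; only URLs whose hash has not
# been seen before are produced to the "links" kafka topic.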
|
StarcoderdataPython
|
3374459
|
<filename>lib/JumpScale/clients/openvcloud/Client.py
from JumpScale import j
from JumpScale.clients.portal.PortalClient import ApiError
import time
import datetime
import os
import requests
def refresh_jwt(jwt, payload):
if payload['iss'] == 'itsyouonline':
refreshurl = "https://itsyou.online/v1/oauth/jwt/refresh"
response = requests.get(refreshurl, headers={'Authorization': 'bearer {}'.format(jwt)})
if response.ok:
return response.text
else:
raise RuntimeError("Failed to refresh JWT eror: {}:{}".format(response.status_code, response.text))
pass
else:
raise RuntimeError('Refresh JWT with issuers {} not support'.format(payload['iss']))
class Factory:
def __init__(self):
self.__jslocation__ = "j.clients.openvcloud"
def get(self, url, login=None, password=<PASSWORD>, secret=None, port=443, jwt=None):
cl = Client(url, login, password, secret, port, jwt)
return cl
def getFromService(self, service):
return self.get(
url=service.model.data.url,
login=service.model.data.login,
password=<PASSWORD>,
jwt=service.model.data.jwt,
port=service.model.data.port)
def patchMS1(api):
def patchmethod(method, argmap):
def wrapper(**kwargs):
for oldkey, newkey in argmap.items():
if oldkey in kwargs:
value = kwargs.pop(oldkey)
kwargs[newkey] = value
return method(**kwargs)
wrapper.__doc__ = method.__doc__
return wrapper
api.cloudapi.portforwarding.list = patchmethod(
api.cloudapi.portforwarding.list, {'cloudspaceId': 'cloudspaceid'})
api.cloudapi.portforwarding.delete = patchmethod(
api.cloudapi.portforwarding.delete, {'cloudspaceId': 'cloudspaceid'})
api.cloudapi.portforwarding.create = patchmethod(api.cloudapi.portforwarding.create,
{'cloudspaceId': 'cloudspaceid', 'machineId': 'vmid'})
class Client:
def __init__(self, url, login, password=<PASSWORD>, secret=None, port=443, jwt=None):
if not password and not secret and not jwt:
raise ValueError("Can not connect to openvcloud without either password, secret or jwt")
self._url = url
self._login = login
self._password = password
self._secret = secret
self._jwt = jwt
self.api = j.clients.portal.get(url, port)
# patch handle the case where the connection dies because of inactivity
self.__patch_portal_client(self.api)
self._isms1 = 'mothership1' in url
self.__login(password, secret, jwt)
if self._isms1:
jsonpath = os.path.join(os.path.dirname(__file__), 'ms1.json')
self.api.load_swagger(file=jsonpath, group='cloudapi')
patchMS1(self.api)
else:
self.api.load_swagger(group='cloudapi')
def __patch_portal_client(self, api):
# try to relogin in the case the connection is dead because of
# inactivity
origcall = api.__call__
def patch_call(that, *args, **kwargs):
try:
return origcall(that, *args, **kwargs)
            except ApiError as e:
                if e.response.status_code == 419:
self.__login(self._password, self._secret, self._jwt)
return origcall(that, *args, **kwargs)
raise
api.__call__ = patch_call
def __login(self, password, secret, jwt):
if not password and not secret and not jwt:
raise RuntimeError("Can not connect to openvcloud without either password, secret or jwt")
if jwt:
import jose.jwt
payload = jose.jwt.get_unverified_claims(jwt)
if payload['exp'] < time.time():
jwt = refresh_jwt(jwt, payload)
self.api._session.headers['Authorization'] = 'bearer {}'.format(jwt)
self._login = payload['username']
else:
if password:
if self._isms1:
secret = self.api.cloudapi.users.authenticate(
username=self._login, password=password)
else:
secret = self.api.system.usermanager.authenticate(
name=self._login, secret=password)
# make sure cookies are empty, clear guest cookie
self.api._session.cookies.clear()
self.api._session.cookies['beaker.session.id'] = secret
@property
def accounts(self):
ovc_accounts = self.api.cloudapi.accounts.list()
accounts = list()
for account in ovc_accounts:
accounts.append(Account(self, account))
return accounts
@property
def locations(self):
return self.api.cloudapi.locations.list()
def account_get(self, name, create=True,
maxMemoryCapacity=-1, maxVDiskCapacity=-1, maxCPUCapacity=-1, maxNASCapacity=-1,
maxNetworkOptTransfer=-1, maxNetworkPeerTransfer=-1, maxNumPublicIP=-1):
for account in self.accounts:
if account.model['name'] == name:
return account
else:
if create is False:
raise KeyError("No account with name \"%s\" found" % name)
self.api.cloudbroker.account.create(username=self.login,
name=name,
maxMemoryCapacity=maxMemoryCapacity,
maxVDiskCapacity=maxVDiskCapacity,
maxCPUCapacity=maxCPUCapacity,
maxNASCapacity=maxNASCapacity,
maxNetworkOptTransfer=maxNetworkOptTransfer,
maxNetworkPeerTransfer=maxNetworkPeerTransfer,
maxNumPublicIP=maxNumPublicIP)
return self.account_get(name, False)
@property
def login(self):
return self._login
def __repr__(self):
return "openvcloud client: %s" % self._url
__str__ = __repr__
class Authorizables:
@property
def owners(self):
_owners = []
for user in self.model['acl']:
if not user['canBeDeleted']:
_owners.append(user['userGroupId'])
return _owners
@property
def authorized_users(self):
return [u['userGroupId'] for u in self.model['acl']]
def authorize_user(self, username, right="ACDRUX"):
if username not in self.authorized_users:
self._addUser(username, right)
self.refresh()
return True
def unauthorize_user(self, username):
canBeDeleted = [u['userGroupId'] for u in self.model['acl'] if u.get('canBeDeleted', True) is True]
if username in self.authorized_users and username in canBeDeleted:
self._deleteUser(username)
self.refresh()
return True
class Account(Authorizables):
def __init__(self, client, model):
self.client = client
self.model = model
self.id = model['id']
@property
def spaces(self):
ovc_spaces = self.client.api.cloudapi.cloudspaces.list()
spaces = list()
for space in ovc_spaces:
if space['accountId'] == self.model['id']:
spaces.append(Space(self, space))
return spaces
@property
def disks(self):
"""
Wrapper to list all disks related to an account
:return: list of disks details
"""
return self.client.api.cloudapi.disks.list(accountId=self.id)
def delete_disk(self, disk_id, detach=True):
"""
Wrapper to delete disk by its id. I think there should be a class for disks to list all its wrappers
:param disk_id: integer: The disk id need to be removed
:param detach: boolean: detach the disk from the machine first
:return:
"""
return self.client.api.cloudapi.disks.delete(diskId=disk_id, detach=detach)
def space_get(self, name, location="", create=True,
maxMemoryCapacity=-1, maxVDiskCapacity=-1, maxCPUCapacity=-1, maxNASCapacity=-1,
maxNetworkOptTransfer=-1, maxNetworkPeerTransfer=-1, maxNumPublicIP=-1,
externalnetworkId=None):
"""
        Get the cloudspace if it exists; if not, create it (when `create` is True).
        A location must always be specified, e.g. for ms1: ca1, eu1 (uk), eu2 (be).
"""
if not location:
raise j.exceptions.RuntimeError("Location cannot be empty.")
for space in self.spaces:
if space.model['name'] == name and space.model['location'] == location:
return space
else:
if create:
self.client.api.cloudapi.cloudspaces.create(access=self.client.login,
name=name,
accountId=self.id,
location=location,
maxMemoryCapacity=maxMemoryCapacity,
maxVDiskCapacity=maxVDiskCapacity,
maxCPUCapacity=maxCPUCapacity,
maxNASCapacity=maxNASCapacity,
maxNetworkOptTransfer=maxNetworkOptTransfer,
maxNetworkPeerTransfer=maxNetworkPeerTransfer,
maxNumPublicIP=maxNumPublicIP,
externalnetworkId=externalnetworkId)
return self.space_get(name, location, False)
else:
raise j.exceptions.RuntimeError(
"Could not find space with name %s" % name)
def create_disk(self, name, gid, description, size=0, type="B"):
res = self.client.api.cloudapi.disks.create(accountId=self.id,
name=name,
gid=gid,
description=description,
size=size,
type=type)
return res
def _addUser(self, username, right):
self.client.api.cloudapi.accounts.addUser(
accountId=self.id, userId=username, accesstype=right)
def _deleteUser(self, username):
self.client.api.cloudapi.accounts.deleteUser(accountId=self.id, userId=username, recursivedelete=True)
def save(self):
self.client.api.cloudapi.accounts.update(accountId=self.model['id'],
name=self.model['name'],
maxMemoryCapacity=self.model.get('maxMemoryCapacity'),
maxVDiskCapacity=self.model.get('maxVDiskCapacity'),
maxCPUCapacity=self.model.get('maxCPUCapacity'),
maxNASCapacity=self.model.get('maxNASCapacity'),
maxNetworkOptTransfer=self.model.get('maxNetworkOptTransfer'),
maxNetworkPeerTransfer=self.model.get('maxNetworkPeerTransfer'),
maxNumPublicIP=self.model.get('maxNumPublicIP')
)
def refresh(self):
accounts = self.client.api.cloudapi.accounts.list()
for account in accounts:
if account['id'] == self.id:
self.model = account
break
else:
raise j.exceptions.RuntimeError("Account has been deleted")
def delete(self):
self.client.api.cloudbroker.account.delete(accountId=self.id, reason='API request')
def __str__(self):
return "openvcloud client account: %(name)s" % (self.model)
__repr__ = __str__
class Space(Authorizables):
def __init__(self, account, model):
self.account = account
self.client = account.client
self.model = model
self.id = model["id"]
def add_external_network(self, name, subnet, gateway, startip, endip, gid, vlan):
self.client.api.cloudbroker.iaas.addExternalNetwork(cloudspaceId=self.id,
name=name,
subnet=subnet,
gateway=gateway,
startip=startip,
endip=endip,
gid=gid,
vlan=vlan)
def save(self):
self.client.api.cloudapi.cloudspaces.update(cloudspaceId=self.model['id'],
name=self.model['name'],
maxMemoryCapacity=self.model.get('maxMemoryCapacity'),
maxVDiskCapacity=self.model.get('maxVDiskCapacity'),
maxCPUCapacity=self.model.get('maxCPUCapacity'),
maxNASCapacity=self.model.get('maxNASCapacity'),
maxNetworkOptTransfer=self.model.get('maxNetworkOptTransfer'),
maxNetworkPeerTransfer=self.model.get('maxNetworkPeerTransfer'),
maxNumPublicIP=self.model.get('maxNumPublicIP')
)
@property
def machines(self):
ovc_machines = self.client.api.cloudapi.machines.list(cloudspaceId=self.id)
machines = dict()
for machine in ovc_machines:
machines[machine['name']] = Machine(self, machine)
return machines
def _addUser(self, username, right):
self.client.api.cloudapi.cloudspaces.addUser(
cloudspaceId=self.id, userId=username, accesstype=right)
def _deleteUser(self, username):
self.client.api.cloudapi.cloudspaces.deleteUser(cloudspaceId=self.id, userId=username, recursivedelete=True)
def refresh(self):
cloudspaces = self.client.api.cloudapi.cloudspaces.list()
for cloudspace in cloudspaces:
if cloudspace['id'] == self.id:
self.model = cloudspace
break
else:
raise j.exceptions.RuntimeError("Cloudspace has been deleted")
def machine_create(self, name, memsize=2, vcpus=1, disksize=10, datadisks=[], image="Ubuntu 15.10 x64", sizeId=None, stackId=None):
"""
@param memsize in MB or GB
for now vcpu's is ignored (waiting for openvcloud)
"""
imageId = self.image_find_id(image)
if sizeId is None:
sizeId = self.size_find_id(memsize)
if name in self.machines:
raise j.exceptions.RuntimeError(
"Name is not unique, already exists in %s" % self)
print("cloudspaceid:%s name:%s size:%s image:%s disksize:%s" %
(self.id, name, sizeId, imageId, disksize))
if stackId:
self.client.api.cloudbroker.machine.createOnStack(cloudspaceId=self.id, name=name,
sizeId=sizeId, imageId=imageId, disksize=disksize, datadisks=datadisks, stackid=stackId)
else:
self.client.api.cloudapi.machines.create(
cloudspaceId=self.id, name=name, sizeId=sizeId, imageId=imageId, disksize=disksize, datadisks=datadisks)
return self.machines[name]
@property
def portforwardings(self):
return self.client.api.cloudapi.portforwarding.list(cloudspaceId=self.id)
def isPortforwardExists(self, publicIp, publicport, protocol):
for pf in self.portforwardings:
if pf['publicIp'] == publicIp and int(pf['publicPort']) == int(publicport) and pf['protocol'] == protocol:
return True
return False
def size_find_id(self, memory=None, vcpus=None):
if memory < 100:
memory = memory * 1024 # prob given in GB
sizes = [(item["memory"], item) for item in self.sizes]
sizes.sort(key=lambda size: size[0])
sizes.reverse()
for size, sizeinfo in sizes:
if memory > size / 1.1:
return sizeinfo['id']
raise j.exceptions.RuntimeError("did not find memory size:%s" % memory)
@property
def sizes(self):
return self.client.api.cloudapi.sizes.list(cloudspaceId=self.id)
def image_find_id(self, name):
name = name.lower()
for image in self.images:
imageNameFound = image["name"].lower()
if imageNameFound.find(name) != -1:
return image["id"]
images = [item["name"].lower() for item in self.images]
raise j.exceptions.RuntimeError(
"did not find image:%s\nPossible Images:\n%s\n" % (name, images))
@property
def images(self):
return self.client.api.cloudapi.images.list(cloudspaceId=self.id, accountId=self.account.id)
def delete(self):
self.client.api.cloudapi.cloudspaces.delete(cloudspaceId=self.id)
def get_space_ip(self):
space = self.client.api.cloudapi.cloudspaces.get(cloudspaceId=self.id)
def getSpaceIP(space):
if space['publicipaddress'] == '':
space = self.client.api.cloudapi.cloudspaces.get(cloudspaceId=self.id)
return space['publicipaddress']
space_ip = getSpaceIP(space)
start = time.time()
timeout = 120
while space_ip == '' and start + timeout > time.time():
time.sleep(5)
space_ip = getSpaceIP(space)
if space_ip == '':
raise j.exceptions.RuntimeError("Could not get IP Address for space %(name)s" % space)
return space_ip
def __repr__(self):
return "space: %s (%s)" % (self.model["name"], self.id)
__str__ = __repr__
class Machine:
def __init__(self, space, model):
self.space = space
self.client = space.client
self.model = model
self.id = self.model["id"]
self.name = self.model["name"]
def start(self):
self.client.api.cloudapi.machines.start(machineId=self.id)
def stop(self):
self.client.api.cloudapi.machines.stop(machineId=self.id)
def restart(self):
self.client.api.cloudapi.machines.restart(machineId=self.id)
def delete(self):
self.client.api.cloudapi.machines.delete(machineId=self.id)
def create_snapshot(self, name=str(datetime.datetime.now())):
self.client.api.cloudapi.machines.snapshot(machineId=self.id, name=name)
def list_snapshots(self):
return self.client.api.cloudapi.machines.listSnapshots(machineId=self.id)
def delete_snapshot(self, epoch):
self.client.api.cloudapi.machines.deleteSnapshot(machineId=self.id, epoch=epoch)
def add_disk(self, name, description, size=10, type='D', ssdSize=0):
disk_id = self.client.api.cloudapi.machines.addDisk(machineId=self.id,
diskName=name,
description=description,
size=size,
type=type,
ssdSize=ssdSize)
return disk_id
def disk_limit_io(self, disk_id, iops=50):
self.client.api.cloudapi.disks.limitIO(diskId=disk_id, iops=iops)
@property
def portforwardings(self):
return self.client.api.cloudapi.portforwarding.list(cloudspaceId=self.space.id, machineId=self.id)
def create_portforwarding(self, publicport, localport, protocol='tcp'):
if protocol not in ['tcp', 'udp']:
raise j.exceptions.RuntimeError("Protocol for portforward should be tcp or udp not %s" % protocol)
machineip, _ = self.get_machine_ip()
publicAddress = self.space.model['publicipaddress']
if not publicAddress:
raise j.exceptions.RuntimeError("No public address found, cannot create port forward")
# define real publicport, override it by a generated one if needed
realpublicport = publicport
if publicport is None:
unavailable_ports = [int(portinfo['publicPort']) for portinfo in self.space.portforwardings]
candidate = 2200
while candidate in unavailable_ports:
candidate += 1
realpublicport = candidate
if not self.space.isPortforwardExists(publicAddress, realpublicport, protocol):
try:
self.client.api.cloudapi.portforwarding.create(
cloudspaceId=self.space.id,
protocol=protocol,
localPort=localport,
machineId=self.id,
publicIp=publicAddress,
publicPort=realpublicport
)
except Exception as e:
# if we have a conflict response, let's check something:
# - if it's an auto-generated port, we probably hit a concurrence issue
# let's try again with a new port
if str(e).startswith("409 Conflict") and publicport is None:
return self.create_portforwarding(None, localport, protocol)
# - if the port was choose excplicitly, then it's not the lib's fault
raise
return (realpublicport, localport)
def delete_portforwarding(self, publicport):
self.client.api.cloudapi.portforwarding.deleteByPort(
cloudspaceId=self.space.id,
publicIp=self.space.model['publicipaddress'],
publicPort=publicport,
proto='tcp'
)
def delete_portfowarding_by_id(self, pfid):
self.client.api.cloudapi.portforwarding.delete(cloudspaceid=self.space.id,
id=pfid)
def get_machine_ip(self):
machine = self.client.api.cloudapi.machines.get(machineId=self.id)
def getMachineIP(machine):
if machine['interfaces'][0]['ipAddress'] == 'Undefined':
machine = self.client.api.cloudapi.machines.get(
machineId=self.id)
return machine['interfaces'][0]['ipAddress']
machineip = getMachineIP(machine)
start = time.time()
timeout = 200
while machineip == 'Undefined' and start + timeout > time.time():
time.sleep(5)
machineip = getMachineIP(machine)
if machineip == 'Undefined':
raise j.exceptions.RuntimeError(
"Could not get IP Address for machine %(name)s" % machine)
return machineip, machine
def get_ssh_connection(self, requested_sshport=None):
"""
        Get an SSH-based executor for the machine.
        Will attempt to create a port forwarding for SSH (port 22) if none exists.
:return:
"""
machineip, machine = self.get_machine_ip()
publicip = self.space.model['publicipaddress']
while not publicip:
time.sleep(5)
self.space.refresh()
publicip = self.space.model['publicipaddress']
sshport = None
usedports = set()
for portforward in self.space.portforwardings:
if portforward['localIp'] == machineip and int(portforward['localPort']) == 22:
sshport = int(portforward['publicPort'])
break
usedports.add(int(portforward['publicPort']))
if not requested_sshport:
requested_sshport = 2200
while requested_sshport in usedports:
requested_sshport += 1
if not sshport:
self.create_portforwarding(requested_sshport, 22)
sshport = requested_sshport
login = machine['accounts'][0]['login']
password = machine['accounts'][0]['password']
        # TODO: we need to work with keys *2
return j.tools.executor.getSSHBased(publicip, sshport, login, password)
def __repr__(self):
return "machine: %s (%s)" % (self.model["name"], self.id)
__str__ = __repr__
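# A minimal usage sketch (illustrative only); the URL, credentials, account/space names
# and location are hypothetical placeholders.
if __name__ == '__main__':
    cl = j.clients.openvcloud.get('https://example-g8.example.com',
                                  login='myuser', password='<PASSWORD>')
    account = cl.account_get('myaccount', create=False)
    space = account.space_get('myspace', location='be-g8-1', create=False)
    machine = space.machine_create(name='vm1', memsize=2, disksize=10,
                                   image='Ubuntu 15.10 x64')
    print(machine)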
|
StarcoderdataPython
|
1758653
|
# General variables
jugador_x = 0
# Gameloop
while True:
if termina_juego():
break
    # Check which keys are pressed
if tecla_derecha:
        # Update the game state
jugador_x += 1
    # Draw the player according to the new state
pintar_jugador(jugador_x)
|
StarcoderdataPython
|
3327779
|
"""Python client."""
import logging
import sys
import cosmosid.api.upload as upload
import cosmosid.utils as utils
from cosmosid.api import auth
from cosmosid.api.analysis import Analysis
from cosmosid.api.artifacts import Artifacts
from cosmosid.api.files import Files, Runs
from cosmosid.api.import_workflow import ImportWorkflow
from cosmosid.api.reports import Reports
from cosmosid.api.workflow import Workflow
from cosmosid.helpers.auth import ApiKeyAuth
from cosmosid.helpers.exceptions import (
CosmosidException,
NotFoundException,
UploadException,
ValidationError,
)
LOGGER = logging.getLogger(__name__)
class CosmosidApi(object):
"""
Client is a python client on top of the CosmosID interface.
"""
logger = logging.getLogger(__name__)
BASE_URL = "https://app.cosmosid.com"
def __init__(self, api_key=None, base_url=BASE_URL):
"""Initialize a client with the given params."""
try:
if not api_key:
api_key = self.__auth()
api_key = utils.key_len(api_key)
except ValidationError as err:
utils.log_traceback(err)
base_url = base_url or self.BASE_URL
if base_url != self.BASE_URL:
self.logger.info("Using base URL: %s", base_url)
self.base_url = base_url
self.api_key = api_key
def __auth(self):
"""Read api_key for authentication."""
api_key = None
try:
auth = ApiKeyAuth()
api_key = auth()
if api_key is None:
raise ValidationError("Api Key is empty")
except (KeyError, ValueError) as err:
self.logger.info("Can't get Cosmosid Api Key")
utils.log_traceback(err)
return api_key
def dashboard(self, parent):
file_obj = Files(base_url=self.base_url, api_key=self.api_key)
try:
res = file_obj.get_dashboard(parent_id=parent)
if res:
if res["status"]:
return res
else:
raise NotFoundException(res["message"])
else:
raise CosmosidException(
"Response from service is empty " "for directory {}".format(parent)
)
except NotFoundException as err:
utils.log_traceback(err)
except CosmosidException as err:
self.logger.error("Get directory list exception")
utils.log_traceback(err)
except Exception as err:
self.logger.error("Failed to get listing of directory %s", parent)
utils.log_traceback(err)
def get_enabled_workflows(self):
workflow = Workflow(base_url=self.base_url, api_key=self.api_key)
try:
return workflow.get_workflows()
except Exception as err:
self.logger.error("Client exception occurred")
utils.log_traceback(err)
def import_workflow(self, workflow_ids, files, file_type, parent_id=None):
import_wf = ImportWorkflow(base_url=self.base_url, api_key=self.api_key)
files_s3 = []
try:
for file in files["files"]:
files_s3.append(
upload.upload_file(
file=file,
file_type=file_type,
parent_id=parent_id,
api_key=self.api_key,
base_url=self.base_url,
)
)
import_wf.import_workflow(
workflow_ids,
{"files": files_s3, "file_name": files["sample_name"]},
file_type,
parent_id,
)
except UploadException as err:
self.logger.error("\nError occurred on File import: {}".format(files))
utils.log_traceback(err)
def upload_files(self, files, file_type, parent_id=None):
"""Upload single file."""
error_msg = "\nError occurred on File upload: {}".format(files)
try:
upload_res = upload.upload_and_save(
files=files,
parent_id=parent_id,
file_type=file_type,
base_url=self.base_url,
api_key=self.api_key,
)
if upload_res:
return upload_res["id"]
else:
self.logger.error(error_msg)
except UploadException as err:
self.logger.error(error_msg)
utils.log_traceback(err)
def analysis_list(self, file_id=None, run_id=None):
"""Get list of analysis for a given file id."""
analysis = Analysis(base_url=self.base_url, api_key=self.api_key)
try:
analysis_list = analysis.get_list(file_id=file_id, run_id=run_id)
if analysis_list:
if analysis_list["status"]:
return analysis_list
else:
raise NotFoundException(analysis_list["message"])
else:
raise CosmosidException(
"Error occurred on get list of " "analysis for a File: %s" % file_id
)
except NotFoundException as err:
self.logger.error("NotFound")
utils.log_traceback(err)
except CosmosidException as err:
self.logger.error("Get analysis list exception")
utils.log_traceback(err)
except Exception as err:
self.logger.error("Client exception occurred")
utils.log_traceback(err)
def artifacts_list(
self,
run_id=None,
artifact_type=None,
output_file=None,
output_dir=None,
url=None,
):
"""Get list of artifact for a given file id."""
artifacts = Artifacts(base_url=self.base_url, api_key=self.api_key)
artifacts_content = artifacts.get_list(
run_id=run_id, artifact_type=artifact_type
)
if not artifacts_content:
raise Exception("Exception occurred.")
if url:
sys.stdout.write(artifacts_content["data"])
sys.stdout.flush()
return ("", "")
if run_id and artifact_type:
result = artifacts.save_artifacts(
url=artifacts_content["data"],
output_file=output_file,
output_dir=output_dir,
)
if not result:
raise Exception("Exception occurred during artifact creation.")
LOGGER.info(f"Artifact has been saved to: {result}")
LOGGER.info(f"Task Done")
return ("", "")
if run_id and artifact_type is None:
artifacts_content = artifacts.get_artifacts(run_id=run_id)
header = ["artifact_type"]
if not artifacts_content:
raise Exception("Exception occurred.")
if not artifacts_content["artifacts"]:
LOGGER.info(
f"\nThere are no artifacts for run id {artifacts_content['run_id']}"
)
return (header, [[" " for _ in range(len(header))]])
body = [[i["artifact_type"]] for i in artifacts_content["artifacts"]]
return (header, body)
def report(self, file_id=None, output_file=None, output_dir=None):
"""Upload single file."""
report = Reports(base_url=self.base_url, api_key=self.api_key, file_id=file_id)
try:
file_obj = Files(base_url=self.base_url, api_key=self.api_key)
res = file_obj.get_file(file_id=file_id)
if not res:
raise CosmosidException(
f"Response from service is empty for file id {file_id}"
)
results = report.save_report(out_file=output_file, out_dir=output_dir)
if results["status"]:
return results
else:
raise CosmosidException(f'{results["message"]} File id: {file_id}')
except CosmosidException as err:
self.logger.error("Save report error")
utils.log_traceback(err)
def sample_run_list(self, file_id):
"""Get list of runs for a given file id."""
sample_runs = Runs(base_url=self.base_url, api_key=self.api_key)
try:
sample_run_list = sample_runs.get_runs_list(file_id=file_id)
if sample_run_list:
if sample_run_list["status"]:
return sample_run_list
else:
raise NotFoundException(sample_run_list["message"])
else:
raise CosmosidException(
f"Error occurred on get list of runs for a File: {file_id}"
)
except NotFoundException as err:
self.logger.error("NotFound")
utils.log_traceback(err)
except CosmosidException as err:
self.logger.error("Get runs list exception")
utils.log_traceback(err)
except Exception as err:
self.logger.error("Client exception occurred")
utils.log_traceback(err)
def pricing(self, data):
"""Get pricing information for the given list of samples and their sizes
data:
[ { "sample_key": "sample_name", "extension": "bam", "file_sizes": [100, 300]},
...
]
"""
try:
return upload.pricing(
data=data, base_url=self.base_url, api_key=self.api_key
)
except Exception as err:
self.logger.error(err)
utils.log_traceback(err)
def profile(self):
""" "Get profile information for current user"""
try:
return auth.get_profile(self.base_url, self.api_key)
except Exception as err:
self.logger.error("Client exception occurred")
utils.log_traceback(err)
raise
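# Hypothetical usage sketch (not part of the original source): the API key and
# file id below are placeholders, and error handling is omitted. Only methods
# defined on CosmosidApi above are used.
#
#     client = CosmosidApi(api_key="YOUR_API_KEY")
#     workflows = client.get_enabled_workflows()
#     runs = client.sample_run_list(file_id="YOUR_FILE_ID")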
|
StarcoderdataPython
|
1605277
|
<filename>World 3/Exercise 82.py
import os

odds = []
evens = []
general = []
while True:
    os.system('cls' if os.name == 'nt' else 'clear')
    numbers = float(input("Type a value: "))
    general.append(numbers)
    if numbers % 2 == 0:
        evens.append(numbers)
    else:
        odds.append(numbers)
    if numbers < 0:
        # a negative value ends input; remove it from the lists before breaking
        if (numbers * -1) % 2 == 1:
            odds.pop()
        else:
            evens.pop()
        general.pop()
        break
print(f"List of evens: {evens}")
print(f"List of Odds: {odds}")
print(f"General numbers: {general}")
|
StarcoderdataPython
|
1674763
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.mark import parametrize
from ducktape.utils.util import wait_until
from ducktape.mark.resource import cluster
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.services.kafka import KafkaService
from kafkatest.services.verifiable_producer import VerifiableProducer
from kafkatest.services.console_consumer import ConsoleConsumer
from kafkatest.tests.produce_consume_validate import ProduceConsumeValidateTest
from kafkatest.utils import is_int_with_prefix
class CompressionTest(ProduceConsumeValidateTest):
"""
These tests validate produce / consume for compressed topics.
"""
COMPRESSION_TYPES = ["snappy", "gzip", "lz4", "zstd", "none"]
def __init__(self, test_context):
""":type test_context: ducktape.tests.test.TestContext"""
super(CompressionTest, self).__init__(test_context=test_context)
self.topic = "test_topic"
self.zk = ZookeeperService(test_context, num_nodes=1)
self.kafka = KafkaService(test_context, num_nodes=1, zk=self.zk, topics={self.topic: {
"partitions": 10,
"replication-factor": 1}})
self.num_partitions = 10
self.timeout_sec = 60
self.producer_throughput = 1000
self.num_producers = len(self.COMPRESSION_TYPES)
self.messages_per_producer = 1000
self.num_consumers = 1
def setUp(self):
self.zk.start()
def min_cluster_size(self):
# Override this since we're adding services outside of the constructor
return super(CompressionTest, self).min_cluster_size() + self.num_producers + self.num_consumers
@cluster(num_nodes=8)
@parametrize(compression_types=COMPRESSION_TYPES, new_consumer=True)
@parametrize(compression_types=COMPRESSION_TYPES, new_consumer=False)
def test_compressed_topic(self, compression_types, new_consumer):
"""Test produce => consume => validate for compressed topics
Setup: 1 zk, 1 kafka node, 1 topic with partitions=10, replication-factor=1
compression_types parameter gives a list of compression types (or no compression if
"none"). Each producer in a VerifiableProducer group (num_producers = number of compression
types) will use a compression type from the list based on producer's index in the group.
- Produce messages in the background
- Consume messages in the background
- Stop producing, and finish consuming
- Validate that every acked message was consumed
"""
self.kafka.security_protocol = "PLAINTEXT"
self.kafka.interbroker_security_protocol = self.kafka.security_protocol
self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka,
self.topic, throughput=self.producer_throughput,
message_validator=is_int_with_prefix,
compression_types=compression_types)
self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka, self.topic,
new_consumer=new_consumer, consumer_timeout_ms=60000,
message_validator=is_int_with_prefix)
self.kafka.start()
self.run_produce_consume_validate(lambda: wait_until(
lambda: self.producer.each_produced_at_least(self.messages_per_producer) == True,
timeout_sec=120, backoff_sec=1,
err_msg="Producer did not produce all messages in reasonable amount of time"))
|
StarcoderdataPython
|
1758180
|
<filename>src/ite/algs/causal_multitask_gaussian_processes/model.py
# Copyright (c) 2019, <NAME>
# Licensed under the BSD 3-clause license (see LICENSE.txt)
# third party
import GPy
import numpy as np
import pandas as pd
from sklearn.neighbors import KNeighborsRegressor
# ite absolute
from ite.utils.metrics import HistoricMetrics
from ite.utils.metrics import Metrics
class CMGP:
"""
An implementation of various Gaussian models for Causal inference building on GPy.
"""
def __init__(
self,
dim: int = 1,
dim_outcome: int = 2,
mod: str = "Multitask",
mode: str = "CMGP",
max_gp_iterations: int = 1000,
) -> None:
"""
Class constructor.
Initialize a GP object for causal inference.
:mod: 'Multitask'
:dim: the dimension of the input. Default is 1
:kern: ['Matern'] or ['RBF'], Default is the Radial Basis Kernel
:mkern: For multitask models, can select from IMC and LMC models, default is IMC
"""
# Setup
self.dim = dim
self.dim_outcome = dim_outcome
self.mod = mod
self.mode = mode
self.max_gp_iterations = max_gp_iterations
if (self.dim < 1) or (type(self.dim) != int):
raise ValueError(
"Invalid value for the input dimension! Input dimension has to be a positive integer."
)
self.train_perf_metrics = HistoricMetrics()
def train(
self,
Train_X: pd.DataFrame,
Train_T: pd.DataFrame,
Train_Y: pd.DataFrame,
Opt_Train_Y: pd.DataFrame,
Test_X: pd.DataFrame,
Test_Y: pd.DataFrame,
) -> HistoricMetrics:
"""
Optimizes the model hyperparameters using the factual samples for the treated and control arms.
Train_X has to be an N x dim matrix.
:Train_X: The input covariates
:Train_T: The treatment assignments
:Train_Y: The corresponding outcomes
"""
# Inputs: Train_X (the features), Train_T (treatment assignments), Train_Y (outcomes)
# Train_X has to be an N x dim matrix.
Dataset = pd.DataFrame(Train_X)
Dataset["Y"] = Train_Y
Dataset["T"] = Train_T
if self.dim > 1:
Feature_names = list(range(self.dim))
else:
Feature_names = [0]
Dataset0 = Dataset[Dataset["T"] == 0].copy()
Dataset1 = Dataset[Dataset["T"] == 1].copy()
# Extract data for the first learning task (control population)
X0 = np.reshape(Dataset0[Feature_names].copy(), (len(Dataset0), self.dim))
y0 = np.reshape(np.array(Dataset0["Y"].copy()), (len(Dataset0), 1))
# Extract data for the second learning task (treated population)
X1 = np.reshape(Dataset1[Feature_names].copy(), (len(Dataset1), self.dim))
y1 = np.reshape(np.array(Dataset1["Y"].copy()), (len(Dataset1), 1))
# Create an instance of a GPy Coregionalization model
K0 = GPy.kern.RBF(self.dim, ARD=True)
K1 = GPy.kern.RBF(self.dim, ARD=True)
kernel_dict = {
"CMGP": GPy.util.multioutput.LCM(
input_dim=self.dim, num_outputs=self.dim_outcome, kernels_list=[K0, K1]
),
"NSGP": GPy.util.multioutput.ICM(
input_dim=self.dim, num_outputs=self.dim_outcome, kernel=K0
),
}
self.model = GPy.models.GPCoregionalizedRegression(
X_list=[X0, X1], Y_list=[y0, y1], kernel=kernel_dict[self.mode]
)
self.initialize_hyperparameters(Train_X, Train_T, Train_Y)
try:
self.model.optimize("bfgs", max_iters=self.max_gp_iterations)
except np.linalg.LinAlgError as err:
print("Covariance matrix not invertible. ", err)
raise err
metrics = self.test(Train_X, Opt_Train_Y)
self.train_perf_metrics.add(
"sqrt_PEHE", metrics.sqrt_PEHE(), "in-sample metrics"
)
self.train_perf_metrics.add("ATE", metrics.ATE(), "in-sample metrics")
self.train_perf_metrics.add("MSE", metrics.MSE(), "in-sample metrics")
metrics = self.test(Test_X, Test_Y)
self.train_perf_metrics.add(
"sqrt_PEHE", metrics.sqrt_PEHE(), "out-sample metrics"
)
self.train_perf_metrics.add("ATE", metrics.ATE(), "out-sample metrics")
self.train_perf_metrics.add("MSE", metrics.ATE(), "out-sample metrics")
return self.train_perf_metrics
def train_metrics(self) -> HistoricMetrics:
return self.train_perf_metrics
def predict(self, X: pd.DataFrame) -> pd.DataFrame:
"""
Infers the treatment effect for a certain set of input covariates.
Returns the predicted ITE and posterior variance.
:X: The input covariates at which the outcomes need to be predicted
"""
if self.dim == 1:
X_ = X[:, None]
X_0 = np.hstack([X_, np.reshape(np.array([0] * len(X)), (len(X), 1))])
X_1 = np.hstack([X_, np.reshape(np.array([1] * len(X)), (len(X), 1))])
noise_dict_0 = {"output_index": X_0[:, 1:].astype(int)}
noise_dict_1 = {"output_index": X_1[:, 1:].astype(int)}
Y_est_0 = self.model.predict(X_0, Y_metadata=noise_dict_0)[0]
Y_est_1 = self.model.predict(X_1, Y_metadata=noise_dict_1)[0]
else:
X_0 = np.array(
np.hstack([X, np.zeros_like(X[:, 1].reshape((len(X[:, 1]), 1)))])
)
X_1 = np.array(
np.hstack([X, np.ones_like(X[:, 1].reshape((len(X[:, 1]), 1)))])
)
X_0_shape = X_0.shape
X_1_shape = X_1.shape
noise_dict_0 = {
"output_index": X_0[:, X_0_shape[1] - 1]
.reshape((X_0_shape[0], 1))
.astype(int)
}
noise_dict_1 = {
"output_index": X_1[:, X_1_shape[1] - 1]
.reshape((X_1_shape[0], 1))
.astype(int)
}
Y_est_0 = np.array(
list(self.model.predict(X_0, Y_metadata=noise_dict_0)[0])
)
Y_est_1 = np.array(
list(self.model.predict(X_1, Y_metadata=noise_dict_1)[0])
)
result = pd.DataFrame(
0, index=np.arange(len(Y_est_0)), columns=["y_hat_0", "y_hat_1"]
)
result["y_hat_0"] = Y_est_0
result["y_hat_1"] = Y_est_1
return result
def test(self, Test_X: pd.DataFrame, Test_Y: pd.DataFrame) -> Metrics:
"""
Infers the treatment effect for a certain set of input covariates and generate metrics, compared to a reference.
"""
Hat_Y = self.predict(Test_X).to_numpy()
return Metrics(Hat_Y, Test_Y)
def initialize_hyperparameters(
self, X: pd.DataFrame, T: pd.DataFrame, Y: pd.DataFrame
) -> None:
"""
Initializes the multi-tasking model's hyper-parameters before passing to the optimizer
:X: The input covariates
:T: The treatment assignments
:Y: The corresponding outcomes
"""
# -----------------------------------------------------------------------------------
# Output Parameters:
# -----------------
# :Ls0, Ls1: length scale vectors for treated and control, dimensions match self.dim
# :s0, s1: noise variances for the two kernels
# :a0, a1: diagonal elements of correlation matrix 0
# :b0, b1: off-diagonal elements of correlation matrix 1
# -----------------------------------------------------------------------------------
Dataset = pd.DataFrame(X)
Dataset["Y"] = Y
Dataset["T"] = T
if self.dim > 1:
Feature_names = list(range(self.dim))
else:
Feature_names = [0]
Dataset0 = Dataset[Dataset["T"] == 0].copy()
Dataset1 = Dataset[Dataset["T"] == 1].copy()
neigh0 = KNeighborsRegressor(n_neighbors=10)
neigh1 = KNeighborsRegressor(n_neighbors=10)
neigh0.fit(Dataset0[Feature_names], Dataset0["Y"])
neigh1.fit(Dataset1[Feature_names], Dataset1["Y"])
Dataset["Yk0"] = neigh0.predict(Dataset[Feature_names])
Dataset["Yk1"] = neigh1.predict(Dataset[Feature_names])
Dataset0["Yk0"] = Dataset.loc[Dataset["T"] == 0, "Yk0"]
Dataset0["Yk1"] = Dataset.loc[Dataset["T"] == 0, "Yk1"]
Dataset1["Yk0"] = Dataset.loc[Dataset["T"] == 1, "Yk0"]
Dataset1["Yk1"] = Dataset.loc[Dataset["T"] == 1, "Yk1"]
a0 = np.sqrt(np.mean((Dataset0["Y"] - np.mean(Dataset0["Y"])) ** 2))
a1 = np.sqrt(np.mean((Dataset1["Y"] - np.mean(Dataset1["Y"])) ** 2))
b0 = np.mean(
(Dataset["Yk0"] - np.mean(Dataset["Yk0"]))
* (Dataset["Yk1"] - np.mean(Dataset["Yk1"]))
) / (a0 * a1)
b1 = b0
s0 = np.sqrt(np.mean((Dataset0["Y"] - Dataset0["Yk0"]) ** 2)) / a0
s1 = np.sqrt(np.mean((Dataset1["Y"] - Dataset1["Yk1"]) ** 2)) / a1
# `````````````````````````````````````````````````````
self.model.sum.ICM0.rbf.lengthscale = 10 * np.ones(self.dim)
self.model.sum.ICM1.rbf.lengthscale = 10 * np.ones(self.dim)
self.model.sum.ICM0.rbf.variance = 1
self.model.sum.ICM1.rbf.variance = 1
self.model.sum.ICM0.B.W[0] = b0
self.model.sum.ICM0.B.W[1] = b0
self.model.sum.ICM1.B.W[0] = b1
self.model.sum.ICM1.B.W[1] = b1
self.model.sum.ICM0.B.kappa[0] = a0 ** 2
self.model.sum.ICM0.B.kappa[1] = 1e-4
self.model.sum.ICM1.B.kappa[0] = 1e-4
self.model.sum.ICM1.B.kappa[1] = a1 ** 2
self.model.mixed_noise.Gaussian_noise_0.variance = s0 ** 2
self.model.mixed_noise.Gaussian_noise_1.variance = s1 ** 2
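# Hypothetical usage sketch (not part of the original source): the shapes and
# the exact layout expected by ite.utils.metrics.Metrics are assumptions here,
# so treat this as an outline rather than a runnable recipe.
#
#     model = CMGP(dim=5, dim_outcome=2, mode="CMGP")
#     # X: covariates, T: binary treatment, Y: observed outcome,
#     # PO: both potential outcomes (used only for evaluation)
#     metrics = model.train(X_train, T_train, Y_train, PO_train, X_test, PO_test)
#     ite_estimates = model.predict(X_test)   # columns: y_hat_0, y_hat_1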
|
StarcoderdataPython
|
3368940
|
"""
System tests for execute multiple policies
"""
from time import sleep
from cafe.drivers.unittest.decorators import tags
from test_repo.autoscale.fixtures import AutoscaleFixture
class ExecuteMultiplePoliciesTest(AutoscaleFixture):
"""
System tests to verify execute multiple scaling policies' scenarios
"""
def setUp(self):
"""
Create a scaling group with minentities > 0, with multiple scaling
policies and execute one scale up policy to create 2 servers
"""
super(ExecuteMultiplePoliciesTest, self).setUp()
self.create_group_response = \
self.autoscale_behaviors.create_scaling_group_given(
gc_min_entities=self.gc_min_entities_alt,
gc_cooldown=0)
self.group = self.create_group_response.entity
self.change = 2
self.change_percent = 50
self.cooldown = 3
self.policy_up_change = self.autoscale_behaviors.create_policy_given(
group_id=self.group.id, sp_change=self.change,
sp_cooldown=self.cooldown)
self.policy_down_change = self.autoscale_behaviors.create_policy_given(
group_id=self.group.id, sp_change=-(self.change - 1),
sp_cooldown=self.cooldown)
self.policy_up_change_percent = \
self.autoscale_behaviors.create_policy_given(
group_id=self.group.id, sp_change_percent=self.change_percent,
sp_cooldown=self.cooldown)
self.policy_down_change_percent = \
self.autoscale_behaviors.create_policy_given(
group_id=self.group.id,
sp_change_percent=-(self.change_percent),
sp_cooldown=self.cooldown)
self.policy_desired_capacity = \
self.autoscale_behaviors.create_policy_given(
group_id=self.group.id,
sp_desired_capacity=self.group.groupConfiguration.minEntities,
sp_cooldown=self.cooldown)
self.policy_up_execute = {
'change': self.change, 'cooldown': self.cooldown}
self.policy_executed = self.autoscale_behaviors.create_policy_webhook(
group_id=self.group.id,
policy_data=self.policy_up_execute,
execute_policy=True)
self.resources.add(self.group, self.empty_scaling_group)
@tags(speed='quick', convergence='yes')
def test_policy_up_cooldown(self):
"""
Execute a scale up policy with cooldown > 0 more than once within the
cooldown period, and policy execution fails when cooldown is not met
"""
execute_on_cooldown = self.autoscale_client.execute_policy(
self.group.id,
self.policy_executed['policy_id'])
self.assertEquals(
execute_on_cooldown.status_code, 403,
msg='Scale up policy executed successfully for group {0}'
' when cooldown is not met: {1}'
.format(self.group.id, execute_on_cooldown.status_code))
@tags(speed='quick')
def test_policy_down_cooldown(self):
"""
Execute a scale down policy with cooldown > 0 more than once within the
cooldown period, and policy execution fails when cooldown is not met
"""
execute_scale_down = self.autoscale_client.execute_policy(
self.group.id,
self.policy_down_change['id'])
self.assertEquals(
execute_scale_down.status_code, 202,
msg='Policy down failed to execute for group {0} with {1}'
.format(self.group.id, execute_scale_down.status_code))
execute_on_cooldown = self.autoscale_client.execute_policy(
self.group.id,
self.policy_down_change['id'])
self.assertEquals(
execute_on_cooldown.status_code, 403,
msg='Scale down policy executed when cooldown is not met with {0}'
' for group {1}'
.format(execute_on_cooldown.status_code, self.group.id))
@tags(speed='slow', convergence='yes')
def test_execute_different_policies_simultaneously(self):
"""
The policy cooldown times are not enforced when executing different
policies, and executing such policies results in active servers as
expected
"""
execute_change_percent_scale_up = self.autoscale_client.execute_policy(
self.group.id,
self.policy_up_change_percent['id'])
self.assertEquals(
execute_change_percent_scale_up.status_code, 202,
msg='Scale up policy execution for group {0} failed with {1}'
.format(self.group.id, execute_change_percent_scale_up.status_code)
)
execute_change_scale_down = self.autoscale_client.execute_policy(
self.group.id,
self.policy_down_change['id'])
self.assertEquals(
execute_change_scale_down.status_code, 202,
msg='Scale down policy execution for group {0} failed with {1}'
.format(self.group.id, execute_change_scale_down.status_code))
execute_desired_capacity_scale = self.autoscale_client.execute_policy(
self.group.id,
self.policy_desired_capacity['id'])
self.assertEquals(
execute_desired_capacity_scale.status_code, 202,
msg='Policy with desired capacity=minentities failed to execute '
'with {0} for group {1}'
.format(execute_desired_capacity_scale.status_code, self.group.id))
self.wait_for_expected_number_of_active_servers(
group_id=self.group.id,
expected_servers=self.group.groupConfiguration.minEntities)
self.assert_servers_deleted_successfully(
self.group.launchConfiguration.server.name,
self.group.groupConfiguration.minEntities)
@tags(speed='slow', convergence='yes')
def test_scale_up_scale_down_multiple_policies_in_sequence(self):
"""
Different scale up and scale down policies on the scaling group can be
executed in sequence after each policy's cooldown time
"""
self._execute_policy_after_cooldown(
self.group.id, self.policy_executed['policy_id'])
self._execute_policy_after_cooldown(
self.group.id, self.policy_up_change['id'])
self._execute_policy_after_cooldown(
self.group.id, self.policy_down_change['id'])
self._execute_policy_after_cooldown(
self.group.id, self.policy_down_change['id'])
self._execute_policy_after_cooldown(
self.group.id, self.policy_up_change_percent['id'])
self._execute_policy_after_cooldown(
self.group.id, self.policy_down_change_percent['id'])
self._execute_policy_after_cooldown(
self.group.id, self.policy_desired_capacity['id'])
self.wait_for_expected_number_of_active_servers(
group_id=self.group.id,
expected_servers=self.group.groupConfiguration.minEntities)
self.assert_servers_deleted_successfully(
self.group.launchConfiguration.server.name,
self.group.groupConfiguration.minEntities)
@tags(speed='quick', convergence='yes')
def test_multiple_webhook_policies_in_group_in_different_requests(self):
"""
Creating multiple webhook policies with the same payload, using
multiple create policy requests is successful.
"""
policy_count = 3
group = (self.autoscale_behaviors.create_scaling_group_min()).entity
self.resources.add(group, self.empty_scaling_group)
policy_id_list = []
for _ in range(policy_count):
create_policy_response = \
self.autoscale_behaviors.create_policy_given(
group_id=group.id,
sp_name='multi_web_policy',
sp_change=1)
self.assertEquals(
create_policy_response['status_code'], 201,
msg='Created multiple scaling policies with same policy data'
', response code: {0}'.format(
create_policy_response['status_code']))
policy_id_list.append(create_policy_response['id'])
self.assertEqual(len(set(policy_id_list)), policy_count)
def _execute_policy_after_cooldown(self, group_id, policy_id):
"""
After the cooldown period, executes the policy and asserts if the
policy was executed successfully
"""
sleep(self.cooldown)
execute_policy = self.autoscale_client.execute_policy(
self.group.id,
policy_id)
self.assertEquals(
execute_policy.status_code, 202,
msg='Execution of the policy after cooldown failed with {0} '
'for group {1}'.format(execute_policy.status_code, self.group.id))
|
StarcoderdataPython
|
3341290
|
<reponame>RedDrum-Redfish-Project/RedDrum-Frontend
# Copyright Notice:
# Copyright 2018 Dell, Inc. All rights reserved.
# License: BSD License. For full license text see link: https://github.com/RedDrum-Redfish-Project/RedDrum-Frontend/LICENSE.txt
import os
from .resource import RfStaticResource
from .generateId import rfGenerateId
#from .rootData import RfRoot
import json
import time
import sys
from .redfish_headers import RfAddHeaders
class RfSessionService():
# Note that resource was created in serviceRoot for the session service.
def __init__(self, rfr):
self.rfr=rfr #xg999fix
self.rdr=rfr
self.loadResourceTemplates(rfr )
self.loadSessionServiceDatabase(rfr )
self.initializeSessionsDict(rfr )
self.hdrs=RfAddHeaders(rfr)
self.magic="123456"
def loadResourceTemplates( self, rfr ):
#load SessionService Template
indxFilePath=os.path.join(rfr.baseDataPath,"templates", "SessionService.json")
if os.path.isfile(indxFilePath):
self.sessionServiceTemplate=json.loads( open(indxFilePath,"r").read() )
else:
self.rfr.logMsg("CRITICAL","*****ERROR: SessionService: Json Data file:{} Does not exist. Exiting.".format(indxFilePath))
sys.exit(10)
#load Sessions Collection Template
indxFilePath=os.path.join(rfr.baseDataPath,"templates", "SessionCollection.json")
if os.path.isfile(indxFilePath):
self.sessionsCollectionTemplate=json.loads( open(indxFilePath,"r").read() )
else:
self.rfr.logMsg("CRITICAL","*****ERROR: SessionService: Json Data file:{} Does not exist. Exiting.".format(indxFilePath))
sys.exit(10)
#load Session Entry Template
indxFilePath=os.path.join(rfr.baseDataPath,"templates", "Session.json")
if os.path.isfile(indxFilePath):
self.sessionEntryTemplate=json.loads( open(indxFilePath,"r").read() )
else:
self.rfr.logMsg("CRITICAL","*****ERROR: SessionService: Json Data file:{} Does not exist. Exiting.".format(indxFilePath))
sys.exit(10)
def loadSessionServiceDatabase(self,rfr ):
sessionServiceDbFilename="SessionServiceDb.json"
self.sessionServiceDbFilePath=os.path.join(rfr.varDataPath,"db", sessionServiceDbFilename )
if os.path.isfile(self.sessionServiceDbFilePath):
self.sessionServiceDb=json.loads( open(self.sessionServiceDbFilePath,"r").read() )
else:
self.rfr.logMsg("WARNING",
"*****WARNING: Json Data file:{} Does not exist. Creating default.".format(self.sessionServiceDbFilePath))
# read the data in from the default database dir with the rm-tools package
dfltDbFilePath=os.path.join(rfr.baseDataPath,"db", sessionServiceDbFilename)
if os.path.isfile(dfltDbFilePath):
self.sessionServiceDb=json.loads( open(dfltDbFilePath,"r").read() )
else:
self.rfr.logMsg("CRITICAL","*****ERROR: Default Json Database file:{} Does not exist. Exiting.".format(dfltDbFilePath))
sys.exit(10)
#write the data back out to the var directory where the dynamic db info is kept
sessionServiceDbJson=json.dumps(self.sessionServiceDb,indent=4)
with open( self.sessionServiceDbFilePath, 'w', encoding='utf-8') as f:
f.write(sessionServiceDbJson)
def initializeSessionsDict(self,rfr):
# this is the in-memory database of open sessions
# the sessionsDict is an dict indexed by sessionsDict[sessionId][<sessionParameters>]
# self.sessionsDict[sessionid]=
# { "UserName": username, "UserPrivileges": userPrivileges, "AccountId": accountid,
# "X-Auth-Token": authtoken, "LocationUri": locationUri, "LastAccessTime": lastAccessTime }
self.sessionsDict=dict() #create an empty dict of session entries
# GET SessionService
def getSessionServiceResource(self,request):
# generate headers
hdrs = self.hdrs.rfRespHeaders(request, contentType="json", resource=self.sessionServiceTemplate, allow="GetPatch")
# Process HEAD method
if request.method=="HEAD":
return(0,200,"","",hdrs)
# create a copy of the SessionService resource template
resData2=dict(self.sessionServiceTemplate)
# assign the required properties
resData2["@odata.id"] = "/redfish/v1/SessionService"
resData2["Id"] = "SessionService"
resData2["Name"]= "RackManager Session Service"
resData2["Description"] = "RackManager Session Service"
# assign link to the Sessions collection
resData2["Sessions"] = {"@odata.id": "/redfish/v1/SessionService/Sessions"}
# set the dynamic data in the template copy to the value in the sessionService database
resData2["SessionTimeout"]=self.sessionServiceDb["SessionTimeout"]
# create the response json data and return
resp=json.dumps(resData2,indent=4)
# generate the headers and return the response
return(0,200,"",resp,hdrs)
# PATCH SessionService
def patchSessionServiceResource(self, request, patchData):
# generate headers
hdrs = self.hdrs.rfRespHeaders(request)
#first verify client didn't send us a property we cant patch
for key in patchData:
if( key != "SessionTimeout" ):
return (4, 400, "Bad Request-Invalid Patch Property Sent", "", hdrs)
# now patch the valid properties sent
if( "SessionTimeout" in patchData):
newVal=patchData['SessionTimeout']
if( (newVal < 30) or (newVal >86400) ):
return(4, 400, "Bad Request-not in correct range", "", hdrs)
else:
# the data is good and in range, save it and return ok
self.sessionServiceDb["SessionTimeout"]=newVal
# write the data back out to the sessionService database file
sessionServiceDbJson=json.dumps(self.sessionServiceDb,indent=4)
with open( self.sessionServiceDbFilePath, 'w', encoding='utf-8') as f:
f.write(sessionServiceDbJson)
# return to URI handling OK, with no content
return(0, 204, "", "", hdrs)
else:
return (4, 400, "Bad Request-Invalid Patch Property Sent", "", hdrs)
# getSessionAuthInfo()
# returns: rc, errMsgString, sessionId, authToken, userPrivileges, accountId, username
# rc=404 if sessionId is invalid.
# rc=401 if authToken is invalid or mismatches sessionid, or session is expired
# self.sessionsDict[sessionid]={"UserName": username, "UserPrivileges": userPrivileges,
# "AccountId": accountid,
# "X-Auth-Token": authtoken, "LocationUri": locationUri, "LastAccessTime": lastAccessTime}
def getSessionAuthInfo(self,sessionid=None, authtoken=None ):
storedAuthToken=None
storedSessionId=None
storedPrivileges=None
# if sessionid is not None, verify that the sessionId is valid
if sessionid is not None:
if sessionid not in self.sessionsDict:
return(404, "SessionId Not Found",None,None,None,None,None)
else:
#the sessionid exists, so get associated authToken
storedSessionId=sessionid
storedAuthToken=self.sessionsDict[sessionid]["X-Auth-Token"]
storedPrivileges=self.sessionsDict[sessionid]["UserPrivileges"]
storedUserName=self.sessionsDict[sessionid]["UserName"]
storedAccountId=self.sessionsDict[sessionid]["AccountId"]
# if authtoken was also passed in, check if it matches the stored value
if authtoken is not None:
if(authtoken != storedAuthToken):
return(401, "Not Authroized-AuthToken Incorrect",None,None,None,None,None)
# else if authtoken is not None, look it up, verify it exists
elif authtoken is not None:
# case where sessionid was not passed in, but authtoken was
# we need to go lookup authtoken w/o sessionid
foundToken=False
for sessid in self.sessionsDict:
if( self.sessionsDict[sessid]["X-Auth-Token"] == authtoken ):
foundToken=True
storedSessionId=sessid
storedAuthToken=self.sessionsDict[sessid]["X-Auth-Token"]
storedPrivileges=self.sessionsDict[sessid]["UserPrivileges"]
storedUserName=self.sessionsDict[sessid]["UserName"]
storedAccountId=self.sessionsDict[sessid]["AccountId"]
break
if foundToken is False:
return(401, "Not Authroized-Token Not Found",None,None,None,None,None)
# else, both sessionid and authtoken are None, which is invalid call
else:
return(500, "Invalid Auth Check",None,None,None,None,None)
# verify that the session has not expired
currentTime=int(time.time())
lastAccessTime=self.sessionsDict[storedSessionId]["LastAccessTime"]
sessionTimeout=self.sessionServiceDb["SessionTimeout"]
if( (currentTime - lastAccessTime) > sessionTimeout ):
# it timed out. delete the session, and return unauthorized
del self.sessionsDict[storedSessionId]
# return 404 since we deleted the session and the uri is no longer valid
return(404, "Session Not Found-Expired",None,None,None,None,None)
else:
#else-update the timestamp--to indicate the session was used
self.sessionsDict[storedSessionId]["LastAccessTime"]=currentTime
# if here, all ok, return privileges
#returns: rc, errMsgString, sessionId, authToken, userPrivileges
return(0, "OK", storedSessionId, storedAuthToken, storedPrivileges, storedAccountId, storedUserName )
# ------------Session Collection Functions----------------
# Post Sessions
# POST to sessions collection (Login)
def postSessionsResource(self,request, postData):
# generate headers for 4xx error messages
errhdrs = self.hdrs.rfRespHeaders(request )
# process special cases for request coming in over http or https based on RedDrum.conf auth config settings
#requestHeadersLower = {k.lower() : v.lower() for k,v in request.headers.items()}
#print("EEEEEEEE: hdrs: {}".format(requestHeadersLower))
#if "X-Rm-From-Rproxy" in requestHeadersLower and requestHeadersLower["x-rm-from-rproxy"]=="https":
if "X-Rm-From-Rproxy" in request.headers and request.headers["X-Rm-From-Rproxy"]=="HTTPS":
# case: scheme is https, so execute the API
pass
elif self.rdr.RedfishAllowSessionLoginOverHttp is True:
# case: scheme=http, but login over http is allowed, so execute the API
pass
else:
# case: scheme=http, login over http is NOT allowed
# so return a 404-Not Found status
return(4, 404, "404-Not Found-URI not supported over http", "", errhdrs)
# verify that the client didn't send us a property we cant initialize the session with
# we need to fail the request if we cant handle any properties sent
for key in postData:
if( (key != "UserName") and (key != "Password") ):
return (4, 400, "Bad Request-Invalid Post Property Sent", "", errhdrs)
# now check that all required on create properties were sent as post data
username=None
password=None
if( "UserName" in postData):
username=postData['UserName']
if("Password" in postData):
password=postData['Password']
if( (username is None) or (password is None) ):
return (4, 400, "Bad Request-Required On Create properties not all sent", "", errhdrs)
# now verify that the login credentials are valid and get the privileges
rc,errMsg,accountid,roleId,userPrivileges=self.rfr.root.accountService.getAccountAuthInfo(username,password)
if( rc != 0 ): # unauthenticated
return(4, 401, "Unauthorized--invalid user or password","", errhdrs)
# otherwise, if here, it is an authenticated user
# check if user has login privilege
if( "Login" not in userPrivileges ):
return(4, 401, "Unauthorized--User does not have login privilege","", errhdrs)
#get time to update timer in sessDict
lastAccessTime=int(time.time())
# now Generate a session ID and auth token as a random number
sessionid=rfGenerateId(leading="S",size=8)
authtoken=rfGenerateId(leading="A",size=8)
# Generate the location header
locationUri="/redfish/v1/SessionService/Sessions/" + sessionid
# add the new session entry to add to the sessionsDict
self.sessionsDict[sessionid]={"UserName": username, "UserPrivileges": userPrivileges, "AccountId": accountid,
"X-Auth-Token": authtoken, "LocationUri": locationUri, "LastAccessTime": lastAccessTime}
# get the response data
rc,status,msg,respData,respHdr=self.getSessionEntry(request, sessionid)
if( rc != 0):
#something went wrong--return 500
return(5, 500, "Error Getting New Session Data","",{})
# get the response Header with Link, Location, and X-Auth-Token headers
respHeaderData = self.hdrs.rfRespHeaders(request, contentType="json", location=locationUri, xauthtoken=authtoken,
resource=self.sessionEntryTemplate)
#return to flask uri handler
return(0, 201, "Created", respData, respHeaderData)
# GET SessionsCollection
# GET sessions Collection
def getSessionsCollectionResource(self, request ):
hdrs=self.hdrs.rfRespHeaders(request, contentType="json", allow=["HEAD","GET","POST"],
resource=self.sessionsCollectionTemplate)
if request.method=="HEAD":
return(0,200,"","",hdrs)
# the routine copies a template file with the static redfish parameters
# then it updates the dynamic properties from the sessionsDict
# for SessionCollection GET, build the Members array
# first walk the sessionsDict and check if any sessions have timed-out.
# If any session has timed-out, delete it now
currentTime=int(time.time())
sessionTimeout=self.sessionServiceDb["SessionTimeout"]
sessDict2=dict(self.sessionsDict)
for sessionid in sessDict2.keys():
# check if this session entry has timed-out. If so, delete it.
lastAccessTime=sessDict2[sessionid]["LastAccessTime"]
if( (currentTime - lastAccessTime) > sessionTimeout ):
# this session is timed out. remove it from the original sessionDict
del self.sessionsDict[sessionid]
# Then copy the sessionsCollection template file (which has an empty sessions array)
resData2=dict(self.sessionsCollectionTemplate)
count=0
# now walk through the entries in the sessionsDict and build the sessionsCollection Members array
# note that it starts out as an empty array
for sessionEntry in self.sessionsDict.keys():
# increment members count, and create the member for the next entry
count=count+1
newMember=[{"@odata.id": self.sessionsDict[sessionEntry]["LocationUri"] } ]
# add the new member to the members array we are building
resData2["Members"] = resData2["Members"] + newMember
resData2["<EMAIL>"]=count
# convert to json
jsonRespData2=json.dumps(resData2,indent=4)
return(0, 200, "",jsonRespData2, hdrs)
# GET Session Entry
def getSessionEntry(self,request, sessionid):
# generate error header for 4xx errors
errhdrs=self.hdrs.rfRespHeaders(request)
# First: verify that the sessionId is valid
if sessionid not in self.sessionsDict:
return(4, 404, "Not Found", "",errhdrs)
# Second: Check if the session has timed-out.
# If it has timed-out, delete it now, and re-check if session is not found
currentTime=int(time.time())
sessionTimeout=self.sessionServiceDb["SessionTimeout"]
lastAccessTime=self.sessionsDict[sessionid]["LastAccessTime"]
if( (currentTime - lastAccessTime) > sessionTimeout ):
# this session is timed out. remove it from the sessionDict
del self.sessionsDict[sessionid]
# re-verify if the session exists - since we may have just removed it
if sessionid not in self.sessionsDict:
return(4, 404, "Not Found", "",errhdrs)
# generate header info
respHdrs=self.hdrs.rfRespHeaders(request, contentType="json", allow=["HEAD","GET","DELETE"],
resource=self.sessionEntryTemplate)
if request.method=="HEAD":
return(0,200,"","",respHdrs)
# copy the template sessionEntry resource
resData=dict(self.sessionEntryTemplate)
# construct response properties
resData["Name"]="Session Resource"
resData["Description"]="Resource for a specific session that was created"
resData["Id"]=sessionid
resData["UserName"]=self.sessionsDict[sessionid]["UserName"]
resData["@odata.id"]=self.sessionsDict[sessionid]["LocationUri"]
# convert to json
jsonRespData=(json.dumps(resData,indent=4))
return(0, 200, "", jsonRespData, respHdrs)
# Delete Session, logout, delete the session
# all we have to do is verify the sessionid is correct--
# and then, if it is valid, delete the entry for that sessionid from the sessionsDict
# For reference: self.sessionsDict[sessionid]=
# { "UserName": username, "UserPrivileges": userPrivileges, "AccountId": accountid,
# "X-Auth-Token": authtoken, "LocationUri": locationUri, "LastAccessTime": lastAccessTime }
def deleteSession(self, request, sessionid):
# generate the headers
hdrs=self.hdrs.rfRespHeaders(request)
# First, verify that the sessionid is valid
if sessionid not in self.sessionsDict:
return(4, 404, "Not Found","",hdrs)
# verify authorization credentials
# if we got here, we know the user authenticated and has privilege "ConfigureManager" or "Login"
# if user privileges include ConfigureManager, always execute the API
# if user privileges do not include ConfigureManager, but do include Login,
# then ONLY execute the API if the session belongs to "This User"
isAuthorized=False
if "ConfigureManager" in self.rdr.root.accountService.currentUserPrivileges:
# this user has admin privileges for the sessions so it can delete any users session
isAuthorized=True
elif "Login" in self.rdr.root.accountService.currentUserPrivileges:
# this user only has privileges to delete its own sessions.
# check if sessionid is owned by the authenticated user
sessionAccountId = self.sessionsDict[sessionid]["AccountId"]
if sessionAccountId == self.rdr.root.accountService.currentUserAccountId:
# this user only has privileges to delete its own sessions
isAuthorized=True
# return 403 Unauthorized if authorization failed here
if isAuthorized is False:
return(4, 403, "Forbidden-Privileges not sufficient","",hdrs)
# if here, authorization passesd. delete the session and return 204
del self.sessionsDict[sessionid]
return(0, 204, "No Content", "", hdrs)
# end
|
StarcoderdataPython
|
1618338
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *


class PySrsly(PythonPackage):
"""srsly: Modern high-performance serialization utilities for Python."""
homepage = "https://github.com/explosion/srsly"
pypi = "srsly/srsly-2.0.1.tar.gz"
version('2.0.1', sha256='fa3c7375be8fe75f23c27feafbfb5f738d55ffdbf02964c6896fb7684f519a52')
version('2.0.0', sha256='785b00e00406120dbef4ca82925051e6b60fe870c5f84f0d22b3632d574eb870')
version('1.0.2', sha256='59258b81d567df207f8a0a33c4b5fa232afccf1d927c8ce3ba5395bfd64c0ed8')
depends_on('[email protected]:', when='@2:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('py-wheel', when='@2:', type='build')
depends_on('[email protected]:', when='@2:', type='build')
depends_on('[email protected]', when='^python@:3.3', type=('build', 'run'))
# https://github.com/explosion/srsly/pull/24
patch('subprocess.patch', when='@2.0.0:2.0.1')
|
StarcoderdataPython
|
3317727
|
<reponame>calico/stimulated_emission_imaging<filename>figure_generation/figure_A9.py<gh_stars>1-10
import os
import numpy as np
import matplotlib.pyplot as plt
import np_tif
from stack_registration import bucket
def main():
assert os.path.isdir('./../images')
if not os.path.isdir('./../images/figure_A9'):
os.mkdir('./../images/figure_A9')
folder_string = (
'./../../stimulated_emission_imaging-data' +
'/2017_05_09_pulse_length_scan_')
pulsewidths = np.array([8,4,2,1])
# location of center lobe
bright_spot_x = np.array([219, 235, 204, 207])-1
bright_spot_y = np.array([47, 71, 37, 66])-1
# cropping area defined
top_left_x = [133, 146, 114, 118]
top_left_y = [0, 4, 0, 4]#[0, 7, 0, 5]
crop_width = 175
crop_height = 118
# where on the plot should the cropped images be
plot_pos_y = [0.105, 0.245, 0.37, 0.62]
plot_pos_x = [0.25, 0.34, 0.51, 0.77]
STE_signal = np.zeros(4)
STE_signal_relative = np.zeros(4)
STE_image_cropped = np.zeros((4,14,21))
qtr_width = 12
num_reps = 50
num_delays = 3
for i in range(4):
pulsewidth = pulsewidths[i]
extra_text = ''
if pulsewidth == 4: extra_text = '_again_good'
rest_of_folder_string = (str(pulsewidth) + 'us' + extra_text)
filename = (folder_string + rest_of_folder_string +
'/STE_phase_angle_2_green_1395mW_red_300mW.tif')
assert os.path.exists(filename)
data = np_tif.tif_to_array(filename).astype(np.float64)
filename = (folder_string + rest_of_folder_string +
'/STE_phase_angle_2_green_0mW_red_300mW.tif')
assert os.path.exists(filename)
data_ctrl = np_tif.tif_to_array(filename).astype(np.float64)
# get rid of overexposed rows at top and bottom of images
less_rows = 3
data = data[:, less_rows:data.shape[1] - less_rows, :]
data_ctrl = data_ctrl[:,less_rows:data_ctrl.shape[1] - less_rows, :]
# Get the average pixel brightness in the background region of the
# phase contrast image. We'll use it to account for laser intensity
# fluctuations
avg_laser_brightness = get_bg_level(data.mean(axis=0))
# scale all images to have the same background brightness. This
# amounts to a correction of roughly 1% or less
local_laser_brightness = get_bg_level(data)
data = data * (
avg_laser_brightness / local_laser_brightness).reshape(
data.shape[0], 1, 1)
# do the same for control (green off) data
local_laser_brightness_ctrl = get_bg_level(data_ctrl)
data_ctrl = data_ctrl * (
avg_laser_brightness / local_laser_brightness_ctrl).reshape(
data_ctrl.shape[0], 1, 1)
# reshape to hyperstack
data = data.reshape(
num_reps, num_delays, data.shape[1], data.shape[2])
data_ctrl = data_ctrl.reshape(
num_reps, num_delays, data_ctrl.shape[1], data_ctrl.shape[2])
# now that brightness is corrected, repetition average the data
data = data.mean(axis=0)
data_ctrl = data_ctrl.mean(axis=0)
# subtract avg of green off images from green on images
data_simult = data[1, :, :]
data_non_simult = 0.5 * (data[0, :, :] + data[2, :, :])
STE_image = data_simult - data_non_simult
data_simult_ctrl = data_ctrl[1, :, :]
data_non_simult_ctrl = 0.5 * (data_ctrl[0, :, :] + data_ctrl[2, :, :])
STE_image_ctrl = data_simult_ctrl - data_non_simult_ctrl
# subtract AOM effects (even though it doesn't seem like there are any)
STE_image = STE_image# - STE_image_ctrl
# capture stim emission signal
my_col = bright_spot_x[i]
my_row = bright_spot_y[i]
main_lobe = STE_image[
my_row-qtr_width:my_row+qtr_width,
my_col-qtr_width:my_col+qtr_width]
left_edge = STE_image[:,qtr_width*2]
STE_signal[i] = np.mean(main_lobe)
STE_signal_relative[i] = STE_signal[i] - np.mean(left_edge)
# crop stim emission image
STE_image_cropped_single = STE_image[
top_left_y[i]:top_left_y[i] + crop_height,
top_left_x[i]:top_left_x[i] + crop_width,
]
# Our pixels are tiny (8.7 nm/pixel) to give large dynamic range.
# This is not great for viewing, because fluctuations can swamp the
# signal. This step bins the pixels into a more typical size.
bucket_width = 8 # bucket width in pixels
STE_image_cropped_single = bucket(
STE_image_cropped_single, (bucket_width, bucket_width)
) / bucket_width**2
STE_image_cropped[i, :, :] = STE_image_cropped_single
# get max/min values for plot color scaling
STE_max = np.amax(STE_image_cropped)
STE_min = np.amin(STE_image_cropped)
STE_image_cropped[:, -2:-1, 1:6] = STE_max # scale bar
my_intensity = 1/pulsewidths
fig, ax1 = plt.subplots()
ax1.plot(my_intensity,STE_signal_relative,'o',color='black',markersize=10)
plt.ylim(ymin=0,ymax=196)
ax1.set_ylabel('Average signal brightness (pixel counts)', color='black')
ax1.tick_params('y', colors='k')
plt.xlabel('Normalized laser intensity (constant energy)')
plt.grid()
for i in range(4):
a = plt.axes([plot_pos_x[i], plot_pos_y[i], .12, .12])
plt.imshow(STE_image_cropped[i,:,:], cmap=plt.cm.gray,
interpolation='nearest', vmax=STE_max, vmin=STE_min)
plt.xticks([])
plt.yticks([])
# plot energy per exposure
green_uJ = np.array([10, 10, 10, 10])
red_uJ = np.array([2, 2, 2, 2])
ax2 = ax1.twinx()
ax2.plot(my_intensity, green_uJ, '--b', linewidth=2)
ax2.plot(my_intensity, red_uJ, '--b', linewidth=2)
ax2.set_ylabel('Optical energy per exposure (µJ)',color='blue')
ax2.tick_params('y', colors='b')
ax2.set_ylim(ymin=0, ymax=11.4)
ax1.set_xlim(xmin=0,xmax=1.125)
# annotate with red/green pulses
im = plt.imread('green_shortpulse.png')
a = plt.axes([0.773, 0.81, .08, .08], frameon=False)
plt.imshow(im)
plt.xticks([])
plt.yticks([])
im = plt.imread('green_longpulse.png')
a = plt.axes([0.16, 0.77, .1, .1], frameon=False)
plt.imshow(im)
plt.xticks([])
plt.yticks([])
im = plt.imread('red_shortpulse.png')
a = plt.axes([0.773, 0.25, .08, .08], frameon=False)
plt.imshow(im)
plt.xticks([])
plt.yticks([])
im = plt.imread('red_longpulse.png')
a = plt.axes([0.16, 0.21, .1, .1], frameon=False)
plt.imshow(im)
plt.xticks([])
plt.yticks([])
plt.savefig('./../images/figure_A9/phase_contrast_dye_pulse_length_scan.svg')
plt.show()
return None
def get_bg_level(data):
num_regions = 2
# region 1
bg_up = 2
bg_down = 120
bg_left = 285
bg_right = 379
bg_level = data[..., bg_up:bg_down, bg_left:bg_right].mean(axis=(-2, -1))
# region 2
bg_up = 2
bg_down = 120
bg_left = 1
bg_right = 81
bg_level += data[..., bg_up:bg_down, bg_left:bg_right].mean(axis=(-2, -1))
return(bg_level / num_regions)
main()
|
StarcoderdataPython
|
3311121
|
<gh_stars>10-100
"""cyme.branch.httpd
- Our embedded WSGI server used to serve the HTTP API.
"""
from __future__ import absolute_import
from eventlet import listen
from eventlet import wsgi
from django.core.handlers import wsgi as djwsgi
from django.core.servers.basehttp import AdminMediaHandler
from requests import get
from .thread import gThread
from .signals import httpd_ready
class HttpServer(gThread):
joinable = False
def __init__(self, addrport=None):
host, port = addrport or ('', 8000)
if host == 'localhost':
# dnspython bug?
host = '127.0.0.1'
self.host, self.port = self.addrport = (host, port)
super(HttpServer, self).__init__()
def server(self, sock, handler):
return wsgi.server(sock, handler,
log=self.create_log(),
protocol=self.create_http_protocol())
def run(self):
handler = AdminMediaHandler(djwsgi.WSGIHandler())
sock = listen(self.addrport)
g = self.spawn(self.server, sock, handler)
self.info('ready')
httpd_ready.send(sender=self, addrport=self.addrport,
handler=handler, sock=sock)
return g.wait()
def _do_ping(self, timeout):
return get(self.url + '/ping/', timeout=timeout).ok
def create_log(self):
logger = self
class _Log(object):
def write(self, message):
message = message.rstrip('\n')
(logger.debug if '/ping/' in message else logger.info)(message)
return _Log()
def create_http_protocol(self):
logger = self
class HttpProtocol(wsgi.HttpProtocol):
def get_format_args(self, format, *args):
return ['%s - - [%s] %s', self.address_string(),
self.log_date_time_string(),
format] + args
def log_message(self, format, *args):
return logger.info(*self.get_format_args(format, *args))
def log_error(self, format, *args):
return logger.error(*self.get_format_args(format, *args))
return HttpProtocol
@property
def url(self):
addr, port = self.addrport
if not addr or addr in ('0.0.0.0', ):
addr = '127.0.0.1'
return 'http://%s:%s' % (addr, port)
@property
def logger_name(self):
return 'wsgi'
|
StarcoderdataPython
|
4826149
|
import sys
import os
import time
import math
import torch
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from torch.autograd import Variable
import torch.nn.functional as F
import cv2
from scipy import spatial
import struct
import imghdr
import cython
from scipy.special import softmax
#TensorRT stuff
from numpy import array
import pycuda.driver as cuda
import pycuda.autoinit
import tensorrt as trt
import sys, os
sys.path.insert(1, os.path.join(sys.path[0], ".."))
import common
from numba import jit
from numba import vectorize, float64
import numba as nb
# You can set the logger severity higher to suppress messages (or lower to display more messages).
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
def get_all_files(directory):
files = []
for f in os.listdir(directory):
if os.path.isfile(os.path.join(directory, f)):
files.append(os.path.join(directory, f))
else:
files.extend(get_all_files(os.path.join(directory, f)))
return files
def calcAngularDistance(gt_rot, pr_rot):
rotDiff = np.dot(gt_rot, np.transpose(pr_rot))
trace = np.trace(rotDiff)
return np.rad2deg(np.arccos((trace-1.0)/2.0))
def get_camera_intrinsic():
K = np.zeros((3, 3), dtype='float64')
# LifeCam
# K[0, 0], K[0, 2] = 1.13908155e+03, 6.57642892e+02
# K[1, 1], K[1, 2] = 1.13705701e+03, 3.28071843e+02
# K[2, 2] = 1.
# Logitech C920
K[0, 0], K[0, 2] = 935.67, 624.06
K[1, 1], K[1, 2] = 934.86, 354.35
K[2, 2] = 1.
return K
def get_camera_distortion_mat():
dist = [[-0.00580032, -0.17520014, 0.00051201, 0.00432754, 0.24850474]]
return np.array(dist)
def compute_projection(points_3D, transformation, internal_calibration):
projections_2d = np.zeros((2, points_3D.shape[1]), dtype='float32')
camera_projection = (internal_calibration.dot(transformation)).dot(points_3D)
projections_2d[0, :] = camera_projection[0, :]/camera_projection[2, :]
projections_2d[1, :] = camera_projection[1, :]/camera_projection[2, :]
return projections_2d
def compute_transformation(points_3D, transformation):
return transformation.dot(points_3D)
def calc_pts_diameter(pts):
diameter = -1
for pt_id in range(pts.shape[0]):
pt_dup = np.tile(np.array([pts[pt_id, :]]), [pts.shape[0] - pt_id, 1])
pts_diff = pt_dup - pts[pt_id:, :]
max_dist = math.sqrt((pts_diff * pts_diff).sum(axis=1).max())
if max_dist > diameter:
diameter = max_dist
return diameter
def adi(pts_est, pts_gt):
nn_index = spatial.cKDTree(pts_est)
nn_dists, _ = nn_index.query(pts_gt, k=1)
e = nn_dists.mean()
return e
def get_3D_corners(vertices):
min_x = np.min(vertices[0,:])
max_x = np.max(vertices[0,:])
min_y = np.min(vertices[1,:])
max_y = np.max(vertices[1,:])
min_z = np.min(vertices[2,:])
max_z = np.max(vertices[2,:])
# use stub since we know the cargo ball's bounding box
#min_x = -0.33/2
#max_x = 0.33/2
#min_y = -0.33/2
#max_y = 0.33/2
#min_z = -0.33/2
#max_z = 0.33/2
corners = np.array([[min_x, min_y, min_z],
[min_x, min_y, max_z],
[min_x, max_y, min_z],
[min_x, max_y, max_z],
[max_x, min_y, min_z],
[max_x, min_y, max_z],
[max_x, max_y, min_z],
[max_x, max_y, max_z]])
corners = np.concatenate((np.transpose(corners), np.ones((1,8)) ), axis=0)
return corners
def pnp(points_3D, points_2D, cameraMatrix):
try:
distCoeffs = pnp.distCoeffs
except:
distCoeffs = np.zeros((8, 1), dtype='float32')
assert points_3D.shape[0] == points_2D.shape[0], 'points 3D and points 2D must have same number of vertices'
_, rvecs, tvecs = cv2.solvePnP(points_3D,
# points_2D,
np.ascontiguousarray(points_2D[:,:2]).reshape((-1,1,2)),
cameraMatrix,
distCoeffs)
# , None, None, False, cv2.SOLVEPNP_UPNP)
# R_exp, t, _ = cv2.solvePnPRansac(points_3D,
# points_2D,
# cameraMatrix,
# distCoeffs,
# reprojectionError=12.0)
#
R, _ = cv2.Rodrigues(rvecs)
# Rt = np.c_[R, t]
return rvecs, R, tvecs
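# Hypothetical usage sketch (not part of the original source): ties together
# get_camera_intrinsic, get_3D_corners, pnp and compute_projection from above.
# The vertex array is a made-up unit cube, and `corners_2d` stands in for the
# network's predicted 2D keypoints.
#
#     vertices = np.array([[0, 0, 0, 0, 1, 1, 1, 1],
#                          [0, 0, 1, 1, 0, 0, 1, 1],
#                          [0, 1, 0, 1, 0, 1, 0, 1]], dtype='float32')
#     corners_3d = get_3D_corners(vertices)        # 4 x 8 homogeneous corners
#     K = get_camera_intrinsic()
#     rvec, R, t = pnp(np.transpose(corners_3d[:3, :]).astype('float32'),
#                      corners_2d.astype('float32'), K.astype('float32'))
#     proj = compute_projection(corners_3d, np.concatenate((R, t), axis=1), K)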
def get_2d_bb(box, size):
x = box[0]
y = box[1]
min_x = np.min(np.reshape(box, [9,2])[:,0])
max_x = np.max(np.reshape(box, [9,2])[:,0])
min_y = np.min(np.reshape(box, [9,2])[:,1])
max_y = np.max(np.reshape(box, [9,2])[:,1])
w = max_x - min_x
h = max_y - min_y
new_box = [x*size, y*size, w*size, h*size]
return new_box
def compute_2d_bb(pts):
min_x = np.min(pts[0,:])
max_x = np.max(pts[0,:])
min_y = np.min(pts[1,:])
max_y = np.max(pts[1,:])
w = max_x - min_x
h = max_y - min_y
cx = (max_x + min_x) / 2.0
cy = (max_y + min_y) / 2.0
new_box = [cx, cy, w, h]
return new_box
def compute_2d_bb_from_orig_pix(pts, size):
min_x = np.min(pts[0,:]) / 1280.0
max_x = np.max(pts[0,:]) / 1280.0
min_y = np.min(pts[1,:]) / 720.0
max_y = np.max(pts[1,:]) / 720.0
w = max_x - min_x
h = max_y - min_y
cx = (max_x + min_x) / 2.0
cy = (max_y + min_y) / 2.0
new_box = [cx*size, cy*size, w*size, h*size]
return new_box
def bbox_iou(box1, box2, x1y1x2y2=False):
if x1y1x2y2:
mx = min(box1[0], box2[0])
Mx = max(box1[2], box2[2])
my = min(box1[1], box2[1])
My = max(box1[3], box2[3])
w1 = box1[2] - box1[0]
h1 = box1[3] - box1[1]
w2 = box2[2] - box2[0]
h2 = box2[3] - box2[1]
else:
mx = min(box1[0]-box1[2]/2.0, box2[0]-box2[2]/2.0)
Mx = max(box1[0]+box1[2]/2.0, box2[0]+box2[2]/2.0)
my = min(box1[1]-box1[3]/2.0, box2[1]-box2[3]/2.0)
My = max(box1[1]+box1[3]/2.0, box2[1]+box2[3]/2.0)
w1 = box1[2]
h1 = box1[3]
w2 = box2[2]
h2 = box2[3]
uw = Mx - mx
uh = My - my
cw = w1 + w2 - uw
ch = h1 + h2 - uh
carea = 0
if cw <= 0 or ch <= 0:
return 0.0
area1 = w1 * h1
area2 = w2 * h2
carea = cw * ch
uarea = area1 + area2 - carea
return carea/uarea
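# Worked example (added for illustration, not part of the original source):
# with x1y1x2y2=True, the boxes [0, 0, 2, 2] and [1, 1, 3, 3] overlap in a
# 1x1 square, so IoU = 1 / (4 + 4 - 1) = 1/7 ≈ 0.143.
#
#     bbox_iou([0, 0, 2, 2], [1, 1, 3, 3], x1y1x2y2=True)  # -> 0.142857...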
def bbox_iou_cube(box1, box2, x1y1x2y2=True):
if x1y1x2y2:
# point 1, 3, 5, 7 are points that form the front face of the cube
# point 3 and 5 are the upper left and lower right points of the rectangle, to be used for nms area overlap calculation
# nms algorithm x1 is point 3's X coordinate which has index 6 in the "boxes" array of length 21
# nms algorithm y1 is point 3's Y coordinate which has index 7 in the "boxes" array of length 21
# nms algorithm x2 is point 5's X coordinate which has index 10 in the "boxes" array of length 21
# nms algorithm y2 is point 5's y coordinate which has index 11 in the "boxes" array of length 21
        # With above choice, we pick index 6, 7, 10 and 11 from the "boxes" array of length 21, for nms
mx = min(box1[6], box2[6])
Mx = max(box1[10], box2[10])
my = min(box1[7], box2[7])
My = max(box1[11], box2[11])
w1 = box1[10] - box1[6]
h1 = box1[11] - box1[7]
w2 = box2[10] - box2[6]
h2 = box2[11] - box2[7]
else:
mx = min(box1[0]-box1[2]/2.0, box2[0]-box2[2]/2.0)
Mx = max(box1[0]+box1[2]/2.0, box2[0]+box2[2]/2.0)
my = min(box1[1]-box1[3]/2.0, box2[1]-box2[3]/2.0)
My = max(box1[1]+box1[3]/2.0, box2[1]+box2[3]/2.0)
w1 = box1[2]
h1 = box1[3]
w2 = box2[2]
h2 = box2[3]
uw = Mx - mx
uh = My - my
cw = w1 + w2 - uw
ch = h1 + h2 - uh
carea = 0
if cw <= 0 or ch <= 0:
return 0.0
area1 = w1 * h1
area2 = w2 * h2
carea = cw * ch
uarea = area1 + area2 - carea
return carea/uarea
def convert_bbox_format_for_sorting(bboxes):
all_boxes = []
for i in range(len(bboxes)):
w = 1280
h = 720
x1 = bboxes[i][6]*w
y1 = bboxes[i][7]*h
x2 = bboxes[i][10]*w
y2 = bboxes[i][11]*h
        confidence = bboxes[i][18]
class_label = bboxes[i][20]
all_boxes.append([x1, y1, x2, y2, confidence, confidence, class_label])
return all_boxes
def corner_confidences(gt_corners, pr_corners, th=30, sharpness=2, im_width=1280, im_height=720):
''' gt_corners: Ground-truth 2D projections of the 3D bounding box corners, shape: (16 x nA), type: torch.FloatTensor
pr_corners: Prediction for the 2D projections of the 3D bounding box corners, shape: (16 x nA), type: torch.FloatTensor
th : distance threshold, type: int
sharpness : sharpness of the exponential that assigns a confidence value to the distance
-----------
return : a torch.FloatTensor of shape (nA,) with 8 confidence values
'''
shape = gt_corners.size()
nA = shape[1]
dist = gt_corners - pr_corners
dist = dist.t().contiguous().view(nA, 8, 2)
dist[:, :, 0] = dist[:, :, 0] * im_width
dist[:, :, 1] = dist[:, :, 1] * im_height
eps = 1e-5
distthresh = torch.FloatTensor([th]).repeat(nA, 8)
dist = torch.sqrt(torch.sum((dist)**2, dim=2)).squeeze() # nA x 8
mask = (dist < distthresh).type(torch.FloatTensor)
conf = torch.exp(sharpness*(1 - dist/distthresh))-1 # mask * (torch.exp(math.log(2) * (1.0 - dist/rrt)) - 1)
conf0 = torch.exp(sharpness*(1 - torch.zeros(conf.size(0),1))) - 1
conf = conf / conf0.repeat(1, 8)
# conf = 1 - dist/distthresh
conf = mask * conf # nA x 8
mean_conf = torch.mean(conf, dim=1)
return mean_conf
def corner_confidence(gt_corners, pr_corners, th=30, sharpness=2, im_width=1280, im_height=720):
''' gt_corners: Ground-truth 2D projections of the 3D bounding box corners, shape: (16,) type: list
pr_corners: Prediction for the 2D projections of the 3D bounding box corners, shape: (16,), type: list
th : distance threshold, type: int
sharpness : sharpness of the exponential that assigns a confidence value to the distance
-----------
return : a list of shape (8,) with 8 confidence values
'''
dist = torch.FloatTensor(gt_corners) - pr_corners
dist = dist.view(8, 2)
dist[:, 0] = dist[:, 0] * im_width
dist[:, 1] = dist[:, 1] * im_height
eps = 1e-5
dist = torch.sqrt(torch.sum((dist)**2, dim=1))
mask = (dist < th).type(torch.FloatTensor)
conf = torch.exp(sharpness * (1.0 - dist/th)) - 1
conf0 = torch.exp(torch.FloatTensor([sharpness])) - 1 + eps
conf = conf / conf0.repeat(8, 1)
# conf = 1.0 - dist/th
conf = mask * conf
return torch.mean(conf)
def corner_confidences9(gt_corners, pr_corners, th=80, sharpness=2, im_width=1280, im_height=720):
''' gt_corners: Ground-truth 2D projections of the 3D bounding box corners, shape: (16 x nA), type: torch.FloatTensor
pr_corners: Prediction for the 2D projections of the 3D bounding box corners, shape: (16 x nA), type: torch.FloatTensor
th : distance threshold, type: int
sharpness : sharpness of the exponential that assigns a confidence value to the distance
-----------
return : a torch.FloatTensor of shape (nA,) with 9 confidence values
'''
shape = gt_corners.size()
nA = shape[1]
dist = gt_corners - pr_corners
dist = dist.t().contiguous().view(nA, 9, 2)
dist[:, :, 0] = dist[:, :, 0] * im_width
dist[:, :, 1] = dist[:, :, 1] * im_height
eps = 1e-5
distthresh = torch.FloatTensor([th]).repeat(nA, 9)
dist = torch.sqrt(torch.sum((dist)**2, dim=2)).squeeze() # nA x 9
mask = (dist < distthresh).type(torch.FloatTensor)
conf = torch.exp(sharpness*(1 - dist/distthresh))-1 # mask * (torch.exp(math.log(2) * (1.0 - dist/rrt)) - 1)
conf0 = torch.exp(sharpness*(1 - torch.zeros(conf.size(0),1))) - 1
conf = conf / conf0.repeat(1, 9)
# conf = 1 - dist/distthresh
conf = mask * conf # nA x 9
mean_conf = torch.mean(conf, dim=1)
return mean_conf
def corner_confidence9(gt_corners, pr_corners, th=80, sharpness=2, im_width=1280, im_height=720):
''' gt_corners: Ground-truth 2D projections of the 3D bounding box corners, shape: (18,) type: list
pr_corners: Prediction for the 2D projections of the 3D bounding box corners, shape: (18,), type: list
th : distance threshold, type: int
sharpness : sharpness of the exponential that assigns a confidence value to the distance
-----------
return : a list of shape (9,) with 9 confidence values
'''
dist = torch.FloatTensor(gt_corners) - pr_corners
dist = dist.view(9, 2)
dist[:, 0] = dist[:, 0] * im_width
dist[:, 1] = dist[:, 1] * im_height
eps = 1e-5
dist = torch.sqrt(torch.sum((dist)**2, dim=1))
mask = (dist < th).type(torch.FloatTensor)
conf = torch.exp(sharpness * (1.0 - dist/th)) - 1
conf0 = torch.exp(torch.FloatTensor([sharpness])) - 1 + eps
conf = conf / conf0.repeat(9, 1)
# conf = 1.0 - dist/th
conf = mask * conf
return torch.mean(conf)
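# Illustrative usage sketch for corner_confidence9(): gt is a hypothetical list of
# 9 normalised (x, y) corner projections and pr is the same prediction shifted by
# a small offset, so the returned confidence should be high.
def _example_corner_confidence9_usage():
    gt = [0.4 + 0.02 * i for i in range(18)]
    pr = torch.FloatTensor(gt) + 0.001
    print('corner confidence: %f' % float(corner_confidence9(gt, pr)))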
@vectorize([float64(float64)])
def sigmoid(x):
return 1.0/(math.exp(-x)+1.)
def softmax_torch(x):
x = torch.exp(x - torch.max(x))
x = x/x.sum()
return x
def nms(boxes, nms_thresh):
if len(boxes) == 0:
return boxes
det_confs = torch.zeros(len(boxes))
# print("unsorted")
# print_class_and_conf(boxes)
for i in range(len(boxes)):
det_confs[i] = 1-boxes[i][4]
_,sortIds = torch.sort(det_confs)
out_boxes = []
for i in range(len(boxes)):
box_i = boxes[sortIds[i]]
if box_i[4] > 0:
out_boxes.append(box_i)
for j in range(i+1, len(boxes)):
box_j = boxes[sortIds[j]]
if bbox_iou(box_i, box_j, x1y1x2y2=False) > nms_thresh:
#print(box_i, box_j, bbox_iou(box_i, box_j, x1y1x2y2=False))
box_j[4] = 0
# print("sorted")
# print_class_and_conf(out_boxes)
return out_boxes
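# Illustrative usage sketch for nms(): two hypothetical detections of the same
# object in (cx, cy, w, h, det_conf) form; the weaker, heavily-overlapping box
# should be suppressed.
def _example_nms_usage():
    box_a = [0.50, 0.50, 0.20, 0.20, 0.90]
    box_b = [0.52, 0.50, 0.20, 0.20, 0.60]
    kept = nms([box_a, box_b], 0.4)
    print('boxes kept after nms: %d' % len(kept))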
def nms_v2(boxes, nms_thresh):
if len(boxes) == 0:
return boxes
det_confs = torch.zeros(len(boxes))
print("unsorted")
print_class_and_conf(boxes)
for i in range(len(boxes)):
det_confs[i] = 1-boxes[i][4]
_,sortIds = torch.sort(det_confs)
out_boxes = []
for i in range(len(boxes)):
box_i = boxes[sortIds[i]]
if box_i[4] > 0:
out_boxes.append(box_i)
for j in range(i+1, len(boxes)):
box_j = boxes[sortIds[j]]
if bbox_iou(box_i, box_j, x1y1x2y2=False) > nms_thresh:
#print(box_i, box_j, bbox_iou(box_i, box_j, x1y1x2y2=False))
box_j[4] = 0
print("sorted")
print_class_and_conf(out_boxes)
return out_boxes
def print_class_and_conf(boxes):
for box in boxes:
print('class ', int(box[20]), 'conf ', '{:0.3f}'.format(float(box[18])))
def nms_multi(boxes, nms_thresh):
if len(boxes) == 0:
return boxes
det_confs = torch.zeros(len(boxes))
for i in range(len(boxes)):
det_confs[i] = 1-boxes[i][0][4]
_,sortIds = torch.sort(det_confs)
out_boxes = []
for i in range(len(boxes)):
box_i = boxes[sortIds[i]]
if box_i[0][4] > 0:
out_boxes.append(box_i[0])
for j in range(i+1, len(boxes)):
box_j = boxes[sortIds[j]]
if bbox_iou(box_i, box_j, x1y1x2y2=False) > nms_thresh:
#print(box_i, box_j, bbox_iou(box_i, box_j, x1y1x2y2=False))
box_j[0][4] = 0
return out_boxes
def nms_multi_v2(boxes, nms_thresh):
if len(boxes) == 0:
return boxes
det_confs = torch.zeros(len(boxes))
# index 18 is the det_conf i.e. confidence of the detected object
for i in range(len(boxes)):
det_confs[i] = 1-boxes[i][18]
_,sortIds = torch.sort(det_confs)
out_boxes = []
for i in range(len(boxes)):
box_i = boxes[sortIds[i]]
if box_i[18] > 0:
out_boxes.append(box_i)
for j in range(i+1, len(boxes)):
box_j = boxes[sortIds[j]]
if bbox_iou_cube(box_i, box_j, x1y1x2y2=True) > nms_thresh:
#print(box_i, box_j, bbox_iou(box_i, box_j, x1y1x2y2=False))
box_j[18] = 0
return out_boxes
# import the necessary packages
import numpy as np
# Malisiewicz et al.
def non_max_suppression_fast(boxes, overlapThresh):
boxes = np.asarray(boxes)
# if there are no boxes, return an empty list
if len(boxes) == 0:
return []
# if the bounding boxes integers, convert them to floats --
# this is important since we'll be doing a bunch of divisions
# if boxes.dtype.kind == "i":
# boxes = boxes.astype("float")
# initialize the list of picked indexes
pick = []
# grab the coordinates of the bounding boxes
# x1 = boxes[:,0]
# y1 = boxes[:,1]
# x2 = boxes[:,2]
# y2 = boxes[:,3]
# grab the front faces of the cube as bounding boxes
# point 1, 3, 5, 7 are points that form the front face of the cube
# point 3 and 5 are the upper left and lower right points of the rectangle, to be used for nms area overlap calculation
# nms algorithm x1 is point 3's X coordinate which has index 6 in the "boxes" array of length 21
# nms algorithm y1 is point 3's Y coordinate which has index 7 in the "boxes" array of length 21
# nms algorithm x2 is point 5's X coordinate which has index 10 in the "boxes" array of length 21
# nms algorithm y2 is point 5's y coordinate which has index 11 in the "boxes" array of length 21
    # With above choice, we pick index 6, 7, 10 and 11 from the "boxes" array of length 21, for nms
x1 = boxes[:,6]
y1 = boxes[:,7]
x2 = boxes[:,10]
y2 = boxes[:,11]
# print('x1', x1)
# print('y1', y1)
# print('x2', x2)
# print('y2', y2)
# compute the area of the bounding boxes and sort the bounding
# boxes by the bottom-right y-coordinate of the bounding box
area = (x2 - x1 + 1) * (y2 - y1 + 1)
idxs = np.argsort(y2)
# keep looping while some indexes still remain in the indexes
# list
while len(idxs) > 0:
# grab the last index in the indexes list and add the
# index value to the list of picked indexes
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
# find the largest (x, y) coordinates for the start of
# the bounding box and the smallest (x, y) coordinates
# for the end of the bounding box
xx1 = np.maximum(x1[i], x1[idxs[:last]])
yy1 = np.maximum(y1[i], y1[idxs[:last]])
xx2 = np.minimum(x2[i], x2[idxs[:last]])
yy2 = np.minimum(y2[i], y2[idxs[:last]])
# compute the width and height of the bounding box
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
print('w', w)
print('h', h)
# compute the ratio of overlap
overlap = (w * h) / area[idxs[:last]]
print('overlap', overlap)
# delete all indexes from the index list that have
idxs = np.delete(idxs, np.concatenate(([last],
np.where(overlap > overlapThresh)[0])))
# return only the bounding boxes that were picked using the
# integer data type
# print('boxes[pick]', boxes[pick])
return boxes[pick].tolist()
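# Illustrative usage sketch for non_max_suppression_fast(): each entry is a
# hypothetical 21-element record where indices 6, 7, 10 and 11 hold the front-face
# corners used for the overlap test, as described in the comments above.
def _example_non_max_suppression_fast_usage():
    box_a = [0.0] * 21
    box_b = [0.0] * 21
    box_a[6], box_a[7], box_a[10], box_a[11] = 100.0, 100.0, 200.0, 200.0
    box_b[6], box_b[7], box_b[10], box_b[11] = 110.0, 105.0, 210.0, 205.0
    kept = non_max_suppression_fast([box_a, box_b], overlapThresh=0.5)
    print('boxes kept: %d' % len(kept))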
def fix_corner_order(corners2D_gt):
corners2D_gt_corrected = np.zeros((9, 2), dtype='float32')
corners2D_gt_corrected[0, :] = corners2D_gt[0, :]
corners2D_gt_corrected[1, :] = corners2D_gt[1, :]
corners2D_gt_corrected[2, :] = corners2D_gt[3, :]
corners2D_gt_corrected[3, :] = corners2D_gt[5, :]
corners2D_gt_corrected[4, :] = corners2D_gt[7, :]
corners2D_gt_corrected[5, :] = corners2D_gt[2, :]
corners2D_gt_corrected[6, :] = corners2D_gt[4, :]
corners2D_gt_corrected[7, :] = corners2D_gt[6, :]
corners2D_gt_corrected[8, :] = corners2D_gt[8, :]
return corners2D_gt_corrected
def convert2cpu(gpu_matrix):
return torch.FloatTensor(gpu_matrix.size()).copy_(gpu_matrix)
def convert2cpu_long(gpu_matrix):
return torch.LongTensor(gpu_matrix.size()).copy_(gpu_matrix)
# custom function
@cython.boundscheck(False)
def get_region_boxes(output, conf_thresh, num_classes, only_objectness=1, validation=False):
t0minus = time.time()
# Parameters
anchor_dim = 1
#if output.dim() == 3:
#output = output.cpu().numpy()
print('output numpy shape ',output.shape)
    if len(output.shape) == 3:
        output = output.reshape((1,) + tuple(output.shape))
batch = output.shape[0]
assert(output.shape[1] == (19+num_classes)*anchor_dim)
h = output.shape[2]
w = output.shape[3]
# Activation
t0 = time.time()
all_boxes = []
max_conf = -100000
output = output.reshape(batch*anchor_dim, 19+num_classes, h*w)#.transpose(0,1).ascontiguousarray(output)
#print('reshaped output numpy has shape ',output.shape)
output = np.transpose(output, (1,0,2))
#print('reshaped output numpy has shape ',output.shape)
output = np.ascontiguousarray(output)
#print('reshaped output numpy has shape ',output.shape)
output = output.reshape(19+num_classes, batch*anchor_dim*h*w)
#grid_x = torch.linspace(0, w-1, w).repeat(h,1).repeat(batch*anchor_dim, 1, 1).view(batch*anchor_dim*h*w).cuda()
temp_x = np.linspace(0, w-1, w)
temp_x = np.tile(temp_x, (h,1))
temp_x = np.tile(temp_x, (batch*anchor_dim, 1, 1))
grid_x = temp_x.reshape(batch*anchor_dim*h*w)
temp_y = np.linspace(0, h-1, h)
temp_y = np.tile(temp_y,(w,1))
temp_y = np.transpose(temp_y, (1,0))
grid_y = np.tile(temp_y, (batch*anchor_dim, 1, 1)).reshape(batch*anchor_dim*h*w)
# define vectorized sigmoid
sigmoid_v = np.vectorize(sigmoid)
xs0 = sigmoid_v(output[0]) + grid_x
ys0 = sigmoid_v(output[1]) + grid_y
xs1 = output[2] + grid_x
ys1 = output[3] + grid_y
xs2 = output[4] + grid_x
ys2 = output[5] + grid_y
xs3 = output[6] + grid_x
ys3 = output[7] + grid_y
xs4 = output[8] + grid_x
ys4 = output[9] + grid_y
xs5 = output[10] + grid_x
ys5 = output[11] + grid_y
xs6 = output[12] + grid_x
ys6 = output[13] + grid_y
xs7 = output[14] + grid_x
ys7 = output[15] + grid_y
xs8 = output[16] + grid_x
ys8 = output[17] + grid_y
det_confs = sigmoid_v(output[18])
output_transpose = np.transpose(output[19:19+num_classes], (1,0))
cls_confs = softmax(output_transpose)
cls_max_ids = np.argmax(cls_confs, 1)
cls_max_confs = np.amax(cls_confs, 1)
cls_max_confs = cls_max_confs.reshape(-1)
cls_max_ids = cls_max_ids.reshape(-1)
t1 = time.time()
# GPU to CPU
sz_hw = h*w
sz_hwa = sz_hw*anchor_dim
# det_confs = convert2cpu(det_confs)
# cls_max_confs = convert2cpu(cls_max_confs)
# cls_max_ids = convert2cpu_long(cls_max_ids)
# xs0 = convert2cpu(xs0)
# ys0 = convert2cpu(ys0)
# xs1 = convert2cpu(xs1)
# ys1 = convert2cpu(ys1)
# xs2 = convert2cpu(xs2)
# ys2 = convert2cpu(ys2)
# xs3 = convert2cpu(xs3)
# ys3 = convert2cpu(ys3)
# xs4 = convert2cpu(xs4)
# ys4 = convert2cpu(ys4)
# xs5 = convert2cpu(xs5)
# ys5 = convert2cpu(ys5)
# xs6 = convert2cpu(xs6)
# ys6 = convert2cpu(ys6)
# xs7 = convert2cpu(xs7)
# ys7 = convert2cpu(ys7)
# xs8 = convert2cpu(xs8)
# ys8 = convert2cpu(ys8)
#if validation:
#cls_confs = convert2cpu(cls_confs.view(-1, num_classes))
t2 = time.time()
# Boxes filter
for b in range(batch):
boxes = []
max_conf = -1
for cy in range(h):
for cx in range(w):
for i in range(anchor_dim):
ind = b*sz_hwa + i*sz_hw + cy*w + cx
det_conf = det_confs[ind]
if only_objectness:
conf = det_confs[ind]
else:
conf = det_confs[ind] * cls_max_confs[ind]
if conf > max_conf:
max_conf = conf
max_ind = ind
if conf > conf_thresh:
bcx0 = xs0[ind]
bcy0 = ys0[ind]
bcx1 = xs1[ind]
bcy1 = ys1[ind]
bcx2 = xs2[ind]
bcy2 = ys2[ind]
bcx3 = xs3[ind]
bcy3 = ys3[ind]
bcx4 = xs4[ind]
bcy4 = ys4[ind]
bcx5 = xs5[ind]
bcy5 = ys5[ind]
bcx6 = xs6[ind]
bcy6 = ys6[ind]
bcx7 = xs7[ind]
bcy7 = ys7[ind]
bcx8 = xs8[ind]
bcy8 = ys8[ind]
cls_max_conf = cls_max_confs[ind]
cls_max_id = cls_max_ids[ind]
box = [bcx0/w, bcy0/h, bcx1/w, bcy1/h, bcx2/w, bcy2/h, bcx3/w, bcy3/h, bcx4/w, bcy4/h, bcx5/w, bcy5/h, bcx6/w, bcy6/h, bcx7/w, bcy7/h, bcx8/w, bcy8/h, det_conf, cls_max_conf, cls_max_id]
if (not only_objectness) and validation:
for c in range(num_classes):
tmp_conf = cls_confs[ind][c]
if c != cls_max_id and det_confs[ind]*tmp_conf > conf_thresh:
box.append(tmp_conf)
box.append(c)
boxes.append(box)
if len(boxes) == 0:
bcx0 = xs0[max_ind]
bcy0 = ys0[max_ind]
bcx1 = xs1[max_ind]
bcy1 = ys1[max_ind]
bcx2 = xs2[max_ind]
bcy2 = ys2[max_ind]
bcx3 = xs3[max_ind]
bcy3 = ys3[max_ind]
bcx4 = xs4[max_ind]
bcy4 = ys4[max_ind]
bcx5 = xs5[max_ind]
bcy5 = ys5[max_ind]
bcx6 = xs6[max_ind]
bcy6 = ys6[max_ind]
bcx7 = xs7[max_ind]
bcy7 = ys7[max_ind]
bcx8 = xs8[max_ind]
bcy8 = ys8[max_ind]
cls_max_conf = cls_max_confs[max_ind]
cls_max_id = cls_max_ids[max_ind]
det_conf = det_confs[max_ind]
box = [bcx0/w, bcy0/h, bcx1/w, bcy1/h, bcx2/w, bcy2/h, bcx3/w, bcy3/h, bcx4/w, bcy4/h, bcx5/w, bcy5/h, bcx6/w, bcy6/h, bcx7/w, bcy7/h, bcx8/w, bcy8/h, det_conf, cls_max_conf, cls_max_id]
boxes.append(box)
all_boxes.append(boxes)
else:
            all_boxes.append(boxes)
t3 = time.time()
if True:
print('---------------------------------')
print('gpu to cpu for numpy : %f' % (t0-t0minus))
print('matrix computation : %f' % (t1-t0))
print(' gpu to cpu : %f' % (t2-t1))
print(' boxes filter : %f' % (t3-t2))
print('---------------------------------')
return all_boxes
def get_region_boxes_multi(output, conf_thresh, num_classes, anchors, num_anchors, only_objectness=1, validation=False):
# Parameters
anchor_step = len(anchors)//num_anchors
if output.dim() == 3:
output = output.unsqueeze(0)
batch = output.size(0)
assert(output.size(1) == (19+num_classes)*num_anchors)
h = output.size(2)
w = output.size(3)
# Activation
t0 = time.time()
all_boxes = []
max_conf = -100000
#output = output.view(batch*anchor_dim, 19+num_classes, h*w).transpose(0,1).contiguous().view(19+num_classes, batch*anchor_dim*h*w)
output = output.view(batch*num_anchors, 19+num_classes, h*w).transpose(0,1).contiguous().view(19+num_classes, batch*num_anchors*h*w)
grid_x = torch.linspace(0, w-1, w).repeat(h,1).repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).cuda()
grid_y = torch.linspace(0, h-1, h).repeat(w,1).t().repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).cuda()
xs0 = torch.sigmoid(output[0]) + grid_x
ys0 = torch.sigmoid(output[1]) + grid_y
xs1 = output[2] + grid_x
ys1 = output[3] + grid_y
xs2 = output[4] + grid_x
ys2 = output[5] + grid_y
xs3 = output[6] + grid_x
ys3 = output[7] + grid_y
xs4 = output[8] + grid_x
ys4 = output[9] + grid_y
xs5 = output[10] + grid_x
ys5 = output[11] + grid_y
xs6 = output[12] + grid_x
ys6 = output[13] + grid_y
xs7 = output[14] + grid_x
ys7 = output[15] + grid_y
xs8 = output[16] + grid_x
ys8 = output[17] + grid_y
det_confs = torch.sigmoid(output[18])
cls_confs = torch.nn.Softmax()(Variable(output[19:19+num_classes].transpose(0,1))).data
cls_max_confs, cls_max_ids = torch.max(cls_confs, 1)
cls_max_confs = cls_max_confs.view(-1)
cls_max_ids = cls_max_ids.view(-1)
t1 = time.time()
# GPU to CPU
sz_hw = h*w
sz_hwa = sz_hw*num_anchors
det_confs = convert2cpu(det_confs)
cls_max_confs = convert2cpu(cls_max_confs)
cls_max_ids = convert2cpu_long(cls_max_ids)
xs0 = convert2cpu(xs0)
ys0 = convert2cpu(ys0)
xs1 = convert2cpu(xs1)
ys1 = convert2cpu(ys1)
xs2 = convert2cpu(xs2)
ys2 = convert2cpu(ys2)
xs3 = convert2cpu(xs3)
ys3 = convert2cpu(ys3)
xs4 = convert2cpu(xs4)
ys4 = convert2cpu(ys4)
xs5 = convert2cpu(xs5)
ys5 = convert2cpu(ys5)
xs6 = convert2cpu(xs6)
ys6 = convert2cpu(ys6)
xs7 = convert2cpu(xs7)
ys7 = convert2cpu(ys7)
xs8 = convert2cpu(xs8)
ys8 = convert2cpu(ys8)
if validation:
cls_confs = convert2cpu(cls_confs.view(-1, num_classes))
t2 = time.time()
# Boxes filter
for b in range(batch):
boxes = []
max_conf = -1
for cy in range(h):
for cx in range(w):
for i in range(num_anchors):
ind = b*sz_hwa + i*sz_hw + cy*w + cx
det_conf = det_confs[ind]
if only_objectness:
conf = det_confs[ind]
else:
conf = det_confs[ind] * cls_max_confs[ind]
if conf > max_conf:
max_conf = conf
max_ind = ind
if conf > conf_thresh:
bcx0 = xs0[ind]
bcy0 = ys0[ind]
bcx1 = xs1[ind]
bcy1 = ys1[ind]
bcx2 = xs2[ind]
bcy2 = ys2[ind]
bcx3 = xs3[ind]
bcy3 = ys3[ind]
bcx4 = xs4[ind]
bcy4 = ys4[ind]
bcx5 = xs5[ind]
bcy5 = ys5[ind]
bcx6 = xs6[ind]
bcy6 = ys6[ind]
bcx7 = xs7[ind]
bcy7 = ys7[ind]
bcx8 = xs8[ind]
bcy8 = ys8[ind]
cls_max_conf = cls_max_confs[ind]
cls_max_id = cls_max_ids[ind]
box = [bcx0/w, bcy0/h, bcx1/w, bcy1/h, bcx2/w, bcy2/h, bcx3/w, bcy3/h, bcx4/w, bcy4/h, bcx5/w, bcy5/h, bcx6/w, bcy6/h, bcx7/w, bcy7/h, bcx8/w, bcy8/h, det_conf, cls_max_conf, cls_max_id]
if (not only_objectness) and validation:
for c in range(num_classes):
tmp_conf = cls_confs[ind][c]
if c != cls_max_id and det_confs[ind]*tmp_conf > conf_thresh:
box.append(tmp_conf)
box.append(c)
boxes.append(box)
if len(boxes) == 0:
bcx0 = xs0[max_ind]
bcy0 = ys0[max_ind]
bcx1 = xs1[max_ind]
bcy1 = ys1[max_ind]
bcx2 = xs2[max_ind]
bcy2 = ys2[max_ind]
bcx3 = xs3[max_ind]
bcy3 = ys3[max_ind]
bcx4 = xs4[max_ind]
bcy4 = ys4[max_ind]
bcx5 = xs5[max_ind]
bcy5 = ys5[max_ind]
bcx6 = xs6[max_ind]
bcy6 = ys6[max_ind]
bcx7 = xs7[max_ind]
bcy7 = ys7[max_ind]
bcx8 = xs8[max_ind]
bcy8 = ys8[max_ind]
cls_max_conf = cls_max_confs[max_ind]
cls_max_id = cls_max_ids[max_ind]
det_conf = det_confs[max_ind]
box = [bcx0/w, bcy0/h, bcx1/w, bcy1/h, bcx2/w, bcy2/h, bcx3/w, bcy3/h, bcx4/w, bcy4/h, bcx5/w, bcy5/h, bcx6/w, bcy6/h, bcx7/w, bcy7/h, bcx8/w, bcy8/h, det_conf, cls_max_conf, cls_max_id]
boxes.append(box)
all_boxes.append(boxes)
else:
            all_boxes.append(boxes)
t3 = time.time()
if False:
print('---------------------------------')
print('matrix computation : %f' % (t1-t0))
print(' gpu to cpu : %f' % (t2-t1))
print(' boxes filter : %f' % (t3-t2))
print('---------------------------------')
return all_boxes
def get_corresponding_region_boxes(output, conf_thresh, num_classes, anchors, num_anchors, correspondingclass, only_objectness=1, validation=False):
debug = False
# Parameters
anchor_step = len(anchors)//num_anchors
if output.dim() == 3:
output = output.unsqueeze(0)
batch = output.size(0)
if debug:
print('output.size(1) ', output.size(1) )
print('(19+num_classes)*num_anchors)', (19+num_classes)*num_anchors)
assert(output.size(1) == (19+num_classes)*num_anchors)
h = output.size(2)
w = output.size(3)
# Activation
t0 = time.time()
all_boxes = []
max_conf = -100000
max_cls_conf = -100000
output = output.view(batch*num_anchors, 19+num_classes, h*w).transpose(0,1).contiguous().view(19+num_classes, batch*num_anchors*h*w)
grid_x = torch.linspace(0, w-1, w).repeat(h,1).repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).cuda()
grid_y = torch.linspace(0, h-1, h).repeat(w,1).t().repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).cuda()
xs0 = torch.sigmoid(output[0]) + grid_x
ys0 = torch.sigmoid(output[1]) + grid_y
xs1 = output[2] + grid_x
ys1 = output[3] + grid_y
xs2 = output[4] + grid_x
ys2 = output[5] + grid_y
xs3 = output[6] + grid_x
ys3 = output[7] + grid_y
xs4 = output[8] + grid_x
ys4 = output[9] + grid_y
xs5 = output[10] + grid_x
ys5 = output[11] + grid_y
xs6 = output[12] + grid_x
ys6 = output[13] + grid_y
xs7 = output[14] + grid_x
ys7 = output[15] + grid_y
xs8 = output[16] + grid_x
ys8 = output[17] + grid_y
det_confs = torch.sigmoid(output[18])
cls_confs = torch.nn.Softmax()(Variable(output[19:19+num_classes].transpose(0,1))).data
cls_max_confs, cls_max_ids = torch.max(cls_confs, 1)
cls_max_confs = cls_max_confs.view(-1)
cls_max_ids = cls_max_ids.view(-1)
t1 = time.time()
# GPU to CPU
sz_hw = h*w
sz_hwa = sz_hw*num_anchors
det_confs = convert2cpu(det_confs)
cls_max_confs = convert2cpu(cls_max_confs)
cls_max_ids = convert2cpu_long(cls_max_ids)
xs0 = convert2cpu(xs0)
ys0 = convert2cpu(ys0)
xs1 = convert2cpu(xs1)
ys1 = convert2cpu(ys1)
xs2 = convert2cpu(xs2)
ys2 = convert2cpu(ys2)
xs3 = convert2cpu(xs3)
ys3 = convert2cpu(ys3)
xs4 = convert2cpu(xs4)
ys4 = convert2cpu(ys4)
xs5 = convert2cpu(xs5)
ys5 = convert2cpu(ys5)
xs6 = convert2cpu(xs6)
ys6 = convert2cpu(ys6)
xs7 = convert2cpu(xs7)
ys7 = convert2cpu(ys7)
xs8 = convert2cpu(xs8)
ys8 = convert2cpu(ys8)
if validation:
cls_confs = convert2cpu(cls_confs.view(-1, num_classes))
t2 = time.time()
# Boxes filter
for b in range(batch):
boxes = []
max_conf = -1
for cy in range(h):
for cx in range(w):
for i in range(num_anchors):
ind = b*sz_hwa + i*sz_hw + cy*w + cx
det_conf = det_confs[ind]
if only_objectness:
conf = det_confs[ind]
else:
conf = det_confs[ind] * cls_max_confs[ind]
if (det_confs[ind] > max_conf) and (cls_confs[ind, correspondingclass] > max_cls_conf):
max_conf = det_confs[ind]
max_cls_conf = cls_confs[ind, correspondingclass]
max_ind = ind
if conf > conf_thresh:
bcx0 = xs0[ind]
bcy0 = ys0[ind]
bcx1 = xs1[ind]
bcy1 = ys1[ind]
bcx2 = xs2[ind]
bcy2 = ys2[ind]
bcx3 = xs3[ind]
bcy3 = ys3[ind]
bcx4 = xs4[ind]
bcy4 = ys4[ind]
bcx5 = xs5[ind]
bcy5 = ys5[ind]
bcx6 = xs6[ind]
bcy6 = ys6[ind]
bcx7 = xs7[ind]
bcy7 = ys7[ind]
bcx8 = xs8[ind]
bcy8 = ys8[ind]
cls_max_conf = cls_max_confs[ind]
cls_max_id = cls_max_ids[ind]
box = [bcx0/w, bcy0/h, bcx1/w, bcy1/h, bcx2/w, bcy2/h, bcx3/w, bcy3/h, bcx4/w, bcy4/h, bcx5/w, bcy5/h, bcx6/w, bcy6/h, bcx7/w, bcy7/h, bcx8/w, bcy8/h, det_conf, cls_max_conf, cls_max_id]
if (not only_objectness) and validation:
for c in range(num_classes):
tmp_conf = cls_confs[ind][c]
if c != cls_max_id and det_confs[ind]*tmp_conf > conf_thresh:
box.append(tmp_conf)
box.append(c)
boxes.append(box)
boxesnp = np.array(boxes)
if (len(boxes) == 0) or (not (correspondingclass in boxesnp[:,20])):
bcx0 = xs0[max_ind]
bcy0 = ys0[max_ind]
bcx1 = xs1[max_ind]
bcy1 = ys1[max_ind]
bcx2 = xs2[max_ind]
bcy2 = ys2[max_ind]
bcx3 = xs3[max_ind]
bcy3 = ys3[max_ind]
bcx4 = xs4[max_ind]
bcy4 = ys4[max_ind]
bcx5 = xs5[max_ind]
bcy5 = ys5[max_ind]
bcx6 = xs6[max_ind]
bcy6 = ys6[max_ind]
bcx7 = xs7[max_ind]
bcy7 = ys7[max_ind]
bcx8 = xs8[max_ind]
bcy8 = ys8[max_ind]
cls_max_conf = max_cls_conf # cls_max_confs[max_ind]
cls_max_id = correspondingclass # cls_max_ids[max_ind]
det_conf = max_conf # det_confs[max_ind]
box = [bcx0/w, bcy0/h, bcx1/w, bcy1/h, bcx2/w, bcy2/h, bcx3/w, bcy3/h, bcx4/w, bcy4/h, bcx5/w, bcy5/h, bcx6/w, bcy6/h, bcx7/w, bcy7/h, bcx8/w, bcy8/h, det_conf, cls_max_conf, cls_max_id]
# experiment: chris commented out
boxes.append(box)
# print(boxes)
# experiment: chris commented out
all_boxes.append(boxes)
else:
all_boxes.append(boxes)
t3 = time.time()
if False:
print('---------------------------------')
print('matrix computation : %f' % (t1-t0))
print(' gpu to cpu : %f' % (t2-t1))
print(' boxes filter : %f' % (t3-t2))
print('---------------------------------')
return all_boxes
@jit("float32(float32[:,:])", cache=False, nopython=True, nogil=True, parallel=True)
def esum(z):
return np.sum(np.exp(z))
@jit("float32[:,:](float32[:,:])", cache=False, nopython=True, nogil=True, parallel=True)
def softmax_optimized(z):
num = np.exp(z)
s = num / esum(z)
return s
@jit(nopython=True)
def np_argmax_with_axis_1(a):
shape = a.shape
# print('shape ', shape)
# print('size of first dim', shape[0] + 1)
dim = shape[0]
a_list = []
for i in range(dim):
#print('max index for row ' + str(i) + ':', np.argmax(a[i]))
b = np.argmax(a[i])
a_list.append(b)
# print('argmax result list ', a_list)
return np.array(a_list)
@jit(nopython=True)
def np_amax_with_axis_1(a):
shape = a.shape
dim = shape[0]
a_list = []
for i in range(dim):
#print('max index for row ' + str(i) + ':', np.argmax(a[i]))
b = np.amax(a[i])
a_list.append(b)
# print('amax result list ', a_list)
return np.array(a_list)
#@jit(nopython=True) # this has the potential to speed up, but is not working due to unknown numba error --> NotImplementedError: incompatible shape for array
def get_corresponding_region_boxes_trt(output, conf_thresh, num_classes, num_anchors, correspondingclass, only_objectness=1, validation=False):
# print('type of output', type(output))
# print('type of conf_thresh', type(conf_thresh))
# print('type of num_classes', type(num_classes))
# print('type of num_anchors', type(num_anchors))
# print('type of correspondingclass', type(correspondingclass))
# print('type of output', type(output))
#t0minus = time.time()
# Parameters
#if output.dim() == 3:
# if output.shape == 3:
# output = output.unsqueeze(0) #TODO
batch = output.shape[0]
assert(output.shape[1] == (19+num_classes)*num_anchors)
h = output.shape[2]
w = output.shape[3]
# Activation
#t0 = time.time()
all_boxes = []
max_conf = -100000
max_cls_conf = -100000
#output = output.view(batch*num_anchors, 19+num_classes, h*w).transpose(0,1).contiguous().view(19+num_classes, batch*num_anchors*h*w)
output = output.reshape(batch*num_anchors, 19+num_classes, h*w)#.transpose(0,1).ascontiguousarray(output)
#print('reshaped output numpy has shape ',output.shape)
output = np.transpose(output, (1,0,2))
#print('reshaped output numpy has shape ',output.shape)
output = np.ascontiguousarray(output)
#print('reshaped output numpy has shape ',output.shape)
output = output.reshape(19+num_classes, batch*num_anchors*h*w)
#grid_x = torch.linspace(0, w-1, w).repeat(h,1).repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).cuda()
temp_x = np.linspace(0, w-1, w)
#temp_x = np.tile(temp_x, (h,1)) #this is the original code, replaced by the following np.repeat() style code for numba compatibility
# temp_x shape is (13, 13)
#print('temp_x shape', temp_x.shape)
size_x = temp_x.size
temp_x = temp_x.repeat(h).reshape(-1, h).transpose().reshape((-1, 1, size_x))
#print('temp_x shape', temp_x.shape)
# temp_x shape is (13, 1, 13)
    #temp_x = np.tile(temp_x, (batch*num_anchors, 1, 1)) # this is the original, replaced by the following np.repeat() for numba compatibility
temp_x = temp_x.repeat(batch*num_anchors).reshape(-1, batch*num_anchors).transpose().reshape((-1,1,size_x))
grid_x = temp_x.reshape(batch*num_anchors*h*w)
#grid_y = torch.linspace(0, h-1, h).repeat(w,1).t().repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).cuda()
temp_y = np.linspace(0, h-1, h)
#print('temp_y shape', temp_y.shape)
#temp_x shape is (13,) which is 1-D array of 13 elements
#temp_y = np.tile(temp_y,(w,1))
size_y = temp_y.size
# print('size_y', size_y)
temp_y = temp_y.repeat(w).reshape(-1, w).transpose().reshape((-1, size_y))
# print('temp_y shape', temp_y.shape)
temp_y = np.transpose(temp_y, (1,0))
print('temp_y shape', temp_y.shape)
size_y0 = temp_y.shape[0]
size_y1 = temp_y.shape[1]
print('size_y1', size_y1)
# temp_y = np.tile(temp_y, (batch*num_anchors, 1, 1))
# grid_y = temp_y.reshape(batch*num_anchors*h*w)
temp_y = temp_y.repeat(batch*num_anchors).reshape(-1, batch*num_anchors).transpose().reshape((-1,size_y0,size_y1))
grid_y = temp_y.reshape(batch*num_anchors*h*w)
# define vectorized sigmoid
#sigmoid_v = np.vectorize(sigmoid)
xs0 = sigmoid(output[0]) + grid_x
ys0 = sigmoid(output[1]) + grid_y
xs1 = output[2] + grid_x
ys1 = output[3] + grid_y
xs2 = output[4] + grid_x
ys2 = output[5] + grid_y
xs3 = output[6] + grid_x
ys3 = output[7] + grid_y
xs4 = output[8] + grid_x
ys4 = output[9] + grid_y
xs5 = output[10] + grid_x
ys5 = output[11] + grid_y
xs6 = output[12] + grid_x
ys6 = output[13] + grid_y
xs7 = output[14] + grid_x
ys7 = output[15] + grid_y
xs8 = output[16] + grid_x
ys8 = output[17] + grid_y
det_confs = sigmoid(output[18])
output_transpose = np.transpose(output[19:19+num_classes], (1,0))
cls_confs = softmax_optimized(output_transpose)
#cls_max_confs, cls_max_ids = torch.max(cls_confs, 1)
# cls_max_ids = np.argmax(cls_confs, 1)
cls_max_ids = np_argmax_with_axis_1(cls_confs) # replacing above line since argmax with axis is not supported by numba
#cls_max_confs = np.amax(cls_confs, 1)
cls_max_confs = np_amax_with_axis_1(cls_confs) # replacing above line since amax with axis is not supported by numba
cls_max_confs = cls_max_confs.reshape(-1)
cls_max_ids = cls_max_ids.reshape(-1)
#t1 = time.time()
# GPU to CPU
sz_hw = h*w
sz_hwa = sz_hw*num_anchors
#det_confs = convert2cpu(det_confs)
#cls_max_confs = convert2cpu(cls_max_confs)
#cls_max_ids = convert2cpu_long(cls_max_ids)
#xs0 = convert2cpu(xs0)
#ys0 = convert2cpu(ys0)
#xs1 = convert2cpu(xs1)
#ys1 = convert2cpu(ys1)
#xs2 = convert2cpu(xs2)
#ys2 = convert2cpu(ys2)
#xs3 = convert2cpu(xs3)
#ys3 = convert2cpu(ys3)
#xs4 = convert2cpu(xs4)
#ys4 = convert2cpu(ys4)
#xs5 = convert2cpu(xs5)
#ys5 = convert2cpu(ys5)
#xs6 = convert2cpu(xs6)
#ys6 = convert2cpu(ys6)
#xs7 = convert2cpu(xs7)
#ys7 = convert2cpu(ys7)
#xs8 = convert2cpu(xs8)
#ys8 = convert2cpu(ys8)
#if validation:
# cls_confs = convert2cpu(cls_confs.view(-1, num_classes))
#t2 = time.time()
# Boxes filter
for b in range(batch):
boxes = []
max_conf = -1
for cy in range(h):
for cx in range(w):
for i in range(num_anchors):
ind = b*sz_hwa + i*sz_hw + cy*w + cx
det_conf = det_confs[ind]
if only_objectness:
conf = det_confs[ind]
else:
conf = det_confs[ind] * cls_max_confs[ind]
if (det_confs[ind] > max_conf) and (cls_confs[ind, correspondingclass] > max_cls_conf):
max_conf = det_confs[ind]
max_cls_conf = cls_confs[ind, correspondingclass]
max_ind = ind
if conf > conf_thresh:
bcx0 = xs0[ind]
bcy0 = ys0[ind]
bcx1 = xs1[ind]
bcy1 = ys1[ind]
bcx2 = xs2[ind]
bcy2 = ys2[ind]
bcx3 = xs3[ind]
bcy3 = ys3[ind]
bcx4 = xs4[ind]
bcy4 = ys4[ind]
bcx5 = xs5[ind]
bcy5 = ys5[ind]
bcx6 = xs6[ind]
bcy6 = ys6[ind]
bcx7 = xs7[ind]
bcy7 = ys7[ind]
bcx8 = xs8[ind]
bcy8 = ys8[ind]
cls_max_conf = cls_max_confs[ind]
cls_max_id = cls_max_ids[ind]
box = [bcx0/w, bcy0/h, bcx1/w, bcy1/h, bcx2/w, bcy2/h, bcx3/w, bcy3/h, bcx4/w, bcy4/h, bcx5/w, bcy5/h, bcx6/w, bcy6/h, bcx7/w, bcy7/h, bcx8/w, bcy8/h, det_conf, cls_max_conf, cls_max_id]
if (not only_objectness) and validation:
for c in range(num_classes):
tmp_conf = cls_confs[ind][c]
if c != cls_max_id and det_confs[ind]*tmp_conf > conf_thresh:
box.append(tmp_conf)
box.append(c)
boxes.append(box)
boxesnp = np.array(boxes)
isCorrespondingClassIncluded = False
        # check whether any detected box already carries the corresponding class id (column 20)
        for i in range(boxesnp.shape[0]):
            if int(boxesnp[i, 20]) == correspondingclass:
                isCorrespondingClassIncluded = True
                break
# print('isCorrespondingClassIncluded value', isCorrespondingClassIncluded)
# array_slice = boxesnp[:,20].astype(np.int64)
# print('array_slice', array_slice)
# print('type of array_slice', type(array_slice))
# print('type of correspondingclass', type(correspondingclass))
#if (len(boxes) == 0) or (not (correspondingclass in boxesnp[:,20].astype(np.int64))):
if ((len(boxes) == 0) or (not isCorrespondingClassIncluded)):
bcx0 = xs0[max_ind]
bcy0 = ys0[max_ind]
bcx1 = xs1[max_ind]
bcy1 = ys1[max_ind]
bcx2 = xs2[max_ind]
bcy2 = ys2[max_ind]
bcx3 = xs3[max_ind]
bcy3 = ys3[max_ind]
bcx4 = xs4[max_ind]
bcy4 = ys4[max_ind]
bcx5 = xs5[max_ind]
bcy5 = ys5[max_ind]
bcx6 = xs6[max_ind]
bcy6 = ys6[max_ind]
bcx7 = xs7[max_ind]
bcy7 = ys7[max_ind]
bcx8 = xs8[max_ind]
bcy8 = ys8[max_ind]
cls_max_conf = max_cls_conf # cls_max_confs[max_ind]
cls_max_id = correspondingclass # cls_max_ids[max_ind]
det_conf = max_conf # det_confs[max_ind]
box = [bcx0/w, bcy0/h, bcx1/w, bcy1/h, bcx2/w, bcy2/h, bcx3/w, bcy3/h, bcx4/w, bcy4/h, bcx5/w, bcy5/h, bcx6/w, bcy6/h, bcx7/w, bcy7/h, bcx8/w, bcy8/h, det_conf, cls_max_conf, cls_max_id]
# experiment: chris commented out
boxes.append(box)
# print(boxes)
# experiment: chris commented out
all_boxes.append(boxes)
else:
all_boxes.append(boxes)
#t3 = time.time()
if False:
print('---------------------------------')
print('gpu to cpu for numpy : %f' % (t0-t0minus))
print('matrix computation : %f' % (t1-t0))
print(' gpu to cpu : %f' % (t2-t1))
print(' boxes filter : %f' % (t3-t2))
print('---------------------------------')
return all_boxes
def get_boxes(output, conf_thresh, num_classes, anchors, num_anchors, correspondingclass, only_objectness=1, validation=False):
# Parameters
    anchor_step = len(anchors)//num_anchors
if output.dim() == 3:
output = output.unsqueeze(0)
batch = output.size(0)
assert(output.size(1) == (19+num_classes)*num_anchors)
h = output.size(2)
w = output.size(3)
# Activation
t0 = time.time()
all_boxes = []
max_conf = -100000
max_cls_conf = -100000
output = output.view(batch*num_anchors, 19+num_classes, h*w).transpose(0,1).contiguous().view(19+num_classes, batch*num_anchors*h*w)
grid_x = torch.linspace(0, w-1, w).repeat(h,1).repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).cuda()
grid_y = torch.linspace(0, h-1, h).repeat(w,1).t().repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).cuda()
xs0 = torch.sigmoid(output[0]) + grid_x
ys0 = torch.sigmoid(output[1]) + grid_y
xs1 = output[2] + grid_x
ys1 = output[3] + grid_y
xs2 = output[4] + grid_x
ys2 = output[5] + grid_y
xs3 = output[6] + grid_x
ys3 = output[7] + grid_y
xs4 = output[8] + grid_x
ys4 = output[9] + grid_y
xs5 = output[10] + grid_x
ys5 = output[11] + grid_y
xs6 = output[12] + grid_x
ys6 = output[13] + grid_y
xs7 = output[14] + grid_x
ys7 = output[15] + grid_y
xs8 = output[16] + grid_x
ys8 = output[17] + grid_y
det_confs = torch.sigmoid(output[18])
cls_confs = torch.nn.Softmax()(Variable(output[19:19+num_classes].transpose(0,1))).data
cls_max_confs, cls_max_ids = torch.max(cls_confs, 1)
cls_max_confs = cls_max_confs.view(-1)
cls_max_ids = cls_max_ids.view(-1)
t1 = time.time()
# GPU to CPU
sz_hw = h*w
sz_hwa = sz_hw*num_anchors
det_confs = convert2cpu(det_confs)
cls_max_confs = convert2cpu(cls_max_confs)
cls_max_ids = convert2cpu_long(cls_max_ids)
xs0 = convert2cpu(xs0)
ys0 = convert2cpu(ys0)
xs1 = convert2cpu(xs1)
ys1 = convert2cpu(ys1)
xs2 = convert2cpu(xs2)
ys2 = convert2cpu(ys2)
xs3 = convert2cpu(xs3)
ys3 = convert2cpu(ys3)
xs4 = convert2cpu(xs4)
ys4 = convert2cpu(ys4)
xs5 = convert2cpu(xs5)
ys5 = convert2cpu(ys5)
xs6 = convert2cpu(xs6)
ys6 = convert2cpu(ys6)
xs7 = convert2cpu(xs7)
ys7 = convert2cpu(ys7)
xs8 = convert2cpu(xs8)
ys8 = convert2cpu(ys8)
if validation:
cls_confs = convert2cpu(cls_confs.view(-1, num_classes))
t2 = time.time()
# Boxes filter
for b in range(batch):
boxes = []
max_conf = -1
for cy in range(h):
for cx in range(w):
for i in range(num_anchors):
ind = b*sz_hwa + i*sz_hw + cy*w + cx
det_conf = det_confs[ind]
if only_objectness:
conf = det_confs[ind]
else:
conf = det_confs[ind] * cls_max_confs[ind]
if (conf > max_conf) and (cls_confs[ind, correspondingclass] > max_cls_conf):
max_conf = conf
max_cls_conf = cls_confs[ind, correspondingclass]
max_ind = ind
if conf > conf_thresh:
bcx0 = xs0[ind]
bcy0 = ys0[ind]
bcx1 = xs1[ind]
bcy1 = ys1[ind]
bcx2 = xs2[ind]
bcy2 = ys2[ind]
bcx3 = xs3[ind]
bcy3 = ys3[ind]
bcx4 = xs4[ind]
bcy4 = ys4[ind]
bcx5 = xs5[ind]
bcy5 = ys5[ind]
bcx6 = xs6[ind]
bcy6 = ys6[ind]
bcx7 = xs7[ind]
bcy7 = ys7[ind]
bcx8 = xs8[ind]
bcy8 = ys8[ind]
cls_max_conf = cls_max_confs[ind]
cls_max_id = cls_max_ids[ind]
box = [bcx0/w, bcy0/h, bcx1/w, bcy1/h, bcx2/w, bcy2/h, bcx3/w, bcy3/h, bcx4/w, bcy4/h, bcx5/w, bcy5/h, bcx6/w, bcy6/h, bcx7/w, bcy7/h, bcx8/w, bcy8/h, det_conf, cls_max_conf, cls_max_id]
if (not only_objectness) and validation:
for c in range(num_classes):
tmp_conf = cls_confs[ind][c]
if c != cls_max_id and det_confs[ind]*tmp_conf > conf_thresh:
box.append(tmp_conf)
box.append(c)
boxes.append(box)
boxesnp = np.array(boxes)
if (len(boxes) == 0) or (not (correspondingclass in boxesnp[:,20])):
bcx0 = xs0[max_ind]
bcy0 = ys0[max_ind]
bcx1 = xs1[max_ind]
bcy1 = ys1[max_ind]
bcx2 = xs2[max_ind]
bcy2 = ys2[max_ind]
bcx3 = xs3[max_ind]
bcy3 = ys3[max_ind]
bcx4 = xs4[max_ind]
bcy4 = ys4[max_ind]
bcx5 = xs5[max_ind]
bcy5 = ys5[max_ind]
bcx6 = xs6[max_ind]
bcy6 = ys6[max_ind]
bcx7 = xs7[max_ind]
bcy7 = ys7[max_ind]
bcx8 = xs8[max_ind]
bcy8 = ys8[max_ind]
cls_max_conf = max_cls_conf # cls_max_confs[max_ind]
cls_max_id = correspondingclass # cls_max_ids[max_ind]
det_conf = det_confs[max_ind]
box = [bcx0/w, bcy0/h, bcx1/w, bcy1/h, bcx2/w, bcy2/h, bcx3/w, bcy3/h, bcx4/w, bcy4/h, bcx5/w, bcy5/h, bcx6/w, bcy6/h, bcx7/w, bcy7/h, bcx8/w, bcy8/h, det_conf, cls_max_conf, cls_max_id]
boxes.append(box)
# print(boxes)
all_boxes.append(boxes)
else:
all_boxes.append(boxes)
t3 = time.time()
if False:
print('---------------------------------')
print('matrix computation : %f' % (t1-t0))
print(' gpu to cpu : %f' % (t2-t1))
print(' boxes filter : %f' % (t3-t2))
print('---------------------------------')
return all_boxes
def plot_boxes_cv2(img, boxes, savename=None, class_names=None, color=None):
import cv2
colors = torch.FloatTensor([[1,0,1],[0,0,1],[0,1,1],[0,1,0],[1,1,0],[1,0,0]]);
def get_color(c, x, max_val):
ratio = float(x)/max_val * 5
i = int(math.floor(ratio))
j = int(math.ceil(ratio))
ratio = ratio - i
r = (1-ratio) * colors[i][c] + ratio*colors[j][c]
return int(r*255)
width = img.shape[1]
height = img.shape[0]
for i in range(len(boxes)):
box = boxes[i]
x1 = int(round((box[0] - box[2]/2.0) * width))
y1 = int(round((box[1] - box[3]/2.0) * height))
x2 = int(round((box[0] + box[2]/2.0) * width))
y2 = int(round((box[1] + box[3]/2.0) * height))
if color:
rgb = color
else:
rgb = (255, 0, 0)
if len(box) >= 7 and class_names:
cls_conf = box[5]
cls_id = box[6]
print('%s: %f' % (class_names[cls_id], cls_conf))
classes = len(class_names)
offset = cls_id * 123457 % classes
red = get_color(2, offset, classes)
green = get_color(1, offset, classes)
blue = get_color(0, offset, classes)
if color is None:
rgb = (red, green, blue)
img = cv2.putText(img, class_names[cls_id], (x1,y1), cv2.FONT_HERSHEY_SIMPLEX, 1.2, rgb, 1)
img = cv2.rectangle(img, (x1,y1), (x2,y2), rgb, 1)
if savename:
print("save plot results to %s" % savename)
cv2.imwrite(savename, img)
return img
def plot_boxes(img, boxes, savename=None, class_names=None):
colors = torch.FloatTensor([[1,0,1],[0,0,1],[0,1,1],[0,1,0],[1,1,0],[1,0,0]]);
def get_color(c, x, max_val):
ratio = float(x)/max_val * 5
i = int(math.floor(ratio))
j = int(math.ceil(ratio))
ratio = ratio - i
r = (1-ratio) * colors[i][c] + ratio*colors[j][c]
return int(r*255)
width = img.width
height = img.height
draw = ImageDraw.Draw(img)
for i in range(len(boxes)):
box = boxes[i]
x1 = (box[0] - box[2]/2.0) * width
y1 = (box[1] - box[3]/2.0) * height
x2 = (box[0] + box[2]/2.0) * width
y2 = (box[1] + box[3]/2.0) * height
rgb = (255, 0, 0)
if len(box) >= 7 and class_names:
cls_conf = box[5]
cls_id = box[6]
print('%s: %f' % (class_names[cls_id], cls_conf))
classes = len(class_names)
offset = cls_id * 123457 % classes
red = get_color(2, offset, classes)
green = get_color(1, offset, classes)
blue = get_color(0, offset, classes)
rgb = (red, green, blue)
draw.text((x1, y1), class_names[cls_id], fill=rgb)
draw.rectangle([x1, y1, x2, y2], outline = rgb)
if savename:
print("save plot results to %s" % savename)
img.save(savename)
return img
def read_truths(lab_path):
if os.path.getsize(lab_path):
truths = np.loadtxt(lab_path)
truths = truths.reshape(truths.size//21, 21) # to avoid single truth problem
return truths
else:
return np.array([])
def read_truths_args(lab_path):
truths = read_truths(lab_path)
new_truths = []
for i in range(truths.shape[0]):
new_truths.append([truths[i][0], truths[i][1], truths[i][2], truths[i][3], truths[i][4],
truths[i][5], truths[i][6], truths[i][7], truths[i][8], truths[i][9], truths[i][10],
truths[i][11], truths[i][12], truths[i][13], truths[i][14], truths[i][15], truths[i][16], truths[i][17], truths[i][18]])
return np.array(new_truths)
def read_pose(lab_path):
if os.path.getsize(lab_path):
truths = np.loadtxt(lab_path)
# truths = truths.reshape(truths.size/21, 21) # to avoid single truth problem
return truths
else:
return np.array([])
def load_class_names(namesfile):
class_names = []
with open(namesfile, 'r') as fp:
lines = fp.readlines()
for line in lines:
line = line.rstrip()
class_names.append(line)
return class_names
def image2torch(img):
width = img.width
height = img.height
img = torch.ByteTensor(torch.ByteStorage.from_buffer(img.tobytes()))
img = img.view(height, width, 3).transpose(0,1).transpose(0,2).contiguous()
img = img.view(1, 3, height, width)
img = img.float().div(255.0)
return img
def do_detect(model, img, conf_thresh, nms_thresh, use_cuda=1):
model.eval()
t0 = time.time()
if isinstance(img, Image.Image):
width = img.width
height = img.height
img = torch.ByteTensor(torch.ByteStorage.from_buffer(img.tobytes()))
img = img.view(height, width, 3).transpose(0,1).transpose(0,2).contiguous()
img = img.view(1, 3, height, width)
img = img.float().div(255.0)
elif type(img) == np.ndarray: # cv2 image
img = torch.from_numpy(img.transpose(2,0,1)).float().div(255.0).unsqueeze(0)
else:
print("unknow image type")
exit(-1)
t1 = time.time()
if use_cuda:
img = img.cuda()
img = torch.autograd.Variable(img)
t2 = time.time()
output = model(img)
output = output.data
print('model output shape ', output.size())
#for j in range(100):
# sys.stdout.write('%f ' % (output.storage()[j]))
#print('')
t3 = time.time()
#output = output.cpu().numpy();
num_classes = 1
# model.anchors = [0.1067, 0.9223]
# model.num_anchors = 1
boxes = get_region_boxes(output, conf_thresh, num_classes)[0]
#for j in range(len(boxes)):
# print(boxes[j])
t4 = time.time()
boxes = nms(boxes, nms_thresh)
t5 = time.time()
if True:
print('-----------------------------------')
print(' image to tensor : %f' % (t1 - t0))
print(' tensor to cuda : %f' % (t2 - t1))
print(' predict : %f' % (t3 - t2))
print('get_region_boxes : %f' % (t4 - t3))
print(' nms : %f' % (t5 - t4))
print(' total : %f' % (t5 - t0))
print('-----------------------------------')
return boxes
def do_detect_multi(model, img, conf_thresh, nms_thresh, correspondingclass, use_cuda=1):
model.eval()
t0 = time.time()
if isinstance(img, Image.Image):
width = img.width
height = img.height
img = torch.ByteTensor(torch.ByteStorage.from_buffer(img.tobytes()))
img = img.view(height, width, 3).transpose(0,1).transpose(0,2).contiguous()
img = img.view(1, 3, height, width)
img = img.float().div(255.0)
elif type(img) == np.ndarray: # cv2 image
img = torch.from_numpy(img.transpose(2,0,1)).float().div(255.0).unsqueeze(0)
else:
print("unknow image type")
exit(-1)
t1 = time.time()
if use_cuda:
img = img.cuda()
img = torch.autograd.Variable(img)
t2 = time.time()
output = model(img)
output = output.data
#for j in range(100):
# sys.stdout.write('%f ' % (output.storage()[j]))
#print('')
t3 = time.time()
boxes = get_corresponding_region_boxes(output, conf_thresh, model.num_classes, model.anchors, model.num_anchors, correspondingclass, only_objectness=1, validation=False)
#for j in range(len(boxes)):
# print(boxes[j])
t4 = time.time()
boxes = nms_multi(boxes, nms_thresh)
t5 = time.time()
# for debug
#print(boxes)
if False:
print('-----------------------------------')
print(' image to tensor : %f' % (t1 - t0))
print(' tensor to cuda : %f' % (t2 - t1))
print(' predict : %f' % (t3 - t2))
print('get_region_boxes : %f' % (t4 - t3))
print(' nms : %f' % (t5 - t4))
print(' total : %f' % (t5 - t0))
print('-----------------------------------')
return boxes[0], output
def do_detect_multi_v2(model, img, conf_thresh, nms_thresh, use_cuda=1):
model.eval()
t0 = time.time()
if isinstance(img, Image.Image):
width = img.width
height = img.height
img = torch.ByteTensor(torch.ByteStorage.from_buffer(img.tobytes()))
img = img.view(height, width, 3).transpose(0,1).transpose(0,2).contiguous()
img = img.view(1, 3, height, width)
img = img.float().div(255.0)
elif type(img) == np.ndarray: # cv2 image
img = torch.from_numpy(img.transpose(2,0,1)).float().div(255.0).unsqueeze(0)
else:
print("unknow image type")
exit(-1)
t1 = time.time()
if use_cuda:
img = img.cuda()
img = torch.autograd.Variable(img)
t2 = time.time()
output = model(img)
output = output.data
#for j in range(100):
# sys.stdout.write('%f ' % (output.storage()[j]))
#print('')
t3 = time.time()
#for j in range(len(boxes)):
# print(boxes[j])
t4 = time.time()
t5 = time.time()
# for debug
#print(boxes)
if False:
print('-----------------------------------')
print(' image to tensor : %f' % (t1 - t0))
print(' tensor to cuda : %f' % (t2 - t1))
print(' predict : %f' % (t3 - t2))
print('get_region_boxes : %f' % (t4 - t3))
print(' nms : %f' % (t5 - t4))
print(' total : %f' % (t5 - t0))
print('-----------------------------------')
return output
def do_detect_multi_v3(model, img, conf_thresh, nms_thresh, use_cuda=1):
model.eval()
t0 = time.time()
if isinstance(img, Image.Image):
width = img.width
height = img.height
img = torch.ByteTensor(torch.ByteStorage.from_buffer(img.tobytes()))
img = img.view(height, width, 3).transpose(0,1).transpose(0,2).contiguous()
img = img.view(1, 3, height, width)
img = img.float().div(255.0)
elif type(img) == np.ndarray: # cv2 image
img = torch.from_numpy(img.transpose(2,0,1)).float().div(255.0).unsqueeze(0)
else:
print("unknow image type")
exit(-1)
t1 = time.time()
if use_cuda:
img = img.cuda()
img = torch.autograd.Variable(img)
t2 = time.time()
output = model(img)
output = output.data
#for j in range(100):
# sys.stdout.write('%f ' % (output.storage()[j]))
#print('')
t3 = time.time()
num_classes = 3
boxes = get_region_boxes_multi(output, conf_thresh, num_classes, model.anchors, model.num_anchors)[0]
#for j in range(len(boxes)):
# print(boxes[j])
t4 = time.time()
# separate boxes based on its class, before applying nms
sub_box = [[] for _ in range(num_classes)]
for box in boxes:
for i in range(num_classes):
class_id = box[20]
if class_id == i:
sub_box[i].append(box)
# apply nms to each class of box, then merged them
merged_boxes = []
for i in range(num_classes):
if len(sub_box[i]):
sub_box[i] = nms_v2(sub_box[i], nms_thresh)
merged_boxes += sub_box[i]
t5 = time.time()
# for debug
print('final nms boxes')
print_class_and_conf(merged_boxes)
if False:
print('-----------------------------------')
print(' image to tensor : %f' % (t1 - t0))
print(' tensor to cuda : %f' % (t2 - t1))
print(' predict : %f' % (t3 - t2))
print('get_region_boxes : %f' % (t4 - t3))
print(' nms : %f' % (t5 - t4))
print(' total : %f' % (t5 - t0))
print('-----------------------------------')
return merged_boxes
def do_detect_trt(context, img, conf_thresh, nms_thresh, bindings, inputs, outputs, stream, use_cuda=1):
#model.eval()
t0 = time.time()
if isinstance(img, Image.Image):
width = img.width
height = img.height
img = torch.ByteTensor(torch.ByteStorage.from_buffer(img.tobytes()))
img = img.view(height, width, 3).transpose(0,1).transpose(0,2).contiguous()
img = img.view(1, 3, height, width)
img = img.float().div(255.0)
elif type(img) == np.ndarray: # cv2 image
img = torch.from_numpy(img.transpose(2,0,1)).float().div(255.0).unsqueeze(0)
img = img
else:
print("unknow image type")
exit(-1)
t1 = time.time()
# if use_cuda:
# img = img.cuda()
# img = torch.autograd.Variable(img)
t2 = time.time()
inputs[0].host = img.numpy()
trt_outputs = []
trt_outputs = common.do_inference(context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream)
    trt_outputs = np.array(trt_outputs).reshape(1,20,13,13)
#output = model(img)
#output = output.data
print('model output shape ', trt_outputs.shape)
#for j in range(100):
# sys.stdout.write('%f ' % (output.storage()[j]))
#print('')
t3 = time.time()
#output = output.cpu().numpy();
# model.num_classes = 1
# model.anchors = model.anchors = [0.1067, 0.9223]
# model.num_anchors = 1
num_classes = 1
boxes = get_region_boxes(trt_outputs, conf_thresh, num_classes)[0]
#for j in range(len(boxes)):
# print(boxes[j])
t4 = time.time()
boxes = nms(boxes, nms_thresh)
#boxes = non_max_suppression_fast(boxes, 0.7)
t5 = time.time()
if True:
print('-----------------------------------')
print(' image to tensor : %f' % (t1 - t0))
print(' tensor to cuda : %f' % (t2 - t1))
print(' predict : %f' % (t3 - t2))
print('get_region_boxes : %f' % (t4 - t3))
print(' nms : %f' % (t5 - t4))
print(' total : %f' % (t5 - t0))
print('-----------------------------------')
return boxes
def do_detect_trt_multi(context, img, conf_thresh, nms_thresh, num_classes, anchors, num_anchors, bindings, inputs, outputs, stream, use_cuda=1):
#model.eval()
t0 = time.time()
if isinstance(img, Image.Image):
width = img.width
height = img.height
img = torch.ByteTensor(torch.ByteStorage.from_buffer(img.tobytes()))
img = img.view(height, width, 3).transpose(0,1).transpose(0,2).contiguous()
img = img.view(1, 3, height, width)
img = img.float().div(255.0)
#print('img type is Image')
elif type(img) == np.ndarray: # cv2 image
img = torch.from_numpy(img.transpose(2,0,1)).float().div(255.0).unsqueeze(0)
img = np.ascontiguousarray(img, dtype=np.float32)
#print('img type is cv2')
else:
print("unknow image type")
exit(-1)
t1 = time.time()
# if use_cuda:
# img = img.cuda()
# img = torch.autograd.Variable(img)
t2 = time.time()
inputs[0].host = img
trt_outputs = []
trt_outputs = common.do_inference(context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream)
    trt_outputs = np.array(trt_outputs).reshape(1, num_anchors*(19 + num_classes),13,13)
#print('model output shape ', trt_outputs.shape)
t3 = time.time()
all_boxes = []
for i in range(num_classes):
correspondingclass = i
boxes = get_corresponding_region_boxes_trt(trt_outputs, conf_thresh, num_classes, num_anchors, correspondingclass, only_objectness=1, validation=True)[0]
# group boxes into each class
classified_boxes = []
for i in range(len(boxes)):
confidence = boxes[i][18]
obj_class = boxes[i][20]
if (confidence > conf_thresh) and (obj_class == correspondingclass):
classified_boxes.append(boxes[i])
# perfrom nms (non-maximum supression) for each class's boxes
classified_boxes = nms_multi_v2(classified_boxes, nms_thresh)
#classified_boxes = non_max_suppression_fast(classified_boxes, nms_thresh)
all_boxes.append(classified_boxes)
#for j in range(len(boxes)):
# print(boxes[j])
t4 = time.time()
t5 = time.time()
# for debug
#print(boxes)
if True:
print('-----------------------------------')
print(' image to tensor : %f' % (t1 - t0))
print(' tensor to cuda : %f' % (t2 - t1))
print(' predict : %f' % (t3 - t2))
print('get_region_boxes : %f' % (t4 - t3))
print(' nms : %f' % (t5 - t4))
print(' total : %f' % (t5 - t0))
print('-----------------------------------')
return all_boxes
def get_engine(onnx_file_path, engine_file_path=""):
"""Attempts to load a serialized engine if available, otherwise builds a new TensorRT engine and saves it."""
def build_engine():
"""Takes an ONNX file and creates a TensorRT engine to run inference with"""
with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
            builder.max_workspace_size = 1 << 31 # 2GB
builder.max_batch_size = 1
# Parse model file
if not os.path.exists(onnx_file_path):
print('ONNX file {} not found, please run yolov3_to_onnx.py first to generate it.'.format(onnx_file_path))
exit(0)
print('Loading ONNX file from path {}...'.format(onnx_file_path))
with open(onnx_file_path, 'rb') as model:
print('Beginning ONNX file parsing')
parser.parse(model.read())
print('Completed parsing of ONNX file')
print('Building an engine from file {}; this may take a while...'.format(onnx_file_path))
engine = builder.build_cuda_engine(network)
print("Completed creating Engine")
with open(engine_file_path, "wb") as f:
f.write(engine.serialize())
return engine
if os.path.exists(engine_file_path):
# If a serialized engine exists, use it instead of building an engine.
print("Reading engine from file {}".format(engine_file_path))
with open(engine_file_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
return runtime.deserialize_cuda_engine(f.read())
else:
return build_engine()
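# Illustrative usage sketch for get_engine(): the file names are hypothetical and
# the example only runs when the exported ONNX model is actually present.
def _example_get_engine_usage():
    onnx_path = 'yolo-pose.onnx'      # hypothetical exported model
    engine_path = 'yolo-pose.trt'     # hypothetical serialized engine file
    if os.path.exists(onnx_path):
        engine = get_engine(onnx_path, engine_path)
        print('engine ready:', engine is not None)
    else:
        print('skipping example, %s not found' % onnx_path)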
def preprosess_img(img_path):
    frame = cv2.imread(img_path)  # read as 3-channel BGR so the BGR2RGB conversion below is valid
img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
yolo_img =cv2.resize(img, (416, 416), interpolation=cv2.INTER_AREA)
plt.imshow(img)
return yolo_img
def read_data_cfg(datacfg):
options = dict()
options['gpus'] = '0,1,2,3'
options['num_workers'] = '10'
with open(datacfg, 'r') as fp:
lines = fp.readlines()
for line in lines:
line = line.strip()
if line == '':
continue
key,value = line.split('=')
key = key.strip()
value = value.strip()
options[key] = value
return options
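# Illustrative usage sketch for read_data_cfg(): writes a small hypothetical
# "key = value" data config to a temporary file and parses it back.
def _example_read_data_cfg_usage():
    tmp_path = 'example_data_cfg.data'  # hypothetical temporary file name
    with open(tmp_path, 'w') as fp:
        fp.write('train = train.txt\nvalid = test.txt\nbackup = backup\n')
    options = read_data_cfg(tmp_path)
    print(options['train'], options['valid'], options['gpus'])
    os.remove(tmp_path)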
def scale_bboxes(bboxes, width, height):
import copy
dets = copy.deepcopy(bboxes)
for i in range(len(dets)):
dets[i][0] = dets[i][0] * width
dets[i][1] = dets[i][1] * height
dets[i][2] = dets[i][2] * width
dets[i][3] = dets[i][3] * height
return dets
def file_lines(thefilepath):
count = 0
thefile = open(thefilepath, 'rb')
while True:
buffer = thefile.read(8192*1024)
if not buffer:
break
count += buffer.count(b'\n')
thefile.close( )
return count
def get_image_size(fname):
    '''Determine the image type of fname and return its size.
from draco'''
with open(fname, 'rb') as fhandle:
head = fhandle.read(24)
if len(head) != 24:
return
if imghdr.what(fname) == 'png':
check = struct.unpack('>i', head[4:8])[0]
if check != 0x0d0a1a0a:
return
width, height = struct.unpack('>ii', head[16:24])
elif imghdr.what(fname) == 'gif':
width, height = struct.unpack('<HH', head[6:10])
elif imghdr.what(fname) == 'jpeg' or imghdr.what(fname) == 'jpg':
try:
fhandle.seek(0) # Read 0xff next
size = 2
ftype = 0
while not 0xc0 <= ftype <= 0xcf:
fhandle.seek(size, 1)
byte = fhandle.read(1)
while ord(byte) == 0xff:
byte = fhandle.read(1)
ftype = ord(byte)
size = struct.unpack('>H', fhandle.read(2))[0] - 2
# We are at a SOFn block
fhandle.seek(1, 1) # Skip `precision' byte.
height, width = struct.unpack('>HH', fhandle.read(4))
except Exception: #IGNORE:W0703
return
else:
return
return width, height
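# Illustrative usage sketch for get_image_size(): the path is hypothetical and the
# example only runs when such an image exists on disk.
def _example_get_image_size_usage():
    path = 'example.jpg'
    if os.path.exists(path):
        print('width x height:', get_image_size(path))
    else:
        print('skipping example, %s not found' % path)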
def logging(message):
print('%s %s' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), message))
<gh_stars>10-100
import math
def main():
x = float(raw_input("Enter the number: "))
guess = x / 2
guess1 = nextGuess(guess,x)
print "The square root is", guess1
def nextGuess(guess,x):
g = int(raw_input("Enter the number of iterations: "))
for i in range(g):
guess = (guess + (x/guess))/2
diff = math.sqrt(x)- guess
print "The difference is", diff
return guess
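# Note: nextGuess implements the Babylonian (Heron's) method,
#   guess_{n+1} = (guess_n + x / guess_n) / 2,
# which converges quadratically to sqrt(x) for any positive starting guess.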
main()
|
StarcoderdataPython
|
5705
|
# -*- coding: utf-8 -*-
# Copyright 2019 <NAME>. All Rights Reserved.
#
# Licensed under the MIT License;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/MIT
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
import multiprocessing
import numpy as np
import os
import random
import torch
import torch.backends.cudnn as cudnn
from kerosene.configs.configs import RunConfiguration, DatasetConfiguration
from kerosene.configs.parsers import YamlConfigurationParser
from kerosene.loggers.visdom import PlotType, PlotFrequency
from kerosene.loggers.visdom.config import VisdomConfiguration
from kerosene.loggers.visdom.visdom import VisdomLogger, VisdomData
from kerosene.training.trainers import ModelTrainerFactory
from samitorch.inputs.utils import augmented_sample_collate
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import DataLoader
from deepNormalize.config.parsers import ArgsParserFactory, ArgsParserType
from deepNormalize.factories.customModelFactory import CustomModelFactory
from deepNormalize.factories.customTrainerFactory import TrainerFactory
from deepNormalize.inputs.datasets import iSEGSliceDatasetFactory, MRBrainSSliceDatasetFactory, ABIDESliceDatasetFactory
from deepNormalize.nn.criterions import CustomCriterionFactory
from deepNormalize.utils.constants import *
from deepNormalize.utils.image_slicer import ImageReconstructor
cudnn.benchmark = True
cudnn.enabled = True
np.random.seed(42)
random.seed(42)
if __name__ == '__main__':
# Basic settings
logging.basicConfig(level=logging.INFO)
torch.set_num_threads(multiprocessing.cpu_count())
torch.set_num_interop_threads(multiprocessing.cpu_count())
args = ArgsParserFactory.create_parser(ArgsParserType.MODEL_TRAINING).parse_args()
# Create configurations.
run_config = RunConfiguration(use_amp=args.use_amp, local_rank=args.local_rank, amp_opt_level=args.amp_opt_level)
model_trainer_configs, training_config = YamlConfigurationParser.parse(args.config_file)
if not isinstance(model_trainer_configs, list):
model_trainer_configs = [model_trainer_configs]
dataset_configs = YamlConfigurationParser.parse_section(args.config_file, "dataset")
    dataset_configs = {k: DatasetConfiguration(v) for k, v in dataset_configs.items()}
data_augmentation_config = YamlConfigurationParser.parse_section(args.config_file, "data_augmentation")
config_html = [training_config.to_html(), list(map(lambda config: config.to_html(), dataset_configs.values())),
list(map(lambda config: config.to_html(), model_trainer_configs))]
# Prepare the data.
train_datasets = list()
valid_datasets = list()
test_datasets = list()
reconstruction_datasets = list()
iSEG_train = None
iSEG_CSV = None
MRBrainS_train = None
MRBrainS_CSV = None
ABIDE_train = None
ABIDE_CSV = None
iSEG_augmentation_strategy = None
MRBrainS_augmentation_strategy = None
ABIDE_augmentation_strategy = None
# Initialize the model trainers
model_trainer_factory = ModelTrainerFactory(model_factory=CustomModelFactory(),
criterion_factory=CustomCriterionFactory())
model_trainers = model_trainer_factory.create(model_trainer_configs)
if not isinstance(model_trainers, list):
model_trainers = [model_trainers]
# Create datasets
if dataset_configs.get("iSEG", None) is not None:
iSEG_train, iSEG_valid, iSEG_test, iSEG_reconstruction = iSEGSliceDatasetFactory.create_train_valid_test(
source_dir=dataset_configs["iSEG"].path,
modalities=dataset_configs["iSEG"].modalities,
dataset_id=ISEG_ID,
test_size=dataset_configs["iSEG"].validation_split,
max_subjects=dataset_configs["iSEG"].max_subjects,
max_num_patches=dataset_configs["iSEG"].max_num_patches,
augment=dataset_configs["iSEG"].augment,
patch_size=dataset_configs["iSEG"].patch_size,
step=dataset_configs["iSEG"].step,
test_patch_size=dataset_configs["iSEG"].test_patch_size,
test_step=dataset_configs["iSEG"].test_step,
data_augmentation_config=data_augmentation_config)
train_datasets.append(iSEG_train)
valid_datasets.append(iSEG_valid)
reconstruction_datasets.append(iSEG_reconstruction)
if dataset_configs.get("MRBrainS", None) is not None:
MRBrainS_train, MRBrainS_valid, MRBrainS_test, MRBrainS_reconstruction = MRBrainSSliceDatasetFactory.create_train_valid_test(
source_dir=dataset_configs["MRBrainS"].path,
modalities=dataset_configs["MRBrainS"].modalities,
dataset_id=MRBRAINS_ID,
test_size=dataset_configs["MRBrainS"].validation_split,
max_subjects=dataset_configs["MRBrainS"].max_subjects,
max_num_patches=dataset_configs["MRBrainS"].max_num_patches,
augment=dataset_configs["MRBrainS"].augment,
patch_size=dataset_configs["MRBrainS"].patch_size,
step=dataset_configs["MRBrainS"].step,
test_patch_size=dataset_configs["MRBrainS"].test_patch_size,
test_step=dataset_configs["MRBrainS"].test_step,
data_augmentation_config=data_augmentation_config)
test_datasets.append(MRBrainS_test)
reconstruction_datasets.append(MRBrainS_reconstruction)
if dataset_configs.get("ABIDE", None) is not None:
ABIDE_train, ABIDE_valid, ABIDE_test, ABIDE_reconstruction = ABIDESliceDatasetFactory.create_train_valid_test(
source_dir=dataset_configs["ABIDE"].path,
modalities=dataset_configs["ABIDE"].modalities,
dataset_id=ABIDE_ID,
sites=dataset_configs["ABIDE"].sites,
max_subjects=dataset_configs["ABIDE"].max_subjects,
test_size=dataset_configs["ABIDE"].validation_split,
max_num_patches=dataset_configs["ABIDE"].max_num_patches,
augment=dataset_configs["ABIDE"].augment,
patch_size=dataset_configs["ABIDE"].patch_size,
step=dataset_configs["ABIDE"].step,
test_patch_size=dataset_configs["ABIDE"].test_patch_size,
test_step=dataset_configs["ABIDE"].test_step,
data_augmentation_config=data_augmentation_config)
train_datasets.append(ABIDE_train)
valid_datasets.append(ABIDE_valid)
test_datasets.append(ABIDE_test)
reconstruction_datasets.append(ABIDE_reconstruction)
if len(list(dataset_configs.keys())) == 2:
segmentation_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]],
patch_size=dataset_configs["iSEG"].test_patch_size,
reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs["iSEG"].test_step,
models=[model_trainers[0]],
segment=True,
batch_size=8)
input_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]],
patch_size=dataset_configs["iSEG"].test_patch_size,
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50)
gt_reconstructor = ImageReconstructor(
[iSEG_reconstruction._target_images[0], MRBrainS_reconstruction._target_images[0]],
patch_size=dataset_configs["iSEG"].test_patch_size,
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
is_ground_truth=True,
batch_size=50)
if dataset_configs["iSEG"].augment:
augmented_input_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]],
patch_size=dataset_configs["iSEG"].test_patch_size,
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50,
alpha=data_augmentation_config["test"]["bias_field"]["alpha"][0],
prob_bias=data_augmentation_config["test"]["bias_field"]["prob_bias"],
snr=data_augmentation_config["test"]["noise"]["snr"],
prob_noise=data_augmentation_config["test"]["noise"]["prob_noise"])
else:
augmented_input_reconstructor = None
augmented_normalized_input_reconstructor = None
else:
segmentation_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0],
ABIDE_reconstruction._source_images[0]],
patch_size=(1, 32, 32, 32),
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
models=[model_trainers[0]],
normalize_and_segment=True,
batch_size=4)
input_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0],
ABIDE_reconstruction._source_images[0]],
patch_size=(1, 32, 32, 32),
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50)
gt_reconstructor = ImageReconstructor(
[iSEG_reconstruction._target_images[0], MRBrainS_reconstruction._target_images[0],
ABIDE_reconstruction._target_images[0]],
patch_size=(1, 32, 32, 32),
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50,
is_ground_truth=True)
if dataset_configs["iSEG"].augment:
augmented_input_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0],
ABIDE_reconstruction._source_images[0]],
patch_size=(1, 32, 32, 32),
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50,
alpha=data_augmentation_config["test"]["bias_field"]["alpha"][0],
prob_bias=data_augmentation_config["test"]["bias_field"]["prob_bias"],
snr=data_augmentation_config["test"]["noise"]["snr"],
prob_noise=data_augmentation_config["test"]["noise"]["prob_noise"])
else:
augmented_input_reconstructor = None
augmented_normalized_input_reconstructor = None
# Concat datasets.
if len(dataset_configs) > 1:
train_dataset = torch.utils.data.ConcatDataset(train_datasets)
valid_dataset = torch.utils.data.ConcatDataset(valid_datasets)
test_dataset = torch.utils.data.ConcatDataset(test_datasets)
else:
train_dataset = train_datasets[0]
valid_dataset = valid_datasets[0]
test_dataset = test_datasets[0]
# Create loaders.
dataloaders = list(map(lambda dataset: DataLoader(dataset,
training_config.batch_size,
sampler=None,
shuffle=True,
num_workers=args.num_workers,
collate_fn=augmented_sample_collate,
drop_last=True,
pin_memory=True),
[train_dataset, valid_dataset, test_dataset]))
# Initialize the loggers.
visdom_config = VisdomConfiguration.from_yml(args.config_file, "visdom")
exp = args.config_file.split("/")[-3:]
if visdom_config.save_destination is not None:
save_folder = visdom_config.save_destination + os.path.join(exp[0], exp[1],
os.path.basename(
os.path.normpath(visdom_config.env)))
else:
save_folder = "saves/{}".format(os.path.basename(os.path.normpath(visdom_config.env)))
[os.makedirs("{}/{}".format(save_folder, model), exist_ok=True)
for model in
["Discriminator", "Generator", "Segmenter"]]
visdom_logger = VisdomLogger(visdom_config)
visdom_logger(VisdomData("Experiment", "Experiment Config", PlotType.TEXT_PLOT, PlotFrequency.EVERY_EPOCH, None,
config_html))
visdom_logger(VisdomData("Experiment", "Patch count", PlotType.BAR_PLOT, PlotFrequency.EVERY_EPOCH,
x=[len(iSEG_train) if iSEG_train is not None else 0,
len(MRBrainS_train) if MRBrainS_train is not None else 0,
len(ABIDE_train) if ABIDE_train is not None else 0],
y=["iSEG", "MRBrainS", "ABIDE"], params={"opts": {"title": "Patch count"}}))
trainer = TrainerFactory(training_config.trainer).create(training_config,
model_trainers,
dataloaders,
reconstruction_datasets,
None,
input_reconstructor,
segmentation_reconstructor,
augmented_input_reconstructor,
None,
gt_reconstructor,
run_config,
dataset_configs,
save_folder,
visdom_logger)
trainer.train(training_config.nb_epochs)
|
StarcoderdataPython
|
67377
|
<reponame>wangyushengcp3/pytorch-auto-drive<filename>utils/datasets/culane.py
import torchvision
import os
import pickle
import numpy as np
from tqdm import tqdm
from PIL import Image
# CULane direct loading (work with the segmentation style lists)
class CULane(torchvision.datasets.VisionDataset):
def __init__(self, root, image_set, transforms=None, transform=None, target_transform=None,
ppl=31, gap=10, start=290):
super().__init__(root, transforms, transform, target_transform)
self.ppl = ppl
self.gap = gap
self.start = start # y coordinate to start annotation
# Checks
if not os.path.exists('./output'):
os.makedirs('./output')
if image_set not in ['train', 'val', 'test']:
raise ValueError
# Data list
with open(os.path.join(root, 'lists', image_set + '.txt'), "r") as f:
contents = [x.strip() for x in f.readlines()]
# Load filenames
if image_set == 'test' or image_set == 'val': # Test
self.images = [os.path.join(root, x + '.jpg') for x in contents]
self.targets = [os.path.join('./output', x + '.lines.txt') for x in contents]
else: # Train
self.images = [os.path.join(root, x[:x.find(' ')] + '.jpg') for x in contents]
self.targets = []
print('Loading targets into memory...')
processed_file = os.path.join(root, 'train_processed_targets')
if os.path.exists(processed_file):
with open(processed_file, 'rb') as f:
self.targets = pickle.load(f)
else:
                print('Pre-processing will only be performed once, please wait ~10 minutes.')
for x in tqdm(contents):
with open(os.path.join(root, x[:x.find(' ')] + '.lines.txt'), 'r') as f:
self.targets.append(self._load_target(f.readlines()))
with open(processed_file, 'wb') as f:
pickle.dump(self.targets, f)
print('Loading complete.')
assert len(self.targets) == len(self.images)
def __getitem__(self, index):
# Return x (input image) & y (L lane with N coordinates (x, y) as np.array (L x N x 2))
# Empty coordinates are marked by (-2, y)
# If just testing,
# y is the filename to store prediction
img = Image.open(self.images[index]).convert('RGB')
target = self.targets[index]
# Transforms
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target
def __len__(self):
return len(self.images)
def _load_target(self, lines):
# Read file content to lists (file content could be empty or variable number of lanes)
target = np.array([[[-2.0, self.start + i * self.gap] for i in range(self.ppl)]
for _ in range(len(lines))], dtype=np.float32)
for i in range(len(lines)): # lines=[] will end this immediately
temp = [float(k) for k in lines[i].strip().split(' ')]
for j in range(int(len(temp) / 2)):
x = temp[2 * j]
y = temp[2 * j + 1]
target[i][target[i][:, 1] == y] = [x, y]
return target
@staticmethod
def load_target_xy(lines):
# A direct loading of JSON file to a list of N x 2 numpy arrays
target = []
for line in lines:
temp = [float(x) for x in line.strip().split(' ')]
target.append(np.array(temp).reshape(-1, 2))
return target
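# Minimal usage sketch (the dataset root below is an assumption, not part of this file):
#   dataset = CULane(root='/data/CULane', image_set='train')
#   img, target = dataset[0]  # target: L x 31 x 2 array of (x, y) points; x == -2 marks a missing point
#   loader = torch.utils.data.DataLoader(dataset, batch_size=8, shuffle=True)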
|
StarcoderdataPython
|
1664685
|
<filename>deepmd/xyz2raw.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import json
import argparse
from collections import Counter
from ase.io import read, write
from tqdm import tqdm
import dpdata
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'-df', '--datafile',
default='data.xyz', help='time series files'
)
parser.add_argument(
'-ej', '--enjson',
default='e0.json', help='json file with single atom energies'
)
args = parser.parse_args()
#
substract_baseline = False
if os.path.exists(args.enjson):
substract_baseline = True
with open(args.enjson, 'r') as fopen:
e0_dict = json.load(fopen)
# sanity check, dpdata only needs species, pos, Z, force, virial
# ase-extxyz is inconsistent with quip-xyz, especially the force
frames = read(args.datafile, ':')
print('number of frames ', len(frames))
atomic_properties = ['numbers', 'positions', 'forces']
calc_props = ['energy', 'forces']
for atoms in tqdm(frames):
# remove extra properties in atoms
cur_properties = list(atoms.arrays.keys())
for prop in cur_properties:
if prop not in atomic_properties:
#atoms.arrays.pop(prop)
del atoms.arrays[prop]
# atoms info
# del atoms.info['feature_vector']
# TODO: check if calculator exists
        atoms.calc = None # ase copies xyz info to SinglePointCalculator?
stored_forces = atoms.arrays.get('forces', None)
if stored_forces is not None:
atoms.arrays['force'] = stored_forces.copy()
del atoms.arrays['forces']
# calc
#cur_calc_props = list(atoms.calc.results.keys())
#for prop in cur_calc_props:
# if prop not in calc_props:
# del atoms.calc.results[prop]
# move forces to force
# check e0
if substract_baseline:
chemical_symbols = atoms.get_chemical_symbols()
sym_dict = Counter(chemical_symbols)
tot_e0 = 0.0
for elem, num in sym_dict.items():
tot_e0 += num*e0_dict[elem]
atoms.info['energy'] -= tot_e0
#print(tot_e0)
#print(sym_dict)
write('dp_raw.xyz', frames)
#
xyz_multi_systems = dpdata.MultiSystems.from_file(
file_name='./dp_raw.xyz',
fmt='quip/gap/xyz'
)
print(xyz_multi_systems)
xyz_multi_systems.to_deepmd_raw('./raw_data/')
pass
|
StarcoderdataPython
|
1751893
|
import psutil
from monitor.host import base
class HostMonitorPsutilDriver(base.BaseHostMonitorDriver):
def get_vmem_total(self):
return psutil.virtual_memory().total
def get_vmem_used(self):
return psutil.virtual_memory().used
def get_disk_io(self):
current_disk_io = psutil.disk_io_counters()
disk_io = self._get_delta_disk_io(
current_disk_io.read_bytes, current_disk_io.write_bytes)
return disk_io
def get_net_io(self):
current_net_io = psutil.net_io_counters()
net_io = self._get_delta_net_io(
current_net_io.bytes_sent, current_net_io.bytes_recv
)
return net_io
def get_cpu_percent(self, interval=1):
return psutil.cpu_percent(interval=interval)
def get_vcore_num(self):
return psutil.cpu_count()
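# Usage sketch (assumes base.BaseHostMonitorDriver needs no constructor arguments and
# provides the _get_delta_disk_io/_get_delta_net_io helpers used above):
#   driver = HostMonitorPsutilDriver()
#   driver.get_cpu_percent(interval=1)                # e.g. 12.5 (percent)
#   driver.get_vmem_used() / driver.get_vmem_total()  # fraction of memory in use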
|
StarcoderdataPython
|
3283683
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import socket
import llnl.util.tty as tty
from spack import *
def cmake_cache_entry(name, value, vtype=None):
"""
Helper that creates CMake cache entry strings used in
'host-config' files.
"""
if vtype is None:
if value == "ON" or value == "OFF":
vtype = "BOOL"
else:
vtype = "PATH"
return 'set({0} "{1}" CACHE {2} "")\n\n'.format(name, value, vtype)
class Dray(Package, CudaPackage):
"""High-Order Mesh Ray Tracer."""
homepage = "https://github.com/LLNL/devil_ray"
git = "https://github.com/LLNL/devil_ray.git"
url = "https://github.com/LLNL/devil_ray/releases/download/v0.1.2/dray-v0.1.2.tar.gz"
maintainers = ['cyrush']
version('develop', branch='develop', submodules='True')
version('0.1.8', sha256='ae78ca6a5a31f06f6400a4a1ff6fc1d75347c8b41027a80662179f5b877eee30')
version('0.1.7', sha256='11ea794c1a24d7ed0d76bad7209d62bafc033ec40a2ea3a00e68fe598c6aa46d')
version('0.1.6', sha256='43f39039599e3493cbbaeaf5621b611bef301ff504bed6e32c98f30bb2179e92')
version('0.1.5', sha256='aaf0975561a8e7910b9353e2dc30bd78abf9f01c306ec042422b7da223d3a8b8')
version('0.1.4', sha256='e763a3aa537b23486a4788f9d68db0a3eb545f6a2e617cd7c8a876682ca2d0a0')
version('0.1.3', sha256='b2f624a072463189997343b1ed911cc34c9bb1b6c7f0c3e48efeb40c05dd0d92')
version('0.1.2', sha256='46937f20124b28dc78a634e8e063a3e7a3bbfd9f424ce2680b08417010c376da')
version('0.1.1', sha256='e5daa49ee3367c087f5028dc5a08655298beb318014c6f3f65ef4a08fcbe346c')
version('0.1.0', sha256='8b341138e1069361351e0a94478608c5af479cca76e2f97d556229aed45c0169')
variant('openmp', default=True, description='Build OpenMP backend')
variant("shared", default=True, description="Build as shared libs")
variant("test", default=True, description='Build unit tests')
variant("utils", default=True, description='Build utilities')
variant("logging", default=False, description='Enable logging')
variant("stats", default=False, description='Enable stats')
variant("mpi", default=True, description='Enable MPI compiler')
# set to false for systems that implicitly link mpi
variant('blt_find_mpi', default=True, description='Use BLT CMake Find MPI logic')
def propagate_cuda_arch(package, spec=None):
if not spec:
spec = ''
for cuda_arch in CudaPackage.cuda_arch_values:
depends_on('{0} +cuda cuda_arch={1}'
.format(package, cuda_arch),
when='{0} +cuda cuda_arch={1}'
.format(spec, cuda_arch))
depends_on('mpi', when='+mpi')
depends_on('[email protected]:', type='build')
depends_on('[email protected]:', when='+cuda', type='build')
depends_on("conduit~shared", when="~shared")
depends_on("conduit+shared", when="+shared")
depends_on("apcomp~mpi", when="~mpi")
depends_on("apcomp+mpi", when="+mpi")
depends_on("apcomp~openmp", when="~openmp")
depends_on("apcomp+openmp", when="+openmp")
depends_on("apcomp~shared", when="~shared")
depends_on("apcomp+shared", when="+shared")
depends_on("raja@:0.13", when="@:0.1.6")
depends_on("raja~cuda", when="~cuda")
depends_on("raja+cuda", when="+cuda")
propagate_cuda_arch('raja')
depends_on("raja~shared", when="~shared")
depends_on("raja+shared", when="+shared")
depends_on("raja~openmp", when="~openmp")
depends_on("raja+openmp", when="+openmp")
depends_on("umpire@:4.9", when="@:0.1.6")
# Only use umpire cuda if not shared.
depends_on("umpire+cuda", when="+cuda")
depends_on("umpire~cuda", when="~cuda")
depends_on("umpire+cuda~shared", when="+cuda+shared")
depends_on("umpire~cuda+shared", when="~cuda+shared")
propagate_cuda_arch('umpire')
depends_on("umpire~shared", when="~shared")
depends_on("mfem+conduit~threadsafe")
depends_on("mfem+shared", when="+shared")
depends_on("mfem~shared", when="~shared")
def setup_build_environment(self, env):
env.set('CTEST_OUTPUT_ON_FAILURE', '1')
def install(self, spec, prefix):
"""
Build and install Devil Ray.
"""
with working_dir('spack-build', create=True):
host_cfg_fname = self.create_host_config(spec,
prefix)
cmake_args = []
# if we have a static build, we need to avoid any of
# spack's default cmake settings related to rpaths
# (see: https://github.com/LLNL/spack/issues/2658)
if "+shared" in spec:
cmake_args.extend(std_cmake_args)
else:
for arg in std_cmake_args:
if arg.count("RPATH") == 0:
cmake_args.append(arg)
cmake_args.extend(["-C", host_cfg_fname, "../src"])
print("Configuring Devil Ray...")
cmake(*cmake_args)
print("Building Devil Ray...")
make()
# run unit tests if requested
if "+test" in spec and self.run_tests:
print("Running Devil Ray Unit Tests...")
make("test")
print("Installing Devil Ray...")
make("install")
# install copy of host config for provenance
install(host_cfg_fname, prefix)
def create_host_config(self, spec, prefix):
"""
This method creates a 'host-config' file that specifies
all of the options used to configure and build ascent.
For more details about 'host-config' files see:
https://ascent.readthedocs.io/en/latest/BuildingAscent.html
"""
#######################
# Compiler Info
#######################
c_compiler = env["SPACK_CC"]
cpp_compiler = env["SPACK_CXX"]
#######################################################################
# By directly fetching the names of the actual compilers we appear
# to doing something evil here, but this is necessary to create a
# 'host config' file that works outside of the spack install env.
#######################################################################
sys_type = spec.architecture
# if on llnl systems, we can use the SYS_TYPE
if "SYS_TYPE" in env:
sys_type = env["SYS_TYPE"]
##############################################
# Find and record what CMake is used
##############################################
if "+cmake" in spec:
cmake_exe = spec['cmake'].command.path
else:
cmake_exe = which("cmake")
if cmake_exe is None:
msg = 'failed to find CMake (and cmake variant is off)'
raise RuntimeError(msg)
cmake_exe = cmake_exe.path
host_cfg_fname = "%s-%s-%s-devil_ray.cmake" % (socket.gethostname(),
sys_type,
spec.compiler)
cfg = open(host_cfg_fname, "w")
cfg.write("##################################\n")
cfg.write("# spack generated host-config\n")
cfg.write("##################################\n")
cfg.write("# {0}-{1}\n".format(sys_type, spec.compiler))
cfg.write("##################################\n\n")
# Include path to cmake for reference
cfg.write("# cmake from spack \n")
cfg.write("# cmake executable path: %s\n\n" % cmake_exe)
#######################
# Compiler Settings
#######################
cfg.write("#######\n")
cfg.write("# using %s compiler spec\n" % spec.compiler)
cfg.write("#######\n\n")
cfg.write("# c compiler used by spack\n")
cfg.write(cmake_cache_entry("CMAKE_C_COMPILER", c_compiler))
cfg.write("# cpp compiler used by spack\n")
cfg.write(cmake_cache_entry("CMAKE_CXX_COMPILER", cpp_compiler))
if "+mpi" in spec:
mpicc_path = spec['mpi'].mpicc
mpicxx_path = spec['mpi'].mpicxx
# if we are using compiler wrappers on cray systems
# use those for mpi wrappers, b/c spec['mpi'].mpicxx
# etc make return the spack compiler wrappers
# which can trip up mpi detection in CMake 3.14
if spec['mpi'].mpicc == spack_cc:
mpicc_path = c_compiler
mpicxx_path = cpp_compiler
cfg.write(cmake_cache_entry("ENABLE_MPI", "ON"))
cfg.write(cmake_cache_entry("MPI_C_COMPILER", mpicc_path))
cfg.write(cmake_cache_entry("MPI_CXX_COMPILER", mpicxx_path))
if "+blt_find_mpi" in spec:
cfg.write(cmake_cache_entry("ENABLE_FIND_MPI", "ON"))
else:
cfg.write(cmake_cache_entry("ENABLE_FIND_MPI", "OFF"))
mpiexe_bin = join_path(spec['mpi'].prefix.bin, 'mpiexec')
if os.path.isfile(mpiexe_bin):
# starting with cmake 3.10, FindMPI expects MPIEXEC_EXECUTABLE
# vs the older versions which expect MPIEXEC
if self.spec["cmake"].satisfies('@3.10:'):
cfg.write(cmake_cache_entry("MPIEXEC_EXECUTABLE",
mpiexe_bin))
else:
cfg.write(cmake_cache_entry("MPIEXEC",
mpiexe_bin))
else:
cfg.write(cmake_cache_entry("ENABLE_MPI", "OFF"))
# use global spack compiler flags
cppflags = ' '.join(spec.compiler_flags['cppflags'])
if cppflags:
# avoid always ending up with ' ' with no flags defined
cppflags += ' '
cflags = cppflags + ' '.join(spec.compiler_flags['cflags'])
if cflags:
cfg.write(cmake_cache_entry("CMAKE_C_FLAGS", cflags))
cxxflags = cppflags + ' '.join(spec.compiler_flags['cxxflags'])
if cxxflags:
cfg.write(cmake_cache_entry("CMAKE_CXX_FLAGS", cxxflags))
fflags = ' '.join(spec.compiler_flags['fflags'])
if self.spec.satisfies('%cce'):
fflags += " -ef"
if fflags:
cfg.write(cmake_cache_entry("CMAKE_Fortran_FLAGS", fflags))
#######################
# Backends
#######################
cfg.write("# CUDA Support\n")
if "+cuda" in spec:
cfg.write(cmake_cache_entry("ENABLE_CUDA", "ON"))
if 'cuda_arch' in spec.variants:
cuda_value = spec.variants['cuda_arch'].value
cuda_arch = cuda_value[0]
cfg.write(cmake_cache_entry('CUDA_ARCH',
'sm_{0}'.format(cuda_arch)))
else:
cfg.write(cmake_cache_entry("ENABLE_CUDA", "OFF"))
if "+openmp" in spec:
cfg.write(cmake_cache_entry("ENABLE_OPENMP", "ON"))
else:
cfg.write(cmake_cache_entry("ENABLE_OPENMP", "OFF"))
# shared vs static libs
if "+shared" in spec:
cfg.write(cmake_cache_entry("BUILD_SHARED_LIBS", "ON"))
else:
cfg.write(cmake_cache_entry("BUILD_SHARED_LIBS", "OFF"))
#######################
# Unit Tests
#######################
if "+test" in spec:
cfg.write(cmake_cache_entry("DRAY_ENABLE_TESTS", "ON"))
# we need this to control BLT tests
cfg.write(cmake_cache_entry("ENABLE_TESTS", "ON"))
else:
cfg.write(cmake_cache_entry("DRAY_ENABLE_TESTS", "OFF"))
# we need this to control BLT tests
cfg.write(cmake_cache_entry("ENABLE_TESTS", "OFF"))
#######################
# Utilities
#######################
if "+utils" in spec:
cfg.write(cmake_cache_entry("DRAY_ENABLE_UTILS", "ON"))
else:
cfg.write(cmake_cache_entry("DRAY_ENABLE_UTILS", "OFF"))
#######################
# Logging
#######################
if "+logging" in spec:
cfg.write(cmake_cache_entry("ENABLE_LOGGING", "ON"))
else:
cfg.write(cmake_cache_entry("ENABLE_LOGGING", "OFF"))
#######################
# Status
#######################
if "+stats" in spec:
cfg.write(cmake_cache_entry("ENABLE_STATS", "ON"))
else:
cfg.write(cmake_cache_entry("ENABLE_STATS", "OFF"))
#######################################################################
# Core Dependencies
#######################################################################
cfg.write("# conduit from spack \n")
cfg.write(cmake_cache_entry("CONDUIT_DIR", spec['conduit'].prefix))
cfg.write("# mfem from spack \n")
cfg.write(cmake_cache_entry("MFEM_DIR", spec['mfem'].prefix))
cfg.write("# raja from spack \n")
cfg.write(cmake_cache_entry("RAJA_DIR", spec['raja'].prefix))
cfg.write("# umpire from spack \n")
cfg.write(cmake_cache_entry("UMPIRE_DIR", spec['umpire'].prefix))
cfg.write("# apcompositor from spack \n")
cfg.write(cmake_cache_entry("APCOMP_DIR", spec['apcomp'].prefix))
cfg.write("##################################\n")
cfg.write("# end spack generated host-config\n")
cfg.write("##################################\n")
cfg.close()
host_cfg_fname = os.path.abspath(host_cfg_fname)
tty.info("spack generated conduit host-config file: " + host_cfg_fname)
return host_cfg_fname
|
StarcoderdataPython
|
3301177
|
from scipy import fftpack
import matplotlib.pyplot as plt
import sys
import numpy as np
image = plt.imread(sys.argv[1])  # note: plt.imread has no flatten option; a greyscale input image is assumed here
fft2 = fftpack.fft2(image)
plt.imshow(20*np.log10(abs(fft2)))
plt.show()
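# Note: 20*log10(|F|) displays the magnitude spectrum in decibels; because no fftshift
# is applied, the low-frequency components appear at the corners of the plot.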
|
StarcoderdataPython
|
1667284
|
# -*- coding: utf-8 -*-
"""The decompressor interface."""
import abc
class Decompressor(object):
"""Decompressor interface."""
# pylint: disable=redundant-returns-doc
@abc.abstractmethod
def Decompress(self, compressed_data):
"""Decompresses the compressed data.
Args:
compressed_data (bytes): compressed data.
Returns:
tuple(bytes, bytes): uncompressed data and remaining compressed data.
"""
|
StarcoderdataPython
|
1614978
|
<reponame>daicang/Euler<gh_stars>0
# Find a*b, for |a|, |b| < 1000, s.t. n^2 + a*n + b
# produces the maximum number of primes for consecutive values of n
def prime_under(n):
primes = [2]
for x in range(3, n):
for p in primes:
if p * p > x:
primes.append(x)
break
if x % p == 0:
break
return primes
def is_prime(x, primes):
for p in primes:
if p == x:
return True
elif p > x:
return False
def solve():
primes_b = prime_under(1000)
primes_result = prime_under(998*998+999*998+999)
counter_max = 0
for b in primes_b:
        # increment: 2n + a + 1, n in [0, b-2]
for a in range(-b+2, 1000, 2):
counter = 1
p = b
for n in range(0, b):
p += 2*n + a + 1
if not is_prime(p, primes_result):
break
counter += 1
if counter > counter_max:
print counter, a, b
counter_max = counter
result_a, result_b = a, b
return abs(result_a*result_b)
print solve()
|
StarcoderdataPython
|
3236194
|
<reponame>mrcrilly/vs-vlan-db
from vsvlandb import api
from flask import render_template, request
from flask.ext import restful
# Define our endpoints:
class ApiVLANs(restful.Resource):
def get(self):
return {'get': 'Not implemented'}
def post(self):
return {'post': 'Not implemented'}
def put(self):
return {'put': 'Not implemented'}
def delete(self):
return {'delete': 'Not implemented'}
class ApiSubnets(restful.Resource):
def get(self):
pass
def post(self):
return {'post': 'Not implemented'}
def put(self):
return {'put': 'Not implemented'}
def delete(self):
return {'delete': 'Not implemented'}
class ApiSites(restful.Resource):
def get(self):
return {'get': 'Not implemented'}
def post(self):
return {'post': 'Not implemented'}
def put(self):
return {'put': 'Not implemented'}
def delete(self):
return {'delete': 'Not implemented'}
# Add them to the API:
api.add_resource(ApiVLANs, '/api/vlans/')
api.add_resource(ApiSubnets, '/api/subnets/')
api.add_resource(ApiSites, '/api/sites/')
|
StarcoderdataPython
|
59516
|
<filename>src/nepal/tests/test_container.py<gh_stars>1-10
# encoding: utf-8
from __future__ import print_function
import json
import pytest
from nepal.models.container import Container
from nepal.models.node import Node
from profile.models.user import User
from toolbox.icepick import ordered
@pytest.mark.django_db
class ContainerTest:
@pytest.fixture(autouse=True)
def setUp(self, client):
self.client = client
self.user = User.objects.create_user(email='<EMAIL>', password='<PASSWORD>')
response = self.client.post('/users/login', data={'email': '<EMAIL>',
'password': '<PASSWORD>'})
token = response.data.get('token')
self.headers = {'HTTP_AUTHORIZATION': 'JWT {0}'.format(token)}
node1 = {
'name': 'node1',
'so': 'centos',
'provider': 'do',
'ip': '192.168.127.12'
}
self.node = Node.objects.create(**node1)
container1 = {
'id': 100,
'name': 'container1',
'config': {}
}
self.container = Container.objects.create(**container1)
self.container.nodes.add(self.node)
@pytest.mark.django_db(transaction=True)
def test_create_new_container(self):
data = {
'name': 'container_test',
'nodes': [self.node.id],
'config': {
"registry": {
"image": "registry:2.4",
"environment": [
"RACK_ENV=development",
"SHOW=true",
"DEBUG=False"
],
"volumes": [
"/opt/registry/tmp:/tmp/registry-dev:Z",
"/opt/nginx/certs:/certs:Z"
],
"expose": [
5000
],
"ports": [
"5000:5000"
]
}
}
}
result = self.client.post('/containers', data=json.dumps(data),
content_type='application/json', **self.headers)
# TODO: must be assert more things
assert 201 == result.status_code
result_db = Container.objects.get(name='container_test')
assert 'container_test' == result_db.name
assert ordered(data.get('config')) == ordered(result_db.config)
@pytest.mark.xfail
@pytest.mark.django_db(transaction=True)
def test_start_a_container(self):
response = self.client.get('/conteiners/1/?action=start',
content_type='application/json', **self.headers)
assert {} == response.data
@pytest.mark.django_db(transaction=True)
def test_get_container_all(self):
response = self.client.get('/containers',
content_type='application/json', **self.headers)
assert 1 == len(response.data)
@pytest.mark.django_db(transaction=True)
def test_get_container_by_id(self):
response = self.client.get('/containers/100', content_type='application/json',
**self.headers)
result = response.data
assert 200 == response.status_code
assert 100 == result.get('id')
@pytest.mark.django_db(transaction=True)
def test_update_container_ok(self):
data = {
'name': 'app1',
'nodes': [self.node.id],
'config': {
"registry": {
"image": "registry:2.4",
"environment": [
"RACK_ENV=development",
"SHOW=true",
"DEBUG=False"
],
"expose": [
5000
],
"ports": [
"5000:5000"
]
}
}
}
response = self.client.put('/containers/100', data=json.dumps(data),
content_type='application/json', **self.headers)
assert 200 == response.status_code
@pytest.mark.django_db(transaction=True)
def test_try_update_container_not_found(self):
response = self.client.put('/containers/132', data=json.dumps({}),
content_type='application/json', **self.headers)
assert 404 == response.status_code
@pytest.mark.django_db(transaction=True)
def test_delete_container_ok(self):
response = self.client.delete('/containers/100', content_type='application/json',
**self.headers)
assert 204 == response.status_code
@pytest.mark.django_db(transaction=True)
def test_try_delete_container_that_not_exist(self):
response = self.client.delete('/containers/122', content_type='application/json',
**self.headers)
assert 404 == response.status_code
@pytest.mark.django_db(transaction=True)
def test_get_count_nodes(self):
result = self.client.get('/containers?action=count', content_type='application/json',
**self.headers)
assert 200 == result.status_code
assert 1 == result.data.get('result')
|
StarcoderdataPython
|
18520
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# from unittest import mock
# from datakit_dworld.push import Push
def test_push(capsys):
"""Sample pytest test function with a built-in pytest fixture as an argument.
"""
# cmd = Greeting(None, None, cmd_name='dworld push')
# parsed_args = mock.Mock()
# parsed_args.greeting = 'Hello world!'
# cmd.run(parsed_args)
# out, err = capsys.readouterr()
# assert 'Hello world!' in out
|
StarcoderdataPython
|
3361156
|
<reponame>Wooble/rustplus<filename>rustplus/api/remote/heartbeat.py
import asyncio
import time
class HeartBeat:
def __init__(self, rust_api) -> None:
self.rust_api = rust_api
self.next_run = time.time()
self.running = False
async def start_beat(self) -> None:
if self.running:
return
self.running = True
asyncio.create_task(self._heart_beat())
async def _heart_beat(self) -> None:
while True:
if time.time() >= self.next_run:
await self.beat()
else:
await asyncio.sleep(1)
async def beat(self) -> None:
if self.rust_api.remote.ws is not None and self.rust_api.remote.ws.open:
await self.rust_api._send_wakeup_request()
def reset_rythm(self) -> None:
self.next_run = time.time() + 240
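# Usage sketch (rust_api is assumed to be a socket-like object exposing remote.ws and
# _send_wakeup_request(), as used above; those names are not defined in this file):
#   heartbeat = HeartBeat(rust_api)
#   await heartbeat.start_beat()  # schedules the keep-alive loop
#   heartbeat.reset_rythm()       # push the next beat 240 seconds into the future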
|
StarcoderdataPython
|
117781
|
# Generated by Django 2.0.13 on 2020-03-04 14:55
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ddcz', '0023_skills'),
]
operations = [
migrations.RenameModel(
old_name='Dovednosti',
new_name='Skill',
),
]
|
StarcoderdataPython
|
3216942
|
<filename>Redmash/redmash_db.py
#/u/GoldenSights
import traceback
import sys
import time
import datetime
import string
import sqlite3
'''USER CONFIGURATION'''
#TIMESTAMP = '%A %d %B %Y'
TIMESTAMP = '%a %d %b %Y'
#The time format.
# "%A %d %B %Y" = "Wendesday 04 June 2014"
#http://docs.python.org/2/library/time.html#time.strftime
HEADER = ""
#Put this at the top of the .txt file
FORMAT = "_timestamp_: [_title_](_slink_) - /u/_author_ (+_score_)"
FORMAT_HTML = "_timestamp_: <a href=\"_shortlink_\">[_flairtext_] _title_</a> - <a href=\"_authorlink_\">_author_</a> (+_score_)<br>"
HTMLHEADER = '<html style="font-family:Consolas;font-size:10pt;">'
TSFORMAT = ""
#USE THESE INJECTORS TO CREATE CUSTOM OUTPUT
#_timestamp_ which follows the TIMESTAMP format
#_title_
#_url_
#_subreddit_
#_shortlink_
#_author_
#_authorlink_
#_numcomments_
#_score_
#_flairtext_
#_flaircss_
READ_FROM_FILE = ""
PRINTFILE = ""
SCORETHRESH = 0
HTMLMODE = False
USERMODE = False
BREAKDOWNMODE = False
EXTENSION = '.txt'
# Variables are input by user during the
# inputvars() method
'''All done!'''
class Post:
#Generic class to convert SQL columns into an object
pass
sql = None
cur = None
# 0 - idint
# 1 - idstr
# 2 - created
# 3 - self
# 4 - nsfw
# 5 - author
# 6 - title
# 7 - url
# 8 - selftext
# 9 - score
# 10 - subreddit
# 11 - distinguished
# 12 - textlen
# 13 - num_comments
# 14 - flair_text
# 15 - flair_css_class
def createpost(postdata):
post = Post()
post.id = postdata[1]
if 't3_' in post.id or 't1_' in post.id:
post.fullname = post.id
post.id = post.id.split('_')[1]
else:
post.fullname = 't3_' + post.id
post.type = int(post.fullname.split('_')[0][-1])
post.created_utc = postdata[2]
post.is_self = postdata[3]
post.over_18 = postdata[4]
post.author = postdata[5]
post.title = postdata[6]
post.title = post.title.replace('\n', '')
post.url = postdata[7]
post.selftext = postdata[8]
post.score = postdata[9]
post.subreddit = postdata[10]
post.distinguished = postdata[11]
post.textlen = postdata[12]
post.num_comments = postdata[13]
post.link_flair_text = postdata[14]
post.link_flair_css_class = postdata[15]
post.short_link = 'http://redd.it/' + post.id
return post
def preparefile(filesuffix):
filesuffix += EXTENSION
listfile = open(PRINTFILE + filesuffix, 'w', encoding='utf-8')
if HTMLMODE is True:
print(HTMLHEADER, file=listfile)
return listfile
def closefile(listfile):
if HTMLMODE is True:
print('</html>', file=listfile)
listfile.close()
def work(listfile):
if HEADER != '':
print(HEADER, file=listfile)
previous_timestamp = ''
while True:
post = cur.fetchone()
if post is None:
break
post = createpost(post)
if post.score < SCORETHRESH:
continue
if post.type != 3:
continue
timestamp = post.created_utc
timestamp = datetime.datetime.fromtimestamp(int(timestamp)).strftime(TIMESTAMP)
if HTMLMODE:
final = FORMAT_HTML
else:
final = FORMAT
if timestamp != previous_timestamp:
final = TSFORMAT + final
final = final.replace('_timestamp_', timestamp)
final = final.replace('_title_', post.title)
flair_text = post.link_flair_text if post.link_flair_text else ""
flair_css = post.link_flair_css_class if post.link_flair_css_class else ""
post.link_flair_text = flair_text
post.link_flair_css_class = flair_css
final = final.replace('_flairtext_', flair_text)
final = final.replace('_flaircss_', flair_css)
authorlink = 'http://reddit.com/u/' + post.author
final = final.replace('_author_', post.author)
final = final.replace('_authorlink_', authorlink)
final = final.replace('_subreddit_', post.subreddit)
url = post.url
if url is None:
url = post.short_link
else:
url = url.replace('http://www.reddit.com', 'http://np.reddit.com')
final = final.replace('_url_', url)
shortlink = post.short_link
#slink = slink.replace('http://', 'http://np.')
final = final.replace('_slink_', shortlink)
final = final.replace('_flairtext_', flair_text)
final = final.replace('_score_', str(post.score))
final = final.replace('_numcomments_', str(post.num_comments))
print(final, file=listfile)
previous_timestamp = timestamp
def writefiles():
print('Writing time files')
listfile = preparefile('_date')
cur.execute('SELECT * FROM posts WHERE score >= ? ORDER BY created DESC', [SCORETHRESH])
work(listfile)
closefile(listfile)
print('Writing title files')
listfile = preparefile('_title')
cur.execute('SELECT * FROM posts WHERE score >= ? ORDER BY title ASC', [SCORETHRESH])
work(listfile)
closefile(listfile)
print('Writing score files')
listfile = preparefile('_score')
cur.execute('SELECT * FROM posts WHERE score >= ? ORDER BY score DESC', [SCORETHRESH])
work(listfile)
closefile(listfile)
if USERMODE is False:
print('Writing author files')
listfile = preparefile('_author')
cur.execute('SELECT * FROM posts WHERE score >= ? ORDER BY author ASC', [SCORETHRESH])
work(listfile)
closefile(listfile)
if USERMODE is True:
print('Writing subreddit files')
listfile = preparefile('_subreddit')
cur.execute('SELECT * FROM posts WHERE score >= ? ORDER BY subreddit ASC', [SCORETHRESH])
work(listfile)
closefile(listfile)
print('Writing flair file')
listfile = preparefile('_flair')
cur.execute('SELECT * FROM posts WHERE score >= ? AND flair_text IS NOT NULL ORDER BY flair_text, created ASC', [SCORETHRESH])
work(listfile)
cur.execute('SELECT * FROM posts WHERE score >= ? AND flair_text IS NULL ORDER BY flair_text, created ASC', [SCORETHRESH])
work(listfile)
closefile(listfile)
print('Done.')
def breakdown(doreturn=False, mode='user'):
print('\nBreaking it down...')
listfile = preparefile('')
if mode == 'subreddit':
cur.execute('SELECT * FROM posts WHERE score >= ? ORDER BY author ASC', [SCORETHRESH])
if mode == 'user':
cur.execute('SELECT * FROM posts WHERE score >= ? ORDER BY subreddit ASC', [SCORETHRESH])
count_submissions = 0
count_comments = 0
previous = ''
breakdowndict = {}
while True:
post = cur.fetchone()
if post is None:
breakdowndict[previous] = {'submissions':count_submissions, 'comments':count_comments}
break
post = createpost(post)
if mode == 'subreddit':
relevant = post.author
elif mode == 'user':
relevant = post.subreddit
if relevant != previous:
breakdowndict[previous] = {'submissions':count_submissions, 'comments':count_comments}
previous = relevant
count_submissions = 0
count_comments = 0
if post.type == 1:
count_comments += 1
if post.type == 3:
count_submissions += 1
del breakdowndict['']
if doreturn is True:
return breakdowndict
keys = list(breakdowndict.keys())
longestkey = max([len(k) for k in keys])
keys.sort(key=lambda x: (breakdowndict[x]['submissions'] + breakdowndict[x]['comments'], x), reverse=True)
out = []
for k in keys:
relevant = (' '*(longestkey-len(k))) + ('"%s"' % k)
submissions = breakdowndict[k]['submissions']
comments = breakdowndict[k]['comments']
o = '%s:{%s:%d, %s:%d}' % (relevant, '"submissions"', submissions, '"comments"', comments)
out.append(o)
out = ',\n'.join(out)
out = '{\n' + out + '\n}'
print(out, file=listfile)
#json.dump(breakdowndict, listfile, sort_keys=True, indent=4)
def inputvars():
global READ_FROM_FILE
global PRINTFILE
global SCORETHRESH
global HTMLMODE
global USERMODE
global BREAKDOWNMODE
global EXTENSION
global sql
global cur
try:
READ_FROM_FILE = sys.argv[1]
except IndexError:
READ_FROM_FILE = input('] Input database = ')
if READ_FROM_FILE[-3:] != '.db':
READ_FROM_FILE += '.db'
filename = READ_FROM_FILE.replace('\\', '/')
filename = filename.split('/')[-1]
if filename[0] == '@':
USERMODE = True
try:
PRINTFILE = sys.argv[2]
except IndexError:
PRINTFILE = input('] Output filename = ')
try:
SCORETHRESH = int(sys.argv[3])
except IndexError:
SCORETHRESH = int(input('] Score threshold = '))
HTMLMODE = '.html' in PRINTFILE
BREAKDOWNMODE = '.json' in PRINTFILE
if HTMLMODE:
EXTENSION = '.html'
PRINTFILE = PRINTFILE.replace('.html', '')
elif BREAKDOWNMODE:
EXTENSION = '.json'
PRINTFILE = PRINTFILE.replace('.json', '')
else:
EXTENSION = '.txt'
PRINTFILE = PRINTFILE.replace('.txt', '')
sql = sqlite3.connect(READ_FROM_FILE)
cur = sql.cursor()
def main():
inputvars()
if BREAKDOWNMODE is False:
writefiles()
else:
if USERMODE is True:
breakdown(mode='user')
else:
breakdown(mode='subreddit')
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
91621
|
<reponame>rajeevs1992/pyhealthvault
from healthvaultlib.helpers.requestmanager import RequestManager
class Method:
def __init__(self, request, response):
self.request = request
self.response = response
def execute(self, connection):
requestmgr = RequestManager(self, connection)
requestmgr.makerequest()
|
StarcoderdataPython
|
1762974
|
from shared.utils import get_db_ref
db = get_db_ref()
class ModelBasic(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
model_name = db.Column(db.String(50), nullable=False)
model_dataset = db.Column(db.Integer, db.ForeignKey('data_file.id'))
model_type = db.Column(db.Integer, nullable=False)
target_class = db.Column(db.String(50), nullable=False)
configs = db.relationship("ModelConfigs", cascade="all,delete", backref="model")
results = db.relationship("ModelResults", cascade="all,delete", backref="model")
created_on = db.Column(db.DateTime, server_default=db.func.now())
updated_on = db.Column(db.DateTime, server_default=db.func.now(), server_onupdate=db.func.now())
class ModelConfigs(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
parameter = db.Column(db.String(50), nullable=False)
value = db.Column(db.String(50), nullable=False)
model_id = db.Column(db.Integer, db.ForeignKey('model_basic.id'))
created_on = db.Column(db.DateTime, server_default=db.func.now())
updated_on = db.Column(db.DateTime, server_default=db.func.now(), server_onupdate=db.func.now())
class ModelResults(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
model_id = db.Column(db.Integer, db.ForeignKey('model_basic.id'))
epoc = db.Column(db.Integer, nullable=False)
iteration = db.Column(db.Integer, nullable=False)
metric = db.Column(db.String(50), nullable=False)
value = db.Column(db.Float, nullable=False)
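# Usage sketch (hypothetical values; assumes an active Flask-SQLAlchemy app context):
#   model = ModelBasic(model_name='baseline', model_dataset=1, model_type=0, target_class='label')
#   model.configs.append(ModelConfigs(parameter='epochs', value='10'))
#   db.session.add(model)
#   db.session.commit()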
|
StarcoderdataPython
|
3369975
|
##
# Copyright (c) 2013-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from txweb2 import responsecode
from txweb2.dav.noneprops import NonePropertyStore
from txweb2.dav.util import allDataFromStream
from txweb2.http import Response, HTTPError, StatusResponse, JSONResponse
from txweb2.http_headers import MimeType, MimeDisposition
from txweb2.stream import ProducerStream
from twisted.internet.defer import succeed, returnValue, inlineCallbacks
from twisted.internet.protocol import Protocol
from twistedcaldav.extensions import DAVResource, \
DAVResourceWithoutChildrenMixin
from twistedcaldav.resource import ReadOnlyNoCopyResourceMixIn
from twistedcaldav.scheduling_store.caldav.resource import \
deliverSchedulePrivilegeSet
from txdav.xml import element as davxml
import base64
import json
class ConduitResource(ReadOnlyNoCopyResourceMixIn, DAVResourceWithoutChildrenMixin, DAVResource):
"""
Podding cross-pod RPC conduit resource.
Extends L{DAVResource} to provide cross-pod RPC functionality.
"""
def __init__(self, parent, store):
"""
@param parent: the parent resource of this one.
"""
assert parent is not None
DAVResource.__init__(self, principalCollections=parent.principalCollections())
self.parent = parent
self.store = store
def deadProperties(self):
if not hasattr(self, "_dead_properties"):
self._dead_properties = NonePropertyStore(self)
return self._dead_properties
def etag(self):
return succeed(None)
def checkPreconditions(self, request):
return None
def resourceType(self):
return davxml.ResourceType.ischeduleinbox
def contentType(self):
return MimeType.fromString("text/html; charset=utf-8")
def isCollection(self):
return False
def isCalendarCollection(self):
return False
def isPseudoCalendarCollection(self):
return False
@inlineCallbacks
def principalForCalendarUserAddress(self, address):
for principalCollection in self.principalCollections():
principal = yield principalCollection.principalForCalendarUserAddress(address)
if principal is not None:
returnValue(principal)
returnValue(None)
def render(self, request):
output = """<html>
<head>
<title>Podding Conduit Resource</title>
</head>
<body>
<h1>Podding Conduit Resource.</h1>
</body
</html>"""
response = Response(200, {}, output)
response.headers.setHeader("content-type", MimeType("text", "html"))
return response
@inlineCallbacks
def http_POST(self, request):
"""
The server-to-server POST method.
"""
# Check shared secret
if not self.store.directoryService().serversDB().getThisServer().checkSharedSecret(request.headers):
self.log.error("Invalid shared secret header in cross-pod request")
raise HTTPError(StatusResponse(responsecode.FORBIDDEN, "Not authorized to make this request"))
# Look for XPOD header
xpod = request.headers.getRawHeaders("XPOD")
contentType = request.headers.getHeader("content-type")
if xpod is not None:
# Attachments are sent in the request body with the JSON data in a header. We
# decode the header and add the request.stream as an attribute of the JSON object.
xpod = xpod[0]
try:
j = json.loads(base64.b64decode(xpod))
except (TypeError, ValueError) as e:
self.log.error("Invalid JSON header in request: {ex}\n{xpod}", ex=e, xpod=xpod)
raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, "Invalid JSON header in request: {}\n{}".format(e, xpod)))
j["stream"] = request.stream
j["streamType"] = contentType
else:
# Check content first
if "{}/{}".format(contentType.mediaType, contentType.mediaSubtype) != "application/json":
self.log.error("MIME type {mime} not allowed in request", mime=contentType)
raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, "MIME type {} not allowed in request".format(contentType)))
body = (yield allDataFromStream(request.stream))
try:
j = json.loads(body)
except ValueError as e:
self.log.error("Invalid JSON data in request: {ex}\n{body}", ex=e, body=body)
raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, "Invalid JSON data in request: {}\n{}".format(e, body)))
# Log extended item
if not hasattr(request, "extendedLogItems"):
request.extendedLogItems = {}
request.extendedLogItems["xpod"] = j["action"] if "action" in j else "unknown"
# Look for a streaming action which needs special handling
if self.store.conduit.isStreamAction(j):
# Get the conduit to process the data stream
try:
stream = ProducerStream()
class StreamProtocol(Protocol):
def connectionMade(self):
stream.registerProducer(self.transport, False)
def dataReceived(self, data):
stream.write(data)
def connectionLost(self, reason):
stream.finish()
result = yield self.store.conduit.processRequestStream(j, StreamProtocol())
try:
ct, name = result
except ValueError:
code = responsecode.BAD_REQUEST
else:
headers = {"content-type": MimeType.fromString(ct)}
headers["content-disposition"] = MimeDisposition("attachment", params={"filename": name})
returnValue(Response(responsecode.OK, headers, stream))
except Exception as e:
# Send the exception over to the other side
result = {
"result": "exception",
"class": ".".join((e.__class__.__module__, e.__class__.__name__,)),
"details": str(e),
}
code = responsecode.BAD_REQUEST
else:
# Get the conduit to process the data
try:
result = yield self.store.conduit.processRequest(j)
code = responsecode.OK if result["result"] == "ok" else responsecode.BAD_REQUEST
except Exception as e:
# Send the exception over to the other side
result = {
"result": "exception",
"class": ".".join((e.__class__.__module__, e.__class__.__name__,)),
"details": str(e),
}
code = responsecode.BAD_REQUEST
response = JSONResponse(code, result)
returnValue(response)
##
# ACL
##
def supportedPrivileges(self, request):
return succeed(deliverSchedulePrivilegeSet)
def defaultAccessControlList(self):
privs = (
davxml.Privilege(davxml.Read()),
)
return succeed(
davxml.ACL(
# DAV:Read for all principals (includes anonymous)
davxml.ACE(
davxml.Principal(davxml.All()),
davxml.Grant(*privs),
davxml.Protected(),
),
)
)
|
StarcoderdataPython
|
3329291
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 18 14:18:42 2020
@author: paul
"""
import cv2
import numpy as np
import matplotlib.pyplot as plt
import glob
import re
import os
import random
import shutil
from scipy import stats
#Data sample
src=r"..\image_bbox_slicer-master\slice_output"
dst_perc=r"..\image_bbox_slicer-master\slice_contrast"
os.makedirs(dst_perc,exist_ok=True)
files = [fn for fn in sorted(glob.glob(src + '/*.png'))]
for img_file in files:
# Find SSID_slice name for file name
SSID_slice=re.search("[A-Za-z]{2}_S[0-9]{3}_[0-9]{2,}_[0-9]{6}",img_file).group(0)
img1 = cv2.imread(os.path.join(src,SSID_slice+".png"))
n_channels=img1.shape[2]
# Initialize some arrays
perc_img = np.zeros((img1.shape[0],img1.shape[1],n_channels),dtype = 'uint8')
perc_2=[None]*n_channels
perc_98=[None]*n_channels
perc_img_corrected=np.empty((img1.shape[0],img1.shape[1],n_channels))
perc_img_corrected[:]=np.nan
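    # The stretch below maps each channel with out = 255*(in - p2)/(p98 - p2) and then
    # clips to [0, 255]: the darkest 2% of pixels become 0 and the brightest 2% become 255.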
# For each channel, calculate the 2 and 98 percentile values and stretch
for k in range(n_channels):
perc_2[k]=np.percentile(img1[:,:,k],2)
perc_98[k]=np.percentile(img1[:,:,k],98)
perc_img[:,:,k] = 255.0*(img1[:,:,k]-perc_2[k])/(perc_98[k]-perc_2[k])
perc_img_corrected[:,:,k]=(np.where(img1[:,:,k]>perc_98[k],255,np.where(img1[:,:,k]<perc_2[k],0,perc_img[:,:,k])))
cv2.imwrite(os.path.join(dst_perc,SSID_slice+".png"),perc_img_corrected)
|
StarcoderdataPython
|
3247529
|
<filename>app/producer.py
from kafka import KafkaProducer
import time
# connect to Kafka
producer = KafkaProducer(bootstrap_servers='kafka:9092')
def emit():
for i in range(100):
print(f'send message {i}')
str_res = f'{i}'
producer.send('foobar', str_res.encode())
time.sleep(1)
if __name__ == '__main__':
emit()
|
StarcoderdataPython
|
67098
|
# Using Keras to load our model and images
from keras.models import load_model
from keras.preprocessing import image
# To grab environment variables, image directories, and image paths
import os
from os.path import isfile, join
# To sort our image directories by natural sort
from natsort import os_sorted
# To turn our lists into numpy arrays
import numpy as np
# Stops TF optimization warnings from displaying
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Path to dataset, to model, and to save/load history of model
DATASET_PATH = './data'
MODEL_PATH = 'face_mask_model'
HISTORY_PATH = MODEL_PATH + '/history.joblib'
# Target size our model was trained on
TARGET_SIZE = (150, 150)
# Image and Directory path to be used by our functions
IMAGE_DIRECTORY_PATH = '/full/path/here/'
# Replace IMAGE_DIRECTORY_PATH if image is not inside of image directory
IMAGE_PATH = IMAGE_DIRECTORY_PATH + 'imageNameHere.jpg'
# Loading in our previously trained model using joblib
model = load_model(MODEL_PATH)
# Returns a True/False in respect to whether the model predicts the person is wearing a mask
def predict_image(image_path):
# Load in image and set target size to what model was trained on
image_data = image.load_img(image_path, target_size=TARGET_SIZE)
# Convert to a numpy array, rescales to what we trained our model on and adds additional level of nesting
image_array = np.array(image_data)
image_array = image_array / 255.0
image_batch = np.expand_dims(image_array, axis=0)
# Gets prediction of passed image
prediction = (model.predict(image_batch) > 0.5).astype("int32")
# True if wearing a mask - False if not
    return prediction[0][0] == 0.0
# Returns 2D array in respect to each image in the directory predicted to be wearing a mask as True/False & image name
def predict_directory(directory_path):
image_list = os_sorted([f for f in os.listdir(directory_path) if isfile(join(directory_path, f))])
predictions = []
for image_name in image_list:
# Load in image from directory list joined with directory path and set target size to what model was trained on
image_data = image.load_img(directory_path + image_name, target_size=TARGET_SIZE)
# Convert to a numpy array, rescale to what we trained our model on, and add an additional level of nesting
image_array = image.img_to_array(image_data)
image_array = image_array / 255.0
image_batch = np.expand_dims(image_array, axis=0)
# Gets prediction of passed image
prediction = (model.predict(image_batch) > 0.5).astype("int32")
# Appends array of size 2 with True if wearing a mask - False if not & image name i.e. [True, image1.jpg]
predictions.append([prediction[0][0] == 0.0, image_name])
return predictions
if __name__ == '__main__':
print(predict_image(IMAGE_PATH))
print(predict_directory(IMAGE_DIRECTORY_PATH))
|
StarcoderdataPython
|
1657726
|
from utime import sleep_us
from machine import Pin
class IR_OUT:
SHORT = 562
LONG = 1686 # A long pulse burst is 1686us long, that's 3 times a short pulse burst
def __init__(self):
self.pin = Pin(4, Pin.OUT)
@micropython.viper
def pulse2(self, cycles): # Probably a very hacky solution, should be improved later
GPIO_BASE = ptr32(0x60000300) # GPIO base register
for i in range(int(cycles)): # Generate 38kHz pulse burst
GPIO_BASE[1] = 0x10 # Turn on pin 4
for _ in range(67):
pass
GPIO_BASE[2] = 0x10 # Turn off pin 4
for _ in range(139):
pass
def pulse(self, time):
self.pulse2(int(time/26)) # We need roughly (time/26) cycles, because each 38kHz carrier cycle is about 26us long
def send(self, data):
print(data)
data = ''.join(data.split()) # Remove all whitespaces
self.pulse(9000) # 9ms leading pulse burst
sleep_us(4500) # 4.5ms space
for bit in data:
self.pulse(IR_OUT.SHORT) # Every bit starts with a 562us pulse burst
sleep_us(IR_OUT.SHORT) if bit == '0' else sleep_us(IR_OUT.LONG) # space depending on '0' or '1'
self.pulse(IR_OUT.SHORT) # Finish message with a final pulse burst
sleep_us(100000) # Sleep some time after message
def repeat(self): # Send repeat code
self.pulse(9000) # 9ms leading pulse burst
sleep_us(2250) # 2.25ms space
self.pulse(IR_OUT.SHORT) # 562us pulse burst to mark the end of the space
sleep_us(100000) # Sleep some time after message
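# Usage sketch (an assumption, not part of the original driver): send one 32-bit
# NEC frame given as a whitespace-separated bit string, then a repeat code. The
# address/command bits below are hypothetical.
if __name__ == '__main__':
    ir = IR_OUT()
    ir.send("00000000 11111111 01101000 10010111")
    ir.repeat()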
|
StarcoderdataPython
|
3313562
|
<gh_stars>0
"""Create a frequency table with descending order of frequency."""
from collections import Counter
def frequency_table(nums):
"""Return a frequency table for given number list."""
table = Counter(nums)
print('Number\tFrequency')
for num in table.most_common():
print(f'{num[0]}\t{num[1]}')
return table
if __name__ == '__main__':
scores = [9,7,8,10,9,9,9,4,5,6,1,5,6,1,5,6,7,8,6,1,10]
frequency = frequency_table(scores)
|
StarcoderdataPython
|
3355559
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import re
class AtWikiStripper(object):
# Comment: `// comment`
COMMENT = re.compile(r'^//')
# Inline annotation: `&color(#999999){text}`, `&nicovideo(url)`
INLINE_ANN = re.compile(r'&[a-z_]+\(([^()]*?)\)({([^{}]+?)})?'), 3
# Inline links: `[[page]]`, `[[alias>URL]]`
INLINE_LINK = re.compile(r'\[\[(.+?)((>|>>)(.+?))?\]\]'), 1
# Inline italic: `'''text'''`
INLINE_ITALIC = re.compile(r'\'\'\'(.+?)\'\'\''), 1
# Inline bold: `''text''`
INLINE_BOLD = re.compile(r'\'\'(.+?)\'\''), 1
# Inline del: `%%text%%`
INLINE_DEL = re.compile(r'%%(.+?)%%'), 1
# Line annotation: `#right(){text}`, `#comment()`, `#region`
LINE_ANN = re.compile(r'^#[a-z_]+(\(([^()]*?)\)({([^{}]+?)})?)?\s*$'), 4
# Line horizontal line: `----`
LINE_HR = re.compile(r'^----\s*()$'), 1
# Line item list and heading: `+foo`, `-foo`, `*foo`
LINE_ITEMLIST = re.compile(r'^(\*+|\++|-+)(.+)$'), 2
# Line quote: `>text`
LINE_QUOTE = re.compile(r'^>+(.+)$'), 1
# Line formatted: ` text`
LINE_PRE = re.compile(r'^ (.+)$'), 1
# Block annotation: `#exk(){{{` ... `}}}`
BLOCK_BEGIN_ANN = re.compile(r'^#[a-z_]+\(([^{}()]*?)\)({+)\s*$')
BLOCK_END_ANN = re.compile(r'^(}+)\s*$')
def __init__(self, source):
self._source = source
def _inline_strip(self, line, pattern, group):
while True:
prev = line
# Note: prior to Python 3.5, use of backreference of nonmatching group
# in replacement string raises exception.
line = pattern.sub(lambda m: m.group(group), line)
if prev == line: return line
def _line_process(self, buf, line, pattern, group):
prev = line
line = pattern.sub(lambda m: m.group(group), line)
if prev == line: return False
buf.append(line)
return True
def text(self):
ret = []
lines = self._source.splitlines()
block_level = 0
for line in lines:
if self.COMMENT.match(line): continue
line = self._inline_strip(line, *self.INLINE_ANN)
line = self._inline_strip(line, *self.INLINE_LINK)
line = self._inline_strip(line, *self.INLINE_ITALIC)
line = self._inline_strip(line, *self.INLINE_BOLD)
line = self._inline_strip(line, *self.INLINE_DEL)
if self._line_process(ret, line, *self.LINE_ANN): continue
if self._line_process(ret, line, *self.LINE_HR): continue
if self._line_process(ret, line, *self.LINE_ITEMLIST): continue
if self._line_process(ret, line, *self.LINE_QUOTE): continue
if self._line_process(ret, line, *self.LINE_PRE): continue
if block_level == 0:
m = self.BLOCK_BEGIN_ANN.match(line)
if m:
block_level = len(m.group(2))
continue
else:
m = self.BLOCK_END_ANN.match(line)
if m and len(m.group(1)) == block_level:
block_level = 0
continue
ret.append(line)
return '\n'.join(ret)
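# Usage sketch (the sample markup below is made up for illustration):
if __name__ == '__main__':
    sample = "&color(#ff0000){Hello} ''world''\n// a comment line\n[[alias>http://example.com]]"
    print(AtWikiStripper(sample).text())  # -> "Hello world" and "alias" on separate lines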
|
StarcoderdataPython
|
1639630
|
from django.apps import AppConfig
class UpdownConfig(AppConfig):
name = 'updown'
|
StarcoderdataPython
|
45899
|
import sys
from django.apps import apps
from django.core.management import BaseCommand
from viewwork import BaseViewWork
from viewwork.models import Menu
class Command(BaseCommand):
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument('action', action='store', type=str, choices=['add', 'delete'])
def add(self):
for app_label, values in BaseViewWork.vw.items():
app = apps.get_app_config(app_label)
urls = sys.modules[f'{app.module.__name__}.urls']
namespace = getattr(urls, 'app_name', None) or app.module.__name__
for item in Menu.objects.filter(view__in=values.keys()):
item.view = f'{namespace}:{item.view}'
item.save(update_fields=('view',))
def delete(self):
for item in Menu.objects.filter(view__icontains=':'):
item.view = item.view.split(':')[1]
item.save(update_fields=('view',))
def handle(self, *args, **options):
if options['action'] == 'add':
self.add()
elif options['action'] == 'delete':
self.delete()
|
StarcoderdataPython
|
178941
|
<filename>main.py
import pygame
from pygame.locals import *
from sys import exit
pygame.init()
largura = 640
altura = 480
preto = (0,0,0)
tela = pygame.display.set_mode((largura, altura))
pygame.display.set_caption('Sprites')
|
StarcoderdataPython
|
85939
|
"""
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
from test.testlib.testcase import BaseTestCase
from cfnlint import conditions
class TestEquals(BaseTestCase):
""" Test Equals Logic """
def test_equal_value_string(self):
""" Test equals setup """
template = 'us-east-1'
result = conditions.EqualsValue(template)
self.assertTrue(result == 'us-east-1')
self.assertFalse(result == 'us-west-2')
|
StarcoderdataPython
|
65123
|
import os
from dvc.repo.scm_context import scm_context
from dvc.utils import relpath, resolve_output, resolve_paths
from dvc.utils.fs import path_isin
from ..exceptions import InvalidArgumentError, OutputDuplicationError
from . import locked
@locked
@scm_context
def imp_url(
self,
url,
out=None,
fname=None,
erepo=None,
frozen=True,
no_exec=False,
remote=None,
to_remote=False,
desc=None,
jobs=None,
):
from dvc.dvcfile import Dvcfile
from dvc.stage import Stage, create_stage, restore_meta
out = resolve_output(url, out)
path, wdir, out = resolve_paths(
self, out, always_local=to_remote and not out
)
if to_remote and no_exec:
raise InvalidArgumentError(
"--no-exec can't be combined with --to-remote"
)
if not to_remote and remote:
raise InvalidArgumentError(
"--remote can't be used without --to-remote"
)
# NOTE: when user is importing something from within their own repository
if (
erepo is None
and os.path.exists(url)
and path_isin(os.path.abspath(url), self.root_dir)
):
url = relpath(url, wdir)
stage = create_stage(
Stage,
self,
fname or path,
wdir=wdir,
deps=[url],
outs=[out],
erepo=erepo,
)
restore_meta(stage)
if desc:
stage.outs[0].desc = desc
dvcfile = Dvcfile(self, stage.path)
dvcfile.remove()
try:
self.check_modified_graph([stage])
except OutputDuplicationError as exc:
raise OutputDuplicationError(exc.output, set(exc.stages) - {stage})
if no_exec:
stage.ignore_outs()
elif to_remote:
remote = self.cloud.get_remote(remote, "import-url")
stage.outs[0].transfer(url, odb=remote.odb, jobs=jobs)
stage.save_deps()
stage.md5 = stage.compute_md5()
else:
stage.run(jobs=jobs)
stage.frozen = frozen
dvcfile.dump(stage)
return stage
|
StarcoderdataPython
|
1645574
|
<reponame>persona7548/cyphersAPI
import requests
import time
import pandas
import json
import csv
headers = {'Content-Type': 'application/json; charset=utf-8','apikey' :'***********'}
equipment = list(["101","102","103","104","105","106","202","203","301","302","303","304","305","107","204","205"])
csvfile = pandas.read_csv('C:/Users/KTH/Desktop/GitHub/matchId.csv',header=None,encoding='ANSI')
uniqCsvfile = csvfile.drop_duplicates()
for id in range(1,len(csvfile)):
print(id)
try:
matchID = uniqCsvfile[0][id]
except:
continue
time.sleep(0.05)
url = 'https://api.neople.co.kr/cy/matches/'+matchID
# retry a few times on connection errors before giving up on this match
r = None
for attempt in range(3):
try:
r = requests.get(url=url,headers = headers)
break
except:
time.sleep(1)
if r is None:
print(id," Pass")
continue
data = json.loads(r.text)
map = str(data["players"][1]["map"]["mapId"])
date = str(data["date"])
if data["teams"][0]["result"] =="win":
winList = data["teams"][0]["players"]
loseList = data["teams"][1]["players"]
else:
winList = data["teams"][1]["players"]
loseList = data["teams"][0]["players"]
f = open('C:/Users/KTH/Desktop/GitHub/matchData.csv', 'a')
#create win match record
f.write("win,"+matchID+","+map)
playerCount = data["players"]
for i in range(len(playerCount)):
player = playerCount[i]
if player["playerId"] in winList:
f.write(","+str(player["playInfo"]["characterId"]))
f.write("\n")
#create lose match record
f.write("lose,"+matchID+","+map)
playerCount = data["players"]
for i in range(len(playerCount)):
player = playerCount[i]
if player["playerId"] in loseList:
f.write(","+str(player["playInfo"]["characterId"]))
f.write("\n")
f.close()
#create detail match infomation
f = open('C:/Users/KTH/Desktop/GitHub/matchInfo.csv', 'a')
for i in range(len(playerCount)):
player = data["players"][i]
if player["playerId"] in winList:
f.write(date+","+"win,"+map+",")
else:
f.write(date+","+"lose,"+map+",")
f.write(matchID+","+player["playerId"]+","+str(player["playInfo"]["random"])+","+str(player["playInfo"]["partyUserCount"])+","+str(player["playInfo"]["partyId"])+","+str(player["playInfo"]["playTypeName"])
+","+player["playInfo"]["characterId"]+","+str(player["playInfo"]["level"])
+","+str(player["playInfo"]["killCount"])+","+str(player["playInfo"]["deathCount"])+","+str(player["playInfo"]["assistCount"])
+","+str(player["playInfo"]["attackPoint"])+","+str(player["playInfo"]["damagePoint"])+","+str(player["playInfo"]["battlePoint"])
+","+str(player["playInfo"]["sightPoint"]) +","+str(player["playInfo"]["towerAttackPoint"]) +","+str(player["playInfo"]["backAttackCount"]) +","+str(player["playInfo"]["comboCount"])
+","+str(player["playInfo"]["spellCount"]) +","+str(player["playInfo"]["healAmount"]) +","+str(player["playInfo"]["sentinelKillCount"]) +","+str(player["playInfo"]["demolisherKillCount"]) +","+str(player["playInfo"]["trooperKillCount"])
+","+str(player["playInfo"]["guardianKillCount"]) +","+str(player["playInfo"]["guardTowerKillCount"]) +","+str(player["playInfo"]["getCoin"]) +","+str(player["playInfo"]["spendCoin"]) +","+str(player["playInfo"]["spendConsumablesCoin"])
+","+str(player["playInfo"]["playTime"])+","+str(player["playInfo"]["responseTime"])+","+str(player["playInfo"]["minLifeTime"])+","+str(player["playInfo"]["maxLifeTime"])
+","+player["position"]["name"]+","+player["position"]["attribute"][0]["id"]+","+player["position"]["attribute"][1]["id"]+","+player["position"]["attribute"][2]["id"])
itemNum =0
for j in range(0,16):
try:
if (player["items"][itemNum]["equipSlotCode"] == equipment[j]):
f.write(","+player["items"][itemNum]["itemId"])
itemNum = itemNum+1
else:
f.write(",notEquip")
except:
f.write(",notEquip")
continue
f.write("\n")
f.close()
|
StarcoderdataPython
|
3399303
|
<reponame>KevinLuo41/LeetCodeInPython
#!/usr/bin/env python
# encoding: utf-8
"""
sort_list.py
Created by Shengwei on 2014-07-21.
"""
# https://oj.leetcode.com/problems/sort-list/
# tags: easy / medium, linked-list, merge sort, D&C, recursion
"""
Sort a linked list in O(n log n) time using constant space complexity.
"""
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
def merge_sorted_list(l1, l2):
cursor = dummy_head = ListNode(0)
while l1 and l2:
if l1.val < l2.val:
cursor.next = l1
l1 = l1.next
else:
cursor.next = l2
l2 = l2.next
# bug: forgot this... what a shame!
cursor = cursor.next
cursor.next = l1 or l2
return dummy_head.next
class Solution:
# @param head, a ListNode
# @return a ListNode
def sortList(self, head):
cur, count = head, 0
while cur:
count += 1
cur = cur.next
def sort(head, length):
if length <= 1:
if head:
# IMPORTANT: break nodes into individuals.
# There could otherwise exist a cycle.
head.next = None
return head
l2, half = head, length / 2
for _ in xrange(half):
l2 = l2.next
# note: neither l1 nor l2 can be None
l1 = sort(head, half)
l2 = sort(l2, length - half)
return merge_sorted_list(l1, l2)
return sort(head, count)
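# Quick sanity check (the solution targets Python 2: xrange, integer division).
# ListNode is normally supplied by the judge; it is defined here only so the
# example can run stand-alone.
if __name__ == '__main__':
    class ListNode:
        def __init__(self, x):
            self.val = x
            self.next = None

    head = ListNode(4)
    head.next = ListNode(2)
    head.next.next = ListNode(1)
    head.next.next.next = ListNode(3)
    node = Solution().sortList(head)
    while node:
        print(node.val)  # expected order: 1, 2, 3, 4
        node = node.next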
|
StarcoderdataPython
|
1754634
|
import warnings
warnings.filterwarnings('ignore')
import os
import pandas as pd
import math
import time
import random
import shutil
import numpy as np
from sklearn.model_selection import StratifiedKFold, GroupKFold, KFold
from tqdm.auto import tqdm
#from functools import partial
import sys
sys.path.append(f"{os.getcwd()}/src")
import torch
from src.trainer import trainer
from src.logger import init_logger
import yaml
import argparse
from metrics import get_score
def seed_torch(seed = 0):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
def get_cv_split(config):
"""
config : config is original file with loading yaml
"""
config_split = config["split"]
if config_split["name"] == "StratifiedKFold":
return StratifiedKFold(**config_split["params"])
def main(exp_file_name: str):
"""
prepare something
"""
with open (f"./config/{exp_file_name}.yml") as file:
config = yaml.safe_load(file)
config_general = config["general"]
train = pd.read_csv(config_general["train_file"])
#test = pd.read_csv("../input/ranzcr-clip-catheter-line-classification/sample_submission.csv")
if config_general["debug"]:
train = train.sample(n = 1000, random_state = config_general["seed"]).reset_index(drop = True)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f"We use {device}!")
if not os.path.exists(config_general["output_dir"]):
os.makedirs(config_general["output_dir"])
LOGGER = init_logger(config_general)
seed_torch(seed = config_general["seed"])
folds = train.copy()
Fold = get_cv_split(config)
for n, (train_index, val_index) in enumerate(Fold.split(folds, folds.target)):
folds.loc[val_index, 'fold'] = int(n)
folds['fold'] = folds['fold'].astype(int)
def get_result(result_df):
preds = result_df[[f'pred_{c}' for c in config["target_cols"]]].values
labels = result_df[config["target_cols"]].values
score, scores = get_score(labels, preds)
LOGGER.info(f'Score : {score: <.4f} Scores : {np.round(scores, decimals = 4)}')
if config_general["train"]:
oof_df = pd.DataFrame()
for fold in range(config_general["n_fold"]):
if fold in config_general["trn_fold"]:
_oof_df = trainer(folds, fold, device, config, LOGGER)
oof_df = pd.concat([oof_df, _oof_df])
LOGGER.info(f"======== fold : {fold} result =========")
get_result(_oof_df)
# CV result
LOGGER.info(f"========= CV ========")
get_result(oof_df)
oof_df.to_csv(config_general["output_dir"] + "/" + "oof_df.csv", index = False)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("config", type = str,
help = "your config file number e.g. your file is named exp001.yml, then you should set exp001")
args = parser.parse_args()
main(args.config)
|
StarcoderdataPython
|
1740011
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_bigtable_instance_info
description:
- Gather info for GCP Instance
short_description: Gather info for GCP Instance
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
elements: str
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- for authentication, you can set service_account_file using the C(gcp_service_account_file)
env variable.
- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variables values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: get info on an instance
gcp_bigtable_instance_info:
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
resources:
description: List of resources
returned: always
type: complex
contains:
state:
description:
- The current state of the instance.
returned: success
type: str
name:
description:
- The unique name of the instance.
returned: success
type: str
displayName:
description:
- The descriptive name for this instance as it appears in UIs.
- Can be changed at any time, but should be kept globally unique to avoid confusion.
returned: success
type: str
type:
description:
- The type of the instance. Defaults to `PRODUCTION`.
returned: success
type: str
labels:
description:
- Labels are a flexible and lightweight mechanism for organizing cloud resources
into groups that reflect a customer's organizational needs and deployment
strategies. They can be used to filter resources and aggregate metrics.
returned: success
type: dict
clusters:
description:
- An array of clusters. Maximum 4.
returned: success
type: complex
contains:
name:
description:
- The unique name of the cluster.
returned: success
type: str
serveNodes:
description:
- The number of nodes allocated to this cluster. More nodes enable higher
throughput and more consistent performance.
returned: success
type: int
defaultStorageType:
description:
- The type of storage used by this cluster to serve its parent instance's
tables, unless explicitly overridden.
returned: success
type: str
location:
description:
- The location where this cluster's nodes and storage reside. For best performance,
clients should be located as close as possible to this cluster. Currently
only zones are supported, so values should be of the form `projects/<project>/locations/<zone>`.
returned: success
type: str
state:
description:
- The current state of the cluster.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(argument_spec=dict())
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/bigtable']
return_value = {'resources': fetch_list(module, collection(module))}
module.exit_json(**return_value)
def collection(module):
return "https://bigtableadmin.googleapis.com/v2/projects/{project}/instances".format(**module.params)
def fetch_list(module, link):
auth = GcpSession(module, 'bigtable')
return auth.list(link, return_if_object, array_name='instances')
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3291574
|
from project import socketio
from project.solution.observer import Observer
from project.controller.smart_tv_controller import block_tv
ob_on = False
@socketio.on("observerConnect")
def observer_connect():
global ob_on
ob_on = True # mark the observer loop as running; observer_disconnect() flips this off
observer = Observer()
observer.messages_types = ("status", "notification", "confirmation")
observer.steps_to_adapt = [(block_tv, (False,))]
observer.steps_for_behave_normal = [(block_tv, (True,))]
observer.start()
while True:
if not ob_on:
observer.stop()
break
@socketio.on("observerDisconnect")
def observer_disconnect():
global ob_on
ob_on = False
print("Observer close connection!")
|
StarcoderdataPython
|
3326548
|
from django.db import models
from jsonfield import JSONField
from generic_serializer import SerializableModel
from test_app.models import DataProvider
class OauthConfig(models.Model, SerializableModel):
data_provider = models.OneToOneField(DataProvider, related_name="oauth_config", on_delete=models.CASCADE)
authorize_url = models.TextField()
access_token_url = models.TextField()
client_id = models.TextField()
client_secret = models.TextField()
scope = JSONField()
def __str__(self):
return f"OAuth2:{self.data_provider.provider_name}"
class Meta:
app_label = 'test_app'
|
StarcoderdataPython
|
1637696
|
<reponame>matroid/matroid-python<filename>test/test_detectors_labels.py
import os
import time
from datetime import datetime
import pytest
from test.data import TEST_IMAGE_FILE, RANDOM_MONGO_ID, TEST_IMAGE_URL
from matroid.error import APIConnectionError, InvalidQueryError, APIError
from test.helper import print_test_pass
DETECTOR_TEST_ZIP = os.getcwd() + '/test/test_file/cat-dog-lacroix.zip'
class TestDetectorsAndLabels(object):
def test_detector_and_labels(self, set_up_client):
detector_id = None
import_detector_id = None
redo_detector_id = None
detector_name = 'py-test-detector-{}'.format(datetime.now())
label_name = 'py-test-label'
bbox = {'left': 0.2, 'top': 0.1, 'width': 0.6, 'height': 0.8}
# Info for import_detector_test
import_detector_name = 'upload_py_gender_facial'
detector_type = 'facial_recognition'
input_tensor = 'input_5[128,128,3]'
output_tensor = 'prob3[2]'
labels = ['female', 'male']
file_proto = os.getcwd() + '/test/test_file/gender_only_all_android.pb'
# track so they can be deleted
self.feedback_ids = []
# set up client
self.api = set_up_client
# start testing
try:
self.delete_pending_detectors()
detector_id = self.create_detector_test(
file=DETECTOR_TEST_ZIP, name=detector_name, detector_type='general')
self.wait_detector_ready_for_edit(detector_id)
label_id = self.create_label_with_images_with_images_test(
name=label_name, detector_id=detector_id, image_files=TEST_IMAGE_FILE)
self.get_annotations_test(
detector_id=detector_id, label_id=label_id)
image_id = self.get_label_images_test(
detector_id=detector_id, label_id=label_id)
self.update_annotations_test(
detector_id=detector_id, label_id=label_id, image_id=image_id, bbox=bbox)
self.update_label_with_images_test(
detector_id=detector_id, label_id=label_id, image_files=TEST_IMAGE_FILE)
self.delete_label_test(detector_id=detector_id, label_id=label_id)
self.finalize_detector_test(detector_id=detector_id)
self.wait_detector_training(detector_id)
self.get_detector_info_test(detector_id=detector_id)
self.add_feedback_test(detector_id=detector_id)
self.delete_feedback_test(detector_id=detector_id)
self.search_detectors_test()
self.list_detectors_test()
redo_detector_id = self.redo_detector_test(
detector_id=detector_id)
import_detector_id = self.import_detector_test(name=import_detector_name, input_tensor=input_tensor,
output_tensor=output_tensor, detector_type='facial_recognition',
file_proto=file_proto, labels=labels)
finally:
if detector_id:
self.delete_detector_test(detector_id, 'main detector')
if import_detector_id:
self.delete_detector_test(
import_detector_id, 'imported detector')
if redo_detector_id:
self.delete_detector_test(redo_detector_id, 'redo detector')
def create_detector_test(self, file, name, detector_type):
with pytest.raises(APIConnectionError) as e:
invalid_zip_path = os.getcwd() + '/test/test_file/invalid.zip'
self.api.create_detector(
file=invalid_zip_path, name=name, detectorType=detector_type)
assert ('No such file or directory' in str(e))
res = self.api.create_detector(
file=file, name=name, detectorType=detector_type)
assert(res['detectorId'] != None)
print_test_pass()
return res['detectorId']
def create_label_with_images_with_images_test(self, name, detector_id, image_files):
with pytest.raises(APIError) as e:
self.api.create_label_with_images(detectorId=RANDOM_MONGO_ID,
name=name, imageFiles=image_files)
assert ('invalid_query_err' in str(e))
res = self.api.create_label_with_images(detectorId=detector_id,
name=name, imageFiles=image_files)
assert('successfully uploaded 1 images to label' in res['message'])
print_test_pass()
return res['labelId']
def get_annotations_test(self, detector_id, label_id):
res = self.api.get_annotations(
detectorId=detector_id, labelId=label_id)
assert (res['images'] != None)
print_test_pass()
def get_label_images_test(self, detector_id, label_id):
res = self.api.get_label_images(
detectorId=detector_id, labelId=label_id)
assert(res['images'] != None)
print_test_pass()
return res['images'][0]['imageId']
def update_annotations_test(self, detector_id, label_id, image_id, bbox):
with pytest.raises(APIError) as e:
self.api.update_annotations(
detectorId=detector_id, labelId=label_id, images=[])
assert ('invalid_query_err' in str(e))
res = self.api.update_annotations(detectorId=detector_id, labelId=label_id, images=[
{'id': image_id, 'bbox': bbox}])
assert (res['message'] == 'successfully updated 1 images')
print_test_pass()
def update_label_with_images_test(self, detector_id, label_id, image_files):
res = self.api.update_label_with_images(
detectorId=detector_id, labelId=label_id, imageFiles=image_files)
assert ('successfully uploaded 1 images to label' in res['message'])
print_test_pass()
def delete_label_test(self, detector_id, label_id):
res = self.api.delete_label(
detectorId=detector_id, labelId=label_id)
assert (res['message'] == 'Successfully deleted the label')
print_test_pass()
def finalize_detector_test(self, detector_id):
res = self.api.finalize_detector(detectorId=detector_id)
assert (res['message'] == 'training began successfully')
print_test_pass()
def get_detector_info_test(self, detector_id):
res = self.api.get_detector_info(detectorId=detector_id)
assert (res['id'] == detector_id)
print_test_pass()
def add_feedback_test(self, detector_id):
feedback = [
{
'feedbackType': 'positive',
'label': 'cat',
'boundingBox': {
'top': .1,
'left': .1,
'height': .1,
'width': .1,
},
}
]
res = self.api.add_feedback(detectorId=detector_id, file=TEST_IMAGE_FILE, feedback=feedback)
assert (len(res['feedback']) == 1)
feedback_id = res['feedback'][0]['id']
assert (feedback_id is not None)
self.feedback_ids.append(feedback_id)
url_feedback = [
{
'feedbackType': 'positive',
'label': 'cat',
'boundingBox': {
'top': .1,
'left': .1,
'height': .1,
'width': .1,
},
},
{
'feedbackType': 'negative',
'label': 'cat',
'boundingBox': {
'top': .3,
'left': .3,
'height': .3,
'width': .3,
},
}
]
res = self.api.add_feedback(detectorId=detector_id, feedback=url_feedback, url=TEST_IMAGE_URL)
assert (len(res['feedback']) == 2)
for feedback_item in res['feedback']:
feedback_id = feedback_item['id']
assert (feedback_id is not None)
self.feedback_ids.append(feedback_id)
single_feedback = {
'feedbackType': 'negative',
'label': 'cat',
'boundingBox': {
'top': .2,
'left': .2,
'height': .2,
'width': .2,
},
}
res = self.api.add_feedback(detectorId=detector_id, url=TEST_IMAGE_URL, feedback=single_feedback)
assert (len(res['feedback']) == 1)
feedback_id = res['feedback'][0]['id']
assert (feedback_id is not None)
self.feedback_ids.append(feedback_id)
print_test_pass()
def delete_feedback_test(self, detector_id):
for feedback_id in self.feedback_ids:
res = self.api.delete_feedback(feedbackId=feedback_id, detectorId=detector_id)
assert (res['feedbackId'] is not None)
print_test_pass()
def search_detectors_test(self):
res = self.api.search_detectors()
assert (res[0]['id'] != None)
print_test_pass()
def list_detectors_test(self):
res = self.api.list_detectors()
assert (len(res) > 0)
print_test_pass()
def redo_detector_test(self, detector_id):
res = self.api.redo_detector(detectorId=detector_id)
redo_detector_id = res['detectorId']
assert(redo_detector_id != None)
print_test_pass()
return redo_detector_id
def import_detector_test(self, name, input_tensor, output_tensor, detector_type, file_proto, labels):
res = self.api.import_detector(name=name, inputTensor=input_tensor, outputTensor=output_tensor,
detectorType=detector_type, fileProto=file_proto, labels=labels)
assert(res['detectorId'] != None)
print_test_pass()
return res['detectorId']
def delete_detector_test(self, detector_id, detector_type):
res = self.api.delete_detector(detectorId=detector_id)
assert (res['message'] == 'Deleted detector.')
print_test_pass()
# helpers
def delete_pending_detectors(self):
res = self.api.search_detectors(state='pending')
if len(res) == 1:
print('Info: found a pending detector, deleting it...')
self.api.delete_detector(detectorId=res[0]['id'])
print('Info: Deleted pending detector')
def wait_detector_training(self, detector_id):
res = self.api.get_detector_info(detectorId=detector_id)
print ('Info: waiting for detectors training')
indicator = '.'
max_indicator_length = 48
while res['state'] != 'trained' and res['state'] != 'failed':
if len(indicator) > max_indicator_length:
pytest.fail('Timeout when waiting for detector training')
print(indicator)
time.sleep(5)
res = self.api.get_detector_info(detectorId=detector_id)
indicator += '.'
print('Info: detector is ready')
def wait_detector_ready_for_edit(self, detector_id):
print('Info: waiting for pending detector to be ready for editing')
res = self.api.get_detector_info(detectorId=detector_id)
tried_num = 0
max_tries = 15
while (res['processing']):
if tried_num > max_tries:
pytest.fail(
'Timeout when waiting for detector to be ready for editing')
res = self.api.get_detector_info(detectorId=detector_id)
time.sleep(2)
tried_num += 1
print('Info: detector is ready for editing.')
|
StarcoderdataPython
|
154281
|
<filename>scripts/sentenceLengths.py
import sys
mapping = {}
total = 0
with open(sys.argv[1], 'r') as f:
for line in f:
total += 1
l = len(line.split())
mapping[l] = mapping.get(l, 0) + 1
print(mapping)
percentiles = {}
sumSoFar = 0
for l in sorted(mapping.keys()):
sumSoFar += mapping[l]
print("{}\t{:.3f}%".format(l, sumSoFar/total*100))
|
StarcoderdataPython
|
3273176
|
<filename>src/distance.py
def distance_matrix(patches, metric):
return distance_matrix_symmetrical(patches, metric)
# Assumes the metric is symmetrical.
def distance_matrix_symmetrical(patches, metric):
width = len(patches)
matrix = []
for y in range(width):
row = [float(metric(patches[x], patches[y])) for x in range(y)] + [0.0]
matrix.append(row)
# Fill in the rest of the matrix.
for y in range(width):
matrix[y].extend([matrix[x][y] for x in range(y + 1, width)])
return matrix
# Produces a distance matrix from a given set of patches, using a provided
# distance metric.
def distance_matrix_asymmetrical(patches, metric):
matrix = []
for p1 in patches:
row = [0.0 if p1 == p2 else float(metric(p1, p2)) for p2 in patches]
matrix.append(row)
return matrix
def matrix_to_string(matrix):
return '\n'.join(map(lambda row: ', '.join(map(str, row)), matrix))
def normalised_levenshtein(s1, s2):
return levenshtein(s1, s2) / float(max(len(s1), len(s2)))
# Adapted from:
# https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#Python
def levenshtein(s1, s2):
if len(s1) < len(s2):
return levenshtein(s2, s1)
if len(s2) == 0:
return len(s1)
previous_row = range(len(s2) + 1)
for i, c1 in enumerate(s1):
current_row = [i + 1]
for j, c2 in enumerate(s2):
insertions = previous_row[j + 1] + 1
deletions = current_row[j] + 1
substitutions = previous_row[j] + (c1 != c2)
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row
return previous_row[-1]
# Measures the distance between the program produced by a candidate patch and
# the original program.
def distance_to_origin(problem, patch):
dist = 0
for fix in patch.normalise(problem).fixes:
if fix.is_deletion():
dist += problem.size_of(fix.location)
elif fix.is_replacement():
dist += max(problem.size_of(fix.location),
problem.size_of(fix.surrogate))
else:
dist += problem.size_of(fix.surrogate)
return dist
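# Usage sketch (the patch strings below are made up for illustration):
if __name__ == '__main__':
    patches = ["kitten", "sitting", "kitchen"]
    matrix = distance_matrix(patches, normalised_levenshtein)
    print(matrix_to_string(matrix))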
|
StarcoderdataPython
|
3246041
|
import copy
import warnings
from collections import OrderedDict
from typing import List, Union
import numpy as np
import torch
__all__ = [
"normalize_image",
"channels_first",
"scale_intrinsics",
"pointquaternion_to_homogeneous",
"poses_to_transforms",
"create_label_image",
]
def normalize_image(rgb: Union[torch.Tensor, np.ndarray]):
r"""Normalizes RGB image values from :math:`[0, 255]` range to :math:`[0, 1]` range.
Args:
rgb (torch.Tensor or numpy.ndarray): RGB image in range :math:`[0, 255]`
Returns:
torch.Tensor or numpy.ndarray: Normalized RGB image in range :math:`[0, 1]`
Shape:
- rgb: :math:`(*)` (any shape)
- Output: Same shape as input :math:`(*)`
"""
if torch.is_tensor(rgb):
return rgb.float() / 255
elif isinstance(rgb, np.ndarray):
return rgb.astype(float) / 255
else:
raise TypeError("Unsupported input rgb type: %r" % type(rgb))
def channels_first(rgb: Union[torch.Tensor, np.ndarray]):
r"""Converts from channels last representation :math:`(*, H, W, C)` to channels first representation
:math:`(*, C, H, W)`
Args:
rgb (torch.Tensor or numpy.ndarray): :math:`(*, H, W, C)` ordering `(*, height, width, channels)`
Returns:
torch.Tensor or numpy.ndarray: :math:`(*, C, H, W)` ordering
Shape:
- rgb: :math:`(*, H, W, C)`
- Output: :math:`(*, C, H, W)`
"""
if not (isinstance(rgb, np.ndarray) or torch.is_tensor(rgb)):
raise TypeError("Unsupported input rgb type {}".format(type(rgb)))
if rgb.ndim < 3:
raise ValueError(
"Input rgb must contain atleast 3 dims, but had {} dims.".format(rgb.ndim)
)
if rgb.shape[-3] < rgb.shape[-1]:
msg = "Are you sure that the input is correct? Number of channels exceeds height of image: %r > %r"
warnings.warn(msg % (rgb.shape[-1], rgb.shape[-3]))
ordering = list(range(rgb.ndim))
ordering[-2], ordering[-1], ordering[-3] = ordering[-3], ordering[-2], ordering[-1]
if isinstance(rgb, np.ndarray):
return np.ascontiguousarray(rgb.transpose(*ordering))
elif torch.is_tensor(rgb):
return rgb.permute(*ordering).contiguous()
def scale_intrinsics(
intrinsics: Union[np.ndarray, torch.Tensor],
h_ratio: Union[float, int],
w_ratio: Union[float, int],
):
r"""Scales the intrinsics appropriately for resized frames where
:math:`h_\text{ratio} = h_\text{new} / h_\text{old}` and :math:`w_\text{ratio} = w_\text{new} / w_\text{old}`
Args:
intrinsics (numpy.ndarray or torch.Tensor): Intrinsics matrix of original frame
h_ratio (float or int): Ratio of new frame's height to old frame's height
:math:`h_\text{ratio} = h_\text{new} / h_\text{old}`
w_ratio (float or int): Ratio of new frame's width to old frame's width
:math:`w_\text{ratio} = w_\text{new} / w_\text{old}`
Returns:
numpy.ndarray or torch.Tensor: Intrinsics matrix scaled approprately for new frame size
Shape:
- intrinsics: :math:`(*, 3, 3)` or :math:`(*, 4, 4)`
- Output: Matches `intrinsics` shape, :math:`(*, 3, 3)` or :math:`(*, 4, 4)`
"""
if isinstance(intrinsics, np.ndarray):
scaled_intrinsics = intrinsics.astype(np.float32).copy()
elif torch.is_tensor(intrinsics):
scaled_intrinsics = intrinsics.to(torch.float).clone()
else:
raise TypeError("Unsupported input intrinsics type {}".format(type(intrinsics)))
if not (intrinsics.shape[-2:] == (3, 3) or intrinsics.shape[-2:] == (4, 4)):
raise ValueError(
"intrinsics must have shape (*, 3, 3) or (*, 4, 4), but had shape {} instead".format(
intrinsics.shape
)
)
if (intrinsics[..., -1, -1] != 1).any() or (intrinsics[..., 2, 2] != 1).any():
warnings.warn(
"Incorrect intrinsics: intrinsics[..., -1, -1] and intrinsics[..., 2, 2] should be 1."
)
scaled_intrinsics[..., 0, 0] *= w_ratio # fx
scaled_intrinsics[..., 1, 1] *= h_ratio # fy
scaled_intrinsics[..., 0, 2] *= w_ratio # cx
scaled_intrinsics[..., 1, 2] *= h_ratio # cy
return scaled_intrinsics
def pointquaternion_to_homogeneous(
pointquaternions: Union[np.ndarray, torch.Tensor], eps: float = 1e-12
):
r"""Converts 3D point and unit quaternions :math:`(t_x, t_y, t_z, q_x, q_y, q_z, q_w)` to
homogeneous transformations [R | t] where :math:`R` denotes the :math:`(3, 3)` rotation matrix and :math:`T`
denotes the :math:`(3, 1)` translation matrix:
.. math::
\left[\begin{array}{@{}c:c@{}}
R & T \\ \hdashline
\begin{array}{@{}ccc@{}}
0 & 0 & 0
\end{array} & 1
\end{array}\right]
Args:
pointquaternions (numpy.ndarray or torch.Tensor): 3D point positions and unit quaternions
:math:`(tx, ty, tz, qx, qy, qz, qw)` where :math:`(tx, ty, tz)` is the 3D position and
:math:`(qx, qy, qz, qw)` is the unit quaternion.
eps (float): Small value, to avoid division by zero. Default: 1e-12
Returns:
numpy.ndarray or torch.Tensor: Homogeneous transformation matrices.
Shape:
- pointquaternions: :math:`(*, 7)`
- Output: :math:`(*, 4, 4)`
"""
if not (
isinstance(pointquaternions, np.ndarray) or torch.is_tensor(pointquaternions)
):
raise TypeError(
'"pointquaternions" must be of type "np.ndarray" or "torch.Tensor". Got {0}'.format(
type(pointquaternions)
)
)
if not isinstance(eps, float):
raise TypeError('"eps" must be of type "float". Got {0}.'.format(type(eps)))
if pointquaternions.shape[-1] != 7:
raise ValueError(
'"pointquaternions" must be of shape (*, 7). Got {0}.'.format(
pointquaternions.shape
)
)
output_shape = (*pointquaternions.shape[:-1], 4, 4)
if isinstance(pointquaternions, np.ndarray):
t = pointquaternions[..., :3].astype(np.float32)
q = pointquaternions[..., 3:7].astype(np.float32)
transform = np.zeros(output_shape, dtype=np.float32)
else:
t = pointquaternions[..., :3].float()
q = pointquaternions[..., 3:7].float()
transform = torch.zeros(
output_shape, dtype=torch.float, device=pointquaternions.device
)
q_norm = (0.5 * (q ** 2).sum(-1)[..., None]) ** 0.5
q /= (
torch.max(q_norm, torch.tensor(eps))
if torch.is_tensor(q_norm)
else np.maximum(q_norm, eps)
)
if isinstance(q, np.ndarray):
q = np.matmul(q[..., None], q[..., None, :])
else:
q = torch.matmul(q.unsqueeze(-1), q.unsqueeze(-2))
txx = q[..., 0, 0]
tyy = q[..., 1, 1]
tzz = q[..., 2, 2]
txy = q[..., 0, 1]
txz = q[..., 0, 2]
tyz = q[..., 1, 2]
twx = q[..., 0, 3]
twy = q[..., 1, 3]
twz = q[..., 2, 3]
transform[..., 0, 0] = 1.0
transform[..., 1, 1] = 1.0
transform[..., 2, 2] = 1.0
transform[..., 3, 3] = 1.0
transform[..., 0, 0] -= tyy + tzz
transform[..., 0, 1] = txy - twz
transform[..., 0, 2] = txz + twy
transform[..., 1, 0] = txy + twz
transform[..., 1, 1] -= txx + tzz
transform[..., 1, 2] = tyz - twx
transform[..., 2, 0] = txz - twy
transform[..., 2, 1] = tyz + twx
transform[..., 2, 2] -= txx + tyy
transform[..., :3, 3] = t
return transform
def poses_to_transforms(poses: Union[np.ndarray, List[np.ndarray]]):
r"""Converts poses to transformations w.r.t. the first frame in the sequence having identity pose
Args:
poses (numpy.ndarray or list of numpy.ndarray): Sequence of poses in `numpy.ndarray` format.
Returns:
numpy.ndarray or list of numpy.ndarray: Sequence of frame to frame transformations where initial
frame is transformed to have identity pose.
Shape:
- poses: Could be `numpy.ndarray` of shape :math:`(N, 4, 4)`, or list of `numpy.ndarray`s of shape
:math:`(4, 4)`
- Output: Of same shape as input `poses`
"""
transformations = copy.deepcopy(poses)
for i in range(len(poses)):
if i == 0:
transformations[i] = np.eye(4)
else:
transformations[i] = np.linalg.inv(poses[i - 1]).dot(poses[i])
return transformations
def create_label_image(prediction: np.ndarray, color_palette: OrderedDict):
r"""Creates a label image, given a network prediction (each pixel contains class index) and a color palette.
Args:
prediction (numpy.ndarray): Predicted image where each pixel contains an integer,
corresponding to its class label.
color_palette (OrderedDict): Contains RGB colors (`uint8`) for each class.
Returns:
numpy.ndarray: Label image with the given color palette
Shape:
- prediction: :math:`(H, W)`
- Output: :math:`(H, W)`
"""
label_image = np.zeros(
(prediction.shape[0], prediction.shape[1], 3), dtype=np.uint8
)
for idx, color in enumerate(color_palette):
label_image[prediction == idx] = color
return label_image
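# Minimal usage sketch (the image size and intrinsics below are made-up values):
if __name__ == "__main__":
    rgb = np.random.randint(0, 256, (480, 640, 3)).astype(np.uint8)
    chw = channels_first(normalize_image(rgb))  # (3, 480, 640), floats in [0, 1]
    K = np.array([[525.0, 0.0, 320.0],
                  [0.0, 525.0, 240.0],
                  [0.0, 0.0, 1.0]], dtype=np.float32)
    K_half = scale_intrinsics(K, h_ratio=0.5, w_ratio=0.5)  # intrinsics for a half-size frame
    pose = pointquaternion_to_homogeneous(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]))
    print(chw.shape, K_half[0, 0], pose.shape)  # (3, 480, 640) 262.5 (4, 4)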
|
StarcoderdataPython
|
3272899
|
<gh_stars>0
import unittest
from main import get_age
class TestSum(unittest.TestCase):
def test(self):
self.assertEqual(get_age("2 years old"), 2)
self.assertEqual(get_age("4 years old"), 4)
self.assertEqual(get_age("5 years old"), 5)
self.assertEqual(get_age("7 years old"), 7)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
195478
|
# Copyright (c) 2015-2019 The Switch Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0, which is in the LICENSE file.
"""
Defines load zone parameters for the Switch model.
INPUT FILE INFORMATION
Import load zone data. The following tab-separated files are
expected in the input directory. Their index columns need to be on
the left, but the data columns can be in any order. Extra columns
will be ignored during import, and optional columns can be dropped.
Other modules (such as local_td) may look for additional columns in
some of these files. If you don't want to specify data for any
optional parameter, use a dot . for its value. Optional columns and
files are noted with a *.
load_zones.csv
LOAD_ZONE, zone_ccs_distance_km*, zone_dbid*
loads.csv
LOAD_ZONE, TIMEPOINT, zone_demand_mw
zone_coincident_peak_demand.csv*
LOAD_ZONE, PERIOD, zone_expected_coincident_peak_demand
"""
import os
from pyomo.environ import *
from switch_model.reporting import write_table
from switch_model.tools.graph import graph
dependencies = 'switch_model.timescales'
optional_dependencies = 'switch_model.transmission.local_td'
def define_dynamic_lists(mod):
"""
Zone_Power_Injections and Zone_Power_Withdrawals are lists of
components that contribute to load-zone level power balance equations.
sum(Zone_Power_Injections[z,t]) == sum(Zone_Power_Withdrawals[z,t])
for all z,t
Other modules may append to either list, as long as the components they
add are indexed by [zone, timepoint] and have units of MW. Other modules
often include Expressions to summarize decision variables on a zonal basis.
"""
mod.Zone_Power_Injections = []
mod.Zone_Power_Withdrawals = []
def define_components(mod):
"""
Augments a Pyomo abstract model object with sets and parameters that
describe load zones and associated power balance equations. Unless
otherwise stated, each set and parameter is mandatory.
LOAD_ZONES is the set of load zones. Each zone is effectively modeled as a
single bus connected to the inter-zonal transmission network (assuming
transmission is enabled). If local_td is included, the central zonal bus,
is connected to a "distributed bus" via local transmission and
distribution that incurs efficiency losses and must be upgraded over time
to always meet peak demand. Load zones are abbreviated as zone in
parameter names and as z for indexes.
zone_demand_mw[z,t] describes the power demand from the high voltage
transmission grid for each load zone z and timepoint t. This will either go
into the Zone_Power_Withdrawals or the Distributed_Power_Withdrawals power
balance equations, depending on whether the local_td module is included
and has defined a distributed node for power balancing. If the local_td
module is excluded, this value should be the total withdrawals from the
central grid and should include any distribution losses. If the local_td
module is included, this should be set to total end-use demand (aka sales)
and should not include distribution losses. zone_demand_mw must be
non-negative.
zone_dbid[z] stores an external database id for each load zone. This
is optional and defaults to the name of the load zone. It will be
printed out when results are exported.
zone_ccs_distance_km[z] describes the length of a pipeline in
kilometers that would need to be built to transport CO2 from a load
zone's central bus to the nearest viable CCS reservoir. This
parameter is optional and defaults to 0.
EXTERNAL_COINCIDENT_PEAK_DEMAND_ZONE_PERIODS is a set of load zones and
periods (z,p) that have zone_expected_coincident_peak_demand specified.
zone_expected_coincident_peak_demand[z,p] is an optional parameter that can
be used to externally specify peak load planning requirements in MW.
Currently local_td and planning_reserves determine capacity requirements
using zone_expected_coincident_peak_demand as well as load timeseries. Do not
specify this parameter if you wish for the model to endogenously determine
capacity requirements after accounting for both load and Distributed
Energy Resources (DER).
Derived parameters:
zone_total_demand_in_period_mwh[z,p] describes the total energy demand
of each load zone in each period in Megawatt hours.
"""
mod.LOAD_ZONES = Set(dimen=1, input_file='load_zones.csv')
mod.ZONE_TIMEPOINTS = Set(dimen=2,
initialize=lambda m: m.LOAD_ZONES * m.TIMEPOINTS,
doc="The cross product of load zones and timepoints, used for indexing.")
mod.zone_demand_mw = Param(
mod.ZONE_TIMEPOINTS,
input_file="loads.csv",
within=NonNegativeReals)
mod.zone_ccs_distance_km = Param(
mod.LOAD_ZONES,
within=NonNegativeReals,
input_file="load_zones.csv",
default=0.0)
mod.zone_dbid = Param(
mod.LOAD_ZONES,
input_file="load_zones.csv",
default=lambda m, z: z)
mod.min_data_check('LOAD_ZONES', 'zone_demand_mw')
try:
mod.Distributed_Power_Withdrawals.append('zone_demand_mw')
except AttributeError:
mod.Zone_Power_Withdrawals.append('zone_demand_mw')
mod.EXTERNAL_COINCIDENT_PEAK_DEMAND_ZONE_PERIODS = Set(
dimen=2, within=mod.LOAD_ZONES * mod.PERIODS,
input_file="zone_coincident_peak_demand.csv",
input_optional=True,
doc="Zone-Period combinations with zone_expected_coincident_peak_demand data.")
mod.zone_expected_coincident_peak_demand = Param(
mod.EXTERNAL_COINCIDENT_PEAK_DEMAND_ZONE_PERIODS,
input_file="zone_coincident_peak_demand.csv",
within=NonNegativeReals)
mod.zone_total_demand_in_period_mwh = Param(
mod.LOAD_ZONES, mod.PERIODS,
within=NonNegativeReals,
initialize=lambda m, z, p: (
sum(m.zone_demand_mw[z, t] * m.tp_weight[t]
for t in m.TPS_IN_PERIOD[p])))
# Make sure the model has duals enabled since we use the duals in post_solve()
mod.enable_duals()
def define_dynamic_components(mod):
"""
Adds components to a Pyomo abstract model object to enforce the
first law of thermodynamics at the level of load zone buses. Unless
otherwise stated, all terms describing power are in units of MW and
all terms describing energy are in units of MWh.
Zone_Energy_Balance[load_zone, timepoint] is a constraint that mandates
conservation of energy in every load zone and timepoint. This constraint
sums the model components in the lists Zone_Power_Injections and
Zone_Power_Withdrawals - each of which is indexed by (z, t) and
has units of MW - and ensures they are equal. The term tp_duration_hrs
is factored out of the equation for brevity.
"""
mod.Zone_Energy_Balance = Constraint(
mod.ZONE_TIMEPOINTS,
rule=lambda m, z, t: (
sum(
getattr(m, component)[z, t]
for component in m.Zone_Power_Injections
) == sum(
getattr(m, component)[z, t]
for component in m.Zone_Power_Withdrawals)))
def post_solve(instance, outdir):
"""
Exports load_balance.csv, load_balance_annual_zonal.csv, and load_balance_annual.csv.
Each component registered with Zone_Power_Injections and Zone_Power_Withdrawals will
become a column in these .csv files. As such, each column represents a power injection
or withdrawal and the sum of across all columns should be zero. Note that positive
terms are net injections (e.g. generation) while negative terms are net withdrawals
(e.g. load).
load_balance.csv contains the energy balance terms for every zone and timepoint.
We also include a column called normalized_energy_balance_duals_dollar_per_mwh
that is a proxy for the locational marginal pricing (LMP). This value represents
the incremental cost per hour to increase the demand by 1 MW (or equivalently
the incremental cost of providing one more MWh of energy). This is not a perfect
proxy for LMP since it factors in build costs etc.
load_balance_annual_zonal.csv contains the energy injections and withdrawals
throughout a year for a given load zone.
load_balance_annual.csv contains the energy injections and withdrawals
throughout a year across all zones.
"""
write_table(
instance, instance.LOAD_ZONES, instance.TIMEPOINTS,
output_file=os.path.join(outdir, "load_balance.csv"),
headings=("load_zone", "timestamp", "normalized_energy_balance_duals_dollar_per_mwh",) + tuple(
instance.Zone_Power_Injections +
instance.Zone_Power_Withdrawals),
values=lambda m, z, t:
(
z,
m.tp_timestamp[t],
m.get_dual(
"Zone_Energy_Balance",
z, t,
divider=m.bring_timepoint_costs_to_base_year[t]
)
)
+ tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Injections)
+ tuple(-getattr(m, component)[z, t] for component in m.Zone_Power_Withdrawals)
)
def get_component_per_year(m, z, p, component):
"""
Returns the weighted sum of component across all timepoints in the given period.
The components must be indexed by zone and timepoint.
"""
return sum(getattr(m, component)[z, t] * m.tp_weight_in_year[t] for t in m.TPS_IN_PERIOD[p])
write_table(
instance, instance.LOAD_ZONES, instance.PERIODS,
output_file=os.path.join(outdir, "load_balance_annual_zonal.csv"),
headings=("load_zone", "period",) + tuple(instance.Zone_Power_Injections + instance.Zone_Power_Withdrawals),
values=lambda m, z, p:
(z, p)
+ tuple(get_component_per_year(m, z, p, component) for component in m.Zone_Power_Injections)
+ tuple(-get_component_per_year(m, z, p, component) for component in m.Zone_Power_Withdrawals)
)
write_table(
instance, instance.PERIODS,
output_file=os.path.join(outdir, "load_balance_annual.csv"),
headings=("period",) + tuple(instance.Zone_Power_Injections + instance.Zone_Power_Withdrawals),
values=lambda m, p:
(p,)
+ tuple(sum(get_component_per_year(m, z, p, component) for z in m.LOAD_ZONES)
for component in m.Zone_Power_Injections)
+ tuple(-sum(get_component_per_year(m, z, p, component) for z in m.LOAD_ZONES)
for component in m.Zone_Power_Withdrawals)
)
@graph(
"energy_balance_duals",
title="Energy balance duals per period",
note="Note: Outliers and zero-valued duals are ignored."
)
def graph_energy_balance(tools):
load_balance = tools.get_dataframe('load_balance.csv')
load_balance = tools.transform.timestamp(load_balance)
load_balance["energy_balance_duals"] = tools.pd.to_numeric(
load_balance["normalized_energy_balance_duals_dollar_per_mwh"], errors="coerce") / 10
load_balance = load_balance[["energy_balance_duals", "time_row"]]
load_balance = load_balance.pivot(columns="time_row", values="energy_balance_duals")
percent_of_zeroes = sum(load_balance == 0) / len(load_balance) * 100
# Don't include the zero-valued duals
load_balance = load_balance.replace(0, tools.np.nan)
if load_balance.count().sum() != 0:
load_balance.plot.box(
ax=tools.get_axes(note=f"{percent_of_zeroes:.1f}% of duals are zero"),
xlabel='Period',
ylabel='Energy balance duals (cents/kWh)',
showfliers=False
)
@graph(
"daily_demand",
title="Total daily demand",
supports_multi_scenario=True
)
def demand(tools):
df = tools.get_dataframe("loads.csv", from_inputs=True, drop_scenario_info=False)
df = df.groupby(["TIMEPOINT", "scenario_name"], as_index=False).sum()
df = tools.transform.timestamp(df, key_col="TIMEPOINT", use_timepoint=True)
df = df.groupby(["season", "hour", "scenario_name", "time_row"], as_index=False).mean()
df["zone_demand_mw"] /= 1e3
pn = tools.pn
plot = pn.ggplot(df) + \
pn.geom_line(pn.aes(x="hour", y="zone_demand_mw", color="scenario_name")) + \
pn.facet_grid("time_row ~ season") + \
pn.labs(x="Hour (PST)", y="Demand (GW)", color="Scenario")
tools.save_figure(plot.draw())
@graph(
"demand",
title="Total demand",
supports_multi_scenario=True
)
def yearly_demand(tools):
df = tools.get_dataframe("loads.csv", from_inputs=True, drop_scenario_info=False)
df = df.groupby(["TIMEPOINT", "scenario_name"], as_index=False).sum()
df = tools.transform.timestamp(df, key_col="TIMEPOINT", use_timepoint=True)
df["zone_demand_mw"] *= df["tp_duration"] / 1e3
df["day"] = df["datetime"].dt.day_of_year
df = df.groupby(["day", "scenario_name", "time_row"], as_index=False)["zone_demand_mw"].sum()
pn = tools.pn
plot = pn.ggplot(df) + \
pn.geom_line(pn.aes(x="day", y="zone_demand_mw", color="scenario_name")) + \
pn.facet_grid("time_row ~ .") + \
pn.labs(x="Day of Year", y="Demand (GW)", color="Scenario")
tools.save_figure(plot.draw())
|
StarcoderdataPython
|
4819092
|
model_name = "bedroom_full2b"
epoch_load = "latest"
print("pointnetae", model_name, epoch_load)
data_dir = "../data"
split_dir = "../splits"
rooms_subdir = "Rooms"
roominfos_subdir = "RoomInfos"
model_params_subdir = "ModelParameters"
model_training_reconstructions_subdir = "TrainingReconstructions"
model_testing_reconstructions_subdir = "TestingReconstructions"
model_training_interpolations_subdir = "TrainingInterpolations"
model_testing_interpolations_subdir = "TestingInterpolations"
params_history = {
"bedroom_full1":{
"procedure": "bedroom2",
"split_train": "test64/all.json",
"split_test": "test64/all.json",
"num_epochs": 1000,
"batch_size": 64,
"learning_rate": 0.001,
"step_size": 250,
"step_gamma": 0.5,
"latent_size": 1024,
"max_num_points": 20,
"geometric_weight": 1,
"orientation_weight": 1,
"categorical_weight": 1,
"existence_weight": 1,
"shape_weight": 1000,
"dimensions_matching_weight": 0.5,
"encoder_hidden_dims": [512, 512, 512, 512],
"decoder_hidden_dims": [512, 512],
"shape_code_encoder_hidden_dims": [512, 512],
"shape_code_encoder_output_size": 512,
"shape_code_decoder_hidden_dims": [512, 512],
},
"bedroom_full1a":{
"procedure": "bedroom2",
"split_train": "test64/all.json",
"split_test": "test64/all.json",
"num_epochs": 1000,
"batch_size": 64,
"learning_rate": 0.001,
"step_size": 250,
"step_gamma": 0.5,
"latent_size": 1024,
"max_num_points": 20,
"geometric_weight": 1,
"orientation_weight": 1,
"categorical_weight": 1,
"existence_weight": 1,
"shape_weight": 1000,
"dimensions_matching_weight": 0.5,
"encoder_hidden_dims": [512, 512],
"decoder_hidden_dims": [512, 512],
"shape_code_encoder_hidden_dims": [512, 512],
"shape_code_encoder_output_size": 512,
"shape_code_decoder_hidden_dims": [512, 512],
},
"bedroom_full1b":{
"procedure": "bedroom2",
"split_train": "test64/train.json",
"split_test": "test64/test.json",
"num_epochs": 1000,
"batch_size": 64,
"learning_rate": 0.001,
"step_size": 250,
"step_gamma": 0.5,
"latent_size": 1024,
"max_num_points": 20,
"geometric_weight": 1,
"orientation_weight": 1,
"categorical_weight": 1,
"existence_weight": 1,
"shape_weight": 1000,
"dimensions_matching_weight": 0.5,
"encoder_hidden_dims": [512, 512],
"decoder_hidden_dims": [512, 512],
"shape_code_encoder_hidden_dims": [512, 512],
"shape_code_encoder_output_size": 512,
"shape_code_decoder_hidden_dims": [512, 512],
},
"bedroom_full1c1":{ # bedroom_full1b
"procedure": "bedroom2",
"split_train": "test64/train.json",
"split_test": "test64/test.json",
"num_epochs": 1000,
"batch_size": 64,
"learning_rate": 0.001,
"step_size": 250,
"step_gamma": 0.5,
"latent_size": 512,
"max_num_points": 20,
"geometric_weight": 1,
"orientation_weight": 1,
"categorical_weight": 1,
"existence_weight": 1,
"shape_weight": 1000,
"dimensions_matching_weight": 0.5,
"encoder_hidden_dims": [512, 512],
"decoder_hidden_dims": [512, 512],
"shape_code_encoder_hidden_dims": [512, 512],
"shape_code_encoder_output_size": 512,
"shape_code_decoder_hidden_dims": [512, 512],
},
"bedroom_full1c2":{ # bedroom_full1b
"procedure": "bedroom2",
"split_train": "test64/train.json",
"split_test": "test64/test.json",
"num_epochs": 1000,
"batch_size": 64,
"learning_rate": 0.001,
"step_size": 250,
"step_gamma": 0.5,
"latent_size": 256,
"max_num_points": 20,
"geometric_weight": 1,
"orientation_weight": 1,
"categorical_weight": 1,
"existence_weight": 1,
"shape_weight": 1000,
"dimensions_matching_weight": 0.5,
"encoder_hidden_dims": [512, 512],
"decoder_hidden_dims": [512, 512],
"shape_code_encoder_hidden_dims": [512, 512],
"shape_code_encoder_output_size": 512,
"shape_code_decoder_hidden_dims": [512, 512],
},
"bedroom_full1c3":{ # bedroom_full1b
"procedure": "bedroom2",
"split_train": "test64/train.json",
"split_test": "test64/test.json",
"num_epochs": 1000,
"batch_size": 64,
"learning_rate": 0.001,
"step_size": 250,
"step_gamma": 0.5,
"latent_size": 128,
"max_num_points": 20,
"geometric_weight": 1,
"orientation_weight": 1,
"categorical_weight": 1,
"existence_weight": 1,
"shape_weight": 1000,
"dimensions_matching_weight": 0.5,
"encoder_hidden_dims": [512, 512],
"decoder_hidden_dims": [512, 512],
"shape_code_encoder_hidden_dims": [512, 512],
"shape_code_encoder_output_size": 512,
"shape_code_decoder_hidden_dims": [512, 512],
},
"bedroom_full1c4":{ # bedroom_full1b
"procedure": "bedroom2",
"split_train": "test64/train.json",
"split_test": "test64/test.json",
"num_epochs": 1000,
"batch_size": 64,
"learning_rate": 0.001,
"step_size": 250,
"step_gamma": 0.5,
"latent_size": 64,
"max_num_points": 20,
"geometric_weight": 1,
"orientation_weight": 1,
"categorical_weight": 1,
"existence_weight": 1,
"shape_weight": 1000,
"dimensions_matching_weight": 0.5,
"encoder_hidden_dims": [512, 512],
"decoder_hidden_dims": [512, 512],
"shape_code_encoder_hidden_dims": [512, 512],
"shape_code_encoder_output_size": 512,
"shape_code_decoder_hidden_dims": [512, 512],
},
"bedroom_full2a":{ # bedroom_full1c1
"procedure": "bedroom2",
"split_train": "test64/train.json",
"split_test": "test64/test.json",
"num_epochs": 1000,
"batch_size": 64,
"learning_rate": 0.0005,
"step_size": 250,
"step_gamma": 0.5,
"latent_size": 512,
"max_num_points": 20,
"geometric_weight": 1,
"orientation_weight": 1,
"categorical_weight": 1,
"existence_weight": 1,
"shape_weight": 1000,
"dimensions_matching_weight": 0.5,
"encoder_hidden_dims": [512, 512],
"decoder_hidden_dims": [512, 512],
"shape_code_encoder_hidden_dims": [512, 512],
"shape_code_encoder_output_size": 512,
"shape_code_decoder_hidden_dims": [512, 512],
},
"bedroom_full2b":{ # bedroom_full1c1
"procedure": "bedroom2",
"split_train": "test64/train.json",
"split_test": "test64/test.json",
"num_epochs": 1000,
"batch_size": 64,
"learning_rate": 0.0002,
"step_size": 250,
"step_gamma": 0.5,
"latent_size": 512,
"max_num_points": 20,
"geometric_weight": 1,
"orientation_weight": 1,
"categorical_weight": 1,
"existence_weight": 1,
"shape_weight": 1000,
"dimensions_matching_weight": 0.5,
"encoder_hidden_dims": [512, 512],
"decoder_hidden_dims": [512, 512],
"shape_code_encoder_hidden_dims": [512, 512],
"shape_code_encoder_output_size": 512,
"shape_code_decoder_hidden_dims": [512, 512],
},
"bedroom_full2bALT":{ # with REGRESS_UNMATCHED_DIM = False
"procedure": "bedroom2",
"split_train": "test64/train.json",
"split_test": "test64/test.json",
"num_epochs": 1000,
"batch_size": 64,
"learning_rate": 0.0002,
"step_size": 250,
"step_gamma": 0.5,
"latent_size": 512,
"max_num_points": 20,
"geometric_weight": 1,
"orientation_weight": 1,
"categorical_weight": 1,
"existence_weight": 1,
"shape_weight": 1000,
"dimensions_matching_weight": 0.5,
"encoder_hidden_dims": [512, 512],
"decoder_hidden_dims": [512, 512],
"shape_code_encoder_hidden_dims": [512, 512],
"shape_code_encoder_output_size": 512,
"shape_code_decoder_hidden_dims": [512, 512],
},
"bedroom_full2c":{ # bedroom_full1c1
"procedure": "bedroom2",
"split_train": "test64/train.json",
"split_test": "test64/test.json",
"num_epochs": 1000,
"batch_size": 64,
"learning_rate": 0.0001,
"step_size": 250,
"step_gamma": 0.5,
"latent_size": 512,
"max_num_points": 20,
"geometric_weight": 1,
"orientation_weight": 1,
"categorical_weight": 1,
"existence_weight": 1,
"shape_weight": 1000,
"dimensions_matching_weight": 0.5,
"encoder_hidden_dims": [512, 512],
"decoder_hidden_dims": [512, 512],
"shape_code_encoder_hidden_dims": [512, 512],
"shape_code_encoder_output_size": 512,
"shape_code_decoder_hidden_dims": [512, 512],
},
"bedroom_full2d":{ # bedroom_full1c1
"procedure": "bedroom2",
"split_train": "test64/train.json",
"split_test": "test64/test.json",
"num_epochs": 1000,
"batch_size": 64,
"learning_rate": 0.00005,
"step_size": 250,
"step_gamma": 0.5,
"latent_size": 512,
"max_num_points": 20,
"geometric_weight": 1,
"orientation_weight": 1,
"categorical_weight": 1,
"existence_weight": 1,
"shape_weight": 1000,
"dimensions_matching_weight": 0.5,
"encoder_hidden_dims": [512, 512],
"decoder_hidden_dims": [512, 512],
"shape_code_encoder_hidden_dims": [512, 512],
"shape_code_encoder_output_size": 512,
"shape_code_decoder_hidden_dims": [512, 512],
},
"bedroom_full3a":{ # bedroom_full2a
"procedure": "bedroom2",
"split_train": "test64/train.json",
"split_test": "test64/test.json",
"num_epochs": 1000,
"batch_size": 64,
"learning_rate": 0.0005,
"step_size": 250,
"step_gamma": 0.5,
"latent_size": 512,
"max_num_points": 20,
"geometric_weight": 1,
"orientation_weight": 1,
"categorical_weight": 1,
"existence_weight": 1,
"shape_weight": 300,
"dimensions_matching_weight": 0.5,
"encoder_hidden_dims": [512, 512],
"decoder_hidden_dims": [512, 512],
"shape_code_encoder_hidden_dims": [512, 512],
"shape_code_encoder_output_size": 512,
"shape_code_decoder_hidden_dims": [512, 512],
},
"bedroom_full3b":{ # bedroom_full2a
"procedure": "bedroom2",
"split_train": "test64/train.json",
"split_test": "test64/test.json",
"num_epochs": 1000,
"batch_size": 64,
"learning_rate": 0.0005,
"step_size": 250,
"step_gamma": 0.5,
"latent_size": 512,
"max_num_points": 20,
"geometric_weight": 1,
"orientation_weight": 1,
"categorical_weight": 1,
"existence_weight": 1,
"shape_weight": 100,
"dimensions_matching_weight": 0.5,
"encoder_hidden_dims": [512, 512],
"decoder_hidden_dims": [512, 512],
"shape_code_encoder_hidden_dims": [512, 512],
"shape_code_encoder_output_size": 512,
"shape_code_decoder_hidden_dims": [512, 512],
},
"bedroom_full3c":{ # bedroom_full2a
"procedure": "bedroom2",
"split_train": "test64/train.json",
"split_test": "test64/test.json",
"num_epochs": 1000,
"batch_size": 64,
"learning_rate": 0.0005,
"step_size": 250,
"step_gamma": 0.5,
"latent_size": 512,
"max_num_points": 20,
"geometric_weight": 1,
"orientation_weight": 1,
"categorical_weight": 1,
"existence_weight": 1,
"shape_weight": 30,
"dimensions_matching_weight": 0.5,
"encoder_hidden_dims": [512, 512],
"decoder_hidden_dims": [512, 512],
"shape_code_encoder_hidden_dims": [512, 512],
"shape_code_encoder_output_size": 512,
"shape_code_decoder_hidden_dims": [512, 512],
},
"bedroom_full3d":{ # bedroom_full2a
"procedure": "bedroom2",
"split_train": "test64/train.json",
"split_test": "test64/test.json",
"num_epochs": 1000,
"batch_size": 64,
"learning_rate": 0.0005,
"step_size": 250,
"step_gamma": 0.5,
"latent_size": 512,
"max_num_points": 20,
"geometric_weight": 1,
"orientation_weight": 1,
"categorical_weight": 1,
"existence_weight": 1,
"shape_weight": 10,
"dimensions_matching_weight": 0.5,
"encoder_hidden_dims": [512, 512],
"decoder_hidden_dims": [512, 512],
"shape_code_encoder_hidden_dims": [512, 512],
"shape_code_encoder_output_size": 512,
"shape_code_decoder_hidden_dims": [512, 512],
},
# "bedroom_full3e":{ # bedroom_full2a
# "procedure": "bedroom2",
# "split_train": "test64/train.json",
# "split_test": "test64/test.json",
# "num_epochs": 1000,
# "batch_size": 64,
# "learning_rate": 0.0005,
# "step_size": 250,
# "step_gamma": 0.5,
# "latent_size": 512,
# "max_num_points": 20,
# "geometric_weight": 1,
# "orientation_weight": 1,
# "categorical_weight": 1,
# "existence_weight": 1,
# "shape_weight": 3,
# "dimensions_matching_weight": 0.5,
# "encoder_hidden_dims": [512, 512],
# "decoder_hidden_dims": [512, 512],
# "shape_code_encoder_hidden_dims": [512, 512],
# "shape_code_encoder_output_size": 512,
# "shape_code_decoder_hidden_dims": [512, 512],
# },
# "bedroom_full3f":{ # bedroom_full2a
# "procedure": "bedroom2",
# "split_train": "test64/train.json",
# "split_test": "test64/test.json",
# "num_epochs": 1000,
# "batch_size": 64,
# "learning_rate": 0.0005,
# "step_size": 250,
# "step_gamma": 0.5,
# "latent_size": 512,
# "max_num_points": 20,
# "geometric_weight": 1,
# "orientation_weight": 1,
# "categorical_weight": 1,
# "existence_weight": 1,
# "shape_weight": 1,
# "dimensions_matching_weight": 0.5,
# "encoder_hidden_dims": [512, 512],
# "decoder_hidden_dims": [512, 512],
# "shape_code_encoder_hidden_dims": [512, 512],
# "shape_code_encoder_output_size": 512,
# "shape_code_decoder_hidden_dims": [512, 512],
# },
"bedroom_partial6": {
"procedure": "bedroom1",
"split_train": "test64/all.json",
"split_test": "test64/all.json",
"num_epochs": 800,
"batch_size": 64,
"learning_rate": 0.001,
"step_size": 200,
"step_gamma": 0.5,
"latent_size": 1024,
"max_num_points": 5,
"geometric_weight": 1,
"orientation_weight": 1,
"categorical_weight": 1,
"existence_weight": 1,
"shape_weight": 1000,
"dimensions_matching_weight": 0.5,
"encoder_hidden_dims": [512, 512, 512, 512],
"decoder_hidden_dims": [512, 512],
"shape_code_encoder_hidden_dims": [512, 512],
"shape_code_encoder_output_size": 512,
"shape_code_decoder_hidden_dims": [512, 512],
},
"bedroom_partial6e": {
"procedure": "bedroom1",
"split_train": "test64/all.json",
"split_test": "test64/all.json",
"num_epochs": 1001,
"batch_size": 64,
"learning_rate": 0.001,
"step_size": 250,
"step_gamma": 0.5,
"latent_size": 1024,
"max_num_points": 5,
"geometric_weight": 1,
"orientation_weight": 1,
"categorical_weight": 1,
"existence_weight": 1,
"shape_weight": 1000,
"dimensions_matching_weight": 0.5,
"encoder_hidden_dims": [512, 512],
"decoder_hidden_dims": [512, 512],
"shape_code_encoder_hidden_dims": [512, 512],
"shape_code_encoder_output_size": 512,
"shape_code_decoder_hidden_dims": [512, 512],
},
"bedroom_partial6h1": { # 6g
"procedure": "bedroom1",
"split_train": "test64/train.json",
"split_test": "test64/test.json",
"num_epochs": 1001,
"batch_size": 64,
"learning_rate": 0.001,
"step_size": 250,
"step_gamma": 0.5,
"latent_size": 512,
"max_num_points": 5,
"geometric_weight": 1,
"orientation_weight": 1,
"categorical_weight": 1,
"existence_weight": 1,
"shape_weight": 1000,
"dimensions_matching_weight": 0.5,
"encoder_hidden_dims": [512, 512],
"decoder_hidden_dims": [512, 512],
"shape_code_encoder_hidden_dims": [512, 512],
"shape_code_encoder_output_size": 512,
"shape_code_decoder_hidden_dims": [512, 512],
},
"bedroom_partial6h2": { # 6g BEST
"procedure": "bedroom1",
"split_train": "test64/train.json",
"split_test": "test64/test.json",
"num_epochs": 1001,
"batch_size": 64,
"learning_rate": 0.001,
"step_size": 250,
"step_gamma": 0.5,
"latent_size": 256,
"max_num_points": 5,
"geometric_weight": 1,
"orientation_weight": 1,
"categorical_weight": 1,
"existence_weight": 1,
"shape_weight": 1000,
"dimensions_matching_weight": 0.5,
"encoder_hidden_dims": [512, 512],
"decoder_hidden_dims": [512, 512],
"shape_code_encoder_hidden_dims": [512, 512],
"shape_code_encoder_output_size": 512,
"shape_code_decoder_hidden_dims": [512, 512],
},
"bedroom_partial6h3": { # 6g
"procedure": "bedroom1",
"split_train": "test64/train.json",
"split_test": "test64/test.json",
"num_epochs": 1001,
"batch_size": 64,
"learning_rate": 0.001,
"step_size": 250,
"step_gamma": 0.5,
"latent_size": 128,
"max_num_points": 5,
"geometric_weight": 1,
"orientation_weight": 1,
"categorical_weight": 1,
"existence_weight": 1,
"shape_weight": 1000,
"dimensions_matching_weight": 0.5,
"encoder_hidden_dims": [512, 512],
"decoder_hidden_dims": [512, 512],
"shape_code_encoder_hidden_dims": [512, 512],
"shape_code_encoder_output_size": 512,
"shape_code_decoder_hidden_dims": [512, 512],
},
"bedroom_partial6h4": { # 6g
"procedure": "bedroom1",
"split_train": "test64/train.json",
"split_test": "test64/test.json",
"num_epochs": 1001,
"batch_size": 64,
"learning_rate": 0.001,
"step_size": 250,
"step_gamma": 0.5,
"latent_size": 64,
"max_num_points": 5,
"geometric_weight": 1,
"orientation_weight": 1,
"categorical_weight": 1,
"existence_weight": 1,
"shape_weight": 1000,
"dimensions_matching_weight": 0.5,
"encoder_hidden_dims": [512, 512],
"decoder_hidden_dims": [512, 512],
"shape_code_encoder_hidden_dims": [512, 512],
"shape_code_encoder_output_size": 512,
"shape_code_decoder_hidden_dims": [512, 512],
}
}
procedure_params_all = {
"bedroom1": {
"room_name": "Bedroom1",
"geometry_size": 2 + 2, # position and dimensions
"orientation_size": 2,
"num_categories": 2,
"shape_size": 512,
},
"bedroom2": {
"room_name": "Bedroom2",
"geometry_size": 2 + 2, # position and dimensions
"orientation_size": 2,
"num_categories": 9,
"shape_size": 512,
},
}
colors = {
"bed": "blue",
"cabinet": "grey",
"chair": "red",
"smallStool": "red",
"largeSofa": "yellow",
"largeTable": "brown",
"nightstand": "orange",
"tvStand": "green",
"smallTable": "purple",
# "lighting": "yellow",
}
colors_light = {
"bed": "#ADD8EE",
"cabinet": "#D3D3D3",
"nightstand": "#FFB580",
"chair": "#FF7F7F",
"smallStool": "#FF7F7F",
"largeSofa": "#FFFF99",
"largeTable": "#C89D7C",
"tvStand": "#B0EEB0",
"smallTable": "#b19cd9",
# "lighting": "#FFFF99",
}
params = params_history[model_name]
procedure = params["procedure"]
procedure_params = procedure_params_all[procedure]
num_examples = params["num_examples"] if "num_examples" in params else None
num_epochs = params["num_epochs"]
batch_size = params["batch_size"]
learning_rate = params["learning_rate"]
step_size = params["step_size"]
step_gamma = params["step_gamma"]
latent_size = params["latent_size"]
max_num_points = params["max_num_points"]
room_name = procedure_params["room_name"]
geometry_size = procedure_params["geometry_size"]
orientation_size = procedure_params["orientation_size"]
num_categories = procedure_params["num_categories"]
shape_size = procedure_params["shape_size"]
point_size_intermediate = geometry_size + orientation_size + num_categories
point_size = point_size_intermediate + shape_size
geometric_weight = params["geometric_weight"]
orientation_weight = params["orientation_weight"]
categorical_weight = params["categorical_weight"]
existence_weight = params["existence_weight"]
shape_weight = params["shape_weight"]
dimensions_matching_weight = params["dimensions_matching_weight"]
encoder_hidden_dims = params["encoder_hidden_dims"]
decoder_hidden_dims = params["decoder_hidden_dims"]
shape_code_encoder_hidden_dims = params["shape_code_encoder_hidden_dims"]
shape_code_encoder_output_size = params["shape_code_encoder_output_size"]
shape_code_decoder_hidden_dims = params["shape_code_decoder_hidden_dims"]
split_train = params["split_train"]
split_test = params["split_test"]
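# Quick sanity note (sketch): for "bedroom_full2b" (procedure "bedroom2") each object
# vector is geometry (4) + orientation (2) + categories (9) = 15 intermediate dims,
# plus a 512-d shape code, so point_size_intermediate == 15 and point_size == 527.
# print(point_size_intermediate, point_size)  # uncomment to verify: 15 527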
|
StarcoderdataPython
|
3253644
|
import math
import numpy
class DelayBlock(object):
""" A block of delays for a subvertex
"""
def __init__(
self, n_delay_stages, delay_per_stage, vertex_slice):
self._delay_per_stage = delay_per_stage
self._n_delay_stages = n_delay_stages
n_words_per_row = int(math.ceil(vertex_slice.n_atoms / 32.0))
self._delay_block = numpy.zeros(
(n_delay_stages, n_words_per_row), dtype="uint32")
def add_delay(self, source_id, stage):
word_id = int(source_id / 32.0)
bit_id = source_id - (word_id * 32)
self._delay_block[stage - 1][word_id] |= (1 << bit_id)
@property
def delay_block(self):
return self._delay_block
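# Minimal usage sketch (not part of the original module): `_Slice` below is a
# hypothetical stand-in for the vertex slice object the constructor expects,
# providing only the `n_atoms` attribute that is actually read. It shows how a
# delay bit is packed: source_id 70 lands in word 2 (70 // 32), bit 6 (70 - 64)
# of the row for stage 3.
if __name__ == "__main__":
    class _Slice(object):
        n_atoms = 100

    block = DelayBlock(n_delay_stages=4, delay_per_stage=16, vertex_slice=_Slice())
    block.add_delay(source_id=70, stage=3)
    assert block.delay_block[2][2] == (1 << 6)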
|
StarcoderdataPython
|
71110
|
import re
from collections import namedtuple
from copy import copy
from difflib import SequenceMatcher
from pprint import pformat
from bs4 import BeautifulSoup
from bs4 import NavigableString
from bs4 import Tag
logger = None
def restore_refs(old_content: str,
new_content: str,
resolved_ids: list,
logger_,
resolve_changed: bool = False):
'''
Restore inline-comments from the old_content in the new_content and return
the resulting html string.
    If `resolve_changed` is False, only restore the comments in the text that
wasn't changed.
'''
# setting up global logger
global logger
logger = logger_
old_bs = BeautifulSoup(old_content, 'html.parser')
new_bs = BeautifulSoup(new_content, 'html.parser')
if is_empty(new_bs):
logger.debug('New content is empty, all inline comments will be omitted.')
return new_content
remove_outline_resolved(old_bs)
ref_dict = generate_ref_dict(old_bs)
new_strings = [s for s in new_bs.strings if s.strip()]
old_strings = [s for s in old_bs.strings if s.strip()]
places = find_place2(old_strings, new_strings, ref_dict)
correct_places(places, new_strings)
equal, not_equal = divide_places(places)
restore_equal_refs(equal, new_strings)
if not resolve_changed:
insert_unequal_refs(not_equal, new_strings, resolved_ids)
return str(new_bs)
def is_empty(soup):
'''Check whether `soup` is an empty page (whitespaces ignored)'''
for s in soup.strings:
if s.strip():
return False
return True
def remove_outline_resolved(bs: BeautifulSoup):
"""
Remove from bs object all inline comments which have nested comments inside
    them. Only resolved comments can be nested like this, and they cause a lot of trouble.
In place.
"""
logger.debug('remove_outline_resolved START')
while True:
restart = False
comments = bs.find_all(re.compile('ac:inline-comment-marker'))
for comment in comments:
for child in comment.children:
if child.name == 'ac:inline-comment-marker':
logger.debug(f'Comment has nested comments, removing: \n{comment}')
basic_unwrap(comment)
restart = True
break
if restart:
break
else:
logger.debug('remove_outline_resolved END')
return
def basic_unwrap(element):
"""
Unwrap element from its tag in place. Concatenate adjacent NavigableStrings
    which may have appeared after unwrapping:
<b>'One '<to_unwrap>' Two '</to_unwrap>' Three'</b>
<b>'One '' Two '' Three'</b>
<b>'One Two Three'</b>
"""
parent = element.parent
element.unwrap()
groupped = []
accumulate = False
for el in parent.contents:
if isinstance(el, NavigableString):
if accumulate:
groupped[-1].append(el)
else:
groupped.append([el])
accumulate = True
else:
accumulate = False
groupped = [g for g in groupped if len(g) > 1]
for g in groupped:
g[0].replace_with(''.join(g))
g.pop(0)
for i in g:
i.extract()
def generate_ref_dict(bs: BeautifulSoup) -> dict:
'''
Receives a BeautifulSoup object and generates a dictionary with info about
inline comments.
Output dictionary structure:
Key: python id of a string, which contains the inline comment. It's one of
the strings that may be obtained by BeautifulSoup.strings method.
Value: {info_dict}, dictionary with info on the inline comment.
{info_dict} structure:
{
'full': Full unwrapped NavigableString which contained inline comment. It
is in fact right now a part of the bs tree.
'before': NavigableString that was before the inline comment until next tag
or end of parent OR another {info_dict} if there were several
comments in one paragraph.
'comment': The inline comment tag which was unwrapped, with commented text
included.
'after': NavigableString that was after the inline comment until next tag
or end of parent.
'ref_id': For convenience, the id of a comment from the 'ac:ref' attribute.
}
'''
logger.debug('generate_ref_dict START')
logger.debug('Collecting comments from the old article (remote)')
result = {}
refs = bs.find_all(re.compile('ac:inline-comment-marker'))
for ref in refs:
ref_id = ref.attrs['ac:ref']
try:
full, (before, comment, after) = unwrap(ref)
except RuntimeError:
logger.debug("Inline comment tag has other tags inside. We can't"
f" process such yet, skipping:\n{ref}")
continue
cs = dict(full=full,
ref_id=ref_id,
before=before,
comment=comment,
after=after)
        # if 'before string' was already added to result, absorb the comment
# dictionary instead
if cs['before'] and id(cs['before']) in result:
cs['before'] = result.pop(id(cs['before']))
result[id(cs['full'])] = cs
logger.debug(f'Collected comments:\n\n{pformat(result)}')
logger.debug('generate_ref_dict END')
return result
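# Illustration (sketch, hypothetical markup): for old content parsed from
#   <p>before <ac:inline-comment-marker ac:ref="r1">target</ac:inline-comment-marker> after</p>
# the returned dict maps id('before target after') to
#   {'full': 'before target after', 'before': 'before ', 'comment': <the unwrapped marker tag>,
#    'after': ' after', 'ref_id': 'r1'}
# where 'before target after' is the single NavigableString now sitting in the tree.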
def unwrap(element):
'''
Unwrap an element from a tag in place. The tag must only contain one string inside.
The string will be connected to text before and after tag.
Function returns two elements:
full_string, (before, element, after)
    - full_string: a full NavigableString which replaced the tag and the text before/after;
    - A tuple of three elements:
        - before: the original NavigableString that was before the tag, or None if there wasn't any.
        - element: the original tag itself.
        - after: the original NavigableString that was after the tag, or None if there wasn't any.
'''
before = after = None
children = list(element.children)
if len(children) > 1:
raise RuntimeError('Tag should wrap just one string')
if len(children) == 1 and not isinstance(children[0], NavigableString):
raise RuntimeError('Tag should include only string')
content = element.text
if isinstance(element.previous_sibling, NavigableString):
before = element.previous_sibling.extract()
content = before + content
if isinstance(element.next_sibling, NavigableString):
after = element.next_sibling.extract()
content = content + after
ns = NavigableString(content)
element.replace_with(ns)
return ns, (before, element, after)
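# Illustration (sketch): given soup = BeautifulSoup('<p>one <b>two</b> three</p>', 'html.parser'),
# calling full, (before, tag, after) = unwrap(soup.b) leaves the <p> holding the single
# string 'one two three' and returns full == 'one two three', before == 'one ',
# tag == <b>two</b>, after == ' three'.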
def find_place2(old_strings, new_strings: list, ref_dict: dict) -> dict:
'''
Compare `old_strings` and `new_strings`.
For each element of ref_dict: Find strings in `new_strings` which correspond
to the commented string, described by `ref_dict` element. This string is one
of the `old_strings`.
Return a list of tuples, each containing three elements:
[(info_dict, indeces, equal)]
    - info_dict: an {info_dict} of the inline comment.
    - indeces: a list of indeces of the `new_strings` which correspond to the
      inline comment in the old text.
    - equal: a boolean value which tells whether the commented paragraph was changed
      or not. True means unchanged, False means changed.
'''
logger.debug('find_place2 START')
result = []
# strip all strings from indentations and formatting for comparison
s_old_strings = [s.strip() for s in old_strings]
s_new_strings = [s.strip() for s in new_strings]
sm = SequenceMatcher(None, s_old_strings, s_new_strings)
sm.ratio()
Opcode = namedtuple('opcode', ('tag', 'a_s', 'a_e', 'b_s', 'b_e'))
opcodes = [Opcode(*opc) for opc in sm.get_opcodes()]
logger.debug(f'Opcodes after matching: {sm.get_opcodes()}')
# We use IDs to determine the correct string because the tree may contain
# strings with equal values, but located in different parts of the tree. ID
# allows to determine the correct string precisely.
old_string_ids = [id(s) for s in old_strings]
for cs_id in ref_dict:
equal = False
ind = old_string_ids.index(cs_id)
for i in range(len(opcodes)):
if opcodes[i].a_s <= ind < opcodes[i].a_e:
break
else:
i = None
if i is None:
continue
if opcodes[i].tag == 'equal':
indeces = [opcodes[i].b_s + (ind - opcodes[i].a_s)]
equal = True
elif opcodes[i].tag == 'replace':
indeces = list(range(opcodes[i].b_s, opcodes[i].b_e))
elif opcodes[i].tag == 'delete':
indeces = []
if i and opcodes[i - 1].tag == 'insert':
indeces.extend(range(opcodes[i - 1].b_s, opcodes[i - 1].b_e))
if i + 2 <= len(opcodes) and opcodes[i + 1].tag == 'insert':
indeces.extend(range(opcodes[i + 1].b_s, opcodes[i + 1].b_e))
if not indeces:
indeces.append(opcodes[i].b_s - 1 if opcodes[i].b_s else 0)
indeces.append(opcodes[i].b_e if opcodes[i].b_e + 1 <= len(new_strings) else opcodes[i].b_e - 1)
result.append((ref_dict[cs_id], indeces, equal))
logger.debug(f'List of found places:\n\n{pformat(result)}')
logger.debug('find_place2 END')
return result
def add_unique(a: list, b: list, at_beginning: bool = True) -> None:
'''
Add only unique elements from b to a in place.
    If `at_beginning` is True, elements are inserted at the beginning
    of the `a` list. If False, they are appended at the end.'''
for i in b:
if i not in a:
if at_beginning:
a.insert(0, i)
else:
a.append(i)
def correct_places(places: list, strings: list):
'''
    Looks for strings which are inside Confluence tags <ac:...> and removes such
    strings from the place index lists (we cannot add inline comments into macros).
In place.
:param places: list of tuples, got from find_place2 function:
[(info_dict, indeces, equal)]
:param strings: list of NavigableStrings from the new content, which are
right now a part of the tree.
'''
logger.debug('correct_places START')
for place in places:
to_remove = []
for i in range(len(place[1])):
index = place[1][i]
cur = strings[index]
while cur:
if cur.name and cur.name.startswith('ac:'):
logger.debug(f"string '{strings[index]}' is inside macro {cur.name}"
" and will be removed")
to_remove.append(i)
break
cur = cur.parent
for i in reversed(to_remove):
s = place[1].pop(i)
logger.debug(f"Removed string [{s}]: '{strings[s]}'")
logger.debug('correct_places END')
def divide_places(places: list) -> dict:
'''
Takes a list of tuples, got from find_place2 function:
[(info_dict, indeces, equal)]
Looks for the places with equal == True and gathers them into a separate list.
Removes all indeces which were mentioned in `equal` places from other places.
Gathers references in the correct order from the remaining places and saves them
in a dictionary with key = string index, value = list of ref_ids, which point
to this string.
Returns a tuple with two items:
(equal, not_equal)
- equal = [(info_dict, indeces, equal)] : list of equal places;
- not_equal = {index: [ref_list]} : dictionary with references for strings
which are not equal.
'''
logger.debug('divide_places START')
equal_places = [(info_dict, copy(indeces), equal)
for info_dict, indeces, equal in places if equal]
# remove all places where equal strings are mentioned
for _, equal_indeces, _ in equal_places:
for _, indeces, _ in places:
equal_index = equal_indeces[0]
if equal_index in indeces:
indeces.pop(indeces.index(equal_index))
# remove all places where strings are empty after prev. stage
places = [p for p in places if p[1]]
def get_refs(info_dict: dict) -> list:
'''Get all ref_ids from a nested place in the correct order'''
refs = [info_dict['ref_id']]
if isinstance(info_dict['before'], dict):
add_unique(refs, get_refs(info_dict['before']))
return refs
# make a dictionary with refs list for each string index
unequal = {}
for info_dict, indeces, _ in places:
refs = get_refs(info_dict)
for pos in indeces:
add_unique(unequal.setdefault(pos, []), refs, False)
logger.debug(f'Equal places:\n\n{pformat(equal_places)}\n\n'
f'References for changed strings:\n\n{pformat(unequal)}')
logger.debug('divide_places END')
return equal_places, unequal
def restore_equal_refs(places: list, new_strings: list) -> None:
"""
Receive a list of `place` tuples and a list of strings of the new tree `new_strings`:
places = [(info_dict, indeces, equal)]
new_strings = [NavigableString]
Restore the inline comments in corresponding strings of new_strings (determined
    by place[1]) in the same way they were present in the old string. The way is
determined by the `place` tuple.
Function returns nothing, the comments are restored in place.
"""
def get_content_list(ref_dict: dict) -> list:
'''
Get consequetive list of html elements and strings in the correct order
to be inserted instead of the target string.
'''
content = []
if isinstance(ref_dict['before'], dict):
content.extend(get_content_list(ref_dict['before']))
elif ref_dict['before'] is not None:
content.append(ref_dict['before'])
content.append(ref_dict['comment'])
# if isinstance(ref_dict['after'], dict):
# content.extend(get_content_list(ref_dict['after']))
if ref_dict['after'] is not None:
content.append(ref_dict['after'])
return content
logger.debug('restore_equal_refs START')
for info_dict, indeces, _ in places:
logger.debug(f'Source info_dict:\n\n{pformat(info_dict)}')
content_list = get_content_list(info_dict)
logger.debug(f'Content list to insert:\n\n{pformat(content_list)}')
target = new_strings[indeces[0]]
logger.debug(f'String to be replaced: {target}')
# we use copy to detach element from previous tree
new_elem = copy(content_list[0])
target.replace_with(new_elem)
target = new_elem
for i in range(1, len(content_list)):
new_elem = copy(content_list[i])
target.insert_after(new_elem)
target = new_elem
logger.debug('restore_equal_refs END')
def insert_unequal_refs(unequal: dict, new_strings: list, resolved_ids: list):
'''
Receive an `unequal` dictionary with ref_ids and a list of strings of the
new tree `new_strings`:
unequal = {index: [list_of_ref_ids]}
new_strings = [NavigableString]
resolved_ids = [resolved_ref_ids]
Wrap each NavigableString determined by index from `unequal` dictionary in
the corresponding inline-comment tag from the dict value. If the value
    contains several ref_ids, divide the string into equal chunks of text for
    each ref_id. If one or more of these several ref_ids are resolved, they are
filtered out for better output. They will be removed from the source.
Function returns nothing, the comments are restored in place.
'''
logger.debug('insert_unequal_refs START')
for pos, refs in unequal.items():
logger.debug(f'Inserting refs into string #{pos}')
if len(refs) > 1:
logger.debug('More than one ref claim for this string.'
'Leaving out resolved: '
f'{[ref for ref in refs if ref in resolved_ids]}')
refs = [ref for ref in refs if ref not in resolved_ids]
if not refs:
logger.debug('All refs for the string were resolved. Skipping')
continue
logger.debug(f'Refs to insert: {refs}')
contents = []
ns = new_strings[pos]
logger.debug(f'String to be replaced: {ns}')
        # if there are more refs than chars in the string, ignore the rest
num_refs = min(len(refs), len(ns))
chunk_size = len(ns) // num_refs
logger.debug(f'Dividing string equally into {num_refs} chunks by {chunk_size} chars.')
for i in range(num_refs):
tag = Tag(name='ac:inline-comment-marker',
attrs={'ac:ref': refs[i]})
start = i * chunk_size
end = start + chunk_size if i != num_refs - 1 else None
tag.string = ns[start:end]
contents.append(tag)
ns.replace_with(contents[0])
target = contents[0]
for i in range(1, len(contents)):
target.insert_after(contents[i])
target = contents[i]
logger.debug('insert_unequal_refs END')
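if __name__ == "__main__":
    # Minimal smoke test (a sketch with made-up HTML, not part of the original
    # module): the first paragraph is unchanged between versions, so its inline
    # comment should be restored; the second paragraph was edited.
    import logging
    logging.basicConfig(level=logging.INFO)
    old_body = (
        '<p>intro <ac:inline-comment-marker ac:ref="r1">commented text'
        '</ac:inline-comment-marker> tail</p>'
        '<p>another paragraph</p>'
    )
    new_body = '<p>intro commented text tail</p><p>another paragraph, edited</p>'
    merged = restore_refs(old_body, new_body, resolved_ids=[],
                          logger_=logging.getLogger(__name__))
    print(merged)
    # Expected: the marker tag reappears around 'commented text' in the first
    # paragraph, while the edited second paragraph stays plain.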
|
StarcoderdataPython
|