max_stars_repo_path (string, 3–269 chars) | max_stars_repo_name (string, 4–119 chars) | max_stars_count (int64, 0–191k) | id (string, 1–7 chars) | content (string, 6–1.05M chars) | score (float64, 0.23–5.13) | int_score (int64, 0–5)
---|---|---|---|---|---|---|
applitools/target.py | applitools/eyes.selenium.python | 11 | 12791051 | <reponame>applitools/eyes.selenium.python
from applitools.selenium.target import * # noqa
from applitools.core import logger
| 1.148438 | 1 |
main.py | gonzalofh/travelban-notification-gcloud-function | 0 | 12791052 | import base64
import os
from datastore import DataStore
from emailsender import EmailSender
sendgrid_api_key = os.environ.get('SENDGRID_EMAIL_API_KEY', 'Specified environment variable is not set.')
travel_site_url = os.environ.get('TRAVEL_SITE_URL', 'Specified environment variable is not set.')
sender = os.environ.get('SENDER', 'Specified environment variable is not set.')
datastore_client = DataStore()
email_sender = EmailSender(sendgrid_api_key)
def get_message_content(last_update):
return ("US Gov Travel restrictions page was recently updated (" + last_update + ").\n"
"Go to " + travel_site_url)
def send_email_notification(event, context):
last_update = base64.b64decode(event['data']).decode('utf-8')
context = datastore_client.get_context()
last_updated_saved = context['last_updated_at']
print('Last saved update date was: ' + last_updated_saved)
print('Current update date is: ' + last_update)
if last_update != last_updated_saved:
print('A new update was pushed. Updating database and notifying subscribers')
datastore_client.update_context(context, last_update)
recipients = datastore_client.get_recipients()
content = get_message_content(last_update)
subject = 'Travel Ban Cron Job Notification'
email_sender.send(sender, recipients, subject, content) | 2.25 | 2 |
examples/ex_toolkit.py | RI-imaging/FDTD_sinogram | 0 | 12791053 | """Tools used by the examples """
import numpy as np
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__))+"/../meep_tomo")
from meep_tomo import extract, common
import ex_bpg
def compute_metrices(tomo_path, approx, autofocus=False):
    """Compute RMS and TV metrics for a MEEP-simulated ODT reconstruction
Parameters
----------
tomo_path: str
Simulation directory or .npy file of a reconstructed simulation
approx: str
Approximation to use, one of ["radon", "born", "rytov"]
autofocus: bool
        If `True`, perform autofocusing. If `False`, use the exact
focusing (the center of rotation in the simulation).
This only makes sense if `tomo_path` is not an .npy file.
Returns
-------
rms, tv: floats
root-mean-square and total variation errors
Notes
-----
A second call with the same arguments will be fast, because the
result is saved on disk.
See Also
--------
metric_rms, metric_tv: The used metrics
"""
assert approx in ["radon", "born", "rytov"]
tomo_path = os.path.abspath(tomo_path)
if os.path.isdir(tomo_path):
sim_dir = os.path.abspath(tomo_path)
res_dir = os.path.abspath(tomo_path)+"_results"
common.mkdir_p(res_dir)
metr_file = os.path.join(res_dir, "metrices.txt")
npy_file = False
elif tomo_path.endswith(".npy"):
res_dir = os.path.dirname(os.path.abspath(tomo_path))
sim_dir = res_dir[:-8]
msg = "Simulation directory not found! The .npy file should be in a " +\
"folder named after the simulation with '_results' appended!"
assert os.path.exists(sim_dir), msg
metr_file = tomo_path[:-4]+"_metrices.txt"
npy_file = tomo_path
else:
raise ValueError("simulation must be a directory or an .npy file!")
tv = None
ss = None
# Check if the results_file exists and read parameters
if os.path.exists(metr_file):
with open(metr_file, "r") as fd:
lines = fd.readlines()
for line in lines:
line = line.strip()
if line.startswith("TV_"+approx):
try:
tv = float(line.split()[1])
except:
pass
elif line.startswith("SS_"+approx):
try:
ss = float(line.split()[1])
except:
pass
if tv is None or ss is None:
if npy_file:
ri = np.load(npy_file)
assert autofocus == False, "`autofocus` has no effect for .npy files!"
else:
# Recompute everything
ri = ex_bpg.backpropagate_fdtd_data(sim_dir,
approximation=approx,
autofocus=autofocus)
# reference
riref = extract.get_tomo_ri_structure(sim_dir)
ss = metric_rms(ri, riref)
tv = metric_tv(ri, riref)
        # Save the result in the results file
with open(metr_file, "a") as resfdata:
lines = "# metrices of ri-riref\n"
lines += "TV_{} {:.15e}\n".format(approx, tv)
lines += "SS_{} {:.15e}\n".format(approx, ss)
resfdata.writelines(lines)
return ss, tv
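# Hedged usage sketch (not part of the original example; "sim_phantom" is a
# hypothetical simulation directory produced by the FDTD pipeline). A second
# call with the same arguments would read the cached metrices.txt on disk
# instead of recomputing the reconstruction.
def _example_compute_metrices():
    rms, tv = compute_metrices("sim_phantom", approx="rytov")
    print("RMS: {:.3e}  TV: {:.3e}".format(rms, tv))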
def cutout(a):
"""Cut out circle/sphere from 2D/3D square/cubic array"""
x = np.arange(a.shape[0])
c = a.shape[0] / 2
if len(a.shape) == 2:
x = x.reshape(-1, 1)
y = x.reshape(1, -1)
zero = ((x-c)**2 + (y-c)**2) < c**2
elif len(a.shape) == 3:
x = x.reshape(-1, 1, 1)
y = x.reshape(1, -1, 1)
        z = x.reshape(1, 1, -1)
zero = ((x-c)**2 + (y-c)**2 + (z-c)**2) < c**2
else:
raise ValueError("Cutout array must have dimension 2 or 3!")
a *= zero
#tool.arr2im(a, scale=True).save("test.png")
return a
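# Hedged sketch (illustrative only): applying the circular cutout to a small
# 2-D array; everything outside the inscribed circle is zeroed in place.
def _example_cutout():
    arr = np.ones((8, 8))
    disc = cutout(arr)
    print(int(disc.sum()), "of", arr.size, "pixels kept")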
def metric_rms(ri, ref):
"""Root mean square metric (normalized)
This metric was used and described in
    Müller et al., "ODTbrain: a Python library for full-view,
dense diffraction tomography" Bioinformatics 2015
"""
rms = np.sum(cutout(ri.real-ref.real)**2)
norm = np.sum(cutout(ref.real-1)**2)
return np.sqrt(rms/norm)
def metric_tv(ri, ref):
"""Total variation metric (normalized)
This metric was used and described in
    Müller et al., "ODTbrain: a Python library for full-view,
dense diffraction tomography" Bioinformatics 2015
"""
grad = np.gradient(ri.real-ref)
result = 0
for g in grad:
result += np.sum(cutout(np.abs(g)))
tv = result / len(grad)
norm = np.sum(cutout(ref.real-1)**2)
return np.sqrt(tv/norm)
| 2.859375 | 3 |
Data Collection/DownloadHash.py | ahlashkari/AndroidAppStaticlyzer | 8 | 12791054 | <gh_stars>1-10
import argparse
import csv
import pyandrozoo
def Download(inputfile):
    # The list of hashes can be created using the VT_Labeling.py script
with open(inputfile) as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
apks = []
for row in readCSV:
apk = row[0]
apks.append(apk)
print(apks)
print("Hash files list uploaded ...")
    androzoo = pyandrozoo.pyAndroZoo('<IP to access the Androzoo Dataset>')
androzoo.get(apks)
def main():
    script = argparse.ArgumentParser(description="You have to provide the CSV file with the list of hashes to download as input")
script.add_argument('-i', '--input', required=True)
args = script.parse_args()
Download(args.input)
print("apks downloaded ...")
print("Done")
if __name__ == '__main__':
main()
| 3.09375 | 3 |
src/apis/json_schemas/auth.py | beratakuzum/spam-detector-api | 3 | 12791055 |
REGISTER_SCHEMA = {
'$schema': 'http://json-schema.org/schema#',
'type': 'object',
'properties': {
'username': {
"type": "string",
'minLength': 3,
'maxLength': 100
},
'password': {
"type": "string",
'minLength': 5,
'maxLength': 100
}
},
"additionalProperties": False,
'required': ['username', 'password']
} | 1.5625 | 2 |
model.py | jasonjimnz/dondelotiro-api | 2 | 12791056 | <reponame>jasonjimnz/dondelotiro-api
import json
import csv
from neo4j.v1 import GraphDatabase
from neo4j.v1 import basic_auth
from bs4 import BeautifulSoup
from config import GRAPH_DATABASE as gdb
graph_driver = GraphDatabase.driver(
'bolt://%s:%s' % (
gdb['host'],
gdb['port']
), auth=basic_auth(
gdb['user'], gdb['auth']
)
)
graph_session = graph_driver.session()
class PuntosLimpiosExtractor(object):
file = None
puntos_limpios = []
def __init__(self, file_path):
self.file = open(file_path).read()
def load_punto_limpio(self, **kwargs):
self.puntos_limpios.append(kwargs)
def read_puntos_limpios_xml(self):
b_object = BeautifulSoup(self.file, 'html.parser')
lista_puntos = b_object.find_all('contenido')
for l in lista_puntos:
self.load_punto_limpio(
**{
"id_entidad": l.select_one('atributo[nombre="ID-ENTIDAD"]').text,
"nombre": l.select_one('atributo[nombre="NOMBRE"]').text,
"horario": l.select_one('atributo[nombre="HORARIO"]').text,
"transporte": l.select_one('atributo[nombre="TRANSPORTE"]').text,
"descripcion": l.select_one('atributo[nombre="DESCRIPCION"]').text,
"accesibilidad": l.select_one('atributo[nombre="ACCESIBILIDAD"]').text,
"content-url": l.select_one('atributo[nombre="CONTENT-URL"]').text,
"localizacion": "%s %s %s" % (
l.select_one('atributo[nombre="CLASE-VIAL"]').text,
l.select_one('atributo[nombre="NOMBRE-VIA"]').text,
l.select_one('atributo[nombre="NUM"]').text
),
"localidad": l.select_one('atributo[nombre="LOCALIDAD"]').text,
"provincia": l.select_one('atributo[nombre="PROVINCIA"]').text,
"codigo_postal": l.select_one('atributo[nombre="CODIGO-POSTAL"]').text,
"barrio": l.select_one('atributo[nombre="BARRIO"]').text,
"distrito": l.select_one('atributo[nombre="DISTRITO"]').text,
"coord_x": l.select_one('atributo[nombre="COORDENADA-X"]').text,
"coord_y": l.select_one('atributo[nombre="COORDENADA-Y"]').text,
"lat": l.select_one('atributo[nombre="LATITUD"]').text,
"lon": l.select_one('atributo[nombre="LONGITUD"]').text,
}
)
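# Hedged usage sketch ("PuntosLimpiosMadrid.xml" is a hypothetical path to the
# Madrid open-data clean-points feed; the file is not bundled with this module).
def _example_extract_puntos_limpios():
    extractor = PuntosLimpiosExtractor("PuntosLimpiosMadrid.xml")
    extractor.read_puntos_limpios_xml()
    print(len(extractor.puntos_limpios), "clean points parsed")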
class GraphModel(object):
def __init__(self):
print("Initialized graph model")
def fill_model(self):
entries = json.load(open('puntos_limpios.json'))
base_query = "MERGE (p:Containers{%s}) RETURN p;"
# Clean points
for e in entries:
params = ''
params += 'container_type: "clean_point",'
params += 'entity_id: %s,' % e['id_entidad']
params += 'name: "%s",' % e['nombre']
params += 'schedule: "%s",' % e['horario']
params += 'public_transport: "%s",' % e['transporte']
params += 'description: "%s",' % e['descripcion']
params += 'accesibility: %s,' % e['accesibilidad']
params += 'address: "%s",' % e['localizacion']
params += 'city: "%s",' % e['localidad']
params += 'province: "%s",' % e['provincia']
params += 'postal_code: %s,' % e['codigo_postal']
params += 'neighborhood: "%s",' % e['barrio']
            params += 'district: "%s",' % e['distrito']
params += 'lat: %s,' % e['lat']
params += 'lon: %s' % e['lon']
graph_session.run(base_query % params)
# Trash types
trash_types = ['furniture', 'electronics', 'batteries', 'dog_shit']
container_types = [
'clean_point',
'dog_shit_trash',
'battery_recycling_point'
]
for t in trash_types:
query = """
MERGE (t:TrashType{name:"%s"})
"""
graph_session.run(query % t)
# marquesinas
list_batteries_container = json.load(open('Marquesinas_contenedores_pilas_2017.json'))
for i, e in enumerate(list_batteries_container):
params = ''
params += 'container_type: "battery_recycling_point",'
params += 'entity_id: %s,' % e['Parada']
params += 'name: "bus_stop_%s-%s",' % (str(i),e['Parada'])
            params += 'district: "%s",' % e['DISTRITO']
params += 'lat: %s,' % e['Latitud']
params += 'lon: %s' % e['Longitud']
graph_session.run(base_query % params)
print("Saved %d batteries container" % i)
list_dog_shit_container = json.load(open('Papeleras_con_expendedor_de_bolsas.json'))
for i,e in enumerate(list_dog_shit_container):
print(e['latitud'].replace(',','.'), e['longitud'].replace(',','.'))
params = ''
params += 'container_type: "dog_shit_trash",'
params += 'entity_id: %s,' % str(e['codigo'])
params += 'name: "%s-%s %s",' % (str(i), str(e['codigo']), e['direccion'])
params += 'address: "%s",' % e['direccion']
            params += 'district: "%s",' % str(e['distrito'])
params += 'lat: %s,' % str(e['latitud'].replace(',','.'))
params += 'lon: %s' % str(e['longitud'].replace(',','.'))
try:
graph_session.run(base_query % params)
except:
import pdb; pdb.set_trace()
print("Saved %d dog shit containers" % i)
# Link clean_point related trash types
graph_session.run("""
MATCH (t:TrashType), (c:Containers)
WHERE t.name in ["furniture","electronics","batteries"]
AND c.container_type = "clean_point"
MERGE (t)-[:CAN_BE_DEPLOYED_IN]->(c)
RETURN t,c
""")
# Link batteries container with their trash type
graph_session.run("""
MATCH (t:TrashType), (c:Containers)
WHERE t.name in ["batteries"]
AND c.container_type = "battery_recycling_point"
MERGE (t)-[:CAN_BE_DEPLOYED_IN]->(c)
RETURN t,c
""")
# Link dog shit container with their trash type
graph_session.run("""
MATCH (t:TrashType), (c:Containers)
WHERE t.name in ["dog_shit"]
AND c.container_type = "dog_shit_trash"
MERGE (t)-[:CAN_BE_DEPLOYED_IN]->(c)
RETURN t,c
""")
def get_distances(self, lat, lon, container_type):
query = """
MATCH p=(n:Containers)<-[r:CAN_BE_DEPLOYED_IN]-(t:TrashType)
WHERE n.container_type = "%s"
RETURN
n.lat as latitude,
n.lon as longitude,
n.container_type as container_type,
collect(t.name) as trash_types,
distance(
point(
{latitude: n.lat, longitude:n.lon}
),
point(
{latitude: %s, longitude: %s}
)
)
as point_distance
ORDER BY point_distance LIMIT 5
""" % (
container_type,
str(lat),
str(lon)
)
return graph_session.run(query)
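# Hedged usage sketch (coordinates are illustrative, roughly central Madrid;
# requires a reachable Neo4j instance configured above): fetch the five nearest
# containers of the requested type, with their distance as computed by Neo4j's
# point()/distance() functions.
def _example_nearest_clean_points():
    model = GraphModel()
    records = model.get_distances(lat=40.4168, lon=-3.7038,
                                  container_type="clean_point")
    for record in records:
        print(record["container_type"], record["point_distance"])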
| 2.71875 | 3 |
boadata/core/data_object.py | janpipek/boadata | 2 | 12791057 | <filename>boadata/core/data_object.py
from __future__ import annotations
import weakref
from collections import OrderedDict
from typing import TYPE_CHECKING, Final
import blinker
import numexpr as ne
import numpy as np
from boadata.core.data_conversion import ConversionUnknown, DataConversion
if TYPE_CHECKING:
from typing import Any, ClassVar, List, Optional, Tuple, Type, Union, Callable
class UnknownDataObjectError(Exception):
""""""
class InvalidDataObjectError(Exception):
""""""
class UnsupportedDataOperationError(Exception):
""""""
class _DataObjectRegistry:
registered_types: Final[OrderedDict] = OrderedDict()
registered_default_types = {}
@staticmethod
def register_type(default: bool = False) -> Callable[[type], type]:
"""Decorator that registers the data type
:param default: Whether to serve as DataObject.from_native handler
for the real type of the data object.
Automatically discovers conversion in the form of __to_type__ and __from_type__
(see DataConversion.discover)
"""
if isinstance(default, type):
raise RuntimeError("Invalid use of decorator. Please, use DataObject.register_type() ")
def wrap(boadata_type: type) -> type:
DataObject.registered_types[boadata_type.type_name] = boadata_type
DataConversion.discover(boadata_type)
if default:
DataObject.registered_default_types[boadata_type.real_type] = boadata_type
boadata_type._registered = True
return boadata_type
return wrap
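# Hedged sketch (illustrative only, not part of boadata itself): registering a
# hypothetical wrapper type for plain Python lists. ``DataObject`` is defined
# later in this module, so the registration is wrapped in a function and only
# runs when the function is called.
def _example_register_type():
    @DataObject.register_type(default=True)
    class ExampleListObject(DataObject):
        real_type = list              # native Python type wrapped by this class
        type_name = "example_list"    # key used in DataObject.registered_types
    return ExampleListObject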
class _DataObjectConversions:
"""DataObject methods related to conversions."""
@classmethod
def accepts_uri(cls, uri: str) -> bool:
return False
@classmethod
    def from_uri(cls, uri: str, **kwargs) -> DataObject:
        """Create an object of this class from a URI.
:param uri: URI in the odo sense
This method should be overridden in daughter classes.
When called as DataObject.from_uri, it first tries to find an appropriate class
by checking all registered types.
"""
if cls == DataObject:
last_exception = None
for type_ in DataObject.registered_types.values():
if type_.accepts_uri(uri):
try:
return type_.from_uri(uri, **kwargs)
except Exception as exc:
last_exception = exc
if last_exception:
raise last_exception
raise UnknownDataObjectError(f"Cannot interpret '{uri}'.")
else:
raise UnknownDataObjectError(f"Cannot interpret '{uri}' as {cls.__name__}.")
@classmethod
def from_native(cls, native_object: Any, **kwargs) -> DataObject:
"""
:param native_object:
:param kwargs:
:return:
Is idempotent
"""
if cls == DataObject:
if isinstance(native_object, DataObject):
return native_object
boadata_type = DataObject.registered_default_types.get(type(native_object))
if not boadata_type:
raise UnknownDataObjectError("Cannot interpret native object of the type {0}.".format(type(native_object)))
return boadata_type.from_native(native_object, **kwargs)
else:
if isinstance(native_object, DataObject):
return native_object.convert(cls.type_name, **kwargs)
return cls(inner_data=native_object, **kwargs)
def is_convertible_to(self, new_type_name: Union[str, type]) -> bool:
"""
"""
if isinstance(new_type_name, type):
new_type, new_type_name = new_type_name, new_type_name.type_name
else:
if not new_type_name in DataObject.registered_types:
return False
new_type = DataObject.registered_types[new_type_name]
if isinstance(self, new_type):
return True
if not (self.type_name, new_type_name) in DataConversion.registered_conversions:
return False
conversion = DataConversion.registered_conversions[(self.type_name, new_type_name)]
return conversion.applies(self)
@classmethod
def is_convertible_from(cls, data_object: DataObject) -> bool:
return data_object.is_convertible_to(cls)
@property
def allowed_conversions(self) -> List[Tuple[str, str]]:
return [ key for (key, conversion) in DataConversion.registered_conversions.items() if key[0] == self.type_name and conversion.applies(self)]
def convert(self, new_type_name: str, **kwargs) -> DataObject:
"""Convert to another boadata-supported type.
Auto-conversion returns the same object.
Default implementation is based on odo.
"""
if not new_type_name:
available = [key[1] for key in DataConversion.registered_conversions.keys() if key[0] == self.__class__.type_name]
raise TypeError("convert() missing 1 required positional argument: 'new_type_name', available argument values: {0}".format(", ".join(available)))
# TODO: check argument?
new_type = DataObject.registered_types[new_type_name]
if isinstance(self, new_type):
return self
conversion = DataConversion.registered_conversions.get((self.__class__.type_name, new_type_name))
if not conversion:
available = [key[1] for key in DataConversion.registered_conversions.keys() if key[0] == self.__class__.type_name]
raise ConversionUnknown("Unknown conversion from {0} to {1}. Available: {2}".format(self.__class__.type_name, new_type_name, ", ".join(available)))
return conversion.convert(self, new_type, **kwargs)
class _DataObjectInterface:
"""
Possible methods:
- add_column(key, expression, **kwargs) - based on evaluate
-
"""
@property
def shape(self) -> Tuple[int, ...]:
"""Shape of the data.
Example: Shape of the 4x3 matrix is (4, 3)
"""
if hasattr(self.inner_data, "shape"):
return tuple(self.inner_data.shape)
return ()
@property
def ndim(self) -> int:
"""Dimensionality of the data.
Example: A 4x3 matrix has dimensionality 2.
"""
if hasattr(self.inner_data, "ndim"):
return int(self.inner_data.ndim)
else:
return len(self.shape)
@property
def size(self) -> int:
if hasattr(self.inner_data, "size"):
return int(self.inner_data.size)
else:
from operator import mul
from functools import reduce
            return reduce(mul, self.shape, 1)
@property
def dtype(self):
if hasattr(self.inner_data, "dtype"):
return self.inner_data.dtype
else:
return None
@property
def columns(self) -> Optional[List[str]]:
"""Column names (in multidimensional mappings, the value variables)
Default variant understands pandas DataFrames
"""
if hasattr(self.inner_data, "columns"):
return list(self.inner_data.columns.values)
else:
return None
@property
def name(self) -> Optional[str]:
if hasattr(self.inner_data, "name"):
return self.inner_data.name
else:
return None
class DataObject(_DataObjectRegistry, _DataObjectConversions, _DataObjectInterface):
'''A basic object that contains data representable by boadata.
:type registered_types: OrderedDict[str, type]
:param source: From where we obtained the object (kept as weak reference)
It is necessary to keep all arguments keyword (enforceable in Python 3).
'''
def __init__(self, inner_data: Any = None, uri: str = None, source: 'DataObject' = None, **kwargs):
if self.real_type and not isinstance(inner_data, self.real_type):
raise InvalidDataObjectError("Invalid type of inner data: `{0}` instead of expected `{1}`".format(
inner_data.__class__.__name__, self.real_type.__name__
))
self.inner_data = inner_data
self.uri = uri
if source:
self.source = weakref.ref(source)
changed = blinker.Signal("changed") # For dynamic data objects
real_type: ClassVar[Type] = None
type_name: ClassVar[str] = None
@property
def title(self) -> str:
return repr(self)
def __repr__(self):
return "{0}(\"{1}\")".format(self.__class__.__name__, self.uri)
@staticmethod
def proxy_methods(methods, wrap: bool = True, unwrap_args: bool = True, same_class: bool = True, through: Optional[type] = None):
"""Decorator to apply on DataObject descendants.
:param wrap: Whether to wrap result
:param unwrap_args: Whether to unwrap arguments
:param same_class: Whether to try to convert to self's class
:param through: if None, done via inner_data, otherwise through a named type
It is not possible to proxy slots, but it is possible to inherit proxied slots :-)
"""
import boadata
def wrapper(boadata_type):
if isinstance(methods, str):
method_names = [methods]
else:
method_names = methods
def make_method(method_name):
def proxied_method(self, *args, **kwargs):
if unwrap_args:
args = [boadata.unwrap(arg) for arg in args]
kwargs = {key: boadata.unwrap(value) for key, value in kwargs.items()}
if through:
native_method = getattr(self.convert(through), method_name)
else:
native_method = getattr(self.inner_data, method_name)
result = native_method(*args, **kwargs)
if not wrap:
return result
elif same_class and isinstance(result, self.real_type):
return self.__class__.from_native(result)
else:
try:
return DataObject.from_native(result)
except:
return result
return proxied_method
for method_name in method_names:
setattr(boadata_type, method_name, make_method(method_name))
return boadata_type
return wrapper
def evaluate(self, expression: str, wrap: bool = True) -> Any:
"""Do calculation on columns of the dataset.
:param expression: a valid expression
:param wrap: whether to convert back to DataObject or return the native result
Based on numexpr library
"""
local_dict = {
col : self[col].inner_data for col in self.columns if isinstance(col, str)
}
global_dict = {
"nan" : np.nan,
"inf" : np.inf
}
result = ne.evaluate(expression, local_dict=local_dict, global_dict=global_dict)
if wrap:
return DataObject.from_native(result, source=self)
else:
return result
def where(self, condition: str, sql: bool = False) -> 'DataObject':
"""Choose a subset of a dataset.
:param condition: a valid condition returning boolean
:param sql: if True, the condition is evaluated as sql WHERE clause
"""
if sql:
if not "sql" in dir(self):
raise RuntimeError("Object {0} does not support SQL.".format(self.__class__.__name__))
query = "SELECT * FROM data WHERE {0}".format(condition)
return self.sql(query, table_name="data")
else:
# TODO: Allow to be lambda
import numpy as np
if not self.size:
mask = []
else:
mask = self.evaluate(condition, wrap=False)
if mask.dtype != np.dtype(bool):
raise UnsupportedDataOperationError("The result of condition has to be a boolean array")
return DataObject.from_native(self.inner_data[mask], source=self)
def apply_native(self, method_name: str, *args, **kwargs):
"""Apply a method defined on the native object.
If possible, converts the result to DataObject.
"""
# TODO: Check that it is ok (see proxy etc., consider a clever proxy attribute)
method = getattr(self.inner_data, method_name)
result = method(*args, **kwargs)
try:
result = DataObject.from_native(result)
except:
pass
return result
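# Hedged usage sketch (illustrative only): wrapping a pandas DataFrame and
# querying it with ``evaluate``/``where``. Assumes a pandas-backed DataObject
# type with column access is registered elsewhere in boadata as the default
# handler for ``pandas.DataFrame``.
def _example_evaluate_and_where():
    import pandas as pd
    frame = pd.DataFrame({"x": [1, 2, 3], "y": [10.0, 20.0, 30.0]})
    data = DataObject.from_native(frame)
    combined = data.evaluate("2 * x + y")   # numexpr expression over columns
    subset = data.where("y > 15")           # boolean numexpr condition
    return combined, subset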
| 2.46875 | 2 |
python/veles/tests/data/test_repack.py | pombredanne/veles | 918 | 12791058 | <reponame>pombredanne/veles<gh_stars>100-1000
# Copyright 2017 CodiLime
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from veles.data.bindata import BinData
from veles.data.repack import Endian, Repacker
class TestRepacker(unittest.TestCase):
def test_endian(self):
self.assertNotEqual(Endian.LITTLE, Endian.BIG)
def test_simple_copy(self):
r = Repacker(endian=Endian.LITTLE, from_width=8, to_width=8)
self.assertEqual(r.repack_unit, 8)
self.assertEqual(r.repack_size(num_elements=2), 2)
self.assertEqual(r.repackable_size(from_size=2), 2)
a = BinData(8, [1, 2, 3, 4])
b = r.repack(a, start=1, num_elements=2)
self.assertEqual(b, BinData(8, [2, 3]))
self.assertEqual(r.repack(a), a)
def test_gather_8to16_little(self):
r = Repacker(endian=Endian.LITTLE, from_width=8, to_width=16)
self.assertEqual(r.repack_unit, 16)
self.assertEqual(r.repack_size(2), 4)
self.assertEqual(r.repackable_size(2), 1)
self.assertEqual(r.repackable_size(3), 1)
self.assertEqual(r.repackable_size(4), 2)
a = BinData(8, [1, 2, 3, 4, 5, 6])
b = r.repack(a, start=1, num_elements=2)
self.assertEqual(b, BinData.from_spaced_hex(16, '0302 0504'))
c = r.repack(a, start=1)
self.assertEqual(b, c)
d = r.repack(a)
self.assertEqual(d, BinData.from_spaced_hex(16, '0201 0403 0605'))
def test_gather_8to16_big(self):
r = Repacker(endian=Endian.BIG, from_width=8, to_width=16)
self.assertEqual(r.repack_unit, 16)
self.assertEqual(r.repack_size(2), 4)
self.assertEqual(r.repackable_size(2), 1)
self.assertEqual(r.repackable_size(3), 1)
self.assertEqual(r.repackable_size(4), 2)
a = BinData(8, [1, 2, 3, 4, 5, 6])
b = r.repack(a, start=1, num_elements=2)
self.assertEqual(b, BinData.from_spaced_hex(16, '0203 0405'))
c = r.repack(a, start=1)
self.assertEqual(b, c)
d = r.repack(a)
self.assertEqual(d, BinData.from_spaced_hex(16, '0102 0304 0506'))
def test_mash_8to12_little(self):
r = Repacker(Endian.LITTLE, 8, 12)
self.assertEqual(r.repack_unit, 24)
self.assertEqual(r.repack_size(1), 2)
self.assertEqual(r.repack_size(2), 3)
self.assertEqual(r.repackable_size(1), 0)
self.assertEqual(r.repackable_size(2), 1)
self.assertEqual(r.repackable_size(3), 2)
self.assertEqual(r.repackable_size(4), 2)
a = BinData.from_spaced_hex(8, '12 34 56 78 9a')
b = r.repack(a, 1, 2)
self.assertEqual(b, BinData.from_spaced_hex(12, '634 785'))
c = r.repack(a, 1)
self.assertEqual(b, c)
d = r.repack(a)
self.assertEqual(d, BinData.from_spaced_hex(12, '412 563 a78'))
def test_mash_8to12_big(self):
r = Repacker(Endian.BIG, 8, 12)
self.assertEqual(r.repack_unit, 24)
self.assertEqual(r.repack_size(1), 2)
self.assertEqual(r.repack_size(2), 3)
self.assertEqual(r.repackable_size(1), 0)
self.assertEqual(r.repackable_size(2), 1)
self.assertEqual(r.repackable_size(3), 2)
self.assertEqual(r.repackable_size(4), 2)
a = BinData.from_spaced_hex(8, '12 34 56 78 9a')
b = r.repack(a, 1, 2)
self.assertEqual(b, BinData.from_spaced_hex(12, '345 678'))
c = r.repack(a, 1)
self.assertEqual(b, c)
d = r.repack(a)
self.assertEqual(d, BinData.from_spaced_hex(12, '123 456 789'))
def test_split_8to1_little(self):
r = Repacker(Endian.LITTLE, 8, 1)
self.assertEqual(r.repack_unit, 8)
self.assertEqual(r.repack_size(12), 2)
self.assertEqual(r.repack_size(8), 1)
self.assertEqual(r.repack_size(9), 2)
self.assertEqual(r.repack_size(17), 3)
self.assertEqual(r.repackable_size(1), 8)
a = BinData.from_spaced_hex(8, '12 34 56')
b = r.repack(a, 1, 12)
c = BinData.from_spaced_hex(1, ' '.join(format(0x634, '012b')[::-1]))
self.assertEqual(b, c)
def test_split_8to1_big(self):
r = Repacker(Endian.BIG, 8, 1)
self.assertEqual(r.repack_unit, 8)
self.assertEqual(r.repack_size(12), 2)
self.assertEqual(r.repack_size(8), 1)
self.assertEqual(r.repack_size(9), 2)
self.assertEqual(r.repack_size(17), 3)
self.assertEqual(r.repackable_size(1), 8)
a = BinData.from_spaced_hex(8, '12 34 56')
b = r.repack(a, 1, 12)
c = BinData.from_spaced_hex(1, ' '.join(format(0x345, '012b')))
self.assertEqual(b, c)
def test_split_60to20_little(self):
r = Repacker(Endian.LITTLE, 60, 20)
self.assertEqual(r.repack_unit, 60)
self.assertEqual(r.repack_size(1), 1)
self.assertEqual(r.repack_size(2), 1)
self.assertEqual(r.repack_size(3), 1)
self.assertEqual(r.repack_size(4), 2)
self.assertEqual(r.repackable_size(1), 3)
a = BinData(60, [0xfedcba987654321])
b = r.repack(a)
self.assertEqual(b, BinData.from_spaced_hex(20, '54321 a9876 fedcb'))
def test_split_60to20_big(self):
r = Repacker(Endian.BIG, 60, 20)
self.assertEqual(r.repack_unit, 60)
self.assertEqual(r.repack_size(1), 1)
self.assertEqual(r.repack_size(2), 1)
self.assertEqual(r.repack_size(3), 1)
self.assertEqual(r.repack_size(4), 2)
self.assertEqual(r.repackable_size(1), 3)
a = BinData(60, [0xfedcba987654321])
b = r.repack(a)
self.assertEqual(b, BinData.from_spaced_hex(20, 'fedcb a9876 54321'))
def test_split_16to8_little(self):
r = Repacker(Endian.LITTLE, 16, 8)
self.assertEqual(r.repack_unit, 16)
self.assertEqual(r.repack_size(3), 2)
self.assertEqual(r.repackable_size(3), 6)
a = BinData(16, [0x1234, 0x5678, 0x9abc])
b = r.repack(a, 1, 3)
self.assertEqual(b, BinData.from_spaced_hex(8, '78 56 bc'))
def test_split_16to8_big(self):
r = Repacker(Endian.BIG, 16, 8)
self.assertEqual(r.repack_unit, 16)
self.assertEqual(r.repack_size(3), 2)
self.assertEqual(r.repackable_size(3), 6)
a = BinData(16, [0x1234, 0x5678, 0x9abc])
b = r.repack(a, 1, 3)
self.assertEqual(b, BinData.from_spaced_hex(8, '56 78 9a'))
def test_padded_8to23_left_little(self):
r = Repacker(Endian.LITTLE, 8, 23, high_pad=9)
self.assertEqual(r.repack_unit, 32)
self.assertEqual(r.repack_size(2), 8)
self.assertEqual(r.repackable_size(7), 1)
self.assertEqual(r.repackable_size(8), 2)
a = BinData.from_spaced_hex(8, '11 22 33 44 55 66 77 88 99 aa')
b = r.repack(a, 1, 2)
self.assertEqual(b, BinData.from_spaced_hex(23, '443322 087766'))
def test_padded_8to23_right_little(self):
r = Repacker(Endian.LITTLE, 8, 23, low_pad=9)
self.assertEqual(r.repack_unit, 32)
self.assertEqual(r.repack_size(2), 8)
self.assertEqual(r.repackable_size(7), 1)
self.assertEqual(r.repackable_size(8), 2)
a = BinData.from_spaced_hex(8, '11 22 33 44 55 66 77 88 99 aa')
b = r.repack(a, 1, 2)
self.assertEqual(b, BinData.from_spaced_hex(23, '2aa219 4cc43b'))
def test_padded_8to23_mixed_little(self):
r = Repacker(Endian.LITTLE, 8, 23, low_pad=8, high_pad=1)
self.assertEqual(r.repack_unit, 32)
self.assertEqual(r.repack_size(2), 8)
self.assertEqual(r.repackable_size(7), 1)
self.assertEqual(r.repackable_size(8), 2)
a = BinData.from_spaced_hex(8, '11 22 33 44 55 66 77 88 99 aa')
b = r.repack(a, 1, 2)
self.assertEqual(b, BinData.from_spaced_hex(23, '554433 198877'))
def test_padded_8to23_left_big(self):
r = Repacker(Endian.BIG, 8, 23, high_pad=9)
self.assertEqual(r.repack_unit, 32)
self.assertEqual(r.repack_size(2), 8)
self.assertEqual(r.repackable_size(7), 1)
self.assertEqual(r.repackable_size(8), 2)
a = BinData.from_spaced_hex(8, '11 22 33 44 55 66 77 88 99 aa')
b = r.repack(a, 1, 2)
self.assertEqual(b, BinData.from_spaced_hex(23, '334455 778899'))
def test_padded_8to23_right_big(self):
r = Repacker(Endian.BIG, 8, 23, low_pad=9)
self.assertEqual(r.repack_unit, 32)
self.assertEqual(r.repack_size(2), 8)
self.assertEqual(r.repackable_size(7), 1)
self.assertEqual(r.repackable_size(8), 2)
a = BinData.from_spaced_hex(8, '11 22 33 44 55 66 77 88 99 aa')
b = r.repack(a, 1, 2)
self.assertEqual(b, BinData.from_spaced_hex(23, '1119a2 333bc4'))
def test_padded_8to23_mixed_big(self):
r = Repacker(Endian.BIG, 8, 23, low_pad=8, high_pad=1)
self.assertEqual(r.repack_unit, 32)
self.assertEqual(r.repack_size(2), 8)
self.assertEqual(r.repackable_size(7), 1)
self.assertEqual(r.repackable_size(8), 2)
a = BinData.from_spaced_hex(8, '11 22 33 44 55 66 77 88 99 aa')
b = r.repack(a, 1, 2)
self.assertEqual(b, BinData.from_spaced_hex(23, '223344 667788'))
| 2.171875 | 2 |
docs/source/pyplots/loglog.py | zangobot/secml | 63 | 12791059 | from secml.array import CArray
from secml.figure import CFigure
fig = CFigure(fontsize=14)
fig.title('loglog base 2 on x')
t = CArray.arange(0.01, 20.0, 0.01)
fig.sp.loglog(t, 20 * (-t / 10.0).exp(), basex=2)
fig.sp.grid()
fig.show()
| 2.15625 | 2 |
pyradiosky/spherical_coords_transforms.py | samirchoudhuri/pyradiosky | 8 | 12791060 | <gh_stars>1-10
# -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2019 Radio Astronomy Software Group
# Licensed under the 3-clause BSD License
"""Methods for doing spherical coordinate transformations on vectors."""
import numpy as np
def r_hat(theta, phi):
"""
Get the r hat unit vectors in cartesian coordinates for points on a sphere.
Parameters
----------
theta, phi : float
The co-latitude and azimuth coordinates, respectively, for a point
on the sphere, in radians. Azimuth is defined with respect to the
x axis, co-latitude is the angle with the positive z axis.
Returns
-------
array of float
Array of r hat vectors, shape (3, Npoints)
"""
theta = np.array(theta)
phi = np.array(phi)
if theta.shape != phi.shape:
raise ValueError("theta and phi must have the same shape")
rhx = np.cos(phi) * np.sin(theta)
rhy = np.sin(phi) * np.sin(theta)
rhz = np.cos(theta)
return np.stack((rhx, rhy, rhz))
def theta_hat(theta, phi):
"""
Get the theta hat unit vectors in cartesian coordinates for points on a sphere.
Parameters
----------
theta, phi : float
The co-latitude and azimuth coordinates, respectively, for a point
on the sphere, in radians. Azimuth is defined with respect to the
x axis, co-latitude is the angle with the positive z axis.
Returns
-------
array of float
Array of theta hat vectors, shape (3, Npoints)
"""
theta = np.array(theta)
phi = np.array(phi)
if theta.shape != phi.shape:
raise ValueError("theta and phi must have the same shape")
thx = np.cos(phi) * np.cos(theta)
thy = np.sin(phi) * np.cos(theta)
thz = -np.sin(theta)
return np.stack((thx, thy, thz))
def phi_hat(theta, phi):
"""
Get the phi hat unit vectors in cartesian coordinates for points on a sphere.
Parameters
----------
theta, phi : float
The co-latitude and azimuth coordinates, respectively, for a point
on the sphere, in radians. Azimuth is defined with respect to the
x axis, co-latitude is the angle with the positive z axis.
Returns
-------
array of float
Array of phi hat vectors, shape (3, Npoints)
"""
theta = np.array(theta)
phi = np.array(phi)
if theta.shape != phi.shape:
raise ValueError("theta and phi must have the same shape")
phx = -np.sin(phi)
phy = np.cos(phi)
phz = np.zeros_like(phi)
return np.stack((phx, phy, phz))
def rotate_points_3d(rot_matrix, theta, phi):
"""
Get the spherical coordinates of the point under a 3d rotation.
Finds the spherical coordinates for point p specified by p = R . q,
where q is the 3D position vector of the point specified by (theta,phi) and
R is the 3D rotation matrix that relates two coordinate charts.
    The accuracy of this method may not be good enough near the poles in either
coordinate system.
Parameters
----------
rot_matrix : array-like of float
rotation matrix to use
theta, phi : float
The co-latitude and azimuth coordinates, respectively, for a point
on the sphere, in radians. Azimuth is defined with respect to the
x axis, co-latitude is the angle with the positive z axis.
Returns
-------
beta, alpha : float
The theta, phi coordinates for the point on the sphere (using normal
mathematical conventions) in the rotated frame.
"""
# This is NOT written to be vectorized for multiple (theta, phi)
rot_matrix = np.array(rot_matrix)
if rot_matrix.shape != (3, 3):
raise ValueError("rot_matrix must be a 3x3 array")
# Replace with function call?
q_hat_1 = np.cos(phi) * np.sin(theta)
q_hat_2 = np.sin(phi) * np.sin(theta)
q_hat_3 = np.cos(theta)
q_hat = np.stack((q_hat_1, q_hat_2, q_hat_3))
# Should test for shape of p_hat
p_hat = np.einsum("ab...,b...->a...", rot_matrix, q_hat)
# Should write a function to do this as well, i.e., pull back angles from
# a vector
if np.isclose(p_hat[2], 1.0, rtol=0.0, atol=1e-12):
p_hat[2] = 1.0
beta = np.arccos(p_hat[2])
alpha = np.arctan2(p_hat[1], p_hat[0])
if alpha < 0:
alpha += 2.0 * np.pi
return (beta, alpha)
def spherical_basis_vector_rotation_matrix(
theta, phi, rot_matrix, beta=None, alpha=None
):
"""
Get the rotation matrix to take vectors in the theta/phi basis to a new reference frame.
Given a position (`theta`, `phi`) in “standard mathematical” coordinates
(0 < `theta` < pi, 0 < `phi` < 2 pi) which will typically be an ICRS RA/Dec
coordinate appropriately converted, and the point to which it is transformed
in another standard mathematical coordinate system (`beta`, `alpha`), which
will typically be local telescope Alt/Az appropriately converted, and a
3 x 3 rotation matrix `rot_matrix` which connects those two points,
calculate the rotation matrix which rotates the basis vectors associated
with (`theta`, `phi`) to those associated with (`beta`, `alpha`).
Parameters
----------
theta, phi : float
The co-latitude and azimuth coordinates, respectively, for a point
on the sphere, in radians. Azimuth is defined with respect to the
x axis, co-latitude is the angle with the positive z axis.
rot_matrix : array-like of float
Rotation matrix that takes 3-vectors from (theta, phi) to (beta, alpha)
beta, alpha : float, optional
The theta, phi coordinates for the point on the sphere (using normal
mathematical conventions) in the rotated frame. If either is not provided,
they are calculated using `rotate_points_3d`. Note these may
not be as exact as values calculated from astropy.
Returns
-------
array of float
2 x 2 rotation matrix that takes vectors in the theta/phi basis to
the beta/alpha basis.
"""
if alpha is None or beta is None:
beta, alpha = rotate_points_3d(rot_matrix, theta, phi)
th = theta_hat(theta, phi)
ph = phi_hat(theta, phi)
bh = np.einsum("ab...,b...->a...", rot_matrix.T, theta_hat(beta, alpha))
cosX = np.einsum("a...,a...", bh, th)
sinX = np.einsum("a...,a...", bh, ph)
return np.array([[cosX, sinX], [-sinX, cosX]])
def axis_angle_rotation_matrix(axis, angle):
"""
Get the rotation matrix using Rodrigues' rotation matrix formula.
Parameters
----------
axis : array-like of float
3 element unit vector specifying the axis to rotate around.
angle : float
angle to rotate by in radians
Returns
-------
array
3x3 rotation matrix to rotate vectors by `angle` around `axis`.
"""
if axis.shape != (3,):
raise ValueError("axis must be a must be length 3 vector")
if not is_unit_vector(axis):
raise ValueError("axis must be a unit vector")
K_matrix = np.array(
[[0.0, -axis[2], axis[1]], [axis[2], 0.0, -axis[0]], [-axis[1], axis[0], 0.0]]
)
I_matrix = np.identity(3)
rot_matrix = (
I_matrix
+ np.sin(angle) * K_matrix
+ (1.0 - np.cos(angle)) * np.dot(K_matrix, K_matrix)
)
return rot_matrix
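# Hedged sketch (illustrative only): a quarter turn about the z axis should
# map the x unit vector onto the y unit vector.
def _example_axis_angle_rotation():
    rot = axis_angle_rotation_matrix(np.array([0.0, 0.0, 1.0]), np.pi / 2)
    print(np.dot(rot, np.array([1.0, 0.0, 0.0])))  # approximately [0, 1, 0]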
def is_orthogonal(matrix, tol=1e-15):
"""
Test for matrix orthogonality.
Parameters
----------
matrix : array-like of float
square matrix to test
Returns
-------
bool
True if `matrix` is orthogonal, False otherwise.
"""
return np.allclose(np.matmul(matrix, matrix.T), np.eye(3), atol=tol)
def is_unit_vector(vec, tol=1e-15):
"""
Test for unit vectors.
Parameters
----------
vec : array-like of float
vector to test
Returns
-------
bool
True if `vec` is a unit vector, False otherwise.
"""
return np.allclose(np.dot(vec, vec), 1, atol=tol)
def vecs2rot(r1=None, r2=None, theta1=None, phi1=None, theta2=None, phi2=None):
"""
Get the rotation matrix that connects two points or unit vectors on the sphere.
Parameters
----------
r1, r2 : array-like of float, optional
length 3 unit vectors
theta1, phi1, theta2, phi2 : float, optional
The co-latitude and azimuth coordinates, respectively, for a point
on the sphere, in radians. Azimuth is defined with respect to the
x axis, co-latitude is the angle with the positive z axis.
Ignored if r1 and r2 are supplied.
Returns
-------
array
3x3 rotation matrix that rotates the first point or vector into the other.
"""
if r1 is None or r2 is None:
if theta1 is None or phi1 is None or theta2 is None or phi2 is None:
raise ValueError(
"Either r1 and r2 must be supplied or all of "
"theta1, phi1, theta2 and phi2 must be supplied."
)
r1 = r_hat(theta1, phi1)
r2 = r_hat(theta2, phi2)
assert is_unit_vector(r1), "r1 is not a unit vector: " + str(r1)
assert is_unit_vector(r2), "r2 is not a unit vector: " + str(r2)
else:
r1 = np.array(r1)
r2 = np.array(r2)
if r1.shape != (3,) or r2.shape != (3,):
raise ValueError("r1 and r2 must be length 3 vectors")
if not is_unit_vector(r1) or not is_unit_vector(r2):
raise ValueError("r1 and r2 must be unit vectors")
norm = np.cross(r1, r2)
# Note that Psi is between 0 and pi
sinPsi = np.sqrt(np.dot(norm, norm))
n_hat = norm / sinPsi # Trouble lurks if Psi = 0.
cosPsi = np.dot(r1, r2)
Psi = np.arctan2(sinPsi, cosPsi)
rotation = axis_angle_rotation_matrix(n_hat, Psi)
assert is_unit_vector(n_hat), "n_hat is not a unit vector: " + str(n_hat)
assert is_orthogonal(rotation), "rotation matrix is not orthogonal: " + str(
rotation
)
return rotation
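# Hedged sketch (illustrative only): the rotation connecting the x and y unit
# vectors, checked by applying it back to r1.
def _example_vecs2rot():
    rot = vecs2rot(r1=np.array([1.0, 0.0, 0.0]), r2=np.array([0.0, 1.0, 0.0]))
    print(np.dot(rot, np.array([1.0, 0.0, 0.0])))  # approximately [0, 1, 0]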
| 3.578125 | 4 |
server.py | chvia223/python-networking | 0 | 12791061 | from gettext import find
import socket
import threading
import spotipy
from spotipy.oauth2 import SpotifyOAuth
HEADER = 64
PORT = 5050
SERVER = socket.gethostbyname(socket.gethostname())
ADDR = (SERVER, PORT)
FORMAT = 'utf-8'
DISCONNECT_MESSAGE = "!DISCONNECT"
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(ADDR)
def init_auth_manager():
with open('token-data_server.txt', 'r') as file:
cred_data = file.readlines()
scope = 'user-library-read user-read-playback-state user-modify-playback-state user-read-currently-playing'
auth_manager = SpotifyOAuth(
client_id=cred_data[0].strip(),
client_secret=cred_data[1].strip(),
redirect_uri=cred_data[2].strip(),
scope=scope)
return auth_manager
"""
Asks user for which playback device they'd like and returns the device id.
"""
def select_device(avail_devices):
    device_names = list(avail_devices.keys())
if len(device_names) == 0:
return
user_input = -1
while user_input-1 not in range(len(device_names)):
try:
print()
for i in range(len(device_names)):
print(f"({i+1}) {device_names[i]}")
print()
print("Enter the number that corresponds with your player.")
user_input = int(input("> "))
except ValueError:
print("[ERROR] Please enter a valid number.")
return avail_devices[device_names[user_input-1]]
"""
Calls API to grab the available devices user can interact with.
"""
def get_avail_devices(sp):
avail_devices = dict()
results = sp.devices()
# print(len(results['devices']))
if len(results['devices']) != 0:
for i in range(len(results['devices'])):
avail_devices[results['devices'][i]['name']] = results['devices'][i]['id']
else:
print("[ERROR] There are no available devices.")
return avail_devices
"""
Plays a provided track on a provided device.
"""
def play_track(sp, device_id, track_id):
uris_list = []
uris_list.append(track_id)
sp.start_playback(device_id=device_id, uris=uris_list)
def handle_client(conn, addr, sp, device_id):
print(f"[NEW CONNECTION] {addr} connected.")
# conn.send("[CONNECTED] You connected to the host".encode(FORMAT))
connected = True
while connected:
msg_length = conn.recv(HEADER).decode(FORMAT)
if msg_length:
msg_length = int(msg_length)
msg = conn.recv(msg_length).decode(FORMAT)
if msg == DISCONNECT_MESSAGE:
connected = False
print(f"[{addr}] {msg}")
match msg:
case "playing":
track_info = sp.currently_playing()
track_name = track_info['item']['name']
track_artist = track_info['item']['album']['artists'][0]['name']
track_album = track_info['item']['album']['name']
conn.send(f"Name: {track_name} | Artist: {track_artist} | Album: {track_album}".encode(FORMAT))
if ("https://open.spotify.com/track/") in msg:
play_track(sp, device_id, msg)
track_info = sp.currently_playing()
track_name = track_info['item']['name']
track_artist = track_info['item']['album']['artists'][0]['name']
conn.send(f"[ADDED] ({track_name} by {track_artist}) added to queue.".encode(FORMAT))
conn.close()
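# Hedged client-side sketch (illustrative only; ``client_socket`` is a
# hypothetical connected socket): it mirrors the framing handle_client expects,
# where every message is preceded by a HEADER-byte length field padded with
# spaces. The server only replies to certain messages (e.g. "playing").
def _example_send(client_socket, msg):
    data = msg.encode(FORMAT)
    header = str(len(data)).encode(FORMAT)
    header += b' ' * (HEADER - len(header))  # pad the length field to HEADER bytes
    client_socket.send(header)
    client_socket.send(data)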
def start():
server.listen()
print(f"[LISTENING] Server is listening on {SERVER}")
    # Placed the API build inside the start function for organization
auth_manager = init_auth_manager()
sp = spotipy.Spotify(auth_manager=auth_manager)
# Host must select device player when initializing server.
avail_devices = get_avail_devices(sp)
device_id = select_device(avail_devices)
while True:
if device_id == None:
break
conn, addr = server.accept()
thread = threading.Thread(target=handle_client, args=(conn, addr, sp, device_id))
thread.start()
print(f"[ACTIVE CONNECTIONS] {threading.active_count() - 1}")
print("[CLOSING] server is stopping...")
print("[STARTING] server is starting...")
start()
| 3.109375 | 3 |
nome/npm_command.py | joaopalmeiro/nome | 0 | 12791062 | from http import HTTPStatus
import requests
from cleo import Command
from clikit.api.io import flags
from .constants import (
AVAILABLE_MSG,
HTTP_STATUS_CODE_MSG,
NOT_AVAILABLE_MSG,
NPM_BASE_URL,
)
class NpmCommand(Command):
"""
Check the availability of a package name in npm
npm
{name : What package name do you want to see if it's available?}
"""
def handle(self):
name = self.argument("name")
url = f"{NPM_BASE_URL}{name}"
with requests.Session() as s:
r = s.get(url)
status_code = r.status_code
updated_url = r.url
status_code_description = HTTPStatus(status_code).phrase
self.line(
HTTP_STATUS_CODE_MSG.format(code=status_code, desc=status_code_description),
verbosity=flags.VERBOSE,
)
is_available = status_code == 404
if is_available:
self.line(AVAILABLE_MSG.format(name=name))
else:
self.line(NOT_AVAILABLE_MSG.format(name=name, url=updated_url))
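# Hedged sketch (illustrative only; assumes the cleo 0.x Application API that
# matches the ``from cleo import Command`` import above): registering the
# command on a console application so it can be invoked from the CLI.
def _example_build_application():
    from cleo import Application
    application = Application()
    application.add(NpmCommand())
    return application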
| 2.5625 | 3 |
pyboto3/cloudwatchevents.py | gehad-shaat/pyboto3 | 91 | 12791063 | <reponame>gehad-shaat/pyboto3
'''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def can_paginate(operation_name=None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is create_foo, and you'd normally invoke the
operation as client.create_foo(**kwargs), if the
create_foo operation can be paginated, you can use the
call client.get_paginator('create_foo').
"""
pass
def delete_rule(Name=None):
"""
Deletes the specified rule.
You must remove all targets from a rule using RemoveTargets before you can delete the rule.
When you delete a rule, incoming events might continue to match to the deleted rule. Please allow a short period of time for changes to take effect.
See also: AWS API Documentation
:example: response = client.delete_rule(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]
The name of the rule.
"""
pass
def describe_rule(Name=None):
"""
Describes the specified rule.
See also: AWS API Documentation
:example: response = client.describe_rule(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]
The name of the rule.
:rtype: dict
:return: {
'Name': 'string',
'Arn': 'string',
'EventPattern': 'string',
'ScheduleExpression': 'string',
'State': 'ENABLED'|'DISABLED',
'Description': 'string',
'RoleArn': 'string'
}
"""
pass
def disable_rule(Name=None):
"""
Disables the specified rule. A disabled rule won't match any events, and won't self-trigger if it has a schedule expression.
When you disable a rule, incoming events might continue to match to the disabled rule. Please allow a short period of time for changes to take effect.
See also: AWS API Documentation
:example: response = client.disable_rule(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]
The name of the rule.
"""
pass
def enable_rule(Name=None):
"""
Enables the specified rule. If the rule does not exist, the operation fails.
When you enable a rule, incoming events might not immediately start matching to a newly enabled rule. Please allow a short period of time for changes to take effect.
See also: AWS API Documentation
:example: response = client.enable_rule(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]
The name of the rule.
"""
pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
"""
Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to
ClientMethod.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid
for. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By
default, the http method is whatever is used in the method's model.
"""
pass
def get_paginator(operation_name=None):
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is create_foo, and you'd normally invoke the
operation as client.create_foo(**kwargs), if the
create_foo operation can be paginated, you can use the
call client.get_paginator('create_foo').
:rtype: L{botocore.paginate.Paginator}
"""
pass
def get_waiter():
"""
"""
pass
def list_rule_names_by_target(TargetArn=None, NextToken=None, Limit=None):
"""
Lists the rules for the specified target. You can see which of the rules in Amazon CloudWatch Events can invoke a specific target in your account.
See also: AWS API Documentation
:example: response = client.list_rule_names_by_target(
TargetArn='string',
NextToken='string',
Limit=123
)
:type TargetArn: string
:param TargetArn: [REQUIRED]
The Amazon Resource Name (ARN) of the target resource.
:type NextToken: string
:param NextToken: The token returned by a previous call to retrieve the next set of results.
:type Limit: integer
:param Limit: The maximum number of results to return.
:rtype: dict
:return: {
'RuleNames': [
'string',
],
'NextToken': 'string'
}
:returns:
(string) --
"""
pass
def list_rules(NamePrefix=None, NextToken=None, Limit=None):
"""
Lists your Amazon CloudWatch Events rules. You can either list all the rules or you can provide a prefix to match to the rule names.
See also: AWS API Documentation
:example: response = client.list_rules(
NamePrefix='string',
NextToken='string',
Limit=123
)
:type NamePrefix: string
:param NamePrefix: The prefix matching the rule name.
:type NextToken: string
:param NextToken: The token returned by a previous call to retrieve the next set of results.
:type Limit: integer
:param Limit: The maximum number of results to return.
:rtype: dict
:return: {
'Rules': [
{
'Name': 'string',
'Arn': 'string',
'EventPattern': 'string',
'State': 'ENABLED'|'DISABLED',
'Description': 'string',
'ScheduleExpression': 'string',
'RoleArn': 'string'
},
],
'NextToken': 'string'
}
"""
pass
def list_targets_by_rule(Rule=None, NextToken=None, Limit=None):
"""
Lists the targets assigned to the specified rule.
See also: AWS API Documentation
:example: response = client.list_targets_by_rule(
Rule='string',
NextToken='string',
Limit=123
)
:type Rule: string
:param Rule: [REQUIRED]
The name of the rule.
:type NextToken: string
:param NextToken: The token returned by a previous call to retrieve the next set of results.
:type Limit: integer
:param Limit: The maximum number of results to return.
:rtype: dict
:return: {
'Targets': [
{
'Id': 'string',
'Arn': 'string',
'RoleArn': 'string',
'Input': 'string',
'InputPath': 'string',
'InputTransformer': {
'InputPathsMap': {
'string': 'string'
},
'InputTemplate': 'string'
},
'KinesisParameters': {
'PartitionKeyPath': 'string'
},
'RunCommandParameters': {
'RunCommandTargets': [
{
'Key': 'string',
'Values': [
'string',
]
},
]
},
'EcsParameters': {
'TaskDefinitionArn': 'string',
'TaskCount': 123
}
},
],
'NextToken': 'string'
}
:returns:
(string) --
(string) --
"""
pass
def put_events(Entries=None):
"""
Sends custom events to Amazon CloudWatch Events so that they can be matched to rules.
See also: AWS API Documentation
:example: response = client.put_events(
Entries=[
{
'Time': datetime(2015, 1, 1),
'Source': 'string',
'Resources': [
'string',
],
'DetailType': 'string',
'Detail': 'string'
},
]
)
:type Entries: list
:param Entries: [REQUIRED]
The entry that defines an event in your system. You can specify several parameters for the entry such as the source and type of the event, resources associated with the event, and so on.
(dict) --Represents an event to be submitted.
Time (datetime) --The timestamp of the event, per RFC3339 . If no timestamp is provided, the timestamp of the PutEvents call is used.
Source (string) --The source of the event.
Resources (list) --AWS resources, identified by Amazon Resource Name (ARN), which the event primarily concerns. Any number, including zero, may be present.
(string) --
DetailType (string) --Free-form string used to decide what fields to expect in the event detail.
Detail (string) --In the JSON sense, an object containing fields, which may also contain nested subobjects. No constraints are imposed on its contents.
:rtype: dict
:return: {
'FailedEntryCount': 123,
'Entries': [
{
'EventId': 'string',
'ErrorCode': 'string',
'ErrorMessage': 'string'
},
]
}
"""
pass
def put_rule(Name=None, ScheduleExpression=None, EventPattern=None, State=None, Description=None, RoleArn=None):
"""
Creates or updates the specified rule. Rules are enabled by default, or based on value of the state. You can disable a rule using DisableRule .
When you create or update a rule, incoming events might not immediately start matching to new or updated rules. Please allow a short period of time for changes to take effect.
A rule must contain at least an EventPattern or ScheduleExpression. Rules with EventPatterns are triggered when a matching event is observed. Rules with ScheduleExpressions self-trigger based on the given schedule. A rule can have both an EventPattern and a ScheduleExpression, in which case the rule triggers on matching events as well as on a schedule.
Most services in AWS treat : or / as the same character in Amazon Resource Names (ARNs). However, CloudWatch Events uses an exact match in event patterns and rules. Be sure to use the correct ARN characters when creating event patterns so that they match the ARN syntax in the event you want to match.
See also: AWS API Documentation
:example: response = client.put_rule(
Name='string',
ScheduleExpression='string',
EventPattern='string',
State='ENABLED'|'DISABLED',
Description='string',
RoleArn='string'
)
:type Name: string
:param Name: [REQUIRED]
The name of the rule that you are creating or updating.
:type ScheduleExpression: string
:param ScheduleExpression: The scheduling expression. For example, 'cron(0 20 * * ? *)', 'rate(5 minutes)'.
:type EventPattern: string
:param EventPattern: The event pattern. For more information, see Events and Event Patterns in the Amazon CloudWatch Events User Guide .
:type State: string
:param State: Indicates whether the rule is enabled or disabled.
:type Description: string
:param Description: A description of the rule.
:type RoleArn: string
:param RoleArn: The Amazon Resource Name (ARN) of the IAM role associated with the rule.
:rtype: dict
:return: {
'RuleArn': 'string'
}
"""
pass
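# Hedged usage sketch (illustrative only): the stubs in this module exist for
# IDE autocompletion; the real call goes through a boto3 CloudWatch Events
# client. "example-rule" is a hypothetical rule name.
def _example_put_rule():
    import boto3
    client = boto3.client('events')
    response = client.put_rule(
        Name='example-rule',
        ScheduleExpression='rate(5 minutes)',
        State='ENABLED',
        Description='Fires every five minutes',
    )
    return response['RuleArn']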
def put_targets(Rule=None, Targets=None):
"""
Adds the specified targets to the specified rule, or updates the targets if they are already associated with the rule.
Targets are the resources that are invoked when a rule is triggered. Example targets include EC2 instances, AWS Lambda functions, Amazon Kinesis streams, Amazon ECS tasks, AWS Step Functions state machines, and built-in targets. Note that creating rules with built-in targets is supported only in the AWS Management Console.
For some target types, PutTargets provides target-specific parameters. If the target is an Amazon Kinesis stream, you can optionally specify which shard the event goes to by using the KinesisParameters argument. To invoke a command on multiple EC2 instances with one rule, you can use the RunCommandParameters field.
To be able to make API calls against the resources that you own, Amazon CloudWatch Events needs the appropriate permissions. For AWS Lambda and Amazon SNS resources, CloudWatch Events relies on resource-based policies. For EC2 instances, Amazon Kinesis streams, and AWS Step Functions state machines, CloudWatch Events relies on IAM roles that you specify in the RoleARN argument in PutTarget . For more information, see Authentication and Access Control in the Amazon CloudWatch Events User Guide .
When you specify Input , InputPath , or InputTransformer , you must use JSON dot notation, not bracket notation.
When you add targets to a rule and the associated rule triggers soon after, new or updated targets might not be immediately invoked. Please allow a short period of time for changes to take effect.
This action can partially fail if too many requests are made at the same time. If that happens, FailedEntryCount is non-zero in the response and each entry in FailedEntries provides the ID of the failed target and the error code.
See also: AWS API Documentation
:example: response = client.put_targets(
Rule='string',
Targets=[
{
'Id': 'string',
'Arn': 'string',
'RoleArn': 'string',
'Input': 'string',
'InputPath': 'string',
'InputTransformer': {
'InputPathsMap': {
'string': 'string'
},
'InputTemplate': 'string'
},
'KinesisParameters': {
'PartitionKeyPath': 'string'
},
'RunCommandParameters': {
'RunCommandTargets': [
{
'Key': 'string',
'Values': [
'string',
]
},
]
},
'EcsParameters': {
'TaskDefinitionArn': 'string',
'TaskCount': 123
}
},
]
)
:type Rule: string
:param Rule: [REQUIRED]
The name of the rule.
:type Targets: list
:param Targets: [REQUIRED]
The targets to update or add to the rule.
(dict) --Targets are the resources to be invoked when a rule is triggered. Target types include EC2 instances, AWS Lambda functions, Amazon Kinesis streams, Amazon ECS tasks, AWS Step Functions state machines, Run Command, and built-in targets.
Id (string) -- [REQUIRED]The ID of the target.
Arn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the target.
RoleArn (string) --The Amazon Resource Name (ARN) of the IAM role to be used for this target when the rule is triggered. If one rule triggers multiple targets, you can use a different IAM role for each target.
Input (string) --Valid JSON text passed to the target. In this case, nothing from the event itself is passed to the target. You must use JSON dot notation, not bracket notation. For more information, see The JavaScript Object Notation (JSON) Data Interchange Format .
InputPath (string) --The value of the JSONPath that is used for extracting part of the matched event when passing it to the target. You must use JSON dot notation, not bracket notation. For more information about JSON paths, see JSONPath .
InputTransformer (dict) --Settings to enable you to provide custom input to a target based on certain event data. You can extract one or more key-value pairs from the event and then use that data to send customized input to the target.
InputPathsMap (dict) --Map of JSON paths to be extracted from the event. These are key-value pairs, where each value is a JSON path. You must use JSON dot notation, not bracket notation.
(string) --
(string) --
InputTemplate (string) -- [REQUIRED]Input template where you can use the values of the keys from InputPathsMap to customize the data sent to the target.
KinesisParameters (dict) --The custom parameter you can use to control shard assignment, when the target is an Amazon Kinesis stream. If you do not include this parameter, the default is to use the eventId as the partition key.
PartitionKeyPath (string) -- [REQUIRED]The JSON path to be extracted from the event and used as the partition key. For more information, see Amazon Kinesis Streams Key Concepts in the Amazon Kinesis Streams Developer Guide .
RunCommandParameters (dict) --Parameters used when you are using the rule to invoke Amazon EC2 Run Command.
RunCommandTargets (list) -- [REQUIRED]Currently, we support including only one RunCommandTarget block, which specifies either an array of InstanceIds or a tag.
(dict) --Information about the EC2 instances that are to be sent the command, specified as key-value pairs. Each RunCommandTarget block can include only one key, but this key may specify multiple values.
Key (string) -- [REQUIRED]Can be either tag: tag-key or InstanceIds .
Values (list) -- [REQUIRED]If Key is tag: tag-key , Values is a list of tag values. If Key is InstanceIds , Values is a list of Amazon EC2 instance IDs.
(string) --
EcsParameters (dict) --Contains the Amazon ECS task definition and task count to be used, if the event target is an Amazon ECS task. For more information about Amazon ECS tasks, see Task Definitions in the Amazon EC2 Container Service Developer Guide .
TaskDefinitionArn (string) -- [REQUIRED]The ARN of the task definition to use if the event target is an Amazon ECS cluster.
TaskCount (integer) --The number of tasks to create based on the TaskDefinition . The default is one.
:rtype: dict
:return: {
'FailedEntryCount': 123,
'FailedEntries': [
{
'TargetId': 'string',
'ErrorCode': 'string',
'ErrorMessage': 'string'
},
]
}
:returns:
Rule (string) -- [REQUIRED]
The name of the rule.
Targets (list) -- [REQUIRED]
The targets to update or add to the rule.
(dict) --Targets are the resources to be invoked when a rule is triggered. Target types include EC2 instances, AWS Lambda functions, Amazon Kinesis streams, Amazon ECS tasks, AWS Step Functions state machines, Run Command, and built-in targets.
Id (string) -- [REQUIRED]The ID of the target.
Arn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the target.
RoleArn (string) --The Amazon Resource Name (ARN) of the IAM role to be used for this target when the rule is triggered. If one rule triggers multiple targets, you can use a different IAM role for each target.
Input (string) --Valid JSON text passed to the target. In this case, nothing from the event itself is passed to the target. You must use JSON dot notation, not bracket notation. For more information, see The JavaScript Object Notation (JSON) Data Interchange Format .
InputPath (string) --The value of the JSONPath that is used for extracting part of the matched event when passing it to the target. You must use JSON dot notation, not bracket notation. For more information about JSON paths, see JSONPath .
InputTransformer (dict) --Settings to enable you to provide custom input to a target based on certain event data. You can extract one or more key-value pairs from the event and then use that data to send customized input to the target.
InputPathsMap (dict) --Map of JSON paths to be extracted from the event. These are key-value pairs, where each value is a JSON path. You must use JSON dot notation, not bracket notation.
(string) --
(string) --
InputTemplate (string) -- [REQUIRED]Input template where you can use the values of the keys from InputPathsMap to customize the data sent to the target.
KinesisParameters (dict) --The custom parameter you can use to control shard assignment, when the target is an Amazon Kinesis stream. If you do not include this parameter, the default is to use the eventId as the partition key.
PartitionKeyPath (string) -- [REQUIRED]The JSON path to be extracted from the event and used as the partition key. For more information, see Amazon Kinesis Streams Key Concepts in the Amazon Kinesis Streams Developer Guide .
RunCommandParameters (dict) --Parameters used when you are using the rule to invoke Amazon EC2 Run Command.
RunCommandTargets (list) -- [REQUIRED]Currently, we support including only one RunCommandTarget block, which specifies either an array of InstanceIds or a tag.
(dict) --Information about the EC2 instances that are to be sent the command, specified as key-value pairs. Each RunCommandTarget block can include only one key, but this key may specify multiple values.
Key (string) -- [REQUIRED]Can be either tag: tag-key or InstanceIds .
Values (list) -- [REQUIRED]If Key is tag: tag-key , Values is a list of tag values. If Key is InstanceIds , Values is a list of Amazon EC2 instance IDs.
(string) --
EcsParameters (dict) --Contains the Amazon ECS task definition and task count to be used, if the event target is an Amazon ECS task. For more information about Amazon ECS tasks, see Task Definitions in the Amazon EC2 Container Service Developer Guide .
TaskDefinitionArn (string) -- [REQUIRED]The ARN of the task definition to use if the event target is an Amazon ECS cluster.
TaskCount (integer) --The number of tasks to create based on the TaskDefinition . The default is one.
"""
pass
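# Illustrative sketch, not part of the generated stub: attaching a Lambda
# function as a target of the example rule above and passing it a fixed JSON
# input. The rule name and ARN are placeholder assumptions; the Lambda
# function would also need a resource-based permission for CloudWatch Events.
def _example_put_targets():
    import boto3
    client = boto3.client('events', region_name='us-east-1')
    return client.put_targets(
        Rule='example-rule',
        Targets=[
            {
                'Id': 'example-target-1',
                'Arn': 'arn:aws:lambda:us-east-1:123456789012:function:example',
                'Input': '{"source": "scheduled-rule"}',
            },
        ],
    )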
def remove_targets(Rule=None, Ids=None):
"""
    Removes the specified targets from the specified rule. When the rule is triggered, those targets are no longer invoked.
    When you remove a target, it might continue to be invoked for a short time when the associated rule triggers. Please allow a short period of time for changes to take effect.
This action can partially fail if too many requests are made at the same time. If that happens, FailedEntryCount is non-zero in the response and each entry in FailedEntries provides the ID of the failed target and the error code.
See also: AWS API Documentation
:example: response = client.remove_targets(
Rule='string',
Ids=[
'string',
]
)
:type Rule: string
:param Rule: [REQUIRED]
The name of the rule.
:type Ids: list
:param Ids: [REQUIRED]
The IDs of the targets to remove from the rule.
(string) --
:rtype: dict
:return: {
'FailedEntryCount': 123,
'FailedEntries': [
{
'TargetId': 'string',
'ErrorCode': 'string',
'ErrorMessage': 'string'
},
]
}
"""
pass
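# Illustrative sketch, not part of the generated stub: detaching the target
# added in the example above. A non-zero FailedEntryCount in the response
# signals a partial failure.
def _example_remove_targets():
    import boto3
    client = boto3.client('events', region_name='us-east-1')
    response = client.remove_targets(Rule='example-rule', Ids=['example-target-1'])
    return response['FailedEntryCount'] == 0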
def test_event_pattern(EventPattern=None, Event=None):
"""
Tests whether the specified event pattern matches the provided event.
Most services in AWS treat : or / as the same character in Amazon Resource Names (ARNs). However, CloudWatch Events uses an exact match in event patterns and rules. Be sure to use the correct ARN characters when creating event patterns so that they match the ARN syntax in the event you want to match.
See also: AWS API Documentation
:example: response = client.test_event_pattern(
EventPattern='string',
Event='string'
)
:type EventPattern: string
:param EventPattern: [REQUIRED]
The event pattern. For more information, see Events and Event Patterns in the Amazon CloudWatch Events User Guide .
:type Event: string
:param Event: [REQUIRED]
The event, in JSON format, to test against the event pattern.
:rtype: dict
:return: {
'Result': True|False
}
"""
pass
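# Illustrative sketch, not part of the generated stub: checking whether an
# event pattern matches a sample event. Both arguments are JSON strings; the
# sample event fields below are placeholder assumptions.
def _example_test_event_pattern():
    import boto3
    import json
    client = boto3.client('events', region_name='us-east-1')
    pattern = json.dumps({'source': ['aws.ec2']})
    event = json.dumps({
        'id': '7bf73129-1428-4cd3-a780-95db273d1602',
        'detail-type': 'EC2 Instance State-change Notification',
        'source': 'aws.ec2',
        'account': '123456789012',
        'time': '2015-11-11T21:29:54Z',
        'region': 'us-east-1',
        'resources': ['arn:aws:ec2:us-east-1:123456789012:instance/i-abcd1111'],
        'detail': {'instance-id': 'i-abcd1111', 'state': 'pending'},
    })
    return client.test_event_pattern(EventPattern=pattern, Event=event)['Result']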
| 1.921875 | 2 |
service-configs/zabbix/partitioning/zabbix-partitioning.py | digideskio/adminscripts | 110 | 12791064 | <gh_stars>100-1000
#!/usr/bin/python
import psycopg2
from optparse import OptionParser
tables = {
'history':'daily',
'history_sync':'daily',
'history_uint':'daily',
'history_uint_sync':'daily',
'history_str':'daily',
'history_str_sync':'daily',
'history_log':'daily',
'history_text':'daily',
'trends':'monthly',
'trends_uint':'monthly',
'acknowledges':'monthly',
'alerts':'monthly',
'auditlog':'monthly',
'events':'monthly',
'service_alarms':'monthly',
}
#change these settings
db_user = 'zabbix'
db_pw = '<PASSWORD>'
db = 'zabbix'
db_host = 'localhost'
#####
parser = OptionParser()
parser.add_option("-i", "--init", dest="init",help="partitioning init",action="store_true", default=False)
(options, args) = parser.parse_args()
if options.init:
init = 1
else:
init = 0
db_connection = psycopg2.connect(database=db, user=db_user, password=db_pw, host=db_host)
db_cursor = db_connection.cursor()
for table_key, table_value in tables.iteritems():
db_cursor.execute('''select create_zbx_partitions(%s,%s,%s)''',[table_key,table_value,init])
db_connection.commit()
db_cursor.close()
db_connection.close()
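# Example invocation (illustrative; --init is the only flag defined by the
# parser above):
#   ./zabbix-partitioning.py --init    # first run: create the initial partitions
#   ./zabbix-partitioning.py           # later runs, e.g. from a daily cron job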
| 2.15625 | 2 |
aws_cost_explorer_converter/command_line.py | wilbur4321/aws-cost-explorer-converter | 0 | 12791065 | import boto3
import argparse
import json
from datetime import timedelta, date
from pprint import pprint
import aws_cost_explorer_converter
def parse_args():
parser = argparse.ArgumentParser(
description='Fetch cost explorer data from AWS and display and/or save it',
usage='%(prog)s [options]',
epilog='Standard environment variables for AWS connection information are supported'
)
global args
parser.add_argument('--start', help='Start date; if a negative number, is taken as a delta from today; if zero, then as the start of the current month')
parser.add_argument('--end', help='End date')
parser.add_argument('--granularity', default='DAILY', help='Granularity, MONTHLY, DAILY or HOURLY (untested)')
parser.add_argument('--filter', type=json.loads, help='JSON filter expression (see AWS documentation)')
parser.add_argument('--metrics', type=json.loads, default=['UnblendedCost'], help='JSON metrics expression, eg \'[ "UnblendedCost", "NetUnblendedCost"]\'')
parser.add_argument('--group-by', type=json.loads, help='JSON group_by expression (see AWS documentation)')
parser.add_argument('--display', action='store_true', help='Display (truncated) output table')
parser.add_argument('--out', help='File to store CSV in (not stored if not specified')
args = parser.parse_args()
# Handle special cases of start
try:
x = int(args.start)
if x == 0:
args.start = date.today().replace(day = 1)
elif x < 0:
args.start = date.today() + timedelta(days = x)
except:
pass
return args
def main():
args = parse_args()
if not args.display and not args.out:
raise Exception('Not showing or saving output, no reason to run')
client = boto3.client('ce', region_name='us-east-1')
converted = aws_cost_explorer_converter.CostExplorerConverter(
client,
start = args.start,
end = args.end,
granularity = args.granularity,
filter = args.filter,
group_by = args.group_by,
metrics = args.metrics
).to_df()
if args.display:
print('Converted:')
pprint(converted)
print('')
if args.out:
converted.to_csv(path_or_buf = args.out, index = False, encoding = 'utf-8')
print('Wrote csv to %s' % (args.out))
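# Not part of the original module: a guard like the one below is commonly
# added so the converter can also be run directly with `python command_line.py`;
# the packaged console_script entry point may already provide equivalent behavior.
if __name__ == '__main__':
    main()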
| 2.9375 | 3 |
neutron/objects/base.py | BobzhouCH/neutron-nfv-acc | 0 | 12791066 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import copy
import itertools
from neutron_lib import exceptions
from oslo_db import exception as obj_exc
from oslo_utils import reflection
from oslo_versionedobjects import base as obj_base
import six
from neutron._i18n import _
from neutron.objects.db import api as obj_db_api
class NeutronObjectUpdateForbidden(exceptions.NeutronException):
message = _("Unable to update the following object fields: %(fields)s")
class NeutronDbObjectDuplicateEntry(exceptions.Conflict):
message = _("Failed to create a duplicate %(object_type)s: "
"for attribute(s) %(attributes)s with value(s) %(values)s")
def __init__(self, object_class, db_exception):
super(NeutronDbObjectDuplicateEntry, self).__init__(
object_type=reflection.get_class_name(object_class,
fully_qualified=False),
attributes=db_exception.columns,
values=db_exception.value)
class NeutronPrimaryKeyMissing(exceptions.BadRequest):
message = _("For class %(object_type)s missing primary keys: "
"%(missing_keys)s")
def __init__(self, object_class, missing_keys):
super(NeutronPrimaryKeyMissing, self).__init__(
object_type=reflection.get_class_name(object_class,
fully_qualified=False),
missing_keys=missing_keys
)
def get_updatable_fields(cls, fields):
fields = fields.copy()
for field in cls.fields_no_update:
if field in fields:
del fields[field]
return fields
@six.add_metaclass(abc.ABCMeta)
class NeutronObject(obj_base.VersionedObject,
obj_base.VersionedObjectDictCompat,
obj_base.ComparableVersionedObject):
synthetic_fields = []
def __init__(self, context=None, **kwargs):
super(NeutronObject, self).__init__(context, **kwargs)
self.obj_set_defaults()
def to_dict(self):
return dict(self.items())
@classmethod
def clean_obj_from_primitive(cls, primitive, context=None):
obj = cls.obj_from_primitive(primitive, context)
obj.obj_reset_changes()
return obj
@classmethod
def get_object(cls, context, **kwargs):
raise NotImplementedError()
@classmethod
def validate_filters(cls, **kwargs):
bad_filters = [key for key in kwargs
if key not in cls.fields or key in cls.synthetic_fields]
if bad_filters:
bad_filters = ', '.join(bad_filters)
msg = _("'%s' is not supported for filtering") % bad_filters
raise exceptions.InvalidInput(error_message=msg)
@classmethod
@abc.abstractmethod
def get_objects(cls, context, **kwargs):
raise NotImplementedError()
def create(self):
raise NotImplementedError()
def update(self):
raise NotImplementedError()
def delete(self):
raise NotImplementedError()
class DeclarativeObject(abc.ABCMeta):
def __init__(cls, name, bases, dct):
super(DeclarativeObject, cls).__init__(name, bases, dct)
for base in itertools.chain([cls], bases):
if hasattr(base, 'primary_keys'):
cls.fields_no_update += base.primary_keys
# avoid duplicate entries
cls.fields_no_update = list(set(cls.fields_no_update))
@six.add_metaclass(DeclarativeObject)
class NeutronDbObject(NeutronObject):
# should be overridden for all persistent objects
db_model = None
primary_keys = ['id']
fields_no_update = []
# dict with name mapping: {'field_name_in_object': 'field_name_in_db'}
fields_need_translation = {}
def from_db_object(self, *objs):
db_objs = [self.modify_fields_from_db(db_obj) for db_obj in objs]
for field in self.fields:
for db_obj in db_objs:
if field in db_obj:
setattr(self, field, db_obj[field])
break
self.obj_reset_changes()
@classmethod
def modify_fields_to_db(cls, fields):
"""
This method enables to modify the fields and its
content before data is inserted into DB.
It uses the fields_need_translation dict with structure:
{
'field_name_in_object': 'field_name_in_db'
}
:param fields: dict of fields from NeutronDbObject
:return: modified dict of fields
"""
result = copy.deepcopy(dict(fields))
for field, field_db in cls.fields_need_translation.items():
if field in result:
result[field_db] = result.pop(field)
return result
@classmethod
def modify_fields_from_db(cls, db_obj):
"""
This method enables to modify the fields and its
content after data was fetched from DB.
It uses the fields_need_translation dict with structure:
{
'field_name_in_object': 'field_name_in_db'
}
:param db_obj: dict of object fetched from database
:return: modified dict of DB values
"""
result = dict(db_obj)
for field, field_db in cls.fields_need_translation.items():
if field_db in result:
result[field] = result.pop(field_db)
return result
@classmethod
def get_object(cls, context, **kwargs):
"""
This method fetches object from DB and convert it to versioned
object.
:param context:
:param kwargs: multiple primary keys defined key=value pairs
:return: single object of NeutronDbObject class
"""
missing_keys = set(cls.primary_keys).difference(kwargs.keys())
if missing_keys:
raise NeutronPrimaryKeyMissing(object_class=cls.__class__,
missing_keys=missing_keys)
db_obj = obj_db_api.get_object(context, cls.db_model, **kwargs)
if db_obj:
obj = cls(context, **cls.modify_fields_from_db(db_obj))
obj.obj_reset_changes()
return obj
@classmethod
def get_objects(cls, context, **kwargs):
cls.validate_filters(**kwargs)
db_objs = obj_db_api.get_objects(context, cls.db_model, **kwargs)
result = []
for db_obj in db_objs:
obj = cls(context, **cls.modify_fields_from_db(db_obj))
obj.obj_reset_changes()
result.append(obj)
return result
@classmethod
def is_accessible(cls, context, db_obj):
return (context.is_admin or
context.tenant_id == db_obj.tenant_id)
def _get_changed_persistent_fields(self):
fields = self.obj_get_changes()
for field in self.synthetic_fields:
if field in fields:
del fields[field]
return fields
def _validate_changed_fields(self, fields):
fields = fields.copy()
forbidden_updates = set(self.fields_no_update) & set(fields.keys())
if forbidden_updates:
raise NeutronObjectUpdateForbidden(fields=forbidden_updates)
return fields
def create(self):
fields = self._get_changed_persistent_fields()
try:
db_obj = obj_db_api.create_object(self._context, self.db_model,
self.modify_fields_to_db(fields))
except obj_exc.DBDuplicateEntry as db_exc:
raise NeutronDbObjectDuplicateEntry(object_class=self.__class__,
db_exception=db_exc)
self.from_db_object(db_obj)
def _get_composite_keys(self):
keys = {}
for key in self.primary_keys:
keys[key] = getattr(self, key)
return self.modify_fields_to_db(keys)
def update(self):
updates = self._get_changed_persistent_fields()
updates = self._validate_changed_fields(updates)
if updates:
db_obj = obj_db_api.update_object(self._context, self.db_model,
self.modify_fields_to_db(updates),
**self._get_composite_keys())
self.from_db_object(self, db_obj)
def delete(self):
obj_db_api.delete_object(self._context, self.db_model,
**self._get_composite_keys())
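# Illustrative sketch, not part of neutron: a persistent object is typically
# declared by subclassing NeutronDbObject, pointing it at a SQLAlchemy model
# and describing its fields with oslo.versionedobjects field types, roughly:
#     from oslo_versionedobjects import fields as obj_fields
#     @obj_base.VersionedObjectRegistry.register
#     class FakeObject(NeutronDbObject):
#         VERSION = '1.0'
#         db_model = models.FakeModel  # hypothetical SQLAlchemy model
#         fields = {
#             'id': obj_fields.UUIDField(),
#             'name': obj_fields.StringField(nullable=True),
#         }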
| 1.890625 | 2 |
setup.py | OLC-LOC-Bioinformatics/AzureStorage | 0 | 12791067 | #!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name="AzureStorage",
version="0.0.2",
entry_points={
'console_scripts': [
'AzureCredentials = azure_storage.azure_credentials:cli',
'AzureAutomate = azure_storage.azure_automate:cli',
'AzureDownload = azure_storage.azure_download:cli',
'AzureDelete = azure_storage.azure_delete:cli',
'AzureUpload = azure_storage.azure_upload:cli',
'AzureList = azure_storage.azure_list:cli',
'AzureMove = azure_storage.azure_move:cli',
'AzureTier = azure_storage.azure_tier:cli',
'AzureSAS = azure_storage.azure_sas:cli'
],
},
packages=find_packages(),
include_package_data=True,
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/OLC-LOC-Bioinformatics/AzureStorage",
)
| 1.195313 | 1 |
base/urls/order_urls.py | vishnu-sagubandi/LIAVYS_BACKEND | 0 | 12791068 | from django.urls import path
from base.views.order_views import *
urlpatterns = [
path('', getOrders, name='orders'),
path('add/', addOrderItems, name='orders-add'),
path('gettoken/', getTokenView, name='get-client-token'),
path('myorders/', getMyOrders, name='myorders'),
path('<str:pk>/', getOrderById, name='get-order-by-id'),
path('<str:pk>/deliver/', updateOrderToDelivered, name='order-delivered'),
path('<str:pk>/pay/', updateOrderToPaid, name='pay'),
]
| 1.796875 | 2 |
tests/test_workflow.py | asavpatel92/argo-python-dsl | 0 | 12791069 | import flexmock
import pytest
import requests
from argo.workflows.dsl import Workflow
from ._base import TestCase
"""Workflow test suite."""
@pytest.fixture # type: ignore
def url() -> str:
"""Fake URL fixture."""
class TestWorkflow(TestCase):
"""Test Workflow."""
_WORKFLOW_FILE = TestCase.DATA / "workflows" / "hello-world.yaml"
def test_from_file(self) -> None:
"""Test `Workflow.from_file` methods."""
wf = Workflow.from_file(self._WORKFLOW_FILE)
assert isinstance(wf, Workflow)
assert wf.name == "test"
assert wf.kind == "Workflow"
assert len(wf.spec.templates) == 1
def test_from_url(self, url: str) -> None:
"""Test `Workflow.from_url` methods."""
fake_response = type(
"Response",
(),
{"text": self._WORKFLOW_FILE.read_text(), "raise_for_status": lambda: None},
)
flexmock(requests).should_receive("get").and_return(fake_response)
wf = Workflow.from_url(url)
assert isinstance(wf, Workflow)
assert wf.name == "test"
assert wf.kind == "Workflow"
assert len(wf.spec.templates) == 1
| 2.40625 | 2 |
tests/tests/app_tests.py | centergy/flex_ussd | 0 | 12791070 | <reponame>centergy/flex_ussd
import pytest
from flex.ussd.core import UssdApp
xfail = pytest.mark.xfail
parametrize = pytest.mark.parametrize
# pytestmark = pytest.mark.usefixtures("db")
class AppTest(object):
def test_init(self):
app = UssdApp('test_app')
assert app.name == 'test_app'
| 2.296875 | 2 |
stacks/linkedlistnode.py | nataz77/cs-py | 0 | 12791071 | <reponame>nataz77/cs-py
class LinkedListNode:
"""Represents a linked list node"""
def __init__(self, val=None):
self.data = val
self.next = None
self.previous = None
@property
def HasNext(self) -> bool:
return self.next is not None
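# Illustrative usage, not part of the original module: linking two nodes.
def _example_linked_list_usage():
    head = LinkedListNode(1)
    tail = LinkedListNode(2)
    head.next = tail
    tail.previous = head
    # head has a successor, tail does not
    return head.HasNext and not tail.HasNext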
| 3.453125 | 3 |
DeezerIcon.py | 19po/deezer-player | 4 | 12791072 | from PyQt4 import QtGui
import webbrowser
__author__ = 'postrowski'
# -*-coding: utf-8-*-
class DeezerIcon(object):
def __init__(self, parent):
self.iconLabel = parent.iconLabel
self.timer = parent.timer
def hover_button(self):
if self.iconLabel.underMouse():
self.timer.start(10)
pixmap = QtGui.QPixmap("images/icon_hover.svg")
self.iconLabel.setPixmap(pixmap)
else:
pixmap = QtGui.QPixmap("images/icon.svg")
self.iconLabel.setPixmap(pixmap)
def click_button(self):
if self.iconLabel.underMouse():
self.timer.start(200)
pixmap = QtGui.QPixmap("images/icon_clicked.svg")
self.iconLabel.setPixmap(pixmap)
webbrowser.open(str("http://www.deezer.com"), new=1, autoraise=True)
else:
pixmap = QtGui.QPixmap("images/icon.svg")
self.iconLabel.setPixmap(pixmap)
| 3.125 | 3 |
Source/FileDataNulltest.py | apj-graham/XPS-File-Converter | 0 | 12791073 | from FileData import FileData
import pandas as pd
import numpy as np
file_data = FileData("F:\\Python Projects\\170622_MDS.txt")
print(file_data.df)
# Note: without inplace=True (or reassignment) fillna returns a new frame
# and leaves file_data.df unchanged, as the next print shows.
file_data.df.fillna(0)
print(file_data.df)
df = pd.DataFrame([[np.nan, 2, np.nan, 0],
[3, 4, np.nan, 1],
[np.nan, np.nan, np.nan, 5],
[np.nan, 3, np.nan, 4]],
columns=list('ABCD'))
print(df)
df.fillna(0, inplace = True)
print(df)
| 2.921875 | 3 |
iatransfer/research/data/__init__.py | KamilPiechowiak/iatransfer | 4 | 12791074 | from .fgvc_aircraft import FGVCAircraft
from .flowers import Flowers102
from .food101 import Food101
| 0.945313 | 1 |
100-Exercises-Guanabara/Ex81-85.py | Neil-Iyer/Basic-Exercises-in-Python | 0 | 12791075 | ''' These exercises are part of the Introduction to Algorithms course taught by Prof. <NAME> and can be found at https://www.cursoemvideo.com/wp-content/uploads/2019/08/exercicios-algoritmos.pdf
81) Write a program that reads the ages of 8 people and stores them in an array. At the end, show:
a) The average age of the people entered
b) The positions that hold people older than 25
c) The highest age entered (there may be repetitions)
d) The positions where the highest age was entered '''
print("Questão 81\n")
x = []
media = 0
position = []
position25 = []
for i in range(8):
x.append(int(input("Digite sua idade: ")))
print("\nIdades inseridas:", x)
maiorIdade = max(x)
j = 0
k = 0
for i in x:
media += i
if i > 25:
        value = x.index(i, j) + 1 # position of values > 25
        position25.append(value) # add the position to the list
        j = value # move j forward so index() searches from the next position onward
if i == maiorIdade:
place = x.index(i, k) + 1
position.append(place)
k = place
media = media / 8
print("Média das idades cadastradas:", media)
print("Posições com idades acima de 25 anos:", position25)
print("Maior idade digitada:", maiorIdade)
print("Posições com a maior idade:", position)
''' 82) Write an algorithm that reads the grades of 10 students in a class and stores them in an array. At the end, show:
a) The class average
b) How many students are above the class average
c) The highest grade entered
d) The positions where the highest grade appears '''
print("\nQuestão 82\n")
x = []
media = 0
for i in range(10):
nota = float(input("Qual a nota do aluno? "))
x.append(nota)
media += nota
media = media / 10
maiorNota = max(x)
excel = 0
k = 0
position = []
for i in x:
if i > media:
excel += 1
if i == maiorNota:
place = x.index(i, k) + 1
position.append(place)
k = place
print("\nTodas as notas:", x)
print("Média da turma:", round(media, 2))
print("Qtd de alunos acima da média:", excel)
print("Maior nota:", maiorNota)
print("Posições em que a maior nota aparece:", position)
''' 83) [CHALLENGE] Write a routine that fills an array of 20 positions with random numbers (between 0 and 99) generated by the computer. Right after, show the generated numbers and then put the array in ascending order, showing the sorted values at the end. '''
print("\nQuestão 83\n")
import random
vetor = []
for i in range(20):
vetor.append(random.randint(0, 99))
print("Números gerados:", vetor)
print("Números ordenados:", sorted(vetor))
''' 84) Write a program that reads the name and age of 9 people and stores these values in two arrays, in related positions. At the end, show a listing containing only the data of the people who are minors. '''
print("\nQuestão 84\n")
# https://stackoverflow.com/questions/8356501/python-format-tabular-output
from tabulate import tabulate
nomes = []
idades = []
table = []
for i in range(9):
nomes.append(input("Digite o seu nome: "))
idades.append(int(input("Digite a sua idade: ")))
if idades[i] < 18:
table.append([nomes[i], idades[i]])
if table != []:
print("\nPessoas menores de idade:")
print(tabulate(table))
''' 85) Write an algorithm that reads the name, gender and salary of 5 employees and stores this data in three arrays. At the end, show a listing containing only the data of the female employees who earn more than R$5,000. '''
# Check whether the user typed a valid letter
def test(choice):
while True:
if choice == "F" or choice == "M":
break
else:
print("Você precisa escolher F para Feminino ou M para Masculino. Tente de novo!")
choice = input("Qual o seu gênero? [F/M] ")
return choice
print("\nQuestão 85\n")
nome = []
genero = []
salario = []
table = []
for i in range(5):
nome.append(input("Digite o seu nome: "))
resposta = input("Qual o seu gênero? [F/M] ")
resposta = test(resposta)
genero.append(resposta)
salario.append(float(input("Qual o seu salário? R$")))
if genero[i] == "F" and salario[i] > 5000:
table.append([nome[i], genero[i], "R$" + str(round(salario[i], 2))])
if table != []:
print("\nNome | Gênero | Salário")
print(tabulate(table)) | 4.21875 | 4 |
src/olympia/versions/urls.py | dante381/addons-server | 0 | 12791076 | from django.urls import re_path
from olympia.addons.urls import ADDON_ID
from olympia.amo.views import frontend_view
from . import views
urlpatterns = [
re_path(r'^$', frontend_view, name='addons.versions'),
re_path(
r'^(?P<version_num>[^/]+)/updateinfo/$',
views.update_info,
name='addons.versions.update_info',
),
]
download_patterns = [
# /<locale>/<app>/file/<id>/filename.xpi
# /<locale>/<app>/file/<id>/type:attachment/filename.xpi
# See comment in File.get_url_path(): do not change this without checking
# with Fenix first, the pattern is hardcoded in their code.
re_path(
(
r'^file/(?P<file_id>\d+)/'
r'(?:type:(?P<download_type>\w+)/)?'
r'(?:(?P<filename>[\w+.-]*))?$'
),
views.download_file,
name='downloads.file',
),
re_path(
r'^source/(?P<version_id>\d+)', views.download_source, name='downloads.source'
),
# /latest/<id>/type:xpi/platform:5/lol.xpi - everything after the addon id
# is ignored though.
re_path(
(
r'^latest/%s/'
r'(?:type:(?P<download_type>\w+)/)?'
r'(?:platform:(?P<platform>\d+)/)?'
r'(?:(?P<filename>[\w+.-]*))?$'
)
% ADDON_ID,
views.download_latest,
name='downloads.latest',
),
]
| 1.945313 | 2 |
yukicoder/yuki087.py | knuu/competitive-programming | 1 | 12791077 | N = int(input())
def isLeap(year):
if (year % 4 == 0 and year % 100 != 0) or year % 400 == 0:
return True
else:
return False
leap400 = 0
leap400List = []
y = 0
for i in range(2015, 2415):
if isLeap(i):
y += 366
else:
y += 365
if y % 7 == 0:
leap400 += 1
leap400List.append(leap400)
print(leap400 * ((N-2014) // 400) + leap400List[(N-2014)%400-1])
| 3.21875 | 3 |
threeSum.py | JiJingYu/LeetCode_practice | 0 | 12791078 | <reponame>JiJingYu/LeetCode_practice<gh_stars>0
class Solution(object):
def threeSum(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
ans = list()
nums = sorted(nums)
for i, num_0 in enumerate(nums):
if i>0 and nums[i] == nums[i-1]:
continue
left = i+1
right = len(nums)-1
while left < right:
sum = num_0 + nums[left] + nums[right]
if sum < 0:
left += 1
elif sum > 0:
right -=1
else:
ans.append([num_0, nums[left], nums[right]])
while left < right and nums[left]==nums[left+1]:
left +=1
while left < right and nums[right]==nums[right-1]:
right -=1
left += 1
right -= 1
return ans
def test():
nums = [-1, 0, 1, 2, -1, -4]
solution = Solution()
ans = solution.threeSum(nums)
print(ans)
if __name__=='__main__':
test()
| 3.5625 | 4 |
hotword.py | jackoson/squeezebox-google-assistant | 0 | 12791079 | <reponame>jackoson/squeezebox-google-assistant
#!/usr/bin/env python
# Copyright (C) 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import json
import os.path
import pathlib2 as pathlib
import google.oauth2.credentials
from google.assistant.library import Assistant
from google.assistant.library.event import EventType
from google.assistant.library.file_helpers import existing_file
from google.assistant.library.device_helpers import register_device
import assistant_squeezebox_controller as squeezebox
import sys
import datetime
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
WARNING_NOT_REGISTERED = """
This device is not registered. This means you will not be able to use
Device Actions or see your device in Assistant Settings. In order to
register this device follow instructions at:
https://developers.google.com/assistant/sdk/guides/library/python/embed/register-device
"""
class Logger(object):
def __init__(self, filename):
self.filename = filename
def write(self, message):
with open(self.filename, "a") as f:
f.write(message)
def flush(self):
pass
def log(x):
now = datetime.datetime.now().strftime('%F_%X')
x['time'] = now
print(x)
def process_event(event):
"""
Args:
event(event.Event): The current event to process.
"""
if event.type == EventType.ON_DEVICE_ACTION:
for command, params in event.actions:
log({'type': 'device action', 'command': command, 'params': params})
try:
if command == "com.example.commands.SqueezeBoxCommand":
squeeze_controller.simple_command(params)
elif command == "com.example.commands.SqueezeBoxQuery":
ans = str(squeeze_controller.simple_query(params))
speak(ans)
log({'type': 'squeezebox response', 'message': ans})
elif command == "com.example.commands.SqueezeBoxSearch":
ans = str(squeeze_controller.search_and_play(params))
speak(ans)
log({'type': 'squeezebox response', 'message': ans})
elif command == "com.example.commands.SqueezeBoxPlayNext":
ans = str(squeeze_controller.search_and_play_next(params))
speak(ans)
log({'type': 'squeezebox response', 'message': ans})
elif command == "com.example.commands.SqueezeBoxPlayEnd":
ans = str(squeeze_controller.search_and_play_end(params))
speak(ans)
log({'type': 'squeezebox response', 'message': ans})
elif command == "com.example.commands.SqueezeBoxSpotifySearch":
ans = str(squeeze_controller.spotify_search_and_play(params))
speak(ans)
log({'type': 'squeezebox response', 'message': ans})
elif command == "com.example.commands.SqueezeBoxVolume":
squeeze_controller.set_volume(params)
elif command == "com.example.commands.SqueezeBoxSleep":
squeeze_controller.sleep_in(params)
elif command == "com.example.commands.SqueezeBoxSendMusic":
squeeze_controller.send_music(params)
elif command == "com.example.commands.SqueezeBoxSync":
squeeze_controller.sync_player(params)
elif command == "com.example.commands.SqueezeBoxRadio4":
squeeze_controller.play_radio4(params)
except squeezebox.UserException as e:
e = str(e)
speak(e)
log({'type': 'squeezebox response', 'message': e})
except Exception as e:
e = str(e)
speak(e)
log({'type': 'exception', 'message': e})
elif event.type == EventType.ON_RECOGNIZING_SPEECH_FINISHED:
log({'type': 'speech', 'text': event.args['text']})
elif event.type == EventType.ON_RENDER_RESPONSE:
log({'type': 'google response', 'text': event.args['text']})
elif event.type == EventType.ON_CONVERSATION_TURN_STARTED:
log({'type': 'listening'})
if event.type in [EventType.ON_CONVERSATION_TURN_STARTED, EventType.ON_RESPONDING_STARTED]:
squeeze_controller.quiet()
log({'type': 'quiet'})
elif event.type in [EventType.ON_END_OF_UTTERANCE, EventType.ON_RESPONDING_FINISHED]:
squeeze_controller.return_volume()
log({'type': 'return volume'})
def setup_controllers(credentials_path):
global squeeze_controller
with open(credentials_path, "r") as f:
creds = json.loads(f.read())
squeeze_controller = squeezebox.AssistantSqueezeBoxController(creds['squeezebox_server']['ip'], creds['squeezebox_server']['port'], main_squeezebox=creds['nearest_squeezebox'])
def setup_speech(assistant):
global speak
def speak(x):
assistant.send_text_query("repeat after me " + x)
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--device-model-id', '--device_model_id', type=str,
metavar='DEVICE_MODEL_ID', required=False,
help='the device model ID registered with Google')
parser.add_argument('--project-id', '--project_id', type=str,
metavar='PROJECT_ID', required=False,
help='the project ID used to register this device')
parser.add_argument('--device-config', type=str,
metavar='DEVICE_CONFIG_FILE',
default=os.path.join(
os.path.expanduser('~/.config'),
'googlesamples-assistant',
'device_config_library.json'
),
help='path to store and read device configuration')
parser.add_argument('--credentials', type=existing_file,
metavar='OAUTH2_CREDENTIALS_FILE',
default=os.path.join(
os.path.expanduser('~/.config'),
'google-oauthlib-tool',
'credentials.json'
),
help='path to store and read OAuth2 credentials')
parser.add_argument('-v', '--version', action='version',
version='%(prog)s ' + Assistant.__version_str__())
parser.add_argument('--logfile', type=str, required=False,
help='file to write the log to')
parser.add_argument('--home_control_credentials', type=str, required=True,
help='path of home control credentials')
args = parser.parse_args()
if args.logfile:
sys.stdout = sys.stderr = Logger(args.logfile)
with open(args.credentials, 'r') as f:
credentials = google.oauth2.credentials.Credentials(token=None,
**json.load(f))
device_model_id = None
last_device_id = None
try:
with open(args.device_config) as f:
device_config = json.load(f)
device_model_id = device_config['model_id']
last_device_id = device_config.get('last_device_id', None)
except FileNotFoundError:
pass
if not args.device_model_id and not device_model_id:
raise Exception('Missing --device-model-id option')
# Re-register if "device_model_id" is given by the user and it differs
# from what we previously registered with.
should_register = (
args.device_model_id and args.device_model_id != device_model_id)
device_model_id = args.device_model_id or device_model_id
with Assistant(credentials, device_model_id) as assistant:
events = assistant.start()
device_id = assistant.device_id
log({
"type": "starting up",
"device_model_id": device_model_id,
"device_id": device_id
})
# Re-register if "device_id" is different from the last "device_id":
if should_register or (device_id != last_device_id):
if args.project_id:
register_device(args.project_id, credentials,
device_model_id, device_id)
pathlib.Path(os.path.dirname(args.device_config)).mkdir(
exist_ok=True)
with open(args.device_config, 'w') as f:
json.dump({
'last_device_id': device_id,
'model_id': device_model_id,
}, f)
else:
print(WARNING_NOT_REGISTERED)
setup_controllers(args.home_control_credentials)
setup_speech(assistant)
for event in events:
process_event(event)
if __name__ == '__main__':
main()
| 2.5625 | 3 |
Text Categorisation/PreProcess.py | Bennygmate/Machine-Learning | 0 | 12791080 | <gh_stars>0
# <NAME>
import sys
import os
import glob
import sklearn.datasets
from colorama import init
from termcolor import colored
from random import sample
import random
import numpy as np
def main():
init()
# get the dataset
path = "20news-18828"
# Create new dataset for processing
cmd = "cp -r " + path + " dataset_process"
print colored("Copying dataset into dataset_process", 'blue', attrs=['bold'])
os.system(cmd)
new_path = "dataset_process"
change_incompatible_files(new_path)
monte_carlo_cross(new_path)
os.system("rm -r dataset_process")
def monte_carlo_cross(path):
# Monte Carlo Cross Validation
os.system("mkdir cross_valid")
# Copy path into train and test dataset
os.makedirs(os.path.join("cross_valid", "1"))
os.makedirs(os.path.join("cross_valid/1", "train"))
os.makedirs(os.path.join("cross_valid/1", "test"))
os.makedirs(os.path.join("cross_valid", "2"))
os.makedirs(os.path.join("cross_valid/2", "train"))
os.makedirs(os.path.join("cross_valid/2", "test"))
os.makedirs(os.path.join("cross_valid", "3"))
os.makedirs(os.path.join("cross_valid/3", "train"))
os.makedirs(os.path.join("cross_valid/3", "test"))
os.makedirs(os.path.join("cross_valid", "4"))
os.makedirs(os.path.join("cross_valid/4", "train"))
os.makedirs(os.path.join("cross_valid/4", "test"))
os.makedirs(os.path.join("cross_valid", "5"))
os.makedirs(os.path.join("cross_valid/5", "train"))
os.makedirs(os.path.join("cross_valid/5", "test"))
os.makedirs(os.path.join("cross_valid", "6"))
os.makedirs(os.path.join("cross_valid/6", "train"))
os.makedirs(os.path.join("cross_valid/6", "test"))
os.makedirs(os.path.join("cross_valid", "7"))
os.makedirs(os.path.join("cross_valid/7", "train"))
os.makedirs(os.path.join("cross_valid/7", "test"))
os.makedirs(os.path.join("cross_valid", "8"))
os.makedirs(os.path.join("cross_valid/8", "train"))
os.makedirs(os.path.join("cross_valid/8", "test"))
os.makedirs(os.path.join("cross_valid", "9"))
os.makedirs(os.path.join("cross_valid/9", "train"))
os.makedirs(os.path.join("cross_valid/9", "test"))
os.makedirs(os.path.join("cross_valid", "10"))
os.makedirs(os.path.join("cross_valid/10", "train"))
os.makedirs(os.path.join("cross_valid/10", "test"))
for f in range(1,11):
pathing = "cross_valid/%d" %(f)
V = os.listdir(path)
for vj in V:
tmp_path = path + "/" + vj
tmp_pathing = pathing + "/train/" + vj
tmp_pathings = pathing + "/test/" + vj
cmd_1 = "mkdir " + tmp_pathing
cmd_2 = "mkdir " + tmp_pathings
os.system(cmd_1)
os.system(cmd_2)
folders = glob.glob(os.path.join(tmp_path, '*'))
train_split = int(round(len(folders) * 0.6))
indices = sample(range(0, len(folders)-1), train_split)
i = 0
folder_indices = len(folders) -1
while i <= folder_indices:
if i in indices:
cmd_train = "cp " + folders[i] + " " + tmp_pathing + "/" + str(i)
os.system(cmd_train)
else:
cmd_test = "cp " + folders[i] + " " + tmp_pathings + "/" + str(i)
os.system(cmd_test)
i += 1
print colored("Made train and test for:", 'blue', attrs=['bold'])
print vj
print colored("Made train and test for cross-valid dataset:", 'blue', attrs=['bold'])
print f
def change_incompatible_files(path):
# find incompatible files
print colored('Finding files incompatible with utf8: ', 'green', attrs=['bold'])
count_vector = sklearn.feature_extraction.text.CountVectorizer()
files = sklearn.datasets.load_files(path)
incompatible_files = []
for i in range(len(files.filenames)):
try:
count_vector.fit_transform(files.data[i:i + 1])
except UnicodeDecodeError:
incompatible_files.append(files.filenames[i])
except ValueError:
pass
print colored(len(incompatible_files), 'yellow'), 'files found'
# delete them
if(len(incompatible_files) > 0):
print colored('Converting incompatible files', 'red', attrs=['bold'])
for f in incompatible_files:
print colored("Changing file to UTF-8:", 'red'), f
cmd = "iconv -f ISO-8859-1 " + f + " -t UTF-8 -o tmp"
cmdd = "cp tmp " + f
os.system(cmd)
os.system(cmdd)
os.remove("tmp")
main()
| 2.375 | 2 |
python/robomodules/__init__.py | jhol0613/pacbot2018 | 0 | 12791081 | <gh_stars>0
import os
from .server import Server
from .protoModule import ProtoModule
__path__.append(os.path.join(os.path.dirname(__file__), 'comm'))
__all__ = ['Server', 'ProtoModule']
| 1.460938 | 1 |
lib/functionality/device.py | je-c/CryptoClassifier | 0 | 12791082 | import sys, humanize, psutil, GPUtil, time, torch
import torchvision.transforms as tt
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader
class DeviceDataLoader():
"""
DeviceDataLoader Class
----------------------
Wraps and sends a pytorch dataloader to current device
"""
def __init__(self, dl, device):
self.dl = dl
self.device = device
def __iter__(self):
"""
Move dataloader to device and yield a single batched sample
"""
for b in self.dl:
yield to_device(b, self.device)
def __len__(self):
"""
Number of batches
"""
return len(self.dl)
def mem_report():
"""
Returns available device and device properties
"""
print("CPU RAM Free: " + humanize.naturalsize(psutil.virtual_memory().available))
GPUs = GPUtil.getGPUs()
for i, gpu in enumerate(GPUs):
print(f'GPU {i:d} ... Mem Free: {gpu.memoryFree:.0f}MB / {gpu.memoryTotal:.0f}MB | Utilization {gpu.memoryUtil*100:3.0f}%')
def get_default_device():
"""
Return current default device
"""
if torch.cuda.is_available():
return torch.device('cuda')
else:
return torch.device('cpu')
def to_device(data, device):
"""
Loads data onto default device
* :param data(torch.tensor): Dataset to load
* :param device(torch.device): Device to load to
:return (torch.device): Data loaded onto default device
"""
if isinstance(data, (list,tuple)): return [to_device(x, device) for x in data]
return data.to(device, non_blocking=True)
def load_set(param, device, dataset):
"""
Loads a batch of data to the device
* :param param(dict): Batch parameters
* :param device(torch.device): Device to load to
* :param dataset(torch.tensor): Data to load
:return (DeviceDataLoader): Batch data loaded onto default device
"""
path, shuffle_, batch_size = [value for key, value in param.items()]
transforms = tt.Compose([tt.ToTensor()])
ds = ImageFolder(dataset+path, transforms)
dl = DataLoader(
ds,
batch_size,
shuffle = shuffle_,
num_workers=8,
pin_memory=True
)
device_dl = DeviceDataLoader(dl, device)
return device_dl
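# Illustrative call, not part of the original module. The key names and paths
# below are assumptions, and an ImageFolder-style dataset must exist on disk.
# Key order matters because load_set unpacks the dict values positionally as
# (path, shuffle flag, batch size).
def _example_load_train_split(dataset_root='./data'):
    params = {'path': '/train', 'shuffle': True, 'batch_size': 64}
    return load_set(params, get_default_device(), dataset_root)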
def predict_image(image, model, classMap, device):
"""
Predicts the class of a single image
* :param img(np.ndarray): Numpy array of pixel/channel values
* :param model(torch.nn.module): Model
* :param classMap(dict): Mapped class values for prediction output
* :param device(torch.device): Device to load data onto
:return (str): Class prediction for the image
"""
X = to_device(image.unsqueeze(0), device)
_, prediction = torch.max(model(X), dim=1)
return classMap[prediction[0].item()] | 2.609375 | 3 |
workflow_main/scripts/read_extractor_lite.py | a13xk13m/covidcg | 0 | 12791083 | <reponame>a13xk13m/covidcg
# coding: utf-8
"""Extract variable regions from an aligned segment, in a flexible
and SNP-tolerant manner
Modified and heavily trimmed down version of read_extractor.py (v0.1.0)
from the variant_extractor project
Author: <NAME> - Vector Engineering Team (<EMAIL>)
"""
import numpy as np
import pandas as pd
from collections import defaultdict
from scripts.util import translate, reverse_complement
class ReadExtractor:
"""Extract variable regions from a pysam AlignedSegment
"""
RefSeq = ""
def __init__(self, read):
"""Build the extactor object for a read (pysam.AlignedSegment)
or a pair of reads if using paired-end sequencing
Parameters
----------
read: pysam.AlignedSegment
"""
self.read = read
# Build our own mutation string to store mutational information
# Since both the CIGAR and MD string don't fit our needs
# Format: Position:Ref:Alt;...
# Where position is relative to the reference (0-indexed)
# For insertions, the position is the position on the reference
# after the insertion
# For deletions, the position is the position on the reference
# that was deleted
# Store it as a list of tuples, (Position, Ref, Alt) for now.
# Mutations will be individually serialized then joined by ';' later
# to serialize into one big string
self.mutation_str = []
# Any invalidation errors that flag this variant as successfully extracted,
# but not passing filters, will be stored in this array
# Later when writing to disk we'll serialize this array as a semicolon-delimited string
self.invalid_errors = []
# Store SNPs
self.dna_snps = []
# Read data from the pysam.AlignedSegment object into python variables
self.load_read()
def load_read(self):
"""Load data in from the pysam.AlignedSegment object into Python
"""
# Nucleotide sequence of the read
self.read_seq = self.read.get_forward_sequence()
# If reverse complement, flip the sequence and the quality scores
if self.read.is_reverse:
self.read_seq = reverse_complement(self.read_seq)
# Don't try to do anything else if this read is unmapped
if self.read.is_unmapped:
return
# Get the reference sequence
self.reference_seq = ReadExtractor.RefSeq
"""Expand CIGAR tuples to a list of CIGAR operations on the read (query)
https://pysam.readthedocs.io/en/latest/api.html#pysam.AlignedSegment.cigartuples
https://drive5.com/usearch/manual/cigar.html
https://samtools.github.io/hts-specs/SAMv1.pdf
Op Code Description
-----------------------------------------------------------------------------------------
M BAM_CMATCH 0 Match (alignment column containing two letters). This could
contain two different letters (mismatch) or two identical
letters. USEARCH generates CIGAR strings containing Ms rather
than X's and ='s (see below).
I BAM_CINS 1 Insertion (gap in the query sequence).
D BAM_CDEL 2 Deletion (gap in the target sequence).
N BAM_CREF_SKIP 3 skipped region from the reference
S BAM_CSOFT_CLIP 4 Segment of the query sequence that does not appear in the
alignment. This is used with soft clipping, where the
full-length query sequence is given (field 10 in the SAM record)
. In this case, S operations specify segments at the start and/
or end of the query that do not appear in a local alignment.
H BAM_CHARD_CLIP 5 Segment of the query sequence that does not appear in the
alignment. This is used with hard clipping, where only the
aligned segment of the query sequences is given (field 10 in
the SAM record). In this case, H operations specify segments at
the start and/or end of the query that do not appear in the SAM
record.
P BAM_CPAD 6 padding (silent deletion from padded reference)
= BAM_CEQUAL 7 Alignment column containing two identical letters. USEARCH can
read CIGAR strings using this operation, but does not generate
them.
X BAM_CDIFF 8 Alignment column containing a mismatch, i.e. two different
letters. USEARCH can read CIGAR strings using this operation,
but does not generate them.
B BAM_CBACK 9
"""
self.cigar_ops = []
for op_group in self.read.cigartuples:
# First element of the tuple is the operation code
# Second element of the tuple is the number of operations
# Create a new list [# of operations] long and add it to the
# master operations list
self.cigar_ops.extend([op_group[0],] * op_group[1])
# Reset the cigar index
self.cigar_i = 0
# Start the reference at the position it is mapped onto the read
# using read.reference_start
self.ref_i = self.read.reference_start
# Start the read at the position it is mapped onto the reference
# using read.query_alignment_start
self.read_i = 0
def crawl_to(self, destination):
"""Iterate (consume bases) through both the read and the reference
Use the CIGAR operations and other stats to stay on the same
"aligned" base (as if we did a multiple sequence alignment on the read and ref)
Parameters
----------
destination: int
- Index on the reference of where we want to crawl to
"""
while self.ref_i < destination:
# If we've reached the end of the CIGAR string, break out
if self.cigar_i >= len(self.cigar_ops) or self.read_i >= len(self.read_seq):
return
# Grab the current CIGAR operation
op = self.cigar_ops[self.cigar_i]
"""
https://samtools.github.io/hts-specs/SAMv1.pdf
---------------------------------------------------
| Op | Code | Consume Read | Consume Reference |
---------------------------------------------------
| M | 0 | Yes | Yes |
| I | 1 | Yes | No |
| D | 2 | No | Yes |
| N | 3 | No | Yes |
| S | 4 | Yes | No |
| H | 5 | No | No |
| P | 6 | No | No |
| = | 7 | Yes | Yes |
| X | 8 | Yes | Yes |
| B | 9 | ? | ? |
---------------------------------------------------
"""
# MATCH - can be match or mismatch (SNP)
if op == 0 or op == 7 or op == 8:
# Check for SNPs
# If the OP code is 0, then we have to check both the read
# and the reference to see if there's a mismatch
# If bowtie2 gave us the OP code of 8, then we know there's a mismatch
if (
# Check for a mismatch OP code or a base mismatch for a
# generic 0 OP code
(
(op == 8)
or (
op == 0
and self.read_seq[self.read_i]
!= self.reference_seq[self.ref_i]
)
)
and
# If the reference has an X as the base, then
# ignore any SNPs at this position
(self.reference_seq[self.ref_i] != "X")
):
# Add substitution information to mutation string
self.mutation_str.append(
(
self.read.query_name,
self.ref_i,
self.reference_seq[self.ref_i],
self.read_seq[self.read_i],
)
)
self.read_i += 1
self.ref_i += 1
# Insertion or Soft Clip
elif op == 1 or op == 4:
# Add insertion information to mutation string
self.mutation_str.append(
(self.read.query_name, self.ref_i, "", self.read_seq[self.read_i])
)
self.read_i += 1
# Deletion or Skip
elif op == 2 or op == 3:
# Add deletion information to mutation string
self.mutation_str.append(
(
self.read.query_name,
self.ref_i,
self.reference_seq[self.ref_i],
"",
)
)
self.ref_i += 1
# Hard Clip, Padding
else:
# Do nothing
pass
# Always iterate the CIGAR index
self.cigar_i += 1
# END WHILE
def get_dna_snps(self):
"""Store list of NT SNPs/indels"""
# Join adjacent indels
self.dna_snps = []
i = 0
while i < len(self.mutation_str):
(query_name, pos, ref, alt) = self.mutation_str[i]
# mut is a tuple: (Position, Ref, Alt)
# Offset the position back to 1-indexed, starting at the genome start
pos = pos + 1
# If it's a SNP, then add and continue
if ref and alt:
i += 1
# Actually, skip adding it if either the ref or the alt
# is an ambiguous base (N)
# This is useless data bloat and should be removed as
# early as possible
if alt not in ["A", "C", "G", "T"]:
continue
self.dna_snps.append((query_name, pos, ref, alt))
continue
# Check ahead for adjacent positions and the same indel type
j = i
while j < len(self.mutation_str) and (
# Both insertions
(
(not self.mutation_str[j][2] and not ref)
# Both deletions
or (not self.mutation_str[j][3] and not alt)
)
# New position must be adjacent to the previous one
and self.mutation_str[j][1] == int(pos - 1 + (j - i))
):
j += 1
# Get adjacent indels
adj_muts = self.mutation_str[i:j]
# Combine bases, but keep first position and type
self.dna_snps.append(
(
query_name,
pos,
"".join([m[2] for m in adj_muts]),
"".join([m[3] for m in adj_muts]),
)
)
# Skip ahead to the end of the adjacent mutations
i = j
def process_all(self):
"""Do everything, return everything"""
# Travel to the end of the read
# so that we can collect additional mutations (if they exist)
# Don't throw an error once we reach the end
self.crawl_to(len(self.reference_seq))
self.get_dna_snps()
return self.dna_snps
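# Illustrative usage, not part of the original module: extract NT SNPs from a
# single aligned read, assuming `read` is a pysam.AlignedSegment and
# `reference` is the reference sequence as a string.
def _example_extract_snps(read, reference):
    ReadExtractor.RefSeq = reference
    return ReadExtractor(read).process_all()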
| 2.640625 | 3 |
montapi.py | crimsonskyrem/pyNovice | 0 | 12791084 | from random import uniform
from math import hypot
n = int(input('input n:'))
m = 0
for i in range(n):
    # distance from the origin of a random point in the unit square
    d = hypot(uniform(0, 1), uniform(0, 1))
    if d < 1:
        m += 1
# the fraction of points inside the quarter circle approximates pi/4
print(float(m * 4 / n))
| 3.25 | 3 |
test/unit/test_structures.py | mattsb42/rhodes | 1 | 12791085 | """Unit tests for ``rhodes.structures``."""
import pytest
from rhodes.structures import ContextPath, Parameters
pytestmark = [pytest.mark.local, pytest.mark.functional]
_VALID_STATIC_CONTEXT_PATHS = (
"$$",
"$$.Execution",
"$$.Execution.Id",
"$$.Execution.StartTime",
"$$.State",
"$$.State.EnteredTime",
"$$.State.Name",
"$$.State.RetryCount",
"$$.StateMachine",
"$$.StateMachine.Id",
"$$.Task",
"$$.Task.Token",
"$$.Map",
"$$.Map.Item",
"$$.Map.Item.Index",
)
_VALID_CONTEXT_PATHS_WITH_INPUT = _VALID_STATIC_CONTEXT_PATHS + (
"$$.Execution.Input",
"$$.Execution.Input.foo",
"$$.Execution.Input.foo.bar",
"$$.Execution.Input.foo.bar.baz",
"$$.Map.Item.Value",
"$$.Map.Item.Value.foo",
"$$.Map.Item.Value.foo.bar",
"$$.Map.Item.Value.foo.bar.baz",
)
@pytest.mark.parametrize("path", _VALID_CONTEXT_PATHS_WITH_INPUT)
def test_contextpath_valid(path):
ContextPath(path=path)
@pytest.mark.parametrize("path", _VALID_CONTEXT_PATHS_WITH_INPUT)
def test_contextpath_getattr_valid(path):
expected = ContextPath(path=path)
names = path.split(".")[1:]
test = ContextPath()
for name in names:
test = getattr(test, name)
assert test == expected
def test_contextpath_getattr_readable():
"""The real testing is via ``test_contextpath_getattr_valid``.
This test is just to show a more human-readable form.
"""
assert ContextPath() == ContextPath("$$")
assert ContextPath().Execution == ContextPath("$$.Execution")
assert ContextPath().Map.Item.Index == ContextPath("$$.Map.Item.Index")
assert ContextPath().Execution.Input.foo.bar.baz == ContextPath("$$.Execution.Input.foo.bar.baz")
@pytest.mark.parametrize(
"path",
(pytest.param("", id="empty path"), pytest.param("$.Execution", id="valid child but invalid root"))
+ tuple(pytest.param(val + ".foo", id="valid prefix but invalid child") for val in _VALID_STATIC_CONTEXT_PATHS),
)
def test_contextpath_invalid(path):
with pytest.raises(ValueError) as excinfo:
ContextPath(path=path)
excinfo.match("Invalid Context Path")
def test_parameters_repr():
test = Parameters(a="A", b=3, c=True)
assert repr(test) == "Parameters(a='A', b=3, c=True)"
| 2.640625 | 3 |
components/interface.py | thautwarm/inc_demo | 0 | 12791086 | from abc import ABC, abstractmethod
class UserParser(ABC):
    @abstractmethod
    def __init__(self, user):
        """Construct the parser from a raw user object."""
    @abstractmethod
    def get_user_info(self):
        """
        :return: {
            username: user name
            number: student or employee ID
            avatar: avatar
        }
        """
class DefaultUserParser(UserParser):
    def __init__(self, user):
        self.user = user
    def get_user_info(self):
        user = self.user
        return user['username'], user['number'], user['avatar']
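# Example usage (illustrative sketch, not part of the original module; the
# expected shape of ``user`` is inferred from the docstring above):
if __name__ == "__main__":
    sample_user = {"username": "alice", "number": "20210001", "avatar": "avatar.png"}
    parser = DefaultUserParser(sample_user)
    print(parser.get_user_info())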
| 3.984375 | 4 |
parsetab.py | wangxiaoying/Compiler-Project | 0 | 12791087 | <reponame>wangxiaoying/Compiler-Project
# parsetab.py
# This file is automatically generated. Do not edit.
_tabversion = '3.2'
_lr_method = 'LALR'
_lr_signature = 'q\xf5X\xfc\x8b\xfa\xfdP\xca\xd7\xc4c\xe9Bv\x05'
_lr_action_items = {'WORD':([2,3,4,5,6,7,18,],[10,10,10,10,10,10,10,]),'SPACE':([10,],[18,]),'H2':([0,17,],[2,2,]),'H3':([0,17,],[3,3,]),'H1':([0,17,],[4,4,]),'H6':([0,17,],[5,5,]),'H4':([0,17,],[6,6,]),'H5':([0,17,],[7,7,]),'CR':([8,9,10,11,12,13,14,15,16,19,20,],[17,-2,-10,-5,-6,-4,-9,-7,-8,-3,-11,]),'$end':([1,8,9,10,11,12,13,14,15,16,19,20,],[0,-1,-2,-10,-5,-6,-4,-9,-7,-8,-3,-11,]),}
_lr_action = { }
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = { }
_lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'body':([0,],[1,]),'expression':([0,17,],[9,19,]),'statement':([0,],[8,]),'factor':([2,3,4,5,6,7,18,],[11,12,13,14,15,16,20,]),}
_lr_goto = { }
for _k, _v in _lr_goto_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_goto: _lr_goto[_x] = { }
_lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
("S' -> body","S'",1,None,None,None),
('body -> statement','body',1,'p_body','md2html.py',62),
('statement -> expression','statement',1,'p_state','md2html.py',66),
('statement -> statement CR expression','statement',3,'p_state','md2html.py',67),
('expression -> H1 factor','expression',2,'p_exp_cr','md2html.py',74),
('expression -> H2 factor','expression',2,'p_exp_cr','md2html.py',75),
('expression -> H3 factor','expression',2,'p_exp_cr','md2html.py',76),
('expression -> H4 factor','expression',2,'p_exp_cr','md2html.py',77),
('expression -> H5 factor','expression',2,'p_exp_cr','md2html.py',78),
('expression -> H6 factor','expression',2,'p_exp_cr','md2html.py',79),
('factor -> WORD','factor',1,'p_factor_text','md2html.py',92),
('factor -> WORD SPACE factor','factor',3,'p_factor_text','md2html.py',93),
]
| 1.742188 | 2 |
.github/workflows/clang_lint/inline_comments/post_pr_review_comments.py | CarlosNihelton/WSL | 23 | 12791088 | #!/usr/bin/python3
""" Posts pull request review comments, excluding the existing ones and
the ones not affecting files modified in the current pull_request_id."""
#
# Copyright (C) 2021 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Source code adapted from https://github.com/platisd/clang-tidy-pr-comments.
import itertools
import json
import os
import time
import re
import requests
def chunks(lst, n):
# Copied from: https://stackoverflow.com/a/312464
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i: i + n]
def _files_from_this_pr(github_api_url, repo, pull_request_id, github_token):
"""Lists which files and lines are allowed to receive comments, i.e.
those modified by the current pull_request_id Pull Request."""
pull_request_files = list()
# Request a maximum of 100 pages (3000 files)
for page_num in range(1, 101):
pull_files_url = "%s/repos/%s/pulls/%d/files?page=%d" % (
github_api_url,
repo,
pull_request_id,
page_num,
)
pull_files_result = requests.get(
pull_files_url,
headers={
"Accept": "application/vnd.github.v3+json",
"Authorization": "token %s" % github_token,
},
)
if pull_files_result.status_code != requests.codes.ok:
print(
"Request to get list of files failed with error code: "
+ str(pull_files_result.status_code)
)
return None
pull_files_chunk = json.loads(pull_files_result.text)
if len(pull_files_chunk) == 0:
break
pull_request_files.extend(pull_files_chunk)
files_and_lines_available_for_comments = dict()
for pull_request_file in pull_request_files:
# Not all PR file metadata entries may contain a patch section
# E.g., entries related to removed binary files may not contain it
if "patch" not in pull_request_file:
continue
git_line_tags = re.findall(r"@@ -\d+,\d+ \+\d+,\d+ @@",
pull_request_file["patch"])
lines_and_changes = [
line_tag.replace("@@", "").strip().split()[1].replace("+", "")
for line_tag in git_line_tags
]
lines_available_for_comments = [
list(
range(
int(change.split(",")[0]),
int(change.split(",")[0]) + int(change.split(",")[1]),
)
)
for change in lines_and_changes
]
lines_available_for_comments = list(
itertools.chain.from_iterable(lines_available_for_comments)
)
files_and_lines_available_for_comments[
pull_request_file["filename"]
] = lines_available_for_comments
return files_and_lines_available_for_comments
def post_pr_review_comments(repository: str, pull_request_id: int,
review_comments: dict):
""" Posts a PR Review event from each 15 review_comments which
matching the output of `files_and_lines_available_for_comments`"""
github_api_url = os.environ.get("GITHUB_API_URL")
github_token = os.environ.get("INPUT_GITHUB_TOKEN")
files_and_lines_available_for_comments = \
_files_from_this_pr(github_api_url, repository,
pull_request_id, github_token)
if files_and_lines_available_for_comments is None:
print("Couldn't get the files of this PR from GitHub")
return 1
    # Dismantling the review_comments object for filtering purposes.
review_body = review_comments["body"]
review_event = review_comments["event"]
comments = review_comments["comments"]
actual_comments = dict()
    # Keep only comments on files and lines that were changed in this pull request
actual_comments = [
c
for c in comments
if c["path"]
in files_and_lines_available_for_comments.keys()
and c["line"] in files_and_lines_available_for_comments[c["path"]]
]
# Load the existing review comments
existing_pull_request_comments = list()
# Request a maximum of 100 pages (3000 comments)
for page_num in range(1, 101):
pull_comments_url = "%s/repos/%s/pulls/%d/comments?page=%d" % (
github_api_url,
repository,
pull_request_id,
page_num,
)
pull_comments_result = requests.get(
pull_comments_url,
headers={
"Accept": "application/vnd.github.v3+json",
"Authorization": "token %s" % github_token,
},
)
if pull_comments_result.status_code != requests.codes.ok:
print(
"Request to get pull request comments failed with error code: "
+ str(pull_comments_result.status_code)
)
return 1
pull_comments_chunk = json.loads(pull_comments_result.text)
if len(pull_comments_chunk) == 0:
break
existing_pull_request_comments.extend(pull_comments_chunk)
# Exclude already posted comments
for comment in existing_pull_request_comments:
actual_comments = list(
filter(
lambda review_comment: not (
review_comment["path"] == comment["path"] and
review_comment["line"] == comment["line"] and
review_comment["side"] == comment["side"] and
review_comment["body"] == comment["body"]
),
actual_comments,
)
)
if len(actual_comments) == 0:
print("No new warnings found for this pull request.")
return 0
# Split the comments in chunks to avoid overloading the server
# and getting 502 server errors as a response for large reviews
suggestions_per_comment = 15
actual_comments = list(chunks(actual_comments, suggestions_per_comment))
total_reviews = len(actual_comments)
current_review = 1
for comments_chunk in actual_comments:
warning_comment = (
(review_body + " (%i/%i)") % (current_review, total_reviews)
)
current_review += 1
pull_request_reviews_url = "%s/repos/%s/pulls/%d/reviews" % (
github_api_url,
repository,
pull_request_id,
)
post_review_result = requests.post(
pull_request_reviews_url,
json={
"body": warning_comment,
"event": review_event,
"comments": comments_chunk,
},
headers={
"Accept": "application/vnd.github.v3+json",
"Authorization": "token %s" % github_token,
},
)
if post_review_result.status_code != requests.codes.ok:
print(post_review_result.text)
# Ignore bad gateway errors (false negatives?)
if post_review_result.status_code != requests.codes.bad_gateway:
print(
"Posting review comments failed with error code: "
+ str(post_review_result.status_code)
)
print("Please report this error to the CI maintainer")
return 1
        # Wait between review chunks to avoid triggering abuse detection
time.sleep(5)
return 0
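# Minimal usage sketch (hypothetical values, not part of the original script;
# assumes GITHUB_API_URL and INPUT_GITHUB_TOKEN are set in the environment, as
# the function itself requires):
if __name__ == "__main__":
    example_review = {
        "body": "clang-tidy found issues",
        "event": "COMMENT",
        "comments": [
            {
                "path": "src/main.cpp",
                "line": 42,
                "side": "RIGHT",
                "body": "Consider making this variable const.",
            }
        ],
    }
    post_pr_review_comments("owner/repo", 123, example_review)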
| 2.546875 | 3 |
api.py | prajwalccc13/Pragmatic-Web-Framework | 0 | 12791089 | from webob import Request, Response
from parse import parse
import inspect
from requests import Session as RequestsSession
from wsgiadapter import WSGIAdapter as RequestsWSGIAdapter
import os
from jinja2 import Environment, FileSystemLoader
from whitenoise import WhiteNoise
from middleware import Middleware
from static import cut_static_root, request_for_static
class API:
def __init__(self, templates_dir="templates", static_dir="static"):
self.routes = {}
self.templates_env = Environment(loader=FileSystemLoader(os.path.abspath(templates_dir)))
self.exception_handler = None
self.whitenoise = WhiteNoise(self.wsgi_app, root=static_dir)
self.static_dir = os.path.abspath(static_dir)
self._static_root = "/static"
self.middleware = Middleware(self)
def wsgi_app(self, environ, start_response):
request = Request(environ)
response = self.handle_request(request)
return response(environ, start_response)
def __call__(self, environ, start_response):
path_info = environ["PATH_INFO"]
if request_for_static(path_info, self._static_root):
environ["PATH_INFO"] = cut_static_root(path_info, self._static_root)
return self.whitenoise(environ, start_response)
return self.middleware(environ, start_response)
def add_middleware(self, middleware_cls):
self.middleware.add(middleware_cls)
def route(self, path):
def wrapper(handler):
self.add_route(path, handler)
return handler
return wrapper
def add_route(self, path, handler):
assert path not in self.routes, f"{path} already exists."
self.routes[path] = handler
    def test_session(self, base_url="http://testserver"):
session = RequestsSession()
session.mount(prefix=base_url, adapter=RequestsWSGIAdapter(self))
return session
def handle_request(self, request):
response = Response()
handler, kwargs = self.find_handler(request_path=request.path)
try:
if handler is not None:
if inspect.isclass(handler):
handler = getattr(handler(), request.method.lower(), None)
if handler is None:
                        raise AttributeError("Method is not allowed", request.method)
handler(request, response, **kwargs)
else:
self.default_response(response)
except Exception as e:
if self.exception_handler is None:
raise e
else:
self.exception_handler(request, response, e)
return response
def default_response(self, response):
response.status_code = 404
response.text = "Not found"
def find_handler(self, request_path):
for path, handler in self.routes.items():
parse_result = parse(path, request_path)
if parse_result is not None:
return handler, parse_result.named
return None, None
def template(self, template_name, context=None):
if context is None:
context = {}
return self.templates_env.get_template(template_name).render(**context)
def add_exception_handler(self, exception_handler):
self.exception_handler = exception_handler
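# Usage sketch (not part of the original module; assumes ./templates and
# ./static directories exist, since API() wires them up in __init__, and that
# the Middleware wrapper from middleware.py delegates requests back to the app):
if __name__ == "__main__":
    app = API()
    @app.route("/hello/{name}")
    def hello(request, response, name):
        response.text = f"Hello, {name}"
    client = app.test_session()
    print(client.get("http://testserver/hello/world").text)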
| 2.21875 | 2 |
trunk/Documentacion/Memoria/trozos-codigo/codigo-9-logsources-class.py | MGautier/security-sensor | 2 | 12791090 | class LogSourcesTestCase(TestCase):
def setUp(self):
LogSources.objects.create(
Description="Firewall of gnu/linux kernel",
Type="Iptables",
Model="iptables v1.4.21",
Active=1,
Software_Class="Firewall",
Path="iptables",
)
| 1.703125 | 2 |
epcboot_gui/epcboot_gui.py | EPC-MSU/epcboot-gui | 0 | 12791091 | <gh_stars>0
"""
This .py file offers GUI for EPCboot.
It allows:
* browse firmware on PC and load it to controller
* browse key file (*.txt) and load it to controller (developer only)
* update serial and version (developer only)
"""
import argparse
import ctypes
import ntpath
import sys
import threading
import tkinter as tk
from tkinter import filedialog, font, messagebox, scrolledtext, ttk
import serial
import serial.tools.list_ports
import epcbootlib
import urlparse
from tip import ToolTip
from version import Version
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--method",
choices=("dev", "cust"),
help="Choosing method: dev or cust "
"(developer or customer)")
args = parser.parse_args()
# Event handlers
def com_chosen(event=None):
"""Sets URL."""
global URL
try:
test_port = serial.Serial(port=URL.get())
test_port.close()
except serial.SerialException:
log.insert(
tk.END,
"Something is wrong! If you use Linux, open epcboot_gui with "
"root.\nIn case of using Windows, make sure that the device is not"
" used by another program.\n")
if sys.platform.startswith("win"):
URL.set(r"com:\\.\{}".format(URL.get()))
elif sys.platform.startswith("linux"):
URL.set(r"com://{}".format(URL.get()))
if upd_button.state() == ():
# in case of enabled upd_button, method .state() returns empty tuple
upd_button.focus()
else:
firmware_browse_button.focus()
log.insert(tk.END, "{} is chosen!\n".format(combox.get()))
def _update_combox():
"""Updates COM list."""
combox.config(values=[comport.device
for comport in serial.tools.list_ports.comports()])
def clean_log():
"""Cleans log."""
log.delete('1.0', tk.END)
def browse_firmware():
"""Function opens file dialog.
We are going to read binary files (.cod). So .encode() isn't needed."""
global FIRM_PATH
global FIRMWARE
global URL
main_win.firmware = filedialog.askopenfile(
mode="rb",
initialdir="/",
title="Select firmware",
filetypes=(("Firmware file", "*.cod"), ("All files", "*.*")))
if not isinstance(main_win.firmware, type(None)):
# File was opened
FIRM_PATH.set(main_win.firmware.name)
FIRMWARE = main_win.firmware.read()
upd_button.config(state=tk.NORMAL)
if URL.get() == "":
combox.focus()
else:
upd_button.focus()
def start_update():
"""Function starts the firmware update."""
global UPDATE_LOCK
if URL.get() == "":
log.insert(tk.END, "You must specify device URL.\n")
return
if FIRMWARE == "":
log.insert(tk.END, "You must specify firmware file.\n")
return
error_text = urlparse.validate(URL.get())
if error_text:
log.insert(tk.END, error_text)
return
UPDATE_LOCK.release()
def set_buttons_to_state(state):
"""Function sets the given state to all buttons.
:param: given state."""
firmware_browse_button.config(state=state)
upd_button.config(state=state)
collapse_button.config(state=state)
key_browse_button.config(state=state)
set_key_button.config(state=state)
set_ident_button.config(state=state)
log_button.config(state=state)
def update_firmware():
"""Updates firmware."""
global FIRMWARE
global FIRM_PATH
global RUNNING
global UPDATE_LOCK
global UPDATE_RUNNING
global URL
while RUNNING:
UPDATE_LOCK.acquire()
if not RUNNING:
break
UPDATE_RUNNING = True
# Button is clicked
set_buttons_to_state(tk.DISABLED)
# The statement below is necessary to work with url as C char*
url = ctypes.create_string_buffer(URL.get().encode())
log.insert(
tk.END,
"Starting firmware update. Port: {}. Firmware file: {}\n". format(
URL.get(),
ntpath.basename(
FIRM_PATH.get())))
log.insert(tk.END, "Please wait\n")
main_win.update()
res = epcbootlib.urpc_firmware_update(url, FIRMWARE, len(FIRMWARE))
if res == 0:
log.insert(tk.END, "Ok\n")
else:
log.insert(tk.END, "Fail\n")
set_buttons_to_state(tk.NORMAL)
UPDATE_RUNNING = False
def key_browse():
"""Opens file dialog. Key must be .txt file."""
global KEY
main_win.key_file = filedialog.askopenfile(
mode="r",
initialdir="/",
title="Select key",
filetypes=(("Text files", "*.txt"), ("All files", "*.*")))
if not isinstance(main_win.key_file, type(None)):
# File was opened
KEY.set(main_win.key_file.read().rstrip())
main_win.key_file.close()
def set_key():
"""Function sets cryptographic key."""
global KEY
global URL
if URL.get() == "":
log.insert(tk.END, "You must specify device URL.\n")
return
if KEY.get() == "":
log.insert(tk.END, "You must specify key.\n")
return
error_text = urlparse.validate(URL.get())
if error_text:
log.insert(tk.END, error_text)
return
# The statement below is necessary to work with url as C char*
url = ctypes.create_string_buffer(URL.get().encode())
key = ctypes.create_string_buffer(KEY.get().encode())
log.insert(tk.END, "Starting key setting. Port: {}\n".format(URL.get()))
log.insert(tk.END, "Please wait\n")
main_win.update()
res = epcbootlib.urpc_write_key(url, key)
if res == 0:
log.insert(tk.END, "Ok\n")
else:
log.insert(tk.END, "Fail\n")
def ident_and_key_set():
"""Sets serial number, hardware version and key."""
global URL
global KEY
if URL.get() == "":
log.insert(tk.END, "You must specify device URL.\n")
return
if KEY.get() == "":
log.insert(tk.END, "You must specify key.\n")
return
if serial_entry.get() == "xxx":
log.insert(tk.END, "You must specify serial number.\n")
return
if version_entry.get() == "x.x.x":
log.insert(tk.END, "You must specify version.\n")
return
error_text = urlparse.validate(URL.get())
if error_text:
log.insert(tk.END, error_text)
return
# Checking serial and version format
if not serial_entry.validate() or not version_entry.validate():
return
# The statement below is necessary to work with url as C char*
url = ctypes.create_string_buffer(URL.get().encode())
key = ctypes.create_string_buffer(KEY.get().encode())
version = ctypes.create_string_buffer(version_entry.get().encode())
log.insert(
tk.END,
"Starting identificator and key setting. Port: {}\n Serial number: "
"{}\n Hardware version: {}\n".format(URL.get(), serial_entry.get(),
version_entry.get()))
log.insert(tk.END, "Please wait\n")
main_win.update()
res = epcbootlib.urpc_write_ident(url, key,
int(serial_entry.get()), version)
if res == 0:
log.insert(tk.END, "Ok\n")
_autoincrement_serial()
else:
log.insert(tk.END, "Fail\n")
def _autoincrement_serial():
global AUTOINCR
if AUTOINCR.get():
serial_number = int(serial_entry.get())
serial_entry.delete(0, tk.END)
serial_entry.insert(0, str(serial_number + 1))
log.insert(tk.END, "Serial number incremented.")
else:
return
def _serial_validation(content, trigger_type):
if content == "xxx" and trigger_type == "focusin":
# clears the hint
serial_entry.delete(0, tk.END)
return tk.TRUE
if content == "xxx" and trigger_type == "focusout":
# just leave the entry
return tk.TRUE
if content == "":
serial_entry.config(font=("Calibri Italic", 10),
foreground="grey")
if trigger_type == "focusout":
serial_entry.delete(0, tk.END)
serial_entry.insert(tk.END, "xxx")
return tk.TRUE
if not content.isdigit():
log.insert(tk.END, "Serial number must be a number!\n")
return tk.FALSE
serial_entry.config(font=("Calibri", 10), foreground="green")
return tk.TRUE
def _version_validation(content, trigger_type="focusout"):
"""Returns tk.TRUE if version format is correct"""
if content == "x.x.x" and trigger_type == "focusin":
# clears the hint
version_entry.delete(0, tk.END)
return tk.TRUE
if content != "x.x.x" and trigger_type == "focusin":
# just enter the entry
return tk.TRUE
print(content)
if content == "":
if trigger_type == "focusout":
# sets the hint
version_entry.config(
font=(
"Calibri Italic",
10),
foreground="grey")
version_entry.insert(tk.END, "x.x.x")
return tk.TRUE
# the .find(".", x) returns -1 if "." is not found
first_dot_index = content.find(".", 0)
second_dot_index = content.find(".", first_dot_index + 1)
third_dot_index = content.find(".", second_dot_index + 1)
# Consider -1 as dot absence
if first_dot_index == -1: #
second_dot_index, third_dot_index = -1, -1 # dot indices
if second_dot_index == -1: # correction
third_dot_index = -1 #
if third_dot_index != -1:
# there cannot be three or more dots
return tk.FALSE
major = content[0:first_dot_index]
minor = content[first_dot_index + 1:second_dot_index]
patch = content[second_dot_index + 1:]
if second_dot_index == -1: #
patch = "" #
# minor correction
minor = content[first_dot_index + 1:]
if first_dot_index == -1: #
patch = "" #
minor = "" #
# major correction
major = content[second_dot_index + 1:]
if not major.isdigit():
log.insert(tk.END, "MAJOR should be a number!\n")
return tk.FALSE
if not minor.isdigit():
if (minor == "" and second_dot_index == -1 and
trigger_type != "focusout"):
version_entry.config(font=("Calibri", 10), foreground="green")
return tk.TRUE
log.insert(tk.END, "MINOR should be a number!\n")
return tk.FALSE
if patch != "" and not patch.isdigit():
log.insert(tk.END, "PATCH should be a number!\n")
return tk.FALSE
version_entry.config(font=("Calibri", 10), foreground="green")
return tk.TRUE
def validation_command(widget_name, content, trigger_type):
"""Checks entry format and changes font
If format is ok: green Calibri 10
If format uncorrect: red Calibri 10
If empty: sets hint, grey Calibri Italic 10
"""
instance = main_win.nametowidget(widget_name) # getting certain entry
if instance is serial_entry:
if _serial_validation(content, trigger_type):
return tk.TRUE
if instance is version_entry:
if _version_validation(content, trigger_type):
return tk.TRUE
return tk.FALSE
def invalid_command(widget_name, content):
"""Starts if validation commands return False."""
instance = main_win.nametowidget(widget_name) # getting certain entry
instance.delete(0, tk.END)
instance.insert(tk.END, content)
instance.config(foreground="red")
def collapse():
"""Function collapses or expands developer tab"""
global DEV_STATE
height = main_win.winfo_height()
width = main_win.winfo_width()
if DEV_STATE:
log_frame.pack_forget()
separator.pack(expand=tk.TRUE, side=tk.RIGHT, fill=tk.X, padx=5)
developer_tab.pack_forget()
log_frame.pack(expand=tk.TRUE, side=tk.BOTTOM, fill=tk.BOTH)
DEV_STATE = False
if sys.platform.startswith("win"):
height -= (530 - 383)
elif sys.platform.startswith("linux"):
height -= (568 - 412)
else:
separator.pack_forget()
log_frame.pack_forget()
developer_tab.pack(expand=tk.FALSE, side=tk.TOP, fill=tk.BOTH)
log_frame.pack(expand=tk.TRUE, side=tk.BOTTOM, fill=tk.BOTH)
DEV_STATE = True
if sys.platform.startswith("win"):
height += 530 - 383
elif sys.platform.startswith("linux"):
height += (568 - 412)
main_win.geometry(f"{width}x{height}")
def on_modification(event=None):
log.see(tk.END)
log.edit_modified(0)
def close_window():
"""This function breaks an infinite loop in the update stream."""
global RUNNING
global UPDATE_LOCK
global UPDATE_RUNNING
if UPDATE_RUNNING:
messagebox.showinfo("Information",
"You need to wait for the update to complete")
return
UPDATE_LOCK.release()
RUNNING = False
main_win.destroy()
# Creating main window
main_win = tk.Tk()
# Setting window geometry and title
if sys.platform.startswith("win"):
main_win.geometry("500x383")
elif sys.platform.startswith("linux"):
main_win.geometry("640x412")
else:
print("Unknown system!")
main_win.title(f"EPCboot {Version.full}")
FIRMWARE = "" # string containing firmware
# firmware tab:
firmware_tab = ttk.Frame(main_win)
com_frame = ttk.Labelframe(firmware_tab, text="COM settings")
com_label = ttk.Label(com_frame, text="COM port:")
URL = tk.StringVar() # URL of port
combox = ttk.Combobox(com_frame, postcommand=_update_combox, width=15,
textvariable=URL)
combox.bind("<<ComboboxSelected>>", com_chosen)
com_hint = ttk.Label(com_frame, font=("Calibri Italic", 10))
underlined_font = font.Font(com_hint, com_hint.cget("font"))
underlined_font.configure(underline=True)
com_hint.configure(font=underlined_font)
tip_com_hin = ToolTip(com_hint)
if sys.platform.startswith("win"):
com_hint.config(text="Input format", foreground="grey")
tip_com_hin.set_text(r"com:\\.\COMx")
elif sys.platform.startswith("linux"):
com_hint.config(text="Input format", foreground="grey")
tip_com_hin.set_text("com:///dev/ttyUSBx\ncom:///dev/ttyACMx\n"
"com:///dev/ttySx")
firmware_frame = ttk.Labelframe(firmware_tab, text="Firmware update")
firmware_label = ttk.Label(firmware_frame, text="Firmware:")
FIRM_PATH = tk.StringVar() # path to firmware
firmware_entry = ttk.Entry(firmware_frame, textvariable=FIRM_PATH, width=17)
firmware_browse_button = ttk.Button(firmware_frame, text="Browse...", width=10,
command=browse_firmware)
upd_button = ttk.Button(firmware_tab, text="Update firmware",
state=tk.DISABLED, width=20, command=start_update)
com_frame.pack(side=tk.TOP, fill=tk.X, padx=5, pady=3, ipady=6)
firmware_frame.pack(side=tk.TOP, fill=tk.X, padx=5, pady=3, ipady=5)
com_label.pack(side=tk.LEFT)
combox.pack(side=tk.LEFT, padx=10)
com_hint.pack(side=tk.LEFT)
firmware_label.pack(side=tk.LEFT)
firmware_entry.pack(expand=tk.TRUE, side=tk.LEFT, fill=tk.X, padx=14)
firmware_browse_button.pack(side=tk.LEFT, padx=5)
upd_button.pack(side=tk.TOP, pady=0)
# end of firmware tab
# developer tab:
developer_tab = ttk.Frame(main_win)
key_frame = ttk.Labelframe(developer_tab, text="Key")
ident_frame = ttk.Labelframe(developer_tab, text="Identification")
key_label = ttk.Label(key_frame, text="Key:")
KEY = tk.StringVar() # cryptographic key
key_entry = ttk.Entry(key_frame, textvariable=KEY)
set_key_button = ttk.Button(key_frame, text=" Set key ", command=set_key)
key_browse_button = ttk.Button(key_frame, text="Browse...", command=key_browse)
left_frame = ttk.Frame(ident_frame)
right_frame = ttk.Frame(ident_frame)
left_frame.pack(expand=tk.TRUE, side=tk.LEFT, fill=tk.BOTH)
right_frame.pack(expand=tk.TRUE, side=tk.RIGHT, fill=tk.BOTH)
serial_frame = ttk.Frame(left_frame)
serial_label = ttk.Label(serial_frame, text="Serial number:")
serial_entry = ttk.Entry(serial_frame, foreground="grey",
font=("Calibri Italic", 10))
serial_entry.insert(tk.END, "xxx")
version_frame = ttk.Frame(left_frame)
version_label = ttk.Label(version_frame, text="HW version:")
version_entry = ttk.Entry(version_frame, foreground="grey",
font=("Calibri Italic", 10))
version_entry.insert(tk.END, "x.x.x")
AUTOINCR = tk.BooleanVar()
set_autoincrement_button = ttk.Checkbutton(
right_frame, text="Auto increment", width=30, variable=AUTOINCR)
set_ident_button = ttk.Button(
right_frame, text="Set serial and hardware version", width=30,
command=ident_and_key_set)
set_autoincrement_button.pack(expand=tk.TRUE, side=tk.TOP)
set_ident_button.pack(expand=tk.TRUE, side=tk.BOTTOM)
# Setting validation to entries (serial_entry and version_entry)
vcmd = main_win.register(validation_command)
ivcmd = main_win.register(invalid_command)
serial_entry.config(validatecommand=(vcmd, "%W", "%P", "%V"),
invalidcommand=(ivcmd, "%W", "%P"),
validate="all")
version_entry.config(validatecommand=(vcmd, "%W", "%P", "%V"),
invalidcommand=(ivcmd, "%W", "%P"),
validate="all")
# end of developer tab
# creating collapse button
DEV_STATE = False # developer tab state
collapse_frame = ttk.Frame(main_win)
collapse_button = ttk.Button(collapse_frame, text="Developer mode",
command=collapse)
separator = ttk.Separator(collapse_frame, orient="horizontal")
collapse_button.pack(side=tk.LEFT, anchor=tk.NW, padx=5)
separator.pack(expand=tk.TRUE, side=tk.RIGHT, fill=tk.X, padx=5)
# end of collapse button
firmware_tab.pack(expand=tk.FALSE, side=tk.TOP, fill=tk.X, anchor=tk.N)
collapse_frame.pack(expand=tk.FALSE, side=tk.TOP, fill=tk.X, anchor=tk.N)
key_frame.pack(side=tk.TOP, fill=tk.X, padx=5, pady=3, ipady=4)
ident_frame.pack(expand=tk.FALSE, side=tk.TOP, padx=5, fill=tk.BOTH, pady=3,
ipady=4)
serial_frame.pack(side=tk.TOP, fill=tk.X, pady=7)
version_frame.pack(side=tk.TOP, fill=tk.X)
key_label.pack(side=tk.LEFT)
key_entry.pack(expand=tk.TRUE, side=tk.LEFT, fill=tk.X, padx=15)
set_key_button.pack(side=tk.RIGHT, padx=4)
key_browse_button.pack(side=tk.RIGHT)
serial_label.pack(expand=tk.FALSE, side=tk.LEFT)
serial_entry.pack(side=tk.LEFT, padx=14)
version_label.pack(side=tk.LEFT)
version_entry.pack(side=tk.LEFT, padx=26)
set_ident_button.pack(side=tk.TOP)
# log_frame
log_frame = ttk.Labelframe(main_win, text="Log")
log = scrolledtext.ScrolledText(log_frame, height=8, wrap=tk.WORD)
log.edit_modified(0)
log_button_frame = ttk.LabelFrame(log_frame)
log_button = ttk.Button(log_button_frame, text="Clean log", command=clean_log)
log_button_frame.pack(side=tk.BOTTOM, fill=tk.X)
log_frame.pack(expand=tk.TRUE, side=tk.BOTTOM, fill=tk.BOTH)
log.pack(expand=tk.TRUE, side=tk.BOTTOM, fill=tk.BOTH)
log_button.pack(side=tk.RIGHT)
log.bind("<<Modified>>", on_modification)
# Add a thread to update firmware
RUNNING = True
UPDATE_LOCK = threading.Lock()
UPDATE_LOCK.acquire()
UPDATE_RUNNING = False
thread_upd = threading.Thread(target=update_firmware)
thread_upd.start()
main_win.protocol("WM_DELETE_WINDOW", close_window)
tk.mainloop()
thread_upd.join()
| 2.640625 | 3 |
src/apply_cross_fade.py | maxsolomonhenry/pyphase | 0 | 12791092 | import numpy as np
def apply_cross_fade(clips, cross_fade_ms, sr):
"""Concatenate audio clips with a cross fade."""
num_clips = len(clips)
cross_fade_samples = int(np.floor(cross_fade_ms * sr / 1000))
fade_ramp = np.arange(cross_fade_samples) / cross_fade_samples
# if not is_even(cross_fade_samples):
# cross_fade_samples += 1
raw_num_samples = 0
for clip in clips:
raw_num_samples += len(clip)
total_overlap_samples = (num_clips - 1) * cross_fade_samples
num_samples = raw_num_samples - total_overlap_samples
y = np.zeros(num_samples)
write_in = 0
for clip in clips:
write_out = write_in + len(clip)
# Update pointers.
ramp_in = write_out - cross_fade_samples
ramp_out = write_out
# Fade in and place.
clip[:cross_fade_samples] *= fade_ramp
y[write_in:write_out] += clip
# Fade out.
y[ramp_in:ramp_out] *= (1 - fade_ramp)
# Advance write pointer.
write_in = ramp_in
return y
if __name__ == '__main__':
import matplotlib.pyplot as plt
import scipy.io.wavfile
file_path = "../audio/008-you-possess-the-treasure-you-seek-seed001.wav"
# Test audio file.
sr, x = scipy.io.wavfile.read(file_path)
x = x / np.iinfo(np.int16).max
time_x = np.arange(len(x)) / sr
plt.plot(time_x, x, label='Original')
# Quick list-of-clips demo.
tmp = []
for i in range(20):
tmp.append(x[i * 1000:(i + 1) * 1000])
cross_fade_ms = 20
y = apply_cross_fade(tmp, cross_fade_ms, sr)
time_y = np.arange(len(y)) / sr
plt.plot(time_y, y, label='Cross fade')
plt.show()
| 2.78125 | 3 |
Convolutional-Neural-Network-Python-without-machine-learning-framework/cnn.py | FrogGamesDev/Convolutional-Neural-Network-Python-without-machine-learning-framework | 1 | 12791093 | import numpy as np
import cv2
import os
from conv import *
import multiprocessing
from multiprocessing import Pool
from itertools import product
from numba import njit
from functools import partial
import math
import sklearn
from sklearn import linear_model
def load_images_from_folder(folder):
images = []
for filename in os.listdir(folder):
images.append(cv2.imread(os.path.join(folder,filename),0))
return images
def load_data(folder):
images=[]
n=len(os.listdir(folder))
#print(n)
output=[]
iters = 0
for filename in os.listdir(folder):
path=folder+"\\"+filename
pictures = load_images_from_folder(path)
for pics in pictures:
images.append(pics)
y=np.zeros((n,1))
y[iters,:] =1
y.reshape(1,n)
output.append(y)
iters += 1
return images,output
def convert(l):
return (*l,)
def data_preprocessing(data,reshape_dim):
for i in range(0,len(data)):
data[i]=ConvNet(cv2.resize(data[i]/255,reshape_dim,interpolation=cv2.INTER_AREA))
data[i]=data[i].reshape(data[i].size,1)
return data
def prepare(data,reshape_dim,i):
data[i]=ConvNet(cv2.resize(data[i]/255,reshape_dim,interpolation=cv2.INTER_AREA))
data[i]=data[i].reshape(data[i].size,1)
def prepare_2(data):
data=ConvNet(cv2.resize(data/255,(256,256),interpolation=cv2.INTER_AREA))
data=data.reshape(data.size,1)
return data
def parallel(data,reshape_dim):
process=[]
for i in range(len(data)):
p=multiprocessing.Process(target=prepare,args=(data,reshape_dim,i))
process.append(p)
for x in process:
x.start()
for x in process:
x.join()
for i in data:
print(i.shape)
return data
def square(x):
return x**2
def parallel_2(data,reshape_dim):
x=0
pool=Pool(4)
x=pool.map(prepare_2,data)
print(x)
pool.close()
pool.join()
return x
def softmax(Z):
e_Z = np.exp(Z - np.max(Z, axis = 0, keepdims = True,initial=-np.inf))
return e_Z / e_Z.sum(axis = 0)
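# Worked example (columns are samples, since softmax normalises along axis 0):
#   softmax(np.array([[1., 0.], [2., 0.]]))
#   -> array([[0.26894142, 0.5],
#             [0.73105858, 0.5]])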
def predict(X,weights):
return softmax(weights.T@X)
def cross_entropy(y_hat, y):
return - np.log(y_hat[range(len(y_hat)), y])
def update_weights(features,output,weights,learning_rate):
predicted=predict(features,weights)
print(features.shape)
print(weights.shape)
print(predicted.shape)
#print(np.linalg.norm(predicted-output))
    # gradient of the cross-entropy loss is X @ (P - Y).T, so step against it
    weights = weights + learning_rate * (((output - predicted) @ features.T).T)
return weights
def Adam(features,output,weights,lr,t,beta1=0.9,beta2=0.999,epsilon=1e-08):
#print(features.shape)
#print(output.shape)
#print(weights)
#print(type(weights))
predicted=predict(features,weights)
g=(-(output-predicted)@features.T).T
m=np.zeros(weights.shape)
v=np.zeros(weights.shape)
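    # Note: m and v are re-created on every call, so this performs a single
    # stateless Adam-style step; moment estimates are not carried across calls.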
m=beta1*m+(1-beta1)*g
v=beta2*v+(1-beta2)*(g*g)
m_hat=m/(1-(beta1**(t+1)))
v_hat=v/(1-(beta2**(t+1)))
#print(m_hat,v_hat)
#print(type(((lr*m_hat)/(np.sqrt(v_hat)+epsilon)).T))
weights=weights-((lr*m_hat)/(np.sqrt(v_hat)+epsilon))
return weights
def softmax_regression(data,output,learning_rate,epoch):
data_hat=np.array(data)
data_hat=data_hat.reshape(data_hat.shape[0],data_hat.shape[1]).T
output_hat=np.array(output)
output_hat=output_hat.reshape(output_hat.shape[0],output_hat.shape[1]).T
pre_weights=0
weights=np.zeros((len(data[0]),len(output[0])))
model=linear_model.LogisticRegression(C=1e5,solver='lbfgs',multi_class='multinomial')
"""for i in range(epoch):
predicted=predict(data_hat,weights)
print(np.linalg.norm(predicted-output_hat))
#for n in np.random.permutation(len(output)):
weights=Adam(data_hat,output_hat,weights,learning_rate,i)
#if np.linalg.norm(weights-pre_weights)<0.0001:
# print(i)
# break"""
return weights
def softmax_regression_2(data,output,x1,x2,x3):
output=np.asarray(output)
output=output.reshape(output.shape[0],output.shape[1]).T
output=output.reshape(-1)
data=np.asarray(data)
data=data.reshape(data.shape[0],data.shape[1]).T
weights=np.zeros((len(data),len(output)))
model=sklearn.linear_model.LogisticRegression(C=1e5,solver='lbfgs',multi_class='multinomial')
model.fit(data,output)
y1=model.predict(x1)
y2=model.predict(x2)
y3=model.predict(x3)
#for i in range(epoch):
# weights=update_weights(data,output,weights,learning_rate)
return y1,y2,y3
def CNN(data,output,lr,epoch):
k1=np.random.rand(3,3)
k2=np.random.rand(3,3)
k3=np.random.rand(3,3)
k4=np.random.rand(3,3)
k5=np.random.rand(3,3)
k6=np.random.rand(3,3)
k7=np.random.rand(3,3)
k8=np.random.rand(3,3)
pool=Pool(4)
conv1=pool.map(partial(conv_layer,kernel=k1),data)
pool.close()
pool.join()
    conv1 = [np.maximum(c, 0) for c in conv1]  # ReLU on each feature map (pool.map returns a list)
pool=Pool(4)
m1_=pool.map(max_pooling_,conv1)
pool.close()
pool.join()
m1=[i[0] for i in m1_]
pos1=[i[1]for i in m1_]
u1=[i[2]for i in m1_]
r1=[i[3]for i in m1_]
pool=Pool(4)
conv2=pool.map(partial(conv_layer,kernel=k2),m1)
pool.close()
pool.join()
    conv2 = [np.maximum(c, 0) for c in conv2]  # ReLU
pool=Pool(4)
m1_=pool.map(max_pooling_,conv2)
pool.close()
pool.join()
m2=[i[0] for i in m1_]
pos2=[i[1]for i in m1_]
u2=[i[2]for i in m1_]
r2=[i[3]for i in m1_]
pool=Pool(4)
conv3=pool.map(partial(conv_layer,kernel=k3),m2)
pool.close()
pool.join()
    conv3 = [np.maximum(c, 0) for c in conv3]  # ReLU
pool=Pool(4)
m1_=pool.map(max_pooling_,conv3)
pool.close()
pool.join()
m3=[i[0] for i in m1_]
pos3=[i[1]for i in m1_]
u3=[i[2]for i in m1_]
r3=[i[3]for i in m1_]
pool=Pool(4)
conv4=pool.map(partial(conv_layer,kernel=k4),m3)
pool.close()
pool.join()
    conv4 = [np.maximum(c, 0) for c in conv4]  # ReLU
pool=Pool(4)
m1_=pool.map(max_pooling_,conv4)
pool.close()
pool.join()
m4=[i[0] for i in m1_]
pos4=[i[1]for i in m1_]
u4=[i[2]for i in m1_]
r4=[i[3]for i in m1_]
pool=Pool(4)
conv5=pool.map(partial(conv_layer,kernel=k5),m4)
pool.close()
pool.join()
    conv5 = [np.maximum(c, 0) for c in conv5]  # ReLU
pool=Pool(4)
m1_=pool.map(max_pooling_,conv5)
pool.close()
pool.join()
m5=[i[0] for i in m1_]
pos5=[i[1]for i in m1_]
u5=[i[2]for i in m1_]
r5=[i[3]for i in m1_]
pool=Pool(4)
conv6=pool.map(partial(conv_layer,kernel=k6),m5)
pool.close()
pool.join()
    conv6 = [np.maximum(c, 0) for c in conv6]  # ReLU
pool=Pool(4)
m1_=pool.map(max_pooling_,conv6)
pool.close()
pool.join()
m6=[i[0] for i in m1_]
pos6=[i[1]for i in m1_]
u6=[i[2]for i in m1_]
r6=[i[3]for i in m1_]
pool=Pool(4)
conv7=pool.map(partial(conv_layer,kernel=k7),m6)
pool.close()
pool.join()
    conv7 = [np.maximum(c, 0) for c in conv7]  # ReLU
pool=Pool(4)
m1_=pool.map(max_pooling_,conv7)
pool.close()
pool.join()
m7=[i[0] for i in m1_]
pos7=[i[1]for i in m1_]
u7=[i[2]for i in m1_]
r7=[i[3]for i in m1_]
pool=Pool(4)
conv8=pool.map(partial(conv_layer,kernel=k8),m7)
pool.close()
pool.join()
    conv8 = [np.maximum(c, 0) for c in conv8]  # ReLU
pool=Pool(4)
    m1_ = pool.map(max_pooling_, conv8)
pool.close()
pool.join()
m8=[i[0] for i in m1_]
pos8=[i[1]for i in m1_]
u8=[i[2]for i in m1_]
r8=[i[3]for i in m1_]
def train(folder,reshape_dim,learning_rate,epoch):
data,output=load_data(folder)
#data=[1,2,3,4,5,6,7,8,9,10,11,12,13]
#print(output)
#print(output[0].shape)
#print(data[0].shape)
#print(data[1])
data=parallel_2(data,reshape_dim)
weights=softmax_regression(data,output,learning_rate,epoch)
return weights
def train_with_sklearn(folder,reshape_dim,x1,x2,x3):
data,output=load_data(folder)
data=parallel_2(data,reshape_dim)
y1,y2,y3=softmax_regression_2(data,output,x1,x2,x3)
return y1,y2,y3
| 2.6875 | 3 |
HostAgent/agentSndModule.py | pupeng/hone | 5 | 12791094 | # Copyright (c) 2011-2013 <NAME>. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the COPYRIGHT file.
# agentSndModule
# Host agent send module
# send stats, hostJoin, etc to the controller
import socket
import sys
import logging
import cPickle as pickle
from uuid import getnode as get_mac
from cStringIO import StringIO
from agentUtil import LogUtil
from hone_message import *
ctrlCommPort = 8866
class HostAgentSndSocket:
def __init__(self, controllerAddress = 'localhost', controllerPort = ctrlCommPort):
try:
self.hostSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.hostSock.connect((controllerAddress, controllerPort))
message = HoneMessage()
message.messageType = HoneMessageType_HostJoin
message.hostId = str(get_mac())
#message.hostId = str(random.randint(0, 1000000))
self.sendMessage(message)
except socket.error, msg:
logging.error('connection to controller error: {0}'.format(msg))
print 'connect error '
print msg
if self.hostSock:
self.hostSock.close()
self.hostSock = None
except Exception:
if self.hostSock:
self.hostSock.close()
self.hostSock = None
if self.hostSock is None:
logging.error('Connection to controller error in HostAgentSndSocket. Agent will stop.')
print 'Connection to controller error in HostAgentSndSocket. Agent will stop.'
sys.exit()
def sendMessage(self, message):
if self.hostSock:
src = StringIO()
pickle.dump(message, src, pickle.HIGHEST_PROTOCOL)
data = src.getvalue() + '\r\n'
src.close()
self.hostSock.sendall(data)
#debugLog('sndModule', 'send message. messageType:', \
# message.messageType, 'jobId', message.jobId, \
# 'flowId:', message.flowId, 'sequence:', \
# message.sequence, 'content:', message.content)
def closeSocket(self):
self.hostSock.close()
def recvMessage(self):
return self.hostSock.recv(1024)
class HostAgentRelaySndSocket:
def __init__(self, middleAddress, port):
try:
self.hostSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.hostSock.connect((middleAddress, port))
except socket.error, msg:
logging.warning('Relay connection to middle error: {0}'.format(msg))
print 'connect error '
print msg
if self.hostSock:
self.hostSock.close()
self.hostSock = None
except Exception:
self.hostSock = None
if self.hostSock is None:
logging.warning('socket error in HostAgentRelaySndSocket')
print 'socket error in HostAgentRelaySndSocket'
def sendMessage(self, message):
if self.hostSock:
src = StringIO()
pickle.dump(message, src, pickle.HIGHEST_PROTOCOL)
data = src.getvalue() + '\r\n'
src.close()
self.hostSock.sendall(data)
#debugLog('sndModule', 'send message. messageType:',\
# message.messageType, 'jobId', message.jobId,\
# 'flowId:', message.flowId, 'sequence:',\
# message.sequence, 'content:', message.content)
| 2.03125 | 2 |
roda_core/monitoring/constants.py | matitalatina/roda | 0 | 12791095 | TYRE_POSITION_FRONT_LEFT = 'FL'
TYRE_POSITION_FRONT_RIGHT = 'FR'
TYRE_POSITION_REAR_LEFT = 'RL'
TYRE_POSITION_REAR_RIGHT = 'RR'
TYRE_POSITION_FRONT_LEFT_LABEL = 'Front Left'
TYRE_POSITION_FRONT_RIGHT_LABEL = 'Front Right'
TYRE_POSITION_REAR_LEFT_LABEL = 'Rear Left'
TYRE_POSITION_REAR_RIGHT_LABEL = 'Rear Right'
TYRE_POSITIONS = (
(TYRE_POSITION_FRONT_LEFT, TYRE_POSITION_FRONT_LEFT_LABEL),
(TYRE_POSITION_FRONT_RIGHT, TYRE_POSITION_FRONT_RIGHT_LABEL),
(TYRE_POSITION_REAR_LEFT, TYRE_POSITION_REAR_LEFT_LABEL),
(TYRE_POSITION_REAR_RIGHT, TYRE_POSITION_REAR_RIGHT_LABEL),
)
SAMPLE_DATA_PATH = 'resources/data_measurements_finals.csv'
FIELD_TIMESTAMP_FORMAT = '%Y-%m-%d %H:%M:%S' | 1.40625 | 1 |
other/time_travel.py | SimoneABNto/My-Code-Py | 0 | 12791096 | <filename>other/time_travel.py<gh_stars>0
from datetime import datetime, timedelta
class TimeTravel:
def __init__(self):
self.now = datetime.now()
def fuck_go_back_by(self, hours=0, minutes=0):
return self.now - timedelta(hours=hours, minutes=minutes)
if __name__ == '__main__':
tt = TimeTravel()
time = tt.fuck_go_back_by(hours=1, minutes=10)
print(time)
| 3.0625 | 3 |
entrypoint.py | webmsgr/PyOneLife | 1 | 12791097 | # use after installing the client to run the client
import sys
import multiprocessing
try:
import pyOHOL
except ImportError as e:
print("Client is not installed")
raise e
def main():
multiprocessing.freeze_support()
pyOHOL.main()
if __name__ == "__main__":
main()
| 2.234375 | 2 |
Questionnaire2/Questionnaire/Api/decorators.py | riverstation/project-all | 2 | 12791098 | <reponame>riverstation/project-all
import json
from Api.utils import *
def admin_required(func):
def _wrapper(self, request, *args, **kwargs):
if request.user.is_authenticated and hasattr(request.user, 'admin'):
return func(self, request, *args, **kwargs)
else:
return not_authenticated()
return _wrapper
def customer_required(func):
def _wrapper(self, request, *args, **kwargs):
if request.user.is_authenticated and hasattr(request.user, 'customer'):
return func(self, request, *args, **kwargs)
else:
return not_authenticated()
return _wrapper
def userinfo_required(func):
def _wrapper(self, request, *args, **kwargs):
if request.user.is_authenticated and hasattr(request.user, 'userinfo'):
return func(self, request, *args, **kwargs)
else:
return not_authenticated()
return _wrapper
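# Illustrative sketch (not part of the original module): these decorators wrap
# class-based view methods, e.g. (hypothetical view class, not defined here):
#
#     class AdminDashboardView(View):
#         @admin_required
#         def get(self, request):
#             return JsonResponse({"ok": True})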
| 2.453125 | 2 |
airflow/migrations/versions/53bee4c621a1_create_lastdeployedtime_table.py | TriggerMail/incubator-airflow | 1 | 12791099 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create LastDeployedTime table
Revision ID: 53bee4c621a1
Revises: <PASSWORD>
Create Date: 2020-05-03 23:18:22.731457
"""
# revision identifiers, used by Alembic.
revision = '53bee4c621a1'
down_revision = 'c2<PASSWORD>'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from airflow.models import LastDeployedTime
from datetime import datetime
def upgrade():
op.create_table(
'last_deployed_time',
sa.Column('last_deployed', sa.DateTime(), primary_key=True)
)
LastDeployedTime().set_last_deployed(datetime.utcnow())
def downgrade():
op.drop_table("last_deployed_time")
| 1.515625 | 2 |
BugsApp/admin.py | safia88/BugTracker | 0 | 12791100 | from django.contrib import admin
from .models import Ticket, Customeuser
# Register your models here.
admin.site.register(Ticket)
admin.site.register(Customeuser) | 1.3125 | 1 |
start_alibaba.py | dgdell/AlibabaInternation | 0 | 12791101 | <reponame>dgdell/AlibabaInternation
# coding:utf-8
import os
import sys
from scrapy import cmdline
if __name__ == '__main__':
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
cmdline.execute(['scrapy', 'crawl', 'alibaba'])
| 1.875 | 2 |
qclib/machine_learning/datasets/common.py | carstenblank/qclib | 0 | 12791102 | <reponame>carstenblank/qclib
# Copyright 2021 qclib project.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Common dataset preprocessing routine
"""
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
def preprocessing(training_size:int, test_size:int, features:int, max_features:int,
data, class_labels:list, num_classes:int, random_seed=42, normalize=True):
"""
Common dataset preprocessing routine
"""
# pylint: disable=no-member
sample_train, sample_test, label_train, label_test = \
train_test_split(data.data, data.target, test_size=test_size*num_classes,
random_state=random_seed,
shuffle=True,
stratify=data.target)
# Standardize for gaussian around 0 with unit variance
std_scale = StandardScaler().fit(sample_train)
sample_train = std_scale.transform(sample_train)
sample_test = std_scale.transform(sample_test)
# Reduce the number of features
if features < max_features:
pca = PCA(n_components=features).fit(sample_train)
sample_train = pca.transform(sample_train)
sample_test = pca.transform(sample_test)
# Scale to the range (0, +1)
samples = np.append(sample_train, sample_test, axis=0)
minmax_scale = MinMaxScaler((0, 1)).fit(samples)
sample_train = minmax_scale.transform(sample_train)
sample_test = minmax_scale.transform(sample_test)
# Normalize rows.
if normalize:
sample_train = sample_train / \
np.linalg.norm(sample_train, axis=1).reshape((len(sample_train),1))
sample_test = sample_test / \
np.linalg.norm(sample_test, axis=1).reshape((len(sample_test),1))
# Pick training and test size number of samples for each class label
training_input = {key: (sample_train[label_train == key, :])[:training_size]
for key in class_labels}
test_input = {key: (sample_test[label_test == key, :])[:test_size]
for key in class_labels}
return sample_train, training_input, test_input, class_labels
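# Usage sketch (not part of the original module), e.g. with scikit-learn's
# iris dataset (4 features, 3 classes):
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    _, training_input, test_input, labels = preprocessing(
        training_size=20, test_size=10, features=2, max_features=4,
        data=load_iris(), class_labels=[0, 1, 2], num_classes=3)
    print({key: value.shape for key, value in training_input.items()})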
| 2.484375 | 2 |
tests/test_timer_dec.py | pnpnpn/timy | 300 | 12791103 | <gh_stars>100-1000
from unittest import mock
from timy import timer
from timy.settings import timy_config
@mock.patch('timy.output')
def test_timer_no_tracking(p_output):
timy_config.tracking = False
@timer()
def func():
pass
func()
p_output.assert_not_called()
@mock.patch('timy.output')
@mock.patch('time.perf_counter')
def test_timer_include_sleeptime(p_perf_counter, p_output):
timy_config.tracking = True
@timer()
def func():
pass
p_perf_counter.return_value = 1
func()
p_output.assert_has_calls([
mock.call(
timy_config.DEFAULT_IDENT,
'executed (func) for 1 time in 0.000000'),
mock.call(
timy_config.DEFAULT_IDENT,
'best time was 0.000000'),
])
@mock.patch('timy.output')
@mock.patch('time.process_time')
def test_timer_include_sleeptime_no(p_process_time, p_output):
timy_config.tracking = True
@timer(include_sleeptime=False)
def func():
pass
p_process_time.return_value = 1
func()
p_output.assert_has_calls([
mock.call(
timy_config.DEFAULT_IDENT,
'executed (func) for 1 time in 0.000000'),
mock.call(
timy_config.DEFAULT_IDENT,
'best time was 0.000000'),
])
@mock.patch('timy.output')
@mock.patch('time.perf_counter')
def test_timer_with_loops(p_perf_counter, p_output):
timy_config.tracking = True
LOOPS = 4
@timer(loops=LOOPS)
def func():
pass
p_perf_counter.return_value = 1
func()
p_output.assert_has_calls([
mock.call(
timy_config.DEFAULT_IDENT,
'executed (func) for {} times in 0.000000'.format(LOOPS)),
mock.call(
timy_config.DEFAULT_IDENT,
'best time was 0.000000'),
])
| 2.28125 | 2 |
openprocurement/auctions/appraisal/tests/blanks/item_blanks.py | bdmbdsm/openprocurement.auctions.appraisal | 0 | 12791104 | # -*- coding: utf-8 -*-
from uuid import uuid4
from copy import deepcopy
from datetime import timedelta
from openprocurement.auctions.core.utils import calculate_business_date
from openprocurement.auctions.appraisal.models import AppraisalAuction
def check_items_listing(self):
self.app.authorization = ('Basic', ('broker', ''))
data = self.initial_data.copy()
# Auction creation
response = self.app.post_json('/auctions', {'data': data})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
auction_id = response.json['data']['id']
owner_token = response.json['access']['token']
access_header = {'X-Access-Token': str(owner_token)}
self.app.patch_json(
'/auctions/{}'.format(auction_id),
{'data': {'status': 'active.tendering'}},
headers=access_header
)
response = self.app.get(
'/auctions/{}/items'.format(auction_id),
)
self.assertEqual(len(response.json['data']), len(data['items']))
# Create one item and check listing
response = self.app.post_json(
'/auctions/{}/items'.format(auction_id),
{'data': self.initial_item_data},
headers=access_header
)
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
response = self.app.get(
'/auctions/{}/items'.format(auction_id),
)
self.assertEqual(len(response.json['data']), len(data['items']) + 1)
def check_item_creation(self):
self.app.authorization = ('Basic', ('broker', ''))
data = self.initial_data.copy()
# Auction creation
response = self.app.post_json('/auctions', {'data': data})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
auction_id = response.json['data']['id']
owner_token = response.json['access']['token']
access_header = {'X-Access-Token': str(owner_token)}
self.app.patch_json(
'/auctions/{}'.format(auction_id),
{'data': {'status': 'active.tendering'}},
headers=access_header
)
# Item creation
response = self.app.post_json(
'/auctions/{}/items'.format(auction_id),
{'data': self.initial_item_data},
headers=access_header
)
item_id = response.json['data']['id']
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(self.initial_item_data['id'], response.json['data']['id'])
self.assertIn(item_id, response.headers['Location'])
self.assertEqual(self.initial_item_data['description'], response.json["data"]["description"])
self.assertEqual(self.initial_item_data['quantity'], response.json["data"]["quantity"])
self.assertEqual(self.initial_item_data['address'], response.json["data"]["address"])
# Get item
response = self.app.get('/auctions/{}/items/{}'.format(auction_id, item_id))
self.assertEqual(item_id, response.json['data']['id'])
self.assertEqual(self.initial_item_data['description'], response.json["data"]["description"])
self.assertEqual(self.initial_item_data['quantity'], response.json["data"]["quantity"])
self.assertEqual(self.initial_item_data['address'], response.json["data"]["address"])
def check_item_patch(self):
self.app.authorization = ('Basic', ('broker', ''))
data = self.initial_data.copy()
# Auction creation
response = self.app.post_json('/auctions', {'data': data})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
auction_id = response.json['data']['id']
owner_token = response.json['access']['token']
access_header = {'X-Access-Token': str(owner_token)}
self.app.patch_json(
'/auctions/{}'.format(auction_id),
{'data': {'status': 'active.tendering'}},
headers=access_header
)
# Item creation
response = self.app.post_json(
'/auctions/{}/items'.format(auction_id),
{'data': self.initial_item_data},
headers=access_header
)
item_id = response.json['data']['id']
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(self.initial_item_data['id'], response.json['data']['id'])
self.assertIn(item_id, response.headers['Location'])
self.assertEqual(self.initial_item_data['description'], response.json["data"]["description"])
self.assertEqual(self.initial_item_data['quantity'], response.json["data"]["quantity"])
self.assertEqual(self.initial_item_data['address'], response.json["data"]["address"])
# Get item
response = self.app.get('/auctions/{}/items/{}'.format(auction_id, item_id))
self.assertEqual(item_id, response.json['data']['id'])
self.assertEqual(self.initial_item_data['description'], response.json["data"]["description"])
self.assertEqual(self.initial_item_data['quantity'], response.json["data"]["quantity"])
self.assertEqual(self.initial_item_data['address'], response.json["data"]["address"])
# Patch item
patch_data = {'description': 'DESCRIPTION_' + uuid4().hex, 'id': '0*32'}
response = self.app.patch_json(
'/auctions/{}/items/{}'.format(auction_id, item_id),
{'data': patch_data},
headers=access_header
)
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertNotEqual(patch_data['id'], response.json['data']['id'])
self.assertEqual(patch_data['description'], response.json["data"]["description"])
def check_patch_auction_in_not_editable_statuses(self):
self.app.authorization = ('Basic', ('broker', ''))
# Auction creation
data = self.initial_data.copy()
response = self.app.post_json('/auctions', {'data': data})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
auction_id = response.json['data']['id']
owner_token = response.json['access']['token']
access_header = {'X-Access-Token': str(owner_token)}
self.auction_id = auction_id
self.set_status('active.tendering')
# Item creation
response = self.app.post_json(
'/auctions/{}/items'.format(auction_id),
{'data': self.initial_item_data},
headers=access_header
)
item_id = response.json['data']['id']
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
# Change status in which you can edit auction
desired_status = 'active.auction'
self.set_status(desired_status)
self.app.authorization = ('Basic', ('broker', ''))
# Trying to create new item
response = self.app.post_json(
'/auctions/{}/items'.format(auction_id),
{'data': self.initial_item_data},
headers=access_header,
status=403
)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(
response.json['errors'][0]['description'],
"You can't change items in this status ({})".format(desired_status)
)
# Trying to update new item
response = self.app.patch_json(
'/auctions/{}/items/{}'.format(auction_id, item_id),
{'data': {'description': uuid4().hex}},
headers=access_header,
status=403
)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(
response.json['errors'][0]['description'],
"You can't change items in this status ({})".format(desired_status)
)
def validate_change_items_after_rectification_period(self):
self.app.authorization = ('Basic', ('broker', ''))
# Auction creation
data = self.initial_data.copy()
response = self.app.post_json('/auctions', {'data': data})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
auction_id = response.json['data']['id']
owner_token = response.json['access']['token']
access_header = {'X-Access-Token': str(owner_token)}
self.auction_id = auction_id
self.set_status('active.tendering')
# Item creation
response = self.app.post_json(
'/auctions/{}/items'.format(auction_id),
{'data': self.initial_item_data},
headers=access_header
)
item_id = response.json['data']['id']
self.assertEqual(response.status, '201 Created')
# Change rectification period
fromdb = self.db.get(auction_id)
fromdb = AppraisalAuction(fromdb)
fromdb.tenderPeriod.startDate = calculate_business_date(
fromdb.tenderPeriod.startDate,
-timedelta(days=15),
fromdb,
working_days=True
)
fromdb.tenderPeriod.endDate = calculate_business_date(
fromdb.tenderPeriod.startDate,
timedelta(days=7),
fromdb,
working_days=True
)
fromdb = fromdb.store(self.db)
self.assertEqual(fromdb.id, auction_id)
        # Check that items can't be edited
response = self.app.post_json(
'/auctions/{}/items'.format(auction_id),
{'data': self.initial_item_data},
headers=access_header,
status=403
)
self.assertEqual(response.json['errors'][0]['description'], 'You can\'t change items after rectification period')
response = self.app.patch_json(
'/auctions/{}/items/{}'.format(auction_id, item_id),
{'data': {'description': uuid4().hex}},
headers=access_header,
status=403
)
self.assertEqual(response.json['errors'][0]['description'], 'You can\'t change items after rectification period')
def batch_create_items(self):
self.app.authorization = ('Basic', ('broker', ''))
data = self.initial_data.copy()
data['items'] = [self.initial_item_data]
# Auction creation
response = self.app.post_json('/auctions', {'data': data})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(len(response.json['data']['items']), len(data['items']))
def batch_update_items(self):
self.app.authorization = ('Basic', ('broker', ''))
data = self.initial_data.copy()
data['items'] = [self.initial_item_data]
# Auction creation
response = self.app.post_json('/auctions', {'data': data})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(len(response.json['data']['items']), len(data['items']))
auction_id = response.json['data']['id']
owner_token = response.json['access']['token']
access_header = {'X-Access-Token': str(owner_token)}
self.app.patch_json(
'/auctions/{}'.format(auction_id),
{'data': {'status': 'active.tendering'}},
headers=access_header
)
# Update items with batch mode
item_2 = deepcopy(self.initial_item_data)
del item_2['id']
patch_items = {'items': [self.initial_item_data, item_2]}
response = self.app.patch_json(
'/auctions/{}'.format(auction_id),
{'data': patch_items},
headers=access_header
)
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(len(response.json['data']['items']), len(patch_items['items']))
def check_bids_invalidation(self):
self.app.authorization = ('Basic', ('broker', ''))
# Auction creation
data = self.initial_data.copy()
response = self.app.post_json('/auctions', {'data': data})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
auction_id = response.json['data']['id']
owner_token = response.json['access']['token']
access_header = {'X-Access-Token': str(owner_token)}
self.auction_id = auction_id
self.set_status('active.tendering')
# Create and activate bid
response = self.app.post_json(
'/auctions/{}/bids'.format(auction_id),
{'data': {'tenderers': [self.initial_organization], "status": "draft", 'qualified': True, 'eligible': True}}
)
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bidder_id = response.json['data']['id']
bid_token = response.json['access']['token']
self.app.patch_json(
'/auctions/{}/bids/{}?acc_token={}'.format(auction_id, bidder_id, bid_token),
{'data': {'status': 'active'}}
)
# Create item
response = self.app.post_json(
'/auctions/{}/items'.format(auction_id),
{'data': self.initial_item_data},
headers=access_header
)
item_id = response.json['data']['id']
# Check if bid invalidated
response = self.app.get(
'/auctions/{}/bids/{}?acc_token={}'.format(auction_id, bidder_id, bid_token)
)
self.assertEqual(response.json['data']['status'], 'invalid')
response = self.app.get('/auctions/{}'.format(auction_id))
self.assertIn('invalidationDate', response.json['data']['rectificationPeriod'])
invalidation_date = response.json['data']['rectificationPeriod']['invalidationDate']
# Activate bid again and check if status changes
self.app.patch_json(
'/auctions/{}/bids/{}?acc_token={}'.format(auction_id, bidder_id, bid_token),
{'data': {'status': 'active'}}
)
response = self.app.get(
'/auctions/{}/bids/{}?acc_token={}'.format(auction_id, bidder_id, bid_token)
)
self.assertEqual(response.json['data']['status'], 'active')
# Patch item
response = self.app.patch_json(
'/auctions/{}/items/{}'.format(auction_id, item_id),
{'data': {}},
headers=access_header
)
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
response = self.app.get(
'/auctions/{}/bids/{}?acc_token={}'.format(auction_id, bidder_id, bid_token)
)
self.assertEqual(response.json['data']['status'], 'invalid')
response = self.app.get('/auctions/{}'.format(auction_id))
self.assertIn('invalidationDate', response.json['data']['rectificationPeriod'])
self.assertNotEqual(invalidation_date, response.json['data']['rectificationPeriod']['invalidationDate']) | 2.234375 | 2 |
scrapy_proxy/spiders/run.py | cain19811028/scrapy_proxy | 0 | 12791105 | # -*- coding: utf-8 -*-
import base64
import json
import scrapy
from scrapy import Request
class ProxyList(scrapy.Spider):
name = "proxy_list"
allowed_domains = ["proxy-list.org"]
def start_requests(self):
for i in range(1, 4):
print(i)
yield Request('https://proxy-list.org/english/index.php?p=%s' % i)
def parse(self, response):
list = response.xpath('//div[@class="table-wrap"]//ul')
for item in list:
proxy = item.xpath('.//li[@class="proxy"]//script').extract()[0]
            # the proxy address is base64-encoded inside an inline <script>; decode it to text
            proxy = base64.b64decode(proxy.split("'")[1]).decode('utf-8')
ip = proxy.split(':')[0]
print(proxy)
protocol = item.xpath('.//li[@class="https"]/text()').extract()
protocol = 'http' if len(protocol) > 0 else 'https'
url = '%s://httpbin.org/ip' % protocol
proxy = '%s://%s' % (protocol, proxy)
meta = {
'ip': ip,
'proxy': proxy,
'dont_retry': True,
'download_timeout': 15
}
yield Request(
url,
callback=self.check_available,
meta=meta,
dont_filter=True
)
def check_available(self, response):
ip = response.meta['ip']
if ip == json.loads(response.text)['origin']:
yield {
'proxy':response.meta['proxy']
}
| 2.625 | 3 |
src/edit.py | PalAditya/NotePrompt | 0 | 12791106 | <gh_stars>0
import json
import colored
from colored import stylize
from pyautogui import typewrite  # used by edit() below to pre-fill the prompt
def log(string, color, font="slant", figlet=False):
print(stylize(string, colored.fg(color)), end = " ")
def readtasks(task_id):
with open("tasks.json","r") as f:
tasklist = f.read()
tasklist = json.loads(tasklist)
tasks = tasklist['todo']
for k, v in tasks.items():
if str(k) != str(task_id):
continue
log("-------------- Editing Task: " + v['name'] + " ---------------", "spring_green_3a")
print()
data = edit(v)
tasks[k] = data
tasklist = json.dumps(tasklist)
with open("tasks.json","w") as f:
f.write(tasklist)
def edit(taskdata):
allowed_details = ["priority", "name"] #TODO: Add Alarm
edited_task = {}
for k, v in taskdata.items():
allowed_details.remove(k)
log(k + ": " , "turquoise_2")
typewrite(v)
user_val = input()
edited_task[k] = user_val
for details in allowed_details:
log(details + ": " , "turquoise_2")
user_val = input()
edited_task[details] = user_val
return edited_task
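# Illustrative sketch (not part of the original module): readtasks()/edit() above assume a
# tasks.json file shaped roughly like the structure below. Every field name except "todo",
# "name" and "priority" is an assumption inferred from the code, not taken from the project.
EXAMPLE_TASKS_JSON = {
    "todo": {
        "1": {"name": "Buy milk", "priority": "high"},
        "2": {"name": "Write report", "priority": "low"},
    }
}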
| 2.8125 | 3 |
fractal/clib/__init__.py | pysrc/fractal | 26 | 12791107 | <gh_stars>10-100
# Wrapper around the C library
try:
from fractension import jCalc, mCalc
print("Using fractension...")
except:
import sys
import os
import ctypes
    # Directory containing the current file
__curdir = os.path.split(os.path.realpath(__file__))[0]
__dll = None
if sys.platform == "win32": # Windows
if "32 bit" in sys.version:
__dll = ctypes.CDLL(os.path.join(__curdir, "calc.dll"))
        else:  # 64-bit
__dll = ctypes.CDLL(os.path.join(__curdir, "calc64.dll"))
__jCalc = __dll.jCalc
__jCalc.argtypes = [ctypes.c_double * 13, ]
__jCalc.restype = ctypes.c_int
__mCalc = __dll.mCalc
__mCalc.argtypes = [ctypes.c_double * 13, ]
__mCalc.restype = ctypes.c_int
def jCalc(args):
k = (13 * ctypes.c_double)()
for i in range(13):
k[i] = args[i]
res = __jCalc(k)
r = k[0]
del k
return res, r
def mCalc(args):
k = (13 * ctypes.c_double)()
for i in range(13):
k[i] = args[i]
res = __mCalc(k)
r = k[0]
del k
return res, r
print("Using Dll...")
| 2.28125 | 2 |
toolbox/metrics/metrics_base.py | ML-Dashboard/ML-ToolBox | 0 | 12791108 | <filename>toolbox/metrics/metrics_base.py<gh_stars>0
from abc import ABC
from toolbox.trackable import Trackable
class Metrics(Trackable, ABC):
pass
| 1.359375 | 1 |
challenges/python-solutions/day-13.py | elifloresch/thirty-days-challenge | 0 | 12791109 | # abstract Book class is provided.
# Write just the MyBook class
from abc import ABCMeta, abstractmethod
class Book(object, metaclass=ABCMeta):
def __init__(self, title, author):
self.title = title
self.author = author
@abstractmethod
def display(self):
pass
class MyBook(Book):
def __init__(self, title, author, price):
super().__init__(title, author)
self.price = price
def display(self):
print('Title: ' + self.title)
print('Author: ' + self.author)
print('Price: ' + str(self.price))
new_novel = MyBook(input(), input(), int(input()))
new_novel.display()
| 4.21875 | 4 |
powell_torch.py | necst/faber_biocas | 3 | 12791110 | <filename>powell_torch.py
# /******************************************
# *MIT License
# *
# *Copyright (c) [2021] [<NAME>, <NAME>, <NAME>, <NAME>]
# *
# *Permission is hereby granted, free of charge, to any person obtaining a copy
# *of this software and associated documentation files (the "Software"), to deal
# *in the Software without restriction, including without limitation the rights
# *to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# *copies of the Software, and to permit persons to whom the Software is
# *furnished to do so, subject to the following conditions:
# *
# *The above copyright notice and this permission notice shall be included in all
# *copies or substantial portions of the Software.
# *
# *THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# *IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# *FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# *AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# *LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# *OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# *SOFTWARE.
# ******************************************/
import os
import pydicom
import cv2
import numpy as np
import math
import glob
import time
import pandas as pd
from torch.multiprocessing import Pool, Process, set_start_method
import struct
import statistics
import argparse
import kornia
import torch
compute_metric = None
precompute_metric = None
device = "cpu"
ref_vals = None
move_data = None
def no_transfer(input_data):
return input_data
def to_cuda(input_data):
return input_data.cuda(non_blocking=True)
def batch_transform(images, pars):
img_warped = kornia.geometry.warp_affine(images, pars, mode="nearest", dsize=(images.shape[2], images.shape[3]))
return img_warped
def transform(image, par):
tmp_img = image.reshape((1, 1, *image.shape)).float()
t_par = torch.unsqueeze(par, dim=0)
img_warped = kornia.geometry.warp_affine(tmp_img, t_par, mode="nearest", dsize=(tmp_img.shape[2], tmp_img.shape[3]))
return img_warped
def compute_moments(img):
moments = torch.empty(6, device=device)
l = torch.arange(img.shape[0], device=device)
moments[0] = torch.sum(img) # m00
moments[1] = torch.sum(img * l) # m10
moments[2] = torch.sum(img * (l**2)) # m20
moments[3] = torch.sum(img * l.reshape((img.shape[0], 1)) ) # m01
moments[4] = torch.sum(img * (l.reshape((img.shape[0], 1)))**2 ) # m02
moments[5] = torch.sum(img * l * l.reshape((img.shape[0], 1))) # m11
return moments
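# Illustrative sketch (not part of the original script): compute_moments() returns the raw
# image moments in the order [m00, m10, m20, m01, m02, m11]. The helper below is only a
# usage example and is never called by the registration pipeline.
def _example_compute_moments():
    img = torch.zeros(8, 8)
    img[2, 5] = 1.0  # a single bright pixel at row 2, column 5
    m = compute_moments(img)
    # m[0] (m00) == 1, m[1] (m10) == 5 (column index), m[3] (m01) == 2 (row index)
    return m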
def to_matrix_blocked(vector_params):
mat_params=torch.empty((2,3))
mat_params[0][2]=vector_params[0]
mat_params[1][2]=vector_params[1]
if vector_params[2] > 1 or vector_params[2] < -1:
mat_params[0][0]=1 #cos_teta
mat_params[1][1]=1 #cos_teta
mat_params[0][1]=0
mat_params[1][0]=0
else:
mat_params[0][0]=vector_params[2] #cos_teta
mat_params[1][1]=vector_params[2] #cos_teta
mat_params[0][1]=torch.sqrt(1-(vector_params[2]**2))
mat_params[1][0]=-mat_params[0][1]
return (mat_params)
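# Illustrative sketch (not part of the original script): to_matrix_blocked() turns a
# 3-element vector [tx, ty, cos_theta] into the 2x3 affine matrix consumed by
# kornia.geometry.warp_affine. The helper below is never called by the pipeline.
def _example_to_matrix_blocked():
    params = torch.tensor([5.0, -3.0, 1.0])  # pure translation, cos_theta = 1
    mat = to_matrix_blocked(params)
    # mat is [[1, 0, 5], [0, 1, -3]] up to floating point error
    return mat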
def estimate_initial(Ref_uint8, Flt_uint8, params):
ref_mom = compute_moments(Ref_uint8)
flt_mom = compute_moments(Flt_uint8)
flt_avg_10 = flt_mom[1]/flt_mom[0]
flt_avg_01 = flt_mom[3]/flt_mom[0]
flt_mu_20 = (flt_mom[2]/flt_mom[0]*1.0)-(flt_avg_10*flt_avg_10)
flt_mu_02 = (flt_mom[4]/flt_mom[0]*1.0)-(flt_avg_01*flt_avg_01)
flt_mu_11 = (flt_mom[5]/flt_mom[0]*1.0)-(flt_avg_01*flt_avg_10)
ref_avg_10 = ref_mom[1]/ref_mom[0]
ref_avg_01 = ref_mom[3]/ref_mom[0]
ref_mu_20 = (ref_mom[2]/ref_mom[0]*1.0)-(ref_avg_10*ref_avg_10)
ref_mu_02 = (ref_mom[4]/ref_mom[0]*1.0)-(ref_avg_01*ref_avg_01)
ref_mu_11 = (ref_mom[5]/ref_mom[0]*1.0)-(ref_avg_01*ref_avg_10)
params[0][2] = ref_mom[1]/ref_mom[0]-flt_mom[1]/flt_mom[0]
params[1][2] = ref_mom[3]/ref_mom[0] - flt_mom[3]/flt_mom[0]
rho_flt=0.5*torch.atan((2.0*flt_mu_11)/(flt_mu_20-flt_mu_02))
rho_ref=0.5*torch.atan((2.0*ref_mu_11)/(ref_mu_20-ref_mu_02))
delta_rho=rho_ref-rho_flt
roundness=(flt_mom[2]/flt_mom[0]) / (flt_mom[4]/flt_mom[0])
if torch.abs(roundness-1.0)>=0.3:
params[0][0]= torch.cos(delta_rho)
params[0][1] = -torch.sin(delta_rho)
params[1][0] = torch.sin(delta_rho)
params[1][1] = torch.cos(delta_rho)
else:
params[0][0]= 1.0
params[0][1] = 0.0
params[1][0] = 0.0
params[1][1] = 1.0
return (params)
def my_squared_hist2d_t(sample, bins, smin, smax):
D, N = sample.shape
edges = torch.linspace(smin, smax, bins + 1, device=device)
nbin = edges.shape[0] + 1
# Compute the bin number each sample falls into.
Ncount = D*[None]
for i in range(D):
Ncount[i] = torch.searchsorted(edges, sample[i, :], right=True)
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right edge to be
# counted in the last bin, and not as an outlier.
for i in range(D):
# Find which points are on the rightmost edge.
on_edge = (sample[i, :] == edges[-1])
# Shift these points one bin to the left.
Ncount[i][on_edge] -= 1
# Compute the sample indices in the flattened histogram matrix.
xy = Ncount[0]*nbin+Ncount[1]
# Compute the number of repetitions in xy and assign it to the
hist = torch.bincount(xy, None, minlength=nbin*nbin)
# Shape into a proper matrix
hist = hist.reshape((nbin, nbin))
hist = hist.float()
# Remove outliers (indices 0 and -1 for each dimension).
hist = hist[1:-1,1:-1]
return hist
def precompute_mutual_information(Ref_uint8_ravel):
href = torch.histc(Ref_uint8_ravel, bins=256)
href /= Ref_uint8_ravel.numel()
href=href[href>0.000000000000001]
eref=(torch.sum(href*(torch.log2(href))))*-1
return eref
def mutual_information(Ref_uint8_ravel, Flt_uint8_ravel, eref):
if(device == "cuda"):
idx_joint = torch.stack((Ref_uint8_ravel, Flt_uint8_ravel)).long()
j_h_init = torch.sparse.IntTensor(idx_joint, ref_vals, torch.Size([hist_dim, hist_dim])).to_dense()/Ref_uint8_ravel.numel()
else:
idx_joint = torch.stack((Ref_uint8_ravel, Flt_uint8_ravel))
j_h_init = my_squared_hist2d_t(idx_joint, hist_dim, 0, 255)/Ref_uint8_ravel.numel()
j_h = j_h_init[j_h_init>0.000000000000001]
entropy=(torch.sum(j_h*(torch.log2(j_h))))*-1
hflt=torch.sum(j_h_init,axis=0)
hflt=hflt[hflt>0.000000000000001]
eflt=(torch.sum(hflt*(torch.log2(hflt))))*-1
mutualinfo=eref+eflt-entropy
return(mutualinfo)
def precompute_cross_correlation(Ref_uint8_ravel):
return torch.sum(Ref_uint8_ravel * Ref_uint8_ravel)
def cross_correlation(Ref_uint8_ravel, Flt_uint8_ravel, cc_ref):
cc_ref_flt = torch.sum(Ref_uint8_ravel * Flt_uint8_ravel)
cc_flt = torch.sum(Flt_uint8_ravel * Flt_uint8_ravel)
return - cc_ref_flt/torch.sqrt(cc_ref*cc_flt)
def precompute_mean_squared_error(Ref_uint8_ravel):
pass
def mean_squared_error(Ref_uint8_ravel, Flt_uint8_ravel, mse_ref):
return torch.sum((Ref_uint8_ravel - Flt_uint8_ravel)**2)
def compute_mi(ref_img, flt_imgs, t_mats, eref):
flt_warped = batch_transform(flt_imgs, t_mats)
#flt_img = transform(flt_img, t_mat)
mi_a = mutual_information(ref_img, flt_warped[0].ravel(), eref)
mi_b = mutual_information(ref_img, flt_warped[1].ravel(), eref)
return torch.exp(-mi_a).cpu(), torch.exp(-mi_b).cpu()
def compute_cc(ref_img, flt_imgs, t_mats, cc_ref):
flt_warped = batch_transform(flt_imgs, t_mats)
cc_a = cross_correlation(ref_img, flt_warped[0].ravel(), cc_ref)
cc_b = cross_correlation(ref_img, flt_warped[1].ravel(), cc_ref)
return cc_a.cpu(), cc_b.cpu()
def compute_mse(ref_img, flt_imgs, t_mats, mse_ref):
flt_warped = batch_transform(flt_imgs, t_mats)
mse_a = mean_squared_error(ref_img, flt_warped[0].ravel(), mse_ref)
mse_b = mean_squared_error(ref_img, flt_warped[1].ravel(), mse_ref)
return mse_a.cpu(), mse_b.cpu()
def optimize_goldsearch(par, rng, ref_sup_ravel, flt_stack, linear_par, i, eref):
start=par-0.382*rng
end=par+0.618*rng
c=(end-(end-start)/1.618)
d=(start+(end-start)/1.618)
best_mi = 0.0
while(math.fabs(c-d)>0.005):
linear_par[i]=c
a_mat=to_matrix_blocked(linear_par)
linear_par[i]=d
b_mat=to_matrix_blocked(linear_par)
mats = move_data(torch.stack((a_mat, b_mat)))
mi_a, mi_b = compute_metric(ref_sup_ravel, flt_stack, mats, eref)
if(mi_a < mi_b):
end=d
best_mi = mi_a
linear_par[i]=c
else:
start=c
best_mi = mi_b
linear_par[i]=d
c=(end-(end-start)/1.618)
d=(start+(end-start)/1.618)
return (end+start)/2, best_mi
def optimize_powell(rng, par_lin, ref_sup_ravel, flt_stack, eref):
converged = False
eps = 0.000005
last_mut=100000.0
it=0
while(not converged):
converged=True
it=it+1
for i in range(par_lin.numel()):
cur_par = par_lin[i]
cur_rng = rng[i]
param_opt, cur_mi = optimize_goldsearch(cur_par, cur_rng, ref_sup_ravel, flt_stack, par_lin, i, eref)
par_lin[i]=cur_par
if last_mut-cur_mi>eps:
par_lin[i]=param_opt
last_mut=cur_mi
converged=False
else:
par_lin[i]=cur_par
#print("Iterations "+str(it))
return (par_lin)
def register_images(Ref_uint8, Flt_uint8):
params = torch.empty((2,3), device=device)
estimate_initial(Ref_uint8, Flt_uint8, params)
params_cpu = params.cpu()
rng = torch.tensor([80.0, 80.0, 1.0])
pa = torch.tensor([params_cpu[0][2],params_cpu[1][2],params_cpu[0][0]])
Ref_uint8_ravel = Ref_uint8.ravel().double()
eref = precompute_metric(Ref_uint8_ravel)
flt_u = torch.unsqueeze(Flt_uint8, dim=0).float()
flt_stack = torch.stack((flt_u, flt_u))
optimal_params = optimize_powell(rng, pa, Ref_uint8_ravel, flt_stack, eref)
params_trans=to_matrix_blocked(optimal_params)
flt_transform = transform(Flt_uint8, move_data(params_trans))
return (flt_transform)
def save_data(OUT_STAK, name, res_path):
for i in range(len(OUT_STAK)):
b=name[i].split('/')
c=b.pop()
d=c.split('.')
        cv2.imwrite(os.path.join(res_path, d[0][0:2]+str(int(d[0][2:5]))+'.png'), kornia.tensor_to_image(OUT_STAK[i].cpu().byte())) # Create folders
def compute(CT, PET, name, curr_res, t_id, patient_id):
final_img=[]
times=[]
t = 0.0
it_time = 0.0
hist_dim = 256
dim = 512
global ref_vals
ref_vals = torch.ones(dim*dim, dtype=torch.int, device=device)
global move_data
move_data = no_transfer if device=="cpu" else to_cuda
for c,ij in enumerate(zip(CT, PET)):
i = ij[0]
j = ij[1]
ref = pydicom.dcmread(i)
Ref_img = torch.tensor(ref.pixel_array.astype(np.int16), dtype=torch.int16, device=device)
Ref_img[Ref_img==-2000]=1
flt = pydicom.dcmread(j)
Flt_img = torch.tensor(flt.pixel_array.astype(np.int16), dtype=torch.int16, device=device)
Ref_img = (Ref_img - Ref_img.min())/(Ref_img.max() - Ref_img.min())*255
Ref_uint8 = Ref_img.round().type(torch.uint8)
Flt_img = (Flt_img - Flt_img.min())/(Flt_img.max() - Flt_img.min())*255
Flt_uint8 = Flt_img.round().type(torch.uint8)
start_time = time.time()
f_img = register_images(Ref_uint8, Flt_uint8)
end_time= time.time()
final_img.append(f_img.cpu())
it_time = (end_time - start_time)
times.append(it_time)
t=t+it_time
df = pd.DataFrame([t, np.mean(times), np.std(times)],columns=['Test'+str(patient_id)])#+str(config)accel_id.get_config())])
times_df = pd.DataFrame(times,columns=['Test'+str(patient_id)])#+str(config)accel_id.get_config())])
df_path = os.path.join(curr_res,'Time_powll_%02d.csv' % (t_id))
times_df_path = os.path.join(curr_res,'Img_powll_%02d.csv' % (t_id))
df.to_csv(df_path, index=False)
times_df.to_csv(times_df_path, index=False)
save_data(final_img,PET,curr_res)
def compute_wrapper(args, num_threads=1):
config=args.config
for k in range(args.offset, args.patient):
pool = []
curr_prefix = args.prefix+str(k)
curr_ct = os.path.join(curr_prefix,args.ct_path)
curr_pet = os.path.join(curr_prefix,args.pet_path)
curr_res = os.path.join("",args.res_path)
os.makedirs(curr_res,exist_ok=True)
CT=glob.glob(curr_ct+'/*dcm')
PET=glob.glob(curr_pet+'/*dcm')
PET.sort()
CT.sort()
assert len(CT) == len(PET)
images_per_thread = len(CT) // num_threads
print(images_per_thread)
for i in range(num_threads):
start = images_per_thread * i
end = images_per_thread * (i + 1) if i < num_threads - 1 else len(CT)
name = "t%02d" % (i)
pool.append(Process(target=compute, args=(CT[start:end], PET[start:end], name, curr_res, i, k)))
for t in pool:
t.start()
for t in pool:
t.join()
hist_dim = 256
dim = 512
def main():
parser = argparse.ArgumentParser(description='Iron software for IR onto a python env')
parser.add_argument("-pt", "--patient", nargs='?', help='Number of the patient to analyze', default=1, type=int)
parser.add_argument("-o", "--offset", nargs='?', help='Starting patient to analyze', default=0, type=int)
parser.add_argument("-cp", "--ct_path", nargs='?', help='Path of the CT Images', default='./')
parser.add_argument("-pp", "--pet_path", nargs='?', help='Path of the PET Images', default='./')
parser.add_argument("-rp", "--res_path", nargs='?', help='Path of the Results', default='./')
parser.add_argument("-t", "--thread_number", nargs='?', help='Number of // threads', default=1, type=int)
parser.add_argument("-px", "--prefix", nargs='?', help='prefix Path of patients folder', default='./')
parser.add_argument("-im", "--image_dimension", nargs='?', help='Target images dimensions', default=512, type=int)
parser.add_argument("-c", "--config", nargs='?', help='prefix Path of patients folder', default='./')
parser.add_argument("-mtr", "--metric", nargs='?', help='Metric accelerator to be tested', choices=['MI', 'CC', 'MSE'], default='MI')
parser.add_argument("-dvc", "--device", nargs='?', help='Target device', choices=['cpu', 'cuda'], default='cpu')
args = parser.parse_args()
num_threads=args.thread_number
patient_number=args.patient
print(args.config)
print(args)
global compute_metric, precompute_metric
if args.metric == "MI":
compute_metric = compute_mi
precompute_metric = precompute_mutual_information
elif args.metric == "CC":
compute_metric = compute_cc
precompute_metric = precompute_cross_correlation
elif args.metric == "MSE":
compute_metric = compute_mse
precompute_metric = precompute_mean_squared_error
else:
print("Unsupported metric!")
exit()
global device
device = args.device
compute_wrapper(args, num_threads)
print("Faber Powell python is at the end :)")
if __name__== "__main__":
main()
| 1.609375 | 2 |
demos/json2rdf.py | houzw/knowledge-base-data | 0 | 12791111 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: houzhiwei
# time: 2018/7/11 16:37
# https://blog.csdn.net/headwind_/article/details/70234169
from pprint import pprint
import json
from rdflib import BNode, Literal, Graph, Namespace, ConjunctiveGraph, URIRef, resource
from rdflib.namespace import SKOS, DCTERMS, RDF, RDFS, OWL
ont_uri = 'http://www.egc.org/ont/model/geospatial'
ns = Namespace(ont_uri + '#')
g = ConjunctiveGraph(identifier=ont_uri)
g.bind("skos",SKOS)
g.bind("dcterms", DCTERMS)
g.bind("model", ns)
ont_cls = ['Contact', 'TechnicalSpecs', 'InputOutput', 'Input', 'Output', 'Process', 'Testing', 'Other', 'Component',
'Publication']
keys = ['contact', 'technical', 'io', 'input', 'output', 'process', 'testing', 'other', 'component',
'publications']
for cls in ont_cls:
g.add((ns[cls], RDF.type, OWL.Class))
g.add((ns[cls], RDFS.subClassOf, OWL.Thing))
# Create the top-level class
model_cls = ns['GeoSpatialModel']
g.add((model_cls, RDF.type, OWL.Class))
g.add((model_cls, RDFS.subClassOf, OWL.Thing))
count = 0
with open('../CSDMS/csdms3.json', 'r') as f:
data = json.load(f) # list
# pprint(data[0]['component'])
for model_dict in data:
# model individual
model_name = model_dict['model_name'].replace(' ', '_')
sub = ns[model_name]
g.add((sub, RDF.type, model_cls))
g.add((sub, SKOS.prefLabel, Literal(model_dict['model_name'], lang='en')))
for k, v in model_dict.items():
k.replace(' ', '_')
pred = ns[k]
if k in keys:
i = keys.index(k)
else:
continue
if type(v) == str:
g.add((pred, RDF.type, OWL.DatatypeProperty))
g.add((sub, pred, Literal(v)))
elif type(v) == dict:
v_node = BNode()
g.add((v_node, RDF.type, ns[ont_cls[i]]))
g.add((pred, RDF.type, OWL.ObjectProperty))
g.add((sub, pred, v_node))
                # the CSDMS JSON is at most two levels deep
for vk, vv in v.items():
v_pred = ns[vk]
g.add((v_pred, RDF.type, OWL.DatatypeProperty))
g.add((v_node, v_pred, Literal(vv)))
elif type(v) == list and len(v) > 0:
for l in v:
l_node = BNode()
g.add((l_node, RDF.type, ns[ont_cls[i]]))
g.add((sub, pred, l_node))
for lk, lv in l.items():
l_pred = ns[lk]
g.add((l_pred, RDF.type, OWL.DatatypeProperty))
g.add((l_node, l_pred, Literal(lv)))
# pprint(g.serialize(format='application/rdf+xml'))
g.serialize(destination='csdms_test.owl')
| 2.234375 | 2 |
test/square_matrix_multiply/test_divide_and_conquer.py | ahmadgh74/clrs | 0 | 12791112 | import unittest
import numpy as np
from src.square_matrix_multiply import square_matrix_multiply
class TestStrassenMultiply(unittest.TestCase):
def test_square_1(self):
matrix_a = np.array([[1, 3],
[7, 5]])
matrix_b = np.array([[6, 8],
[4, 2]])
expected = np.array([[18, 14],
[62, 66]])
self.assertTrue(bool((square_matrix_multiply(matrix_a, matrix_b) == expected).all()))
| 3.03125 | 3 |
setup.py | kcraley/azdevman | 0 | 12791113 | from setuptools import setup
setup(
name = 'azdevman',
version = '0.0.1',
packages = ['azdevman'],
entry_points = {
'console_scripts': [
'azdevman = azdevman.main:cli'
]
}
)
| 1 | 1 |
data-computing-giants/python-computing/src/gda/tools/database.py | zhoujiagen/giant-data-analysis | 2 | 12791114 | # -*- coding: utf-8 -*-
"""
Database utilities.
@author: zhoujiagen
Created on 03/11/2018 10:02 AM
"""
import pymysql
def connect_mysql(host='127.0.0.1',
port=3306,
user='root',
password='<PASSWORD>',
database='pci',
charset='utf8'):
"""
    Get a MySQL connection.
:param host:
:param port:
:param user:
:param password:
:param database:
:param charset:
:return:
"""
return pymysql.connect(host=host,
port=port,
user=user,
password=password,
database=database,
charset=charset)
| 2.828125 | 3 |
aleph/views/reconcile_api.py | Ueland/aleph | 0 | 12791115 | import json
import math
import logging
from pprint import pprint # noqa
from flask import Blueprint, request
from werkzeug.exceptions import BadRequest
from followthemoney import model
from followthemoney.compare import compare
from aleph.core import settings, url_for
from aleph.model import Entity
from aleph.search import SearchQueryParser
from aleph.search import EntitiesQuery, MatchQuery
from aleph.views.util import jsonify
from aleph.logic.util import entity_url
from aleph.index.util import unpack_result
# See: https://github.com/OpenRefine/OpenRefine/wiki/Reconciliation-Service-API
blueprint = Blueprint('reconcile_api', __name__)
log = logging.getLogger(__name__)
def get_freebase_types():
types = []
for schema in model:
if schema.matchable:
types.append({
'id': schema.name,
'name': schema.label
})
return types
def reconcile_op(query):
"""Reconcile operation for a single query."""
parser = SearchQueryParser({
'limit': query.get('limit', '5'),
'strict': 'false'
}, request.authz)
name = query.get('query', '')
schema = query.get('type') or Entity.THING
proxy = model.make_entity(schema)
proxy.add('name', query.get('query', ''))
for p in query.get('properties', []):
proxy.add(p.get('pid'), p.get('v'), quiet=True)
query = MatchQuery(parser, entity=proxy)
matches = []
for doc in query.search().get('hits').get('hits'):
entity = unpack_result(doc)
if entity is None:
continue
entity = model.get_proxy(entity)
score = math.ceil(compare(model, proxy, entity) * 100)
match = {
'id': entity.id,
'name': entity.caption,
'score': score,
'uri': entity_url(entity.id),
'match': False
}
for type_ in get_freebase_types():
if entity.schema.name == type_['id']:
match['type'] = [type_]
matches.append(match)
log.info("Reconciled: %r -> %d matches", name, len(matches))
return {
'result': matches,
'num': len(matches)
}
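# Illustrative sketch (not part of the original module): reconcile_op() expects an
# OpenRefine-style query dict like the one below. All values are made up for demonstration;
# 'Company' and 'country' are intended as followthemoney schema/property names.
EXAMPLE_RECONCILE_QUERY = {
    'query': 'Siemens AG',
    'type': 'Company',
    'limit': '5',
    'properties': [{'pid': 'country', 'v': 'Germany'}],
}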
def reconcile_index():
domain = settings.APP_UI_URL.strip('/')
meta = {
'name': settings.APP_TITLE,
'identifierSpace': 'http://rdf.freebase.com/ns/type.object.id',
'schemaSpace': 'http://rdf.freebase.com/ns/type.object.id',
'view': {
'url': entity_url('{{id}}')
},
'preview': {
'url': entity_url('{{id}}'),
'width': 800,
'height': 400
},
'suggest': {
'entity': {
'service_url': domain,
'service_path': url_for('reconcile_api.suggest_entity',
_authorize=True)
},
'type': {
'service_url': domain,
'service_path': url_for('reconcile_api.suggest_type')
},
'property': {
'service_url': domain,
'service_path': url_for('reconcile_api.suggest_property')
}
},
'defaultTypes': [{
'id': Entity.THING,
'name': model.get(Entity.THING).label
}]
}
return jsonify(meta)
@blueprint.route('/api/freebase/reconcile', methods=['GET', 'POST'])
def reconcile():
"""
Reconciliation API, emulates Google Refine API.
See: http://code.google.com/p/google-refine/wiki/ReconciliationServiceApi
"""
if 'query' in request.values:
# single
q = request.values.get('query')
if q.startswith('{'):
try:
q = json.loads(q)
except ValueError:
raise BadRequest()
else:
q = request.values
return jsonify(reconcile_op(q))
elif 'queries' in request.values:
# multiple requests in one query
qs = request.values.get('queries')
try:
qs = json.loads(qs)
except ValueError:
raise BadRequest()
queries = {}
for k, q in qs.items():
queries[k] = reconcile_op(q)
return jsonify(queries)
else:
return reconcile_index()
@blueprint.route('/api/freebase/suggest', methods=['GET', 'POST'])
def suggest_entity():
"""Suggest API, emulates Google Refine API."""
args = {
'prefix': request.args.get('prefix'),
'filter:schemata': request.args.getlist('type')
}
matches = []
parser = SearchQueryParser(args, request.authz)
if parser.prefix is not None:
query = EntitiesQuery(parser)
for doc in query.search().get('hits').get('hits'):
source = doc.get('_source')
match = {
'quid': doc.get('_id'),
'id': doc.get('_id'),
'name': source.get('name'),
'r:score': doc.get('_score'),
}
for type_ in get_freebase_types():
if source.get('schema') == type_['id']:
match['n:type'] = type_
match['type'] = [type_['name']]
matches.append(match)
return jsonify({
"code": "/api/status/ok",
"status": "200 OK",
"prefix": request.args.get('prefix', ''),
"result": matches
})
@blueprint.route('/api/freebase/property', methods=['GET', 'POST'])
def suggest_property():
prefix = request.args.get('prefix', '').lower().strip()
matches = []
for prop in model.properties:
match = not len(prefix)
if not match:
match = prefix in prop.name.lower()
match = match or prefix in prop.label.lower()
if match:
matches.append({
'id': prop.name,
'quid': prop.name,
'name': prop.label,
'r:score': 100,
'n:type': {
'id': '/properties/property',
'name': 'Property'
}
})
return jsonify({
"code": "/api/status/ok",
"status": "200 OK",
"prefix": request.args.get('prefix', ''),
"result": matches
})
@blueprint.route('/api/freebase/type', methods=['GET', 'POST'])
def suggest_type():
prefix = request.args.get('prefix', '').lower().strip()
matches = []
for type_ in get_freebase_types():
name = type_.get('name').lower()
if not len(prefix) or prefix in name:
matches.append(type_)
return jsonify({
"code": "/api/status/ok",
"status": "200 OK",
"prefix": request.args.get('prefix', ''),
"result": matches
})
| 2.109375 | 2 |
test/test_shell.py | maxgarvey/python-ffmpeg | 0 | 12791116 | <filename>test/test_shell.py
from shell import run, join_command
def test_run_method():
return_value, output = run('echo "Hello"')
assert return_value == 0
assert output == ['Hello\n']
def test_bad_run_method():
return_value, output = run('not-a-real-binary')
assert return_value == 127
assert output == ['/bin/sh: 1: not-a-real-binary: not found\n']
def test_join():
command_elements = ['ls', '-lah', 'dir_name']
command = join_command(command_elements)
assert command == 'ls -lah dir_name'
| 2.8125 | 3 |
259. 3Sum Smaller.py | rohitpatwa/leetcode | 1 | 12791117 | <filename>259. 3Sum Smaller.py
# Sort nums. Fix i and use two pointers j = i + 1, k = n - 1. If nums[i] + nums[j] + nums[k] < target,
# every third index between j and k also works, so count += k - j and advance j; otherwise decrement k.
class Solution:
def threeSumSmaller(self, nums: List[int], target: int) -> int:
nums.sort()
count = 0
for i in range(len(nums)-2):
j, k = i+1, len(nums) - 1
while j < k:
if nums[i] + nums[j]+nums[k] < target:
count += k-j
j += 1
else:
k -= 1
return count | 3.328125 | 3 |
transmute_core/tests/frameworks/test_flask/test_full.py | pwesthagen/transmute-core | 42 | 12791118 | <reponame>pwesthagen/transmute-core<gh_stars>10-100
import json
def test_happy_path(test_app):
r = test_app.get("/multiply?left=3&right=3")
assert json.loads(r.data.decode()) == 9
def test_headers(test_app):
r = test_app.get("/api/v1/header")
assert r.headers["x-nothing"] == "value"
def test_complex(test_app):
r = test_app.post("/complex/3",
data=json.dumps({"body": "1"}),
headers={
"header": "2",
"content-type": "application/json"
})
assert json.loads(r.data.decode()) == "1:2:3"
def test_api_exception(test_app):
r = test_app.get("/exception")
assert r.status_code == 400
resp = json.loads(r.data.decode())
assert resp["code"] == 400
assert resp["success"] is False
def test_swagger(test_app):
r = test_app.get("/swagger.json")
swagger = json.loads(r.data.decode())
assert swagger["info"] == {
'version': '1.0',
'title': 'example'
}
assert '/multiply' in swagger['paths']
assert '/exception' in swagger['paths']
# test blueprint is documented as well
assert '/blueprint/foo' in swagger['paths']
def test_swagger_html(test_app):
r = test_app.get("/api/")
assert "/swagger.json" in r.data.decode()
assert r.status_code == 200
| 2.265625 | 2 |
pyforms/terminal/Controls/ControlProgress.py | sunj1/my_pyforms | 0 | 12791119 | from pyforms.terminal.Controls.ControlBase import ControlBase
class ControlProgress(ControlBase):
_min = 0
_max = 100
def __init__(self, label = "%p%", defaultValue = 0, min = 0, max = 100, helptext=None):
self._updateSlider = True
self._min = min
self._max = max
ControlBase.__init__(self, label, defaultValue)
def initControl(self):
#return """<div id='id%s' class='progressbar' ></div>""" % ( self._name )
return "controls.push(new ControlProgress('"+self._name+"'));"
@property
def value(self): return self._value
@value.setter
def value(self, value): self._form.horizontalSlider.setValue( value )
@property
def min(self): return self._form.horizontalSlider.minimum()
@min.setter
def min(self, value): self._form.horizontalSlider.setMinimum(value)
@property
def max(self): return self._form.horizontalSlider.maximum()
@max.setter
def max(self, value): self._form.horizontalSlider.setMaximum(value)
| 2.859375 | 3 |
DE10.py | mipsparc/DE15 | 2 | 12791120 | <reponame>mipsparc/DE15
#coding:utf-8
import math
from Brake import BrakeStatues
# A library that reproduces the behaviour of the JNR (Japanese National Railways) class DE10 diesel-hydraulic locomotive
class DE10:
def __init__(self):
        # Vehicle speed (m/s)
self.speed = 0
        # Master controller (throttle) notch, 0-14
self.mascon_level = 0
        # Emergency brake cylinder pressure
self.BC_MAX_EB = 3.5
        # Maximum service brake cylinder pressure (the real locomotive uses 5.7 kg/cm2)
self.BC_MAX = 3.0
        # Brake cylinder pressure (used here as deceleration)
self.bc = self.BC_MAX
        # Brake equipment state
self.brake_status = BrakeStatues.FIX
        # Brake level from 0 (run) to 1 (full brake)
self.brake_level = 0
        # Emergency brake state
self.eb = False
        # Acceleration reduction when hauling passenger/freight cars (light engine: 1)
self.freight = 1
        # Target brake cylinder pressure
self.goal_bc = self.BC_MAX
        # Direction (0 means neutral)
self.way = 0
def getSmoothLevel(self):
        # y = log2(x+1), scaled so the maximum is 1
return (math.log2(self.mascon_level+1))/4.0
    # Advance the simulation by 0.1 seconds
def advanceTime(self):
        # Compute the acceleration (m/s2)
if self.speed < 3.33: # 12kph
accel = self.getSmoothLevel() * 0.803
elif self.speed < 6.94: # 25kph
accel = self.getSmoothLevel() * 0.5
elif self.speed < 12.5: # 45kph
accel = self.getSmoothLevel() * 0.333
elif self.speed < 23.5: # 84.6kph
accel = self.getSmoothLevel() * 0.194
        # At maximum speed the acceleration becomes 0
else:
accel = 0
        # In the neutral position the engine only revs with no load and does not accelerate
if self.getWay() == 0:
            print('revving with no load')
accel = 0
        # Derive the target brake cylinder pressure from the brake equipment state
if self.brake_status in (BrakeStatues.ERROR_SENSOR, BrakeStatues.ERROR, BrakeStatues.EMER):
self.eb = True
elif self.brake_status in (BrakeStatues.FIX, BrakeStatues.MAX_BRAKE):
self.goal_bc = self.BC_MAX
elif self.brake_status == BrakeStatues.BRAKE:
self.goal_bc = round(self.BC_MAX * self.brake_level, 2)
elif self.brake_status == BrakeStatues.RUN:
self.goal_bc = 0.0
elif self.brake_status == BrakeStatues.LOWER_BRAKE:
self.goal_bc = 0.0
        # Emergency brake
if self.eb:
self.goal_bc = self.BC_MAX_EB
self.setMascon(0)
            # Reset once the locomotive has stopped
if self.speed == 0:
self.eb = False
        # Brake application/release per 0.1 s; this part does not follow the real hardware
        # Once a target brake cylinder pressure is set, the value moves slowly towards it
        # bc is treated as a deceleration (m/s2); this also does not follow the real hardware
if abs(self.bc - self.goal_bc) < 0.1:
self.goal_bc = self.bc
elif self.bc > self.goal_bc:
self.bc -= (self.bc - self.goal_bc) / 5.0
elif self.bc < self.goal_bc:
self.bc += (self.goal_bc - self.bc) / 5.0
        # Round the value
self.bc = round(self.bc, 2)
        # Running resistance
if self.bc < 0.1:
self.bc = 0.1
elif self.bc > self.BC_MAX_EB:
self.bc = self.BC_MAX_EB
        # Acceleration/deceleration calculation
self.speed = self.speed + (accel * 1.3 - self.bc / 1.5) * 0.1 * self.freight
if self.speed < 0:
self.speed = 0
def getSpeed(self):
return self.speed
def setWay(self, way):
self.way = way
def getWay(self):
return self.way
    # Set the master controller notch (0-14); powering is not possible during emergency braking
def setMascon(self, mascon_level):
if not self.eb:
self.mascon_level = mascon_level
else:
self.mascon_level = 0
    # Set the brake level from 0 (run) to 1 (full brake)
def setBrake(self, brake_level):
self.brake_level = brake_level
    # Set the brake equipment state (emergency, brake, release, ...)
def setBrakeStatus(self, brake_status):
self.brake_status = brake_status
def getBp(self):
        # Starts at 490 and decreases as the brake cylinder pressure increases
return (self.bc / self.BC_MAX_EB) * -10 + 490 + 10
    # Derive the actual brake pipe pressure from the notional brake cylinder pressure value
    # The brake pipe is normally at 490 kPa; a 140 kPa reduction down to 350 kPa applies maximum braking
def getBc(self):
return (self.bc / self.BC_MAX_EB) * 350
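# Illustrative usage sketch (not part of the original library): drive the simulation for
# ten seconds with notch 5 applied and the brake released, then print the resulting speed.
if __name__ == '__main__':
    loco = DE10()
    loco.setWay(1)                         # forward
    loco.setBrakeStatus(BrakeStatues.RUN)  # release the brake
    loco.setMascon(5)
    for _ in range(100):                   # 100 steps of 0.1 s = 10 s
        loco.advanceTime()
    print('speed after 10 s: {:.2f} m/s'.format(loco.getSpeed()))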
| 2.546875 | 3 |
spherical_point_source.py | BrokenVoodooDoll/MirrorRayPath | 0 | 12791121 | import matplotlib.pyplot as plt
import numpy as np
from numpy.lib.function_base import angle
radius = 100 # curvature radius of the mirror in mm (must be positive)
angle_d = 30 # maximum angle of incidence of the incident beam in degrees
num_rays = 21 # number of rays
source_pos = 80 # source position in mm (must be positive)
focal_length = radius / 2 # focal length of the mirror
y = np.linspace(-radius, radius, 1000)
# mirror equation z = sqrt(R^2 - y^2) - R
def surface(y):
return np.sqrt(radius ** 2 - y ** 2) - radius
# angle between the incident ray and the line connecting the point of incidence
# of the ray on the mirror and the center of curvature of the mirror
def epsilon(inc_angle):
q = radius - source_pos
return np.arcsin(q / radius * np.sin(inc_angle))
# angle of reflected ray
def ref_angle(inc_angle):
return inc_angle - 2 * epsilon(inc_angle)
# the z-coordinate of the intersection of the reflected ray with the axis
def ref_z(inc_angle):
q = radius * np.sin(-epsilon(inc_angle)) / np.sin(ref_angle(inc_angle))
return radius - q
# the y-coordinate of the intersection of the incident ray with the mirror
def height(inc_angle):
phi = ref_angle(inc_angle) + epsilon(inc_angle)
return radius * np.sin(phi)
# line equation for extension of the reflected ray
def line(inc_angle, z, z0):
return np.tan(inc_angle) * (z - z0)
plt.figure(figsize=(13, 8))
plt.plot(surface(y), y) # mirror surface visualization
plt.plot([-2 * radius, 0], [0, 0]) # axis of the mirror
plt.plot([-focal_length], [0], 'o') # focal point
for ang in np.linspace(-angle_d, angle_d, num_rays):
inc_angle = ang * np.pi / 180
h = height(inc_angle)
z_inc = np.array([-source_pos, surface(h)])
y_inc = np.array([0, h])
plt.plot(z_inc, y_inc, 'k', lw=1) # draw incident beam
z_0 = ref_z(inc_angle)
if np.isnan(z_0):
z_0 = -2 * radius
if source_pos >= focal_length:
z_0 = -z_0 if z_0 > 0 else z_0
else:
z_0 = z_0 if z_0 > 0 else -z_0
z_ref = np.array([surface(h), -2 * radius])
y_ref = np.array([h, line(ref_angle(inc_angle), -2 * radius, z_0)])
if abs(source_pos) < abs(2 * focal_length) and abs(source_pos) > abs(focal_length) and abs(z_0) > abs(2 * radius):
z_ref = np.array([surface(h), z_0])
y_ref = np.array([h, 0])
plt.plot(z_ref, y_ref, 'r', lw=1)
plt.title("Radius = {:.1f} mm. Focal length = {:.1f} mm. Source position = {:.1f} mm.\nMaximum incident angle = {:.1f} deg. Number of rays = {}".format(radius, focal_length, -source_pos, angle_d, num_rays))
plt.xlabel("z, mm")
plt.ylabel("r, mm")
plt.ylim(-radius, radius)
plt.xlim(-2 * radius, 0)
plt.grid()
plt.show() | 3.40625 | 3 |
peyotl/test/test_tokenizer.py | mtholder/peyotl | 6 | 12791122 | <filename>peyotl/test/test_tokenizer.py
#! /usr/bin/env python
from peyotl.utility.tokenizer import NewickTokenizer, NewickEvents, NewickEventFactory
from peyotl.utility.str_util import StringIO
from peyotl.utility import get_logger
import unittest
from copy import deepcopy
_LOG = get_logger(__name__)
class TestNewickTokenizer(unittest.TestCase):
def testSimple(self):
exp = ['(', '(', 'h', ',', 'p', ')', 'hp', ',', 'g', ')', 'hpg', ';']
content = '((h,p)hp,g)hpg;'
self._do_test(content, exp)
content = '((h,p[test])hp,g)hpg;'
self._do_test(content, exp)
content = ' ( ( h , p[test] [test2]) hp, g) hpg ;'
self._do_test(content, exp)
def testQuoted(self):
exp = ['(', '(', 'h ', ',', 'p', ')', 'h p', ',', "g()[],':_", ')', 'hpg', ';']
content = "((h_ ,'p')h p,'g()[],'':_')hpg;"
self._do_test(content, exp)
content = "(('h ',p)h p,'g()[],'':_')hpg;"
self._do_test(content, exp)
def _do_test(self, content, expected):
self.assertEqual([i for i in NewickTokenizer(StringIO(content))], expected)
def testOddQuotes(self):
content = "((h_ ,'p)h p,g()[],:_)hpg;"
        tok = NewickTokenizer(StringIO(content))
        self.assertRaises(Exception, tok.tokens)
content = "((h_ ,'p')h p,'g()[]',:_')hpg;"
tok = NewickTokenizer(StringIO(content))
self.assertRaises(Exception, tok.tokens)
def testBranchLen(self):
exp = ['(', '(', 'h', ':', '4.0', ',', 'p', ':', '1.1461E-5', ')',
'hp', ':', '1351.146436', ',', 'g', ')', 'hpg', ';']
content = '((h:4.0,p:1.1461E-5)hp:1351.146436,g)hpg;'
self._do_test(content, exp)
class TestNewickEvents(unittest.TestCase):
def testSimple(self):
exp = [{'type': NewickEvents.OPEN_SUBTREE, 'comments': []},
{'type': NewickEvents.OPEN_SUBTREE, 'comments': []},
{'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'h'},
{'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'p'},
{'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE, 'comments': [], 'label': 'hp'},
{'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'g'},
{'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE, 'comments': [], 'label': 'hpg'}
]
content = '((h,p)hp,g)hpg;'
self._do_test(content, exp)
content = '((h,[pretest]p[test][posttest])hp,g)hpg;'
exp = [{'type': NewickEvents.OPEN_SUBTREE, 'comments': []},
{'type': NewickEvents.OPEN_SUBTREE, 'comments': []},
{'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'h'},
{'edge_info': None, 'type': NewickEvents.TIP, 'comments': ['pretest', 'test', 'posttest'], 'label': 'p'},
{'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE, 'comments': [], 'label': 'hp'},
{'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'g'},
{'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE, 'comments': [], 'label': 'hpg'}
]
self._do_test(content, exp)
def _do_test(self, content, expected):
e = [deepcopy(i) for i in NewickEventFactory(tokenizer=NewickTokenizer(stream=StringIO(content)))]
# print(e)
self.assertEqual(e, expected)
if __name__ == "__main__":
unittest.main()
| 2.453125 | 2 |
imgprocalgs/algorithms/utilities.py | mateuszz0000/imgprocalgs | 8 | 12791123 | """ Module including utilities for main algorithms"""
from PIL import Image as PillowImage
from collections import namedtuple
ImageData = namedtuple("ImgData", 'header image')
HSV = namedtuple("HSV", 'h s v')
RGB = namedtuple("RGB", 'r g b')
class Image:
""" Wrapper for Image class for easier usage"""
def __init__(self, image_path: str):
self.image_path = image_path
self.image: PillowImage = PillowImage.open(self.image_path)
self.pixels = self.image.load()
def get_size(self):
"""
:return: x, y in pixels
"""
return self.image.size[0], self.image.size[1]
def create_empty_image(width: int, height: int) -> PillowImage:
return PillowImage.new("RGB", (width, height), "#000000")
def get_greyscale(red: int, green: int, blue: int) -> float:
return 0.2126 * red + 0.587 * green + 0.114 * blue
def rgb_to_hsv(red: int, green: int, blue: int) -> namedtuple:
_red = red / 255
_green = green / 255
_blue = blue / 255
c_max = max(_red, _green, _blue)
c_min = min(_red, _green, _blue)
delta = c_max - c_min
if delta > 0:
if c_max == _red:
h = 60 * (((_green - _blue) / delta) % 6)
elif c_max == _green:
h = 60 * (((_blue - _red) / delta) + 2)
elif c_max == _blue:
h = 60 * (((_red - _green) / delta) + 4)
else:
raise ValueError(f"c_max ({c_max} is not equal {_red}/{_green}/{_blue})")
else:
h = 0
s = 0 if c_max == 0 else delta/c_max
return HSV(h, s, c_max)
def hsv_to_rgb(h: float, s: float, v: float) -> namedtuple:
c = v * s
x = c * (1 - abs((h/60) % 2 - 1))
m = v - c
if 0 <= h < 60:
red, green, blue = c, x, 0
elif 60 <= h < 120:
red, green, blue = x, c, 0
elif 120 <= h < 180:
red, green, blue = 0, c, x
elif 180 <= h < 240:
red, green, blue = 0, x, c
elif 240 <= h < 300:
red, green, blue = x, 0, c
elif 300 <= h < 360:
red, green, blue = c, 0, x
else:
raise ValueError(f"h value: {h} is out of range (0, 360)")
return RGB(
int((red + m) * 255),
int((green + m) * 255),
int((blue + m) * 255)
)
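# Illustrative usage sketch (not part of the original module): round-trip a colour through
# HSV and back; small rounding differences are expected because of the int() truncation.
if __name__ == '__main__':
    hsv = rgb_to_hsv(200, 100, 50)
    rgb = hsv_to_rgb(hsv.h, hsv.s, hsv.v)
    print(hsv)
    print(rgb)  # approximately RGB(r=200, g=100, b=50)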
| 3.71875 | 4 |
d18/tokenizer.py | RuedigerLudwig/advent2020 | 2 | 12791124 | <filename>d18/tokenizer.py
import re
from typing import Optional
from common.utils import safe_get
from .token import Op, Operations, Token
class Tokenizer:
@staticmethod
def simple(line: str) -> Token:
return Simple(line).token
@staticmethod
def advanced(line: str) -> Token:
return Advanced(line).token
@staticmethod
def sum(tokens: list[Token]) -> int:
return sum(t.get_value() for t in tokens)
class TokenException(Exception):
pass
class Simple:
pattern = re.compile(r"\s+|\d+|[+*]|\(|\)")
def __init__(self, line: str):
self.bits: list[str] = [
p for p in Simple.pattern.findall(line.strip()) if p.strip() != ""
]
if not self.bits:
raise TokenException("Got nothing to parse")
token, pos = self.parse_expression(0)
if pos != len(self.bits):
raise TokenException("Could not parse line")
self.token = token
def parse_item(self, pos: int) -> tuple[Token, int]:
if self.bits[pos] == "(":
item, pos = self.parse_expression(pos + 1)
if safe_get(self.bits, pos, "") != ")":
raise TokenException("Bracket not closed correctly")
else:
item = Token.from_int(self.bits[pos])
return item, pos + 1
def parse_expression(self, pos: int) -> tuple[Token, int]:
token: Optional[Token] = None
op: Optional[Op] = None
while True:
item, pos = self.parse_item(pos)
token = Token.create(token, op, item)
if (s := safe_get(self.bits, pos, "")) not in Operations:
return token, pos
op = s # type: ignore
pos += 1
class Advanced(Simple):
def parse_sub_expression(self, pos: int) -> tuple[Token, int]:
token: Optional[Token] = None
while True:
item, pos = self.parse_item(pos)
token = Token.create(token, '+', item)
if safe_get(self.bits, pos, "") != "+":
return token, pos
pos += 1
def parse_expression(self, pos: int) -> tuple[Token, int]:
token: Optional[Token] = None
while True:
item, pos = self.parse_sub_expression(pos)
token = Token.create(token, '*', item)
if safe_get(self.bits, pos, "") != "*":
return token, pos
pos += 1
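# Illustrative sketch (not part of the original module): with the advanced rules addition
# binds tighter than multiplication, so the same expression can evaluate differently.
# The helper below is never called; it only documents the two evaluation orders.
def _example_tokenizer_usage() -> tuple[int, int]:
    simple = Tokenizer.simple("1 + 2 * 3 + 4").get_value()      # ((1 + 2) * 3) + 4 = 13
    advanced = Tokenizer.advanced("1 + 2 * 3 + 4").get_value()  # (1 + 2) * (3 + 4) = 21
    return simple, advanced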
| 3.0625 | 3 |
test/test_surface.py | simonpf/gprof_nn | 1 | 12791125 | """
Tests for the loading of surface maps for the GPROF-NN data processing.
"""
from datetime import datetime
import pytest
import numpy as np
from gprof_nn.data.surface import (read_land_mask,
read_autosnow,
read_emissivity_classes)
from gprof_nn.data.preprocessor import has_preprocessor
HAS_PREPROCESSOR = has_preprocessor()
@pytest.mark.skipif(not HAS_PREPROCESSOR, reason="Preprocessor missing.")
def test_read_land_mask():
"""
Test reading of land mask.
"""
mask = read_land_mask("GMI")
assert mask.mask.shape == (180 * 32, 360 * 32)
mask = read_land_mask("MHS")
assert mask.mask.shape == (180 * 16, 360 * 16)
# Ensure point in North Atlantic is classified as Ocean.
m = mask.interp({"longitude": -46.0, "latitude": 35.0})
assert np.isclose(m.mask.data, 0)
# Ensure point in Africa is classified as land.
m = mask.interp({"longitude": 0.0, "latitude": 20.0})
assert np.all(m.mask.data > 0)
@pytest.mark.skipif(not HAS_PREPROCESSOR, reason="Preprocessor missing.")
def test_read_autosnow():
"""
Test reading of autosnow files.
"""
autosnow = read_autosnow("2021-01-01T00:00:00")
# Ensure no snow around equator
autosnow_eq = autosnow.interp({"latitude": 0.0, "longitude": 0.0}, "nearest")
assert np.all(autosnow_eq.snow.data == 0)
@pytest.mark.skipif(not HAS_PREPROCESSOR, reason="Preprocessor missing.")
def test_read_emissivity_classes():
"""
Test reading of emissivity classes.
"""
data = read_emissivity_classes()
# Ensure point in North Atlantic is classified as Ocean.
data_i = data.interp({"longitude": -46.0, "latitude": 35.0})
assert np.all(np.isclose(data_i.emissivity.data, 0))
# Ensure point in Africa is classified as land.
data_i = data.interp({"longitude": 0.0, "latitude": 20.0})
assert np.all(data_i.emissivity.data > 0)
| 2.59375 | 3 |
scripts/split_tool_yml.py | usegalaxy-no/usegalaxy-no-tools | 1 | 12791126 | #!/usr/bin/env python
import yaml
from collections import defaultdict
import re
import os
import argparse
def slugify(value):
"""
    Normalizes a string: converts to lowercase, removes non-word characters,
    and converts spaces and hyphens to underscores.
    """
    value = re.sub(r'[^\w\s-]', '', value).strip().lower()
    value = re.sub(r'[-\s]+', '_', value)
return value
def main():
VERSION = 0.1
parser = argparse.ArgumentParser(description="Splits up a Ephemeris `get_tool_list` yml file for a Galaxy server into individual files for each Section Label.")
parser.add_argument("-i", "--infile", help="The returned `get_tool_list` yml file to split.")
parser.add_argument("-o", "--outdir", help="The output directory to put the split files into. Defaults to infile without the .yml.")
parser.add_argument("--version", action='store_true')
parser.add_argument("--verbose", action='store_true')
args = parser.parse_args()
if args.version:
print("split_tool_yml.py version: %.1f" % VERSION)
return
filename = args.infile
a = yaml.safe_load(open(filename, 'r'), )
outdir = re.sub('\.yml', '', filename)
if args.outdir:
outdir = args.outdir
if args.verbose:
print('Outdir: %s' % outdir)
if not os.path.isdir(outdir):
os.mkdir(outdir)
tools = a['tools']
categories = defaultdict(list)
for tool in tools:
categories[tool['tool_panel_section_label']].append(tool)
for cat in categories:
fname = str(cat)
good_fname = outdir + "/" + slugify(fname) + ".yml"
tool_yaml = {'tools': sorted(categories[cat], key=lambda x: x['name'] + x['owner'])}
if args.verbose:
print("Working on: %s" % good_fname)
with open(good_fname, 'w') as outfile:
yaml.dump(tool_yaml, outfile, default_flow_style=False)
return
if __name__ == "__main__":
main()
| 2.953125 | 3 |
tests/test_pod.py | RecursionSpace/OpenPod | 1 | 12791127 | ''' Unit test for pod.py '''
import sys
# import unittest
sys.path.insert(0, "openpod/")
# import hub
# class TestHub(unittest.TestCase):
# '''
# General tests for the hub.py file
# '''
# def test_xbee_flag_set_true(self):
# '''
# Check if the xbee flag is set to true.
# '''
# global XBEE_FLAG
# XBEE_FLAG = False
# hub.incoming_xbee_data()
# self.assertTrue(XBEE_FLAG)
| 2.34375 | 2 |
mantle/xilinx/mantle6/mothball/fulladder.py | splhack/mantle | 33 | 12791128 | from magma import *
from ..spartan6.CLB import CARRY
from ..spartan6.LUT import LUT5x2
__all__ = ['FullAdder']
#
# return I0, ..., In, CIN -> O, COUT
#
def FullCarry(k, expr6, expr5):
assert k <= 5
lut = LUT5x2(expr5, expr6)
args = []
if k >= 1:
args += ['input I0', lut.I0]
else:
wire(1, lut.I0)
if k >= 2:
args += ['input I1', lut.I1]
else:
wire(1, lut.I1)
if k >= 3:
args += ['input I2', lut.I2]
else:
wire(1, lut.I2)
if k >= 4:
args += ['input I3', lut.I3]
else:
wire(1, lut.I3)
if k >= 5:
args += ['input I4', lut.I4]
else:
wire(1, lut.I4)
CIN = Bit()
O, COUT = CARRY(lut.O6, lut.O5, CIN)
args += [ "output O", O,
"input CIN", CIN,
"output COUT", COUT ]
return AnonymousCircuit( args )
def FullAdder():
pass
| 2.25 | 2 |
tests/models.py | robertvaugh/durabledict | 42 | 12791129 | <filename>tests/models.py
from django.conf import settings
if not settings.configured:
settings.configure(
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'test.db'
}
},
INSTALLED_APPS=[
'tests',
],
DEBUG=True,
)
from django.db import models
class Setting(models.Model):
key = models.CharField(max_length=32, unique=True)
value = models.CharField(max_length=32, default='')
| 2.046875 | 2 |
src/jt/pyjava/_pyjava/_typehandler/string_handler.py | karpierz/jtypes.pyjava | 2 | 12791130 | # Copyright (c) 2015-2022 <NAME>
# Licensed under the MIT License
# https://opensource.org/licenses/MIT
from jvm.lib import public
from jvm.lib import cached
from .._constants import EJavaType
from .._constants import EMatch
from .._jvm import JVM
from ._base_handler import _ObjectHandler
@public
class StringHandler(_ObjectHandler):
__slots__ = ()
def __init__(self, state):
super().__init__(state, EJavaType.STRING,
JVM.jvm.JClass.getStringClass())
@cached
def _class(self):
return self._state.class_importer.java_lang_String
def match(self, val):
if val is None:
return EMatch.IMPLICIT
elif isinstance(val, str):
return EMatch.PERFECT
elif isinstance(val, self._class()):#and val.__javaobject__.getClass() == self._class():
return EMatch.PERFECT
return EMatch.NONE
def toJava(self, val):
if val is None:
return None
elif isinstance(val, str):
return self._jt_jvm.JObject.newString(val)
elif isinstance(val, self._class()):#and val.__javaobject__.getClass() == self._class():
return val.__javaobject__
raise TypeError("Cannot convert value to Java string")
def toPython(self, val):
if val is None:
return None
else:
if isinstance(val, self._jt_jvm.JObject):
val = val.stringValue()
return val
def getStatic(self, fld, cls):
return fld.getStaticString(cls)
def setStatic(self, fld, cls, val):
if val is None:
fld.setStaticString(cls, None)
elif isinstance(val, str):
fld.setStaticString(cls, val)
elif isinstance(val, self._class()):
fld.setStaticObject(cls, val.__javaobject__)
else:
raise TypeError("Cannot convert value to Java string")
def getInstance(self, fld, this):
return fld.getString(this)
def setInstance(self, fld, this, val):
if val is None:
fld.setString(this, None)
elif isinstance(val, str):
fld.setString(this, val)
elif isinstance(val, self._class()):
fld.setObject(this, val.__javaobject__)
else:
raise TypeError("Cannot convert value to Java string")
def setArgument(self, pdescr, args, pos, val):
if val is None:
args.setString(pos, None)
elif isinstance(val, str):
args.setString(pos, val)
elif isinstance(val, self._class()):
args.setObject(pos, val.__javaobject__)
else:
raise TypeError("Cannot convert value to Java string")
def callStatic(self, meth, cls, args):
value = meth.callStaticString(cls, args)
return value
def callInstance(self, meth, this, args):
value = meth.callInstanceString(this, args)
return value
| 2 | 2 |
app/resources/root.py | abdghani995/docker-falcon | 0 | 12791131 | <gh_stars>0
import os
import uuid
import json
import redis
from app.config import settings
from app.model import *
def set_cache(key, value):
rclient = redis.Redis.from_url(settings.get('REDIS_URL'))
rclient.set(key, json.dumps(value))
def get_cache(key):
rclient = redis.Redis.from_url(settings.get('REDIS_URL'))
return rclient.get(key)
class RootResources:
def on_get(self, req, resp):
if req.params.get('name'):
self.sess.add(Countries(
country_id= uuid.uuid4(),
country_name= req.params.get('name')
))
self.sess.commit()
countries_list = self.sess.query(Countries).all()
resp.media = {
"message": "Helloorld!",
"country": [_country.repr for _country in countries_list]
}
class RootNameResources:
def on_post(self, req, resp, name):
resp.media = {
"message": "Hello, {}!".format(name.capitalize())
}
| 2.296875 | 2 |
sandbox/mlp.py | luiarthur/CytoPy | 1 | 12791132 | # I intend to use this as a Variational auto-encoder for the
# missing y.
# See paper: https://arxiv.org/abs/1312.6114
import torch
# Define sizes
input_size = 3
output_size = 2
hidden_size = 5
# Create multi-layer perceptron
fc1 = torch.nn.Linear(input_size, hidden_size)
act_fn = torch.nn.Tanh()
fc2 = torch.nn.Linear(hidden_size, output_size)
# Main
num_obs = 100
x = torch.randn(num_obs, input_size)
out = fc1(x)
out = act_fn(out)
out = fc2(out)
print(out)
# Test dims
y = torch.randn(20, 5)
m = torch.randn(20, 5)
b = torch.randn(3) * torch.ones(20, 3)
# I want this to be 20 x (5 + 5 + 3)
input_vec = torch.cat([y, m, b], dim=-1).shape
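# A minimal alternative sketch (not part of the original file): the same
# three-layer perceptron expressed with torch.nn.Sequential, which is convenient
# once this grows into the encoder of a VAE.
encoder = torch.nn.Sequential(
    torch.nn.Linear(input_size, hidden_size),
    torch.nn.Tanh(),
    torch.nn.Linear(hidden_size, output_size),
)
print(encoder(x).shape)  # expected: torch.Size([100, 2])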
| 2.65625 | 3 |
pinecone/exceptions.py | amourao/pinecone-python-client | 7 | 12791133 | #
# Copyright (c) 2020-2021 Pinecone Systems Inc. All right reserved.
#
from .core.exceptions import PineconeException, PineconeProtocolError
from .core.client.exceptions import OpenApiException, ApiAttributeError, ApiTypeError, ApiValueError, \
ApiKeyError, ApiException, NotFoundException, UnauthorizedException, ForbiddenException, ServiceException
__all__ = [
"PineconeException",
"PineconeProtocolError",
"OpenApiException",
"ApiAttributeError",
"ApiTypeError",
"ApiValueError",
"ApiKeyError",
"ApiException",
"NotFoundException",
"UnauthorizedException",
"ForbiddenException",
"ServiceException",
]
| 1.125 | 1 |
schemas/base.py | sr-vazkez/ecommerce-fastapi | 0 | 12791134 | <gh_stars>0
from pydantic import BaseModel
class UserBase(BaseModel):
email: str
class BaseComplaint(BaseModel):
title : str
description : str
photo_url : str
amount: float | 2.125 | 2 |
app.py | govle-192-21-2/govle | 0 | 12791135 | <filename>app.py
# Main application file for GoVLê
from controllers import init_api
from controllers.database import Database
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from dotenv import load_dotenv
from firebase_admin import credentials, db
from flask import Flask, redirect, request, url_for
from flask_login import LoginManager
from models.profile import Profile
from os import environ
from views import init_views
import firebase_admin
# Environment variables
load_dotenv()
# Flask app
app = Flask(__name__)
# Authentication
if 'FLASK_SECRET_KEY' not in environ:
raise RuntimeError('FLASK_SECRET_KEY environment variable not set')
app.secret_key = environ['FLASK_SECRET_KEY']
login_manager = LoginManager()
login_manager.init_app(app)
# Firebase: Database URL is required
if 'FIREBASE_DATABASE_URL' not in environ:
raise RuntimeError('FIREBASE_DATABASE_URL environment variable not set')
# Firebase: Parse credentials.json (not in tree, must be supplied)
cred = credentials.Certificate('credentials.json')
# Firebase: Initialize and get database root ref
firebase_admin.initialize_app(cred, {
'databaseURL': environ['FIREBASE_DATABASE_URL']
})
app.config['DB'] = Database(db.reference())
# RSA cipher for decrypting credentials
with open('priv.pem', 'rb') as priv_key_file:
app.config['RSA_CIPHER'] = serialization.load_pem_private_key(
priv_key_file.read(),
password=<PASSWORD>,
backend=default_backend())
# User loader
@login_manager.user_loader
def user_loader(user_id: str) -> Profile:
return app.config['DB'].lookup_user_by_id(user_id)
# Unauthorized error handler
@login_manager.unauthorized_handler
def unauthorized_handler():
return redirect(url_for('login.login_page') + '?next=' + request.path)
# Routes
init_views(app)
init_api(app)
# Run app
if __name__ == '__main__':
app.run(debug=True)
| 2.21875 | 2 |
scripts/trial/turtlesim/turtlesim_motion_server.py | miroslavradojevic/agv_motion | 0 | 12791136 | <reponame>miroslavradojevic/agv_motion
#!/usr/bin/env python
import rospy
import actionlib
import math
from geometry_msgs.msg import Twist
from turtlesim.msg import Pose
from agv_motion.msg import MoveTurtlesimAction
from agv_motion.msg import MoveTurtlesimFeedback
from agv_motion.msg import MoveTurtlesimResult
class TurtlesimMotionServer:
def __init__(self):
self._as = actionlib.SimpleActionServer("/turtlesim_action", MoveTurtlesimAction, execute_cb=self.on_goal, auto_start=False)
self._as.start()
# parameters
self.TOLERANCE = 0.01
self.XMIN = 1.0
self.XMAX = 10.0
self.YMIN = 1.0
self.YMAX = 10.0
self.K_LINEAR = 1.0
self.K_ANGULAR = 3.0
self._x = None
self._y = None
self._yaw = None
self._received_position = False
self._trajectory_length = 0
self._turtlesim_pub = rospy.Publisher("/turtle1/cmd_vel", Twist, queue_size=10)
self._turtlesim_sub = rospy.Subscriber("/turtle1/pose", Pose, self.pose_callback)
rospy.logwarn("waiting position...")
while True:
if self._received_position:
rospy.loginfo("received position")
break
rospy.loginfo("Server has been started with robot at ({}, {} | {})".format(self._x, self._y, self._yaw))
def send_feedback(self):
feedback = MoveTurtlesimFeedback()
feedback.trajectory_length = self._trajectory_length
self._as.publish_feedback(feedback)
def on_goal(self, goal):
rospy.loginfo("Goal " + str(goal) + " received")
goal_x = goal.x
goal_y = goal.y
success = False
preempted = False
invalid_parameters = False
out_of_boundaries = False
message = ""
if abs(math.sqrt((goal_x-self._x)**2 + (goal_y-self._y)**2)) < self.TOLERANCE:
message = "Current position is already at the goal"
success = True
if not (self.XMIN <= goal_x <= self.XMAX) or not (self.YMIN <= goal_y <= self.YMAX):
message = "Invalid goal position"
invalid_parameters = True
rate = rospy.Rate(10.0)
self._trajectory_length = 0.0
velocity_message = Twist()
while not rospy.is_shutdown() and not success and not invalid_parameters and not out_of_boundaries:
if self._as.is_preempt_requested():
if abs(math.sqrt((goal_x-self._x)**2 + (goal_y-self._y)**2)) < self.TOLERANCE:
message = "Preempted but already at goal position"
success = True
break
else:
message = "Preempted and stopped execution"
preempted = True
break
if not (self.XMIN <= self._x <= self.XMAX) or not (self.YMIN <= self._y <= self.YMAX):
message = "Out of boundaries"
out_of_boundaries = True
break
diff = abs(math.sqrt((goal_x-self._x)**2 + (goal_y-self._y)**2))
desired_angle_goal = math.atan2(goal_y-self._y, goal_x-self._x)
if diff < self.TOLERANCE:
message = "Success - reached the goal"
success = True
break
else:
self._trajectory_length += diff
# execute move to goal
linear_speed = self.K_LINEAR * diff
angular_speed = self.K_ANGULAR * (desired_angle_goal-self._yaw)
velocity_message.linear.x = linear_speed
velocity_message.angular.z = angular_speed
self._turtlesim_pub.publish(velocity_message)
# publish after each update
self.send_feedback()
rate.sleep()
# stop
velocity_message.linear.x = 0
velocity_message.angular.z = 0
self._turtlesim_pub.publish(velocity_message)
# send result
result = MoveTurtlesimResult()
result.message = message
rospy.loginfo("Send goal result to client")
if preempted:
rospy.loginfo("Preempted")
self._as.set_preempted(result)
elif success:
rospy.loginfo("Success")
self._as.set_succeeded(result)
elif out_of_boundaries:
rospy.loginfo("Aborted - out of boundaries")
self._as.set_aborted(result)
else:
rospy.loginfo("Aborted - invalid goal parameters")
self._as.set_aborted(result)
def pose_callback(self, pose_message):
self._x = pose_message.x
self._y = pose_message.y
self._yaw = pose_message.theta
self._received_position = True
if __name__ == '__main__':
rospy.init_node("turtlesim_move_server") # , anonymous=True
server = TurtlesimMotionServer()
rospy.spin() | 2.390625 | 2 |
presenter.py | cloudRoutine/curveship | 1 | 12791137 | 'Format and display the output text.'
__author__ = '<NAME>'
__copyright__ = 'Copyright 2011 <NAME>'
__license__ = 'ISC'
__version__ = '0.5.0.0'
__status__ = 'Development'
import os
import re
import struct
def ioctl_term_size(filed):
'Attempt to find terminal dimensions using an IO Control system call.'
try:
import fcntl, termios
packed = fcntl.ioctl(filed, termios.TIOCGWINSZ, '1234')
rows_cols = struct.unpack('hh', packed)
except ImportError:
return None
if rows_cols == (0, 0):
return None
return rows_cols
def terminal_size():
"""Determine the terminal size or set a default size if that fails.
From <NAME>'s code, http://pdos.csail.mit.edu/~cblake/cls/cls.py
Modifications by <NAME> to allow Curveship to run in GNU Emacs."""
rows_cols = ioctl_term_size(0) or ioctl_term_size(1) or ioctl_term_size(2)
if not rows_cols:
try:
filed = os.open(os.ctermid(), os.O_RDONLY)
rows_cols = ioctl_term_size(filed)
os.close(filed)
except AttributeError:
pass
if not rows_cols:
# Some shells may set these environment variables.
rows_cols = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))
return int(rows_cols[1]), int(rows_cols[0]) # Reverses it to cols, rows.
def _break_words(string, char_limit):
'Lineate the string based on the passed-in character limit.'
if len(string) <= char_limit:
next_line = string
string = ''
elif '\n' in string[0:char_limit]:
first_newline = string.index('\n')
next_line = string[0:first_newline]
string = string[(first_newline + 1):]
elif ' ' not in string[0:char_limit]:
next_line = string[0:char_limit]
string = string[char_limit:]
else:
last_space = string[0:char_limit].rindex(' ')
next_line = string[0:last_space]
string = string[(last_space + 1):]
return (next_line, string)
def present(string, out_streams, pre='', post='\n\n'):
'Print the string, broken into lines, to the output streams.'
if len(string) == 0:
return
if string[-1:] == '\n':
post = re.sub('^[ \t]+', '', post)
string = pre + string + post
while len(string) > 0:
(cols, _) = terminal_size()
(next_line, string) = _break_words(string, cols)
out_streams.write(next_line)
if len(string) > 0:
out_streams.write('\n')
out_streams.write(string)
def center(string, out_streams, pre='', post='\n'):
'Center the output and print it to the output streams.'
string = pre + string + post
(cols, _) = terminal_size()
while len(string) > 0:
(next_line, string) = _break_words(string, cols)
while len(next_line) > 0 and next_line[0] == '\n':
out_streams.write('\n')
next_line = next_line[1:]
spaces = ''
i = 1
while i <= (cols - len(next_line))/2:
spaces += ' '
i += 1
out_streams.write(' ' + spaces + next_line)
if len(string) > 0:
out_streams.write('\n')
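if __name__ == '__main__':
    # Small usage sketch (added for illustration; not part of the original
    # module). sys.stdout satisfies the out_streams interface because present()
    # and center() only call .write() on it.
    import sys
    present('This paragraph is wrapped to the current terminal width before '
            'being written to the output stream.', sys.stdout)
    center('--- centered heading ---', sys.stdout)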
| 2.890625 | 3 |
app/routes/build.py | lsst-sqre/ltd-dasher | 0 | 12791138 | <filename>app/routes/build.py
"""Routes at ``/build`` that implement dashboard builds."""
from flask import jsonify, request, current_app
from . import api
from .logging import log_route
from ..worker import build_dashboard_for_product
@api.route('/build', methods=['POST'])
@log_route
def build_dashboards():
"""Build dashboard(s).
:statuscode 202: Dashboard rebuild trigger sent.
"""
for product_resource_url in request.json['product_urls']:
build_dashboard_for_product(product_resource_url, current_app.config)
# Ideally we'd provide a status endpoint, and put that URL in the header
return jsonify({}), 202, {}
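# Illustrative request (not from the original source; the product URL below is a
# made-up placeholder). A client would trigger dashboard builds with e.g.:
#
#   POST /build
#   {"product_urls": ["https://example.org/products/my-product"]}
#
# and should expect an empty JSON body with HTTP status 202 in response.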
| 2.328125 | 2 |
decision-trees/trees.py | HectorJuarezL/machine-learning-codes | 0 | 12791139 | from math import log
import operator
def createDataSet(): # Function that returns a small example dataset
dataSet = [[1, 1, 'yes'],
[1, 1, 'yes'],
[1, 0, 'no'],
[0, 1, 'no'],
[0, 1, 'no']]
labels = ['no surfacing','flippers']
#change to discrete values
return dataSet, labels
def calcShannonEnt(dataSet): # Computes the Shannon entropy of the dataset
    numEntries = len(dataSet) # Number of rows in the dataset
    labelCounts = {} # Dictionary that stores the count of each class
    for featVec in dataSet: # Iterate over every feature vector (row)
        currentLabel = featVec[-1] # The class label is the last element of the feature vector
        if currentLabel not in labelCounts.keys(): labelCounts[currentLabel] = 0 # If the current class is not in labelCounts yet, add the key and initialize it to 0
        labelCounts[currentLabel] += 1 # Increment the count for this class
    shannonEnt = 0.0 # Initialize the entropy to zero
    for key in labelCounts: # Compute the probability of each label and accumulate its contribution
        prob = float(labelCounts[key])/numEntries # Probability of this class
        shannonEnt -= prob * log(prob,2) # Add the entropy term, using the base-2 logarithm
    return shannonEnt # Return the entropy
def splitDataSet(dataSet, axis, value): # Returns every row of the dataset whose column (axis) equals the given value
    retDataSet = [] # Variable that will hold the resulting split
    for featVec in dataSet: # Loop over every row of the dataset
        if featVec[axis] == value: # Check whether the row satisfies the given condition
            reducedFeatVec = featVec[:axis] # Build a row with every element before the axis column
            reducedFeatVec.extend(featVec[axis+1:]) # Append the remaining elements, skipping the axis column
            retDataSet.append(reducedFeatVec) # Add the row to the returned matrix
    return retDataSet # Return the rows that match the given column value
def chooseBestFeatureToSplit(dataSet): # This is the core function of the algorithm
    numFeatures = len(dataSet[0]) - 1 # Number of features (columns); note that the last column holds the class label
    baseEntropy = calcShannonEnt(dataSet) # Entropy of the whole dataset
    bestInfoGain = 0.0; bestFeature = -1 # Initialize the best gain and the best feature
    for i in range(numFeatures): # Loop over every feature
        featList = [example[i] for example in dataSet] # List with the value of feature "i" for every row
        uniqueVals = set(featList) # Set of all the unique values of that feature
        newEntropy = 0.0 # Initialize the entropy value
        for value in uniqueVals: # Loop over every unique value of feature "i"
            subDataSet = splitDataSet(dataSet, i, value) # Subset of the data with that value of feature i
            prob = len(subDataSet)/float(len(dataSet)) # Probability of this subset
            newEntropy += prob * calcShannonEnt(subDataSet) # Accumulate the weighted entropy of this subset
        infoGain = baseEntropy - newEntropy # Compute the information gain
        if (infoGain > bestInfoGain): # Compare the gain with the best gain so far
            bestInfoGain = infoGain # If it is better, store it as the new best
            bestFeature = i # Also store i, i.e. the index of the best feature
    return bestFeature # Finally, return the index of the best feature
def majorityCnt(classList): # Returns the value of the majority class
    classCount={} # Dictionary that will hold the count of each class
    for vote in classList: # Loop over the list of classes
        if vote not in classCount.keys(): classCount[vote] = 0 # If the key does not exist yet, add it with a count of zero
        classCount[vote] += 1 # Increment the count for that key
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True) # Sort from highest to lowest (items() replaces the Python 2-only iteritems())
    return sortedClassCount[0][0] # Return the first element, i.e. the majority class
def createTree(dataSet,labels): # This function builds the tree recursively
    classList = [example[-1] for example in dataSet] # The class list is the last column of the dataset, so it is assigned to classList
    if classList.count(classList[0]) == len(classList): # Check whether every entry in the class list is the same
        return classList[0] # If so, all classes are equal, the recursion stops here and the class is returned
    if len(dataSet[0]) == 1: # Likewise, if there are no features left in the dataset, stop splitting
        return majorityCnt(classList) # Return the value of the majority class
    bestFeat = chooseBestFeatureToSplit(dataSet) # Get the best feature to split on
    bestFeatLabel = labels[bestFeat] # Label of the best feature
    myTree = {bestFeatLabel:{}} # Create a dictionary of dictionaries keyed by the label of the best feature
    del(labels[bestFeat]) # Remove the best feature's label from the label vector; in short, the labels are split in two
    featValues = [example[bestFeat] for example in dataSet] # Values of the best feature, needed for the next splits
    uniqueVals = set(featValues) # Unique values of this feature
    for value in uniqueVals: # Loop over the unique values of the best feature
        subLabels = labels[:] # Copy the labels; passing labels directly would make every subtree share the same variable
        myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet, bestFeat, value),subLabels) # For each unique value of the best feature, attach the corresponding subtree
    return myTree # Return the tree
def classify(inputTree,featLabels,testVec): # Walks a tree to produce the classification result; also a recursive function
    firstStr = list(inputTree)[0] # Since the tree is a dictionary of dictionaries, this gets the first key, i.e. the label
    secondDict = inputTree[firstStr] # Value of the first key, which is a set of subtrees
    featIndex = featLabels.index(firstStr) # Index of the feature that corresponds to the current level of the tree
    key = testVec[featIndex] # Value of that feature in the input vector
    valueOfFeat = secondDict[key] # Descend into the tree, getting the subtree that corresponds to that feature value
    if isinstance(valueOfFeat, dict): # Check whether it really is a subtree
        classLabel = classify(valueOfFeat, featLabels, testVec) # If so, call itself with the new subtree (recursion)
    else: classLabel = valueOfFeat # Otherwise, the class label is the value of the feature
    return classLabel # Return the class label
def storeTree(inputTree,filename): # Saves a tree to a file
import pickle
fw = open(filename,'wb')
pickle.dump(inputTree,fw)
fw.close()
def grabTree(filename): # Loads a tree from a file
import pickle
fr = open(filename,'rb')
return pickle.load(fr)
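if __name__ == '__main__':
    # Small usage sketch (added for illustration; not part of the original file).
    # A copy of the labels is passed to createTree because it deletes entries
    # from the list it receives.
    myDat, featLabels = createDataSet()
    myTree = createTree(myDat, featLabels[:])
    print(myTree)  # -> {'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}
    print(classify(myTree, featLabels, [1, 0]))  # -> 'no'
    print(classify(myTree, featLabels, [1, 1]))  # -> 'yes'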
| 3.546875 | 4 |
config/fitbit_webservice_config.py | PruthviKumarBK/fitbit_webservice | 11 | 12791140 | __author__ = '<NAME>'
"""
Author: <NAME>
Email: <EMAIL>
Date: February 2, 2018
This is a simple python file that holds all the configuration parameters required for fitbit webservice wrapper.
"""
class Fitbit_Webservice_Config(object):
def __init__(self):
super(Fitbit_Webservice_Config, self).__init__()
self.get_fitbit_endpoints = self._getFitbitEndpoints
def _getFitbitEndpoints(self):
"""
All the valid endpoints supported by Fitbit API's.
:return: A dictionary with valid hash map for fitbit API's which will be utilized by the wrapper.
"""
return {
'todays_steps_realtime': '/1/user/-/activities/steps/date/today/1d.json',
'last_7_days_steps': '/1/user/-/activities/steps/date/today/7d.json',
'todays_calories_realtime': '/1/user/-/activities/calories/date/today/1d.json',
'last_7_days_calories': '/1/user/-/activities/calories/date/today/7d.json',
'todays_sedentary_minutes_realtime': '/1/user/-/activities/minutesSedentary/date/today/1d.json',
'last_7_days_sedentary_minutes': '/1/user/-/activities/minutesSedentary/date/today/7d.json',
'todays_lightly_active_minutes_realtime': '/1/user/-/activities/minutesLightlyActive/date/today/1d.json',
'todays_fairly_active_minutes_realtime': '/1/user/-/activities/minutesFairlyActive/date/today/1d.json',
'todays_very_active_minutes_realtime': '/1/user/-/activities/minutesVeryActive/date/today/1d.json',
'last_7_days_lightly_active_minutes': '/1/user/-/activities/minutesLightlyActive/date/today/7d.json',
'last_7_days_fairly_active_minutes': '/1/user/-/activities/minutesFairlyActive/date/today/7d.json',
'last_7_days_very_active_minutes': '/1/user/-/activities/minutesVeryActive/date/today/7d.json',
'todays_realtime_distance_covered': '/1/user/-/activities/distance/date/today/1d.json',
'last_7_days_distance_covered': '/1/user/-/activities/distance/date/today/7d.json',
'lifetime_activities_details': '/1/user/-/activities.json',
'get_friends_leader_board': '/1/user/-/friends/leaderboard.json',
'todays_sleep_details': '/1.2/user/-/sleep/date/today.json',
'todays_heart_details': '/1/user/-/activities/heart/date/today/1d.json',
} | 2.15625 | 2 |
ema.py | hvt1609/kagglebirdcall | 43 | 12791141 | <gh_stars>10-100
import time
import warnings
import numpy as np
import torch
import torch.nn as nn
import src.configuration as C
import src.models as models
import src.utils as utils
from copy import deepcopy
from pathlib import Path
from fastprogress import progress_bar
from sklearn.metrics import average_precision_score, f1_score
class AveragedModel(nn.Module):
def __init__(self, model, device=None, avg_fn=None):
super().__init__()
self.module = deepcopy(model)
if device is not None:
self.module = self.module.to(device)
self.register_buffer("n_averaged",
torch.tensor(0, dtype=torch.long, device=device))
if avg_fn is None:
def avg_fn(averaged_model_parameter, model_parameter, num_averaged):
return averaged_model_parameter + \
(model_parameter - averaged_model_parameter) / (num_averaged + 1)
self.avg_fn = avg_fn
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
def update_parameters(self, model):
for p_swa, p_model in zip(self.parameters(), model.parameters()):
device = p_swa.device
p_model_ = p_model.detach().to(device)
if self.n_averaged == 0:
p_swa.detach().copy_(p_model_)
else:
p_swa.detach().copy_(self.avg_fn(p_swa.detach(), p_model_, self.n_averaged.to(device)))
self.n_averaged += 1
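# Note (added for clarity, not in the original): the default avg_fn above keeps a
# plain running mean, avg_{n+1} = avg_n + (x - avg_n) / (n + 1). The entry point at
# the bottom of this file overrides it with an exponential moving average,
# 0.1 * avg + 0.9 * x, which is what gives the "EMA" model its name.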
def update_bn(loader, model, device=None, input_key=""):
r"""Updates BatchNorm running_mean, running_var buffers in the model.
It performs one pass over data in `loader` to estimate the activation
statistics for BatchNorm layers in the model.
Arguments:
loader (torch.utils.data.DataLoader): dataset loader to compute the
activation statistics on. Each data batch should be either a
tensor, or a list/tuple whose first element is a tensor
containing data.
model (torch.nn.Module): model for which we seek to update BatchNorm
statistics.
device (torch.device, optional): If set, data will be transferred to
:attr:`device` before being passed into :attr:`model`.
Example:
>>> loader, model = ...
>>> torch.optim.swa_utils.update_bn(loader, model)
.. note::
The `update_bn` utility assumes that each data batch in :attr:`loader`
is either a tensor or a list or tuple of tensors; in the latter case it
is assumed that :meth:`model.forward()` should be called on the first
element of the list or tuple corresponding to the data batch.
"""
momenta = {}
for module in model.modules():
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
module.running_mean = torch.zeros_like(module.running_mean)
module.running_var = torch.ones_like(module.running_var)
momenta[module] = module.momentum
if not momenta:
return
was_training = model.training
model.train()
for module in momenta.keys():
module.momentum = None
module.num_batches_tracked *= 0
for input in loader:
if isinstance(input, (list, tuple)):
input = input[0]
if isinstance(input, dict):
input = input[input_key]
if device is not None:
input = input.to(device)
model(input)
for bn_module in momenta.keys():
bn_module.momentum = momenta[bn_module]
model.train(was_training)
def train_one_epoch(model,
ema_model,
dataloader,
optimizer,
scheduler,
criterion,
device,
n=10,
input_key="image",
input_target_key="targets"):
avg_loss = 0.0
model.train()
preds = []
targs = []
cnt = n
for step, batch in enumerate(progress_bar(dataloader)):
cnt -= 1
x = batch[input_key].to(device)
y = batch[input_target_key].to(device).float()
outputs = model(x)
loss = criterion(outputs, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
avg_loss += loss.item() / len(dataloader)
if cnt == 0:
ema_model.update_parameters(model)
cnt = n
clipwise_output = outputs["clipwise_output"].detach().cpu().numpy()
target = y.detach().cpu().numpy()
preds.append(clipwise_output)
targs.append(target)
update_bn(dataloader, ema_model, device=device, input_key=input_key)
scheduler.step()
y_pred = np.concatenate(preds, axis=0)
y_true = np.concatenate(targs, axis=0)
return avg_loss, y_pred, y_true
def eval_one_epoch(model,
dataloader,
criterion,
device,
input_key="image",
input_target_key="targets"):
avg_loss = 0.0
model.eval()
preds = []
targs = []
for step, batch in enumerate(progress_bar(dataloader)):
with torch.no_grad():
x = batch[input_key].to(device)
y = batch[input_target_key].to(device).float()
outputs = model(x)
loss = criterion(outputs, y).detach()
avg_loss += loss.item() / len(dataloader)
clipwise_output = outputs["clipwise_output"].detach().cpu().numpy()
target = y.detach().cpu().numpy()
preds.append(clipwise_output)
targs.append(target)
y_pred = np.concatenate(preds, axis=0)
y_true = np.concatenate(targs, axis=0)
return avg_loss, y_pred, y_true
def calc_metrics(y_true: np.ndarray, y_pred: np.ndarray, threshold=0.5):
mAP = average_precision_score(y_true, y_pred, average=None)
mAP = np.nan_to_num(mAP).mean()
classwise_f1s = []
for i in range(len(y_true[0])):
class_i_pred = y_pred[:, i] > threshold
class_i_targ = y_true[:, i]
if class_i_targ.sum() == 0 and class_i_pred.sum() == 0:
classwise_f1s.append(1.0)
else:
classwise_f1s.append(f1_score(y_true=class_i_targ, y_pred=class_i_pred))
classwise_f1 = np.mean(classwise_f1s)
y_pred_thresholded = (y_pred > threshold).astype(int)
sample_f1 = f1_score(y_true=y_true, y_pred=y_pred_thresholded, average="samples")
return mAP, classwise_f1, sample_f1
def save_model(model, logdir: Path, filename: str):
state_dict = {}
state_dict["model_state_dict"] = model.state_dict()
weights_path = logdir / filename
with open(weights_path, "wb") as f:
torch.save(state_dict, f)
def save_best_model(model, logdir, filename, metric: float, prev_metric: float):
if metric > prev_metric:
save_model(model, logdir, filename)
return metric
else:
return prev_metric
def train(model,
ema_model,
dataloaders,
optimizer,
scheduler,
criterion,
device,
logdir: Path,
logger,
n=10,
main_metric="sample_f1",
epochs=75,
input_key="image",
input_target_key="targets"):
train_metrics = {}
eval_metrics = {}
best_metric = -np.inf
for epoch in range(epochs):
t0 = time.time()
epoch += 1
logger.info("=" * 20)
logger.info(f"Epoch [{epoch}/{epochs}]:")
logger.info("=" * 20)
logger.info("Train")
avg_loss, y_pred, y_true = train_one_epoch(
model=model,
ema_model=ema_model,
dataloader=dataloaders["train"],
optimizer=optimizer,
scheduler=scheduler,
criterion=criterion,
device=device,
n=n,
input_key=input_key,
input_target_key=input_target_key)
mAP, classwise_f1, sample_f1 = calc_metrics(y_true, y_pred)
train_metrics["loss"] = avg_loss
train_metrics["mAP"] = mAP
train_metrics["classwise_f1"] = classwise_f1
train_metrics["sample_f1"] = sample_f1
if len(dataloaders) == 1:
val_dataloader = dataloaders["train"]
else:
val_dataloader = dataloaders["valid"]
logger.info("Valid")
avg_loss, y_pred, y_true = eval_one_epoch(
model=model,
dataloader=val_dataloader,
criterion=criterion,
device=device,
input_key=input_key,
input_target_key=input_target_key)
mAP, classwise_f1, sample_f1 = calc_metrics(y_true, y_pred)
eval_metrics["loss"] = avg_loss
eval_metrics["mAP"] = mAP
eval_metrics["classwise_f1"] = classwise_f1
eval_metrics["sample_f1"] = sample_f1
logger.info("EMA")
avg_loss, y_pred, y_true = eval_one_epoch(
model=ema_model,
dataloader=val_dataloader,
criterion=criterion,
device=device,
input_key=input_key,
input_target_key=input_target_key)
mAP, classwise_f1, sample_f1 = calc_metrics(y_true, y_pred)
eval_metrics["EMA_loss"] = avg_loss
eval_metrics["EMA_mAP"] = mAP
eval_metrics["EMA_classwise_f1"] = classwise_f1
eval_metrics["EMA_sample_f1"] = sample_f1
logger.info("#" * 20)
logger.info("Train metrics")
for key, value in train_metrics.items():
logger.info(f"{key}: {value:.5f}")
logger.info("Valid metrics")
for key, value in eval_metrics.items():
logger.info(f"{key}: {value:.5f}")
logger.info("#" * 20)
best_metric = save_best_model(
model, logdir, "best.pth",
metric=eval_metrics[main_metric], prev_metric=best_metric)
save_model(ema_model, logdir, "ema.pth")
elapsed_sec = time.time() - t0
elapsed_min = int(elapsed_sec // 60)
elapsed_sec = elapsed_sec % 60
logger.info(f"Elapsed time: {elapsed_min}min {elapsed_sec:.4f}seconds.")
if __name__ == "__main__":
warnings.filterwarnings("ignore")
args = utils.get_parser().parse_args()
config = utils.load_config(args.config)
global_params = config["globals"]
output_dir = Path(global_params["output_dir"])
output_dir.mkdir(exist_ok=True, parents=True)
logger = utils.get_logger(output_dir / "output.log")
utils.set_seed(global_params["seed"])
device = C.get_device(global_params["device"])
df, datadir = C.get_metadata(config)
splitter = C.get_split(config)
calltype_labels = C.get_calltype_labels(df)
if config["data"].get("event_level_labels") is not None:
event_level_labels = C.get_event_level_labels(config)
else:
event_level_labels = None
if "Multilabel" in config["split"]["name"]:
y = calltype_labels
else:
y = df["ebird_code"]
for i, (trn_idx, val_idx) in enumerate(
splitter.split(df, y=y)):
if i not in global_params["folds"]:
continue
logger.info("=" * 20)
logger.info(f"Fold {i}")
logger.info("=" * 20)
trn_df = df.loc[trn_idx, :].reset_index(drop=True)
val_df = df.loc[val_idx, :].reset_index(drop=True)
loaders = {
phase: C.get_loader(df_, datadir, config, phase, event_level_labels)
for df_, phase in zip([trn_df, val_df], ["train", "valid"])
}
model = models.get_model(config).to(device)
criterion = C.get_criterion(config).to(device)
optimizer = C.get_optimizer(model, config)
scheduler = C.get_scheduler(optimizer, config)
ema_model = AveragedModel(
model,
avg_fn=lambda averaged_model_parameter, model_parameter, num_averaged:
0.1 * averaged_model_parameter + 0.9 * model_parameter)
(output_dir / f"fold{i}").mkdir(exist_ok=True, parents=True)
train(model=model,
ema_model=ema_model,
dataloaders=loaders,
optimizer=optimizer,
scheduler=scheduler,
criterion=criterion,
device=device,
logdir=output_dir / f"fold{i}",
logger=logger,
n=10,
main_metric=global_params["main_metric"],
epochs=global_params["num_epochs"],
input_key=global_params["input_key"],
input_target_key=global_params["input_target_key"])
| 2.21875 | 2 |
pertpy/api/__init__.py | theislab/pertpy | 1 | 12791142 | import scanpy
pertpy_settings = scanpy.settings
from pertpy.api import data as dt
from pertpy.api import plot as pl
from pertpy.api import preprocessing as pp
from pertpy.api import tools as tl
| 1.125 | 1 |
examples/gyrodata.py | EduardoNigro/pyev3 | 0 | 12791143 | """ gyrodata.py
Run one motor with a sinusoidal speed input and an attached gyro.
This example shows how use the gyro to measure angular position and velocity
by attaching it to the motor shaft.
Setup:
Connect one large motor to port 'A'
Connect the gyro sensor to port number 1.
Notes:
1. Remember there's a cable attached to the sensor, so limit the rotation
angle to approx. 180 degrees.
2. The maximum angular speed that the gyro can detect without saturating
is 440 deg./s (approx. 7.7 rad/s). Limit the motor speed % output to no
more than 35 %.
"""
# Importing modules and classes
import time
import numpy as np
from scipy import integrate
from pyev3.utils import plot_line
from pyev3.brick import LegoEV3
from pyev3.devices import Gyro, Motor
# Defining parameters (for one motor)
T = 2 # Period of sine wave (s)
u0 = 30 # Motor speed amplitude (%)
tstop = 2 # Sine wave duration (s)
# Pre-allocating output arrays
tmotor = []
theta = []
tgyro = []
angle = []
rate = []
# Creating LEGO EV3 objects
ev3 = LegoEV3()
motor = Motor(ev3, port='A')
gyro = Gyro(ev3, portnum=1, inputmode='angle&rate')
# Initializing motor
motor.outputmode = 'speed'
motor.output = 0
motor.reset_angle()
motor.start()
# Getting initial gyro sensor reading to remove drift in the data
angle0, rate0 = gyro.output
# Initializing current time stamp and starting clock
tcurr = 0
tstart = time.perf_counter()
# Running motor sine wave output
while tcurr <= tstop:
# Getting current time for motor (s)
tcurr = time.perf_counter() - tstart
# Assigning current motor sinusoidal
# output using the current time stamp
motor.output = u0 * np.sin((2*np.pi/T) * tcurr)
# Updating output arrays for motor
tmotor.append(tcurr)
theta.append(motor.angle)
# Getting current time for gyro (s)
tcurr = time.perf_counter() - tstart
# Updating output arrays for gyro
# (and converting from deg/s to rad/s)
anglecurr, ratecurr = gyro.output
tgyro.append(tcurr)
angle.append(anglecurr-angle0)
rate.append(np.pi/180 * (ratecurr-rate0))
# Stopping motor and closing brick connection
motor.stop(brake='off')
ev3.close()
# Calculating motor angular velocity (rad/s)
w = np.pi/180 * np.gradient(theta, tmotor)
# Plotting results
plot_line([tmotor, tgyro], [theta, angle], yname='Angular Position (deg.)',
legend=['Tacho', 'Gyro'], marker=True)
plot_line([tmotor, tgyro], [w, rate], yname='Angular velocity (rad/s)',
legend=['Tacho', 'Gyro'], marker=True)
| 3.546875 | 4 |
modules/may9/plug-ins/May9_Next.py | DavideAlidosi/May9 | 39 | 12791144 | <reponame>DavideAlidosi/May9
import __main__
import May9_Next
def initializePlugin(*args):
__main__.May9_Next = May9_Next
def uninitializePlugin(*args):
try:
__main__.__delattr__("May9_Next")
except AttributeError:
pass | 1.625 | 2 |
scrapqd/gql/enum/browser.py | dduraipandian/scrapqd | 0 | 12791145 | <reponame>dduraipandian/scrapqd<filename>scrapqd/gql/enum/browser.py
from graphql import GraphQLEnumType as EnumType
from scrapqd.executor.selenium_driver.factory import BrowserFactory
BrowserEnum = EnumType("Browser",
{d.upper(): d for d in BrowserFactory().mapping()},
description="Browser option in the selenium executor")
| 2.21875 | 2 |
wb/main/models/dataset_augmentation_job_model.py | apaniukov/workbench | 23 | 12791146 | """
OpenVINO DL Workbench
Class for ORM model describing dataset augmentation job
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
try:
from typing import TypedDict
except ImportError:
from typing_extensions import TypedDict
from sqlalchemy import Column, Integer, ForeignKey, Boolean, Float, Text
from sqlalchemy.orm import relationship, backref
from wb.main.enumerates import JobTypesEnum
from wb.main.models.datasets_model import DatasetsModel, DatasetJobData
from wb.main.models.jobs_model import JobsModel
class DatasetAugmentationJobData(TypedDict):
applyHorizontalFlip: bool
applyVerticalFlip: bool
applyErase: bool
eraseRatio: int
eraseImages: int
applyNoise: bool
noiseRatio: int
noiseImages: int
applyImageCorrections: bool
imageCorrections: str
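# Illustrative payload matching the TypedDict above (all values are made up):
#   {
#       "applyHorizontalFlip": True, "applyVerticalFlip": False,
#       "applyErase": True, "eraseRatio": 1, "eraseImages": 2,
#       "applyNoise": False, "noiseRatio": 0, "noiseImages": 0,
#       "applyImageCorrections": False, "imageCorrections": "[]",
#   }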
# pylint: disable=too-many-instance-attributes
class DatasetAugmentationJobModel(JobsModel):
__tablename__ = 'dataset_augmentation_jobs'
__mapper_args__ = {
'polymorphic_identity': JobTypesEnum.augment_dataset_type.value
}
job_id = Column(Integer, ForeignKey(JobsModel.job_id), primary_key=True)
dataset_id = Column(Integer, ForeignKey(DatasetsModel.id), nullable=False)
horizontal_flip = Column(Boolean, nullable=False, default=False)
vertical_flip = Column(Boolean, nullable=False, default=False)
apply_random_erase = Column(Boolean, nullable=False, default=False)
erase_ratio = Column(Float, nullable=True)
erase_images = Column(Integer, nullable=True)
apply_noise_injection = Column(Boolean, nullable=False, default=False)
noise_ratio = Column(Float, nullable=True)
noise_images = Column(Integer, nullable=True)
apply_image_corrections = Column(Boolean, nullable=False, default=False)
image_corrections = Column(Text, nullable=True)
dataset = relationship(DatasetsModel, foreign_keys=[dataset_id],
backref=backref('dataset_augmentation_job', lazy='subquery', cascade='delete,all',
uselist=False))
def __init__(self, data: DatasetJobData, augmentation_data: DatasetAugmentationJobData):
super().__init__(data)
self.dataset_id = data['datasetId']
self.vertical_flip = augmentation_data['applyVerticalFlip']
self.horizontal_flip = augmentation_data['applyHorizontalFlip']
self.apply_noise_injection = augmentation_data['applyNoise']
self.apply_random_erase = augmentation_data['applyErase']
self.erase_images = augmentation_data['eraseImages']
self.erase_ratio = augmentation_data['eraseRatio']
self.noise_ratio = augmentation_data['noiseRatio']
self.noise_images = augmentation_data['noiseImages']
self.apply_image_corrections = augmentation_data['applyImageCorrections']
self.image_corrections = json.dumps(augmentation_data['imageCorrections'])
def json(self) -> dict:
return {
**super().json(),
**self.dataset.json()
}
@property
def augmented_images_count(self) -> int:
augmented_images_count = 0
if self.apply_random_erase:
augmented_images_count += self.erase_images
if self.apply_noise_injection:
augmented_images_count += self.noise_images
if self.horizontal_flip:
augmented_images_count += 1
if self.vertical_flip:
augmented_images_count += 1
if self.apply_image_corrections:
augmented_images_count += len(self.image_corrections)
return augmented_images_count
| 2.09375 | 2 |
tilenol/event.py | paulie-g/tilenol | 42 | 12791147 | <filename>tilenol/event.py
import logging
from zorro import Condition, gethub
log = logging.getLogger(__name__)
class Event(object):
def __init__(self, name=None):
self.name = name
self._listeners = []
self._worker = None
def listen(self, fun):
self._listeners.append(fun)
def unlisten(self, fun):
self._listeners.remove(fun)
def emit(self):
log.debug("Emitting event %r", self.name)
if self._worker is None and self._listeners:
self._worker = gethub().do_spawn(self._do_work)
def _do_work(self):
try:
log.debug("Processing event %r", self.name)
for l in self._listeners:
l()
finally:
self._worker = None
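if __name__ == '__main__':
    # Usage sketch (added for illustration, not part of the original module).
    ev = Event('demo')
    ev.listen(lambda: print('demo event fired'))
    # ev.emit() would schedule the listener on the current zorro hub, so it has
    # to be called from code that is already running inside one.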
| 2.4375 | 2 |
src/word_fix.py | thoppe/orthographic-pedant | 155 | 12791148 | <reponame>thoppe/orthographic-pedant<gh_stars>100-1000
import os, json, logging, glob, codecs, os, time, subprocess
from contextlib import contextmanager
import requests
logging.basicConfig(level=logging.INFO)
logging.getLogger("requests").setLevel(logging.WARNING)
FLAG_fork = True
FLAG_delete = True
fork_sleep_time = 10
clone_error_sleep_time = 60
# Verify that there is a token set as an env variable and load it
shell_token = "GITHUB_ORTHOGRAPHIC_TOKEN"
GITHUB_TOKEN = os.environ[shell_token]
login_params = {"access_token":GITHUB_TOKEN,}
API_URL = "https://api.github.com/repos"
fork_url = API_URL + "/{user_name}/{repo_name}/forks"
pulls_url = API_URL + "/{user_name}/{repo_name}/pulls"
delete_url = API_URL + "/{user_name}/{repo_name}"
push_url = "https://{bot_name}:{bot_password}@github.com/{bot_name}/{repo_name} {branch_name}:{branch_name}"
clone_url = "https://github.com/orthographic-pedant/{repo_name}"
# Load the PR text
with open("messages/pull_request.txt") as FIN:
pull_request_msg = ' '.join(FIN.read().split())
with open("messages/commit_header.txt") as FIN:
commit_header_msg = FIN.read().strip()
with open("messages/commit_text.txt") as FIN:
commit_text_msg = FIN.read().strip()
def is_branch_different_from_default(repo):
# Checks if any substantial commits have been made
cmd = "git diff {master_branch} --".format(**repo)
p = subprocess.check_output(cmd,shell=True).strip()
# If any edits have been made this will return True
return p
def pull_request_repo(repo):
if not is_branch_different_from_default(repo):
logging.info("No edits have been made, skipping!".format(**repo))
return False
logging.info("Creating pull request for {full_name}".format(**repo))
data = {
"head" :"{bot_name}:{branch_name}".format(**repo),
"base" : repo["master_branch"],
"title" : repo["commit_header"],
"body" : pull_request_msg.format(**repo),
}
url = pulls_url.format(**repo)
r = requests.post(url,params=login_params,json=data)
if "errors" in r.json():
from pprint import pprint
        pprint(r.json()["errors"])
logging.info("Pull request status {}".format(r))
return True
def fork_repo(repo):
f_url = fork_url.format(**repo)
r = requests.post(f_url,params=login_params)
status = r.status_code
logging.info("Creating fork, status {}".format(status))
assert(status == 202)
logging.info("Sleeping for {} seconds.".format(fork_sleep_time))
time.sleep(fork_sleep_time)
def push_commits(repo):
logging.info("Push new branch {bot_name}:{branch_name}".format(**repo))
cmd = "git push -u " + push_url.format(**repo)
os.system(cmd)
def clone_repo(repo):
git_endpoint = clone_url.format(**repo)
cmd = "git clone -q --single-branch --depth 1 " + git_endpoint
if not os.path.exists(repo["repo_name"]):
try:
msg = u"Cloning repo {full_name}".format(**repo)
logging.info(msg)
subprocess.check_output(cmd,shell=True)
except:
msg = u"Cloning repo {full_name} again after sleep".format(**repo)
logging.info(msg)
time.sleep(clone_error_sleep_time)
subprocess.check_output(cmd,shell=True)
os.system(cmd)
def does_git_branch_exist(repo):
# Checks if a branch already exists of a given name
cmd = "git rev-parse -q --verify {branch_name}".format(**repo)
try:
p = subprocess.check_output(cmd,shell=True).strip()
except subprocess.CalledProcessError:
return False
# Valid SHA1 hash will be forty characters long
return len(p.strip()) == 40
def create_branch(repo):
# Attempts to create the branch in repo["branch_name"]
if not does_git_branch_exist(repo):
logging.info("Creating new branch {branch_name}".format(**repo))
cmd = "git checkout -b {branch_name}".format(**repo)
os.system(cmd)
def delete_bot_repo(repo):
url = API_URL + "/{bot_name}/{repo_name}".format(**repo)
r = requests.delete(url,params=login_params)
msg = "Deleted bot repo {repo_name}, status {}"
logging.info(msg.format(r.status_code,**repo))
def fix_word(line,w1,w2):
line = line.replace(w1.title(),w2.title())
line = line.replace(w1,w2)
line = line.replace(w1.lower(),w2.lower())
line = line.replace(w1.upper(),w2.upper())
return line
def fix_file(f, w1, w2):
corrections = 0
newlines = []
with codecs.open(f,'r','utf-8') as FIN:
for line in FIN:
if w1.lower() in line.lower():
logging.info("Fixing {}->{} in {}".format(w1,w2,f))
line = fix_word(line,w1,w2)
corrections += 1
newlines.append(line)
with codecs.open(f,'w','utf-8') as FOUT:
FOUT.write(''.join(newlines))
return corrections
@contextmanager
def enter_repo(repo):
# Remember our original directory
org_dir = os.getcwd()
repo["bot_name"] = "orthographic-pedant"
repo["bot_password"] = <PASSWORD>
# Used so github can track the submissions...
repo["bot_email"] = "travis.hoppe"+"+orthographicpendant"+"@"+"<EMAIL>"
# Record the full name of the repo
repo["full_name"] = "{user_name}:{repo_name}".format(**repo)
logging.info("Entered {}".format(repo["full_name"]))
if FLAG_fork:
fork_repo(repo)
# Create the directories
os.system("mkdir -p forks")
os.chdir("forks")
clone_repo(repo)
# Enter the repo directory
os.chdir(repo["repo_name"])
# Get the current branch name
p = subprocess.check_output("git show-branch",shell=True)
repo["master_branch"] = p.split(']')[0].split('[')[1]
# Set the username
cmd = 'git config user.name "{bot_name}"'.format(**repo)
os.system(cmd)
cmd = 'git config user.email "{bot_email}"'.format(**repo)
os.system(cmd)
yield
logging.info("Exiting {}".format(repo["full_name"]))
if FLAG_delete:
delete_bot_repo(repo)
os.chdir(org_dir)
os.system("rm -rf forks")
def fix_repo(full_name, good_word, bad_word):
full_name = full_name.strip()
user_name, repo_name = full_name.split('/')
repo = {
"access_token" : GITHUB_TOKEN,
"user_name" : user_name,
"repo_name" : repo_name,
"good_word" : good_word,
"bad_word" : bad_word,
}
# Check if the user_name is a "bad_word", this is a false positive!
if bad_word.lower() in user_name.lower():
return False
# Check if repo_name is a "bad_word", this is also a false positive!
if bad_word.lower() in repo_name.lower():
return False
with enter_repo(repo):
# Find READMES
F_README = [x for x in glob.glob("*.*")
if 'readme.' in x.lower()]
repo["branch_name"] = "spell_check/{}".format(good_word)
create_branch(repo)
# Fix READMES
total_corrections = 0
for fr in F_README:
try:
correction_count = fix_file(fr, bad_word, good_word)
except UnicodeDecodeError:
# Skip the repo if the file is too funky for utf-8
msg = "UnicodeDecode Error"
logging.error(msg)
return False
total_corrections += correction_count
logging.info("Fixed {} spelling mistakes".format(total_corrections))
# Commit changes
repo["commit_header"] = commit_header_msg.format(**repo)
repo["commit_text"] = commit_text_msg.format(**repo)
cmd = 'git commit -a -m "{commit_header}" -m "{commit_text}"'.format(**repo)
os.system(cmd)
# Push the changes to bot directory
push_commits(repo)
# Create pull request
pull_status = pull_request_repo(repo)
return pull_status
###############################################################
if __name__ == "__main__":
# Target word
bad_word = "Celcius"
good_word = "Celsius"
full_name = "thoppe/I-am-error"
fix_repo(full_name, good_word, bad_word)
| 2.578125 | 3 |
udatetime/_pure.py | kashnick/udatetime | 244 | 12791149 | <reponame>kashnick/udatetime
from datetime import tzinfo, timedelta, datetime as dt_datetime
from time import time, gmtime
from math import floor, ceil
DATE_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%f'
class TZFixedOffset(tzinfo):
def __init__(self, offset):
self.offset = offset
def utcoffset(self, dt=None):
return timedelta(seconds=self.offset * 60)
def dst(self, dt=None):
return timedelta(0)
def tzname(self, dt=None):
sign = '+'
if self.offset < 0:
sign = '-'
return "%s%d:%d" % (sign, self.offset / 60, self.offset % 60)
def __repr__(self):
return self.tzname()
def _timestamp_to_date_time(timestamp, tzinfo):
t_full = timestamp + (tzinfo.offset * 60)
timestamp = int(floor(t_full))
frac = (t_full - timestamp) * 1e6
us = int(floor(frac + 0.5) if frac >= 0.0 else ceil(frac - 0.5))
if us == 1e6:
timestamp += 1
us = 0
y, m, d, hh, mm, ss, weekday, jday, dst = gmtime(timestamp)
ss = min(ss, 59) # if sec > 59, set 59 (platform leap support)
return dt_datetime(y, m, d, hh, mm, ss, us, tzinfo)
def _format_date_time(date_time):
tm = date_time.timetuple()
offset = 0
sign = '+'
if date_time.tzinfo is not None:
if date_time.tzinfo.__class__ is not TZFixedOffset:
# TODO: Support all tzinfo subclasses by calling utcoffset()
raise ValueError('Only TZFixedOffset supported.')
offset = date_time.tzinfo.offset
if offset < 0:
offset = offset * -1
sign = '-'
return '%04d-%02d-%02dT%02d:%02d:%02d.%06d%c%02d:%02d' % (
tm.tm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec,
date_time.microsecond, sign, offset / 60, offset % 60
)
def _get_local_utc_offset():
ts = time()
return (
dt_datetime.fromtimestamp(ts) - dt_datetime.utcfromtimestamp(ts)
).total_seconds() / 60
local_utc_offset = _get_local_utc_offset()
local_timezone = TZFixedOffset(local_utc_offset)
utc_timezone = TZFixedOffset(0)
def utcnow():
'''datetime aware object in UTC with current date and time.'''
return _timestamp_to_date_time(time(), utc_timezone)
def now():
'''datetime aware object in local timezone with current date and time.'''
return _timestamp_to_date_time(time(), local_timezone)
def from_rfc3339_string(rfc3339_string):
'''Parse RFC3339 compliant date-time string.'''
rfc3339_string = rfc3339_string.replace(' ', '').lower()
if 't' not in rfc3339_string:
raise ValueError(
'Invalid RFC3339 string. Missing \'T\' date/time separator.'
)
(date, _, _time) = rfc3339_string.partition('t')
if not date or not _time:
raise ValueError('Invalid RFC3339 string.')
try:
(year, month, day) = date.split('-')
year = int(year)
month = int(month)
day = int(day)
except ValueError:
raise ValueError('Invalid RFC3339 string. Invalid date.')
try:
(hour, minute, second) = _time[:8].split(':')
hour = int(hour)
minute = int(minute)
second = int(second)
except ValueError:
raise ValueError('Invalid RFC3339 string. Invalid time.')
usec = 0
offset = None
if len(_time) > 8:
if _time[8] == '.':
usec_buf = ''
for c in _time[9:]:
if c in '0123456789':
usec_buf += c
else:
break
if len(usec_buf) > 6:
raise ValueError('Invalid RFC3339 string. Invalid fractions.')
usec = int(usec_buf)
if len(usec_buf) > 0 and len(usec_buf) < 6:
# ugly as shit, but good damn multiplication precision makes
# it a mess
usec = usec * int('1' + '0' * (6 - len(usec_buf)))
_time = _time[9 + len(usec_buf):]
elif _time[8] == 'z':
offset = 0
if len(_time[9:]):
raise ValueError(
'Invalid RFC3339 string. Remaining data after time zone.'
)
else:
_time = _time[8:]
else:
offset = 0
if offset is None and (len(_time) == 0 or _time[0] == 'z'):
offset = 0
if len(_time[1:]):
raise ValueError(
'Invalid RFC3339 string. Remaining data after time zone.'
)
elif offset is None:
if _time[0] not in '+-':
raise ValueError('Invalid RFC3339 string. Expected timezone.')
negative = True if _time[0] == '-' else False
try:
(off_hour, off_minute) = _time[1:].split(':')
off_hour = int(off_hour)
off_minute = int(off_minute)
except ValueError:
raise ValueError('Invalid RFC3339 string. Invalid timezone.')
offset = (off_hour * 60) + off_minute
if negative:
offset = offset * -1
return dt_datetime(
year, month, day, hour, minute, second, usec, TZFixedOffset(offset)
)
def to_rfc3339_string(date_time):
'''Serialize date_time to RFC3339 compliant date-time string.'''
if date_time and date_time.__class__ is not dt_datetime:
raise ValueError("Expected a datetime object.")
return _format_date_time(date_time)
def from_timestamp(timestamp, tz=None):
'''timestamp[, tz] -> tz's local time from POSIX timestamp.'''
if tz is None:
tz = local_timezone
elif tz.__class__ is not TZFixedOffset:
# TODO: Support all tzinfo subclasses by calling utcoffset()
raise ValueError('Only TZFixedOffset supported.')
return _timestamp_to_date_time(timestamp, tz)
def from_utctimestamp(timestamp):
'''timestamp -> UTC datetime from a POSIX timestamp (like time.time()).'''
return _timestamp_to_date_time(timestamp, utc_timezone)
def utcnow_to_string():
'''Current UTC date and time RFC3339 compliant date-time string.'''
return _format_date_time(utcnow())
def now_to_string():
'''Local date and time RFC3339 compliant date-time string.'''
return _format_date_time(now())
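if __name__ == '__main__':
    # Usage sketch (added for illustration; not part of the original module).
    parsed = from_rfc3339_string('2021-01-02T03:04:05.123456+01:00')
    print(parsed)                     # 2021-01-02 03:04:05.123456+01:00
    print(to_rfc3339_string(parsed))  # 2021-01-02T03:04:05.123456+01:00
    print(utcnow_to_string())         # current UTC time as an RFC3339 string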
| 2.75 | 3 |
utils/__init__.py | carolinscholl/SORN | 1 | 12791150 | from .bunch import *
from .backup import backup_pickle
| 1 | 1 |