max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content | score | int_score
---|---|---|---|---|---|---|
aioscrapy/middleware/middleware_Itempipeline.py | conlin-huang/aio-scrapy | 13 | 12787351 |
import logging
from scrapy.utils.conf import build_component_list
from .middleware import MiddlewareManager
logger = logging.getLogger(__name__)
class ItemPipelineManager(MiddlewareManager):
component_name = 'item pipeline'
@classmethod
def _get_mwlist_from_settings(cls, settings):
return build_component_list(settings.getwithbase('ITEM_PIPELINES'))
def _add_middleware(self, pipe):
super()._add_middleware(pipe)
if hasattr(pipe, 'process_item'):
self.methods['process_item'].append(pipe.process_item)
async def process_item(self, item, spider):
return await self._process_chain('process_item', item, spider)
| 2.1875 | 2 |
tbcompanion/main/forms.py | GiorginoSerbuciano/techbasicscompanion | 0 | 12787352 | from flask_wtf.form import FlaskForm
from wtforms.fields.core import StringField
from wtforms.fields.simple import SubmitField
from wtforms.validators import DataRequired
class SearchBox(FlaskForm):
"""Placeholder for a future implementation"""
string = StringField('Search for a post, user or project', validators=[
DataRequired()])
submit = SubmitField('Search')
| 2.609375 | 3 |
tests/formats/dataclass/parsers/test_xml.py | pashashocky/xsdata | 0 | 12787353 |
from unittest import mock
from tests.fixtures.books import Books
from xsdata.formats.dataclass.models.elements import XmlType
from xsdata.formats.dataclass.parsers.nodes import PrimitiveNode
from xsdata.formats.dataclass.parsers.nodes import SkipNode
from xsdata.formats.dataclass.parsers.xml import UserXmlParser
from xsdata.models.enums import EventType
from xsdata.utils.testing import FactoryTestCase
from xsdata.utils.testing import XmlVarFactory
class UserXmlParserTests(FactoryTestCase):
def setUp(self):
super().setUp()
self.parser = UserXmlParser()
self.parser.objects = [(x, x) for x in "abcde"]
@mock.patch.object(UserXmlParser, "emit_event")
def test_start(self, mock_emit_event):
attrs = {"a": "b"}
queue = []
self.parser.start(Books, queue, [], "{urn:books}books", attrs, {})
self.assertEqual(1, len(queue))
mock_emit_event.assert_called_once_with(
EventType.START, "{urn:books}books", attrs=attrs
)
@mock.patch.object(UserXmlParser, "emit_event")
def test_end(self, mock_emit_event):
objects = []
queue = []
var = XmlVarFactory.create(xml_type=XmlType.TEXT, name="foo", types=(bool,))
queue.append(PrimitiveNode(var, {}, None))
result = self.parser.end(queue, objects, "enabled", "true", None)
self.assertTrue(result)
self.assertEqual(0, len(queue))
self.assertEqual(("enabled", True), objects[-1])
mock_emit_event.assert_called_once_with(EventType.END, "enabled", obj=result)
@mock.patch.object(UserXmlParser, "emit_event")
def test_end_with_no_result(self, mock_emit_event):
objects = []
queue = [SkipNode()]
result = self.parser.end(queue, "author", "foobar", None, objects)
self.assertFalse(result)
self.assertEqual(0, len(objects))
self.assertEqual(0, len(queue))
self.assertEqual(0, mock_emit_event.call_count)
def test_emit_event(self):
mock_func = mock.Mock()
self.parser.foo_bar_el = mock_func
self.parser.emit_event("foo", "{tns}BarEl", a=1, b=2)
mock_func.assert_called_once_with(a=1, b=2)
self.assertEqual({("foo", "{tns}BarEl"): mock_func}, self.parser.emit_cache)
| 2.390625 | 2 |
modules/module1/Week1/5_fofin_interactive.py | tetov/ITA19 | 7 | 12787354 | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import os
import compas_rhino
from compas.utilities import geometric_key
from fofin.shell import Shell
from fofin.shellartist import ShellArtist
from compas_rhino.selectors import VertexSelector
from compas_rhino.selectors import EdgeSelector
from compas_rhino.modifiers import VertexModifier
from compas_rhino.modifiers import EdgeModifier
# ==============================================================================
# I/O
# ==============================================================================
HERE = os.path.dirname(__file__)
DATA = os.path.join(HERE, 'data')
FILE = os.path.join(DATA, 'fofin.json')
# ==============================================================================
# Shell
# ==============================================================================
shell = Shell.from_json(FILE)
# ==============================================================================
# Visualization helpers
# ==============================================================================
artist = ShellArtist(shell, layer="Mesh")
artist.clear_layer()
artist.draw_vertices()
artist.draw_edges()
artist.redraw()
def redraw():
artist.clear_layer()
artist.draw_vertices()
artist.draw_edges()
artist.redraw()
# ==============================================================================
# Vertex attributes
# ==============================================================================
while True:
keys = VertexSelector.select_vertices(shell)
if not keys:
break
if VertexModifier.update_vertex_attributes(shell, keys):
shell.fofin()
redraw()
# ==============================================================================
# Export result
# ==============================================================================
shell.to_json(FILE)
# ==============================================================================
# Visualize result
# ==============================================================================
artist.clear_layer()
artist.draw_vertices()
artist.draw_edges()
artist.draw_faces()
artist.draw_forces(scale=0.01)
artist.draw_reactions(scale=0.1)
| 1.578125 | 2 |
[1] BEGINNER/2708 - Turistas no Parque Huacachina.py | tiago040/URI-SOLUTIONS | 1 | 12787355 | '''
The municipal tourism agency of the city of Ica, Peru, set up a checkpoint for the adventure jeeps that drive up to the dunes of the Huacachina park. Since several off-road vehicles go up to and come back from the national park during the day, and tourists do not always use the same vehicle for the trip up and the trip back, the city needed better control of and security over the flow of visitors in the park. Write a program that receives as input whether a jeep is entering or returning from the park and how many tourists that vehicle is carrying. At the end of the shift, the program must report the number of vehicles and the number of tourists that have not yet returned from the adventure.
Input
The program receives successive pairs of inputs. Each pair indicates the jeep's movement and the number of tourists it is carrying. The first value is either "SALIDA" or "VUELTA": "SALIDA" indicates that the jeep is leaving the base and entering the park, while "VUELTA" indicates that the jeep is returning from the trip. Immediately afterwards, the program receives an integer T (0 <= T <= 20) indicating the number of tourists being carried by the jeep. The string "ABEND" marks the end of processing.
Output
The program must print two values, one per line: the number of tourists and the number of jeeps that have not yet returned from the park.
'''
entrando = []
saindo = []
while True:
entrada = str(input()).split()
jipe = entrada[0].upper()
if jipe == 'ABEND':
break
else:
turista = int(entrada[1])
if jipe == 'SALIDA':
entrando.append(turista)
elif jipe == 'VUELTA':
saindo.append(turista)
print(sum(entrando) - sum(saindo))
print(len(entrando) - len(saindo))
| 3.515625 | 4 |
kalliope/core/ShellGui.py | joshuaboniface/kalliope | 1 | 12787356 | # coding: utf8
import locale
import logging
from dialog import Dialog
from kalliope.core import OrderListener
from kalliope.core.ConfigurationManager import SettingLoader
from kalliope.core.SynapseLauncher import SynapseLauncher
from kalliope.neurons.say.say import Say
logging.basicConfig()
logger = logging.getLogger("kalliope")
class ShellGui:
def __init__(self, brain=None):
"""
Load a GUI in a shell console for testing TTS, STT and brain configuration
:param brain: The Brain object provided by the brain.yml
:type brain: Brain
.. seealso:: Brain
"""
# override brain
self.brain = brain
# get settings
sl = SettingLoader()
self.settings = sl.settings
locale.setlocale(locale.LC_ALL, '')
self.d = Dialog(dialog="dialog")
self.d.set_background_title("Kalliope shell UI")
self.show_main_menu()
def show_main_menu(self):
"""
Main menu of the shell UI.
        Provide a list of actions the user can select to test their settings
"""
code, tag = self.d.menu("Test your Kalliope settings from this menu",
choices=[("TTS", "Text to Speech"),
("STT", "Speech to text"),
("Synapses", "Run a synapse")])
if code == self.d.OK:
if tag == "STT":
self.show_stt_test_menu()
if tag == "TTS":
self.show_tts_test_menu()
if tag == "Synapses":
self.show_synapses_test_menu()
def show_stt_test_menu(self):
"""
Show the list of available STT.
        Selecting an STT will load the engine to capture the user's audio and return the recognized text
"""
# we get STT from settings
stt_list = self.settings.stts
logger.debug("Loaded stt list: %s" % str(stt_list))
choices = self._get_choices_tuple_from_list(stt_list)
code, tag = self.d.menu("Select the STT to test:",
choices=choices)
# go back to the main menu if we choose "cancel"
if code == self.d.CANCEL:
self.show_main_menu()
# if ok, call the target TTS engine and catch audio
if code == self.d.OK:
self.d.infobox("Please talk now")
            # the callback function will print the text recognized from the audio on the screen
order_listener = OrderListener(callback=self.callback_stt, stt=str(tag))
order_listener.load_stt_plugin()
def show_tts_test_menu(self, sentence_to_test=None):
"""
A menu for testing text to speech
- select a TTS engine to test
- type a sentence
        - press ok and listen to the audio generated from the typed text
:param sentence_to_test: the screen written sentence to test
"""
continue_bool = True
# if we don't have yet a sentence to test, we ask the user to type one
if sentence_to_test is None:
# First, we ask the user to type a sentence that will be passed in the TTS
code, sentence_to_test = self.d.inputbox("Please type the sentence you want to test", height=20, width=50)
if code == self.d.CANCEL:
self.show_main_menu()
continue_bool = False
if code == self.d.OK:
continue_bool = True
if continue_bool:
# we get TTS from settings
tts_list = self.settings.ttss
# create a list of tuple that can be used by the dialog menu
choices = self._get_choices_tuple_from_list(tts_list)
code, tag = self.d.menu("Sentence to test: %s" % sentence_to_test,
choices=choices)
if code == self.d.CANCEL:
self.show_tts_test_menu()
if code == self.d.OK:
self._run_tts_test(tag, sentence_to_test)
# then go back to this menu with the same sentence
                # if the user wants to test the same text with another TTS
self.show_tts_test_menu(sentence_to_test=sentence_to_test)
@staticmethod
def _run_tts_test(tts_name, sentence_to_test):
"""
Call the TTS
:param tts_name: Name of the TTS module to launch
:param sentence_to_test: String text to send to the TTS engine
"""
sentence_to_test = sentence_to_test.encode('utf-8')
tts_name = tts_name.encode('utf-8')
Say(message=sentence_to_test, tts=tts_name)
@staticmethod
def _get_choices_tuple_from_list(list_to_convert):
"""
        Return a list of tuples that can be used in the Dialog menu
:param list_to_convert: List of object to convert into tuple
:return: List of choices
:rtype: List
"""
# create a list of tuple that can be used by the dialog menu
choices = list()
for el in list_to_convert:
tup = (str(el.name), str(el.parameters))
choices.append(tup)
logger.debug("Add el to the list: %s with parameters: %s" % (str(el.name), str(el.parameters)))
return choices
def callback_stt(self, audio):
"""
        Callback function called after the STT has finished its job.
        Print on screen the text of what the STT engine thinks we said.
:param audio: Text from the translated audio
"""
code = self.d.msgbox("The STT engine think you said:\n %s" % audio, width=50)
if code == self.d.OK:
self.show_stt_test_menu()
def show_synapses_test_menu(self):
"""
Show a list of available synapse in the brain to run it directly
"""
# create a tuple for the list menu
choices = list()
x = 0
for el in self.brain.synapses:
tup = (str(el.name), str(x))
choices.append(tup)
x += 1
code, tag = self.d.menu("Select a synapse to run",
choices=choices)
if code == self.d.CANCEL:
self.show_main_menu()
if code == self.d.OK:
logger.debug("Run synapse from GUI: %s" % tag)
SynapseLauncher.start_synapse_by_name(tag, brain=self.brain)
self.show_synapses_test_menu()
| 2.375 | 2 |
src/edges_cal/cli.py | edges-collab/edges-cal | 0 | 12787357 | """CLI functions for edges-cal."""
import click
import papermill as pm
import yaml
from datetime import datetime
from nbconvert import PDFExporter
from pathlib import Path
from rich.console import Console
from traitlets.config import Config
from edges_cal import cal_coefficients as cc
console = Console()
main = click.Group()
@main.command()
@click.argument("config", type=click.Path(dir_okay=False, file_okay=True, exists=True))
@click.argument("path", type=click.Path(dir_okay=True, file_okay=False, exists=True))
@click.option(
"-o",
"--out",
type=click.Path(dir_okay=True, file_okay=False, exists=True),
default=".",
help="output directory",
)
@click.option(
"-c",
"--cache-dir",
type=click.Path(dir_okay=True, file_okay=False),
default=".",
help="directory in which to keep/search for the cache",
)
@click.option(
"-p/-P",
"--plot/--no-plot",
default=True,
help="whether to make diagnostic plots of calibration solutions.",
)
@click.option(
"-s",
"--simulators",
multiple=True,
default=[],
help="antenna simulators to create diagnostic plots for.",
)
def run(config, path, out, cache_dir, plot, simulators):
"""Calibrate using lab measurements in PATH, and make all relevant plots."""
out = Path(out)
with open(config, "r") as fl:
settings = yaml.load(fl, Loader=yaml.FullLoader)
if cache_dir != ".":
settings.update(load_kwargs={"cache_dir": cache_dir})
obs = cc.CalibrationObservation(path=path, **settings)
if plot:
# Plot Calibrator properties
fig = obs.plot_raw_spectra()
fig.savefig(out / "raw_spectra.png")
figs = obs.plot_s11_models()
for kind, fig in figs.items():
fig.savefig(out / f"{kind}_s11_model.png")
fig = obs.plot_calibrated_temps(bins=256)
fig.savefig(out / "calibrated_temps.png")
fig = obs.plot_coefficients()
fig.savefig(out / "calibration_coefficients.png")
# Calibrate and plot antsim
for name in simulators:
antsim = obs.new_load(load_name=name)
fig = obs.plot_calibrated_temp(antsim, bins=256)
fig.savefig(out / f"{name}_calibrated_temp.png")
# Write out data
obs.write(out / obs.path.parent.name)
@main.command()
@click.argument("config", type=click.Path(dir_okay=False, file_okay=True, exists=True))
@click.argument("path", type=click.Path(dir_okay=True, file_okay=False, exists=True))
@click.option(
"-c", "--max-cterms", type=int, default=20, help="maximum number of cterms"
)
@click.option(
"-w", "--max-wterms", type=int, default=20, help="maximum number of wterms"
)
@click.option(
"-r/-R",
"--repeats/--no-repeats",
default=False,
help="explore repeats of switch and receiver s11",
)
@click.option(
"-n/-N", "--runs/--no-runs", default=False, help="explore runs of s11 measurements"
)
@click.option(
"-t",
"--delta-rms-thresh",
type=float,
default=0,
help="threshold marking rms convergence",
)
@click.option(
"-o",
"--out",
type=click.Path(dir_okay=True, file_okay=False, exists=True),
default=".",
help="output directory",
)
@click.option(
"-c",
"--cache-dir",
type=click.Path(dir_okay=True, file_okay=False),
default=".",
help="directory in which to keep/search for the cache",
)
def sweep(
config,
path,
max_cterms,
max_wterms,
repeats,
runs,
delta_rms_thresh,
out,
cache_dir,
):
"""Perform a sweep of number of terms to obtain the best parameter set."""
with open(config, "r") as fl:
settings = yaml.load(fl, Loader=yaml.FullLoader)
if cache_dir != ".":
settings.update(cache_dir=cache_dir)
obs = cc.CalibrationObservation(path=path, **settings)
cc.perform_term_sweep(
obs,
direc=out,
verbose=True,
max_cterms=max_cterms,
max_wterms=max_wterms,
explore_repeat_nums=repeats,
explore_run_nums=runs,
delta_rms_thresh=delta_rms_thresh,
)
@main.command()
@click.argument("path", type=click.Path(dir_okay=True, file_okay=False, exists=True))
@click.option(
"-c",
"--config",
default=None,
type=click.Path(dir_okay=False, file_okay=True, exists=True),
help="a YAML config file specifying parameters of the calibration",
)
@click.option(
"-o",
"--out",
type=click.Path(dir_okay=True, file_okay=False, exists=True),
default=None,
help="output directory",
)
@click.option(
"-d",
"--cache-dir",
type=click.Path(dir_okay=True, file_okay=False),
default=".",
help="directory in which to keep/search for the cache",
)
@click.option("-r/-R", "--report/--no-report", default=True)
@click.option("-u/-U", "--upload/--no-upload", default=False, help="auto-upload file")
@click.option("-t", "--title", type=str, help="title of the memo", default=None)
@click.option(
"-a",
"--author",
type=str,
help="adds an author to the author list",
default=None,
multiple=True,
)
@click.option("-n", "--memo", type=int, help="which memo number to use", default=None)
@click.option("-q/-Q", "--quiet/--loud", default=False)
@click.option("-p/-P", "--pdf/--no-pdf", default=True)
@click.option("--cterms", type=int, default=8)
@click.option("--wterms", type=int, default=10)
def report(
config,
path,
out,
cache_dir,
report,
upload,
title,
author,
memo,
quiet,
pdf,
cterms,
wterms,
):
"""Make a full notebook report on a given calibration."""
single_notebook = Path(__file__).parent / "notebooks/calibrate-observation.ipynb"
console.print(f"Creating report for '{path}'...")
path = Path(path)
if out is None:
out = path / "outputs"
else:
out = Path(out)
if not out.exists():
out.mkdir()
# Describe the filename...
fname = Path(f"calibration_{datetime.now().strftime('%Y-%m-%d-%H.%M.%S')}.ipynb")
if config is not None:
with open(config, "r") as fl:
settings = yaml.load(fl, Loader=yaml.FullLoader)
else:
settings = {}
if "cterms" not in settings:
settings["cterms"] = cterms
if "wterms" not in settings:
settings["wterms"] = wterms
console.print("Settings:")
for k, v in settings.items():
console.print(f"\t{k}: {v}")
settings.update(observation=str(path))
if cache_dir != ".":
settings.update(cache_dir=cache_dir)
# This actually runs the notebook itself.
pm.execute_notebook(
str(single_notebook),
out / fname,
parameters=settings,
kernel_name="edges",
)
console.print(f"Saved interactive notebook to '{out/fname}'")
if pdf: # pragma: nocover
make_pdf(out, fname)
if upload:
upload_memo(out / fname.with_suffix(".pdf"), title, memo, quiet)
@main.command()
@click.argument("path", type=click.Path(dir_okay=True, file_okay=False, exists=True))
@click.argument("cmppath", type=click.Path(dir_okay=True, file_okay=False, exists=True))
@click.option(
"-c",
"--config",
default=None,
type=click.Path(dir_okay=False, file_okay=True, exists=True),
help="a YAML config file specifying parameters of the calibration",
)
@click.option(
"-C",
"--config-cmp",
default=None,
type=click.Path(dir_okay=False, file_okay=True, exists=True),
help="a YAML config file specifying parameters of the comparison calibration",
)
@click.option(
"-o",
"--out",
type=click.Path(dir_okay=True, file_okay=False, exists=True),
default=None,
help="output directory",
)
@click.option(
"-d",
"--cache-dir",
type=click.Path(dir_okay=True, file_okay=False),
default=".",
help="directory in which to keep/search for the cache",
)
@click.option("-r/-R", "--report/--no-report", default=True)
@click.option("-u/-U", "--upload/--no-upload", default=False, help="auto-upload file")
@click.option("-t", "--title", type=str, help="title of the memo", default=None)
@click.option(
"-a",
"--author",
type=str,
help="adds an author to the author list",
default=None,
multiple=True,
)
@click.option("-n", "--memo", type=int, help="which memo number to use", default=None)
@click.option("-q/-Q", "--quiet/--loud", default=False)
@click.option("-p/-P", "--pdf/--no-pdf", default=True)
@click.option("--cterms", type=int, default=8)
@click.option("--wterms", type=int, default=10)
@click.option("--cterms-comp", type=int, default=8)
@click.option("--wterms-comp", type=int, default=10)
def compare(
path,
cmppath,
config,
config_cmp,
out,
cache_dir,
report,
upload,
title,
author,
memo,
quiet,
pdf,
cterms,
wterms,
cterms_comp,
wterms_comp,
):
"""Make a full notebook comparison report between two observations."""
single_notebook = Path(__file__).parent / "notebooks/compare-observation.ipynb"
console.print(f"Creating comparison report for '{path}' compared to '{cmppath}'")
path = Path(path)
cmppath = Path(cmppath)
if out is None:
out = path / "outputs"
else:
out = Path(out)
if not out.exists():
out.mkdir()
# Describe the filename...
fname = Path(
f"calibration-compare-{cmppath.name}_"
f"{datetime.now().strftime('%Y-%m-%d-%H.%M.%S')}.ipynb"
)
if config is not None:
with open(config, "r") as fl:
settings = yaml.load(fl, Loader=yaml.FullLoader)
else:
settings = {}
if "cterms" not in settings:
settings["cterms"] = cterms
if "wterms" not in settings:
settings["wterms"] = wterms
if config_cmp is not None:
with open(config_cmp, "r") as fl:
settings_cmp = yaml.load(fl, Loader=yaml.FullLoader)
else:
settings_cmp = {}
if "cterms" not in settings_cmp:
settings_cmp["cterms"] = cterms_comp
if "wterms" not in settings_cmp:
settings_cmp["wterms"] = wterms_comp
console.print("Settings for Primary:")
for k, v in settings.items():
console.print(f"\t{k}: {v}")
console.print("Settings for Comparison:")
for k, v in settings_cmp.items():
console.print(f"\t{k}: {v}")
if cache_dir != ".":
lk = settings.get("load_kwargs", {})
lk.update(cache_dir=cache_dir)
settings.update(load_kwargs=lk)
lk = settings_cmp.get("load_kwargs", {})
lk.update(cache_dir=cache_dir)
settings_cmp.update(load_kwargs=lk)
# This actually runs the notebook itself.
pm.execute_notebook(
str(single_notebook),
out / fname,
parameters={
"observation": str(path),
"cmp_observation": str(cmppath),
"obs_config_": settings,
"cmp_config_": settings_cmp,
},
kernel_name="edges",
)
console.print(f"Saved interactive notebook to '{out/fname}'")
# Now output the notebook to pdf
if pdf: # pragma: nocover
make_pdf(out, fname)
if upload:
upload_memo(out / fname.with_suffix(".pdf"), title, memo, quiet)
def make_pdf(out, fname):
"""Make a PDF out of an ipynb."""
# Now output the notebook to pdf
if report:
c = Config()
c.TemplateExporter.exclude_input_prompt = True
c.TemplateExporter.exclude_output_prompt = True
c.TemplateExporter.exclude_input = True
exporter = PDFExporter(config=c)
body, resources = exporter.from_filename(out / fname)
with open(out / fname.with_suffix(".pdf"), "wb") as fl:
fl.write(body)
console.print(f"Saved PDF to '{out / fname.with_suffix('.pdf')}'")
def upload_memo(fname, title, memo, quiet): # pragma: nocover
"""Upload as memo to loco.lab.asu.edu."""
try:
import upload_memo # noqa
except ImportError:
raise ImportError(
"You need to manually install upload-memo to use this option."
)
opts = ["memo", "upload", "-f", str(fname)]
if title:
opts.extend(["-t", title])
if memo:
opts.extend(["-n", memo])
if quiet:
opts.append("-q")
run(opts)
| 1.953125 | 2 |
Donation/moneymovement_test.py | arnoldcheung/porticode3 | 0 | 12787358 | #
# SPDX-Copyright: Copyright 2018 Capital One Services, LLC
# SPDX-License-Identifier: MIT
# Copyright 2018 Capital One Services, LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT
# OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
import moneymovement, unittest
from models import TransferType, TransferRequestStatus, TransferRequest
class MoneyMovementTest(unittest.TestCase):
def test_moneymovement(self):
base_url = 'https://api-sandbox.capitalone.com'
# need OAuth2
client_id = '83c59ee7d6a4479c8e142422cbe9022a'
client_secret = '<KEY>'
moneymovement.setup_oauth(client_id, client_secret, base_url)
accounts = moneymovement.get_eligible_accounts()
# self.assertEqual(4, len(accounts["accounts"]))
capitalone_savings = accounts["accounts"][0]
capitalone_checking = accounts["accounts"][1]
external_checking = accounts["accounts"][2]
external_checking_2 = accounts["accounts"][3]
print(accounts)
print(capitalone_savings["availableBalance"])
print()
#print(capitalone_checking)
print()
# print(external_checking)
print(external_checking_2)
# POST /money-movement/transfer-requests ACH
transfer_request = TransferRequest()
transfer_request.originMoneyMovementAccountReferenceId = external_checking["moneyMovementAccountReferenceId"]
transfer_request.destinationMoneyMovementAccountReferenceId = capitalone_savings["moneyMovementAccountReferenceId"]
transfer_request.transferAmount = 10.45 # Upto 2 decimal places
transfer_request.currencyCode = "USD" # optional Default: USD
transfer_request.transferDate = "2018-11-17"
transfer_request.memo = "dream car" # optional
transfer_request.transferType = TransferType.ACH.value
transfer_request.frequency = "OneTime" # optional Default: OneTime
transfer_response_ach = moneymovement.initiate_transfer(transfer_request)
print(transfer_response_ach)
#self.assertEqual(TransferRequestStatus.SCHEDULED.value, transfer_response_ach["transferRequestStatus"])
print(capitalone_savings["availableBalance"])
print()
'''
# POST /money-movement/transfer-requests Internal
transfer_request.originMoneyMovementAccountReferenceId = capitalone_checking["moneyMovementAccountReferenceId"]
transfer_request.transferType = TransferType.INTERNAL.value
transfer_response_internal = moneymovement.initiate_transfer(transfer_request)
self.assertEqual(TransferRequestStatus.SCHEDULED.value, transfer_response_internal["transferRequestStatus"])
# GET /money-movement/transfer-requests/{transferRequestId}
transfer_request_id = transfer_response_ach["transferRequestId"]
transfer_request_ach = moneymovement.get_transfer_request(transfer_request_id)
self.assertEqual(transfer_request_id, transfer_request_ach["transferRequestId"])
'''
# GET /money-movement/transfer-requests
filters = {
"fromDate": "2018-11-16",
"toDate": "2018-11-18",
"transferType": None,
"transferRequestStatus": None
}
transfer_requests = moneymovement.get_transfer_requests(capitalone_savings["moneyMovementAccountReferenceId"], filters)
transfers = transfer_requests['transferRequests']
for transfer in transfers:
print(transfer['transferRequestId'] + transfer['memo'])
print(transfer_requests)
#self.assertEqual(transfer_requests["transferRequests"][0]["transferType"], TransferType.ACH.value);
'''
# PATCH /money-movement/transfer-requests/{transferRequestId}
moneymovement.update_transfer_request(transfer_request_id, TransferRequestStatus.CANCELLED.value)
'''
if __name__ == '__main__':
unittest.main()
| 2.078125 | 2 |
coala_json/reporters/cli/cli.py | chay2199/coala-json | 0 | 12787359 |
import os
import argparse
import sys
from coala_json.reporters.ReporterFactory import ReporterFactory
from coala_json.loader.coalaJsonLoader import coalaJsonLoader
from coala_json.reporters.AppveyorReporter import AppveyorReporter
def get_path(filepath):
return os.path.join(os.getcwd(), filepath)
def create_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--junit', const=True, action='store_const',
help='mode in which coala will produce a JUnit report')
parser.add_argument('--checkstyle', const=True, action='store_const',
help='mode in which coala will produce a'
' Checkstyle report')
parser.add_argument('--tap', const=True, action='store_const',
help='mode in which coala will produce a TAP report')
parser.add_argument('--table', const=True, action='store_const',
help='mode in which coala will produce a HTML table'
' report')
parser.add_argument('--appveyor', const=True, action='store_const',
help='mode in which coala will upload test reports to'
' appveyor')
parser.add_argument('-f', '--input', help='path of the json input file')
parser.add_argument('-o', '--output', help='path of output report file. '
'If nothing is specified then '
'coala-json will print the '
'output to the stdout')
return parser
def main():
parser = create_parser()
args = parser.parse_args()
produce_report(parser, args)
def produce_report(parser, args):
if not args.input:
parser.error("Please specify a 'coala-json' input file")
if args.appveyor:
reporter = AppveyorReporter(coalaJsonLoader(), args.input)
output = reporter.to_output()
else:
with open(get_path(args.input)) as input_file:
factory = ReporterFactory(coalaJsonLoader(), parser, input_file,
args)
reporter = factory.get_reporter()
output = reporter.to_output()
if args.output:
with open(args.output, 'w+') as report:
report.write(output)
else:
sys.stdout.write(output)
if __name__ == '__main__':
sys.exit(main())
| 2.53125 | 3 |
examples/TfcBook/Chapter_3/Problem_8.py | leakec/tfc | 15 | 12787360 | import jax.numpy as np
from tfc import mtfc
from tfc.utils import egrad, NLLS
from tfc.utils.PlotlyMakePlot import MakePlot
# Constants:
n = [40,40]
nC = [2,[1,2]]
m = 40
r0 = 2.
rf = 4.
th0 = 0.
thf = 2.*np.pi
realSoln = lambda r,th: 4.*(-1024.+r**10)*np.sin(5.*th)/(1023.*r**5)
# Create TFC class:
myTfc = mtfc(n,nC,m,x0=[r0,th0],xf=[rf,thf])
H = myTfc.H
x = myTfc.x
# Create constrained expression:
g = lambda xi,*x: np.dot(H(*x),xi)
u1 = lambda xi,*x: g(xi,*x)+\
(x[0]-rf)/(r0-rf)*(0.-g(xi,r0*np.ones_like(x[0]),x[1]))+\
(x[0]-r0)/(rf-r0)*(4.*np.sin(5.*x[1])-g(xi,rf*np.ones_like(x[0]),x[1]))
u = lambda xi,*x: u1(xi,*x)+\
-x[1]/(2.*np.pi)*(u1(xi,x[0],thf*np.ones_like(x[1]))-u1(xi,x[0],th0*np.ones_like(x[1])))+\
(-x[1]**2+2.*np.pi*x[1])/(4.*np.pi)*(egrad(u1,2)(xi,x[0],thf*np.ones_like(x[1]))-egrad(u1,2)(xi,x[0],th0*np.ones_like(x[1])))
# Create the loss function:
ur = egrad(u,1)
u2r = egrad(ur,1)
u2th = egrad(egrad(u,2),2)
L = lambda xi: u2r(xi,*x)+1./x[0]*ur(xi,*x)+1./x[0]**2*u2th(xi,*x)
# Solve the problem:
xi = np.zeros(H(*x).shape[1])
xi,it,time = NLLS(xi,L,timer=True)
# Print out statistics:
print("Solution time: {0} seconds".format(time))
# Plot the solution:
R,Th = np.meshgrid(np.linspace(r0,rf,50),np.linspace(th0,thf,200))
dark = (R.flatten(),Th.flatten())
X = R*np.cos(Th)
Y = R*np.sin(Th)
U = u(xi,*dark).reshape((200,50))
p = MakePlot("x","y",zlabs="u(x,y,g(x,y))")
p.Surface(x=X,y=Y,z=U,showscale=False)
p.show()
# Plot the error
err = np.abs(realSoln(R,Th)-U)
p = MakePlot("x","y",zlabs="Error")
p.Surface(x=X,y=Y,z=err,showscale=False)
p.show()
| 1.828125 | 2 |
SaintsServer/team.py | johnsextro/saintsApp | 0 | 12787361 |
from google.appengine.ext import db
import logging
import string
package = 'SaintsSchedule'
class Team(db.Model):
teamId = db.StringProperty()
coach = db.StringProperty()
school = db.StringProperty()
grade = db.StringProperty()
year = db.IntegerProperty()
schedule = db.TextProperty()
season= db.StringProperty()
# schedule contains json like this below
# {"games": [{"game_date": "4/1/2013", "time": "1:00 PM", "home": "St. J & A", "away": "ICD", location": "St. Joes"}]}
def getGamesMultiTeams(self, teamIds):
team = Team()
games = ''
q = db.Query(Team)
q = Team.all()
logging.info(teamIds)
logging.info(teamIds.split(','))
q.filter("teamId IN", teamIds.split(','))
logging.info(q.count())
first = True
for team in q:
if first:
games = filter(lambda x: x in string.printable, team.schedule)
games = games[10:len(games)-2]
first = False
else:
temp = filter(lambda x: x in string.printable, team.schedule)
temp = temp[11:len(temp)-2]
games += ", " + temp
logging.info(games)
return games + "]"
def getGames(self, teamId):
team = Team()
games = ''
q = db.Query(Team)
q = Team.all()
q.filter("teamId =", str(teamId))
if q.count() > 0:
team = q.get()
games = filter(lambda x: x in string.printable, team.schedule)
return games
def getCoaches(self, school=None, season=None):
q = db.Query(Team, projection=('teamId', 'school', 'coach'))
q = Team.all()
if school is not None:
q.filter("school =", school)
if season is not None:
q.filter("season =", season)
q.order("school")
q.order("coach")
return q.run()
def getSeasons(self):
logging.info("Getting seasons from DB")
q = db.Query(Team, projection=('season'), distinct=True)
q = Team.all()
q.order("-season")
return q.run()
def getSchools(self, season=None):
q = db.Query(Team, projection=('school'), distinct=True)
q = Team.all()
if season is not None:
q.filter("season =", season)
q.order("school")
return q.run() | 2.90625 | 3 |
truth.py | kirichoi/NSSR | 0 | 12787362 | # -*- coding: utf-8 -*-
# COPYRIGHT 2017 <NAME>
# Truth network model analysis
from __future__ import print_function
import numpy as np
import tellurium as te
import antimony
import generate
import util
import clustering
def classify(setup, s_arr, c_arr):
"""
Ground truth classification. Returns initial perturbation response,
perturbation response, classification, and reaction index
:param g_truth: ground truth network matrix
:param s_truth: ground truth species concentrations
:param k_truth: ground truth rate constants
:param num_node: ground truth numbder of nodes
:param num_bound: ground truth numbder of boundary species
:param k_pert: perturbation amount
:param Thres: classification threshold
:rtype: list
"""
antimony.clearPreviousLoads()
# Strip and translate to string
t_s = setup.t_s.astype('str')
t_k = setup.t_k[setup.t_k != np.array(0)].astype('str')
#t_k_count = np.count_nonzero(setup.t_net)
t_ant = generate.generateAntimonyNew(setup.t_net, t_s, t_k, s_arr, c_arr)
#r_ind = np.array(np.where(setup.t_net != np.array(0))).T
r_ind = util.getPersistantOrder(setup.t_net, setup.p_net)
rr = te.loada(t_ant)
rr.reset() # ALWAYS RESET
rr.conservedMoietyAnalysis = True
pert_i = rr.steadyStateNamedArray() # Initial steady state
r_comb = clustering.getListOfCombinations(r_ind)
# Pertubation for rate constants
k_pert_output_i = np.empty([len(r_comb), setup.num_float])
for i in range(len(r_comb)):
k_pert_output_i[i] = util.perturbRate(rr, r_comb[i], setup.k_pert)
# Classification for rate constants
k_class_i = np.empty([len(r_comb), setup.num_float], dtype=int)
for i in range(len(r_comb)):
for j in range(setup.num_float):
k_diff = (k_pert_output_i[i][j] - pert_i[0][j])
if (np.abs(k_diff) > setup.Thres):
if k_diff < 0.:
k_class_i[i][j] = 1
else:
k_class_i[i][j] = 2
else:
k_class_i[i][j] = 0
antimony.clearPreviousLoads()
return pert_i[0], k_pert_output_i, k_class_i
def compareClass(t_analysis, k_class):
"""
Return indices of network matrices that fall into the same category and
those that does not fall into the same category as the result from true
network
:param t_analysis:
:param k_class:
"""
t_net_ind = []
nt_net_ind = []
for i in range(len(k_class)):
if np.array_equal(t_analysis[2], k_class[i]):
t_net_ind.append(i)
else:
nt_net_ind.append(i)
return t_net_ind, nt_net_ind
#@profile
def compareIndClass(t_analysis, k_class_i):
"""
Checks a single instance of classification against the true result. Returns
True if classification is identical and false otherwise
:param t_analysis:
:param k_class:
"""
partial = False
if np.array_equal(t_analysis, k_class_i):
partial = True
return partial
#def compareClass(p_r_ind, t_analysis, k_class, net_ind_group):
# """
# Return indices for network matrices that fall into the same category and
# those that does not fall into the same category as the output of ground
# truth model
#
# :param p_r_ind: persistant index
# :param t_analysis: output of ground truth classification
# :param k_class: classification output resulting from perturbing reaction
# :param net_ind_group: grouped reaction index
# :rtype: list
# """
#
# t_net_ind = []
# nt_net_ind = []
#
# for i in range(len(p_r_ind)):
# row = p_r_ind[i][0]
# col = p_r_ind[i][1]
#
# # Get generated classification from target indices
# t_k_class = sorted_k_class[row][col]
#
# # Get truth classification from target indices
# comp1 = np.array([np.in1d(t_analysis[3].T[0], row),
# np.in1d(t_analysis[3].T[1], col)])
#
# truth_k_class = t_analysis[2][comp1.all(axis=0)]
#
# # Get indices where generated classification and truth
# # classification is the same
# # TODO: Currently this matches all binary values
# ind_id = np.where((t_k_class == truth_k_class).all(axis=1))[0]
#
# # Network matrix indices that match with truth classification
# t_net_ind_i = net_ind_group[row][col][ind_id]
# # Network matrix indices that does not match with truth classification
# nt_net_ind_i = np.setdiff1d(net_ind_group[row][col], t_net_ind_i)
# t_net_ind.append(t_net_ind_i)
# nt_net_ind.append(nt_net_ind_i)
#
# return t_net_ind, nt_net_ind
| 2.609375 | 3 |
coding_intereview/merge_interval.py | purusharthmalik/Python-Bootcamp | 2 | 12787363 |
def insert(intervals, new_interval):
n = len(intervals)
start, end = 0, 0
while end < n:
if new_interval[0] <= intervals[end][1]:
if new_interval[1] < intervals[end][0]:
break
new_interval[0] = min(new_interval[0], intervals[end][0])
new_interval[1] = max(new_interval[1], intervals[end][1])
else:
start += 1
end += 1
return intervals[:start] + [new_interval] + intervals[end:]
intervals = [[5, 8], [9, 11]]
new_interval = [1, 3]
merged_interval = insert(intervals, new_interval)
print(merged_interval) | 3.875 | 4 |
zenithml/torch/layers/__init__.py | zenith-ml/zenithml | 0 | 12787364 | from zenithml.torch.layers.preprocess.numerical import NumericalLayer
from zenithml.torch.layers.preprocess.normalizers import MinMaxNormalizeLayer, LogNormalizeLayer, BucketizedLayer
from zenithml.torch.layers.preprocess.nhotencoder import NHotEncodingLayer
from zenithml.torch.layers.preprocess.embedding import EmbeddingLayer
# from zenithml.tf.layers.preprocess.cosine_similarity import CosineSimilarityLayer
| 1.421875 | 1 |
stock_report/analysis.py | pfeiffer-dev/stock-report | 0 | 12787365 |
# analysis.py
# stock-report
# Copyright 2022 <NAME>
# MIT License
import os
import plotly.express as px
import pandas as pd
class ANALYSIS:
def __init__(self, data):
"""
        Initialize the ANALYSIS class with the daily price data of the given data source.
"""
self.data = data.daily_prices()
def main(self):
"""
Creates all images for technical analysis.
These include:
- MACD
- SMA
- RSI
- Bollinger Bands
- Max Drawdown
"""
pd.options.mode.chained_assignment = None # Disable SettingWithCopyWarning
# access data
data = self.data[['4. close']]
ticker = data[::-1] # reverse dataframe for better calculation
# MACD
exp1 = ticker['4. close'].ewm(span=12, adjust=False).mean()
exp2 = ticker['4. close'].ewm(span=26, adjust=False).mean()
ticker['MACD'] = exp1 - exp2
ticker['Signal Line'] = ticker['MACD'].ewm(span=9, adjust=False).mean()
# SMA
ticker['SMA 30 Days'] = ticker['4. close'].rolling(window=30).mean()
ticker['SMA 100 Days'] = ticker['4. close'].rolling(window=100).mean()
# RSI
delta = ticker['4. close'].diff()
up = delta.clip(lower=0)
down = -1 * delta.clip(upper=0)
ema_up = up.ewm(com=13, adjust=False).mean()
ema_down = down.ewm(com=13, adjust=False).mean()
rs = ema_up / ema_down
ticker['RSI'] = 100 - (100 / (1 + rs))
# Bollinger Bands
ticker['Moving Average 30 Days'] = ticker['4. close'].rolling(window=20).mean()
ticker['Standard Deviation 30 Days'] = ticker['4. close'].rolling(window=20).std()
ticker['Upper Band'] = ticker['Moving Average 30 Days'] + (ticker['Standard Deviation 30 Days'] * 2)
ticker['Lower Band'] = ticker['Moving Average 30 Days'] - (ticker['Standard Deviation 30 Days'] * 2)
# Max Drawdown
ticker['Daily Percentage Change'] = ticker['4. close'].pct_change()
ticker['Wealth Index'] = 1000 * (1 + ticker['Daily Percentage Change'][2:]).cumprod()
ticker['Previous Peaks'] = ticker['Wealth Index'].cummax()
ticker['Maximal Drawdown'] = (ticker['Wealth Index'] - ticker['Previous Peaks']) / ticker['Previous Peaks']
# Create plots
plot_data = ticker[::-1] # reverse dataframe for better displaying
download_folder = os.path.expanduser("~")+"/Downloads/"
# Plot of MACD
plot_macd = px.line(data_frame=plot_data[0:365],
y=['MACD', 'Signal Line'],
title='MACD')
plot_macd.write_image(f'{download_folder}/stock-report_macd.png', format='png', width=1000, height=500)
# Plot of SMA
plot_sma = px.line(data_frame=plot_data[0:365],
y=['4. close', 'SMA 30 Days', 'SMA 100 Days'],
title='Simple Moving Average')
plot_sma.write_image(f'{download_folder}/stock-report_sma.png', format='png', width=1000, height=500)
# Plot of RSI
plot_rsi = px.line(data_frame=plot_data[0:365],
y=['RSI'],
title='Relative Strength Index')
plot_rsi.add_hline(y=30, line_width=3, line_dash="dash", line_color="green")
plot_rsi.add_hline(y=70, line_width=3, line_dash="dash", line_color="red")
plot_rsi.write_image(f'{download_folder}/stock-report_rsi.png', format='png', width=1000, height=500)
# Plot of Bollinger Bands
plot_bb = px.line(data_frame=plot_data[0:365],
y=['4. close', 'Moving Average 30 Days', 'Upper Band', 'Lower Band'],
title='Bolinger Bands')
plot_bb.write_image(f'{download_folder}/stock-report_bb.png', format='png', width=1000, height=500)
# Plot of Maximal Drawdown
plot_md = px.line(data_frame=plot_data[0:365],
y=['Maximal Drawdown'],
title='Maximal Drawdown')
plot_md.write_image(f'{download_folder}/stock-report_md.png', format='png', width=1000, height=500)
# Plot of Daily Percentage Change
plot_dpc = px.line(data_frame=plot_data[0:365],
y=['Daily Percentage Change'],
title='Daily Percentage Change')
plot_dpc.write_image(f'{download_folder}/stock-report_dpc.png', format='png', width=1000, height=500) | 3 | 3 |
assignment1.py | walter2645-cmis/walter2645-cmis-cs2 | 0 | 12787366 | myName = "Walter"#defines myName
myAgeinYears = 19.1#defines myAgeinYears
myHeightinMeters = 1.73#defines myHeightinMeters
sideofsquare = 1#defines sideofsquare
lengthofrectangle = 2#defines lengthofrectangle
heightofrectangle = 3#defines heightofrectangle
myAgeinMonths = myAgeinYears*12#calculates myAgeinMonths using myAgeinYears
yearsbeforedeath = 71-myAgeinYears#calculates yearsbeforedeath using myAgeinYears
myHeightinFeet = myHeightinMeters*3.28084#calculates myHeightinFeet using myHeightinMeters
diffFromAverageHeight = 1.734-myHeightinMeters#calculates diffFromAverageHeight using myHeightinMeters
areaofsquare = sideofsquare*sideofsquare#calculates areaofsquare using sideofsquare
halfvolumeofcube = areaofsquare*sideofsquare/2.0#calculates halfvolumeofcube using sideofsquare
oneNinthofRectangleArea = lengthofrectangle*heightofrectangle/9.0#calculates oneNinthofRectangleArea using lengthofrectangle and heightofrectangle
print "My name is " + myName + ". I am " + str(myAgeinYears) + " years old, which means i have apporximately " + str(yearsbeforedeath) + " years left to live. I am " + str(myHeightinMeters) + " meters tall, equivalent to " + str(myHeightinFeet) + " feet."#prints a brief description of me
print "A side of a square is " , sideofsquare , " unit(s) in length. The area of that square is " , areaofsquare , " unit(s) squared. The length of a rectangle is " , lengthofrectangle , " units, and the height of that rectangle is " , heightofrectangle , " units. One ninth of the rectangle's area is " , oneNinthofRectangleArea , " units squared."#prints some random data about a square and a rectangle
print ";) "*10000#prints 10000 winking smiley faces
| 3.890625 | 4 |
py_moysklad/entities/meta_entity.py | upmarket-cc/py_moysklad | 0 | 12787367 |
from typing import Optional
from py_moysklad.entities.entity import Entity
from py_moysklad.entities.meta import Meta
class MetaEntity(Entity):
id: Optional[str] # noqa: VNE003
account_id: Optional[str]
name: Optional[str]
meta: Optional[Meta]
| 1.851563 | 2 |
dic_maps.py | fraglord94/hackerrank-solutions | 0 | 12787368 |
n =int(input())
d = dict(input().split() for _ in range(n))
for _ in d:
str_1 = str(input())
if(str_1 in d):
print(str_1+"="+d[str_1])
else:
print('Not found')
| 3.65625 | 4 |
eap_radius_test/scripts/gen_conf_kem.py | crest42/hostapd | 0 | 12787369 | import sys
from jinja2 import Environment, FileSystemLoader
from os import path, makedirs, getcwd
curves = []
curves_string = ""
PQ_L1_CURVES = ["bike1l1cpa", "bike1l1fo",
"frodo640aes", "frodo640shake",
"hqc128_1_cca2",
"kyber512", "kyber90s512",
"ntru_hps2048509",
"lightsaber",
"sidhp434", "sidhp503", "sikep434", "sikep503"]
PQ_L3_CURVES = ["bike1l3cpa", "bike1l3fo",
"frodo976aes", "frodo976shake",
"hqc192",
"kyber768", "kyber90s768",
"ntru_hps2048677", "ntru_hrss701",
"saber",
"sidhp610", "sikep610",
"ntrulpr761", "sntrup761",]
PQ_L5_CURVES = ["frodo1344aes", "frodo1344shake",
"hqc256_1_cca2", "hqc256_2_cca2", "hqc256_3_cca2",
"kyber1024", "kyber90s1024",
"ntru_hps4096821",
"firesaber",
"sidhp751", "sikep751"]
ECDH_L1_CURVES = ['p256']
ECDH_L3_CURVES = ['p384']
ECDH_L5_CURVES = ['p521']
for pq_curve in PQ_L1_CURVES:
continue
curves.append(pq_curve)
for ecdh_curve in ECDH_L1_CURVES:
c = f"{ecdh_curve}_{pq_curve}"
curves.append(c)
for pq_curve in PQ_L3_CURVES:
curves.append(pq_curve)
for ecdh_curve in ECDH_L3_CURVES:
c = f"{ecdh_curve}_{pq_curve}"
curves.append(c)
for pq_curve in PQ_L5_CURVES:
continue
curves.append(pq_curve)
for ecdh_curve in ECDH_L5_CURVES:
c = f"{ecdh_curve}_{pq_curve}"
curves.append(c)
#curves = curves + ['P-256', 'P-384', 'P-521']
curves = curves + ['P-384']
file_loader = FileSystemLoader('.') # directory of template file
env = Environment(loader=file_loader)
template = 'eap_tls_kem_template.conf.j2'
template = env.get_template(template) # load template file
BASE_DIR = '../confs'
CONF_DIR = f'{BASE_DIR}/kem'
if not path.exists(BASE_DIR):
makedirs(BASE_DIR)
if not path.exists(CONF_DIR):
makedirs(CONF_DIR)
for curve in curves:
curves_string += f"{curve}:"
filename = f"{CONF_DIR}/eap_tls_{curve}.conf"
f = open(filename, 'w')
output = template.render(curve=curve)
f.write(output)
f.close()
print(curves_string)
| 2.015625 | 2 |
Ch1-Arrays-and-Strings/05_one_away.py | fatima-rizvi/CtCI-Solutions-6th-Edition | 0 | 12787370 | # There are 3 types of edits that can be made on a string, insert a character, replace a character, or remove a character.
# Check how many edits were made. Return True if the two strings are 0 to 1 edits apart.
def one_away(str1, str2):
edits = 0
dif = len(str1) - len(str2)
if abs(dif) > 1:
return False
elif abs(dif) == 1:
edits = 1
c1 = 0
c2 = 0
while c1 < len(str1) - 1 and c2 < len(str2) - 1:
if str1[c1] == str2[c2]:
c1 += 1
c2 += 1
elif edits < 1:
if len(str1) == len(str2):
edits += 1
c1 += 1
c2 += 1
elif len(str1) > len(str2):
c2 += 1
elif len(str2) > len(str1):
c1 += 1
else:
return False
return True
print(one_away("pale", "ple")) # True
print(one_away("pales", "pale")) # True
print(one_away("pale", "bale")) # True
print(one_away("pale", "bake")) # False
print(one_away("bats", "cats")) # True
print(one_away("bats", "cat")) # False
print(one_away("bats", "batter")) # False
| 3.9375 | 4 |
nnet/separate.py | on1262/conv-tasnet | 149 | 12787371 |
#!/usr/bin/env python
# wujian@2018
import os
import argparse
import torch as th
import numpy as np
from conv_tas_net import ConvTasNet
from libs.utils import load_json, get_logger
from libs.audio import WaveReader, write_wav
logger = get_logger(__name__)
class NnetComputer(object):
def __init__(self, cpt_dir, gpuid):
self.device = th.device(
"cuda:{}".format(gpuid)) if gpuid >= 0 else th.device("cpu")
nnet = self._load_nnet(cpt_dir)
self.nnet = nnet.to(self.device) if gpuid >= 0 else nnet
# set eval model
self.nnet.eval()
def _load_nnet(self, cpt_dir):
nnet_conf = load_json(cpt_dir, "mdl.json")
nnet = ConvTasNet(**nnet_conf)
cpt_fname = os.path.join(cpt_dir, "best.pt.tar")
cpt = th.load(cpt_fname, map_location="cpu")
nnet.load_state_dict(cpt["model_state_dict"])
logger.info("Load checkpoint from {}, epoch {:d}".format(
cpt_fname, cpt["epoch"]))
return nnet
def compute(self, samps):
with th.no_grad():
raw = th.tensor(samps, dtype=th.float32, device=self.device)
sps = self.nnet(raw)
sp_samps = [np.squeeze(s.detach().cpu().numpy()) for s in sps]
return sp_samps
def run(args):
mix_input = WaveReader(args.input, sample_rate=args.fs)
computer = NnetComputer(args.checkpoint, args.gpu)
for key, mix_samps in mix_input:
logger.info("Compute on utterance {}...".format(key))
spks = computer.compute(mix_samps)
norm = np.linalg.norm(mix_samps, np.inf)
for idx, samps in enumerate(spks):
samps = samps[:mix_samps.size]
# norm
samps = samps * norm / np.max(np.abs(samps))
write_wav(
os.path.join(args.dump_dir, "spk{}/{}.wav".format(
idx + 1, key)),
samps,
fs=args.fs)
logger.info("Compute over {:d} utterances".format(len(mix_input)))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=
"Command to do speech separation in time domain using ConvTasNet",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("checkpoint", type=str, help="Directory of checkpoint")
parser.add_argument(
"--input", type=str, required=True, help="Script for input waveform")
parser.add_argument(
"--gpu",
type=int,
default=-1,
help="GPU device to offload model to, -1 means running on CPU")
parser.add_argument(
"--fs", type=int, default=8000, help="Sample rate for mixture input")
parser.add_argument(
"--dump-dir",
type=str,
default="sps_tas",
help="Directory to dump separated results out")
args = parser.parse_args()
run(args) | 1.953125 | 2 |
leetcode-python/num056.py | shuaizi/leetcode | 0 | 12787372 | __author__ = 'shuai'
# Definition for an interval.
class Interval(object):
def __init__(self, s=0, e=0):
self.start = s
self.end = e
def __str__(self):
return str(self.start) + "-" + str(self.end)
class Solution(object):
def merge(self, intervals):
"""
:type intervals: List[Interval]
:rtype: List[Interval]
"""
        if not intervals or len(intervals) <= 1:
            return intervals
        # sort by start, then end; note that sorted() returns a new list
        intervals = sorted(intervals, key=lambda a: (a.start, a.end))
ret = []
ret.append(intervals[0])
for i in range(1, len(intervals)):
tmp = ret[-1]
if tmp.end < intervals[i].start:
ret.append(intervals[i])
else:
if tmp.end <= intervals[i].end:
tmp.end = intervals[i].end
return ret
a = Interval(1, 4)
b = Interval(0, 4)
sol = Solution()
sss = sol.merge([a, b])
for i in range(len(sss)):
print sss[i]
| 3.796875 | 4 |
tests/test_solvers_rt1d_const_flux.py | jlashner/ares | 0 | 12787373 | """
test_const_ionization.py
Author: <NAME>
Affiliation: University of Colorado at Boulder
Created on: Thu Oct 16 14:46:48 MDT 2014
Description:
"""
import ares
import numpy as np
import matplotlib.pyplot as pl
from ares.physics.CrossSections import PhotoIonizationCrossSection as sigma
s_per_yr = ares.physics.Constants.s_per_yr
pars = \
{
'problem_type': 0,
'grid_cells': 1,
'initial_ionization': [1.-1e-6, 1e-6],
#'initial_temperature': 1e4,# make cold so collisional ionization is negligible
'isothermal': False,
'stop_time': 10.0,
'plane_parallel': True,
'recombination': False, # To match analytical solution
'source_type': 'toy',
'source_qdot': 1e4, # solver fails when this is large (like 1e10)
'source_lifetime': 1e10,
'source_E': [13.60000001],
'source_LE': [1.0],
'secondary_ionization': 0,
'collisional_ionization': 0,
'logdtDataDump': 0.5,
'initial_timestep': 1e-15,
}
def test(rtol=1e-2):
# Numerical solution
sim = ares.simulations.RaySegment(**pars)
sim.run()
t, xHII = sim.CellEvolution(field='h_2')
fig = pl.figure(1, figsize=(8, 12))
ax1 = fig.add_subplot(211); ax2 = fig.add_subplot(212)
ax1.loglog(t / s_per_yr, xHII, color='k', label='numerical')
ax1.set_ylim(1e-8, 5)
ax1.set_ylabel(r'$x_{\mathrm{HII}}$')
# Analytic solution: exponential time evolution
sigma0 = sigma(pars['source_E'][0])
qdot = pars['source_qdot']
Gamma = qdot * sigma0
xi0 = pars['initial_ionization'][1]
C = 1. - xi0
def xi(t, Gamma=Gamma):
return 1. - C * np.exp(-Gamma * t)
xHII_anyl = np.array(list(map(xi, t)))
ax1.scatter(t / s_per_yr, xHII_anyl, color='b', facecolors='none', s=100,
label='analytic')
ax1.legend(loc='upper left', fontsize=14)
# Only test accuracy at somewhat later times
mask = t > 0
err = np.abs(xHII[mask] - xHII_anyl[mask]) / xHII_anyl[mask]
ax2.loglog(t / s_per_yr, err)
ax2.set_xlabel(r'$t \ (\mathrm{yr})$')
ax2.set_ylabel(r'rel. error')
pl.draw()
pl.savefig('{!s}.png'.format(__file__[0:__file__.rfind('.')]))
pl.close()
assert np.allclose(xHII[mask], xHII_anyl[mask], rtol=rtol, atol=0)
if __name__ == '__main__':
test()
| 1.75 | 2 |
PythonFiles/SUWSS/dataCollection.py | VijayS02/Random-Programming-Items | 0 | 12787374 | import scipy.io
import numpy as np
import sys
import os.path
import matplotlib.pyplot as plt
trans = [139.62,119.43,36.48,14.5]
mdata = []
def avgWaveSpeed(data,ampStart,ampEnd,freq,transducers,index1,index2):
total = 0
count = 0
print(data)
zer = highestPoint(data,ampStart,0)[0]
tz = np.arange(ampStart,ampEnd,(1/freq))
for i in tz:
tmp = highestPoint(data,i,zer)
#print(tmp)
print(tmp, " " , index1 , " ", index2)
total = total + (transducers[index2]-transducers[index1])/(tmp[index2+1] -tmp[index1+1])
count = count +1
total = total/count
return abs(total*1000)
def highestPoint(data,val,start):
x = []
x.append(0)
for b in range(start,len(data)):
count = 0
i = data[b]
#print(i," ",count)
for z in i :
if(z[0] > val):
x.append(count)
break
count = count + 1
lowest = 10000
highest = 0
for v in x:
if(v <= lowest):
lowest = v
if(v>= highest):
highest = v
x[0] = lowest
x.append(highest)
return x
def cailbration(data):
high = False
for x in data:
if(x[0]>2):
high = True
break
if(high):
for z in range(0,len(data)):
data[z] = ((data[z]*0.5001 + 1.0032 - 1.01325)*10.1974)+10
else:
for z in range(0,len(data)):
data[z] = ((data[z]*3.1277 - 0.263 - 1.01325)*10.1974)+10
return data
def onclick(event):
print('%s click: button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %
('double' if event.dblclick else 'single', event.button,
event.x, event.y, event.xdata, event.ydata))
text = ""
if(os.path.isfile('testfile.txt')):
file2 = open('testfile.txt')
text = file2.read()
file2.close()
file = open('testfile.txt',"w")
file.write(text + str(event.ydata)+'\n')
file.close()
mdata = []
x = open('testfile.txt').read().split('\n')
if(len(x) >2):
#print(x)
mdata.append(float(x[0]))
mdata.append(float(x[1]))
file = open('testfile.txt',"w")
file.write("")
file.close()
#print(avgWaveSpeed(data,mdata[0],mdata[1],10,trans,2,0))
def main(file,idx,x):
fig = plt.figure(x)
gid = 200+(x*10)+idx
#if(x==1):
#fig = plt.figure(3)
#else:
#fig = plt.figure(4)
#location = input('MatLabFile Location\n')
location = file
mat = scipy.io.loadmat(location)
data = []
x = mat.get('VoltageAI0')[0][0][1][0][0]
time = []
for i in range(0,x):
time.append(i/1000)
#print(time)
for i in range(0,10):
tmp = 'VoltageAI'+str(i)
if(mat.get(tmp)==None):
break
else:
data.append(cailbration(mat.get(tmp)[0][0][0]))
colors = ['b','y','m','k','r']
count = 0
#zxcv = avgWaveSpeed(data,29.5,31,10,trans,2,0)
pltinone = True
#zxc = input("All in one? y/n?\n")
zxc = "y"
if(zxc =="n"):
pltinone = False
fig = plt.figure(2)
for i in data:
if(pltinone):
plt.subplot(gid)
line = plt.plot(time,i)
import random
r = lambda: random.randint(0,255)
colorz = ('#%02X%02X%02X' % (r(),r(),r()))
plt.setp(line,'color',colorz,'antialiased',True)
else:
cur = 221 + count
plt.subplot(cur)
plt.ylabel('Bar ( gauge )')
plt.xlabel('Time ( s )')
line = plt.plot(time,i)
plt.setp(line,'color',colors[count],'antialiased',True)
count = count+1
plt.ylabel('Meters of water( m )')
plt.xlabel('Time ( s )')
cid = fig.canvas.mpl_connect('button_press_event', onclick)
plt.axis([0,8, 25, 35])
fig.canvas.mpl_disconnect(cid)
#return zxcv
return 1
sumx = 0
vals = []
main("D:\\Files\\Documents\\Programming\\PythonFiles\\SUWSS\\TDMS\\24July2018_Intact_1.mat",1,1)
#for i in range(1,2):
# print(i)
# sumx = sumx+(main('\TDMS\24July2018_Intact_'+str(i)+'.mat',i,2))
#print(sumx)
'''
sumy= 0
i = 6
for i in range(6,11):
print(i)
sumy = sumy+(main('LL Pipe Case\\24July2018_LL_'+str(i)+'.mat',240+i-5,2))
sumy = (sumy/5)
'''
#print(abs(sumx-sumy))
plt.show()
| 2.640625 | 3 |
Server/sputnik.py | SergeyMakeev/Sputnik | 2 | 12787375 | <filename>Server/sputnik.py
# coding=utf-8
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import urlparse
import os
import traceback
import datetime
import math
from stat import *
import json
import urllib2
import gzip
import StringIO
import struct
# encode binary string to ascii string
def binary_encode(v):
assert isinstance(v, str)
res = ""
for i in xrange(0, len(v), 1):
_h = math.floor(ord(v[i]) / 16)
_l = ord(v[i]) - (_h * 16)
res += chr(int(_h + 65))
res += chr(int(_l + 97))
return res
# decode ascii string to binary string
def binary_decode(v):
assert isinstance(v, str)
arr = ""
for i in xrange(0, len(v), 2):
bt = (ord(v[i]) - 65) * 16 + (ord(v[i+1]) - 97)
arr += chr(bt)
return arr
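# Illustrative round trip (not part of the original code): each input byte becomes a
# letter pair, 'A'-'P' for the high nibble and 'a'-'p' for the low nibble, e.g.
#   binary_encode('\x2a') -> 'Ck'   (high nibble 2 -> 'C', low nibble 10 -> 'k')
#   binary_decode('Ck')   -> '\x2a'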
class RobloxMeshVertex:
def __init__(self, px, py, pz, nx, ny, nz, u, v, w, r, g, b, a):
self.p_x = px
self.p_y = py
self.p_z = pz
self.n_x = nx
self.n_y = ny
self.n_z = nz
self.u = u
self.v = v
self.w = w
self.r = r
self.g = g
self.b = b
self.a = a
class RobloxMeshTriangle:
def __init__(self, i0, i1, i2):
self.i0 = i0
self.i1 = i1
self.i2 = i2
class RobloxMesh:
def __init__(self):
self.vertices = []
self.triangles = []
self.min_x = 99999999.0
self.min_y = 99999999.0
self.min_z = 99999999.0
self.max_x = -99999999.0
self.max_y = -99999999.0
self.max_z = -99999999.0
def append_vertex(self, vrx):
self.min_x = min(self.min_x, vrx.p_x)
self.min_y = min(self.min_y, vrx.p_y)
self.min_z = min(self.min_z, vrx.p_z)
self.max_x = max(self.max_x, vrx.p_x)
self.max_y = max(self.max_y, vrx.p_y)
self.max_z = max(self.max_z, vrx.p_z)
self.vertices.append(vrx)
def append_triangle(self, idx):
self.triangles.append(idx)
#
# https://wiki.roblox.com/index.php?title=Roblox_Mesh_Format
#
# version 1.00
#
# This is the original version of Roblox's mesh format, which is stored purely in ASCII and can be read by humans.
# These files are stored as 3 lines of text:
#
# version 1.00
# num_faces
# data
#
# The num_faces line represents the number of polygons to expect in the data line.
# The data line represents a series of concatenated Vector3 pairs, stored inbetween brackets with the XYZ coordinates
# separated with commas as so: [1.00,1.00,1.00]
#
# You should expect to see num_faces * 9 concatenated Vector3 pairs in this line.
# Every single vertex is represented in the following manner:
# [vX,vY,vZ][nX,nY,nZ][tU,tV,tW]
#
# The 1st pair, [vX,vY,vZ] is the location of the vertex point. In version 1.00, the XYZ values are doubled,
# so you should scale the values down by 0.5 when converting them to floats. This issue is fixed in version 1.01.
# The 2nd pair, [nX,nY,nZ] is the normal unit vector of the vertex point, which is used to determine how light
# bounces off of this vertex.
# The 3rd pair, [tU,tV,tW] is the 2D UV texture coordinate of the vertex point, which is used to determine how the
# mesh's texture is applied to the mesh. The tW coordinate is unused, so you can expect it's value to be zero.
# One important quirk to note is that the tV coordinate is inverted, so when converting it to a float and
# storing it, the value should be stored as 1.f - tV.
#
# Every 3 sets of 3 Vector3 pairs are used to form a polygon, hence why you should expect to see num_faces * 9.
#
#
# version 2.00
# The version 2.00 format is a lot more complicated, as it's stored in a binary format and files may differ in
# structure depending on factors that aren't based on the version number. You will need some advanced knowledge in
# Computer Science to understand this portion of the article. This will be presented in a C syntax.
#
# MeshHeader
# After reading past the version 2.00\n text, the first chunk of data can be represented with the following struct:
#
# struct MeshHeader
# {
# unsigned short sizeofMeshHeader; // Used to verify your MeshHeader struct is the same as this file's MeshHeader struct
# unsigned char sizeofMeshVertex; // Used to verify your MeshVertex struct is the same as this file's MeshVertex struct
#     unsigned char sizeofMeshFace; // Used to verify your MeshFace struct is the same as this file's MeshFace struct
# unsigned int num_vertices; // The number of vertices in this mesh
# unsigned int num_faces; // The number of faces in this mesh
# }
#
# One critical quirk to note, is that sizeofMeshVertex can vary between 36 and 40 bytes, due to the introduction of
# vertex color data to newer meshes. If you don't account for this difference, the mesh may not be read correctly.
#
# MeshVertex
#
# Once you have read the MeshHeader, you should expect to read an array, MeshVertex[num_vertices] vertices;
# using the following struct:
#
# struct MeshVertex
# {
# float vx, vy, vz; // XYZ coordinate of the vertex
# float nx, ny, nz; // XYZ coordinate of the vertex's normal
# float tu, tv, tw; // UV coordinate of the vertex(tw is reserved)
#
# // WARNING: The following bytes only exist if 'MeshHeader.sizeofMeshVertex' is equal to 40, rather than 36.
# unsigned char r, g, b, a; // The RGBA color of the vertex
# }
#
# This array represents all of the vertices in the mesh, which can be linked together into faces.
#
# MeshFace
#
# Finally, you should expect to read an array, MeshFace[num_faces] faces; using the following struct:
# struct MeshFace
# {
# unsigned int a; // 1st Vertex Index
# unsigned int b; // 2nd Vertex Index
# unsigned int c; // 3rd Vertex Index
# }
#
# This array represents indexes in the MeshVertex array that was noted earlier.
# The 3 MeshVertex structs that are indexed using the MeshFace are used to form a polygon in the mesh.
#
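# Sketch (not in the original code): the 12-byte header described above matches the
# struct format '<HBBII' (assuming little-endian, as the per-field reads below imply
# on typical platforms), so it could equivalently be read in one call:
#   sizeofMeshHeader, sizeofMeshVertex, sizeofMeshFace, num_vertices, num_faces = \
#       struct.unpack('<HBBII', data_stream.read(12))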
def parse_roblox_mesh(mesh_data):
data_stream = StringIO.StringIO(mesh_data)
header = data_stream.read(12)
mesh = RobloxMesh()
if header == 'version 1.00':
# skip line
data_stream.readline()
num_faces = int(data_stream.readline())
print("old mesh: " + str(num_faces))
text_data = data_stream.readline()
text_data = text_data.replace('][', ';')
text_data = text_data.replace('[', '')
text_data = text_data.replace(']', '')
pairs = text_data.split(";")
pairs_count = len(pairs)
print(str(pairs_count))
if pairs_count != (num_faces * 9):
print("Invalid number of pairs")
return None
for i in range(0, pairs_count, 3):
values = pairs[i + 0].split(",")
if len(values) != 3:
print("Invalid number of values")
return None
pos_x = float(values[0]) * 0.5
pos_y = float(values[1]) * 0.5
pos_z = float(values[2]) * 0.5
values = pairs[i + 1].split(",")
if len(values) != 3:
print("Invalid number of values")
return None
nrm_x = float(values[0])
nrm_y = float(values[1])
nrm_z = float(values[2])
values = pairs[i + 2].split(",")
if len(values) != 3:
print("Invalid number of values")
return None
t_u = float(values[0])
t_v = float(values[1])
t_w = float(values[2])
vrx = RobloxMeshVertex(pos_x, pos_y, pos_z, nrm_x, nrm_y, nrm_z, t_u, t_v, t_w, 1, 1, 1, 1)
mesh.append_vertex(vrx)
for i in range(0, num_faces):
tri = RobloxMeshTriangle(i*3+0, i*3+1, i*3+2)
mesh.append_triangle(tri)
return mesh
if header != 'version 2.00':
print("Unsupported mesh header: " + str(header))
return None
# skip '\n'
data_stream.read(1)
sizeof_mesh_header = struct.unpack('H', data_stream.read(2))[0]
sizeof_mesh_vertex = struct.unpack('B', data_stream.read(1))[0]
sizeof_mesh_face = struct.unpack('B', data_stream.read(1))[0]
num_vertices = struct.unpack('I', data_stream.read(4))[0]
num_faces = struct.unpack('I', data_stream.read(4))[0]
# print("sizeof_mesh_header = " + str(sizeof_mesh_header))
# print("sizeof_mesh_vertex = " + str(sizeof_mesh_vertex))
# print("sizeof_mesh_face = " + str(sizeof_mesh_face))
# print("num_vertices = " + str(num_vertices))
# print("num_faces = " + str(num_faces))
if sizeof_mesh_header != 12:
print("Unsupported mesh header size: " + str(sizeof_mesh_header))
return None
if sizeof_mesh_vertex != 36 and sizeof_mesh_vertex != 40:
print("Unsupported vertex size: " + str(sizeof_mesh_vertex))
return None
if sizeof_mesh_face != 12:
print("Unsupported face size: " + str(sizeof_mesh_face))
return None
for i in range(0, num_vertices):
pos_x = struct.unpack('f', data_stream.read(4))[0]
pos_y = struct.unpack('f', data_stream.read(4))[0]
pos_z = struct.unpack('f', data_stream.read(4))[0]
nrm_x = struct.unpack('f', data_stream.read(4))[0]
nrm_y = struct.unpack('f', data_stream.read(4))[0]
nrm_z = struct.unpack('f', data_stream.read(4))[0]
t_u = struct.unpack('f', data_stream.read(4))[0]
t_v = struct.unpack('f', data_stream.read(4))[0]
t_w = struct.unpack('f', data_stream.read(4))[0]
if sizeof_mesh_vertex == 40:
col_r = struct.unpack('B', data_stream.read(1))[0]
col_g = struct.unpack('B', data_stream.read(1))[0]
col_b = struct.unpack('B', data_stream.read(1))[0]
col_a = struct.unpack('B', data_stream.read(1))[0]
else:
col_r = 0xff
col_g = 0xff
col_b = 0xff
col_a = 0xff
vrx = RobloxMeshVertex(pos_x, pos_y, pos_z, nrm_x, nrm_y, nrm_z, t_u, t_v, t_w, col_r, col_g, col_b, col_a)
mesh.append_vertex(vrx)
for i in range(0, num_faces):
index0 = struct.unpack('I', data_stream.read(4))[0]
index1 = struct.unpack('I', data_stream.read(4))[0]
index2 = struct.unpack('I', data_stream.read(4))[0]
tri = RobloxMeshTriangle(index0, index1, index2)
mesh.append_triangle(tri)
return mesh
def save_mesh_to_obj(target_dir, mesh_id, mesh):
size_x = mesh.max_x - mesh.min_x
size_y = mesh.max_y - mesh.min_y
size_z = mesh.max_z - mesh.min_z
print(size_x)
print(size_y)
print(size_z)
print(mesh_id)
file_name = target_dir + '/' + str(mesh_id) + '.obj'
print(file_name)
file_handle = open(file_name, 'w+')
for v in mesh.vertices:
line = 'v ' + str(v.p_x / size_x) + ' ' + str(v.p_y / size_y) + ' ' + str(v.p_z / size_z) + '\n'
file_handle.write(line)
for v in mesh.vertices:
line = 'vt ' + str(v.u) + ' ' + str(v.v) + '\n'
file_handle.write(line)
for v in mesh.vertices:
line = 'vn ' + str(v.n_x) + ' ' + str(v.n_y) + ' ' + str(v.n_z) + '\n'
file_handle.write(line)
line = 'g m' + str(mesh_id) + '\n'
file_handle.write(line)
for t in mesh.triangles:
i0 = str(t.i0 + 1)
i1 = str(t.i1 + 1)
i2 = str(t.i2 + 1)
line = 'f ' + i0 + '/' + i0 + '/' + i0 + ' ' + i1 + '/' + i1 + '/' + i1 + ' ' + i2 + '/' + i2 + '/' + i2 + '\n'
file_handle.write(line)
file_handle.close()
return file_name
def save_scene_to_mel(file_name, scene_objects):
file_handle = open(file_name, 'w+')
cmd = 'string $loc[];\n'
file_handle.write(cmd)
cmd = 'string $geom[];\n'
file_handle.write(cmd)
for scene_object in scene_objects:
full_name = scene_object['full_name']
full_name = full_name.replace('.', '_')
full_name = full_name.replace('~', '_')
full_name = full_name.replace(' ', '_')
full_name = full_name.replace('(', '_')
full_name = full_name.replace(')', '_')
full_name = full_name.replace('/', '_')
full_name = full_name.replace('\\', '_')
pos_x = scene_object['pos_x']
pos_y = scene_object['pos_y']
pos_z = scene_object['pos_z']
rot_x = scene_object['rot_x'] * 57.2958
rot_y = scene_object['rot_y'] * 57.2958
rot_z = scene_object['rot_z'] * 57.2958
scl_x = scene_object['scl_x']
scl_y = scene_object['scl_y']
scl_z = scene_object['scl_z']
shape = scene_object['shape']
cmd = '$loc = `spaceLocator -n ' + full_name + '`;\n'
file_handle.write(cmd)
if shape == 'Enum.PartType.Block':
cmd = '$geom = `polyCube -n block`;\n'
file_handle.write(cmd)
cmd = 'parent $geom[0] $loc[0];\n'
file_handle.write(cmd)
if shape == 'Enum.PartType.Ball':
cmd = '$geom = `polySphere -r 0.5 -n ball`;\n'
file_handle.write(cmd)
cmd = 'parent $geom[0] $loc[0];\n'
file_handle.write(cmd)
if shape == 'Enum.PartType.Cylinder':
cmd = '$geom = `polyCylinder -r 0.5 -h 1 -n cyl`;\n'
file_handle.write(cmd)
cmd = 'setAttr($geom[0] + ".rotateZ") -90;\n'
file_handle.write(cmd)
cmd = 'parent $geom[0] $loc[0];\n'
scl = min(scl_y, scl_z)
scl_y = scl
scl_z = scl
file_handle.write(cmd)
if (shape == 'MeshPart' or shape == 'SpecialMesh') and 'mesh_file_name' in scene_object:
mesh_id = scene_object['mesh_id']
mesh_file_name = scene_object['mesh_file_name']
cmd = 'file -import -type "OBJ" "' + mesh_file_name + '";\n'
file_handle.write(cmd)
if shape == 'SpecialMesh':
mesh_bbox_x = scene_object['mesh_bbox_x']
mesh_bbox_y = scene_object['mesh_bbox_y']
mesh_bbox_z = scene_object['mesh_bbox_z']
cmd = 'setAttr |m' + str(mesh_id) + '.scaleX ' + str(mesh_bbox_x) + ';\n'
file_handle.write(cmd)
cmd = 'setAttr |m' + str(mesh_id) + '.scaleY ' + str(mesh_bbox_y) + ';\n'
file_handle.write(cmd)
cmd = 'setAttr |m' + str(mesh_id) + '.scaleZ ' + str(mesh_bbox_z) + ';\n'
file_handle.write(cmd)
cmd = 'parent |m' + str(mesh_id) + ' $loc[0];\n'
file_handle.write(cmd)
cmd = 'setAttr($loc[0] + ".translateX") ' + str(pos_x) + ';\n'
file_handle.write(cmd)
cmd = 'setAttr($loc[0] + ".translateY") ' + str(pos_y) + ';\n'
file_handle.write(cmd)
cmd = 'setAttr($loc[0] + ".translateZ") ' + str(pos_z) + ';\n'
file_handle.write(cmd)
cmd = 'setAttr($loc[0] + ".rotateX") ' + str(rot_x) + ';\n'
file_handle.write(cmd)
cmd = 'setAttr($loc[0] + ".rotateY") ' + str(rot_y) + ';\n'
file_handle.write(cmd)
cmd = 'setAttr($loc[0] + ".rotateZ") ' + str(rot_z) + ';\n'
file_handle.write(cmd)
cmd = 'setAttr($loc[0] + ".scaleX") ' + str(scl_x) + ';\n'
file_handle.write(cmd)
cmd = 'setAttr($loc[0] + ".scaleY") ' + str(scl_y) + ';\n'
file_handle.write(cmd)
cmd = 'setAttr($loc[0] + ".scaleZ") ' + str(scl_z) + ';\n'
file_handle.write(cmd)
file_handle.close()
def export_roblox_scene(file_name, scene_json):
print(scene_json)
scene_objects = json.loads(scene_json)
target_dir = os.path.dirname(file_name)
processed_meshes = {}
id_to_mesh = {}
for scene_object in scene_objects:
mesh_url = scene_object['mesh_id'].rstrip()
mesh_url = mesh_url.replace('rbxassetid://', 'http://www.roblox.com/asset/?id=')
mesh_id = mesh_url.replace('http://www.roblox.com/asset/?id=', '')
if mesh_id not in processed_meshes:
if len(mesh_url) > 0:
print("Fetch: " + mesh_url)
request = urllib2.Request(mesh_url)
request.add_header('Accept-Encoding', 'gzip')
response = urllib2.urlopen(request)
if response.info().get('Content-Encoding') == 'gzip':
mesh_compressed_data = response.read()
mesh_compressed_stream = StringIO.StringIO(mesh_compressed_data)
gzip_reader = gzip.GzipFile(fileobj=mesh_compressed_stream)
mesh_data = gzip_reader.read()
else:
mesh_data = response.read()
mesh = parse_roblox_mesh(mesh_data)
if mesh is not None:
mesh_file_name = save_mesh_to_obj(target_dir, mesh_id, mesh)
scene_object['mesh_file_name'] = mesh_file_name
processed_meshes[mesh_id] = mesh_file_name
id_to_mesh[mesh_id] = mesh
else:
scene_object['mesh_file_name'] = processed_meshes[mesh_id]
if len(mesh_id) > 0 and mesh_id in id_to_mesh:
mesh = id_to_mesh[mesh_id]
scene_object['mesh_bbox_x'] = mesh.max_x - mesh.min_x
scene_object['mesh_bbox_y'] = mesh.max_y - mesh.min_y
scene_object['mesh_bbox_z'] = mesh.max_z - mesh.min_z
else:
scene_object['mesh_bbox_x'] = 1.0
scene_object['mesh_bbox_y'] = 1.0
scene_object['mesh_bbox_z'] = 1.0
scene_object['mesh_id'] = mesh_id
save_scene_to_mel(file_name, scene_objects)
# noinspection PyBroadException
class S(BaseHTTPRequestHandler):
def _set_headers(self):
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
def do_HEAD(self):
self._set_headers()
def do_GET(self):
if self.client_address[0] != '127.0.0.1':
print ('Unauthorized access attempt from ' + str(self.client_address))
self.send_error(404)
return
url = urlparse.urlparse(self.path)
cmd = url.path
params = urlparse.parse_qs(url.query)
if cmd == '/ver':
return self._ver(params)
if cmd == '/getcwd':
return self._getcwd()
if cmd == '/dirlist':
return self._dirlist(params)
if cmd == '/read':
return self._read(params)
if cmd == '/stat':
return self._stat(params)
print('Unknown command: ' + cmd)
print(self.path)
self.send_error(404)
def do_POST(self):
if self.client_address[0] != '127.0.0.1':
print ('Unauthorized access attempt from ' + str(self.client_address))
self.send_error(404)
return
url = urlparse.urlparse(self.path)
cmd = url.path
params = urlparse.parse_qs(url.query)
if cmd == '/chdir':
return self._chdir(params)
if cmd == '/write':
return self._write(params)
if cmd == '/scene_export':
return self._export_scene(params)
print('Unknown command: ' + cmd)
print(self.path)
self.send_error(404)
def _ver(self, params):
try:
if 'guid' in params:
guid_encoded = params['guid'][0]
guid = binary_decode(guid_encoded)
guid_answer = binary_encode(guid)
self._set_headers()
self.wfile.write('{ "result" : "ok", "ver" : "1.0", "guid" : "' + guid_answer + '" }')
else:
self._set_headers()
self.wfile.write('{ "result" : "ok", "ver" : "1.0" }')
except Exception:
print ('ver exception')
print(traceback.format_exc())
self._set_headers()
self.wfile.write('{ "result" : "error" }')
def _getcwd(self):
try:
current_dir = os.getcwd()
current_dir = current_dir.replace('\\', '/')
current_dir = current_dir.rstrip('/')
current_dir += '/'
respond = '{ "result" : "ok", "dir" : "' + current_dir + '" }'
self._set_headers()
self.wfile.write(respond)
except Exception:
print ('getcwd exception')
print(traceback.format_exc())
self._set_headers()
self.wfile.write('{ "result" : "error" }')
def _chdir(self, params):
try:
dir_name = params['dir'][0]
dir_name = dir_name.replace('\\', '/')
dir_name = dir_name.rstrip('/')
dir_name += '/'
os.chdir(dir_name)
self._set_headers()
self.wfile.write('{ "result" : "ok" }')
except Exception:
print ('chdir exception')
print(traceback.format_exc())
self._set_headers()
self.wfile.write('{ "result" : "error" }')
def _export_scene(self, params):
try:
file_name = params['file'][0]
file_name = file_name.replace('\\', '/')
print("Scene export : " + file_name)
data_len = int(self.headers.getheader('content-length', 0))
data_body = self.rfile.read(data_len)
export_roblox_scene(file_name, data_body)
self._set_headers()
self.wfile.write('{ "result" : "ok" }')
except Exception:
print ('write exception')
print(traceback.format_exc())
self._set_headers()
self.wfile.write('{ "result" : "error" }')
def _write(self, params):
try:
data_len = int(self.headers.getheader('content-length', 0))
data_body = self.rfile.read(data_len)
data = binary_decode(data_body)
file_name = params['file'][0]
file_name = file_name.replace('\\', '/')
file_handle = open(file_name, "wb")
file_handle.write(data)
file_handle.close()
self._set_headers()
self.wfile.write('{ "result" : "ok" }')
except Exception:
print ('write exception')
print(traceback.format_exc())
self._set_headers()
self.wfile.write('{ "result" : "error" }')
def _stat(self, params):
try:
path_name = params['path'][0]
path_name = path_name.replace('\\', '/')
path_stat = os.stat(path_name)
mode = path_stat.st_mode
is_dir = False
if S_ISDIR(mode):
is_dir = True
is_file = False
if S_ISREG(mode):
is_file = True
self._set_headers()
self.wfile.write('{ "result" : "ok", '
'"payload": { "is_exist" : true, "is_dir" : ' + str(is_dir).lower() +
', "is_file" : ' + str(is_file).lower() +
', "size" : ' + str(path_stat.st_size) + '} }')
except os.error:
self._set_headers()
self.wfile.write('{ "result" : "ok", "payload": { "is_exist" : false, "is_dir" : false, '
'"is_file" : false, "size" : 0 } }')
except Exception:
print ('stat exception')
print(traceback.format_exc())
self._set_headers()
self.wfile.write('{ "result" : "error" }')
def _read(self, params):
try:
file_name = params['file'][0]
file_name = file_name.replace('\\', '/')
file_handle = open(file_name, "rb")
file_content = file_handle.read()
file_handle.close()
file_size = os.path.getsize(file_name)
data = binary_encode(file_content)
respond = '{ "result" : "ok", "content" : "' + data + '", "size" : ' + str(file_size) + ' }'
self._set_headers()
self.wfile.write(respond)
except Exception:
print ('read exception')
print(traceback.format_exc())
self._set_headers()
self.wfile.write('{ "result" : "error" }')
def _dirlist(self, params):
try:
dir_name = os.getcwd()
if 'dir' in params:
dir_name = params['dir'][0]
dir_name = dir_name.replace('\\', '/')
dir_name = dir_name.rstrip('/')
dirs = os.listdir(dir_name)
dirs.sort()
json_files = ''
json_dirs = ''
for short_name in dirs:
short_name = short_name.replace('\\', '/')
short_name = short_name.rstrip('/')
long_name = dir_name + '/' + short_name
if os.path.isdir(long_name):
long_name += '/'
short_name += '/'
if len(json_dirs):
json_dirs += ', '
json_dirs += '{ "short_name" : "' + short_name + '", '
json_dirs += '"long_name" : "' + long_name + '" }'
elif os.path.isfile(long_name):
file_size = os.path.getsize(long_name)
time_stamp = os.path.getmtime(long_name)
if len(json_files):
json_files += ', '
json_files += '{ "short_name" : "' + short_name + '", '
json_files += '"long_name" : "' + long_name + '", '
json_files += '"size" : ' + str(file_size) + ', '
json_files += '"unix_time" : "' + str(time_stamp) + '", '
json_files += '"str_time" : "'
json_files += datetime.datetime.fromtimestamp(time_stamp).strftime('%Y-%m-%d %H:%M:%S')
json_files += '" }'
else:
print ('unknown')
respond = '{ "result" : "ok", "files": [' + json_files + '], "dirs": [' + json_dirs + '] }'
self._set_headers()
self.wfile.write(respond)
except Exception:
print ('dirlist exception')
print(traceback.format_exc())
self._set_headers()
self.wfile.write('{ "result" : "error" }')
def run(server_class=HTTPServer, handler_class=S, port=8002):
server_address = ('', port)
httpd = server_class(server_address, handler_class)
print ('Starting sputnik server...')
httpd.serve_forever()
run()
| 3.078125 | 3 |
projects/fred-snyder-examples/send-emails-csv-with-smtp/test-smtp-server.py | fred-snyder/notes-on-python | 1 | 12787376 | <reponame>fred-snyder/notes-on-python
import smtplib, ssl
# import smtp_server, port, login, password
import production_config
# Create SSL context
context = ssl.create_default_context()
# test the smtp server
try:
server = smtplib.SMTP(smtp_server,port)
server.ehlo()
server.starttls(context=context)
server.ehlo()
server.login(login, password)
except Exception as e:
# Print error messages
print(e)
finally:
server.quit()
| 2.453125 | 2 |
apps/menuplans/apps.py | jajadinimueter/recipe | 0 | 12787377 | from django.apps import AppConfig
class MenuplansConfig(AppConfig):
name = 'menuplans'
verbose_name = 'Menuplans'
| 1.046875 | 1 |
bikes_prediction/bikesmtl/api.py | cheer021/BikesPrediction_DP | 0 | 12787378 | <filename>bikes_prediction/bikesmtl/api.py
# from rest_framework import generics, permissions
# from .models import Station, StationDataNow
# from .serializers import StationSerializer, StationDataNowSerializer
# class StationList(generics.ListCreateAPIView):
# model = Station
# serializer_class = StationSerializer
# permission_classes = [
# permissions.AllowAny
# ]
# class StationDataNowList(generics.ListCreateAPIView):
# model = StationDataNow
# serializer_class = StationDataNowSerializer
# permission_classes = [
# permissions.AllowAny
# ]
# class StationDataList(generics.ListAPIView):
# model = StationDataNow
# serializer_class = StationDataNowSerializer
# def get_queryset(self):
# queryset = super(StationDataList, self).get_queryset()
# return queryset.filter(station=self.kwargs.get('station'))
# ]
from djangular.views.crud import NgCRUDView
# NOTE: BikesMtl was referenced below without being imported; assuming it is defined in this app's models module
from .models import BikesMtl
class MyCRUDView(NgCRUDView):
model = BikesMtl | 2.28125 | 2 |
notify_by_email.py | hysds/lightweight-jobs | 0 | 12787379 | #!/usr/bin/env python
import os
import getpass
import requests
import json
import base64
import socket
from smtplib import SMTP
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email.header import Header
from email.utils import parseaddr, formataddr, COMMASPACE
from email import encoders
from hysds.celery import app
from hysds.es_util import get_mozart_es, get_grq_es
from hysds_commons.net_utils import get_container_host_ip
def read_context():
with open("_context.json", "r") as f:
cxt = json.load(f)
return cxt
def get_hostname():
"""Get hostname."""
try:
return socket.getfqdn()
except Exception as e:
print(e)
print("socket.getfqdn() failed, passing...")
pass
try:
return socket.gethostbyname(socket.gethostname())
except Exception as e:
print(e)
raise RuntimeError(
"Failed to resolve hostname for full email address. Check system."
)
def send_email(sender, cc, bcc, subject, body, attachments=None):
"""
Send an email.
All arguments should be Unicode strings (plain ASCII works as well).
Only the real name part of sender and recipient addresses may contain
non-ASCII characters.
The email will be properly MIME encoded and delivered though SMTP to
172.17.0.1. This is easy to change if you want something different.
The charset of the email will be the first one out of US-ASCII, ISO-8859-1
and UTF-8 that can represent all the characters occurring in the email.
"""
recipients = cc + bcc # combined recipients
# Header class is smart enough to try US-ASCII, then the charset we
# provide, then fall back to UTF-8.
header_charset = "ISO-8859-1"
# We must choose the body charset manually
for body_charset in "US-ASCII", "ISO-8859-1", "UTF-8":
try:
body.encode(body_charset)
except UnicodeError:
pass
else:
break
# Split real name (which is optional) and email address parts
sender_name, sender_addr = parseaddr(sender)
parsed_cc = [parseaddr(rec) for rec in cc]
parsed_bcc = [parseaddr(rec) for rec in bcc]
# We must always pass Unicode strings to Header, otherwise it will
# use RFC 2047 encoding even on plain ASCII strings.
unicode_parsed_cc = []
for recipient_name, recipient_addr in parsed_cc:
recipient_name = str(Header(str(recipient_name), header_charset))
# Make sure email addresses do not contain non-ASCII characters
recipient_addr = recipient_addr.encode("ascii")
recipient_addr = recipient_addr.decode()
unicode_parsed_cc.append((recipient_name, recipient_addr))
unicode_parsed_bcc = []
for recipient_name, recipient_addr in parsed_bcc:
recipient_name = str(Header(str(recipient_name), header_charset))
# Make sure email addresses do not contain non-ASCII characters
recipient_addr = recipient_addr.encode("ascii")
recipient_addr = recipient_addr.decode()
unicode_parsed_bcc.append((recipient_name, recipient_addr))
# Create the message ('plain' stands for Content-Type: text/plain)
msg = MIMEMultipart()
msg["CC"] = COMMASPACE.join(
[
formataddr((recipient_name, recipient_addr))
for recipient_name, recipient_addr in unicode_parsed_cc
]
)
msg["BCC"] = COMMASPACE.join(
[
formataddr((recipient_name, recipient_addr))
for recipient_name, recipient_addr in unicode_parsed_bcc
]
)
msg["Subject"] = Header(str(subject), header_charset)
msg["FROM"] = "<EMAIL>"
msg.attach(MIMEText(body.encode(body_charset), "plain", body_charset))
# Add attachments
if isinstance(attachments, dict):
for fname in attachments:
part = MIMEBase("application", "octet-stream")
part.set_payload(attachments[fname])
encoders.encode_base64(part)
part.add_header("Content-Disposition", 'attachment; filename="%s"' % fname)
msg.attach(part)
# Send the message via SMTP to docker host
smtp_url = "smtp://%s:25" % get_container_host_ip()
print("smtp_url : %s", smtp_url)
smtp = SMTP(get_container_host_ip())
smtp.sendmail(sender, recipients, msg.as_string())
smtp.quit()
def get_cities(src):
"""Return list of cities."""
cities = []
for city in src.get("city", []):
cities.append("%s, %s" % (city.get("name", ""), city.get("admin1_name", "")))
return cities
def get_value(d, key):
"""Return value from source based on key."""
for k in key.split("."):
if k in d:
d = d[k]
else:
return None
if isinstance(d, list):
return ", ".join([str(i) for i in d])
else:
return d
def get_metadata_snippet(src, snippet_cfg):
"""Return body text for metadata snippet."""
body = ""
for k, label in snippet_cfg:
val = get_value(src, k)
if val is not None:
body += "%s: %s\n" % (label, val)
body += "location type: %s\n" % src.get("location", {}).get("type", None)
body += "location coordinates: %s\n" % src.get("location", {}).get(
"coordinates", []
)
cities = get_cities(src)
body += "Closest cities: %s" % "\n\t\t".join(cities)
return body
def get_facetview_link(link, _id, version=None):
"""
Return link to object_id in FacetView interface.
:param link: str
:param _id: str, _id for elasticsearch document
:param version: str
:return: constructed URL for facetview
"""
if link.endswith("/"):
link = link[:-1]
origin = link.split("/")[-1:]
print(origin)
if "figaro" in origin:
term = "job_id"
else:
term = "_id"
if version is None:
query_string = 'query_string="' + term + '%3A%5C"' + _id + '%5C""'
else:
query_string = 'query_string="' + term + '%3A%5C"' + _id + '%5C""&system_version="' + version + '"'
print(_id)
return "%s/?%s" % (link, query_string)
if __name__ == "__main__":
path = "/".join(__file__.split("/")[0:-1])
settings_file = os.path.join(path, "settings.json")
settings_file = os.path.normpath(settings_file) # normalizing the path
settings = json.load(open(settings_file))
context = read_context()
object_id = context["id"]
url = context["url"]
emails = context["emails"]
rule_name = context["name"]
component = context["component"]
if component == "mozart" or component == "figaro":
es = get_mozart_es()
index = app.conf["STATUS_ALIAS"]
facetview_url = app.conf["MOZART_URL"]
facetview_url = "/".join(facetview_url.split("/")[0:-2]) + "/hysds_ui/figaro"
else: # "tosca"
es = get_grq_es()
index = app.conf["DATASET_ALIAS"]
facetview_url = app.conf["MOZART_URL"]
facetview_url = "/".join(facetview_url.split("/")[0:-2]) + "/hysds_ui/tosca"
cc_recipients = [i.strip() for i in emails.split(",")]
bcc_recipients = []
email_subject = "[monitor] (notify_by_email:%s) %s" % (rule_name, object_id)
email_body = "Product with id %s was ingested." % object_id
email_attachments = None
query = {
"query": {
"term": {
"_id": object_id
}
}
}
result = es.search(index=index, body=query) # can't use get_by_id on alias
if result["hits"]["total"]["value"] > 0:
doc = result["hits"]["hits"][0]
email_body += "\n\n%s" % get_metadata_snippet(doc, settings["SNIPPET_CFG"])
email_body += "\n\nThe entire metadata json for this product has been attached for your convenience.\n\n"
email_attachments = {
"metadata.json": json.dumps(doc, indent=2) # attach metadata json
}
# attach browse images
doc_source = doc["_source"]
if len(doc_source["browse_urls"]) > 0:
browse_url = doc_source["browse_urls"][0]
if len(doc_source["images"]) > 0:
email_body += "Browse images have been attached as well.\n\n"
for i in doc_source["images"]:
small_img = i["small_img"]
small_img_url = os.path.join(browse_url, small_img)
r = requests.get(small_img_url)
if r.status_code != 200:
continue
email_attachments[small_img] = r.content
else:
doc = None
email_body += "\n\n"
email_body += "You may access the product here:\n\n%s" % url
system_version = None if doc is None else doc.get("system_version")
facet_url = get_facetview_link(facetview_url, object_id, system_version)
if facet_url is not None:
email_body += (
"\n\nYou may view this product in FacetView here:\n\n%s" % facet_url
)
email_body += (
"\n\nNOTE: You may have to cut and paste the FacetView link into your "
)
email_body += "browser's address bar to prevent your email client from escaping the curly brackets."
username_email = "%s@%s" % (getpass.getuser(), get_hostname())
send_email(
username_email,
cc_recipients,
bcc_recipients,
email_subject,
email_body,
attachments=email_attachments,
)
| 2.421875 | 2 |
exercicios_curso_em_video/Exercicio 69.py | Sposigor/Caminho_do_Python | 1 | 12787380 | m = f = maiores = 0
while True:
print('=' * 20)
print('CADASTRE UMA PESSOA')
print('=' * 20)
idade = int(input('Digite a sua Idade:'))
while idade < 1 or idade > 120:
print('Você digitou uma idade inválida, digite a sua idade novamente')
idade = int(input('Digite a sua Idade :'))
if idade >= 18:
maiores += 1
sexo = str(input('Sexo: [M/F] ')).strip().upper()[0]
while sexo not in 'MF':
print('Você digitou uma opção INVÁLIDA, digite novamente')
sexo = str(input('Sexo: [M/F] ')).strip().upper()[0]
if sexo == 'M':
m += 1
elif sexo == 'F' and idade < 20:
f += 1
print('-'*20)
continuar = str(input('Deseja continuar? [S/N] ')).strip().upper()[0]
while continuar not in 'SN':
print('Opção inválida, digite novamente')
print('-' * 20)
continuar = str(input('Deseja continuar? [S/N] ')).strip().upper()[0]
if continuar == 'N':
break
print(f'{maiores} pessoas são maiores de 18 anos\n{f} são mulheres com menos de 20 anos\n{m} são homens') | 4.03125 | 4 |
utils.py | t3kv/DCKM | 7 | 12787381 | <gh_stars>1-10
from sklearn.model_selection import StratifiedShuffleSplit
from scipy.sparse import csr_matrix
import numpy as np
import os
from collections import Counter
import tensorflow as tf
import datetime
import inspect
import gc
# DEFAULT SETTINGS
_NOW = datetime.datetime.now()
_DATETIME = str(_NOW.year) + '-' + str(_NOW.day) + '-' + str(_NOW.month) + '-' + str(_NOW.hour) + '-' + str(_NOW.minute) + '-' + str(_NOW.second)
_LOG = 'log'
_RANDOM_SEED = 6789
def load_dataset(list_arch_os, dataset_name, make_it_imbalanced=True):
X_full = []
Y_full = np.array([])
data_folder = 'datasets'
for arch_os in list_arch_os:
decimal_functions_path = data_folder + '/' + dataset_name + '/binaries-' + arch_os + '.data'
label_path = data_folder + '/' + dataset_name + '/labels-' + arch_os + '.data'
with open(decimal_functions_path, 'r') as f:
X_lines = f.readlines()
with open(label_path, 'r') as f:
Y_lines = f.readlines()
Y = np.array([int(number) for number in Y_lines[0].split()])
X_full += X_lines
Y_full = np.concatenate((Y_full, Y), axis=0)
if dataset_name == 'NDSS18' and make_it_imbalanced:
print('Making NDSS18 imbalanced ...')
n_vul = np.sum(Y_full)
n_non_vul = len(Y_full) - n_vul
        imbalanced_ratio = 1 / 50  # it means: vul:non-vul = 1:50
n_vul_new = int(n_non_vul * imbalanced_ratio)
imbalanced_X = []
imbalanced_y = []
index = 0
for id, opcode_assembly_code in enumerate(X_full):
if Y_full[index] == 1.0: # vulnerable function
if sum(imbalanced_y) < n_vul_new:
if opcode_assembly_code != '-----\n':
imbalanced_X.append(opcode_assembly_code)
else: # opcode_assembly_code == '-----\n'
index += 1
imbalanced_X.append(opcode_assembly_code) # also add '-----\n'
imbalanced_y.append(1)
else:
if opcode_assembly_code == '-----\n':
index += 1
elif Y_full[index] == 0.0:
if opcode_assembly_code != '-----\n':
imbalanced_X.append(opcode_assembly_code)
else: # opcode_assembly_code == '-----\n':
index += 1
imbalanced_X.append(opcode_assembly_code) # also add '-----\n'
imbalanced_y.append(0)
X_full = imbalanced_X
Y_full = np.asarray(imbalanced_y)
# process opcodes and assembly code (note that assembly code is the instruction information of the paper)
if dataset_name == 'NDSS18':
X_opcode, X_assembly, sequence_length, max_length, vocab_opcode_size = NDSS18_process_opcode_assembly_code(X_full)
else:
max_length = 300
X_opcode, X_assembly, sequence_length, vocab_opcode_size = six_projects_process_opcode_assembly_code(X_full, max_length)
del X_full
gc.collect()
X_opcode = np.asarray(X_opcode)
X_assembly = np.asarray(X_assembly)
    test_set_ratio = 0.1  # it means train:valid:test = 8:1:1
train_valid_index, test_index = split_by_ratio(X_opcode, Y_full, test_size=test_set_ratio)
train_index, valid_index = split_by_ratio(X_opcode[train_valid_index],
Y_full[train_valid_index],
test_size=test_set_ratio / (1 - test_set_ratio))
x_train_opcode = X_opcode[train_valid_index][train_index]
x_valid_opcode = X_opcode[train_valid_index][valid_index]
x_test_opcode = X_opcode[test_index]
del X_opcode
gc.collect()
x_train_assembly = X_assembly[train_valid_index][train_index]
x_valid_assembly = X_assembly[train_valid_index][valid_index]
x_test_assembly = X_assembly[test_index]
del X_assembly
gc.collect()
x_train_seq_len = sequence_length[train_valid_index][train_index]
x_valid_seq_len = sequence_length[train_valid_index][valid_index]
x_test_seq_len = sequence_length[test_index]
y_train = Y_full[train_valid_index][train_index]
y_valid = Y_full[train_valid_index][valid_index]
y_test = Y_full[test_index]
message = 'x_train (opcode & assembly) {}\n'.format(x_train_opcode.shape)
message += 'y_train {}\n'.format(y_train.shape)
message += 'x_valid (opcode & assembly) {}\n'.format(x_valid_opcode.shape)
message += 'y_valid {}\n'.format(y_valid.shape)
message += 'x_test (opcode & assembly) {}\n'.format(x_test_opcode.shape)
message += 'y_test {}\n'.format(y_test.shape)
message += 'max-length {}\n'.format(max_length)
message += 'vocab_opcode_size {}\n'.format(vocab_opcode_size)
print_and_write_logging_file(dir=_LOG, txt=message, running_mode=1)
return x_train_opcode, x_train_assembly, x_train_seq_len, y_train, \
x_valid_opcode, x_valid_assembly, x_valid_seq_len, y_valid, \
x_test_opcode, x_test_assembly, x_test_seq_len, y_test, max_length, vocab_opcode_size
def split_by_ratio(X, y, random_seed=_RANDOM_SEED, test_size=0.1):
shufflesplit = StratifiedShuffleSplit(n_splits=1, random_state=random_seed, test_size=test_size)
train_index, test_index = next(shufflesplit.split(X, y, groups=y))
return train_index, test_index
def make_batches(size, batch_size):
# returns a list of batch indices (tuples of indices).
return [(i, min(size, i + batch_size)) for i in range(0, size, batch_size)]
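# Example (illustrative): make_batches(10, 4) -> [(0, 4), (4, 8), (8, 10)]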
def make_dir(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def build_vocab(words):
dictionary = dict()
count = []
count.extend(Counter(words).most_common())
index = 0
for word, occurs in count:
dictionary[word] = index
index += 1
index_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return dictionary, index_dictionary
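# Example (illustrative): build_vocab(['mov', 'push', 'mov'])
#   -> ({'mov': 0, 'push': 1}, {0: 'mov', 1: 'push'}), indices assigned by descending frequency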
def create_one_hot_vector_for_opcode(aa, dic_id_opcode, all_zeros=False):
bb = np.zeros(len(dic_id_opcode))
if all_zeros:
return bb
else:
bb[dic_id_opcode[aa]] = 1
return bb
def create_one_hot_vector_for_assembly(list_tuple=[], all_zeros=False):
bb = np.zeros(256)
if all_zeros: # for padding
return bb
else:
# count on each line of function, and assign at index the value of num_occurs
for tuple_hex_times in list_tuple:
decimal = int(tuple_hex_times[0])
n_occures = tuple_hex_times[1]
bb[decimal] = n_occures
return bb
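# Example (illustrative): create_one_hot_vector_for_assembly([('96', 2), ('11', 1)])
#   -> a 256-dim vector with entry 96 set to 2, entry 11 set to 1 and every other entry 0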
def convert_to_one_hot(list_function_opcode, list_function_assembly_code, dic_opcode, max_length):
# process opcode
function_opcode_one_hot = []
for function_opcode in list_function_opcode:
opcode_one_hot = []
for opcode in function_opcode:
one_hex = create_one_hot_vector_for_opcode(opcode, dic_opcode)
opcode_one_hot.append(one_hex)
while len(opcode_one_hot) < max_length:
opcode_one_hot.append(create_one_hot_vector_for_opcode(opcode, dic_opcode, all_zeros=True))
function_opcode_one_hot.append(csr_matrix(opcode_one_hot))
function_opcode_one_hot = np.asarray(function_opcode_one_hot)
# process assembly
function_assembly_one_hot = []
for function_assembly in list_function_assembly_code:
assembly_one_hot = []
list_tuple = []
for list_hex in function_assembly:
list_tuple.extend(Counter(list_hex).most_common())
one_line = create_one_hot_vector_for_assembly(list_tuple)
assembly_one_hot.append(one_line)
list_tuple = []
while len(assembly_one_hot) < max_length:
assembly_one_hot.append(create_one_hot_vector_for_assembly(all_zeros=True))
function_assembly_one_hot.append(csr_matrix(assembly_one_hot))
function_assembly_one_hot = np.asarray(function_assembly_one_hot)
return function_opcode_one_hot, function_assembly_one_hot
def NDSS18_process_opcode_assembly_code(raw_X):
list_function_opcode = []
list_function_assembly_code = []
words_opcode = []
list_opcode = []
list_assembly_code = []
max_length = -1
length = 0
sequence_length = np.array([]).astype(int) # actual sequence_length of each function
for id, opcode_assembly_code in enumerate(raw_X):
if opcode_assembly_code != '-----\n':
opcode_assembly_code = opcode_assembly_code[:-1]
opcode_assembly_code_split = opcode_assembly_code.split('|')
if len(opcode_assembly_code_split) == 2: # opcode has 1 byte
opcode = opcode_assembly_code_split[0]
list_hex_code = opcode_assembly_code_split[1]
else:
opcode = ' '.join(opcode_assembly_code_split[:-1])
list_hex_code = opcode_assembly_code_split[-1]
list_opcode.append(opcode)
words_opcode.append(opcode)
list_assembly_code.append(list_hex_code.split(','))
length += 1
else:
list_function_opcode.append(list_opcode)
list_function_assembly_code.append(list_assembly_code)
list_opcode = []
list_assembly_code = []
if length > max_length:
max_length = length
sequence_length = np.append(sequence_length, length)
length = 0
dictionary_index, index_dictionary = build_vocab(words_opcode)
function_opcode_one_hot, function_assembly_one_hot = convert_to_one_hot(list_function_opcode, list_function_assembly_code, dictionary_index, max_length)
return function_opcode_one_hot, function_assembly_one_hot, sequence_length, max_length, len(dictionary_index)
def six_projects_process_opcode_assembly_code(raw_X, max_length=190):
list_function_opcode = []
list_function_assembly_code = []
words_opcode = []
list_opcode = []
list_assembly_code = []
length = 0
sequence_length = np.array([]).astype(int) # actual sequence_length of each function
for id, opcode_assembly_code in enumerate(raw_X):
if opcode_assembly_code != '-----\n':
opcode_assembly_code = opcode_assembly_code[:-1]
opcode_assembly_code_split = opcode_assembly_code.split('|')
if len(opcode_assembly_code_split) == 2: # opcode has 1 byte
opcode = opcode_assembly_code_split[0]
list_hex_code = opcode_assembly_code_split[1]
else:
opcode = ' '.join(opcode_assembly_code_split[:-1])
list_hex_code = opcode_assembly_code_split[-1]
length += 1
if length <= max_length:
list_opcode.append(opcode)
words_opcode.append(opcode)
list_assembly_code.append(list_hex_code.split(','))
length_cut_by_max_length = length
else:
list_function_opcode.append(list_opcode)
list_function_assembly_code.append(list_assembly_code)
list_opcode = []
list_assembly_code = []
sequence_length = np.append(sequence_length, length_cut_by_max_length)
length = 0
dictionary_index, index_dictionary = build_vocab(words_opcode)
function_opcode_one_hot, function_assembly_one_hot = convert_to_one_hot(list_function_opcode, list_function_assembly_code, dictionary_index, max_length)
return function_opcode_one_hot, function_assembly_one_hot, sequence_length, len(dictionary_index)
def get_default_config():
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
tf_config.log_device_placement = False
tf_config.allow_soft_placement = True
return tf_config
def print_and_write_logging_file(dir, txt, running_mode, get_datetime_from_training=_DATETIME, show_message=True):
if show_message:
print(txt[:-1])
if running_mode == 1:
with open(os.path.join(dir, 'training_log_' + _DATETIME + '.txt'), 'a') as f:
f.write(txt)
elif running_mode == 0:
with open(os.path.join(dir, 'testing_log_' + get_datetime_from_training + '.txt'), 'a') as f:
f.write(txt)
else:
with open(os.path.join(dir, 'visualization_log_' + get_datetime_from_training + '.txt'), 'a') as f:
f.write(txt)
def save_all_params(class_object):
attributes = inspect.getmembers(class_object, lambda a: not (inspect.isroutine(a)))
list_params = [a for a in attributes if not(a[0].startswith('__') and a[0].endswith('__'))]
message = 'List parameters '
message += '{\n'
for params in list_params:
try:
message += '\t' + str(params[0]) + ': ' + str(params[1]) + '\n'
except:
continue
message += '}\n'
if class_object.running_mode == 1:
message += "Start training process.\n"
message += "Start pre-processing data.\n"
elif class_object.running_mode == 0:
message += "Start testing process.\n"
message += "Start pre-processing data.\n"
message += "-----------------------------------------------------\n"
make_dir(_LOG)
print_and_write_logging_file(_LOG, message, class_object.running_mode)
def convert_list_sparse_to_dense(X):
dense_matrix = []
for one_function in X:
dense_matrix.append(one_function.toarray())
return np.asarray(dense_matrix)
| 2.25 | 2 |
src/ecr/core/manager.py | eXceediDeaL/edl-coderunner | 1 | 12787382 | <reponame>eXceediDeaL/edl-coderunner
# pylint: disable=W0611
from ._manager import getSystemCommand
from ._WorkItem import WorkItem, WorkItemType, initializeCodeDirectory
from ._WorkManager import WorkManager, WorkManagerState, hasInitialized, initialize, clear, load
from ._Runner import runCommands
| 1.070313 | 1 |
end2end/soloist/experiment_management/ems.py | boliangz/dstc9 | 17 | 12787383 | <filename>end2end/soloist/experiment_management/ems.py
# experiment management system
import argparse
import logging
import os
import subprocess
import json
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger()
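# Expected config shape (illustrative, inferred from the keys accessed below; values are placeholders):
# {
#     "method": "<method dir under end2end/soloist/methods>",
#     "global_config": {"python_path": "/path/to/repo"},
#     "data": {"train": "train.json", "dev": "dev.json"},
#     "train": {"cuda_device": "0", "...": "other flags forwarded to train_dist.py"}
# }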
def train(config, config_name):
model_dir = os.path.join(
os.path.abspath(os.path.dirname(__file__)), 'model', config_name
)
if not os.path.exists(model_dir):
os.makedirs(model_dir)
tensorboard_log_dir = os.path.join(
os.path.abspath(os.path.dirname(__file__)), 'tensorboard_log', config_name
)
if not os.path.exists(tensorboard_log_dir):
os.makedirs(tensorboard_log_dir)
#
# run command
#
script = config['global_config']['python_path'] + '/end2end/soloist/methods/{}/train_dist.py'.format(config['method'])
cmd = [
'python',
script,
'--train', config['data']['train'],
'--dev', config['data']['dev'],
'--model_dir', model_dir,
'--tensorboard_log_dir', tensorboard_log_dir,
'--job_name', config_name,
'--python_path', config['global_config']['python_path']
]
for k, v in config['train'].items():
cmd += ['--{}'.format(k), v]
os.environ.update(
{'CUDA_VISIBLE_DEVICES': config['train']['cuda_device'],
'PYTHONPATH': config['global_config']['python_path']}
)
logger.info('train script:')
logger.info(' '.join(cmd))
subprocess.call(' '.join(cmd), shell=True)
def eval(config, config_name, output_dir):
# to-do
pass
def luban_dist_inference(test_set, num_tasks, config, config_name, output_dir):
# to-do
pass
def ems(args, config, config_name):
if args.mode in ['train', 'all']:
train(config, config_name)
if args.mode in ['eval', 'all']:
# to-do
pass
def main():
parser = argparse.ArgumentParser()
parser.add_argument('config')
parser.add_argument('mode', default='all', type=str,
                        help='mode: train, eval, score, all')
args = parser.parse_args()
# load config
with open(args.config) as f:
config = json.load(f)
config_name = os.path.basename(args.config).replace('.json', '')
ems(args, config, config_name)
if __name__ == "__main__":
main()
| 2.03125 | 2 |
scvi/models/vaec.py | maxime1310/fuzzy_labeling_scRNA | 8 | 12787384 | import torch
from torch.distributions import Normal, Categorical, kl_divergence as kl
from scvi.models.classifier import Classifier
from scvi.models.modules import Encoder, DecoderSCVI
from scvi.models.utils import broadcast_labels
from scvi.models.vae import VAE
class VAEC(VAE):
r"""A semi-supervised Variational auto-encoder model - inspired from M2 model,
as described in (https://arxiv.org/pdf/1406.5298.pdf)
Args:
:n_input: Number of input genes.
:n_batch: Default: ``0``.
:n_labels: Default: ``0``.
:n_hidden: Number of hidden. Default: ``128``.
        :n_latent: Default: ``10``.
:n_layers: Number of layers. Default: ``1``.
:dropout_rate: Default: ``0.1``.
:dispersion: Default: ``"gene"``.
:log_variational: Default: ``True``.
:reconstruction_loss: Default: ``"zinb"``.
:y_prior: Default: None, but will be initialized to uniform probability over the cell types if not specified
Examples:
>>> gene_dataset = CortexDataset()
>>> vaec = VAEC(gene_dataset.nb_genes, n_batch=gene_dataset.n_batches * False,
... n_labels=gene_dataset.n_labels)
>>> gene_dataset = SyntheticDataset(n_labels=3)
>>> vaec = VAEC(gene_dataset.nb_genes, n_batch=gene_dataset.n_batches * False,
... n_labels=3, y_prior=torch.tensor([[0.1,0.5,0.4]]))
"""
def __init__(self, n_input, n_batch, n_labels, n_hidden=128, n_latent=10, n_layers=1, dropout_rate=0.1,
y_prior=None, dispersion="gene", log_variational=True, reconstruction_loss="zinb"):
super(VAEC, self).__init__(n_input, n_batch, n_labels, n_hidden=n_hidden, n_latent=n_latent, n_layers=n_layers,
dropout_rate=dropout_rate, dispersion=dispersion, log_variational=log_variational,
reconstruction_loss=reconstruction_loss)
self.z_encoder = Encoder(n_input, n_latent, n_cat_list=[n_labels], n_hidden=n_hidden, n_layers=n_layers,
dropout_rate=dropout_rate)
self.decoder = DecoderSCVI(n_latent, n_input, n_cat_list=[n_batch, n_labels], n_layers=n_layers,
n_hidden=n_hidden, dropout_rate=dropout_rate)
self.y_prior = torch.nn.Parameter(
y_prior if y_prior is not None else (1 / n_labels) * torch.ones(1, n_labels), requires_grad=False
)
self.classifier = Classifier(n_input, n_hidden, n_labels, n_layers=n_layers, dropout_rate=dropout_rate)
def classify(self, x):
x = torch.log(1 + x)
return self.classifier(x)
def forward(self, x, local_l_mean, local_l_var, batch_index=None, y=None):
is_labelled = False if y is None else True
# Prepare for sampling
x_ = torch.log(1 + x)
ql_m, ql_v, library = self.l_encoder(x_)
# Enumerate choices of label
ys, xs, library_s, batch_index_s = (
broadcast_labels(
y, x, library, batch_index, n_broadcast=self.n_labels
)
)
        xs_ = xs
        if self.log_variational:
            xs_ = torch.log(1 + xs)
# Sampling
qz_m, qz_v, zs = self.z_encoder(xs_, ys)
px_scale, px_r, px_rate, px_dropout = self.decoder(self.dispersion, zs, library_s, batch_index_s, ys)
reconst_loss = self._reconstruction_loss(xs, px_rate, px_r, px_dropout, batch_index_s, ys)
# KL Divergence
mean = torch.zeros_like(qz_m)
scale = torch.ones_like(qz_v)
kl_divergence_z = kl(Normal(qz_m, torch.sqrt(qz_v)), Normal(mean, scale)).sum(dim=1)
kl_divergence_l = kl(Normal(ql_m, torch.sqrt(ql_v)), Normal(local_l_mean, torch.sqrt(local_l_var))).sum(dim=1)
if is_labelled:
return reconst_loss, kl_divergence_z + kl_divergence_l
reconst_loss = reconst_loss.view(self.n_labels, -1)
probs = self.classifier(x_)
reconst_loss = (reconst_loss.t() * probs).sum(dim=1)
kl_divergence = (kl_divergence_z.view(self.n_labels, -1).t() * probs).sum(dim=1)
kl_divergence += kl(Categorical(probs=probs),
Categorical(probs=self.y_prior.repeat(probs.size(0), 1)))
kl_divergence += kl_divergence_l
return reconst_loss, kl_divergence
| 2.375 | 2 |
Packages/matplotlib-2.2.2/lib/mpl_examples/userdemo/demo_gridspec02.py | NightKirie/NCKU_NLP_2108_industry3 | 1 | 12787385 | """
===============
Demo Gridspec02
===============
"""
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
def make_ticklabels_invisible(fig):
for i, ax in enumerate(fig.axes):
ax.text(0.5, 0.5, "ax%d" % (i+1), va="center", ha="center")
ax.tick_params(labelbottom=False, labelleft=False)
fig = plt.figure()
gs = GridSpec(3, 3)
ax1 = plt.subplot(gs[0, :])
# identical to ax1 = plt.subplot(gs.new_subplotspec((0, 0), colspan=3))
ax2 = plt.subplot(gs[1, :-1])
ax3 = plt.subplot(gs[1:, -1])
ax4 = plt.subplot(gs[-1, 0])
ax5 = plt.subplot(gs[-1, -2])
fig.suptitle("GridSpec")
make_ticklabels_invisible(fig)
plt.show()
| 3.328125 | 3 |
pycaw/api/mmdeviceapi/depend/structures.py | Jan-Zeiseweis/pycaw | 234 | 12787386 | <gh_stars>100-1000
from ctypes import Structure, Union
from ctypes.wintypes import (
DWORD, LONG, LPWSTR, ULARGE_INTEGER, VARIANT_BOOL, WORD)
from comtypes import GUID
from comtypes.automation import VARTYPE, VT_BOOL, VT_CLSID, VT_LPWSTR, VT_UI4
from future.utils import python_2_unicode_compatible
class PROPVARIANT_UNION(Union):
_fields_ = [
('lVal', LONG),
('uhVal', ULARGE_INTEGER),
('boolVal', VARIANT_BOOL),
('pwszVal', LPWSTR),
('puuid', GUID),
]
class PROPVARIANT(Structure):
_fields_ = [
('vt', VARTYPE),
('reserved1', WORD),
('reserved2', WORD),
('reserved3', WORD),
('union', PROPVARIANT_UNION),
]
def GetValue(self):
vt = self.vt
if vt == VT_BOOL:
return self.union.boolVal != 0
elif vt == VT_LPWSTR:
# return Marshal.PtrToStringUni(union.pwszVal)
return self.union.pwszVal
elif vt == VT_UI4:
return self.union.lVal
elif vt == VT_CLSID:
# TODO
# return (Guid)Marshal.PtrToStructure(union.puuid, typeof(Guid))
return
else:
return "%s:?" % (vt)
@python_2_unicode_compatible
class PROPERTYKEY(Structure):
_fields_ = [
('fmtid', GUID),
('pid', DWORD),
]
def __str__(self):
return "%s %s" % (self.fmtid, self.pid)
| 1.984375 | 2 |
web/templatetags/sortable_column.py | winny-/sillypaste | 3 | 12787387 | from django import template
register = template.Library()
@register.inclusion_tag('sortable_column_snippet.html')
def sortable_column(request, pretty_name, identifier, default=False):
current = request.GET.get('sort', identifier if default else None)
return {
'pretty_name': pretty_name,
'identifier': identifier,
'request': request,
'selected': identifier == current,
}
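# Template usage sketch (illustrative; assumes 'sortable_column_snippet.html' renders the header link):
#   {% load sortable_column %}
#   <th>{% sortable_column request "Title" "title" default=True %}</th>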
| 2.171875 | 2 |
organizer/__main__.py | darkanakin41/media-organizer | 1 | 12787388 | <filename>organizer/__main__.py
from typing import List
import click
from organizer import config
from organizer.model.media import Media
from organizer.util.arguments import is_option
from organizer.util.logger import logger
from organizer.util.scanner import scan_folder
from prettytable import PrettyTable
def print_result(medias: List[Media]):
"""
Print medias processing result into a table
:param medias: the list of medias
"""
headers = ['File', 'Size', 'Target', 'Copied']
if is_option('ignored'):
headers.append('Ignored')
if is_option('exists'):
headers.append('Exists')
table = PrettyTable(headers)
table.align["File"] = "l"
table.align["Size"] = "r"
table.align["Target"] = "l"
table.align["Exists"] = "c"
table.align["Ignored"] = "c"
table.align["Copied"] = "c"
display = False
for media in medias:
if (is_option('ignored') or not media.ignored) and (is_option('exists') or not media.exists):
row = [media.filename, media.get_file_size(), media.target, media.copied]
if is_option('ignored'):
row.append(media.ignored)
if is_option('exists'):
row.append(media.exists)
table.add_row(row)
display = True
table.title = 'Files'
if display:
print(table)
else:
logger.info('Nothing to copy, all media are already in the right spot!')
def get_medias() -> List[Media]:
"""
Retrieve the list of medias to be processed into folders
:return: the list of medias to process
"""
files = []
for folder in config['input']['folders']:
files += scan_folder(folder)
files.sort()
medias = []
for index, file in enumerate(files):
logger.debug('Convert to media %d/%d: %s', index + 1, len(files), file)
medias.append(Media(file))
return medias
def copy_medias_to_target(medias: List[Media]):
"""
Copy medias to target
:param medias: the list of medias
"""
for index, media in enumerate(medias):
logger.debug('Processing media %d/%d: %s --> %s',
index + 1,
len(medias),
media.file,
media.target)
media.copy_to_target()
@click.command()
@click.option('--verbose', is_flag=True, default=False, help='Verbose mode')
@click.option('--silent', is_flag=True, default=False, help='Silent mode')
@click.option('--dry-run', is_flag=True, default=False, help='Dry Run')
@click.option('--ignored', is_flag=True, default=False, help='Displayed ignored files in results.')
@click.option('--exists', is_flag=True, default=False, help='Displayed target already existing in results.')
def main(verbose: bool, silent: bool, dry_run: bool, ignored: bool, exists: bool): # pragma: no cover
"""
Main command
:param verbose: if running in verbose mode
:param silent: if running in silent mode
:param dry_run: if running in dry-run mode
:param ignored: if display ignored medias
:param exists: if display existing medias
:return:
"""
logger.info('Retrieve medias and associated datas')
medias = get_medias()
logger.info('Copy medias to target')
copy_medias_to_target(medias)
print_result(medias)
if __name__ == '__main__':
# pylint: disable=no-value-for-parameter
main()
| 2.625 | 3 |
CSV_build_GUI.py | sassystacks/Brisco_Devel | 0 | 12787389 | <reponame>sassystacks/Brisco_Devel<filename>CSV_build_GUI.py<gh_stars>0
from Tkinter import *
class Application(Frame):
def Build_CSV(self):
dt_selected = self.var1.get()
print(dt_selected)
try:
indx_date = self.lst1.index(dt_selected)+1
print(indx_date)
except:
print"Please Select a date"
def createWidgets(self):
self.QUIT = Button(self)
self.QUIT["text"] = "QUIT"
self.QUIT["fg"] = "red"
self.QUIT["command"] = self.quit
self.QUIT.pack({"side": "left"})
self.CSV_build = Button(self)
self.CSV_build ["text"] = "Create CSV",
self.CSV_build ["command"] = self.Build_CSV
self.CSV_build .pack({"side": "left"})
self.lst1 = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug',
'Sep','Oct','Nov','Dec']
self.var1 = StringVar()
        self.drop = OptionMenu(root, self.var1, *self.lst1)
        # pack() only: root already holds pack-managed widgets, so calling grid() here raises a TclError
        self.drop.pack({"side": "left"})
def __init__(self, master=None):
Frame.__init__(self, master)
self.pack()
self.createWidgets()
root = Tk()
app = Application(master=root)
root.geometry('{}x{}'.format(500, 500))
app.mainloop()
root.destroy()
| 2.640625 | 3 |
savegame.py | ronanpaixao/SkyAlchemy | 0 | 12787390 | # -*- coding: utf-8 -*-
"""
SkyAlchemy
Copyright ©2016 <NAME>
Licensed under the terms of the MIT License.
See LICENSE for details.
@author: <NAME>
"""
from __future__ import unicode_literals
import struct
from collections import OrderedDict
from io import BytesIO
import os
import os.path as osp
import ctypes
import ctypes.wintypes
#%% unpack and data
from skyrimtypes import unpack, RefID
import skyrimdata
#%%
class Savegame(object):
"""This class loads a The Elder Scrolls V: Skyrim savegame file and parses
useful information.
"""
def __init__(self, filename, load_now=True):
self.filename = filename
if load_now:
for i in self.loadGame():
pass
def loadGame(self):
filename = self.filename
#%%
d = OrderedDict() # Data storage
f = open(filename, 'rb') # TODO: replace
f.seek(0)
if True:
# with open(filename, 'rb') as f:
# File
d['magic'] = f.read(13)
yield f.tell()
if d['magic'] != b'TESV_SAVEGAME':
raise AssertionError("Incorrect magic in file header")
d['headerSize'] = unpack("uint32", f)
# Header
header = BytesIO(f.read(d['headerSize']))
d['version'] = unpack("uint32", header)
if not 7 <= d['version'] <= 9:
raise AssertionError("Only versions 7 to 9 are supported")
d['saveNumber'] = unpack("uint32", header)
d['playerName'] = unpack("wstring", header)
d['playerLevel'] = unpack("uint32", header)
d['playerLocation'] = unpack("wstring", header)
d['gameDate'] = unpack("wstring", header)
d['playerRaceEditorId'] = unpack("wstring", header)
d['playerSex'] = {0: "male", 1: "female"}[unpack("uint16", header)]
d['playerCurExp'] = unpack("float32", header)
d['playerLvlUpExp'] = unpack("float32", header)
d['filetime'] = unpack("filetime", header)
d['shotWidth'] = unpack("uint32", header)
d['shotHeight'] = unpack("uint32", header)
yield f.tell()
# Back to file
d['screenshotData'] = f.read(3*d['shotWidth']*d['shotHeight'])
from PIL import Image
d['screenshotImage'] = Image.frombytes("RGB",
(d['shotWidth'], d['shotHeight']),
d['screenshotData'])
yield f.tell()
d['formVersion'] = unpack("uint8", f)
d['pluginInfoSize'] = unpack("uint32", f)
# Plugin
plugin = BytesIO(f.read(d['pluginInfoSize']))
d['pluginCount'] = unpack("uint8", plugin)
d['plugins'] = [unpack("wstring", plugin)
for i in range(d['pluginCount'])]
yield f.tell()
# File Location Table
formIDArrayCountOffset = unpack("uint32", f)
unknownTable3Offset = unpack("uint32", f)
globalDataTable1Offset = unpack("uint32", f)
globalDataTable2Offset = unpack("uint32", f)
changeFormsOffset = unpack("uint32", f)
globalDataTable3Offset = unpack("uint32", f)
globalDataTable1Count = unpack("uint32", f)
globalDataTable2Count = unpack("uint32", f)
globalDataTable3Count = unpack("uint32", f)
changeFormCount = unpack("uint32", f)
f.read(4*15) # unused
yield f.tell()
# Global Data 1
f.seek(globalDataTable1Offset)
gdata1 = []
for i in range(globalDataTable1Count):
gdata1.append(unpack("globalData", f))
yield f.tell()
# Global Data 2
f.seek(globalDataTable2Offset)
gdata2 = []
for i in range(globalDataTable2Count):
gdata2.append(unpack("globalData", f))
yield f.tell()
# changeForms
f.seek(changeFormsOffset)
d_changeforms = []
for i in range(changeFormCount):
d_changeforms.append(unpack("ChangeForm", f))
yield f.tell()
d['changeforms'] = d_changeforms
# Global Data 3
yield f.tell()
f.seek(globalDataTable3Offset)
gdata3 = []
for i in range(globalDataTable3Count):
gdata3.append(unpack("globalData", f))
yield f.tell()
d['gdata'] = {v[1]:v[2] for v in (gdata1 + gdata2 + gdata3)}
# formID
f.seek(formIDArrayCountOffset)
formIDArrayCount = unpack("uint32", f)
d['formid'] = struct.Struct('{}I'.format(formIDArrayCount)).unpack(
f.read(formIDArrayCount*4))
yield f.tell()
# Visited Worldspace
visitedWorldspaceArrayCount = unpack("uint32", f)
d['visitedWorldspaceArray'] = struct.Struct('{}I'.format(
visitedWorldspaceArrayCount)).unpack(f.read(
visitedWorldspaceArrayCount*4))
yield f.tell()
# unknownTable3
f.seek(unknownTable3Offset)
ukt3count = unpack("uint32", f)
assert(len(f.read()) == ukt3count)
# EOF
assert(f.read() == b"")
yield f.tell()
# Inventory
for cf in d['changeforms']:
if cf.type == 1 and cf.formid.value == 0x14:
break
d['inventory'] = cf.d['inventory']
self.d = d
def populate_ids(self):
for k, created in self.d['gdata']['Created Objects'].items():
for item in created:
# print "Created", k, hex(item.refID.value)
RefID.createdid[item.refID.value] = item
for i, formid in enumerate(self.d['formid']):
if formid in RefID.defaultid:
RefID.formid[i+1] = RefID.defaultid[formid]
elif formid in RefID.createdid:
RefID.formid[i+1] = RefID.createdid[formid]
def player_ingrs(self):
for inv_item in self.d['inventory']:
if (inv_item.item.type not in {'C', 'F'} and
inv_item.item.name.type == "INGR"):
yield (inv_item.itemcount, inv_item.item.value)
#%%
def getSaveGames():
"""Get list of savegame files"""
dll = ctypes.windll.shell32
buf = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH + 1)
try:
if dll.SHGetSpecialFolderPathW(None, buf, 0x0005, False):
savedir = osp.join(buf.value, "My Games", "Skyrim", "Saves")
if not osp.exists(savedir) or not osp.isdir(savedir):
raise RuntimeError("Could not find savegame directory.")
else:
raise RuntimeError("Could not find savegame directory.")
except:
raise RuntimeError("Could not find savegame directory.")
savegames = [osp.join(savedir, f) for f in os.listdir(savedir) if f.endswith(".ess")]
return savegames
#%%
def test_savegame():
#%%
savegames = getSaveGames()
for filename in savegames:
sg = Savegame(filename)
        sg.populate_ids()
| 2.203125 | 2 |
test/test_basic_flow_rpm.py | Albatrosina/dev | 0 | 12787391 | <filename>test/test_basic_flow_rpm.py
# -*- coding: utf-8 -*-
from dev.model.fill_row import Filling
def test_r_p_m_accept(app):
app.action.switch_to_rpm_tab()
app.fill.corn(Filling(bt="20000", fp="5", hta="1", spread="1", roll="1", flex="1", basis="1", note="some test note"))
app.action.confirm_targets_row()
def test_r_p_m_decline(app):
app.action.switch_to_rpm_tab()
    app.fill.corn(Filling(bt="20000", fp="5", hta="1", spread="1", roll="1", flex="1", basis="1", note="some test note"))
app.action.delete_targets_row()
| 1.671875 | 2 |
drawingClass.py | crealu/py-writing | 0 | 12787392 | <reponame>crealu/py-writing
import turtle
from turtle import *
import random
turtle.colormode(255)
bw = [
[29, 255],
[208, 255],
[236, 255]
]
rw = [
[234, 255],
[6, 255],
[6, 255]
]
gw = [
[47, 255],
[245, 255],
[108, 255]
]
yw = [
[249, 255],
[243, 255],
[44, 255]
]
class Drawing:
def __init__(self, ca, path):
self.kame = turtle.Turtle(),
self.kameSpeed = 0,
self.circles = 6,
self.path = path,
self.colorArray = ca
def drawIt(self):
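        # step the pen colour from the start value towards the end value of each RGB
        # channel while the spiral is drawn; the pen width shrinks as the drawing progresses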
colorChangeArray = []
for cv in self.colorArray:
difference = abs(cv[1] - cv[0])
colorChangeArray.append(difference)
gradient = int(360 / self.circles[0])
r = self.colorArray[0][0]
g = self.colorArray[1][0]
b = self.colorArray[2][0]
self.kame[0].pencolor(r, g, b)
rInc = int(colorChangeArray[0] / gradient)
gInc = int(colorChangeArray[1] / gradient)
bInc = int(colorChangeArray[2] / gradient)
for i in range(0, 320):
            if i % self.circles[0] == 0:
self.kame[0].width(abs(int(i/9) - 40))
self.kame[0].setheading(i + self.path[0])
self.kame[0].forward(10)
r += rInc
g += gInc
b += bInc
self.kame[0].pencolor(r, g, b)
raph = Drawing(bw, 0)
mikey = Drawing(rw, 90)
leo = Drawing(yw, 180)
don = Drawing(gw, 270)
def runAll():
raph.drawIt()
mikey.drawIt()
leo.drawIt()
don.drawIt()
# listens to events
turtle.listen()
turtle.onkey(don.drawIt, 'd')
turtle.onkey(mikey.drawIt, 'm')
turtle.onkey(raph.drawIt, 'r')
turtle.onkey(leo.drawIt, 'l')
turtle.onkey(runAll, 'a')
# prevents progam from closing
turtle.mainloop()
| 2.765625 | 3 |
mpu6050/__init__.py | streamnsight/mpu6050 | 0 | 12787393 | from .mpu6050 import mpu6050
from .mpu6050 import utils_3d | 1.039063 | 1 |
setup.py | PythonCHB/NumpyExtras | 0 | 12787394 | #!/usr/bin/env python
try:
    from setuptools import setup, Extension
except ImportError:
    print "You need setuptools to build this module"
    raise
from Cython.Build import cythonize
import numpy as np #for the include dirs...
#import os, sys
include_dirs = [np.get_include()]
ext_modules = cythonize(Extension("numpy_extras.filescanner",
["numpy_extras/filescanner.pyx"],
include_dirs = include_dirs,
language = "c",
))
# This setup is suitable for "python setup.py develop".
setup(name = "numpy_extras",
version = "0.2.0",
description='A few extra things for numpy',
author='<NAME>',
author_email='<EMAIL>',
url="https://github.com/NOAA-ORR-ERD",
license = "Public Domain",
packages = ["numpy_extras"],
ext_modules = ext_modules,
classifiers=["Development Status :: 2 - Pre-Alpha",
"License :: Public Domain",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Operating System :: OS Independent",
"Programming Language :: Cython",
"Programming Language :: Python :: 2 :: Only",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Utilities",
"Topic :: Scientific/Engineering",
],
)
| 1.757813 | 2 |
text2cc/gui/tk.py | dlehman83/text2cc | 1 | 12787395 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2021, <NAME>
# Copyright (c) 2020, <NAME>
# All rights reserved.
#
# Licensed under the BSD 3-Clause License:
# http://opensource.org/licenses/BSD-3-Clause
#
import os
import pathlib
import shutil
import time
import tkinter as tk
import tkinter.filedialog
import webbrowser
from ..config import Config
from ..err import Text2qtiError
from ..qti import QTI
from ..quiz import Quiz
from .. import version
#from tkinter.ttk import * #possibly not needed
def main():
config = Config()
config.load()
file_name = ''
window = tk.Tk()
window.title('text2CC')
# Bring window to front and put in focus
window.iconify()
window.update()
window.deiconify()
# Window grid setup
current_row = 0
column_count = 4
header_label = tk.Label(
window,
text='text2cc – Create quizzes in Common Cartridge format from Markdown-based plain text',
font=(None, 16),
)
header_label.grid(
row=current_row, column=0, columnspan=column_count, padx=(30, 30),
sticky='nsew',
)
#current_row += 1
# header_link_label = tk.Label(
# window,
# text='github.com/gpoore/text2qti',
# font=(None, 14), fg='blue', cursor='hand2',
# )
# header_link_label.bind('<Button-1>', lambda x: webbrowser.open_new('https://github.com/gpoore/text2qti'))
# header_link_label.grid(
# row=current_row, column=0, columnspan=column_count, padx=(30, 30),
# sticky='nsew',
# )
current_row += 1
version_label = tk.Label(
window,
text=f'Version: {version.__version__}',
)
version_label.grid(
row=current_row, column=0, columnspan=column_count, padx=(30, 30), pady=(0, 30),
sticky='nsew',
)
current_row += 1
file_browser_label = tk.Label(
window,
text='Quiz file:\n(plain text file)',
justify='right',
)
file_browser_label.grid(
row=current_row, column=0, padx=(30, 5), pady=(5, 25),
sticky='nse',
)
last_dir = None
def browse_files():
nonlocal file_name
nonlocal last_dir
if last_dir is None:
initialdir = pathlib.Path('~').expanduser()
else:
initialdir = last_dir
file_name = tkinter.filedialog.askopenfilename(
initialdir=initialdir,
title='Select a quiz file',
filetypes=[('Quiz files', '*.md;*.txt')],
)
if file_name:
if last_dir is None:
last_dir = pathlib.Path(file_name).parent
file_browser_button.config(text=f'"{file_name}"', fg='green')
else:
file_browser_button.config(text=f'<none selected>', fg='red')
file_browser_button = tk.Button(
window,
text='<none selected>',
fg='red',
command=browse_files,
)
file_browser_button.grid(
row=current_row, column=1, columnspan=column_count-1, padx=(0, 30), pady=(5, 25),
sticky='nsew',
)
file_browser_button.bind('<Return>', lambda e: browse_files())
current_row += 1
#Above here set good
# advanced_options_label = tk.Label(
# window,
# text='Advanced options – LaTeX math & executable code',
# justify='right',
# )
# advanced_options_label.grid(
# row=current_row, column=1, columnspan=2, padx=(0, 0), pady=(5, 5),
# sticky='nsw',
# )
# current_row += 1
# latex_url_label = tk.Label(
# window,
# text='LaTeX math rendering URL:\n(for Canvas and similar systems)',
# justify='right',
# )
# latex_url_label.grid(
# row=current_row, column=0, padx=(30, 5), pady=(5, 5),
# sticky='nse',
# )
# latex_url_entry = tk.Entry(window, width=100)
# latex_url_entry.grid(
# row=current_row, column=1, columnspan=column_count-1, padx=(0, 30), pady=(5, 5),
# sticky='nsew',
# )
# if 'latex_render_url' in config:
# latex_url_entry.insert(1, f"{config['latex_render_url']}")
# current_row += 1
# pandoc_exists = bool(shutil.which('pandoc'))
# pandoc_mathml_label = tk.Label(
# window,
# text='Convert LaTeX math to MathML:\n(requires Pandoc; ignores rendering URL)',
# justify='right',
# )
# if not pandoc_exists:
# pandoc_mathml_label['fg'] = 'gray'
# pandoc_mathml_label.grid(
# row=current_row, column=0, padx=(30, 5), pady=(5, 5),
# sticky='nse',
# )
# pandoc_mathml_bool = tk.BooleanVar()
# def pandoc_mathml_command():
# if pandoc_mathml_bool.get():
# latex_url_label['fg'] = 'gray'
# latex_url_entry['fg'] = 'gray'
# else:
# latex_url_label['fg'] = 'black'
# latex_url_entry['fg'] = 'black'
# if pandoc_exists:
# pandoc_mathml_button = tk.Checkbutton(
# window,
# variable=pandoc_mathml_bool,
# command=pandoc_mathml_command,
# )
# pandoc_mathml_bool.set(config['pandoc_mathml'])
# else:
# pandoc_mathml_button = tk.Checkbutton(
# window,
# state=tk.DISABLED,
# )
# pandoc_mathml_button.grid(
# row=current_row, column=1, sticky='w',
# )
# current_row += 1
def run():
run_message_text.delete(1.0, tk.END)
run_message_text['fg'] = 'gray'
run_message_text.insert(tk.INSERT, 'Starting...')
run_message_text.update()
error_message = None
if not file_name:
error_message = 'Must select a quiz file'
run_message_text.delete(1.0, tk.END)
run_message_text.insert(tk.INSERT, error_message)
run_message_text['fg'] = 'red'
return
#if latex_url_entry.get():
#config['latex_render_url'] = latex_url_entry.get()
#config['pandoc_mathml'] = pandoc_mathml_bool.get()
file_path = pathlib.Path(file_name)
try:
text = file_path.read_text(encoding='utf-8-sig') # Handle BOM for Windows
except FileNotFoundError:
error_message = f'File "{file_path}" does not exist.'
except PermissionError as e:
error_message = f'File "{file_path}" cannot be read due to permission error. Technical details:\n\n{e}'
except UnicodeDecodeError as e:
error_message = f'File "{file_path}" is not encoded in valid UTF-8. Technical details:\n\n{e}'
except Exception as e:
error_message = f'An error occurred in reading the quiz file. Technical details:\n\n{e}'
if error_message:
run_message_text.delete(1.0, tk.END)
run_message_text.insert(tk.INSERT, error_message)
run_message_text['fg'] = 'red'
return
cwd = pathlib.Path.cwd()
os.chdir(file_path.parent)
try:
quiz = Quiz(text, config=config, source_name=file_path.as_posix())
qti = QTI(quiz)
qti.save(f'{file_path.stem}.zip')
except Text2qtiError as e:
error_message = f'Quiz creation failed:\n\n{e}'
except Exception as e:
error_message = f'Quiz creation failed unexpectedly. Technical details:\n\n{e}'
finally:
os.chdir(cwd)
if error_message:
run_message_text.delete(1.0, tk.END)
run_message_text.insert(tk.INSERT, error_message)
run_message_text['fg'] = 'red'
else:
run_message_text.delete(1.0, tk.END)
run_message_text.insert(tk.INSERT, f'Created quiz "{file_path.parent.as_posix()}/{file_path.stem}.zip"')
run_message_text['fg'] = 'green'
run_button = tk.Button(
window,
text='RUN',
font=(None, 14),
command=run,
)
run_button.grid(
row=current_row, column=1, columnspan=2, padx=(0, 0), pady=(30, 30),
sticky='nsew',
)
run_button.bind('<Return>', lambda e: run())
current_row += 1
run_message_label = tk.Label(
window,
text='\nRun Summary:\n',
relief='ridge',
width=120,
)
run_message_label.grid(
row=current_row, column=0, columnspan=column_count, padx=(30, 30), pady=(0, 0),
sticky='nsew',
)
current_row += 1
run_message_frame = tk.Frame(
window,
width=120, height=40,
borderwidth=1, relief='sunken', bg='white',
)
run_message_frame.grid(
row=current_row, column=0, columnspan=column_count, padx=(30, 30), pady=(0, 30),
sticky='nsew',
)
run_message_scrollbar = tk.Scrollbar(run_message_frame)
run_message_scrollbar.pack(
side='right', fill='y',
)
run_message_text = tk.Text(
run_message_frame,
width=10, height=10, borderwidth=0, highlightthickness=0,
wrap='word',
yscrollcommand=run_message_scrollbar.set,
)
run_message_text.insert(tk.INSERT, 'Waiting...')
run_message_text['fg'] = 'gray'
run_message_scrollbar.config(command=run_message_text.yview)
run_message_text.pack(
side='left', fill='both', expand=True,
padx=(5, 5), pady=(5, 5),
)
def About_page():
abt = tk.Toplevel(window)
def close_about():
abt.destroy()
abt.grab_set()
abt.title("About")
lbl=tk.Label(
abt,
text ='Text2CC \n \n Copyright © 2021, <NAME> \n Copyright © 2020, <NAME> \n ',
#anchor='e',
justify='center',
)
lbl.grid(
# column=0,
# columnspan=4,
# row=1,
# rowspan=4,
pady=3,
padx=30,
# sticky="NW"
)
vlbl=tk.Label(
abt,
text =f'Version: {version.__version__} \n',
#bg = 'red',
#anchor="CENTER",
#width=max((len(text)))
)
vlbl.grid(
# column=0,
# columnspan=4,
# row=2,
# rowspan=4,
pady=3,
# padx=30,
# sticky="NW"
)
liclbl = tk.Label(
abt,
text='License: BSD 3-Clause',
#font=(None, 14),
fg='blue', cursor='hand2',
)
liclbl.bind('<Button-1>', lambda x: webbrowser.open_new('https://opensource.org/licenses/BSD-3-Clause'))
liclbl.grid(
row=current_row, column=0, columnspan=column_count, padx=(30, 30),
#sticky='nsew',
)
OK_BTN=tk.Button(
abt,
text="OK",
command=close_about,
anchor= 'center',
)
OK_BTN.bind('<Return>', lambda e: close_about())
OK_BTN.grid(
# column=2,
# columnspan=1,
# row=5,
# padx=30,
pady=30,
# sticky="se"
)
OK_BTN.focus_set()
# Gets the requested values of the height and widht.
windowWidth = abt.winfo_reqwidth()
windowHeight = abt.winfo_reqheight()
# Gets both half the screen width/height and window width/height
positionRight = int(abt.winfo_screenwidth()/2 - windowWidth/2)
positionDown = int(abt.winfo_screenheight()/2 - windowHeight/2)
# Positions the window in the center of the page.
abt.geometry("+{}+{}".format(positionRight, positionDown))
#abt.geometry("225x100+{}+{}".format(positionRight, positionDown))
abt.resizable(height= None, width= None)
abt.focus()
#abt.bind('<Return>', close_about)
def open_help():
webbrowser.open_new('https://github.com/dlehman83/text2cc/blob/master/README.md')
menubar = tk.Menu(window)
filemenu = tk.Menu(menubar, tearoff=False)
filemenu.add_command(label="Open",command=browse_files,underline=0)
filemenu.add_command(label="Run",command=run,underline=0)
filemenu.add_command(label="Exit",command=window.quit,underline=1)
menubar.add_cascade(label="File", menu=filemenu,underline=0)
helpmenu = tk.Menu(menubar, tearoff=False)
helpmenu.add_command(label="Help",underline=0,command=open_help)
helpmenu.add_command(label="About",command=About_page,underline=0)
menubar.add_cascade(label="Help", menu=helpmenu,underline=0)
# Gets the requested values of the height and widht.
windowWidth = window.winfo_reqwidth()
windowHeight = window.winfo_reqheight()
# Gets both half the screen width/height and window width/height
positionRight = int(window.winfo_screenwidth()/2 - windowWidth/2)
positionDown = int(window.winfo_screenheight()/2 - windowHeight/2)
# Positions the window in the center of the page.
window.geometry("+{}+{}".format(positionRight, positionDown))
window.config(menu=menubar)
    window.bind('<Return>', lambda e: run())
window.mainloop()
| 3.109375 | 3 |
median-of-two-sorted-arrays/main.py | hotpxl/code-forces | 0 | 12787396 | <filename>median-of-two-sorted-arrays/main.py
class Solution(object):
def isMedian(self, nums1, nums2, i):
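        # j is the number of elements nums2 must contribute before nums1[i] for
        # nums1[i] to sit at the median position of the merged array.
        # Returns 0 if nums1[i] is the median, 1 if the median lies to the left of
        # index i in nums1, and -1 if it lies to the right.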
j = (len(nums1) + len(nums2) - 1) / 2 - i
if len(nums2) == 0:
if j == 0:
return 0
if j < 0:
return 1
if 0 < j:
return -1
if j < 0:
return 1
if len(nums2) < j:
return -1
if j == 0:
if nums1[i] <= nums2[0]:
return 0
else:
return 1
if j == len(nums2):
if nums2[-1] <= nums1[i]:
return 0
else:
return -1
if nums1[i] < nums2[j - 1]:
return -1
if nums2[j] < nums1[i]:
return 1
return 0
def findMedianSortedArraysOdd(self, nums1, nums2):
start = 0
end = len(nums1) - 1
while start <= end:
mid = (start + end) / 2
r = self.isMedian(nums1, nums2, mid)
if r == 0:
return nums1[mid]
if r == 1:
end = mid - 1
else:
start = mid + 1
return self.findMedianSortedArraysOdd(nums2, nums1)
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
if (len(nums1) + len(nums2)) % 2 == 1:
return self.findMedianSortedArraysOdd(nums1, nums2)
else:
m0 = None if len(nums1) == 0 else nums1[0]
m1 = None if len(nums2) == 0 else nums2[0]
if m0 is None and m1 is None:
return None
else:
m = min(m0, m1)
nums1.insert(0, m)
median0 = self.findMedianSortedArraysOdd(nums1, nums2)
nums1 = nums1[1:]
m0 = None if len(nums1) == 0 else nums1[-1]
m1 = None if len(nums2) == 0 else nums2[-1]
if m0 is None and m1 is None:
return None
else:
m = max(m0, m1)
nums1.append(m)
median1 = self.findMedianSortedArraysOdd(nums1, nums2)
return (median0 + median1) / 2.0
| 3.796875 | 4 |
app/manager.py | actini/storage-manager | 0 | 12787397 | from flask import Flask, request, jsonify
from services import MongoDBService
app = Flask(__name__)
@app.route("/")
def root():
return "Welcome to Storage Manager!"
@app.route("/health")
def health():
return "ok"
@app.route("/databases", methods=["GET"])
def all_databases():
databases = MongoDBService().list_databases()
return jsonify(databases)
@app.route("/databases", methods=["POST"])
def create_database():
database = request.json.get("database", None)
if database is None:
return {
"status": "error",
"message": "Database name is required!"
}, 400
mongodb = MongoDBService().connect()
try:
mongodb.create_database(database)
except Exception as e:
return {
"status": "error",
"message": "%s" % (e)
}, 500
user = request.json.get("user", None)
if user is None:
return {
"status": "ok",
"database": database
}
username = user.get("username", None)
password = user.get("password", None)
permission = user.get("permission", "readWrite")
if (username is None) or (password is None):
return {
"status": "error",
"message": "Username and password are required to create database with a user!"
}, 400
try:
mongodb.create_user(
database,
{
"username": username,
"password": password,
"permission": permission
}
)
except Exception as e:
return {
"status": "error",
"message": "%s" % (e)
}, 500
return {
"status": "ok",
"database": database,
"username": username
}
@app.route("/databases/<database>", methods=["DELETE"])
def drop_database(database):
try:
MongoDBService().drop_database(database)
except Exception as e:
return {
"status": "error",
"message": "%s" % (e)
}, 500
return {
"status": "ok"
}
@app.route("/users", methods=["POST"])
def create_user():
database = request.json.get("database", None)
user = request.json.get("user", {})
username = user.get("username", None)
password = user.get("password", None)
permission = user.get("permission", "readWrite")
if (database is None) or (username is None) or (password is None):
return {
"status": "error",
"message": "Username, password, database are required to create a user!"
}, 400
try:
MongoDBService().create_user(
database,
{
"username": username,
"password": password,
"permission": permission
}
)
except Exception as e:
return {
"status": "error",
"message": "%s" % (e)
}, 500
return {
"status": "ok",
"database": database,
"username": username
}
@app.route("/users/<username>", methods=["PUT", "PATCH"])
def update_user(username):
database = request.json.get("database", None)
user = request.json.get("user", {})
password = user.get("password", None)
permission = user.get("permission", None)
if database is None:
return {
"status": "error",
"message": "Database name is required to update a user!"
}, 400
if (password is None) and (permission is None):
return {
"status": "error",
"message": "Password or permission is required to update a user!"
}, 400
try:
MongoDBService().update_user(
database,
{
"username": username,
"password": password,
"permission": permission
}
)
except Exception as e:
return {
"status": "error",
"message": "%s" % (e)
}, 500
return {
"status": "ok",
"database": database,
"username": username
}
@app.route("/users/<username>", methods=["DELETE"])
def drop_user(username):
database = request.json.get("database", None)
if database is None:
return {
"status": "error",
"message": "Database name is required to drop a user!"
}, 400
try:
MongoDBService().drop_user(database, username)
except Exception as e:
return {
"status": "error",
"message": "%s" % (e)
}, 500
return {
"status": "ok"
}
if __name__ == "__main__":
app.run()
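# Illustrative usage of the API above (assumes the service is listening on localhost:5000):
#   curl -X POST localhost:5000/databases -H 'Content-Type: application/json' \
#        -d '{"database": "mydb", "user": {"username": "alice", "password": "secret"}}'
#   curl -X DELETE localhost:5000/users/alice -H 'Content-Type: application/json' \
#        -d '{"database": "mydb"}'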
| 2.984375 | 3 |
Exercises/Registration101.py | kmader/qbi-2019-py | 2 | 12787398 | #!/usr/bin/env python
# coding: utf-8
# # Registration 101
#
# Image registration is a critical tool in longitudinal monitoring:
#
# - Estimation of local changes
# - Comparison to same animal (less variance)
# - [3R's](https://www.nc3rs.org.uk/the-3rs)
#
#
#
# ## Goal of tutorial:
# - Introduce the concept of aligning images
# - Demonstrate the use of numerical comparison between images
# - Introduce concept of optimisation
# - Registering images within framework (Thesis: <NAME> & <NAME>)
# ## Setting up a responsive environment
# In[1]:
# Please do not edit this code, it is important for choosing a compatible renderer for images
# libraries for viewing images
import sys
sys.path.append("reg101_files")
from image_viewing import horizontal_pane, overlay_RGB, overlay_RGB_cost
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# numpy as always
import numpy as np
# In[2]:
# this is a way of finding the fastest image display on your platform
print("Using:", matplotlib.get_backend())
gui_env = ["WXAgg", "TKAgg", "QT5Agg", "GTKAgg", "Qt4Agg"]
for gui in gui_env:
try:
print("testing", gui)
matplotlib.use(gui, warn=False, force=True)
from matplotlib import pyplot as plt
break
except:
continue
print("Using:", matplotlib.get_backend())
# ## What does registration actually do?
#
# In the example below there are images from 2 weeks.
#
# The position of the animal in the scanner was different each week.
#
# In[3]:
get_ipython().run_line_magic("matplotlib", "notebook")
# this allows the image to be viewed in the notebook with edge
matplotlib.rcParams["figure.figsize"] = (8, 7)
images = [
mpimg.imread("reg101_files/week2.tiff"),
mpimg.imread("reg101_files/week3.tiff"),
]
horizontal_pane(images) # shows images side by side
# How can we interpret these images?
#
# In *in vivo* studies it is still common to use *dynamic histomorphometry*, where markers are given to the mice at different intervals over the course of a study. These are built into the bone matrix and can afterwards be labelled and visualised in histological sections.
#
# 
#
# In the image above, the green stain was used to mark the surface where bone formed, while the red stain marks osteoid (newly formed, unmineralised bone). The method is limited purely to observation of anabolic events: resorption of bone removes material, so there is nothing left to stain.
#
# Inspired by these histological images we can create a virtual histology image, in which we "stain" the change in mineralisation between the two weeks. Both images are grey-scale (single channel), so we can emulate the histology by using colour channels. Here we put the *later* week in *green* and the *former* week in *red*, and both weeks are averaged in the blue channel.
#
# When the images are aligned we see white (R+G+B); where the *later* week is brighter we see *green* and where the *former* week is brighter we see *red*. This is essentially formation == green and resorption == red. The function below does this for us automatically.
#
# In[4]:
# puts the images into the Red(week1) and G(week2) channel of an image
overlay_RGB(images)
# These images are clearly not well aligned. We will now discuss how to align the images.
# ## Overlaying an image
#
# Registration involves finding the best set of transformation parameters for overlaying an image.
#
# Run the following cell and try to find the best x and y shifts for aligning the images.
# In[5]:
# manual transform
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
from IPython.display import clear_output, display
from scipy.ndimage import affine_transform
def move_overlay(image1, image2, dx, dy):
T = np.identity(3)
T[0, 2] = dy
T[1, 2] = dx
images = [image1, affine_transform(image2, T)]
overlay_RGB(images)
clear_output(True)
# In[6]:
interactive(
move_overlay,
image1=fixed(images[0]),
image2=fixed(images[1]),
dx=(-50, 50, 0.25),
dy=(-50, 50, 0.25),
)
# In[7]:
class_x = [-4.5, -6.5, -5.0, -4.25, -4, -5, -4.7]
class_y = [-18.5, -22, -20.5, -20, -19, -19, -19.5]
print("Class variance in X: ", np.mean(class_x))
print("Class variance in Y: ", np.mean(class_y))
# ## Cost functions
#
# We have now demonstrated the use of a powerful neural network (you) for finding the best fit. In reality the computer is not aware of what the images look like: the registration algorithm needs to numerically determine the goodness of fit so that it can optimize until the correct parameters are found.
#
# Imagine we have two images X and Y. The least squared difference would be as follows:
#
# \begin{equation*}
# C^{LS} = \sum \left( X - Y \right)^2
# \end{equation*}
#
# where the sum of squared differences runs over all pixels of both images. In Python it looks like this:
#
#
# ```python
# def least_squared(X,Y):
# delta = X-Y
# square_delta = delta**2
# return np.sum(square_delta)
#
# ```
#
# Another measure of similarity is the correlation function:
# \begin{equation*}
# C^{cor} = \frac{\sum \left( X \cdot Y \right) }{\sqrt{\sum X^2}.\sqrt{\sum Y^2}}
# \end{equation*}
#
# In python it looks like this:
# ```python
# def correlation(Image1,Image2):
# corr = np.corrcoef(Image1,y=Image2)
# return(corr[0,1])
# ```
#
# ### Exercise 2:
# Align the images again, this time try and use the cost function to determine your next move.
#
# In[8]:
# least squared difference cost
def least_squared(Image1, Image2):
delta = Image1 - Image2
return np.sum(delta ** 2)
# In[9]:
def correlation(Image1, Image2):
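    # negated so that maximising the correlation becomes a minimisation problem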
return -1 * np.corrcoef(Image1.flatten(), y=Image2.flatten())[0, 1]
# In[10]:
c_funs = {"correlation": correlation, "least_squared": least_squared}
# In[11]:
cost = {c: [] for c in c_funs}
cost_function_names = [n for n in c_funs]
def move_overlay(image1, image2, dx, dy, cost_history, cost_function, cfuncs):
T = np.identity(3)
T[0, 2] = dy
T[1, 2] = dx
images = [image1, affine_transform(image2, T)]
overlay_RGB_cost(images, cost_history, cost_function, cfuncs, dx, dy)
clear_output(True)
# In[12]:
interactive(
move_overlay,
image1=fixed(images[0]),
image2=fixed(images[1]),
dx=(-60, 60, 0.5),
dy=(-60, 60, 0.5),
cost_history=fixed(cost),
cost_function=cost_function_names,
cfuncs=fixed(c_funs),
)
# In[13]:
comutational_cost = {"correlation": 0, "least_squared": 0, "abs_squared_difference": 0}
for function, v in cost.items():
for pix in v:
comutational_cost[function] += pix[3]
print("The total pixel cost was:", comutational_cost)
# This is quite an intensive task: for each movement you made, every pixel was evaluated to determine the cost function. Ideally we should bring the images close to alignment first, so that fewer transformations are needed. This can be done in two ways:
#
# 1. An initial guess
#
# 2. A multi-resolution scheme
#
# A good initial guess is a holy grail in image registration; it could involve calculating principal axes and centers of mass. However, during fracture healing the changes in the bone are large, and the formation of new material can cause principal axes to flip or swap.
#
# A multi-resolution scheme, on the other hand, reduces the problem size, progressively increasing it until the images are compared at the native resolution. This scheme is "easy" to implement and has an inherent strength over the naive approach.
#
# Images contain different frequencies. In general flat areas are low frequency while edges (and noise) are high frequency. The lower resolution images are predominantly low frequency information and have comparatively less noise. A pyramid approach is effectively using a smoothed cost function, avoiding local minima, while the progressive increase in resolution adds high frequency corrections to the solution.
#
# In[14]:
def split_frequency(frequency, image):
f = np.fft.fft2(image) # forward fft
fshift = np.fft.fftshift(f) # shift to center frequencies
hff = np.copy(fshift)
origin = np.array(hff.shape) * 0.5
y, x = np.ogrid[-origin[0] : origin[0], -origin[1] : origin[1]]
mask = (
x * x + y * y <= frequency * frequency
) # mask for high and low pass filtering
hff[mask] = 0 # high pass filter
lff = np.copy(fshift)
lff[mask != 1] = 0 # low pass filter
hff_ishift = np.fft.ifftshift(hff) # inverse shift
lff_ishift = np.fft.ifftshift(lff) # inverse shift
lff_back = np.fft.ifft2(lff_ishift) # inverse fft
hff_back = np.fft.ifft2(hff_ishift) # inverse fft
hff_back = np.abs(hff_back)
lff_back = np.abs(lff_back)
# contrast adjustment for viewing image
hff_back /= np.percentile(hff_back, 99)
hff_back[hff_back > 1] = 1.0
horizontal_pane([image, (lff_back), hff_back])
# In[15]:
interactive(split_frequency, frequency=(0, 204, 1), image=fixed(images[0]))
# ## Pyramid registration
#
# - Frequencies are dependent on the image resolution
# - Easiest way to create a pyramid is to create a stack of rescaled images
# In[16]:
# manual transform
from scipy.ndimage.interpolation import zoom
cost_pyramid = {c: [] for c in c_funs}
def move_overlay_pyramid(
image1, image2, dx, dy, cost_history, cost_function, cfuncs, level, history
):
level = int(level)
level = 2 ** (level - 1)
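    # pyramid levels 1..5 map to downsampling factors 1, 2, 4, 8, 16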
if level != 1:
i1 = zoom(image1, 1 / level, order=0)
i2 = zoom(image2, 1 / level, order=0)
else:
i1 = image1
i2 = image2
T = np.identity(3)
T[0, 2] = dy * 1 / level
T[1, 2] = dx * 1 / level
if level != 1:
images = [i1, affine_transform(i2, T, order=0)]
else:
images = [i1, affine_transform(i2, T)]
if len(cost_history) > 0:
overlay_RGB_cost(images, cost_history, cost_function, cfuncs, dx, dy, history)
else:
print("Move around to make some history")
clear_output(True)
# In[17]:
interactive(
move_overlay_pyramid,
image1=fixed(images[0]),
image2=fixed(images[1]),
dx=(-60, 60, 0.5),
dy=(-60, 60, 0.5),
cost_history=fixed(cost_pyramid),
cost_function=cost_function_names,
cfuncs=fixed(c_funs),
level=[5, 4, 3, 2, 1],
history=[None, 10, 20, 100],
)
# In[18]:
comutational_cost_pyramid = {"correlation": 0, "least_squared": 0}
for function, v in cost_pyramid.items():
for pix in v:
comutational_cost_pyramid[function] += pix[3]
print("The total pixel cost was:", comutational_cost_pyramid)
# # Automated registration
#
# This is clearly a difficult problem to do by hand. As we have these cost functions, it should be easy to minimise them. Actually there is a whole field of mathematics and computer science devoted to this: optimisation. It is not really "easy" to do.
#
# There are several courses at ETH which give genuinely useful information on this:
# - 401-0647-00L Introduction to Mathematical Optimization
# - 227-0707-00L Optimization Methods for Engineers (good for Graeme)
# - 261-5110-00L Optimization for Data Science (Machine learning)
# - 401-3904-00L Convex Optimization (Applied to specific problems where the only minima is the global minimum)
#
# In the example below the registration is performed using an [Evolutionary Algorithm](https://en.wikipedia.org/wiki/Evolutionary_algorithm). Simply put, a random population of initial guesses is used, the cost function is evaluated for each, and only the fittest X% are mated to create a new set of guesses.
#
# To avoid being captured in local minima, *mutations* are introduced, which add new parameter values to the increasingly homogeneous population.
# In[19]:
# optimizer with least squared
from scipy.optimize import minimize as ls
from scipy.optimize import differential_evolution
def correlation(x, i1, i2, path):
x = np.array(x)
T = np.identity(3)
T[0, 2] = x[1]
T[1, 2] = x[0]
images = [i1.flatten(), affine_transform(i2, T).flatten()]
delta = -1 * np.corrcoef(images[0], y=images[1])[0, 1]
path.append((x[0], x[1], delta))
return delta
def least_squared(x, i1, i2, path):
x = np.array(x)
T = np.identity(3)
T[0, 2] = x[1]
T[1, 2] = x[0]
images = [i1, affine_transform(i2, T)]
delta = np.sum((images[0] - images[1]) ** 2)
path.append((x[0], x[1], delta))
return delta
path_corralation = []
optimum_c = differential_evolution(
correlation,
[(-60, 30), (-60, 30)],
args=(images[0], images[1], path_corralation),
tol=0.00125,
) # ,method='Powell',options={'eps':0.5})
path_least_squared = []
optimum_ls = differential_evolution(
least_squared,
[(-60, 30), (-60, 30)],
args=(images[0], images[1], path_least_squared),
tol=0.00125,
) # ,method='Powell',options={'eps':0.5})
# We have now searched for the best transform using both cost functions. What do they look like?
# In[20]:
# Using the correlation cost function
x = optimum_c["x"]
T = np.identity(3)
T[0, 2] = x[1]
T[1, 2] = x[0]
overlay_RGB([images[0], affine_transform(images[1], T)])
# In[21]:
# Using the Least squared cost function
x = optimum_ls["x"]
T = np.identity(3)
T[0, 2] = x[1]
T[1, 2] = x[0]
overlay_RGB([images[0], affine_transform(images[1], T)])
# In[22]:
# difference in the images
diff = []
x = optimum_ls["x"]
T = np.identity(3)
T[0, 2] = x[1]
T[1, 2] = x[0]
diff.append(affine_transform(images[1], T))
x = optimum_c["x"]
T = np.identity(3)
T[0, 2] = x[1]
T[1, 2] = x[0]
diff.append(affine_transform(images[1], T))
overlay_RGB(diff)
# In[23]:
print("Difference in the transformation", optimum_ls["x"] - optimum_c["x"])
# In the cell below the cost functions are plotted. This can be done because the optimization we used searched an entire range of parameters.
#
# In[24]:
p_c = np.array(path_corralation)
p_ls = np.array(path_least_squared)
import matplotlib.tri as mtri
fig = plt.figure()
matplotlib.rcParams["figure.figsize"] = (9, 10)
"""
ax = fig.add_subplot(111, projection='3d')
ax.plot_trisurf(p_c[:,0],p_c[:,1],p_c[:,2],cmap=plt.cm.jet)
ax.set_title("Correlation")
ax.set_xlabel("dx")
ax.set_ylabel("dy")
ax.set_zlabel("cost (-)")
"""
ax = fig.add_subplot(111, projection="3d")
ax.set_title("Least Squared")
ax.set_xlabel("dx")
ax.set_ylabel("dy")
ax.set_zlabel("cost (-)")
ax.plot_trisurf(p_ls[:, 0], p_ls[:, 1], p_ls[:, 2], cmap=plt.cm.jet)
# # Conclusion to part 1
#
# - Image registration is not black magic
# - It is repetitive but simple maths
# - Algorithms are fallible:
# - Local minima in solution
# - Orientation of images
# - Quality of images
# - Understanding your chosen cost function is crucial for:
# - Choosing an algorithm
# - Knowing if your data is sufficient
| 3.421875 | 3 |
Grade 7/Lesson 28/sender.py | arm-university/ASP_International-Intro-to-Computing | 0 | 12787399 | # Add your Python code here. E.g.
#radio 1
from microbit import *
import radio
radio.on()
# any channel from 0 to 100 can be used for privacy.
radio.config(channel=5)
while True:
if button_a.was_pressed():
radio.send('HAPPY')
sleep(200)
elif button_b.was_pressed():
radio.send('SAD')
sleep(200)
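# A matching receiver (illustrative sketch for a second micro:bit, not part of this sender file):
#     from microbit import *
#     import radio
#     radio.on()
#     radio.config(channel=5)
#     while True:
#         msg = radio.receive()
#         if msg == 'HAPPY':
#             display.show(Image.HAPPY)
#         elif msg == 'SAD':
#             display.show(Image.SAD)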
| 3.125 | 3 |
build/lib/SciDataTool/Methods/DataLinspace/get_axis_periodic.py | enjoyneer87/SciDataTool | 24 | 12787400 | from SciDataTool.Functions import AxisError
from SciDataTool.Classes.Norm_vector import Norm_vector
def get_axis_periodic(self, Nper, is_aper=False):
"""Returns the vector 'axis' taking symmetries into account.
Parameters
----------
self: DataLinspace
a DataLinspace object
Nper: int
number of periods
    is_aper: bool
return values on a semi period (only for antiperiodic signals)
Returns
-------
New_axis: DataLinspace
Axis with requested (anti-)periodicities
"""
# Dynamic import to avoid loop
module = __import__("SciDataTool.Classes.DataLinspace", fromlist=["DataLinspace"])
DataLinspace = getattr(module, "DataLinspace")
try:
# Reduce axis to the given periodicity
Nper = Nper * 2 if is_aper else Nper
values = self.get_values()
N = self.get_length()
if N % Nper != 0:
raise AxisError("length of axis is not divisible by the number of periods")
values_per = values[: int(N / Nper)]
for norm in self.normalizations.values():
if isinstance(norm, Norm_vector):
norm.vector = norm.vector[: int(N / Nper)]
if is_aper:
sym = "antiperiod"
else:
sym = "period"
if Nper == 1 and sym == "period":
symmetries = dict()
else:
symmetries = {sym: Nper}
New_axis = DataLinspace(
initial=self.initial,
final=values_per[-1],
number=int(N / Nper),
include_endpoint=True,
name=self.name,
unit=self.unit,
symmetries=symmetries,
normalizations=self.normalizations.copy(),
is_components=self.is_components,
symbol=self.symbol,
)
except AxisError:
# Periodicity cannot be applied, return full axis
New_axis = self.copy()
return New_axis
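# Illustrative usage (sketch; assumes an axis whose length is divisible by Nper):
#   angle_one_period = angle_axis.get_axis_periodic(Nper=4)
#   angle_half_antiperiod = angle_axis.get_axis_periodic(Nper=2, is_aper=True)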
| 3.28125 | 3 |
ic/settings/dev.py | shoorday/IC | 2 | 12787401 | from .base import *
# 开发环境
DEBUG = True
SECRET_KEY = '<KEY>'
# debug_toolbar
INTERNAL_IPS = ['127.0.0.1', ]
SITE_HOST = 'http://127.0.0.1:8000'
FRONT_HOST = 'http://127.0.0.1:8080'
# Github登录
GITHUB_APP_KEY = 'xxxxx'
GITHUB_APP_SECRET = 'xxxxx'
| 1.359375 | 1 |
buildmenuviews/login_menu.py | acadianshadow237/BA_MDI1 | 0 | 12787402 | # -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'login.ui'
##
## Created by: Qt User Interface Compiler version 6.1.2
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide6.QtCore import * # type: ignore
from PySide6.QtGui import * # type: ignore
from PySide6.QtWidgets import * # type: ignore
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
if not MainWindow.objectName():
MainWindow.setObjectName(u"MainWindow")
MainWindow.resize(800, 600)
self.actionLogin = QAction(MainWindow)
self.actionLogin.setObjectName(u"actionLogin")
self.actionExit = QAction(MainWindow)
self.actionExit.setObjectName(u"actionExit")
self.actionContents = QAction(MainWindow)
self.actionContents.setObjectName(u"actionContents")
self.actionAbout = QAction(MainWindow)
self.actionAbout.setObjectName(u"actionAbout")
self.centralwidget = QWidget(MainWindow)
self.centralwidget.setObjectName(u"centralwidget")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QMenuBar(MainWindow)
self.menubar.setObjectName(u"menubar")
self.menubar.setGeometry(QRect(0, 0, 800, 22))
self.menuFile = QMenu(self.menubar)
self.menuFile.setObjectName(u"menuFile")
self.menuHelp = QMenu(self.menubar)
self.menuHelp.setObjectName(u"menuHelp")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QStatusBar(MainWindow)
self.statusbar.setObjectName(u"statusbar")
MainWindow.setStatusBar(self.statusbar)
self.toolBar = QToolBar(MainWindow)
self.toolBar.setObjectName(u"toolBar")
font = QFont()
font.setPointSize(12)
self.toolBar.setFont(font)
self.toolBar.setMovable(False)
self.toolBar.setFloatable(False)
MainWindow.addToolBar(Qt.TopToolBarArea, self.toolBar)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
self.menuFile.addAction(self.actionLogin)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionExit)
self.menuHelp.addAction(self.actionContents)
self.menuHelp.addSeparator()
self.menuHelp.addAction(self.actionAbout)
self.retranslateUi(MainWindow)
self.actionExit.triggered.connect(MainWindow.close)
QMetaObject.connectSlotsByName(MainWindow)
# setupUi
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QCoreApplication.translate("MainWindow", u"MainWindow", None))
self.actionLogin.setText(QCoreApplication.translate("MainWindow", u"Login", None))
#if QT_CONFIG(shortcut)
self.actionLogin.setShortcut(QCoreApplication.translate("MainWindow", u"Ctrl+L", None))
#endif // QT_CONFIG(shortcut)
self.actionExit.setText(QCoreApplication.translate("MainWindow", u"Exit", None))
#if QT_CONFIG(shortcut)
self.actionExit.setShortcut(QCoreApplication.translate("MainWindow", u"Ctrl+X", None))
#endif // QT_CONFIG(shortcut)
self.actionContents.setText(QCoreApplication.translate("MainWindow", u"Contents", None))
self.actionAbout.setText(QCoreApplication.translate("MainWindow", u"About", None))
self.menuFile.setTitle(QCoreApplication.translate("MainWindow", u"File", None))
self.menuHelp.setTitle(QCoreApplication.translate("MainWindow", u"Help", None))
self.toolBar.setWindowTitle(QCoreApplication.translate("MainWindow", u"toolBar", None))
# retranslateUi
| 1.851563 | 2 |
pymagnitude/third_party/allennlp/modules/token_embedders/token_characters_encoder.py | tpeng/magnitude | 1,520 | 12787403 | <filename>pymagnitude/third_party/allennlp/modules/token_embedders/token_characters_encoder.py
from __future__ import absolute_import
import torch
from allennlp.common import Params
from allennlp.data.vocabulary import Vocabulary
from allennlp.modules.token_embedders.embedding import Embedding
from allennlp.modules.seq2vec_encoders.seq2vec_encoder import Seq2VecEncoder
from allennlp.modules.time_distributed import TimeDistributed
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
class TokenCharactersEncoder(TokenEmbedder):
u"""
A ``TokenCharactersEncoder`` takes the output of a
:class:`~allennlp.data.token_indexers.TokenCharactersIndexer`, which is a tensor of shape
(batch_size, num_tokens, num_characters), embeds the characters, runs a token-level encoder, and
returns the result, which is a tensor of shape (batch_size, num_tokens, encoding_dim). We also
optionally apply dropout after the token-level encoder.
We take the embedding and encoding modules as input, so this class is itself quite simple.
"""
def __init__(self, embedding , encoder , dropout = 0.0) :
super(TokenCharactersEncoder, self).__init__()
self._embedding = TimeDistributed(embedding)
self._encoder = TimeDistributed(encoder)
if dropout > 0:
self._dropout = torch.nn.Dropout(p=dropout)
else:
self._dropout = lambda x: x
def get_output_dim(self) :
return self._encoder._module.get_output_dim() # pylint: disable=protected-access
def forward(self, token_characters ) : # pylint: disable=arguments-differ
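        # character index 0 is padding; mask it out before running the encoder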
mask = (token_characters != 0).long()
return self._dropout(self._encoder(self._embedding(token_characters), mask))
# The setdefault requires a custom from_params
@classmethod
def from_params(cls, vocab , params ) : # type: ignore
# pylint: disable=arguments-differ
embedding_params = params.pop(u"embedding")
# Embedding.from_params() uses "tokens" as the default namespace, but we need to change
# that to be "token_characters" by default.
embedding_params.setdefault(u"vocab_namespace", u"token_characters")
embedding = Embedding.from_params(vocab, embedding_params)
encoder_params = params.pop(u"encoder")
encoder = Seq2VecEncoder.from_params(encoder_params)
dropout = params.pop_float(u"dropout", 0.0)
params.assert_empty(cls.__name__)
return cls(embedding, encoder, dropout)
TokenCharactersEncoder = TokenEmbedder.register(u"character_encoding")(TokenCharactersEncoder)
| 2.421875 | 2 |
161019/test/8-tests.py | yeputons/fall-2016-paradigms | 1 | 12787404 | tests = []
inout = (
("\n", ""),
("set 123\n0\n",
["lock guard", "get flag", "set flag 1", "set result 123", "unlock guard"],
["lock guard", "get flag", "set result 123", "set flag 1", "unlock guard"]
),
("set 123\n1\n", ["lock guard", "get flag", "unlock guard"]),
("get\n0\n", ["lock guard", "get flag", "unlock guard", "-1"]),
("get\n1\n123\n",
["lock guard", "get flag", "get result", "set flag 0", "unlock guard", "123"],
["lock guard", "get flag", "set flag 0", "get result", "unlock guard", "123"],
),
)
def fixout(l):
return "\n".join(l)
def generate():
res = []
for k, *v in inout:
v = list(map(fixout, v))
res.append((k, v))
return res
def solve(dataset):
    for k, v1, *v2 in inout:
        if k == dataset:
            return fixout(v1)
def check(reply, clue):
return reply in clue
print(generate())
| 2.75 | 3 |
mapping_parenting_tech/analysis/prototyping/archived/initial-play-store-analysis.py | nestauk/mapping_parenting_tech | 0 | 12787405 | # ---
# jupyter:
# jupytext:
# cell_metadata_filter: -all
# comment_magics: true
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.8
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime as dt
from tqdm.notebook import tqdm
import os, sys
from mapping_parenting_tech import PROJECT_DIR, logging
INPUT_DIR = PROJECT_DIR / "inputs/data/play_store"
OUTPUT_DIR = PROJECT_DIR / "outputs/data"
# %%
chunk_size = 10 ** 4
file = INPUT_DIR / "Google-Playstore.csv"
file_size = os.path.getsize(file)
start = dt.now()
print(f"Started at {start}")
data_types = {
"App Name": str,
"App Id": str,
"Category": str,
"Rating": float,
"Rating Count": str,
"Installs": str,
"Minimum Installs": str,
"Maximum Installs": str,
"Free": bool,
"Price": float,
"Currency": str,
"Size": str,
"Minimum Android": str,
"Developer Id": str,
"Developer Website": str,
"Developer Email": str,
"Released": str,
"Last Updated": str,
"Content Rating": str,
"Privacy Policy": str,
"Ad Supported": bool,
"In App Purchases": bool,
"Editors Choice": bool,
"Scraped Time": str,
}
date_cols = ["Released", "Last Updated", "Scraped Time"]
data_chunks = []
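# read the large CSV in chunks to keep peak memory bounded; progress is written per chunk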
for data_chunk in pd.read_csv(
file, dtype=data_types, parse_dates=date_cols, chunksize=chunk_size
):
data_chunks.append(data_chunk)
sys.stdout.write(
f"Loaded {len(data_chunks)} chunks of (maybe) {int(file_size / chunk_size)}\r"
)
sys.stdout.flush()
print("\n")
print("Concatenating dataframe")
df = pd.concat(data_chunks, ignore_index=True)
end = dt.now()
duration = end - start
print(f"Completed at {end}\nStep took {duration}s")
# %%
df.shape
# %%
plot_df = df[["App Id", "Released"]]
plot_df["year_released"] = plot_df["Released"].dt.year
plot_df["Month released"] = plot_df["Released"].dt.month
plot_df = plot_df.groupby("year_released", as_index=False).agg(
app_count=("App Id", "count"),
months_in_year=("Month released", lambda x: x.nunique()),
)
plot_df["apps_per_month"] = plot_df["app_count"] / plot_df["months_in_year"]
plot_df["growth"] = plot_df["apps_per_month"].pct_change()
plot_df.plot.bar(x="year_released", y=["growth"], figsize=(10, 8), ylim=(0, 2.2))
print("Average growth: ", plot_df["apps_per_month"].mean())
# %%
plot_df.to_csv(OUTPUT_DIR / "play_store_growth.csv")
# %%
df["Minimum Installs"].fillna(0, inplace=True)
# %%
df = df.astype({"Minimum Installs": "int64"})
# %%
df.columns
# %%
cat_sizes = (
df.groupby("Category")
.agg(cat_size=("Category", "count"))
.sort_values("cat_size", ascending=False)
)
cat_sizes = cat_sizes.assign(
size_pc=(cat_sizes.cat_size / cat_sizes.cat_size.sum()) * 100
)
# %%
cat_sizes
# %%
import altair as alt
# %%
fig = (
alt.Chart(cat_sizes.reset_index(), width=700, height=550)
.mark_bar()
.encode(x="size_pc:Q", y=alt.Y("Category:N", sort="-x"), tooltip="size_pc")
)
fig
# %%
# cat_sizes.reset_index().sort_values("size_pc", ascending=False).to_csv("category_sizes.csv")
# %%
app_installs_df = df.groupby("Minimum Installs").agg(
installCount=("Minimum Installs", "count"), av_score=("Rating", "mean")
)
app_installs_df = app_installs_df[app_installs_df.index != 0]
# %%
base = alt.Chart(app_installs_df.reset_index(), width=700, height=700).encode(
x=alt.X("Minimum Installs", scale=alt.Scale(type="log"))
)
counts = base.mark_point(size=60, filled=True).encode(
alt.Y("installCount", axis=alt.Axis(title="Number of installs"))
)
scores = base.mark_line(stroke="red").encode(
alt.Y("av_score", axis=alt.Axis(title="Average score"))
)
alt.layer(counts, scores).resolve_scale(y="independent")
# %%
fig = (
alt.Chart(app_installs_df.reset_index(), width=700, height=500)
.mark_point()
.encode(
x=alt.X("Minimum Installs", scale=alt.Scale(type="log", base=10)),
y="installCount",
)
)
fig + fig.transform_loess("Minimum Installs", "installCount").mark_line()
# %%
# basic_app_details = df[
# [
# "appId",
# "cluster",
# "minInstalls",
# "score",
# "ratings",
# "reviews",
# "price",
# "free",
# "containsAds",
# "offersIAP",
# ]
# ]
basic_app_details = df[
[
"App Id",
"Category",
"Rating",
"Minimum Installs",
"Free",
"Price",
"Ad Supported",
"In App Purchases",
]
]
# %%
plotter = (
basic_app_details.groupby("Category")
.agg(
cluster_size=("Category", "count"),
free=("Free", "sum"),
IAPs=("In App Purchases", "sum"),
ads=("Ad Supported", "sum"),
)
.reset_index()
)
turn_to_pc = ["free", "ads", "IAPs"]
for i in turn_to_pc:
plotter[f"{i}_pc"] = plotter[i] / plotter.cluster_size
plotter
# %%
data_map = {
"free_pc": "Number of free apps",
"IAPs_pc": "Number of apps with in-app purchases",
"ads_pc": "Number of apps with ads",
}
# %%
mean_free = plotter.free_pc.mean()
mean_IAPs = plotter.IAPs_pc.mean()
mean_ads = plotter.ads_pc.mean()
print(
f" Mean number of free apps:\t{mean_free*100}%\n",
f"Mean number of apps with IAPs:\t{mean_IAPs*100}%\n",
f"Mean number of apps with Ads:\t{mean_ads*100}%",
)
# %%
df = plotter.sort_values("free_pc", ascending=False)
bar_width = round(1 / len(data_map), 2) - 0.1
fig, ax = plt.subplots(figsize=(18, 9))
plt.setp(
ax.get_xticklabels(), rotation=45, horizontalalignment="right", fontsize="medium"
)
plt.grid(visible=True, axis="y", which="major")
ax.set_ylabel("Percentage of apps")
x = np.arange(len(df.Category))
for i, (key, value) in enumerate(data_map.items()):
ax.bar(x + (i * bar_width), df[key], label=data_map[key], width=bar_width)
ax.set_xticks(x + (len(data_map) * bar_width) / len(data_map))
ax.set_xticklabels(df.Category.unique())
fig.legend(loc="upper left")
| 2.375 | 2 |
lib/googlecloudsdk/command_lib/tasks/app.py | kustodian/google-cloud-sdk | 0 | 12787406 | <filename>lib/googlecloudsdk/command_lib/tasks/app.py
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for App Engine apps for `gcloud tasks` commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk.api_lib.app import appengine_api_client as app_engine_api
from googlecloudsdk.api_lib.tasks import GetApiAdapter
from googlecloudsdk.calliope import base as calliope_base
from googlecloudsdk.command_lib.app import create_util
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_io
class RegionResolvingError(exceptions.Error):
"""Error for when the app's region cannot be ultimately determined."""
def ResolveAppLocation(project_ref):
"""Determines Cloud Tasks location for the project or creates an app.
Args:
project_ref: The project resource to look up the location for.
Returns:
The existing or created app's locationId.
Raises:
RegionResolvingError: If the region of the app could not be determined.
"""
location = _GetLocation(project_ref) or _CreateApp(project_ref)
if location is not None:
return location
raise RegionResolvingError(
'Could not determine the location for the project. Please try again.')
def _GetLocation(project_ref):
"""Gets the location from the Cloud Tasks API."""
try:
locations_client = GetApiAdapter(calliope_base.ReleaseTrack.GA).locations
locations = list(locations_client.List(project_ref, page_size=2))
if len(locations) > 1:
# Projects currently can only use Cloud Tasks in single region, so this
# should never happen for now, but that will change in the future.
raise RegionResolvingError('Multiple locations found for this project. '
'Please specify an exact location.')
if len(locations) == 1:
return locations[0].labels.additionalProperties[0].value
return None
except apitools_exceptions.HttpNotFoundError:
return None
def _CreateApp(project_ref):
"""Walks the user through creating an AppEngine app."""
project = properties.VALUES.core.project.GetOrFail()
if console_io.PromptContinue(
message=('There is no App Engine app in project [{}].'.format(project)),
prompt_string=('Would you like to create one'),
throw_if_unattended=True):
try:
app_engine_api_client = app_engine_api.GetApiClientForTrack(
calliope_base.ReleaseTrack.GA)
create_util.CreateAppInteractively(app_engine_api_client, project)
except create_util.AppAlreadyExistsError:
raise create_util.AppAlreadyExistsError(
'App already exists in project [{}]. This may be due a race '
'condition. Please try again.'.format(project))
else:
return _GetLocation(project_ref)
return None
| 2.09375 | 2 |
cloud2015-1/cloudTrabajo/ArrRef.py | paurisg/U | 0 | 12787407 | <reponame>paurisg/U<gh_stars>0
class ArrRef:
def __init__(self, A):
self.A = A
self.N = len(self.A)
def get(self,i):
if(i < self.N):
return self.A[i]
def getF(self):
if(len(self.A) == 2):
return self.A[0]
def getS(self):
if(len(self.A) == 2):
return self.A[1]
def eBitsExec(self,w):
return w.mask&(w.f(self)>>w.offset)
class eBits(ArrRef):
def __init__(self,bits,offset,f):
self.mask = (1 << bits) - 1
self.offset = offset
self.f = f
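# Illustrative usage sketch (not part of the original module): eBitsExec masks
# the low `bits` bits of whatever value w.f extracts from the wrapper, after
# shifting right by `offset`. The sample values below are assumptions chosen
# only for this example.
if __name__ == "__main__":
    data = ArrRef([0b10110100, 0b00001111])
    low_nibble = eBits(bits=4, offset=0, f=lambda a: a.get(0))
    print(data.eBitsExec(low_nibble))  # (0b10110100 >> 0) & 0b1111 == 4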
| 2.875 | 3 |
openstack/tests/functional/network/v2/test_sfc_port_chain.py | gthiemonge/openstacksdk | 0 | 12787408 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.network.v2 import sfc_port_chain
from openstack.network.v2 import sfc_flow_classifier
from openstack.network.v2 import sfc_port_pair_group
from openstack.network.v2 import sfc_port_pair
from openstack.network.v2 import subnet
from openstack.network.v2 import network
from openstack.network.v2 import port
from openstack.tests.functional import base
from openstack import exceptions
class TestSfcPortChain(base.BaseFunctionalTest):
IPV4 = 4
CIDR = "10.100.0.0/24"
CHAIN_PARAMETERS = {'correlation': 'nsh'}
def require_networking_sfc(self):
try:
return [sot.id for sot in self.conn.network.sfc_port_chains()]
except exceptions.NotFoundException:
self.skipTest('networking-sfc plugin not found in network')
def setUp(self):
super(TestSfcPortChain, self).setUp()
self.require_networking_sfc()
name = self.getUniqueString()
net = self._create_network(name)
self.NET_ID = net.id
name = self.getUniqueString()
sub = self._create_subnet(name, net.id, self.CIDR)
self.SUB_ID = sub.id
self.PORT1_ID = None
self.PORT2_ID = None
self.SERVER_ID = None
self._create_server()
name = self.getUniqueString()
pp1 = self.conn.network.create_sfc_port_pair(
name=name,
ingress=self.PORT1_ID,
egress=self.PORT2_ID)
assert isinstance(pp1, sfc_port_pair.SfcPortPair)
self.PP1_ID = pp1.id
name = self.getUniqueString()
pp2 = self.conn.network.create_sfc_port_pair(
name=name,
ingress=self.PORT2_ID,
egress=self.PORT1_ID)
assert isinstance(pp2, sfc_port_pair.SfcPortPair)
self.PP2_ID = pp2.id
self.PPG1_NAME = self.getUniqueString()
ppg1 = self.conn.network.create_sfc_port_pair_group(
name=self.PPG1_NAME,
port_pairs=[self.PP1_ID])
assert isinstance(ppg1, sfc_port_pair_group.SfcPortPairGroup)
self.PPG1_ID = ppg1.id
self.PPG2_NAME = self.getUniqueString()
ppg2 = self.conn.network.create_sfc_port_pair_group(
name=self.PPG2_NAME,
port_pairs=[self.PP2_ID])
assert isinstance(ppg2, sfc_port_pair_group.SfcPortPairGroup)
self.PPG2_ID = ppg2.id
self.FC_NAME = self.getUniqueString()
fc = self.conn.network.create_sfc_flow_classifier(
name=self.FC_NAME,
logical_source_port=self.PORT1_ID,
source_ip_prefix=self.CIDR)
assert isinstance(fc, sfc_flow_classifier.SfcFlowClassifier)
self.FC_ID = fc.id
self.PC_NAME = self.getUniqueString()
self.UPDATE_NAME = self.getUniqueString()
pc = self.conn.network.create_sfc_port_chain(
name=self.PC_NAME,
port_pair_groups=[self.PPG1_ID, self.PPG2_ID],
flow_classifiers=[self.FC_ID],
chain_parameters=self.CHAIN_PARAMETERS)
assert isinstance(pc, sfc_port_chain.SfcPortChain)
self.PC_ID = pc.id
def tearDown(self):
self.conn.network.delete_sfc_port_chain(self.PC_ID,
ignore_missing=False)
self.conn.network.delete_sfc_flow_classifier(self.FC_ID,
ignore_missing=False)
self.conn.network.delete_sfc_port_pair_group(self.PPG1_ID,
ignore_missing=False)
self.conn.network.delete_sfc_port_pair_group(self.PPG2_ID,
ignore_missing=False)
self.conn.network.delete_sfc_port_pair(self.PP1_ID,
ignore_missing=False)
self.conn.network.delete_sfc_port_pair(self.PP2_ID,
ignore_missing=False)
self.conn.compute.delete_server(self.SERVER_ID, ignore_missing=False)
self.conn.network.delete_port(self.PORT1_ID, ignore_missing=False)
self.conn.network.delete_port(self.PORT2_ID, ignore_missing=False)
self.conn.network.delete_subnet(self.SUB_ID, ignore_missing=False)
self.conn.network.delete_network(self.NET_ID, ignore_missing=False)
super(TestSfcPortChain, self).tearDown()
def _create_subnet(self, name, net_id, cidr):
self.name = name
self.net_id = net_id
self.cidr = cidr
sub = self.conn.network.create_subnet(
name=self.name,
ip_version=self.IPV4,
network_id=self.net_id,
cidr=self.cidr)
assert isinstance(sub, subnet.Subnet)
self.assertEqual(self.name, sub.name)
return sub
def _create_network(self, name, **args):
self.name = name
net = self.conn.network.create_network(name=name, **args)
assert isinstance(net, network.Network)
self.assertEqual(self.name, net.name)
return net
def _create_server(self):
p1 = self.conn.network.create_port(network_id=self.NET_ID)
assert isinstance(p1, port.Port)
self.PORT1_ID = p1.id
p2 = self.conn.network.create_port(network_id=self.NET_ID)
assert isinstance(p2, port.Port)
self.PORT2_ID = p2.id
flavor = self.conn.compute.find_flavor(base.FLAVOR_NAME,
ignore_missing=False)
image = self.conn.compute.find_image(base.IMAGE_NAME,
ignore_missing=False)
srv = self.conn.compute.create_server(
name=self.getUniqueString(),
flavor_id=flavor.id, image_id=image.id,
networks=[{"port": self.PORT1_ID}, {"port": self.PORT2_ID}])
self.conn.compute.wait_for_server(srv)
self.SERVER_ID = srv.id
def test_get(self):
sot = self.conn.network.get_sfc_port_chain(self.PC_ID)
self.assertEqual(self.PC_ID, sot.id)
self.assertIn(self.PPG1_ID, sot.port_pair_groups)
self.assertIn(self.PPG2_ID, sot.port_pair_groups)
self.assertIn(self.FC_ID, sot.flow_classifiers)
for k in self.CHAIN_PARAMETERS:
self.assertEqual(self.CHAIN_PARAMETERS[k],
sot.chain_parameters[k])
def test_list(self):
ids = [sot.id for sot in self.conn.network.sfc_port_chains()]
self.assertIn(self.PC_ID, ids)
def test_find(self):
sot = self.conn.network.find_sfc_port_chain(self.PC_NAME)
self.assertEqual(self.PC_ID, sot.id)
def test_update(self):
sot = self.conn.network.update_sfc_port_chain(
self.PC_ID,
name=self.UPDATE_NAME,
port_pair_groups=[self.PPG1_ID],
flow_classifiers=[self.FC_ID])
self.assertEqual(self.UPDATE_NAME, sot.name)
self.assertIn(self.PPG1_ID, sot.port_pair_groups)
self.assertNotIn(self.PPG2_ID, sot.port_pair_groups)
self.assertIn(self.FC_ID, sot.flow_classifiers)
| 1.78125 | 2 |
tests/Unit/PointwiseFunctions/GeneralRelativity/IndexManipulation.py | nilsvu/spectre | 117 | 12787409 | <reponame>nilsvu/spectre
# Distributed under the MIT License.
# See LICENSE.txt for details.
import numpy as np
def raise_or_lower_first_index(tensor, metric):
return np.einsum("ij,ikl", metric, tensor)
def trace_last_indices(tensor, metric):
return np.einsum("ij,kij", metric, tensor)
| 2.15625 | 2 |
Code/Multiply_two_numbers.py | satyambharti171/Python-Guide-for-Beginners | 54 | 12787410 | <reponame>satyambharti171/Python-Guide-for-Beginners<gh_stars>10-100
def multiply(a,b):
c=a*b
return c
print(multiply(3,5)) | 3.6875 | 4 |
redis_i_action/django_c2/c2/urls.py | enixdark/im-r-e-d-i-s | 0 | 12787411 | from django.conf.urls import patterns, include, url
from .views import HelloView
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'django_c2.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'',HelloView.as_view(),name="index"),
)
| 2.03125 | 2 |
visualization/multiple_draw_accuracy_graph.py | jireh-father/tensorflow-triplet-loss | 0 | 12787412 | <reponame>jireh-father/tensorflow-triplet-loss<filename>visualization/multiple_draw_accuracy_graph.py<gh_stars>0
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import glob, os
import argparse
import json
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('--model_dir', default='d:/result/triplet',
help="Experiment directory containing params.json")
parser.add_argument('--epoch_list', default=None,
help="Directory containing the dataset")
if __name__ == '__main__':
args = parser.parse_args()
result_path = os.path.join(args.model_dir, "search_result")
result_files = glob.glob(os.path.join(result_path, "*_accuracies.json"))
assert len(result_files) > 0
legends = []
plt.figure(figsize=(8, 8))
model_epochs = None
if args.epoch_list is not None:
model_epochs = [int(e) for e in args.epoch_list.split(",")]
for result_file in result_files:
i = int(os.path.basename(result_file).split("_")[0])
if args.epoch_list is not None and i not in model_epochs:
continue
accuracies = json.load(open(result_file))
plt.plot(accuracies)
legends.append("epoch %d" % i)
plt.legend(legends, loc='upper left')
plt.savefig(os.path.join(args.model_dir, "search_result", "accuracy_graph.png"))
| 2.421875 | 2 |
classes/__init__.py | Jollyfant/EPOS-TURTLE | 1 | 12787413 | <filename>classes/__init__.py
from Agent import Agent
from Annotation import Annotation
from Catalog import Catalog
from CatalogRecord import CatalogRecord
from Checksum import Checksum
from Concept import Concept
from ConceptScheme import ConceptScheme
from ContactPoint import ContactPoint
from CreativeWork import CreativeWork
from Dataset import Dataset
from Distribution import Distribution
from Document import Document
from EPOSWebService import EPOSWebService
from Equipment import Equipment
from Facility import Facility
from Frequency import Frequency
from HydraAPIDocumentation import HydraAPIDocumentation
from HydraClass import HydraClass
from HydraIriTemplate import HydraIriTemplate
from HydraIriTemplateMapping import HydraIriTemplateMapping
from HydraOperation import HydraOperation
from Identifier import Identifier
from Kind import Kind
from LicenseDocument import LicenseDocument
from LinguisticSystem import LinguisticSystem
from Location import Location
from MediaTypeOrExtent import MediaTypeOrExtent
from Organization import Organization
from PeriodOfTime import PeriodOfTime
from Person import Person
from PostalAddress import PostalAddress
from Project import Project
from PropertyValue import PropertyValue
from ProvenanceStatement import ProvenanceStatement
from Publication import Publication
from RDFSLiteral import RDFSLiteral
from Resource import Resource
from RightsStatement import RightsStatement
from Service import Service
from SoftwareApplication import SoftwareApplication
from SoftwareSourceCode import SoftwareSourceCode
from Standard import Standard
| 1.679688 | 2 |
main.py | mrcrow85/python-password-utils | 1 | 12787414 | #!/usr/bin/env python3
import sys
from password.generate import generate
from password.validate import validate
from password.pwn_check import main
if __name__ == "__main__":
if not sys.argv[1:]:
while True:
try:
text = input(
"""Select the option you like to run:
1) Generate a password
2) Validate a password
3) Check if password is pwned\n"""
)
if text == "1":
length = input('How many characters? ')
uppercase = input('Should contain uppercase letters? ')
numbers = input('Should contain numbers? ')
symbols = input('Should contain special characters? ')
print(generate(
int(length),
include_uppercase=uppercase,
include_numbers=numbers,
include_symbols=symbols
))
break
elif text == "2":
length = input('Minimum length? ')
lowercase = input('How many lowercase letters? ')
uppercase = input('How many uppercase letters? ')
numbers = input('How many numbers? ')
symbols = input('How many symbols? ')
password = input('Enter the password: ')
validated = validate(
password,
lowercase=lowercase,
uppercase=uppercase,
numbers=numbers,
symbols=symbols,
length=length
)
print('Password valid!') if validated else print('Invalid password!')
break
elif text == "3":
main(input('Enter a password: '))
break
else:
print("Invalid option. Please try again.")
except RuntimeError:
print("There was an error with the API call, please fix it ASAP!")
raise
except KeyboardInterrupt:
print("\nGoodbye!")
break
| 3.890625 | 4 |
file_processing_pipeline/cli.py | mariusfeteanu/file-processing-pipeline | 0 | 12787415 | """A command line interface to processes files."""
import click
from file_processing_pipeline.process import process_end_of_day
from file_processing_pipeline.io import CSV
@click.command()
@click.option("-d", "--data-set",
help="The data set to import, e.g. end_of_day.",
default='end_of_day',
required=True)
@click.option("-i", "--input-root",
help="The directory containing the dataset, or the excel workbook (not just dir).",
required=True)
@click.option("-r", "--reference-root",
help="The directory containing the reference data,"
" or the excel workbook (not just dir).",
required=True)
@click.option("-o", "--output-root",
help="The directory where to output the data.",
required=True)
@click.option("-t", "--file-type",
help="The input file type (csv or excel).",
default=CSV,
required=True)
def cli(data_set,
input_root,
reference_root,
output_root,
file_type):
if data_set == 'end_of_day':
process_end_of_day(input_path_root=input_root,
ref_path_root=reference_root,
output_path_root=output_root,
file_type=file_type)
def main(): # pragma: nocover
cli(auto_envvar_prefix='FPP') # pylint: disable=unexpected-keyword-arg,no-value-for-parameter
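# Example invocation (sketch; `fpp` is a hypothetical console-script name and
# the paths are placeholders, neither taken from this repository):
#
#   fpp --data-set end_of_day --input-root ./input \
#       --reference-root ./reference --output-root ./output --file-type csv
#
# Because of auto_envvar_prefix above, FPP_-prefixed environment variables
# (e.g. FPP_INPUT_ROOT) can stand in for the corresponding options.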
| 2.921875 | 3 |
datasets/sunrgbd.py | danjia21/3detr | 0 | 12787416 | <reponame>danjia21/3detr
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Modified from https://github.com/facebookresearch/votenet
Dataset for 3D object detection on SUN RGB-D (with support of vote supervision).
A sunrgbd oriented bounding box is parameterized by (cx,cy,cz), (l,w,h) -- (dx,dy,dz) in upright depth coord
(Z is up, Y is forward, X is right ward), heading angle (from +X rotating to -Y) and semantic class
Point clouds are in **upright_depth coordinate (X right, Y forward, Z upward)**
Return heading class, heading residual, size class and size residual for 3D bounding boxes.
Oriented bounding box is parameterized by (cx,cy,cz), (l,w,h), heading_angle and semantic class label.
(cx,cy,cz) is in upright depth coordinate
(l,h,w) are *half length* of the object sizes
The heading angle is a rotation in radians from +X rotating towards -Y. (+X is 0, -Y is pi/2)
Author: <NAME>
Date: 2019
"""
import os
import sys
import numpy as np
from torch.utils.data import Dataset
import scipy.io as sio # to load .mat files for depth points
import utils.pc_util as pc_util
from utils.random_cuboid import RandomCuboid
from utils.pc_util import shift_scale_points, scale_points
from utils.box_util import (
flip_axis_to_camera_tensor,
get_3d_box_batch_tensor,
flip_axis_to_camera_np,
get_3d_box_batch_np,
)
MEAN_COLOR_RGB = np.array([0.5, 0.5, 0.5]) # sunrgbd color is in 0~1
DATA_PATH_V1 = "./data/sunrgbd/sunrgbd_pc_bbox_votes_50k_v1" ## Replace with path to dataset
DATA_PATH_V2 = "" ## Not used in the codebase.
class SunrgbdDatasetConfig(object):
def __init__(self):
self.num_semcls = 10
self.num_angle_bin = 12
self.max_num_obj = 64
self.type2class = {
"bed": 0,
"table": 1,
"sofa": 2,
"chair": 3,
"toilet": 4,
"desk": 5,
"dresser": 6,
"night_stand": 7,
"bookshelf": 8,
"bathtub": 9,
}
self.class2type = {self.type2class[t]: t for t in self.type2class}
self.type2onehotclass = {
"bed": 0,
"table": 1,
"sofa": 2,
"chair": 3,
"toilet": 4,
"desk": 5,
"dresser": 6,
"night_stand": 7,
"bookshelf": 8,
"bathtub": 9,
}
def angle2class(self, angle):
"""Convert continuous angle to discrete class
        [optional] also returns a small regression number from
        the class center angle to the current angle.
angle is from 0-2pi (or -pi~pi), class center at 0, 1*(2pi/N), 2*(2pi/N) ... (N-1)*(2pi/N)
returns class [0,1,...,N-1] and a residual number such that
class*(2pi/N) + number = angle
"""
num_class = self.num_angle_bin
angle = angle % (2 * np.pi)
assert angle >= 0 and angle <= 2 * np.pi
angle_per_class = 2 * np.pi / float(num_class)
shifted_angle = (angle + angle_per_class / 2) % (2 * np.pi)
class_id = int(shifted_angle / angle_per_class)
residual_angle = shifted_angle - (
class_id * angle_per_class + angle_per_class / 2
)
return class_id, residual_angle
def class2angle(self, pred_cls, residual, to_label_format=True):
"""Inverse function to angle2class"""
num_class = self.num_angle_bin
angle_per_class = 2 * np.pi / float(num_class)
angle_center = pred_cls * angle_per_class
angle = angle_center + residual
if to_label_format and angle > np.pi:
angle = angle - 2 * np.pi
return angle
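    # Worked example (illustrative numbers): with num_angle_bin = 12 the bin
    # width is 2*pi/12 ~= 0.5236, so angle2class(0.3) shifts the angle by half
    # a bin to ~0.5618 and returns class 1 with residual ~-0.2236;
    # class2angle(1, -0.2236) then recovers the original 0.3 rad.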
def class2angle_batch(self, pred_cls, residual, to_label_format=True):
num_class = self.num_angle_bin
angle_per_class = 2 * np.pi / float(num_class)
angle_center = pred_cls * angle_per_class
angle = angle_center + residual
if to_label_format:
mask = angle > np.pi
angle[mask] = angle[mask] - 2 * np.pi
return angle
def class2anglebatch_tensor(self, pred_cls, residual, to_label_format=True):
return self.class2angle_batch(pred_cls, residual, to_label_format)
def box_parametrization_to_corners(self, box_center_unnorm, box_size, box_angle):
box_center_upright = flip_axis_to_camera_tensor(box_center_unnorm)
boxes = get_3d_box_batch_tensor(box_size, box_angle, box_center_upright)
return boxes
def box_parametrization_to_corners_np(self, box_center_unnorm, box_size, box_angle):
box_center_upright = flip_axis_to_camera_np(box_center_unnorm)
boxes = get_3d_box_batch_np(box_size, box_angle, box_center_upright)
return boxes
def my_compute_box_3d(self, center, size, heading_angle):
R = pc_util.rotz(-1 * heading_angle)
l, w, h = size
x_corners = [-l, l, l, -l, -l, l, l, -l]
y_corners = [w, w, -w, -w, w, w, -w, -w]
z_corners = [h, h, h, h, -h, -h, -h, -h]
corners_3d = np.dot(R, np.vstack([x_corners, y_corners, z_corners]))
corners_3d[0, :] += center[0]
corners_3d[1, :] += center[1]
corners_3d[2, :] += center[2]
return np.transpose(corners_3d)
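    # Geometry sketch (illustrative): with center (0, 0, 0), half sizes
    # (l, w, h) = (1, 1, 1) and heading_angle 0, my_compute_box_3d returns the
    # eight sign combinations of (+/-1, +/-1, +/-1), i.e. an axis-aligned
    # 2 x 2 x 2 cube around the origin; a nonzero heading rotates it about Z.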
class SunrgbdDetectionDataset(Dataset):
def __init__(
self,
dataset_config,
split_set="train",
root_dir=None,
num_points=20000,
use_color=False,
use_height=False,
use_v1=True,
augment=False,
use_random_cuboid=True,
random_cuboid_min_points=30000,
):
assert num_points <= 50000
assert split_set in ["train", "val", "trainval"]
self.dataset_config = dataset_config
self.use_v1 = use_v1
if root_dir is None:
root_dir = DATA_PATH_V1 if use_v1 else DATA_PATH_V2
self.data_path = root_dir + "_%s" % (split_set)
if split_set in ["train", "val"]:
self.scan_names = sorted(
list(
set([os.path.basename(x)[0:6] for x in os.listdir(self.data_path)])
)
)
elif split_set in ["trainval"]:
# combine names from both
sub_splits = ["train", "val"]
all_paths = []
for sub_split in sub_splits:
data_path = self.data_path.replace("trainval", sub_split)
basenames = sorted(
list(set([os.path.basename(x)[0:6] for x in os.listdir(data_path)]))
)
basenames = [os.path.join(data_path, x) for x in basenames]
all_paths.extend(basenames)
all_paths.sort()
self.scan_names = all_paths
self.num_points = num_points
self.augment = augment
self.use_color = use_color
self.use_height = use_height
self.use_random_cuboid = use_random_cuboid
self.random_cuboid_augmentor = RandomCuboid(
min_points=random_cuboid_min_points,
aspect=0.75,
min_crop=0.75,
max_crop=1.0,
)
self.center_normalizing_range = [
np.zeros((1, 3), dtype=np.float32),
np.ones((1, 3), dtype=np.float32),
]
self.max_num_obj = 64
def __len__(self):
return len(self.scan_names)
def __getitem__(self, idx):
scan_name = self.scan_names[idx]
if scan_name.startswith("/"):
scan_path = scan_name
else:
scan_path = os.path.join(self.data_path, scan_name)
point_cloud = np.load(scan_path + "_pc.npz")["pc"] # Nx6
bboxes = np.load(scan_path + "_bbox.npy") # K,8
if not self.use_color:
point_cloud = point_cloud[:, 0:3]
else:
assert point_cloud.shape[1] == 6
point_cloud = point_cloud[:, 0:6]
point_cloud[:, 3:] = point_cloud[:, 3:] - MEAN_COLOR_RGB
if self.use_height:
floor_height = np.percentile(point_cloud[:, 2], 0.99)
height = point_cloud[:, 2] - floor_height
point_cloud = np.concatenate(
[point_cloud, np.expand_dims(height, 1)], 1
) # (N,4) or (N,7)
# ------------------------------- DATA AUGMENTATION ------------------------------
if self.augment:
if np.random.random() > 0.5:
# Flipping along the YZ plane
point_cloud[:, 0] = -1 * point_cloud[:, 0]
bboxes[:, 0] = -1 * bboxes[:, 0]
bboxes[:, 6] = np.pi - bboxes[:, 6]
# Rotation along up-axis/Z-axis
rot_angle = (np.random.random() * np.pi / 3) - np.pi / 6 # -30 ~ +30 degree
rot_mat = pc_util.rotz(rot_angle)
point_cloud[:, 0:3] = np.dot(point_cloud[:, 0:3], np.transpose(rot_mat))
bboxes[:, 0:3] = np.dot(bboxes[:, 0:3], np.transpose(rot_mat))
bboxes[:, 6] -= rot_angle
# Augment RGB color
if self.use_color:
rgb_color = point_cloud[:, 3:6] + MEAN_COLOR_RGB
rgb_color *= (
1 + 0.4 * np.random.random(3) - 0.2
) # brightness change for each channel
rgb_color += (
0.1 * np.random.random(3) - 0.05
) # color shift for each channel
rgb_color += np.expand_dims(
(0.05 * np.random.random(point_cloud.shape[0]) - 0.025), -1
) # jittering on each pixel
rgb_color = np.clip(rgb_color, 0, 1)
# randomly drop out 30% of the points' colors
rgb_color *= np.expand_dims(
np.random.random(point_cloud.shape[0]) > 0.3, -1
)
point_cloud[:, 3:6] = rgb_color - MEAN_COLOR_RGB
# Augment point cloud scale: 0.85x-1.15x
scale_ratio = np.random.random() * 0.3 + 0.85
scale_ratio = np.expand_dims(np.tile(scale_ratio, 3), 0)
point_cloud[:, 0:3] *= scale_ratio
bboxes[:, 0:3] *= scale_ratio
bboxes[:, 3:6] *= scale_ratio
if self.use_height:
point_cloud[:, -1] *= scale_ratio[0, 0]
if self.use_random_cuboid:
point_cloud, bboxes, _ = self.random_cuboid_augmentor(
point_cloud, bboxes
)
# ------------------------------- LABELS ------------------------------
angle_classes = np.zeros((self.max_num_obj,), dtype=np.float32)
angle_residuals = np.zeros((self.max_num_obj,), dtype=np.float32)
raw_angles = np.zeros((self.max_num_obj,), dtype=np.float32)
raw_sizes = np.zeros((self.max_num_obj, 3), dtype=np.float32)
label_mask = np.zeros((self.max_num_obj))
label_mask[0 : bboxes.shape[0]] = 1
max_bboxes = np.zeros((self.max_num_obj, 8))
max_bboxes[0 : bboxes.shape[0], :] = bboxes
target_bboxes_mask = label_mask
target_bboxes = np.zeros((self.max_num_obj, 6))
for i in range(bboxes.shape[0]):
bbox = bboxes[i]
semantic_class = bbox[7]
            raw_angles[i] = bbox[6] % (2 * np.pi)  # wrap heading into [0, 2*pi)
box3d_size = bbox[3:6] * 2
raw_sizes[i, :] = box3d_size
angle_class, angle_residual = self.dataset_config.angle2class(bbox[6])
angle_classes[i] = angle_class
angle_residuals[i] = angle_residual
corners_3d = self.dataset_config.my_compute_box_3d(
bbox[0:3], bbox[3:6], bbox[6]
)
# compute axis aligned box
xmin = np.min(corners_3d[:, 0])
ymin = np.min(corners_3d[:, 1])
zmin = np.min(corners_3d[:, 2])
xmax = np.max(corners_3d[:, 0])
ymax = np.max(corners_3d[:, 1])
zmax = np.max(corners_3d[:, 2])
target_bbox = np.array(
[
(xmin + xmax) / 2,
(ymin + ymax) / 2,
(zmin + zmax) / 2,
xmax - xmin,
ymax - ymin,
zmax - zmin,
]
)
target_bboxes[i, :] = target_bbox
point_cloud, choices = pc_util.random_sampling(
point_cloud, self.num_points, return_choices=True
)
point_cloud_dims_min = point_cloud.min(axis=0)
point_cloud_dims_max = point_cloud.max(axis=0)
mult_factor = point_cloud_dims_max - point_cloud_dims_min
box_sizes_normalized = scale_points(
raw_sizes.astype(np.float32)[None, ...],
mult_factor=1.0 / mult_factor[None, ...],
)
box_sizes_normalized = box_sizes_normalized.squeeze(0)
box_centers = target_bboxes.astype(np.float32)[:, 0:3]
box_centers_normalized = shift_scale_points(
box_centers[None, ...],
src_range=[
point_cloud_dims_min[None, ...],
point_cloud_dims_max[None, ...],
],
dst_range=self.center_normalizing_range,
)
box_centers_normalized = box_centers_normalized.squeeze(0)
box_centers_normalized = box_centers_normalized * target_bboxes_mask[..., None]
# re-encode angles to be consistent with VoteNet eval
angle_classes = angle_classes.astype(np.int64)
angle_residuals = angle_residuals.astype(np.float32)
raw_angles = self.dataset_config.class2angle_batch(
angle_classes, angle_residuals
)
box_corners = self.dataset_config.box_parametrization_to_corners_np(
box_centers[None, ...],
raw_sizes.astype(np.float32)[None, ...],
raw_angles.astype(np.float32)[None, ...],
)
box_corners = box_corners.squeeze(0)
ret_dict = {}
ret_dict["point_clouds"] = point_cloud.astype(np.float32)
ret_dict["gt_box_corners"] = box_corners.astype(np.float32)
ret_dict["gt_box_centers"] = box_centers.astype(np.float32)
ret_dict["gt_box_centers_normalized"] = box_centers_normalized.astype(
np.float32
)
target_bboxes_semcls = np.zeros((self.max_num_obj))
target_bboxes_semcls[0 : bboxes.shape[0]] = bboxes[:, -1] # from 0 to 9
ret_dict["gt_box_sem_cls_label"] = target_bboxes_semcls.astype(np.int64)
ret_dict["gt_box_present"] = target_bboxes_mask.astype(np.float32)
ret_dict["scan_idx"] = np.array(idx).astype(np.int64)
ret_dict["gt_box_sizes"] = raw_sizes.astype(np.float32)
ret_dict["gt_box_sizes_normalized"] = box_sizes_normalized.astype(np.float32)
ret_dict["gt_box_angles"] = raw_angles.astype(np.float32)
ret_dict["gt_angle_class_label"] = angle_classes
ret_dict["gt_angle_residual_label"] = angle_residuals
ret_dict["point_cloud_dims_min"] = point_cloud_dims_min
ret_dict["point_cloud_dims_max"] = point_cloud_dims_max
return ret_dict
| 2.171875 | 2 |
catalog/views.py | gcrsaldanha/sms | 0 | 12787417 | from rest_framework import generics
from .models import (
Item,
Manufacturer,
Catalog,
)
from .serializers import (
ItemSerializer,
ManufacturerSerializer,
CatalogSerializer,
)
class ItemListCreate(generics.ListCreateAPIView):
queryset = Item.objects.all()
serializer_class = ItemSerializer
class ManufacturerListCreate(generics.ListCreateAPIView):
queryset = Manufacturer.objects.all()
serializer_class = ManufacturerSerializer
class CatalogListCreate(generics.ListCreateAPIView):
queryset = Catalog.objects.all()
serializer_class = CatalogSerializer
| 2.03125 | 2 |
xymon_monitors/icecast_json.py | UniversityRadioYork/xymon-monitors | 0 | 12787418 | <filename>xymon_monitors/icecast_json.py
#!/usr/bin/env python3
import logging
import xymon_monitors.logging as xm_logging
import urllib.request
import json
l = logging.getLogger()
xm_logging.configure('icecast_json')
@xm_logging.exception_quit(l, Exception, 'Icecast JSON unavailable')
def get_HTTP(url):
return urllib.request.urlopen(url).read()
@xm_logging.exception_quit(l, Exception, 'Could not parse Icecast JSON')
def parse_json(s):
try:
json.loads(str(s, 'utf-8'))
except NameError:
pass # Skip
@xm_logging.exception_quit(l, Exception, 'Unknown exception thrown')
def main():
r = get_HTTP('https://ury.org.uk/audio/status-json.xsl')
parse_json(r)
l.info('SUCCESS:Icecast JSON working')
if __name__ == '__main__':
main()
| 2.40625 | 2 |
setup.py | a-musing-moose/bakedbeans | 6 | 12787419 | <reponame>a-musing-moose/bakedbeans
import io
import os
from setuptools import setup
NAME = 'bakedbeans'
here = os.path.abspath(os.path.dirname(__file__))
with io.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = '\n' + f.read()
about = {}
with open(os.path.join(here, NAME, '__version__.py')) as f:
exec(f.read(), about)
setup(
name='bakedbeans',
version=about['__version__'],
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/a-musing-moose/bakedbeans',
license='MIT',
packages=[NAME],
install_requires=[
'click~=6.7',
'Flask~=0.12.2',
'jsonschema~=2.6.0'
],
description='Canned response HTTP server',
long_description=long_description,
classifiers=(
'Intended Audience :: Developers',
'Topic :: Utilities',
'Programming Language :: Python :: 3.6',
),
entry_points={
'console_scripts': ['baked=bakedbeans.cli:main'],
}
)
| 1.242188 | 1 |
src/py/route.py | AlecioFuranze/zeloo7 | 0 | 12787420 | <gh_stars>0
from ast import arg
class Route:
def RETURN():
return None
| 1.460938 | 1 |
utils_nlp/eval/question_answering.py | Anita1017/nlp-recipes | 4,407 | 12787421 | """ Official evaluation script for SQuAD version 2.0.
Modified by XLNet authors to update `find_best_threshold` scripts for SQuAD V2.0
"""
import collections
import json
import re
import string
def get_raw_scores(qa_ids, actuals, preds):
"""
Computes exact match and F1 scores without applying any unanswerable probability threshold.
Args:
qa_ids (list): Unique ids corresponding to the answers in `actuals`.
actuals (list): List of ground truth answers.
preds (dict): Dictionary with qa_id as keys and predicted answers as values.
Returns:
tuple: (exact_match, f1)
"""
# Helper functions
def _normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
return re.sub(regex, " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def _get_tokens(s):
"""Normalizes text and returns white-space tokenized tokens. """
if not s:
return []
return _normalize_answer(s).split()
def _compute_exact(a_gold, a_pred):
"""Compute the exact match between two sentences after normalization.
Returns:
int: 1 if two sentences match exactly after normalization,
0 otherwise.
"""
return int(_normalize_answer(a_gold) == _normalize_answer(a_pred))
def _compute_f1(a_gold, a_pred):
"""
Compute F1 score based on token overlapping between two
sentences.
"""
gold_toks = _get_tokens(a_gold)
pred_toks = _get_tokens(a_pred)
common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
num_same = sum(common.values())
if len(gold_toks) == 0 or len(pred_toks) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks)
if num_same == 0:
return 0
precision = 1.0 * num_same / len(pred_toks)
recall = 1.0 * num_same / len(gold_toks)
f1 = (2 * precision * recall) / (precision + recall)
return f1
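    # Worked example (illustrative): for gold "the cat sat" and prediction
    # "cat sat down", normalization drops the article "the", the overlapping
    # tokens are {"cat", "sat"}, so precision = 2/3, recall = 2/2 and F1 = 0.8.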
# Helper functions end
exact_scores = {}
f1_scores = {}
for qid, gold_answers in zip(qa_ids, actuals):
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
gold_answers = [""]
if qid not in preds:
print("Missing prediction for %s" % qid)
continue
a_pred = preds[qid]
# Take max over all gold answers
if isinstance(gold_answers, str):
gold_answers = [gold_answers]
exact_scores[qid] = max(_compute_exact(a, a_pred) for a in gold_answers)
f1_scores[qid] = max(_compute_f1(a, a_pred) for a in gold_answers)
return exact_scores, f1_scores
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans, unanswerable_exists=False):
"""
Find the best threshold to determine a question is impossible to answer.
Args:
preds (dict): Dictionary with qa_id as keys and predicted answers as values.
scores (dict): Dictionary with qa_id as keys and raw evaluation scores (exact_match or
f1) as values.
na_probs (dict): Dictionary with qa_id as keys and unanswerable probabilities as values.
qid_to_has_ans (dict): Dictionary with qa_id as keys boolean values indicating if the
question has answer as values.
unanswerable_exists (bool, optional): Whether there is unanswerable questions in the data.
Defaults to False.
Returns:
tuple: score after applying best threshold, best threshold, (score for answerable
questions after applying best threshold, if unanswerable_exists=True)
"""
num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
# If na_prob > threshold, the question is considered as unanswerable by the prediction.
# Initially, the threshold is 0. All questions are considered as unanswerable by the
# predictions. So cur_score is the number of actual unanswerable questions (i.e. correctly
# predicted as unanswerable in the data.
cur_score = num_no_ans
best_score = cur_score
best_thresh = 0.0
# Sorted in ascending order
qid_list = sorted(na_probs, key=lambda k: na_probs[k])
for i, qid in enumerate(qid_list):
# When using the cur_na_prob as threshold, all predictions with na_prob > na_prob_cur are
# considered as unanswerable. Current question is considered answerable.
if qid not in scores:
continue
if qid_to_has_ans[qid]:
# Current question has ground truth answer, the prediction is correct. The raw score
# is added to cur_score
diff = scores[qid]
else:
# Current question doesn't have ground truth answer.
if preds[qid]:
# Prediction is not empty, incorrect. cur_score -= 1
diff = -1
else:
# Prediction is empty, correct, the original score 1 from num_no_ans is preserved.
diff = 0
cur_score += diff
if cur_score > best_score:
# When cur_score > best_score, the threshold can increase so that more questions are
# considered as answerable and fewer questions are considered as unanswerable.
# Imagine a PDF with two humps with some overlapping, the x axis is the na_prob. The
# hump on the left is answerable questions and the hump on the right is unanswerable
# questions.
# At some point, the number of actual answerable questions decreases, and we got more
# penalty from considering unanswerable questions as answerable than the score added
# from actual answerable questions, we will not change the threshold anymore and the
# optimal threshold is found.
best_score = cur_score
best_thresh = na_probs[qid]
if not unanswerable_exists:
return 100.0 * best_score / len(scores), best_thresh
else:
has_ans_score, has_ans_cnt = 0, 0
for qid in qid_list:
if not qid_to_has_ans[qid]:
continue
has_ans_cnt += 1
if qid not in scores:
continue
has_ans_score += scores[qid]
return 100.0 * best_score / len(scores), best_thresh, 1.0 * has_ans_score / has_ans_cnt
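# Worked sketch of the search above (hypothetical values): with
# preds = {"q1": "ans", "q2": ""}, scores = {"q1": 1, "q2": 0},
# na_probs = {"q1": 0.1, "q2": 0.8} and qid_to_has_ans = {"q1": True,
# "q2": False}, the loop starts from cur_score = 1 (q2 counted as a correctly
# handled unanswerable), gains +1 at q1 and never improves afterwards, so the
# best threshold settles at na_probs["q1"] = 0.1 with a score of 100.0.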
def find_all_best_thresh(
main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans, unanswerable_exists=False
):
"""
Update raw evaluation scores by finding the best threshold to determine a question is
impossible to answer.
Args:
main_eval (dict): Dictionary with raw evaluation scores without apply any threshold.
preds (dict): Dictionary with qa_id as keys and predicted answers as values.
exact_raw (dict): Dictionary with qa_id as keys and raw exact_match scores as values.
f1_raw (dict): Dictionary with qa_id as keys and raw f1 scores as values.
na_probs (dict): Dictionary with qa_id as keys and unanswerable probabilities as values.
qid_to_has_ans (dict): Dictionary with qa_id as keys boolean values indicating if the
question has answer as values.
unanswerable_exists (bool, optional): Whether there is unanswerable questions in the data.
Defaults to False.
Returns:
dict: Updated `main_eval` with scores after applying best threshold and best threshold
for each score.
"""
all_exact = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans, unanswerable_exists)
all_f1 = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans, unanswerable_exists)
main_eval["best_exact"] = all_exact[0]
main_eval["best_exact_thresh"] = all_exact[1]
main_eval["best_f1"] = all_f1[0]
main_eval["best_f1_thresh"] = all_f1[1]
if unanswerable_exists:
main_eval["has_ans_exact"] = all_exact[2]
main_eval["has_ans_f1"] = all_f1[2]
def evaluate_qa(
actual_dataset, preds, na_probs=None, na_prob_thresh=0, unanswerable_exists=False, out_file=None
):
"""
Evaluate question answering prediction results against ground truth answers.
    Args:
actual_dataset (:class:`utils_nlp.dataset.pytorch.QADataset`): Input question answering
dataset with ground truth answers.
preds (dict): The key of the dictionary is the qa_id in the original
:class:`utils_nlp.dataset.pytorch.QADataset`. The values of the dictionary are
the predicted answer texts in string type.
na_probs (dict, optional): Dictionary of qa_id and unanswerable probability pairs.
If None, unanswerable probabilities are all set to zero. Defaults to None.
na_prob_thresh (float, optional): Probability threshold to predict a question to be
unanswerable. For an unanswerable question, if `na_probs` > `na_prob_thresh`,
the prediction is considered as correct. Otherwise, the prediction is considered as
incorrect. Defaults to 0.
out_file (str, optional): Path of the file to save the evaluation results to.
Defaults to None.
Returns:
dict: A dictionary with exact_match and f1 values.
"""
# Helper functions
def _apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
"""Update the input scores by applying unanswerable probability threshold."""
new_scores = {}
for qid, s in scores.items():
pred_na = na_probs[qid] > na_prob_thresh
if pred_na:
new_scores[qid] = float(not qid_to_has_ans[qid])
else:
new_scores[qid] = s
return new_scores
def _make_eval_dict(exact_scores, f1_scores, qid_list=None):
"""Create a dictionary of evaluation results."""
if not qid_list:
total = len(exact_scores)
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores.values()) / total),
("f1", 100.0 * sum(f1_scores.values()) / total),
("total", total),
]
)
else:
total = len(qid_list)
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
("total", total),
]
)
def _merge_eval(main_eval, new_eval, prefix):
"""Merge multiple evaluation result dictionaries."""
for k in new_eval:
main_eval["%s_%s" % (prefix, k)] = new_eval[k]
# Helper functions end
if na_probs is None:
na_probs_available = False
na_probs = {k: 0.0 for k in preds}
else:
na_probs_available = True
qa_ids = [item.qa_id for item in actual_dataset]
actuals = [item.answer_text for item in actual_dataset]
qid_to_has_ans = {qa_id: bool(ans) for (qa_id, ans) in zip(qa_ids, actuals)}
has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
exact_raw, f1_raw = get_raw_scores(qa_ids, actuals, preds)
exact_thresh = _apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, na_prob_thresh)
f1_thresh = _apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, na_prob_thresh)
out_eval = _make_eval_dict(exact_thresh, f1_thresh)
if has_ans_qids:
has_ans_eval = _make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
_merge_eval(out_eval, has_ans_eval, "HasAns")
if no_ans_qids:
no_ans_eval = _make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
_merge_eval(out_eval, no_ans_eval, "NoAns")
if na_probs_available:
find_all_best_thresh(
out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans, unanswerable_exists
)
if out_file:
with open(out_file, "w") as f:
json.dump(out_eval, f)
else:
print(json.dumps(out_eval, indent=2))
return out_eval
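# Usage sketch (illustrative; `dataset` must be a
# utils_nlp.dataset.pytorch.QADataset carrying ground-truth answers, and the
# qa_ids and values below are assumptions made up for the example):
#
#   preds = {"qa_1": "in 1848", "qa_2": ""}
#   na_probs = {"qa_1": 0.02, "qa_2": 0.91}
#   result = evaluate_qa(dataset, preds, na_probs=na_probs,
#                        unanswerable_exists=True)
#   print(result["exact"], result["f1"], result["best_f1_thresh"])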
| 2.6875 | 3 |
datatypes.py | andreasjansson/music-inpainting-bert | 1 | 12787422 | <reponame>andreasjansson/music-inpainting-bert<filename>datatypes.py
# need this class because autoreloading frozen dataclasses is broken
# in jupyter https://github.com/ipython/ipython/issues/12185
from fractions import Fraction
from typing import List, Tuple
from dataclasses import dataclass
import music21 as m21
@dataclass(frozen=True, eq=True)
class Chord:
pitches: Tuple[int, ...]
root: int
bass: int
def __str__(self):
pitches_str = ",".join([str(p) for p in self.pitches])
return f"{pitches_str}/{self.root}/{self.bass}"
@classmethod
def from_string(cls, s):
pitches_str, root, bass = s.split("/")
pitches = tuple(int(x) for x in pitches_str.split(","))
root = int(root)
bass = int(bass)
return Chord(pitches, root, bass)
@classmethod
def from_m21_chord(cls, m21_chord: m21.harmony.ChordSymbol):
pitches = tuple(sorted(set([int(n.pitch.midi) % 12 for n in m21_chord.notes])))
root = int(m21_chord.root().midi % 12)
bass = int(m21_chord.bass().midi % 12)
return Chord(pitches, root, bass)
@classmethod
def from_dict(cls, d: dict):
return Chord(
pitches=tuple(sorted(d["pitches"])), root=d["root"], bass=d["bass"],
)
def to_dict(self):
return {
"pitches": self.pitches,
"root": self.root,
"bass": self.bass,
}
def transpose(self, step):
return Chord(
tuple(sorted((p + step) % 12 for p in self.pitches)),
(self.root + step) % 12,
(self.bass + step) % 12,
)
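    # Serialization sketch (illustrative): a C major triad with C in the bass
    # prints via __str__ as "0,4,7/0/0", and Chord.from_string("0,4,7/0/0")
    # round-trips back to Chord(pitches=(0, 4, 7), root=0, bass=0).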
@dataclass(frozen=True, eq=True)
class Song:
notes: Tuple[Tuple[float, int], ...]
chords: Tuple[Tuple[float, Chord], ...]
bars: Tuple[Tuple[float, int], ...]
duration: float
path: str
@classmethod
def from_dict(cls, d: dict, path: str):
return Song(
notes=tuple((t, n) for t, n in d["notes"]),
chords=tuple((t, Chord.from_dict(c)) for t, c in d["chords"]),
bars=tuple((t, n) for t, n in d["bars"]),
duration=d["duration"],
path=path,
)
def transpose(self, step):
notes = tuple((t, n + step) for t, n in self.notes)
chords = tuple((t, c.transpose(step)) for t, c in self.chords)
return Song(
notes=notes,
chords=chords,
bars=self.bars,
duration=self.duration,
path=self.path,
)
@dataclass(frozen=True, eq=True)
class Pattern:
timed_notes: Tuple[Tuple[Fraction, int], ...]
@classmethod
def from_list(cls, lst):
timed_notes = tuple((Fraction(frac[0], frac[1]), pitch) for frac, pitch in lst)
return Pattern(timed_notes=timed_notes)
| 2.953125 | 3 |
edk2toolext/tests/capsule/capsule_helper_test.py | cfernald/edk2-pytool-extensions | 32 | 12787423 | <reponame>cfernald/edk2-pytool-extensions<gh_stars>10-100
## @file capsule_helper_test.py
# This unittest module contains test cases for the capsule_helper module.
#
##
# Copyright (C) Microsoft Corporation
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import os
import uuid
import unittest
import tempfile
from edk2toollib.uefi.uefi_capsule_header import UefiCapsuleHeaderClass
from edk2toollib.uefi.fmp_capsule_header import FmpCapsuleHeaderClass, FmpCapsuleImageHeaderClass
from edk2toolext.capsule import capsule_helper
DUMMY_OPTIONS = {
'capsule': {
'fw_version': '0xDEADBEEF',
'lsv_version': '0xFEEDF00D',
'esrt_guid': '00112233-4455-6677-8899-aabbccddeeff',
'fw_name': 'TEST_FW',
'fw_version_string': '1.2.3', # deliberately use 3-part version to exercise version normalization.
'provider_name': 'TESTER',
'fw_description': 'TEST FW',
'fw_integrity_file': "IntegrityFile.bin"
},
'signer': {
'option2': 'value2',
'option_not': 'orig_value'
}
}
DUMMY_OPTIONS_FILE_NAME = 'dummy_options_file'
DUMMY_PAYLOAD_FILE_NAME = 'dummy_payload'
class CapsuleSignerTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# We'll use the one-time setup to create
# any temporary test files we'll need.
cls.temp_dir = tempfile.mkdtemp()
cls.dummy_payload = os.path.join(cls.temp_dir, DUMMY_PAYLOAD_FILE_NAME + ".bin")
with open(cls.dummy_payload, 'wb') as dummy_file:
dummy_file.write(b'DEADBEEF')
def test_should_pass_wrapped_blob_to_signing_module(self):
dummy_payload = b'This_Is_My_Sample_Payload,ThereAreManyLikeIt;This One Is Mine'
class DummySigner(object):
@classmethod
def sign(cls, data, signature_options, signer_options):
self.assertTrue(dummy_payload in data)
capsule_helper.build_capsule(dummy_payload, DUMMY_OPTIONS['capsule'], DummySigner, DUMMY_OPTIONS['signer'])
def test_should_pass_signer_options_to_signing_module(self):
class DummySigner(object):
@classmethod
def sign(cls, data, signature_options, signer_options):
self.assertEqual(signer_options, DUMMY_OPTIONS['signer'])
capsule_helper.build_capsule(b'030303', DUMMY_OPTIONS['capsule'], DummySigner, DUMMY_OPTIONS['signer'])
# def test_should_be_able_to_generate_a_production_equivalent_capsule(self):
# with open(BUILD_CAPSULE_BINARY_PATH, 'rb') as data_file:
# capsule_data = data_file.read()
# capsule_options = {
# "esrt_guid": "80ddc468-57a0-43e5-9594-8ba2ce5c342e",
# "fw_version": "0x7fff000",
# "lsv_version": "0x1"
# }
# signer_options = {
# 'key_file': TEST_CAPSULE_SIGNER_1,
# 'eku': "1.3.6.1.4.1.311.172.16.17.32"
# }
# wdk_signer = signing_helper.get_signer(signing_helper.SIGNTOOL_SIGNER)
# final_capsule = capsule_helper.build_capsule(capsule_data, capsule_options, wdk_signer, signer_options)
# with open(TEST_CAPSULE_PATH_1, 'rb') as comparison_file:
# comparison_data = comparison_file.read()
# self.assertEqual(final_capsule.Encode(), comparison_data)
# def test_should_be_able_to_update_the_guid_in_place(self):
# with open(BUILD_CAPSULE_BINARY_PATH, 'rb') as data_file:
# capsule_data = data_file.read()
# capsule_options = {
# "esrt_guid": "3624cd98-bdb6-461b-84a3-4f4853efc7e3",
# "fw_version": "0x7fff000",
# "lsv_version": "0x1"
# }
# signer_options = {
# 'key_file': TEST_CAPSULE_SIGNER_1,
# 'eku': "1.3.6.1.4.1.311.172.16.17.32"
# }
# wdk_signer = signing_helper.get_signer(signing_helper.SIGNTOOL_SIGNER)
# final_capsule = capsule_helper.build_capsule(capsule_data, capsule_options, wdk_signer, signer_options)
# with open(os.path.join(TEMP_CAPSULE_DIRECTORY_PATH, 'Capsule1.bin'), 'wb') as out_file:
# out_file.write(final_capsule.Encode())
# final_capsule.DumpInfo()
# fmp_capsule_image_header = final_capsule.FmpCapsuleHeader.GetFmpCapsuleImageHeader(0)
# fmp_capsule_image_header.UpdateImageTypeId = uuid.UUID('80ddc468-57a0-43e5-9594-8ba2ce5c342e')
# # WORKAROUND for library bug.
# final_capsule.FmpCapsuleHeader._ItemOffsetList = []
# with open(os.path.join(TEMP_CAPSULE_DIRECTORY_PATH, 'Capsule2.bin'), 'wb') as out_file:
# out_file.write(final_capsule.Encode())
# final_capsule.DumpInfo()
# self.assertFalse(True)
class FileGenerationTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# We'll use the one-time setup to create
# any temporary test files we'll need.
cls.temp_dir = tempfile.mkdtemp()
cls.dummy_payload = os.path.join(cls.temp_dir, DUMMY_PAYLOAD_FILE_NAME + ".bin")
with open(cls.dummy_payload, 'wb') as dummy_file:
dummy_file.write(b'DEADBEEF')
def test_should_be_able_to_save_a_capsule(self):
fmp_capsule_image_header = FmpCapsuleImageHeaderClass()
fmp_capsule_image_header.UpdateImageTypeId = uuid.UUID(DUMMY_OPTIONS['capsule']['esrt_guid'])
fmp_capsule_image_header.UpdateImageIndex = 1
fmp_capsule_header = FmpCapsuleHeaderClass()
fmp_capsule_header.AddFmpCapsuleImageHeader(fmp_capsule_image_header)
uefi_capsule_header = UefiCapsuleHeaderClass()
uefi_capsule_header.FmpCapsuleHeader = fmp_capsule_header
uefi_capsule_header.PersistAcrossReset = True
uefi_capsule_header.InitiateReset = True
capsule_file_path = capsule_helper.save_capsule(uefi_capsule_header, DUMMY_OPTIONS['capsule'], self.temp_dir)
# Now read the data and check for the GUID.
with open(capsule_file_path, 'rb') as capsule_file:
capsule_bytes = capsule_file.read()
self.assertTrue(uuid.UUID(DUMMY_OPTIONS['capsule']['esrt_guid']).bytes_le in capsule_bytes)
def test_should_be_able_to_generate_windows_files(self):
inf_file_path = capsule_helper.create_inf_file(DUMMY_OPTIONS['capsule'], self.temp_dir)
self.assertTrue(os.path.isfile(inf_file_path))
@unittest.skip("test fails in unittest environment. need to debug")
def test_should_be_able_to_generate_cat(self):
cat_file_path = capsule_helper.create_cat_file(DUMMY_OPTIONS['capsule'], self.temp_dir)
self.assertTrue(os.path.isfile(cat_file_path))
class MultiNodeFileGenerationTest(unittest.TestCase):
@staticmethod
def buildPayload(esrt):
fmp_capsule_image_header = FmpCapsuleImageHeaderClass()
fmp_capsule_image_header.UpdateImageTypeId = uuid.UUID(esrt)
fmp_capsule_image_header.UpdateImageIndex = 1
fmp_capsule_header = FmpCapsuleHeaderClass()
fmp_capsule_header.AddFmpCapsuleImageHeader(fmp_capsule_image_header)
uefi_capsule_header = UefiCapsuleHeaderClass()
uefi_capsule_header.FmpCapsuleHeader = fmp_capsule_header
uefi_capsule_header.PersistAcrossReset = True
uefi_capsule_header.InitiateReset = True
return uefi_capsule_header
@classmethod
def setUpClass(cls):
cls.temp_dir = tempfile.mkdtemp()
cls.temp_output_dir = tempfile.mkdtemp()
cls.capsule = capsule_helper.Capsule(
version_string="1.2.3",
name="TEST_FW",
provider_name="Tester",
)
cls.capsule.payloads.append(
capsule_helper.CapsulePayload(
cls.buildPayload("ea5c13fe-cac9-4fd7-ac30-37709bd668f2"),
"test1.bin",
uuid.UUID("ea5c13fe-cac9-4fd7-ac30-37709bd668f2"),
0xDEADBEEF,
"TEST FW"
)
)
cls.capsule.payloads.append(
capsule_helper.CapsulePayload(
cls.buildPayload("43e67b4e-b2f1-4891-9ff2-a6acd9c74cbd"),
"test2.bin",
uuid.UUID("43e67b4e-b2f1-4891-9ff2-a6acd9c74cbd"),
0xDEADBEEF,
"TEST FW"
)
)
def test_should_be_able_to_save_a_multi_node_capsule(self):
capsule_file_path = capsule_helper.save_multinode_capsule(self.capsule, self.temp_output_dir)
# make sure all the files we expect got created
for payload in self.capsule.payloads:
payload_file = os.path.join(capsule_file_path, payload.payload_filename)
self.assertTrue(os.path.isfile(payload_file))
with open(payload_file, 'rb') as fh:
capsule_bytes = fh.read()
self.assertIn(payload.esrt_guid.bytes_le, capsule_bytes)
def test_should_be_able_to_save_a_multi_node_capsule_with_integrity(self):
self.capsule.payloads[0].integrity_data = uuid.UUID("ea5c13fe-cac9-4fd7-ac30-37709bd668f2").bytes
self.capsule.payloads[0].integrity_filename = "integrity1.bin"
self.capsule.payloads[1].integrity_data = uuid.UUID("43e67b4e-b2f1-4891-9ff2-a6acd9c74cbd").bytes
self.capsule.payloads[1].integrity_filename = "integrity2.bin"
capsule_file_path = capsule_helper.save_multinode_capsule(self.capsule, self.temp_output_dir)
for payload in self.capsule.payloads:
payload_file = os.path.join(capsule_file_path, payload.payload_filename)
self.assertTrue(os.path.isfile(payload_file))
with open(payload_file, 'rb') as fh:
capsule_bytes = fh.read()
self.assertIn(payload.esrt_guid.bytes_le, capsule_bytes)
integrityFile = os.path.join(capsule_file_path, payload.integrity_filename)
self.assertTrue(os.path.isfile(integrityFile))
with open(integrityFile, 'rb') as fh:
integrity_bytes = fh.read()
self.assertIn(payload.integrity_data, integrity_bytes)
self.capsule.payloads[0].integrity_data = None
self.capsule.payloads[0].integrity_filename = None
self.capsule.payloads[1].integrity_data = None
self.capsule.payloads[1].integrity_filename = None
def test_should_be_able_to_generate_multi_node_inf_file(self):
inf_file_path = capsule_helper.create_multinode_inf_file(self.capsule, self.temp_output_dir)
self.assertTrue(os.path.isfile(inf_file_path))
if __name__ == '__main__':
unittest.main()
| 2.109375 | 2 |
tests/pyflakes_bears/pep8_naming_test_files/E05/invalid_nested_class.py | MacBox7/coala-pyflakes | 0 | 12787424 | <reponame>MacBox7/coala-pyflakes<filename>tests/pyflakes_bears/pep8_naming_test_files/E05/invalid_nested_class.py
def foo():
'''
>>> class Good():
... class bad():
... pass
'''
pass
| 1.445313 | 1 |
cellacdc/bioformats/formatwriter.py | SchmollerLab/Cell_ACDC | 29 | 12787425 | # Python-bioformats is distributed under the GNU General Public
# License, but this file is licensed under the more permissive BSD
# license. See the accompanying file LICENSE for details.
#
# Copyright (c) 2009-2014 Broad Institute
# All rights reserved.
'''formatwriter.py - mechanism to wrap a bioformats WriterWrapper and ImageWriter
The following file formats can be written using Bio-Formats:
- TIFF (uncompressed or LZW)
- OME-TIFF (uncompressed or LZW)
- JPEG
- PNG
- AVI (uncompressed)
- QuickTime (uncompressed is supported natively; additional codecs use QTJava)
- Encapsulated PostScript (EPS)
Support for OME-XML in the near future.
The writer API (see loci.formats.IFormatWriter) is very similar to the reader
API, in that files are written one plane at time (rather than all at once).
All writers allow the output file to be changed before the last plane has
been written. This allows you to write to any number of output files using
the same writer and output settings (compression, frames per second, etc.),
and is especially useful for formats that do not support multiple images per
file.
'''
from __future__ import absolute_import, print_function, unicode_literals
__version__ = "$Revision$"
import numpy as np
import os
import sys
import javabridge as jutil
from .. import bioformats
import javabridge
from ..bioformats import omexml as ome
def write_image(pathname, pixels, pixel_type,
c = 0, z = 0, t = 0,
size_c = 1, size_z = 1, size_t = 1,
channel_names = None):
"""Write the image using bioformats.
:param filename: save to this filename
:param pixels: the image to save
:param pixel_type: save using this pixel type
:param c: the image's channel index
:param z: the image's `z` index
:param t: the image's `t` index
:param size_c: # of channels in the stack
:param size_z: # of z stacks
:param size_t: # of timepoints in the stack
:param channel_names: names of the channels (make up names if not present).
"""
omexml = ome.OMEXML()
omexml.image(0).Name = os.path.split(pathname)[1]
p = omexml.image(0).Pixels
assert isinstance(p, ome.OMEXML.Pixels)
p.SizeX = pixels.shape[1]
p.SizeY = pixels.shape[0]
p.SizeC = size_c
p.SizeT = size_t
p.SizeZ = size_z
p.DimensionOrder = ome.DO_XYCZT
p.PixelType = pixel_type
index = c + size_c * z + size_c * size_z * t
if pixels.ndim == 3:
p.SizeC = pixels.shape[2]
p.Channel(0).SamplesPerPixel = pixels.shape[2]
omexml.structured_annotations.add_original_metadata(
ome.OM_SAMPLES_PER_PIXEL, str(pixels.shape[2]))
elif size_c > 1:
p.channel_count = size_c
pixel_buffer = convert_pixels_to_buffer(pixels, pixel_type)
xml = omexml.to_xml()
script = """
importClass(Packages.loci.formats.services.OMEXMLService,
Packages.loci.common.services.ServiceFactory,
Packages.loci.formats.ImageWriter);
var service = new ServiceFactory().getInstance(OMEXMLService);
var metadata = service.createOMEXMLMetadata(xml);
var writer = new ImageWriter();
writer.setMetadataRetrieve(metadata);
writer.setId(path);
writer.setInterleaved(true);
writer.saveBytes(index, buffer);
writer.close();
"""
jutil.run_script(script,
dict(path=pathname,
xml=xml,
index=index,
buffer=pixel_buffer))
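# Usage sketch (illustrative; assumes a JVM has already been started for
# javabridge/bioformats, e.g. javabridge.start_vm(class_path=bioformats.JARS),
# and the output path is a placeholder):
#
#   plane = np.zeros((256, 256), np.uint8)
#   write_image("example.ome.tiff", plane, ome.PT_UINT8)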
def convert_pixels_to_buffer(pixels, pixel_type):
'''Convert the pixels in the image into a buffer of the right pixel type
pixels - a 2d monochrome or color image
pixel_type - one of the OME pixel types
returns a 1-d byte array
'''
if pixel_type in (ome.PT_UINT8, ome.PT_INT8, ome.PT_BIT):
as_dtype = np.uint8
elif pixel_type in (ome.PT_UINT16, ome.PT_INT16):
as_dtype = "<u2"
elif pixel_type in (ome.PT_UINT32, ome.PT_INT32):
as_dtype = "<u4"
elif pixel_type == ome.PT_FLOAT:
as_dtype = "<f4"
elif pixel_type == ome.PT_DOUBLE:
as_dtype = "<f8"
else:
raise NotImplementedError("Unsupported pixel type: %d" % pixel_type)
buf = np.frombuffer(np.ascontiguousarray(pixels, as_dtype).data, np.uint8)
env = jutil.get_env()
return env.make_byte_array(buf)
def make_iformat_writer_class(class_name):
'''Bind a Java class that implements IFormatWriter to a Python class
Returns a class that implements IFormatWriter through calls to the
implemented class passed in. The returned class can be subclassed to
provide additional bindings.
'''
class IFormatWriter(object):
'''A wrapper for loci.formats.IFormatWriter
See http://hudson.openmicroscopy.org.uk/job/LOCI/javadoc/loci/formats/ImageWriter.html
'''
canDoStacks = jutil.make_method('canDoStacks', '()Z',
'Reports whether the writer can save multiple images to a single file.')
getColorModel = jutil.make_method('getColorModel', '()Ljava/awt/image/ColorModel;',
'Gets the color model.')
getCompression = jutil.make_method('getCompression', '()Ljava/lang/String;',
'Gets the current compression type.')
getCompressionTypes = jutil.make_method('getCompressionTypes', '()[Ljava/lang/String;',
'Gets the available compression types.')
getFramesPerSecond = jutil.make_method('getFramesPerSecond', '()I',
'Gets the frames per second to use when writing.')
getMetadataRetrieve = jutil.make_method('getMetadataRetrieve', '()Lloci/formats/meta/MetadataRetrieve;',
'Retrieves the current metadata retrieval object for this writer.')
getPixelTypes = jutil.make_method('getPixelTypes', '()[I',
'Gets the supported pixel types.')
# getPixelTypes = jutil.make_method('getPixelTypes', '(Ljava/lang/String;)[I',
# 'Gets the supported pixel types for the given codec.')
isInterleaved = jutil.make_method('isInterleaved', '()Z',
'Gets whether or not the channels in an image are interleaved.')
isSupportedType = jutil.make_method('isSupportedType', '(I)Z',
'Checks if the given pixel type is supported.')
saveBytes = jutil.make_method('saveBytes', '([BZ)V',
'Saves the given byte array to the current file.')
saveBytesIB = jutil.make_method('saveBytes', '(I[B)V',
'Saves bytes, first arg is image #')
# saveBytes = jutil.make_method('saveBytes', '([BIZZ)V',
# 'Saves the given byte array to the given series in the current file.')
savePlane = jutil.make_method('savePlane', '(Ljava/lang/Object;Z)V',
'Saves the given image plane to the current file.')
# savePlane = jutil.make_method('savePlane', '(Ljava/lang/Object;IZZ)V',
# 'Saves the given image plane to the given series in the current file.')
setColorModel = jutil.make_method('setColorModel', '(Ljava/awt/image/ColorModel;)V',
'Sets the color model.')
setCompression = jutil.make_method('setCompression', '(Ljava/lang/String;)V',
'Sets the current compression type.')
setFramesPerSecond = jutil.make_method('setFramesPerSecond', '(I)V',
'Sets the frames per second to use when writing.')
setInterleaved = jutil.make_method('setInterleaved', '(Z)V',
'Sets whether or not the channels in an image are interleaved.')
setMetadataRetrieve = jutil.make_method('setMetadataRetrieve', '(Lloci/formats/meta/MetadataRetrieve;)V',
'Sets the metadata retrieval object from which to retrieve standardized metadata.')
setValidBitsPerPixel = jutil.make_method(
'setValidBitsPerPixel', '(I)V',
'Sets the number of valid bits per pixel')
setSeries = jutil.make_method(
'setSeries', '(I)V',
'''Set the series for the image file
series - the zero-based index of the image stack in the file,
for instance in a multi-image tif.''')
return IFormatWriter
def make_image_writer_class():
'''Return an image writer class for the given Java environment'''
env = jutil.get_env()
class_name = 'loci/formats/ImageWriter'
klass = env.find_class(class_name)
base_klass = env.find_class('loci/formats/IFormatWriter')
IFormatWriter = make_iformat_writer_class(class_name)
#
# This uses the writers.txt file from inside the loci_tools.jar
#
class_list = jutil.make_instance("loci/formats/ClassList",
"(Ljava/lang/String;"
"Ljava/lang/Class;" # base
"Ljava/lang/Class;)V", # location in jar
"writers.txt", base_klass, klass)
class ImageWriter(IFormatWriter):
new_fn = jutil.make_new(class_name, '(Lloci/formats/ClassList;)V')
def __init__(self):
self.new_fn(class_list)
setId = jutil.make_method('setId', '(Ljava/lang/String;)V',
'Sets the current file name.')
addStatusListener = jutil.make_method('addStatusListener', '()Lloci/formats/StatusListener;',
'Adds a listener for status update events.')
close = jutil.make_method('close','()V',
'Closes currently open file(s) and frees allocated memory.')
getFormat = jutil.make_method('getFormat', '()Ljava/lang/String;',
'Gets the name of this file format.')
getNativeDataType = jutil.make_method('getNativeDataType', '()Ljava/lang/Class;',
'Returns the native data type of image planes for this reader, as returned by IFormatReader.openPlane(int, int, int, int, int) or IFormatWriter#saveData.')
getStatusListeners = jutil.make_method('getStatusListeners', '()[Lloci/formats/StatusListener;',
'Gets a list of all registered status update listeners.')
getSuffixes = jutil.make_method('getSuffixes', '()Ljava/lang/String;',
'Gets the default file suffixes for this file format.')
getWriter = jutil.make_method('getWriter', '()Lloci/formats/IFormatWriter;',
'Gets the writer used to save the current file.')
# getWriter = jutil.make_method('getWriter', '(Ljava/lang/Class)Lloci/formats/IFormatWriter;',
# 'Gets the file format writer instance matching the given class.')
# getWriter = jutil.make_method('getWriter', '(Ljava/lang/String;)Lloci/formats/IFormatWriter;',
# 'Gets the writer used to save the given file.')
getWriters = jutil.make_method('getWriters', '()[Lloci/formats/IFormatWriter;',
'Gets all constituent file format writers.')
isThisType = jutil.make_method('isThisType', '(Ljava/lang/String;)Z',
'Checks if the given string is a valid filename for this file format.')
removeStatusListener = jutil.make_method('removeStatusListener', '(Lloci/formats/StatusListener;)V',
                                              'Removes a listener for status update events.')
return ImageWriter
def make_ome_tiff_writer_class():
'''Return a class that wraps loci.formats.out.OMETiffWriter'''
class_name = 'loci/formats/out/OMETiffWriter'
IFormatWriter = make_iformat_writer_class(class_name)
class OMETiffWriter(IFormatWriter):
def __init__(self):
            self.new_fn = jutil.make_new(class_name, '()V')  # use the factory's class_name; self.class_name is never defined
self.setId = jutil.make_method('setId', '(Ljava/lang/String;)V',
'Sets the current file name.')
self.close = jutil.make_method(
'close','()V',
'Closes currently open file(s) and frees allocated memory.')
self.saveBytesIFD = jutil.make_method(
'saveBytes', '(I[BLloci/formats/tiff/IFD;)V',
'''save a byte array to an image channel
index - image index
bytes - byte array to save
ifd - a loci.formats.tiff.IFD instance that gives all of the
IFD values associated with the channel''')
self.new_fn()
return OMETiffWriter
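# A hedged usage sketch for the factory above: the JVM must already be attached via
# jutil.attach(), a MetadataRetrieve object is normally set before setId, and the path
# and byte layout below are placeholders rather than a tested recipe.
def _ome_tiff_writer_sketch(path, plane_bytes):
    '''Write a single plane of raw bytes to an OME-TIFF file.'''
    OMETiffWriter = make_ome_tiff_writer_class()
    writer = OMETiffWriter()
    writer.setId(path)                   # open the output file
    writer.saveBytesIB(0, plane_bytes)   # save image #0
    writer.close()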
def make_writer_wrapper_class(class_name):
'''Make an ImageWriter wrapper class
class_name - the name of the wrapper class
You can instantiate an instance of the wrapper class like this:
writer = XXX(ImageWriter())
'''
IFormatWriter = make_iformat_writer_class(class_name)
class WriterWrapper(IFormatWriter):
__doc__ = '''A wrapper for %s
See http://hudson.openmicroscopy.org.uk/job/LOCI/javadoc/loci/formats/ImageWriter.html
'''%class_name
new_fn = jutil.make_new(class_name, '(Lloci/formats/IFormatWriter;)V')
def __init__(self, writer):
self.new_fn(writer)
setId = jutil.make_method('setId', '(Ljava/lang/String;)V',
'Sets the current file name.')
return WriterWrapper
def make_format_writer_class(class_name):
'''Make a FormatWriter wrapper class
class_name - the name of a class that implements loci.formats.FormatWriter
Known names in the loci.formats.out package:
APNGWriter, AVIWriter, EPSWriter, ICSWriter, ImageIOWriter,
JPEG2000Writer, JPEGWriter, LegacyQTWriter, OMETiffWriter,
OMEXMLWriter, QTWriter, TiffWriter
'''
new_fn = jutil.make_new(class_name,
'(Ljava/lang/String;Ljava/lang/String;)V')
class FormatWriter(object):
__doc__ = '''A wrapper for %s implementing loci.formats.FormatWriter
See http://hudson.openmicroscopy.org.uk/job/LOCI/javadoc/loci/formats/FormatWriter'''%class_name
def __init__(self):
self.new_fn()
canDoStacks = jutil.make_method('canDoStacks','()Z',
'Reports whether the writer can save multiple images to a single file')
getColorModel = jutil.make_method('getColorModel',
'()Ljava/awt/image/ColorModel;',
'Gets the color model')
getCompression = jutil.make_method('getCompression',
'()Ljava/lang/String;',
'Gets the current compression type')
getCompressionTypes = jutil.make_method('getCompressionTypes',
'()[Ljava/lang/String;',
'Gets the available compression types')
getFramesPerSecond = jutil.make_method('getFramesPerSecond',
'()I', "Gets the frames per second to use when writing")
getMetadataRetrieve = jutil.make_method('getMetadataRetrieve',
'()Lloci/formats/meta/MetadataRetrieve;',
'Retrieves the current metadata retrieval object for this writer.')
getPixelTypes = jutil.make_method('getPixelTypes',
'()[I')
isInterleaved = jutil.make_method('isInterleaved','()Z',
'Gets whether or not the channels in an image are interleaved')
isSupportedType = jutil.make_method('isSupportedType','(I)Z',
'Checks if the given pixel type is supported')
saveBytes = jutil.make_method('saveBytes', '([BZ)V',
'Saves the given byte array to the current file')
setColorModel = jutil.make_method('setColorModel',
'(Ljava/awt/image/ColorModel;)V',
'Sets the color model')
setCompression = jutil.make_method('setCompression',
'(Ljava/lang/String;)V',
'Sets the current compression type')
setFramesPerSecond = jutil.make_method('setFramesPerSecond',
'(I)V',
'Sets the frames per second to use when writing')
setId = jutil.make_method('setId','(Ljava/lang/String;)V',
'Sets the current file name')
setInterleaved = jutil.make_method('setInterleaved', '(Z)V',
'Sets whether or not the channels in an image are interleaved')
setMetadataRetrieve = jutil.make_method('setMetadataRetrieve',
'(Lloci/formats/meta/MetadataRetrieve;)V',
'Sets the metadata retrieval object from which to retrieve standardized metadata')
return FormatWriter
def getRGBColorSpace():
'''Get a Java object that represents an RGB color space
See java.awt.color.ColorSpace: this returns the linear RGB color space
'''
cs_linear_rgb = jutil.get_static_field('java/awt/color/ColorSpace',
'CS_LINEAR_RGB', 'I')
return jutil.static_call('java/awt/color/ColorSpace', 'getInstance',
'(I)Ljava/awt/color/ColorSpace;',
cs_linear_rgb)
def getGrayColorSpace():
    '''Get a Java object that represents a gray color space
    See java.awt.color.ColorSpace: this returns the CS_GRAY color space
'''
cs_gray = jutil.get_static_field('java/awt/color/ColorSpace',
'CS_GRAY', 'I')
return jutil.static_call('java/awt/color/ColorSpace', 'getInstance',
'(I)Ljava/awt/color/ColorSpace;',
cs_gray)
'''Constant for color model transparency indicating bitmask transparency'''
BITMASK = 'BITMASK'
'''Constant for color model transparency indicating an opaque color model'''
OPAQUE = 'OPAQUE'
'''Constant for color model transparency indicating a transparent color model'''
TRANSPARENT = 'TRANSPARENT'
'''Constant for color model transfer type indicating byte per pixel'''
TYPE_BYTE = 'TYPE_BYTE'
'''Constant for color model transfer type indicating unsigned short per pixel'''
TYPE_USHORT = 'TYPE_USHORT'
'''Constant for color model transfer type indicating integer per pixel'''
TYPE_INT = 'TYPE_INT'
def getColorModel(color_space,
has_alpha=False,
is_alpha_premultiplied = False,
transparency = OPAQUE,
transfer_type = TYPE_BYTE):
'''Return a java.awt.image.ColorModel color model
color_space - a java.awt.color.ColorSpace such as returned by
getGrayColorSpace or getRGBColorSpace
has_alpha - True if alpha channel is specified
is_alpha_premultiplied - True if other channel values have already
been reduced by the alpha multiplier, False if the channel values are
independent of the multiplier.
transparency - one of BITMASK, OPAQUE or TRANSPARENT.
transfer_type - one of TYPE_BYTE, TYPE_USHORT, TYPE_INT
'''
jtransparency = jutil.get_static_field('java/awt/Transparency',
transparency,
'I')
jtransfer_type = jutil.get_static_field('java/awt/image/DataBuffer',
transfer_type, 'I')
return jutil.make_instance('java/awt/image/ComponentColorModel',
'(Ljava/awt/color/ColorSpace;ZZII)V',
color_space, has_alpha, is_alpha_premultiplied,
jtransparency, jtransfer_type)
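# A small sketch combining the helpers above: build an opaque, byte-per-pixel grayscale
# ColorModel. It only uses functions defined in this module, but still assumes a JVM has
# been attached with jutil.attach() before it is called.
def _gray_color_model_sketch():
    '''Construct a java.awt.image.ComponentColorModel for 8-bit grayscale data.'''
    return getColorModel(getGrayColorSpace(),
                         has_alpha=False,
                         is_alpha_premultiplied=False,
                         transparency=OPAQUE,
                         transfer_type=TYPE_BYTE)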
if __name__ == "__main__":
import wx
import matplotlib.backends.backend_wxagg as mmmm
from .. import bioformats
from .formatreader import *
from .metadatatools import *
app = wx.PySimpleApp()
# dlg = wx.FileDialog(None)
# if dlg.ShowModal()==wx.ID_OK:
# filename = dlg.Path
# else:
# app.Exit()
# sys.exit()
filename = '/Users/afraser/Desktop/cpa_example/images/AS_09125_050116000001_A01f00d0.png'
filename = '/Users/afraser/Desktop/wedding/header.jpg'
out_file = '/Users/afraser/Desktop/test_output.avi'
try:
os.remove(out_file)
print('previous output file deleted')
except:
print('no output file to delete')
env = jutil.attach()
ImageReader = make_image_reader_class()
ChannelSeparator = make_reader_wrapper_class("loci/formats/ChannelSeparator")
FormatTools = make_format_tools_class()
# writer testing
ImageWriter = make_image_writer_class()
writer = ImageWriter()
w = 400
h = 400
c = 3
z = 1
t = 4
images = []
for tt in range(t):
images += [(np.random.rand(w, h, c) * 255).astype('uint8')]
imeta = createOMEXMLMetadata()
meta = wrap_imetadata_object(imeta)
meta.createRoot()
meta.setPixelsBigEndian(True, 0, 0)
meta.setPixelsDimensionOrder('XYCZT', 0, 0)
meta.setPixelsPixelType(FormatTools.getPixelTypeString(FormatTools.UINT8), 0, 0)
meta.setPixelsSizeX(w, 0, 0)
meta.setPixelsSizeY(h, 0, 0)
meta.setPixelsSizeC(c, 0, 0)
meta.setPixelsSizeZ(z, 0, 0)
meta.setPixelsSizeT(t, 0, 0)
meta.setLogicalChannelSamplesPerPixel(c, 0, 0)
print('big endian:', meta.getPixelsBigEndian(0, 0))
print('dim order:', meta.getPixelsDimensionOrder(0, 0))
print('pixel type:', meta.getPixelsPixelType(0, 0))
print('size x:', meta.getPixelsSizeX(0, 0))
print('size y:', meta.getPixelsSizeY(0, 0))
print('size c:', meta.getPixelsSizeC(0, 0))
print('size z:', meta.getPixelsSizeZ(0, 0))
print('size t:', meta.getPixelsSizeT(0, 0))
print('samples per pixel:', meta.getLogicalChannelSamplesPerPixel(0, 0))
writer.setMetadataRetrieve(meta)
writer.setId(out_file)
for image in images:
if len(image.shape)==3 and image.shape[2] == 3:
save_im = np.array([image[:,:,0], image[:,:,1], image[:,:,2]]).astype(np.uint8).flatten()
else:
save_im = image.astype(np.uint8).flatten()
writer.saveBytes(env.make_byte_array(save_im), (image is images[-1]))
writer.close()
print('Done writing image :)')
# import PIL.Image as Image
# im = Image.open(out_file, 'r')
# im.show()
jutil.detach()
app.MainLoop()
| 2.5 | 2 |
cv_interactive/utils.py | kovarn/computervision-interactive | 0 | 12787426 | from matplotlib import pyplot as plt
def imshow(img, **kwargs):
if len(img.shape) == 2 and 'cmap' not in kwargs:
return plt.imshow(img, cmap=plt.cm.gray, **kwargs)
if len(img.shape) == 3 and img.shape[2] == 3:
return plt.imshow(img[:, :, ::-1], **kwargs)
return plt.imshow(img, **kwargs)
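# A minimal usage sketch (assumption: images come from cv2.imread, i.e. BGR channel
# order, which is why the helper above reverses the last axis before display).
def _imshow_sketch(path="example.png"):
    """Read an image with OpenCV and display it with the helper above."""
    import cv2  # assumed to be available alongside this interactive-CV package
    img = cv2.imread(path)
    imshow(img)
    plt.show()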
| 3.125 | 3 |
scripts/issues/issue15.py | Jhsmit/awesome-panel | 179 | 12787427 | <reponame>Jhsmit/awesome-panel
import panel as pn
text = r"""
```math
f(x) = \int_{-\infty}^\infty
\hat f(\xi)\,e^{2 \pi i \xi x}
\,d\xi
```
"""
app = pn.Column(pn.pane.Markdown(text))
app.servable()
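# The app above is marked servable; one way to view it (assuming a standard Panel
# install) is `panel serve scripts/issues/issue15.py`. Running the file as a plain
# script would instead need app.show(), left commented out to keep behaviour unchanged.
# app.show()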
| 1.898438 | 2 |
cohesity_management_sdk/models/type_gpfs_protection_source_enum.py | nick6655/management-sdk-python | 18 | 12787428 | # -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
class TypeGpfsProtectionSourceEnum(object):
"""Implementation of the 'Type_GpfsProtectionSource' enum.
    Specifies the type of the entity in a GPFS file system
    like 'kCluster', 'kFilesystem', or 'kFileset'.
    'kCluster' indicates a GPFS cluster.
'kFilesystem' indicates a top level filesystem on GPFS cluster.
'kFileset' indicates a fileset within a filesystem.
Attributes:
KCLUSTER: TODO: type description here.
KFILESYSTEM: TODO: type description here.
KFILESET: TODO: type description here.
"""
KCLUSTER = 'kCluster'
KFILESYSTEM = 'kFilesystem'
KFILESET = 'kFileset'
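# The members above are plain string constants, so a sketch of typical use is a direct
# string comparison (the helper name below is illustrative only):
def _is_fileset(entity_type):
    """Return True if a protection-source type string denotes a GPFS fileset."""
    return entity_type == TypeGpfsProtectionSourceEnum.KFILESET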
| 2.03125 | 2 |
Game of Life/game_of_life_interface.py | Pedro-W21/Game-of-Life-py | 0 | 12787429 | # -*- coding: utf-8 -*-
"""
This file contains the graphical-interface implementation of the Game of Life logic.
It does not contain the code of the Espace class used for the simulation itself.
Created on Wed Feb 17 14:36:56 2021
@author: <NAME>
"""
import os
import game_of_life_logique as gol
from tkinter import *
os.chdir(os.path.dirname(__file__))
def from_rgb(rgb):
"""prend un tuple rgb et le transforme en string héxadécimal de couleur tkinter
-----------------------------------
Entrées : tuple rgb de 0 à 255 (R,G,B)
Sortie : string d'héxadécimal
note : fonction prise d'internet.
"""
return "#%02x%02x%02x" % rgb
class Fenetre(Frame):
def __init__(self, espace, master=None):
"""initialisation de la fenetre
-----------------------------------
Entrées : espace enfant et master de la fenetre
Sortie : objet fenetre
"""
self.master = master
self.master.title("<NAME> Game of Life")
self.master.config(bg="grey")
self.espace = espace
self.col_vie = from_rgb((255, 255, 255))
self.col_mort = from_rgb((0, 0, 0))
def reset_espace(self):
"""retour à l'état initial de l'espace de l'utilisateur
-----------------------------------
Entrée : input du bouton de retour à zéro
Sortie : retour à zéro de l'espace et des variables affichées
"""
self.espace.retour_zero()
self.var_pop.set("Population=" + str(self.espace.pop_init))
self.var_str_iter.set("0 itérations ")
self.affiche_base()
def setup_canvas(self):
"""initialisation du canvas et de ses labels correspondants
-----------------------------------
Entrées : aucunes
Sortie : affichage du cadre du canvas et ce qu'il y a à l'intérieur
"""
        # frame holding the canvas, its information, the tools on the left and the settings on the right; packed in init_parametres()
self.frame_para_canvas = Frame(highlightbackground="black", bg="light grey")
        # frame holding the canvas and its information; packed in init_parametres() so the space-creation panel sits on the left
self.frame_canvas = Frame(master=self.frame_para_canvas, highlightbackground="black", highlightthickness=2, bg="light grey")
        # frame holding the information about the current space
self.frame_labels = Frame(master=self.frame_canvas, highlightbackground="black", highlightthickness=1, bg="light grey")
self.var_str_iter = StringVar()
self.var_str_iter.set(str(self.espace.n_iter) + " itérations ")
self.var_dim = StringVar()
self.var_dim.set("Dimensions : longueur=" + str(self.espace.dim[0]) + ", largeur=" + str(self.espace.dim[1]))
self.var_pop = StringVar()
self.var_pop.set("Population=" + str(self.espace.liste_pop[0]))
self.label_pop = Label(self.frame_labels, textvariable=self.var_pop)
self.label_dim = Label(self.frame_labels, textvariable=self.var_dim)
self.label_iter = Label(self.frame_labels, textvariable=self.var_str_iter)
self.label_iter.pack(side=LEFT)
self.label_dim.pack(side=LEFT)
self.label_pop.pack(side=LEFT)
self.frame_labels.pack(fill="x")
        # end of the initialization of the frame holding the current-space information
self.canvas = Canvas(self.frame_canvas, bg="grey", width=self.espace.dim[0]*10, height=self.espace.dim[1]*10)
self.canvas.bind("<Button>", self.click_canvas) # bind pour les clics seuls
self.canvas.bind("<B1-Motion>", self.creer_xy) # bind pour les clics gauches maintenus en mouvement
self.canvas.bind("<B3-Motion>", self.delete_xy) # bind pour les clics droits maintenus en mouvement
self.affiche_base() # affiche_base() pour afficher l'état initial de l'espace courant
self.canvas.pack()
def iter_affiche(self):
"""effectue 1 itération sur tout l'espace et affiche les changements
-----------------------------------
Entrées : aucunes
Sortie : affichage sur le canvas Tkinter
"""
        liste_chg = self.espace.iteration() # extract the list of changes from iteration n to n+1
self.var_str_iter.set(str(self.espace.n_iter) + " itérations")
self.var_pop.set("Population=" + str(self.espace.liste_pop[-1]))
self.affichage_frame(liste_chg)
def n_iterations(self):
"""effectue n itération sur tout l'espace et affiche les changements
-----------------------------------
Entrées : nombre entier dans le widget Entry correspondant
Sortie : affichage sur le canvas Tkinter
"""
try:
n = int(self.entree_nb.get())
for i in range(n):
self.iter_affiche()
self.frame_canvas.update_idletasks()
except Exception:
self.entree_nb.delete(0, END)
self.entree_nb.insert(0, "PAS UN NOMBRE")
def regen_dens(self):
"""lance la fonction regen_espace et gère les erreurs
-----------------------------------
Entrées : bouton correspondant cliqué
Sortie : affichage de la nouvelle génération
"""
try:
self.espace.regen_espace(int(self.entree_regen.get()))
self.var_pop.set("Population=" + str(self.espace.liste_pop[-1]))
self.affiche_base()
except Exception:
self.entree_regen.delete(0, END)
self.entree_regen.insert(0, "PAS UN NOMBRE")
def pos_canvas(self, event):
"""retourne la position du curseur dans le canvas tkinter
-----------------------------------
Entrées : objet event tkinter
Sortie : position x, y du curseur par rapport au coin du canvas
"""
x = event.x_root - self.canvas.winfo_rootx()
y = event.y_root - self.canvas.winfo_rooty()
return x, y
def delete_xy(self, event):
"""supprime la cellule aux coordonées du curseur si il est dans le canvas
pour les clicks continus (EN MOUVEMENT)
-----------------------------------
Entrées : objet event tkinter
Sortie : actualisation du canvas et de l'espace avec la nouvelle cellule
"""
x, y = self.pos_canvas(event)
x_carte = x//10
y_carte = y//10
liste_chg = []
if -1 < x_carte < self.espace.dim[0] and -1 < y_carte < self.espace.dim[1] and self.espace.carte[x_carte][y_carte] == 1:
self.espace.carte[x_carte][y_carte] = 0
self.espace.liste_pop[-1] -= 1
liste_chg.append((0, x_carte, y_carte))
self.affichage_frame(liste_chg)
self.var_pop.set("Population=" + str(self.espace.liste_pop[-1]))
def creer_xy(self, event):
"""créé une cellule aux coordonées du curseur si il est dans le canvas
pour les clicks continus (EN MOUVEMENT)
-----------------------------------
Entrées : objet event tkinter
Sortie : actualisation du canvas et de l'espace avec la nouvelle cellule
"""
x, y = self.pos_canvas(event)
x_carte = x//10
y_carte = y//10
liste_chg = []
if -1 < x_carte < self.espace.dim[0] and -1 < y_carte < self.espace.dim[1] and self.espace.carte[x_carte][y_carte] == 0:
self.espace.carte[x_carte][y_carte] = 1
self.espace.liste_pop[-1] += 1
liste_chg.append((1, x_carte, y_carte))
self.affichage_frame(liste_chg)
self.var_pop.set("Population=" + str(self.espace.liste_pop[-1]))
def click_canvas(self, event):
"""créé ou détruit une cellule aux coordonées du curseur si il est dans le canvas
pour les clicks seuls (PAS DE MOUVEMENT)
-----------------------------------
Entrées : objet event tkinter
Sortie : actualisation du canvas et de l'espace avec la nouvelle cellule
"""
x, y = self.pos_canvas(event)
x_carte = x//10
y_carte = y//10
liste_chg = []
if event.num == 1 and self.espace.carte[x_carte][y_carte] == 0:
self.espace.carte[x_carte][y_carte] = 1
self.espace.liste_pop[-1] += 1
liste_chg.append((1, x_carte, y_carte))
elif event.num == 3 and self.espace.carte[x_carte][y_carte] == 1:
self.espace.carte[x_carte][y_carte] = 0
self.espace.liste_pop[-1] -= 1
liste_chg.append((0, x_carte, y_carte))
self.affichage_frame(liste_chg)
def init_interface(self):
"""initialisation de la barre d'action en dessous du canvas
-----------------------------------
Entrées : aucunes
Sortie : affichage de la barre d'action fonctionnelle
"""
        # frame holding all the elements at the bottom of the window
self.frame_interface = Frame(highlightbackground="black", highlightthickness=3, bg="light grey")
        # frame for the iteration buttons
self.frame_boutons_iter = Frame(master=self.frame_interface, highlightbackground="black", highlightthickness=1, bg="light grey")
self.bouton_iter = Button(self.frame_boutons_iter, text="1 itération", command=self.iter_affiche)
self.entree_nb = Entry(self.frame_boutons_iter)
self.bouton_start = Button(self.frame_boutons_iter, text="N itérations", command=self.n_iterations)
self.label_iter = Label(self.frame_boutons_iter, text="itération de l'espace")
self.label_iter.pack(fill="x")
self.bouton_iter.pack(fill="x")
self.entree_nb.pack(fill="x")
self.bouton_start.pack(fill="x")
self.frame_boutons_iter.pack(side=LEFT, fill="y")
        # end of the iteration panel initialization
        # frame for the 2 "special" actions: reset, and regeneration of the current space with a new density
self.frame_special = Frame(master=self.frame_interface, highlightbackground="black", highlightthickness=1, bg="light grey")
self.label_special = Label(self.frame_special, text="reset et regénération")
self.bouton_reset = Button(self.frame_special, text="Redémarrer à l'état initial", command=self.reset_espace)
self.entree_regen = Entry(self.frame_special)
self.bouton_regen = Button(self.frame_special, text="regénérer avec la densité indiquée", command=self.regen_dens)
self.label_special.pack(fill="x")
self.bouton_reset.pack(fill="x")
self.entree_regen.pack(fill="x")
self.bouton_regen.pack(fill="x")
self.frame_special.pack(side=LEFT, fill="y")
        # end of the special panel initialization
        # frame for loading and saving
self.frame_sauv_charg = Frame(master=self.frame_interface, highlightbackground="black", highlightthickness=1, bg="light grey")
self.label_sauv_charg = Label(self.frame_sauv_charg, text="Sauvegarde et chargement")
self.entree_sauv_charg = Entry(self.frame_sauv_charg)
self.bouton_charg = Button(self.frame_sauv_charg, text="charger depuis", command=self.chargement_win)
self.bouton_sauv = Button(self.frame_sauv_charg, text="sauvegarder dans", command=self.sauvegarde_win)
self.label_sauv_charg.pack(fill="x")
self.entree_sauv_charg.pack(fill="x")
self.bouton_charg.pack(fill="x")
self.bouton_sauv.pack(fill="x")
self.frame_sauv_charg.pack(side=LEFT, fill="y")
        # end of the load/save panel initialization
        # frame for the graph display button
self.frame_graphique = Frame(master=self.frame_interface, highlightbackground="black", highlightthickness=1, bg="light grey")
self.label_graph = Label(self.frame_graphique, text="affichage du graphique\nde population")
self.bouton_graph = Button(self.frame_graphique, text="Afficher la courbe", command=self.espace.graphique)
self.label_graph.pack()
self.bouton_graph.pack(fill="both")
self.frame_graphique.pack(side=LEFT, fill="y")
        # end of the graph display panel initialization
        # frame for the bestiary
self.frame_formes = Frame(master=self.frame_interface, highlightbackground="black", highlightthickness=1, bg="light grey")
self.label_formes = Label(self.frame_formes, text="Bestiaire\n (x, y) puis nom de la forme")
self.label_formes.pack()
self.frame_pos = Frame(master=self.frame_formes, highlightbackground="black", bg="light grey")
self.entree_pos_x = Entry(self.frame_pos)
self.entree_pos_x.pack(side=LEFT, fill="x") # entrée de la position x
self.entree_pos_y = Entry(self.frame_pos)
self.entree_pos_y.pack(side=LEFT, fill="x") # entrée de la position y
self.frame_pos.pack(fill="x")
self.entree_forme = Entry(self.frame_formes)
self.entree_forme.pack(fill="x") # entrée du nom de la forme
self.bouton_forme = Button(self.frame_formes, text="afficher la forme", command=self.dessine_forme)
self.bouton_forme.pack(fill="x") # bouton pour rajouter la forme donnée
self.frame_formes.pack(side=LEFT, fill="both")
        # end of the bestiary panel initialization
self.frame_interface.pack(fill="x")
        # end of the initialization of the lower panel of the interface
def dessine_forme(self):
"""dessine une des formes du dictionaire de formes
-----------------------------------
Entrées : position x, y du coin supérieur gauche de la forme, et so nom
Sorties : forme dessinée sur l'écran ou message(s) d'erreur dans les entrées
"""
try:
self.espace.carte = self.espace.forme_donnee(int(self.entree_pos_x.get()), int(self.entree_pos_y.get()), self.espace.carte, self.entree_forme.get())
self.entree_pos_x.delete(0, END)
self.entree_pos_y.delete(0, END)
self.affiche_base()
self.var_pop.set("Population=" + str(self.espace.compte_pop()))
        except ValueError: # ValueError is only raised if x or y is not a number
self.entree_pos_x.delete(0, END)
self.entree_pos_x.insert(0, "POSITION INVALIDE")
self.entree_pos_y.delete(0, END)
self.entree_pos_y.insert(0, "POSITION INVALIDE")
        except IndexError: # IndexError is raised if x, y or a point of the added shape falls outside the map
self.entree_pos_x.delete(0, END)
self.entree_pos_x.insert(0, "POSITION HORS-CARTE")
self.entree_pos_y.delete(0, END)
self.entree_pos_y.insert(0, "POSITION HORS-CARTE")
        except KeyError: # KeyError is only raised if the requested shape does not exist
self.entree_forme.delete(0, END)
self.entree_forme.insert(0, "FORME INVALIDE")
def sauvegarde_win(self):
"""utilisation de la fonction de sauvegarde de l'espace depuis l'interface
-----------------------------------
Entrées : nom du fichier dans lequel sauvegarder donné par l'entrée correspondante
Sortie : fichier sauvegardé
"""
try:
gol.sauvegarde(self.entree_sauv_charg.get(), self.espace.carte)
self.entree_sauv_charg.delete(0, END)
self.entree_sauv_charg.insert(0, "Sauvegarde terminée")
except OSError:
self.entree_sauv_charg.delete(0, END)
self.entree_sauv_charg.insert(0, "NOM INVALIDE")
def changement_espace(self):
"""protocole à suivre pour changer les commandes que chaque bouton fait à un nouvel espace
-----------------------------------
Entrées : aucune
Sortie : affichage du nouvel espace sur un canvas de bonne taille
"""
self.canvas.delete('all')
self.canvas.config(width=self.espace.dim[0]*10, height=self.espace.dim[1]*10)
self.bouton_graph.config(command=self.espace.graphique)
        # change the graph button's command because it is the only button that directly calls a method of the space
self.var_str_iter.set(str(self.espace.n_iter) + " itérations")
self.var_pop.set("Population=" + str(self.espace.liste_pop[-1]))
self.affiche_base()
def chargement_win(self):
"""utilisation de la fonction de chargement de l'espace depuis l'interface
-----------------------------------
Entrées : nom du fichier depuis lequel charger donné par l'entrée correspondante
Sortie : espace changé au nouvel espace si tout a ben fonctionné
"""
try:
self.espace = gol.chargement(self.entree_sauv_charg.get())
self.changement_espace()
self.entree_sauv_charg.delete(0, END)
self.entree_sauv_charg.insert(0, "Chargement terminé")
except FileNotFoundError:
self.entree_sauv_charg.delete(0, END)
self.entree_sauv_charg.insert(0, "FICHIER INEXISTANT")
except IndexError:
self.entree_sauv_charg.delete(0, END)
self.entree_sauv_charg.insert(0, "MAUVAIS FORMATTAGE")
except OSError:
self.entree_sauv_charg.delete(0, END)
self.entree_sauv_charg.insert(0, "FICHIER INEXISTANT")
def introduction(self):
"""Fonction pour avoir une fenêtre apparaître afin d'expliquer le jeu de la vie et ses règles
---------------------
Entrées : aucunes
Sorties : fenêtre d'introduction au programme"""
fenetre_intro = Toplevel(self.master)
texte_intro = """Bienvenue,
ceci est une recréation en python du Jeu de la Vie créé par <NAME> en 1970
le jeu se déroule sur un quadrillage, ici torique, où chaque cellule est vivante ou morte
les règles sont simples:
- si une cellule morte a exactement trois voisins vivants, elle devient vivante
- si une cellule vivante a entre 2 et 3 voisins vivants inclu, elle reste vivante, sinon, elle meurt
pour commencer à jouer, fermez cette fenêtre !
"""
label_intro = Label(fenetre_intro, text=texte_intro)
label_intro.pack()
def affichage_frame(self, liste_chg):
"""affichage d'une image sur un canvas déjà rempli
-----------------------------------
Entrées : liste de changements opérés par une itération
Sortie : affichage mis à jour avec les bons changements
"""
        for chg, x, y in liste_chg: # chg is 0 or 1, x and y are integer coordinates
            self.canvas.delete(self.rects[x][y]) # delete the rectangle displayed at this position so they do not pile up over time
if chg == 0:
self.rects[x][y] = self.canvas.create_rectangle(x*10, y*10, (x+1)*10, (y+1)*10, fill=self.col_mort, outline=from_rgb((128, 128, 128)))
            else: # replace the value at this position of the rectangle grid with a new rectangle
self.rects[x][y] = self.canvas.create_rectangle(x*10, y*10, (x+1)*10, (y+1)*10, fill=self.col_vie, outline=from_rgb((128, 128, 128)))
def affiche_base(self):
"""affichage d'une image de la carte de l'espace de la fenetre,
lent mais essentiel pour l'initialisation du canvas, par exemple
-----------------------------------
Entrées : aucune
Sortie : mise à jour du canvas
"""
        # create a two-dimensional list to store the identification values of the canvas rectangles
self.rects = []
self.canvas.delete('all')
for x in range(self.espace.dim[0]):
self.rects.append([])
for y in range(self.espace.dim[1]):
if self.espace.carte[x][y] == 0:
self.rects[x].append(self.canvas.create_rectangle(x*10, y*10, (x+1)*10, (y+1)*10, fill=self.col_mort, outline=from_rgb((128, 128, 128))))
else:
self.rects[x].append(self.canvas.create_rectangle(x*10, y*10, (x+1)*10, (y+1)*10, fill=self.col_vie, outline=from_rgb((128, 128, 128))))
def creer_nouv_espace(self):
"""création d'un nouvel espace selon les tailles données
-----------------------------------
Entrées : dimensions du nouvel espace issues des paramètres
Sortie : mise à jour du canvas
"""
self.espace = gol.Espace(self.taille_x_para.get(), self.taille_y_para.get(), 5)
self.changement_espace()
self.affiche_base()
def applique_param(self):
"""applique les paramètres choisis dans le volet à droite du canvas
-----------------------------------
Entrées : couleurs RGB des cellules mortes et vivantes, et nombre de voisins pour les cellules
Sorties : changement des couleurs des cellules dans le Canvas et des règles du jeu
"""
self.col_vie = from_rgb((self.rouge_vie.get(), self.vert_vie.get(), self.bleu_vie.get()))
self.col_mort = from_rgb((self.rouge_morts.get(), self.vert_morts.get(), self.bleu_morts.get()))
self.espace.voisins_min = self.nb_min_vois.get()
self.espace.voisins_max = self.nb_max_vois.get()
self.espace.voisins_res = self.nb_res.get()
self.affiche_base()
def clique_bestiaire(self, event):
"""lorsque l'utilisateur clique sur la liste, met à jour la forme choisie
-----------------------------------
Entrées : aucunes
Sorties : changement de la forme choisie dans le boite de selection du bestiaire
"""
bete_choisie = self.liste_bestiaire.get("active")
self.entree_forme.delete(0, END)
self.entree_forme.insert("end", bete_choisie)
def init_parametres(self):
"""méthode pour initialiser le volet de paramétrage à côté du Canvas.
options : - changement de taille de carte
- changement de couleur d'affichages des cellules mortes et vivantes
- changement de nombre minimum et maximum d evoisins pour vivantes
- changement de nombre de voisins pour vivre
- bouton pour appliquer les changements
-----------------------------------
Entrées : aucune
Sortie : ajout de la liste du contenu du bestiaire,
de l'espace de création d'un nouvel espace, et des paramètres dans la fenêtre
"""
self.frame_espace_bestiaire = Frame(master=self.frame_para_canvas, highlightbackground="black", highlightthickness=3, bg="light grey")
        # frame used to change the map size by creating a new one
self.frame_espace = Frame(master=self.frame_espace_bestiaire, highlightbackground="black", highlightthickness=1, bg="light grey")
self.label_espace = Label(master=self.frame_espace, text="volet de création d'un espace")
self.label_espace.pack(fill="x")
self.label_taille_x = Label(master=self.frame_espace, text="Longueur de l'espace voulu")
self.label_taille_x.pack()
self.taille_x_para = IntVar()
self.taille_x_para.set(self.espace.dim[0])
self.taille_x_scale = Scale(master=self.frame_espace, from_=5, to=100, variable=self.taille_x_para, orient=HORIZONTAL)
self.taille_x_scale.pack(fill="x")
self.label_taille_y = Label(master=self.frame_espace, text="Largeur de l'espace voulu")
self.label_taille_y.pack()
self.taille_y_para = IntVar()
self.taille_y_para.set(self.espace.dim[1])
self.taille_y_scale = Scale(master=self.frame_espace, from_=5, to=100, variable=self.taille_y_para, orient=HORIZONTAL)
self.taille_y_scale.pack(fill="x")
self.bouton_nouv_espace = Button(master=self.frame_espace, text="créer un nouvel espace", command=self.creer_nouv_espace)
self.bouton_nouv_espace.pack(fill="x")
self.frame_espace.pack(fill="x")
        # end of the initialization of this first panel
        # initialization of the display of the current bestiary content
self.frame_bestiaire = Frame(master=self.frame_espace_bestiaire, highlightbackground="black", highlightthickness=3, bg="light grey")
self.label_bestiaire = Label(master=self.frame_bestiaire, text="contenu du bestiaire")
self.label_bestiaire.pack()
self.liste_bestiaire = Listbox(master=self.frame_bestiaire)
for bete in gol.Espace.formes.keys():
self.liste_bestiaire.insert("end", bete)
self.liste_bestiaire.bind("<Button>", self.clique_bestiaire)
self.liste_bestiaire.pack(fill="x")
self.frame_bestiaire.pack(fill="x")
        # end of the initialization of the current bestiary content display
        # creation of the frame for the other settings, which are applied with a separate button
self.frame_autres = Frame(master=self.frame_para_canvas, highlightbackground="black", highlightthickness=1, bg="light grey")
        # frame for the RGB colour sliders for dead cells
self.frame_col_morts = Frame(master=self.frame_autres, highlightbackground="black", highlightthickness=1, bg="light grey")
self.label_col_morts = Label(master=self.frame_col_morts, text="couleur des cellules mortes")
self.label_col_morts.pack()
self.rouge_morts = IntVar()
self.rouge_morts.set(0)
self.rouge_morts_scale = Scale(master=self.frame_col_morts, from_=0, to=255, variable=self.rouge_morts, orient=HORIZONTAL, bg="red")
self.rouge_morts_scale.pack(fill="x")
self.vert_morts = IntVar()
self.vert_morts.set(0)
self.vert_morts_scale = Scale(master=self.frame_col_morts, from_=0, to=255, variable=self.vert_morts, orient=HORIZONTAL, bg="green")
self.vert_morts_scale.pack(fill="x")
self.bleu_morts = IntVar()
self.bleu_morts.set(0)
self.bleu_morts_scale = Scale(master=self.frame_col_morts, from_=0, to=255, variable=self.bleu_morts, orient=HORIZONTAL, bg="blue")
self.bleu_morts_scale.pack(fill="x")
self.frame_col_morts.pack(fill="x")
        # end of the initialization of the RGB colour sliders for dead cells
        # frame for the RGB colour sliders for living cells
self.frame_col_vie = Frame(master=self.frame_autres, highlightbackground="black", highlightthickness=1, bg="light grey")
self.label_col_vie = Label(master=self.frame_col_vie, text="couleur des cellules vivantes")
self.label_col_vie.pack()
self.rouge_vie = IntVar()
self.rouge_vie.set(255)
self.rouge_vie_scale = Scale(master=self.frame_col_vie, from_=0, to=255, variable=self.rouge_vie, orient=HORIZONTAL, bg="red")
self.rouge_vie_scale.pack(fill="x")
self.vert_vie = IntVar()
self.vert_vie.set(255)
self.vert_vie_scale = Scale(master=self.frame_col_vie, from_=0, to=255, variable=self.vert_vie, orient=HORIZONTAL, bg="green")
self.vert_vie_scale.pack(fill="x")
self.bleu_vie = IntVar()
self.bleu_vie.set(255)
self.bleu_vie_scale = Scale(master=self.frame_col_vie, from_=0, to=255, variable=self.bleu_vie, orient=HORIZONTAL, bg="blue")
self.bleu_vie_scale.pack(fill="x")
self.frame_col_vie.pack(fill="x")
        # end of the initialization of the RGB colour sliders for living cells
        # frame for the rule sliders of the current space
self.frame_regles_espace = Frame(master=self.frame_autres, highlightbackground="black", highlightthickness=1, bg="light grey")
self.label_regles = Label(master=self.frame_regles_espace, text="règles de l'espace courant")
self.label_regles.pack()
self.label_res = Label(master=self.frame_regles_espace, text="nombre de voisins pour ressuciter")
self.label_res.pack()
self.nb_res = IntVar()
self.nb_res.set(3)
self.scale_res = Scale(master=self.frame_regles_espace, from_=0, to=8, variable=self.nb_res, orient=HORIZONTAL)
self.scale_res.pack(fill="x")
self.label_min_vois = Label(master=self.frame_regles_espace, text="minimum de voisins pour survivre")
self.label_min_vois.pack()
self.nb_min_vois = IntVar()
self.nb_min_vois.set(2)
self.scale_min_vois = Scale(master=self.frame_regles_espace, from_=0, to=8, variable=self.nb_min_vois, orient=HORIZONTAL)
self.scale_min_vois.pack(fill="x")
self.label_max_vois = Label(master=self.frame_regles_espace, text="maximum de voisins pour survivre")
self.label_max_vois.pack()
self.nb_max_vois = IntVar()
self.nb_max_vois.set(3)
self.scale_max_vois = Scale(master=self.frame_regles_espace, from_=0, to=8, variable=self.nb_max_vois, orient=HORIZONTAL)
self.scale_max_vois.pack(fill="x")
self.frame_regles_espace.pack(fill="x")
        # end of the initialization of the rule sliders of the current space
        # button to apply the colour and rule settings
self.bouton_applique = Button(master=self.frame_autres, text="appliquer les paramètres", command=self.applique_param)
self.bouton_applique.pack(fill="x")
self.frame_espace_bestiaire.pack(side=LEFT)
self.frame_canvas.pack(side=LEFT)
self.frame_autres.pack(side=LEFT)
self.frame_para_canvas.pack()
def main():
"""Fonction Main qui assure le démarrage du programme
-----------------------------------
Entrées : aucunes
Sorties : fin de programme si il y a un crash
"""
espace_user = gol.Espace(50, 50, 300)
master = Tk()
wind = Fenetre(espace_user, master)
wind.setup_canvas()
wind.init_parametres()
wind.init_interface()
wind.introduction()
try:
master.mainloop()
except Exception:
master.destroy()
main()
# still missing:
# more shapes
| 3.734375 | 4 |
tests/CSIRO_wire_break_validation.py | BillMills/AutoQC | 17 | 12787430 | <filename>tests/CSIRO_wire_break_validation.py
import qctests.CSIRO_wire_break
import util.testingProfile
import numpy
##### CSIRO_wire_break_test ---------------------------------------------------
def test_CSIRO_wire_break():
'''
Spot-check the nominal behavior of the CSIRO wire break test.
'''
# too cold at the bottom of xbt profile
p = util.testingProfile.fakeProfile([-2.399,-2.399,-2.4001], [10,20,30], probe_type=2)
qc = qctests.CSIRO_wire_break.test(p, None)
truth = numpy.zeros(3, dtype=bool)
truth[2] = True
assert numpy.array_equal(qc, truth), 'failed to flag too-cold temperature at bottom of profile'
# too hot at bottom of xbt profile
p = util.testingProfile.fakeProfile([31.99,31.99,32.001], [10,20,30], probe_type=2)
qc = qctests.CSIRO_wire_break.test(p, None)
truth = numpy.zeros(3, dtype=bool)
truth[2] = True
assert numpy.array_equal(qc, truth), 'failed to flag too-hot temperature at bottom of profile'
# right on border - no flag
p = util.testingProfile.fakeProfile([-2.399,-2.399,-2.4], [10,20,30], probe_type=2)
qc = qctests.CSIRO_wire_break.test(p, None)
truth = numpy.zeros(3, dtype=bool)
print(qc)
print(truth)
assert numpy.array_equal(qc, truth), 'flagged marginally cold temperature at bottom of profile'
p = util.testingProfile.fakeProfile([31.99,31.99,32], [10,20,30], probe_type=2)
qc = qctests.CSIRO_wire_break.test(p, None)
truth = numpy.zeros(3, dtype=bool)
assert numpy.array_equal(qc, truth), 'flagged marginally hot temperature at bottom of profile'
# don't flag if not an xbt
p = util.testingProfile.fakeProfile([0,0,-100], [10,20,30], probe_type=1)
qc = qctests.CSIRO_wire_break.test(p, None)
truth = numpy.zeros(3, dtype=bool)
assert numpy.array_equal(qc, truth), 'flagged non-xbt profile'
# don't flag if not at bottom of profile
p = util.testingProfile.fakeProfile([0,32.01,31.99], [10,20,30], probe_type=2)
qc = qctests.CSIRO_wire_break.test(p, None)
truth = numpy.zeros(3, dtype=bool)
assert numpy.array_equal(qc, truth), "flagged hot temperature that wasn't at bottom of profile"
# flag both sides of a gap
p = util.testingProfile.fakeProfile([9,9,10], [10,20,30], probe_type=2)
qc = qctests.CSIRO_wire_break.test(p, None)
truth = numpy.ones(3, dtype=bool)
truth[0] = False
assert numpy.array_equal(qc, truth), "should flag both sides of a gap"
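# These checks can be run through the project's usual test runner (for example
# `pytest tests/CSIRO_wire_break_validation.py`, assuming pytest-style collection of the
# test_ function above), or by importing and calling test_CSIRO_wire_break() directly.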
| 2.21875 | 2 |
arekit/contrib/experiment_rusentrel/entities/factory.py | nicolay-r/AREk | 18 | 12787431 | <reponame>nicolay-r/AREk
from arekit.contrib.experiment_rusentrel.entities.str_rus_cased_fmt import RussianEntitiesCasedFormatter
from arekit.contrib.experiment_rusentrel.entities.str_rus_nocased_fmt import RussianEntitiesFormatter
from arekit.contrib.experiment_rusentrel.entities.str_simple_fmt import StringEntitiesSimpleFormatter
from arekit.contrib.experiment_rusentrel.entities.str_simple_sharp_prefixed_fmt import \
SharpPrefixedEntitiesSimpleFormatter
from arekit.contrib.experiment_rusentrel.entities.str_simple_uppercase_fmt import SimpleUppercasedEntityFormatter
from arekit.contrib.experiment_rusentrel.entities.types import EntityFormatterTypes
def create_entity_formatter(fmt_type, create_russian_pos_tagger_func=None):
""" Factory method for entity formatters, applicable in bert.
"""
assert(isinstance(fmt_type, EntityFormatterTypes))
assert(callable(create_russian_pos_tagger_func) or create_russian_pos_tagger_func is None)
if fmt_type == EntityFormatterTypes.RussianCased:
return RussianEntitiesCasedFormatter(create_russian_pos_tagger_func())
elif fmt_type == EntityFormatterTypes.SimpleSharpPrefixed:
return SharpPrefixedEntitiesSimpleFormatter()
elif fmt_type == EntityFormatterTypes.RussianSimple:
return RussianEntitiesFormatter()
elif fmt_type == EntityFormatterTypes.Simple:
return StringEntitiesSimpleFormatter()
elif fmt_type == EntityFormatterTypes.SimpleUppercase:
return SimpleUppercasedEntityFormatter()
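# A brief usage sketch of the factory above: the simple formatters need no POS tagger,
# while EntityFormatterTypes.RussianCased expects a zero-argument callable that builds
# one (not constructed here, since the concrete tagger class is project-specific).
def _formatter_factory_sketch():
    simple_fmt = create_entity_formatter(EntityFormatterTypes.Simple)
    prefixed_fmt = create_entity_formatter(EntityFormatterTypes.SimpleSharpPrefixed)
    return simple_fmt, prefixed_fmt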
| 2.078125 | 2 |
parslr/__init__.py | maximmenshikov/parslr | 0 | 12787432 | from parslr.Parslr import Parslr
from parslr.parslr_args import prepare_parser
| 1.101563 | 1 |
pyassim/apf.py | ZoneTsuyoshi/pyassim | 0 | 12787433 | <reponame>ZoneTsuyoshi/pyassim
"""
==========================================
Inference with Auxiliary Particle Filter
==========================================
This module implements the Particle Filter and Particle Smoother
for nonlinear, non-Gaussian state space models.
"""
import numpy as np
import numpy.random as rd
# from scipy import linalg
from .utils import array1d, array2d, check_random_state, get_params, \
preprocess_arguments, check_random_state
from .util_functions import _parse_observations, _last_dims, \
_determine_dimensionality
class AuxiliaryParticleFilter(object):
"""Implements the Particle Filter and Particle Smoother.
This class implements the Particle Filter and Particle Smoother
for a Nonlinear Non-Gaussian model specified by,
.. math::
        x_{t+1} &= f_{t}(x_{t}) + v_{t}, \quad v_{t} \sim p(v_{t}) \\
        y_{t} &= h_{t}(x_{t}) + w_{t}, \quad w_{t} \sim p(w_{t}) \\
The Particle Filter is an algorithm designed to estimate
:math:`P(x_t | y_{0:t})`. All state transitions are nonlinear with
    non-Gaussian distributed noise, and observations are nonlinear with non-Gaussian
distributed noise.
Similarly, the Particle Smoother is an algorithm designed to estimate
:math:`P(x_t | y_{0:T-1})`.
Args:
y, observation [n_time, n_dim_obs] {numpy-array, float}
also known as :math:`y`. observation value
initial_mean [n_dim_sys] {float}
also known as :math:`\mu_0`. initial state mean
initial_covariance [n_dim_sys, n_dim_sys] {numpy-array, float}
also known as :math:`\Sigma_0`. initial state covariance
f, transition_functions [n_time] {function}
also known as :math:`f`. transition function from x_{t-1} to x_{t}
q, transition_noise [n_time - 1] {(method, parameters)}
also known as :math:`p(v)`. method and parameters of transition
noise. noise distribution must be parametric and need input variable
`size`, which mean number of ensemble
lf, likelihood_functions [n_time] or [] {function}
also known as :math:`p(w)`. likelihood function between x_t and y_t.
only need kernel part and not need regulation part. likelihood function
must be parameteric and need `likelihood_function_parameters`.
lfp, likelihood_function_parameters [n_time, n_param] or [n_param]
{numpy-array, float}
: parameters for `likelihood_functions`
likelihood_function_is_log_form {boolean}
: which `likelihood_functions` are log form. If true,
`likelihood_functions` mean log likelihood function. If false,
`likelihood_functions` mean likelihood function. For example,
            if you use a Gaussian distribution, whose kernel is exponential in
            form, then you should use the log form because of overflow problems.
observation_parameters_time_invariant {boolean}
: which observation parameters are time-invariant. If true,
`likelihood_functions` and `likelihood_function_parameters` has
time-invariantness. If false, they are time-variant
eta, regularization_noise [n_time - 1] {(method, parameters)}
: noise distribution for regularization. noise distribution
must be parametric and need input variable `size`,
which mean number of ensemble
n_particle {int}
: number of particles (ensembles)
n_dim_sys {int}
: dimension of system variable
n_dim_obs {int}
: dimension of observation variable
dtype {np.dtype}
: dtype of numpy-array
seed {int}
: random seed
Attributes:
regularization {boolean}
: which particle filter has regularization. If true,
after filtering step, add state variables to regularization noise
because of protecting from degeneration of particle.
If false, doesn't add regularization noise.
"""
def __init__(self, observation = None,
initial_mean = None, initial_covariance = None,
transition_functions = None, transition_noise = None,
likelihood_functions = None, likelihood_function_parameters = None,
likelihood_function_is_log_form = True,
observation_parameters_time_invariant = True,
regularization_noise = None,
n_particle = 100, n_dim_sys = None, n_dim_obs = None,
dtype = np.float32, seed = 10) :
# check order of tensor and mask missing values
self.y = _parse_observations(observation)
# determine dimensionality
self.n_dim_sys = _determine_dimensionality(
[(initial_mean, array1d, -1),
(initial_covariance, array2d, -2)],
n_dim_sys
)
self.n_dim_obs = _determine_dimensionality(
[(observation, array1d, -1)],
n_dim_obs
)
# transition_functions
# None -> system + noise
if transition_functions is None:
self.f = [lambda x, v: x + v]
else:
self.f = transition_functions
# transition_noise
# None -> standard normal distribution
if transition_noise is None:
self.q = (rd.multivariate_normal,
[np.zeros(self.n_dim_sys, dtype = dtype),
np.eye(self.n_dim_sys, dtype = dtype)])
else:
self.q = transition_noise
# initial_mean None -> np.zeros
if initial_mean is None:
self.initial_mean = np.zeros(self.n_dim_sys, dtype = dtype)
else:
self.initial_mean = initial_mean.astype(dtype)
# initial_covariance None -> np.eye
if initial_covariance is None:
self.initial_covariance = np.eye(self.n_dim_sys, dtype = dtype)
else:
self.initial_covariance = initial_covariance.astype(dtype)
# likelihood_functions, likelihood_function_parameters None
if likelihood_function_parameters is None:
self.likelihood_function_is_log_form = likelihood_function_is_log_form
self.observation_parameters_time_invariant \
= observation_parameters_time_invariant
self.lf = likelihood_functions
self.lfp = likelihood_function_parameters
else:
self.likelihood_function_is_log_form = True
self.observation_parameters_time_invariant = True
self.lf = self._log_norm_likelihood
# use normal likelihood, but want to change parameter R
if likelihood_functions is None:
self.lfp = [np.eye(self.n_dim_obs, dtype = dtype)]
else:
self.lfp = likelihood_function_parameters
# regularization noise
if regularization_noise is None:
self.regularization = False
else:
self.eta = regularization_noise
self.regularization = True
self.n_particle = n_particle
np.random.seed(seed)
self.dtype = dtype
self.log_likelihood = - np.inf
def _norm_likelihood(self, y, mean, covariance):
"""calculate likelihood for Gauss distribution whose parameters
are `mean` and `covariance`
Args:
y [n_dim_obs] {numpy-array, float}
                observation point whose likelihood is measured
mean [n_particle, n_dim_obs] {numpy-array, float}
: mean of Gauss distribution
covariance [n_dim_obs, n_dim_obs] {numpy-array, float}
: covariance of Gauss distribution
"""
Y = np.zeros((self.n_dim_obs, self.n_particle), dtype = self.dtype)
Y.T[:] = y
return np.exp((- 0.5 * (Y - mean).T @ np.linalg.pinv(covariance) \
@ (Y - mean))[:, 0])
def _log_norm_likelihood(self, y, mean, covariance) :
"""calculate log likelihood for Gauss distribution whose parameters
are `mean` and `covariance`
Args:
y [n_dim_obs] {numpy-array, float}
                observation point whose likelihood is measured
mean [n_particle, n_dim_obs] {numpy-array, float}
: mean of Gauss distribution
covariance [n_dim_obs, n_dim_obs] {numpy-array, float}
: covariance of Gauss distribution
"""
Y = np.zeros((self.n_dim_obs, self.n_particle), dtype = self.dtype)
Y.T[:] = y
return (
- 0.5 * (Y - mean).T @ np.linalg.pinv(covariance) @ (Y - mean)
).diagonal()
def _emperical_cummulative_inv(self, w_cumsum, idx, u):
"""calculate inverse map for emperical cummulative function
Args:
w_cumsum [n_particle] {numpy-array, float}
: emperical cummulative function for particles
idx [n_particle] {numpy-array, int}
: array of ID which are assigined to each particle
u {float}
: value between 0 and 1
Returns (int):
like floor function, ID number which maximize set of ID numbers,
set are less than `u`
"""
if np.any(w_cumsum < u) == False:
return 0
k = np.max(idx[w_cumsum < u])
return k + 1
def _resampling(self, weights):
"""caluclate standard resampling method
Args:
weights {numpy-array, float} [n_particle]
: set of likelihoodness for each particle
Returns:
k_list {numpy-array, float} [n_particle]
: index set which represent particle number remeining
"""
w_cumsum = np.cumsum(weights)
# generate basic labels
idx = np.asanyarray(range(self.n_particle))
# storage for k
k_list = np.zeros(self.n_particle, dtype = np.int32)
# get index for resampling from weights with uniform distribution
for i, u in enumerate(rd.uniform(0, 1, size = self.n_particle)):
k = self._emperical_cummulative_inv(w_cumsum, idx, u)
k_list[i] = k
return k_list
def _stratified_resampling(self, weights):
"""caluclate stratified resampling method
Args:
weights {numpy-array, float} [n_particle]
: set of likelihoodness for each particle
Returns:
k_list {numpy-array, float} [n_particle]
: index set which represent particle number remeining
"""
idx = np.asanyarray(range(self.n_particle))
u0 = rd.uniform(0, 1 / self.n_particle)
u = [1 / self.n_particle*i + u0 for i in range(self.n_particle)]
w_cumsum = np.cumsum(weights)
k = np.asanyarray([
self._emperical_cummulative_inv(w_cumsum, idx, val) for val in u
])
return k
def filter(self):
"""Calculate prediction and filter for observation times.
Attributes (self):
x_pred_mean [n_time+1, n_dim_sys] {numpy-array, float}
                : mean of `x_pred` over the particles at time t
            x_filt_mean [n_time+1, n_dim_sys] {numpy-array, float}
                : mean of `x_filt` over the particles
Attributes (local):
T {int}
: length of time-series
x_pred [n_dim_sys, n_particle]
: hidden state at time t given observations for each particle
x_filt [n_dim_sys, n_particle] {numpy-array, float}
: hidden state at time t given observations for each particle
w [n_particle] {numpy-array, float}
                : weight (likelihood) of each particle
v [n_dim_sys, n_particle] {numpy-array, float}
                : ensemble members of system noise
k [n_particle] {numpy-array, float}
: index numbers for resampling
"""
# length of time-series data
T = len(self.y)
# initial filter, prediction
self.x_pred_mean = np.zeros((T + 1, self.n_dim_sys), dtype = self.dtype)
self.x_filt_mean = np.zeros((T + 1, self.n_dim_sys), dtype = self.dtype)
x_pred = np.zeros((self.n_dim_sys, self.n_particle), dtype = self.dtype)
# initial distribution
x_filt = rd.multivariate_normal(self.initial_mean, self.initial_covariance,
size = self.n_particle).T
# initial setting
self.x_pred_mean[0] = self.initial_mean
self.x_filt_mean[0] = self.initial_mean
for t in range(T):
# visualize calculating times
print("\r filter calculating... t={}".format(t), end="")
## filter update
# calculate prediction step
f = _last_dims(self.f, t, 1)[0]
# raise parametric system noise
v = self.q[0](*self.q[1], size = self.n_particle).T
# calculate ensemble prediction
x_pred = f(*[x_filt, v])
# calculate mean of ensemble prediction
self.x_pred_mean[t + 1] = np.mean(x_pred, axis = 1)
# treat missing values
if np.any(np.ma.getmask(self.y[t])):
x_filt = x_pred
else:
# (log) likelihood for each particle for y[t]
lf = self._last_likelihood(self.lf, t)
lfp = self._last_likelihood(self.lfp, t)
try:
w = lf(self.y[t], x_pred, *lfp)
except:
raise ValueError("you must check likelihood_functions"
+ "and parameters.")
                # avoid numerical underflow of the weights
if self.likelihood_function_is_log_form:
w = np.exp(w - np.max(w))
else:
w = w / np.max(w)
# normalize weights
w = w / np.sum(w)
# calculate resampling
k = self._stratified_resampling(w)
x_filt = x_pred[:, k]
# add regularization
if self.regularization:
x_filt += self.eta[0](*self.eta[1], size = self.n_particle).T
# calculate mean of filtering results
self.x_filt_mean[t + 1] = np.mean(x_filt, axis = 1)
def get_predicted_value(self, dim = None) :
"""Get predicted value
Args:
dim {int} : dimensionality for extract from predicted result
Returns (numpy-array, float)
            : mean of hidden state at time t given observations
            from times [0...t-1]
"""
# if not implement `filter`, implement `filter`
try :
self.x_pred_mean[0]
except :
self.filter()
if dim is None:
return self.x_pred_mean[1:]
elif dim <= self.x_pred_mean.shape[1]:
return self.x_pred_mean[1:, int(dim)]
else:
raise ValueError("The dim must be less than "
                             + str(self.x_pred_mean.shape[1]) + ".")
def get_filtered_value(self, dim = None) :
"""Get filtered value
Args:
dim {int} : dimensionality for extract from filtered result
Returns (numpy-array, float)
: mean of hidden state at time t given observations
from times [0...t]
"""
# if not implement `filter`, implement `filter`
try :
self.x_filt_mean[0]
except :
self.filter()
if dim is None:
return self.x_filt_mean[1:]
elif dim <= self.x_filt_mean.shape[1]:
return self.x_filt_mean[1:, int(dim)]
else:
raise ValueError("The dim must be less than "
                             + str(self.x_filt_mean.shape[1]) + ".")
def smooth(self, lag = 10):
"""calculate fixed lag smooth. Because of memory saving,
also describe filtering step
Args:
lag {int}
: lag of smoothing
Attributes (self):
x_pred_mean [n_time+1, n_dim_sys] {numpy-array, float}
                : mean of `x_pred` over the particles at time t
            x_filt_mean [n_time+1, n_dim_sys] {numpy-array, float}
                : mean of `x_filt` over the particles
            x_smooth_mean [n_time, n_dim_sys] {numpy-array, float}
                : mean of `x_smooth` over the particles at time t
Attributes (local):
T {int}
: length of time-series
x_pred [n_dim_sys, n_particle]
: hidden state at time t given observations for each particle
x_filt [n_dim_sys, n_particle] {numpy-array, float}
: hidden state at time t given observations for each particle
x_smooth [n_time, n_dim_sys, n_particle] {numpy-array, float}
: hidden state at time t given observations[:t+lag] for each particle
w [n_particle] {numpy-array, float}
: weight (likelihoodness) lambda of each particle
v [n_dim_sys, n_particle] {numpy-array, float}
                : ensemble members of system noise
k [n_particle] {numpy-array, float}
: index numbers for resampling
"""
# length of time-series data
T = len(self.y)
# initial filter, prediction
self.x_pred_mean = np.zeros((T + 1, self.n_dim_sys), dtype = self.dtype)
self.x_filt_mean = np.zeros((T + 1, self.n_dim_sys), dtype = self.dtype)
self.x_smooth_mean = np.zeros((T + 1, self.n_dim_sys), dtype = self.dtype)
x_pred = np.zeros((self.n_dim_sys, self.n_particle), dtype = self.dtype)
x_filt = np.zeros((self.n_dim_sys, self.n_particle), dtype = self.dtype)
x_smooth = np.zeros((T + 1, self.n_dim_sys, self.n_particle),
dtype = self.dtype)
# initial distribution
x_filt = rd.multivariate_normal(self.initial_mean, self.initial_covariance,
size = self.n_particle).T
# initial setting
self.x_pred_mean[0] = self.initial_mean
self.x_filt_mean[0] = self.initial_mean
self.x_smooth_mean[0] = self.initial_mean
for t in range(T):
print("\r filter and smooth calculating... t={}".format(t), end="")
## filter update
# calculate prediction step
f = _last_dims(self.f, t, 1)[0]
# raise parametric system noise
v = self.q[0](*self.q[1], size = self.n_particle).T
# calculate ensemble prediction
x_pred = f(*[x_filt, v])
# calculate mean of predicted values
self.x_pred_mean[t + 1] = np.mean(x_pred, axis = 1)
# treat missing values
if np.any(np.ma.getmask(self.y[t])):
x_filt = x_pred
else:
# (log) likelihood for each particle for y[t]
lf = self._last_likelihood(self.lf, t)
lfp = self._last_likelihood(self.lfp, t)
try:
w = lf(self.y[t], x_pred, *lfp)
except:
raise ValueError("you must check likelihood_functions"
+ " and parameters.")
# avoid evaporation
if self.likelihood_function_is_log_form:
w = np.exp(w - np.max(w))
else:
                    w = w / np.max(w)
                # normalize weights (as in `filter`) before resampling
                w = w / np.sum(w)
                # calculate resampling
k = self._stratified_resampling(w)
x_filt = x_pred[:, k]
# add regularization
if self.regularization:
x_filt += self.eta[0](*self.eta[1], size = self.n_particle).T
# substitute initial smooth value
x_smooth[t + 1] = x_filt
# calculate mean of filtering results
self.x_filt_mean[t + 1] = np.mean(x_filt, axis = 1)
# calculate fixed lag smoothing
if (t > lag - 1) :
x_smooth[t - lag:t + 1] = x_smooth[t - lag:t + 1, :, k]
else :
x_smooth[:t + 1] = x_smooth[:t + 1, :, k]
# calculate mean of smoothing results
self.x_smooth_mean = np.mean(x_smooth, axis = 2)
def get_smoothed_value(self, dim = None) :
"""Get RTS smoothed value
Args:
dim {int} : dimensionality for extract from RTS smoothed result
Returns (numpy-array, float)
: mean of hidden state at time t given observations
from times [0...T]
"""
        # if `smooth` has not been run yet, run it first
try :
self.x_smooth_mean[0]
except :
self.smooth()
if dim is None:
return self.x_smooth_mean[1:]
        elif dim < self.x_smooth_mean.shape[1]:
return self.x_smooth_mean[1:, int(dim)]
else:
raise ValueError("The dim must be less than "
+ self.x_smooth_mean.shape[1] + ".")
    # last likelihood function and parameters (determine which likelihood function and parameters to use)
def _last_likelihood(self, X, t):
"""Extract the final dimensions of `X`
Extract the final `ndim` dimensions at index `t` if `X` has >= `ndim` + 1
dimensions, otherwise return `X`.
Args:
X : array with at least dimension `ndims`
t : int
index to use for the `ndims` + 1th dimension
Returns:
Y : array with dimension `ndims`
the final `ndims` dimensions indexed by `t`
"""
if self.observation_parameters_time_invariant:
return X
else:
try:
return X[t]
except:
raise ValueError("you must check which likelihood " +
"parameters are time-invariant.")
| 3.078125 | 3 |
problems/leetcode/lt-1839.py | neerajp99/algorithms | 1 | 12787434 | <filename>problems/leetcode/lt-1839.py<gh_stars>1-10
# 1839. Longest Substring Of All Vowels in Order
"""
A string is considered beautiful if it satisfies the following conditions:
- Each of the 5 English vowels ('a', 'e', 'i', 'o', 'u') must appear at least once in it.
- The letters must be sorted in alphabetical order (i.e. all 'a's before 'e's, all 'e's before 'i's, etc.).
- For example, strings "aeiou" and "aaaaaaeiiiioou" are considered beautiful, but "uaeio", "aeoiu", and "aaaeeeooo" are not beautiful.
Given a string word consisting of English vowels, return the length of the longest beautiful substring of word. If no such substring exists, return 0.
A substring is a contiguous sequence of characters in a string.
Input: word = "aeiaaioaaaaeiiiiouuuooaauuaeiu"
Output: 13
Explanation: The longest beautiful substring in word is "aaaaeiiiiouuu" of length 13.
"""
class Solution:
def longestBeautifulSubstring(self, word: str) -> int:
max_length = 0
# Handle edge case
if len(word) == 1:
return 0
ptr1 = 0
ptr2 = 1
        curr = set(word[0])
while ptr2 < len(word):
if ord(word[ptr2 - 1]) <= ord(word[ptr2]):
curr.add(word[ptr2])
if len(curr) == 5:
max_length = max(max_length, ptr2 - ptr1 + 1)
else:
# Create the previous pointer as next pointer
ptr1 = ptr2
curr = set(word[ptr1])
# Increment the next pointer by 1
ptr2 += 1
return max_length | 3.734375 | 4 |
3ds/tests/file-test.py | Katistic/3ds_monty | 16 | 12787435 | <reponame>Katistic/3ds_monty
from citrus import *
gfx.init_default()
console.init(gfx.SCREEN_TOP)
d = open('/tmp.py', 'r').read()
print(d)
while apt.main_loop():
hid.scan_input()
if hid.keys_down() & hid.KEY_START:
break
gfx.flush_buffers()
gfx.swap_buffers()
gsp.wait_for_vblank()
gfx.exit() | 1.914063 | 2 |
app/main.py | acutaia/goeasy-ublox_api | 0 | 12787436 | """
App main entry point
:author: <NAME>
:copyright: Copyright 2021, LINKS Foundation
:version: 1.0.0
..
Copyright 2021 LINKS Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Third Party
from fastapi import FastAPI
from fastapi.openapi.utils import get_openapi
from fastapi.openapi.docs import get_redoc_html
from fastapi.staticfiles import StaticFiles
# Internal
from .routers import galileo, ublox
from .db.postgresql import get_database
# --------------------------------------------------------------------------------------------
# Instantiate
database = get_database()
app = FastAPI(docs_url=None, redoc_url=None)
app.include_router(galileo.router)
app.include_router(ublox.router)
app.mount("/static", StaticFiles(directory="static"), name="static")
@app.on_event("startup")
async def startup():
await database.connect()
@app.on_event("shutdown")
async def shutdown():
await database.disconnect()
@app.get("/api/v1/galileo/docs", include_in_schema=False)
async def custom_redoc_ui_html():
return get_redoc_html(
openapi_url=app.openapi_url,
title="UbloxApi",
redoc_js_url="/static/redoc.standalone.js",
redoc_favicon_url="/static/satellite.png",
)
def custom_openapi():
if app.openapi_schema:
return app.openapi_schema
openapi_schema = get_openapi(
title="UbloxApi",
version="1.0.0",
routes=app.routes,
)
openapi_schema["info"]["x-logo"] = {"url": "/static/logo_full.png"}
app.openapi_schema = openapi_schema
return app.openapi_schema
app.openapi = custom_openapi
| 1.90625 | 2 |
utils/ignore.py | ayieko168/Ternary-Metal-Nitrides-Research | 0 | 12787437 | import os, json
with open("PubChemElements_all.json") as fo:
data = json.load(fo)
all_elements = []
for j in data['Table']['Row']:
element_obj = {}
for element in list(zip(data['Table']['Columns']['Column'], j['Cell'])):
property = element[0]
value = element[1]
element_obj[property] = value
all_elements.append(element_obj)
with open("my_elements_data.json", 'w') as fo:
json.dump(all_elements, fo, indent=2)
| 3.015625 | 3 |
reflection/main.py | junaruga/py-sample | 0 | 12787438 | from a.filea import ClassA
from a.b.fileb import ClassB
# class_name: foo.bar.Bar
def import_class(class_name):
components = class_name.split('.')
module = __import__(components[0])
for comp in components[1:]:
# print(repr(comp))
module = getattr(module, comp)
return module
if __name__ == '__main__':
a = ClassA()
print(repr(a))
a.hello()
b = ClassB()
print(repr(b))
b.hello()
reflection_clsa = import_class('a.filea.ClassA')
print(repr(reflection_clsa))
reflection_a = reflection_clsa()
print(repr(reflection_a))
reflection_a.hello()
reflection_clsb = import_class('a.b.fileb.ClassB')
print(repr(reflection_clsb))
reflection_b = reflection_clsb()
print(repr(reflection_b))
reflection_b.hello()
| 2.78125 | 3 |
rlkit/events/visualize_beta_v.py | Asap7772/railrl_evalsawyer | 1 | 12787439 | <filename>rlkit/events/visualize_beta_v.py
import argparse
import numpy as np
import matplotlib.pyplot as plt
import joblib
from rlkit.misc.visualization_util import make_heat_map, plot_heatmap
from rlkit.policies.simple import RandomPolicy
from rlkit.state_distance.rollout_util import multitask_rollout
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('file', type=str,
help='path to the snapshot file')
parser.add_argument('--pause', action='store_true')
parser.add_argument('--mt', type=int, help='max time to goal', default=0)
args = parser.parse_args()
if args.pause:
import ipdb; ipdb.set_trace()
file = args.file
data = joblib.load(file)
beta_v = data['beta_v']
env = data['env']
num_steps_left = np.array([[args.mt]])
def create_beta_goal(obs):
def beta_eval(g1, g2):
return beta_v.eval_np(
observations=np.array([[
*obs
]]),
goals=np.array([[
g1, g2
]]),
num_steps_left=num_steps_left
)[0, 0]
return beta_eval
def create_beta_pos(goal):
def beta_eval(x, y):
return beta_v.eval_np(
observations=np.array([[
x, y
]]),
goals=np.array([[
*goal
]]),
num_steps_left=num_steps_left
)[0, 0]
return beta_eval
rng = [-4, 4]
resolution = 30
obs = (0, 0)
plt.title("pos {}".format(obs))
heatmap = make_heat_map(create_beta_goal(obs), rng, rng, resolution=resolution)
plot_heatmap(heatmap)
obs = (0, 0.75)
plt.figure()
plt.title("pos {}".format(obs))
heatmap = make_heat_map(create_beta_goal(obs), rng, rng, resolution=resolution)
plot_heatmap(heatmap)
obs = (0, 1.75)
plt.figure()
plt.title("pos {}".format(obs))
heatmap = make_heat_map(create_beta_goal(obs), rng, rng, resolution=resolution)
plot_heatmap(heatmap)
goal = (0, 1.25)
plt.figure()
plt.title("goal {}".format(goal))
heatmap = make_heat_map(create_beta_pos(goal), rng, rng,
resolution=resolution)
plot_heatmap(heatmap)
goal = (0, 4)
plt.figure()
plt.title("goal {}".format(goal))
heatmap = make_heat_map(create_beta_pos(goal), rng, rng,
resolution=resolution)
plot_heatmap(heatmap)
goal = (-4, -4)
plt.figure()
plt.title("goal {}".format(goal))
heatmap = make_heat_map(create_beta_pos(goal), rng, rng,
resolution=resolution)
plot_heatmap(heatmap)
goal = (4, -4)
plt.figure()
plt.title("goal {}".format(goal))
heatmap = make_heat_map(create_beta_pos(goal), rng, rng,
resolution=resolution)
plot_heatmap(heatmap)
goal = (4, 4)
plt.figure()
plt.title("goal {}".format(goal))
heatmap = make_heat_map(create_beta_pos(goal), rng, rng,
resolution=resolution)
plot_heatmap(heatmap)
goal = (-4, 4)
plt.figure()
plt.title("goal {}".format(goal))
heatmap = make_heat_map(create_beta_pos(goal), rng, rng,
resolution=resolution)
plot_heatmap(heatmap)
plt.show()
| 2.234375 | 2 |
scripts/ckpt_processor/read_fti_ckpts.py | Knoxort/fti | 34 | 12787440 | # This module initiates the checkpoint
# processing of FTI files.
import os
import glob
import os.path
import time
from fnmatch import fnmatch
import configparser
import posix_read_ckpts
import subprocess
import sys
# variables used for input validation
fti_levels = (1, 2, 3, 4)
output_formats = ('CSV', 'HDF5', 'data')
# runtime variables of FTI (ckpt and meta)
config_file = ""
ckpt_dir = ""
meta_dir = ""
global_dir = ""
group_size = 0
nbHeads = 0
nodeSize = 0
totalRanks = 0
ioMode = 0
ckpt_abs_path = ""
meta_abs_path = ""
execution_id = ""
level_meta_dir = ""
level_dir = ""
# This function reads the config_file
# and sets FTI parameters
def init_config_params(config_file):
global execution_id
global ckpt_dir
global meta_dir
global global_dir
global group_size
global nbHeads
global nodeSize
global ioMode
if os.path.isfile(config_file) is False:
print("Configuration file not found")
sys.exit(2001)
else:
config = configparser.ConfigParser()
config.read(config_file)
execution_id = config['restart']['exec_id']
ckpt_dir = config['basic']['ckpt_dir']
meta_dir = config['basic']['meta_dir']
global_dir = config['basic']['glbl_dir']
group_size = config['basic']['group_size']
nbHeads = config['basic']['head']
nodeSize = config['basic']['node_size']
ioMode = config['basic']['ckpt_io']
# This function processes FTI's files
# given config_file and set the absolute
# paths of meta files and ckpt files
def process_fti_paths(config_file):
global ckpt_dir
global meta_dir
global ckpt_abs_path
global meta_abs_path
# ckpt dir
dir_path = os.path.dirname(os.path.realpath(config_file))
# concatenate paths
if level_dir == '/l4/':
# switch to global_dir
ckpt_dir = global_dir
if ckpt_dir.startswith('./') is True: # same directory as config
ckpt_abs_path = dir_path + ckpt_dir.replace('.', '')
elif "." not in ckpt_dir: # absolute path
# set dir
ckpt_abs_path = ckpt_dir
else: # relative path
# iterate over the number of '../' found in ckpt_path
os.chdir(dir_path)
dirs = ckpt_dir.count("..")
for i in range(dirs):
os.chdir("..")
# concatenate the remaining part
for i in range(dirs):
# remove ../
ckpt_dir = ckpt_dir.replace('../', '')
os.chdir(ckpt_dir)
ckpt_abs_path = os.getcwd()
print("ckpt_abs_path ", ckpt_abs_path)
# meta dir
dir_path = os.path.dirname(os.path.realpath(config_file))
print(dir_path)
# concatenate paths
if meta_dir.startswith('./') is True: # same directory as config
# omit dot + concatenate the rest of the path
meta_abs_path = dir_path + meta_dir.replace('.', '')
elif "." not in meta_dir: # absolute path
# set dir
meta_abs_path = meta_dir
else: # relative path
# iterate over the number of '../' found in ckpt_path
os.chdir(dir_path)
dirs = meta_dir.count("..")
for i in range(dirs):
os.chdir("..")
# concatenate the remaining part
for i in range(dirs):
# remove ../
meta_dir = meta_dir.replace('../', '')
os.chdir(meta_dir)
meta_abs_path = os.getcwd()
print("meta_abs_path ", meta_abs_path)
# This function returns the path of the
# ckpt corresponding to rank_id
def find_ckpt_file(rank_id):
pattern_ckpt_file = ""
pattern_ckpt_path = execution_id+level_dir
if level_dir == '/l1/' or level_dir == '/l4/': # local
pattern_ckpt_file = "*-Rank"+str(rank_id)+".fti"
if level_dir == '/l4/' and ioMode == "2": # global
pattern_ckpt_file = "-mpiio.fti"#Ckpt1-mpiio.fti
ckpt_file = ""
for root, dirs, files in os.walk(os.path.abspath(ckpt_abs_path)):
for file in files:
file = os.path.join(root, file)
if pattern_ckpt_path in file and pattern_ckpt_file in file:
ckpt_file = file
if level_dir == '/l4/' and ioMode == "2": # global
PFSfile = ckpt_file
# recover from L4 to tmp/
ckpt_file = recover_mpiio_l4(rank_id, PFSfile)
if ckpt_file == "":
print("Checkpoint file not found")
sys.exit(2002)
return ckpt_file
# This function is called if io=2 and level=4
# it recovers the file from l4 directory in mpiio format
# to tmp/file in posix format
def recover_mpiio_l4(rank_id, PFSfile):
# preparing input for mpiio recovery
global nodeSize
global nbApprocs
global nbNodes
global nbHeads
nodeSize = int(nodeSize)
nbHeads = int(nbHeads)
nbApprocs = nodeSize - nbHeads
nbNodes = totalRanks / nodeSize if nodeSize else 0
nbNodes = int(nbNodes)
executable_path = "./mpiio/"
# get fileSize from metafile
    # read the ckpt_file_size entry of section "0" in the meta file
fileSize = 0
meta_pattern = "sector"
meta_file = ""
for root, dirs, files in os.walk(os.path.abspath(meta_abs_path)):
for file in files:
if file.startswith(meta_pattern) is True:
file = os.path.join(root, file)
print(file)
meta_file = file
break
# processing the meta file for the size
config = configparser.ConfigParser()
config.read(meta_file)
fileSize = config['0']['ckpt_file_size']
os.chdir(executable_path)
cmd = "./mpiio_main "+str(rank_id)+" "+str(PFSfile)+" "+str(fileSize)+" "+str(nbApprocs)+" "+str(nbNodes)
subprocess.check_call(cmd, shell=True)
print("Rank ", str(rank_id), " is done copying...")
print(
"MPI-IO recovery finished successfully. "
"Now current dir is",
os.getcwd())
# look for what has been stored under /tmp
ckpt_path = os.getcwd()+"/tmp" # Ckpt1-mpiio.fti
pattern_ckpt_file = "*.fti"
ckpt_file = ""
# find file in this directory
for root, dirs, files in os.walk(os.path.abspath(ckpt_path)):
for file in files:
file = os.path.join(root, file)
if fnmatch(file, pattern_ckpt_file):
ckpt_file = file
if ckpt_path == "":
print("Could not recover from MPI-IO")
sys.exit()
return ckpt_file
# This function returns the path of the
# meta corresponding to the ckpt_file
# note: for now it works with level 1
def find_meta_file(ckpt_file):
meta_file = ""
if level_dir == '/l4/' and ioMode == "2":
print("should take any sector file")
for path, subdirs, files in os.walk(meta_abs_path):
for file in files:
file = meta_abs_path+'/'+execution_id+level_dir+file
meta_file = file
break
# traverse all meta files in the directory
else: # levels (1,2,3)
for path, subdirs, files in os.walk(meta_abs_path):
for file in files:
file = meta_abs_path+'/'+execution_id+level_dir+file
if os.path.isfile(file) is True:
config = configparser.ConfigParser()
config.read(file)
ckpt = ckpt_file.rsplit('/', 1)[1]
for section in config.sections():
if section.isdigit() is True:
if config[section]['ckpt_file_name'] == ckpt:
meta_file = file
break
if meta_file == "":
print("Metadata file not found")
sys.exit(2004)
return meta_file
# This function sets FTI's files paths
# depending on the level where the ckpt is stored
def process_level(level):
global level_dir
level_dir = '/l'+str(level)+'/'
# print("level dir : ", level_dir)
# This function compares ckpt directories
# and returns the level to which the last ckpt was stored
def get_latest_ckpt():
latest = max(glob.glob(
os.path.join(ckpt_abs_path, '*/')), key=os.path.getmtime)
latest = latest.rsplit('/', 1)[0]
latest = latest.rsplit('/', 1)[1]
level = latest[1]
return level
# API to read the checkpoints given config and rank
# def read_checkpoints(config_file, rank_id, level=None, output=None):
def read_checkpoints(config_file, rank_id, ranks=None,
level=None, output=None):
init_config_params(config_file)
if level in fti_levels:
process_level(level)
elif level is None:
# check for latest ckpt
last_level = get_latest_ckpt()
        process_level(last_level)
else:
# invalid fti level
print("Invalid FTI level")
sys.exit(1001)
if output is not None and output not in output_formats:
print("Wrong output format. Choose one")
print("CSV (default):: Comma Separated Values file")
print("HDF5 :: Hierarchical Data Format file")
print("data :: numpy array")
sys.exit(1002)
elif output is None:
# default output format (CSV)
output = 'CSV'
    if level == 4 and ioMode == "2" and ranks is None:
print("Total # of ranks is required when reading MPI-IO"
" chekpoints from level 4")
sys.exit(1003)
global totalRanks
totalRanks = ranks
process_fti_paths(config_file)
ckpt_file = find_ckpt_file(rank_id)
meta_file = find_meta_file(ckpt_file)
print("Processing ", ckpt_file, " using meta ", meta_file)
# posix_read_ckpts.read_checkpoint(
# ckpt_file, meta_file, config_file, group_size, level, output)
if output == "data":
return posix_read_ckpts.read_checkpoint(
ckpt_file, meta_file, config_file, group_size, level, output)
else:
posix_read_ckpts.read_checkpoint(
ckpt_file, meta_file, config_file, group_size, level, output)
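# Example usage (illustrative; the config path, rank ids and rank counts below are placeholders):
#   # read the level-1 checkpoint of rank 0 back as a numpy array
#   data = read_checkpoints("config.fti", rank_id=0, level=1, output="data")
#   # dump rank 3 of an MPI-IO level-4 checkpoint written by 8 ranks to CSV
#   read_checkpoints("config.fti", rank_id=3, ranks=8, level=4, output="CSV")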
| 2.34375 | 2 |
strategy/indicator/bsawe/bsawe.py | firebird631/siis | 0 | 12787441 | <gh_stars>0
# @date 2019-04-14
# @author <NAME>, All rights reserved without prejudices.
# @license Copyright (c) 2018 Dream Overflow
# Awesome based buy/sell signal indicator
from strategy.indicator.indicator import Indicator
from strategy.indicator.utils import crossunder, crossover # , down_sample
# import numpy as np
from talib import EMA as ta_EMA, SMA as ta_SMA
import logging
logger = logging.getLogger('siis.strategy.indicator')
class BSAweIndicator(Indicator):
"""
Awesome based buy/sell signal indicator.
@ref https://www.forexstrategiesresources.com/scalping-forex-strategies-iii/337-bollinger-bands-and-chaos-awesome-scalping-system
@ref Squeeze Momentum Indicator [LazyBear]
"""
__slots__ = '_bb_L', '_fast_MA_L', '_awesome_fast_L', '_awesome_slow_L', '_use_EMA', '_signal'
@classmethod
def indicator_type(cls):
return Indicator.TYPE_TREND
@classmethod
def indicator_class(cls):
return Indicator.CLS_OSCILLATOR
def __init__(self, timeframe, bb_L=20, fast_MA_L=3.0, awesome_fast_L=5, awesome_slow_L=34, use_EMA=False):
super().__init__("bsawe", timeframe)
self._bb_L = bb_L
self._fast_MA_L = fast_MA_L
self._awesome_fast_L = awesome_fast_L
self._awesome_slow_L = awesome_slow_L
self._use_EMA = use_EMA
self._signal = 0 # signal direction
def signal(self):
return self._signal
def compute(self, timestamp, high, low, close):
# Breakout Indicator Inputs
bb_basis = ta_EMA(close, self._bb_L) if self._use_EMA else ta_SMA(close, self._bb_L)
fast_ma = ta_EMA(close, self._fast_MA_L)
# Calculate Awesome Oscillator
hl2 = (high + low) * 0.5
xSMA1_hl2 = ta_SMA(hl2, self._awesome_fast_L)
xSMA2_hl2 = ta_SMA(hl2, self._awesome_slow_L)
xSMA1_SMA2 = xSMA1_hl2 - xSMA2_hl2
# Calculate direction of AO
if xSMA1_SMA2[-1] >= 0:
if xSMA1_SMA2[-1] > xSMA1_SMA2[-2]:
AO = 1
else:
AO = 2
else:
if xSMA1_SMA2[-1] > xSMA1_SMA2[-2]:
AO = -1
else:
AO = -2
# Calc breakouts
break_down = crossunder(fast_ma, bb_basis) and close[-1] < bb_basis[-1] and AO<0 # abs(AO)==2
break_up = crossover(fast_ma, bb_basis) and close[-1] > bb_basis[-1] and AO>0 # abs(AO)==1
self._signal = 1 if break_up else -1 if break_down else 0
self._last_timestamp = timestamp
return self._signal
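# Usage sketch (illustrative; assumes numpy arrays of highs, lows and closes sampled on
# this indicator's timeframe):
#   bsawe = BSAweIndicator(timeframe=60.0)
#   signal = bsawe.compute(timestamp, high, low, close)  # 1 = break up, -1 = break down, 0 = none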
| 2.484375 | 2 |
shop/management/commands/recommendation.py | knkemree/django_ecommerce_website | 0 | 12787442 | import rec as rec
from django.core.management.base import BaseCommand
import pandas as pd
from sklearn.metrics.pairwise import cosine_similarity
import plotly.offline as py
import plotly.graph_objects as go
from django.db.models import Sum
import slug
import http.client
import json
from shop.models import Rec, Product, Category
class Command(BaseCommand):
help = "collect ft articles"
# define logic of command
def handle(self, *args, **options):
conn = http.client.HTTPSConnection("www.primemobileparts.com")
payload = ""
headers = {'authorization': "Bearer <KEY>"}
conn.request("GET", "/api/user-order-report",payload, headers)
res = conn.getresponse()
data = res.read()
data.decode("utf-8")
x = json.loads(data.decode("utf-8"))
df = pd.DataFrame(x['reportData'])
df = df.dropna(subset=['user_id', 'product_id', 'quantity'])
customer_item_matrix = df.pivot_table(
index='user_id',
columns='product_id',
values='quantity',
aggfunc='sum'
)
print(df.head())
customer_item_matrix = customer_item_matrix.applymap(lambda x: 1 if x > 0 else 0)
user_user_sim_matrix = pd.DataFrame(
cosine_similarity(customer_item_matrix)
)
user_user_sim_matrix.columns = customer_item_matrix.index
user_user_sim_matrix['user_id'] = customer_item_matrix.index
user_user_sim_matrix = user_user_sim_matrix.set_index('user_id')
# user_user_sim_matrix.loc[737.0].sort_values(ascending=False) # Angelaya benzer kullaniclar
#items_bought_by_A = set(customer_item_matrix.loc[737.0].iloc[customer_item_matrix.loc[737.0].nonzero()].index)
#items_bought_by_B = set(customer_item_matrix.loc[685.0].iloc[customer_item_matrix.loc[685.0].nonzero()].index)
#items_to_recommend_to_B = items_bought_by_A - items_bought_by_B
#items_to_recommend_to_B
item_item_sim_matrix = pd.DataFrame(
cosine_similarity(customer_item_matrix.T)
)
item_item_sim_matrix.columns = customer_item_matrix.T.index
item_item_sim_matrix['product_id'] = customer_item_matrix.T.index
item_item_sim_matrix = item_item_sim_matrix.set_index('product_id')
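        # item_item_sim_matrix[i, j] holds the cosine similarity between the purchase
        # vectors of products i and j across all users; sorting a row therefore ranks
        # the products most often bought together with that product.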
for y in df['product_id']:
#Category.objects.get_or_create(name=z, slug = z.lower().replace(' ', '-'))
#f = Category.objects.get(name = z)
#Product.objects.get_or_create(name=y, price = p, category_id = f.id, slug=y.lower().replace(' ', '-'))
dict = {}
dict["products"] = {}
top_10_similar_items = list(
item_item_sim_matrix \
.loc[y] \
.sort_values(ascending=False) \
.iloc[1:13] \
.index
)
dict["products"][y] = [i for i in top_10_similar_items]
#print(y)
#print(top_10_similar_items)
#print(dict)
rec = json.dumps(dict)
#recs = json.loads(rec)
print(rec)
conn = http.client.HTTPSConnection("www.primemobileparts.com")
headers = {
'content-type': "application/json",
'authorization': "Bearer XgXLQTAvcOwn4Q4LycjR0W1hViX5ChenEepGTcyPo37C3TBCy6ubDxu1FiHt"
}
conn.request("POST", "/api/product-related", rec, headers)
res = conn.getresponse()
data = res.read()
#print(data.decode("utf-8"))
#Product.objects.get_or_create(name=y)
#print('%s added' % (top_10_similar_items,))
#Rec.product.add(d)
self.stdout.write('post complete') | 2.171875 | 2 |
kinetics/reaction_classes/michaelis_menton_modifiers.py | wlawler45/kinetics | 13 | 12787443 | <reponame>wlawler45/kinetics
""" Modifiers (eg inhibtion) """
class Modifier():
def __init__(self):
self.substrate_names = []
self.substrate_indexes = []
self.parameter_names = []
self.parameter_indexes = []
def get_substrate_indexes(self, substrate_names):
self.substrate_indexes = []
for name in self.substrate_names:
self.substrate_indexes.append(substrate_names.index(name))
def get_parameter_indexes(self, parameter_names):
self.parameter_indexes = []
for name in self.parameter_names:
self.parameter_indexes.append(parameter_names.index(name))
def calc_modifier(self, substrates, parameters):
# the substrate indexes will be stored in self.substrate_indexes,
# in the order that they are named in self.substrate_names
# same for parameters
# use these indexes to write the equation here.
return substrates, parameters
class SubstrateInhibition(Modifier):
def __init__(self, ki=None, a=None):
super().__init__()
self.substrate_names = [a]
self.parameter_names = [ki]
def calc_modifier(self, substrates, parameters):
ki = parameters[self.parameter_indexes[0]]
a = substrates[self.substrate_indexes[0]]
substrates[self.substrate_indexes[0]] = a * (1 + a / ki)
return substrates, parameters
class CompetitiveInhibition(Modifier):
def __init__(self, km=None, ki=None, i=None):
super().__init__()
self.substrate_names = [i]
self.parameter_names = [km, ki]
def calc_modifier(self, substrates, parameters):
km = parameters[self.parameter_indexes[0]]
ki = parameters[self.parameter_indexes[1]]
i = substrates[self.substrate_indexes[0]]
parameters[self.parameter_indexes[0]] = km * (1 + i/ki)
return substrates, parameters
class MixedInhibition(Modifier):
def __init__(self, kcat=None, km=None, ki=None, alpha=None, i=None):
super().__init__()
self.substrate_names = [i]
self.parameter_names = [kcat, km, ki, alpha]
def calc_modifier(self, substrates, parameters):
kcat = parameters[self.parameter_indexes[0]]
km = parameters[self.parameter_indexes[1]]
ki = parameters[self.parameter_indexes[2]]
alpha = parameters[self.parameter_indexes[3]]
i = substrates[self.substrate_indexes[0]]
parameters[self.parameter_indexes[0]] = kcat / (1 + i / (alpha * ki))
parameters[self.parameter_indexes[1]] = km * (1 + i / ki) / (1 + i / (alpha * ki))
return substrates, parameters
class MixedInhibition2(Modifier):
def __init__(self, kcat=None, km=None, kic=None, kiu=None, i=None):
super().__init__()
self.substrate_names = [i]
self.parameter_names = [kcat, km, kic, kiu]
def calc_modifier(self, substrates, parameters):
kcat = parameters[self.parameter_indexes[0]]
km = parameters[self.parameter_indexes[1]]
kic = parameters[self.parameter_indexes[2]]
kiu = parameters[self.parameter_indexes[3]]
i = substrates[self.substrate_indexes[0]]
parameters[self.parameter_indexes[0]] = kcat / (1 + i / kiu)
parameters[self.parameter_indexes[1]] = km * (1 + i / kic) / (1 + i / (kiu))
return substrates, parameters
class FirstOrder_Modifier(Modifier):
def __init__(self, kcat=None, k=None, s=None):
super().__init__()
self.substrate_names = [s]
self.parameter_names = [kcat, k]
def calc_modifier(self, substrates, parameters):
kcat = parameters[self.parameter_indexes[0]]
k = parameters[self.parameter_indexes[1]]
s = substrates[self.substrate_indexes[0]]
parameters[self.parameter_indexes[0]] = s*k*kcat
return substrates, parameters
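# Usage sketch (illustrative): a modifier is built from the *names* of the parameters and
# substrates it needs; the owning reaction is expected to resolve those names to indexes via
# get_parameter_indexes() / get_substrate_indexes() before calc_modifier() is evaluated.
#   inhibition = CompetitiveInhibition(km="km_enzyme", ki="ki_inhibitor", i="Inhibitor")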
| 3.125 | 3 |
circuit_benchmarks/toffoli.py | eddieschoute/circuit-benchmarks | 7 | 12787444 | import itertools
import math
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister
from qiskit.circuit import Gate, InstructionSet
from qiskit.dagcircuit import DAGCircuit
from qiskit.extensions.standard import *
from qiskit.qasm import pi
def toffoli(number_qubits: int):
assert number_qubits >= 2
q = QuantumRegister(number_qubits)
qc = QuantumCircuit(q, name="toffoli")
# for i in range(number_qubits-1):
# qc.h(controls[i])
qc.ntoffoli(q[number_qubits-1], *q[0:number_qubits-1])
# qc.measure(controls, c_controls)
# qc.measure(target, c_target)
return qc
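# Example (illustrative): build a 4-qubit n-controlled Toffoli benchmark circuit
#   qc = toffoli(4)
#   print(qc)  # inspect the generated gates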
class NcrxGate(Gate):
"""n-controlled x rotation gate."""
def __init__(self, theta, tgt, *ctls, circ=None):
"""Create new Toffoli gate."""
assert len(ctls) >= 1
super().__init__(f"c^{len(ctls)}rx", [theta], [tgt] + list(ctls), circ)
def _define_decompositions(self):
decomposition = DAGCircuit()
nr_qubits = len(self.qargs)
q = QuantumRegister(nr_qubits)
last_control = q[1]
target = q[0]
decomposition.add_qreg(q)
if nr_qubits == 2:
# Equal to crx of theta
crx_theta = Cu3Gate(self.params[0], -pi/2, pi/2, last_control, target)
decomposition.apply_operation_back(crx_theta)
else:
# Recurse
rule = [
# C-sqrt(rx(theta)) gate
Cu3Gate(self.params[0]/2, -pi/2, pi/2, last_control, target),
NcrxGate(pi, last_control, *q[2:]), # toffoli
Cu3Gate(self.params[0]/2, -pi/2, pi/2, last_control, target).inverse(),
NcrxGate(pi, last_control, *q[2:]), # toffoli
NcrxGate(self.params[0]/2, target, *q[2:]) # c^nrx(theta/2) gate on n-1 qubits
]
for inst in rule:
decomposition.apply_operation_back(inst)
# decomposition.apply_operation_back(ToffoliGate(q[1], q[2], q[0]))
self._decompositions = [decomposition]
def inverse(self):
"""Invert this gate."""
return self # self-inverse
def reapply(self, circ):
"""Reapply this gate to corresponding qubits in circ."""
self._modifiers(circ.ncrx(self.params[0], self.qargs[0], *self.qargs[1:]))
def ncrx(self, theta, tgt, *ctls):
"""Apply n-controlled x-rotation(theta) to target from controls"""
if all(isinstance(ctl, QuantumRegister) for ctl in ctls) and \
isinstance(tgt, QuantumRegister) and \
all(len(ctl) == len(tgt) for ctl in ctls):
instructions = InstructionSet()
for i in range(ctls[0].size):
            instructions.add(self.ncrx(theta, (tgt, i), *zip(ctls, itertools.repeat(i))))
return instructions
for ctl in ctls:
self._check_qubit(ctl)
self._check_qubit(tgt)
self._check_dups(list(ctls) + [tgt])
return self._attach(NcrxGate(theta, tgt, *ctls, circ=self))
def ntoffoli(self, tgt, *ctls):
"""Apply n-controlled Toffoli to tgt with controls."""
if all(isinstance(ctl, QuantumRegister) for ctl in ctls) and \
isinstance(tgt, QuantumRegister) and \
all(len(ctl) == len(tgt) for ctl in ctls):
instructions = InstructionSet()
for i in range(ctls[0].size):
instructions.add(self.ntoffoli((tgt, i), *zip(ctls, itertools.repeat(i))))
return instructions
for ctl in ctls:
self._check_qubit(ctl)
self._check_qubit(tgt)
self._check_dups(list(ctls) + [tgt])
return self._attach(NcrxGate(pi, tgt, *ctls, circ=self))
QuantumCircuit.ncrx = ncrx
QuantumCircuit.ntoffoli = ntoffoli
| 2.328125 | 2 |
users/migrations/0005_userpronoun.py | jazzyeagle/chatbot_website | 0 | 12787445 | <reponame>jazzyeagle/chatbot_website
# Generated by Django 3.2.8 on 2021-10-24 21:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0004_user_email'),
]
operations = [
migrations.CreateModel(
name='UserPronoun',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('subjective', models.CharField(max_length=10)),
('objective', models.CharField(max_length=10)),
('possessive', models.CharField(max_length=10)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.user')),
],
),
]
| 1.882813 | 2 |
tests/useful_scripts/reindex_stix.py | ukncsc/edge-mod | 2 | 12787446 | <reponame>ukncsc/edge-mod
from mongoengine.connection import get_db
from pymongo import MongoClient
client = MongoClient()
db = client.inbox
# full-text-search index
db.stix.ensure_index(
[('fts', 'text')],
name='idx_fts',
background=True,
)
# create default indexes
db.stix.ensure_index([('data.hash',1)])
db.stix.ensure_index([('type',1)])
db.stix.ensure_index([('created_on',1)])
db.stix.ensure_index([('data.summary.type', 1)])
db.stix.ensure_index([('txn',1)])
db.stix.ensure_index([('data.summary.title', 1)])
db.activity.log.ensure_index([('stix_id', 1)])
# cache index and uniqueness
db.cache.ensure_index([('cache_name', 1)])
# unique index on name
db.trust_group.ensure_index([('name', 1)], unique=True)
| 2.09375 | 2 |
example.py | pudo/docstash | 1 | 12787447 | from docstash import Stash
# open a stash in the current working directory:
stash = Stash(path='.stash')
# print a list of collections:
print list(stash)
# access (or create) a specific collection:
collection = stash.get('test')
# import a file from the local working directory:
collection.ingest('README.md')
# import an http resource:
collection.ingest('http://pudo.org/index.html')
#collection.ingest_dir('.')
# iterate through each document and set a metadata
# value:
for doc in collection:
print doc
with open(doc.file, 'rb') as fh:
doc['body_length'] = len(fh.read())
doc.save()
| 2.84375 | 3 |
examples/imap-store.py | karpierz/libcurl | 0 | 12787448 | <gh_stars>0
#***************************************************************************
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) 1998 - 2021, <NAME>, <<EMAIL>>, et al.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://curl.se/docs/copyright.html.
#
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
# copies of the Software, and permit persons to whom the Software is
# furnished to do so, under the terms of the COPYING file.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
#***************************************************************************
"""
IMAP example showing how to modify the properties of an e-mail
"""
import sys
import ctypes as ct
import libcurl as lcurl
from curltestutils import * # noqa
# This is a simple example showing how to modify an existing mail
# using libcurl's IMAP capabilities with the STORE command.
#
# Note that this example requires libcurl 7.30.0 or above.
def main(argv=sys.argv[1:]):
curl: ct.POINTER(lcurl.CURL) = lcurl.easy_init()
with curl_guard(False, curl):
if not curl: return 1
# Set username and password
lcurl.easy_setopt(curl, lcurl.CURLOPT_USERNAME, b"user")
lcurl.easy_setopt(curl, lcurl.CURLOPT_PASSWORD, b"<PASSWORD>")
# This is the mailbox folder to select
lcurl.easy_setopt(curl, lcurl.CURLOPT_URL, b"imap://imap.example.com/INBOX")
# Set the STORE command with the Deleted flag for message 1. Note that
# you can use the STORE command to set other flags such as Seen, Answered,
# Flagged, Draft and Recent.
lcurl.easy_setopt(curl, lcurl.CURLOPT_CUSTOMREQUEST, b"STORE 1 +Flags \\Deleted")
# Perform the custom request
res: lcurl.CURLcode = lcurl.easy_perform(curl)
# Check for errors
if res != lcurl.CURLE_OK:
handle_easy_perform_error(res)
else:
# Set the EXPUNGE command, although you can use the CLOSE command
# if you do not want to know the result of the STORE
lcurl.easy_setopt(curl, lcurl.CURLOPT_CUSTOMREQUEST, b"EXPUNGE")
# Perform the second custom request
res = lcurl.easy_perform(curl)
# Check for errors
if res != lcurl.CURLE_OK:
handle_easy_perform_error(res)
return int(res)
sys.exit(main())
| 2.15625 | 2 |
mxnet/doc/tutorials/onnx/utils.py | anirudhacharya/web-data | 0 | 12787449 | <reponame>anirudhacharya/web-data
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# Get top n indices of an array
get_top_n = lambda array, n: (-array).argsort()[:,:n]
# Get the labels for a given list of predictions
get_predictions = lambda predictions, categories: [[categories[i] for i in p] for p in predictions]
# zip the predictions, result and labels
get_zipped = lambda result , predictions, top_n: [list(zip(result[i,a], predictions[i])) for i,a in enumerate(top_n)]
def _plot_image(ax, img):
ax.imshow(img)
ax.tick_params(axis='both',
which='both',
bottom='off',
top='off',
left='off',
right='off',
labelleft='off',
labelbottom='off')
return ax
def _plot_prediction_bar(ax, result):
result = result[::-1]
perf = [category[0] for category in result]
ax.barh(range(len(perf)), perf, align='center', color='#33ccff')
ax.tick_params(axis='both',
which='both',
bottom='off',
top='off',
left='off',
right='off',
labelbottom='off')
tick_labels = [category[1].split(',')[0] for category in result]
ax.yaxis.set_ticks(range(len(perf)))
ax.yaxis.set_ticklabels(tick_labels, position=(0.5,0), minor=False, horizontalalignment='center')
def plot_predictions(images, results, categories, N):
"""Plot a list of images with associated top-N predictions
arguments:
images -- an array of images
results -- a list of np arrays of shape [1,N_Categories]
categories -- an array of str representing the labels
N -- the number of predictions to display
"""
factor = int((len(images)/6)+1)
top_n = get_top_n(results, N)
predictions = get_predictions(top_n, categories)
zipped = get_zipped(results, predictions, top_n)
gs = gridspec.GridSpec(factor+1, 3)
fig = plt.figure(figsize=(15, int(5*(factor+1)+N/3)))
gs.update(hspace=0.1, wspace=0.001)
for gg, results, img in zip(gs, zipped, images):
gg2 = gridspec.GridSpecFromSubplotSpec(6+int(N/3), 10, subplot_spec=gg)
ax = fig.add_subplot(gg2[0:5, :])
_plot_image(ax, img)
ax = fig.add_subplot(gg2[5:6+int(N/3), 1:9])
_plot_prediction_bar(ax, results)
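# Example usage (illustrative; assumes `imgs` is a list of decoded images, `probs` an array of
# shape [n_images, n_categories] returned by the network, and `labels` the category names):
#   plot_predictions(imgs, probs, labels, N=5)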
| 2.921875 | 3 |
fxm/task/apps.py | panyuan5056/fx | 0 | 12787450 | from django.apps import AppConfig
class TaskConfig(AppConfig):
name = 'task'
verbose_name = "敏感数据发现"
main_menu_index = 2
| 1.179688 | 1 |