id (stringlengths 1–7) | text (stringlengths 6–1.03M) | dataset_id (stringclasses, 1 value)
---|---|---|
3259788
|
import io
import gzip
from pathlib import Path
import re
import zlib
from ncompress import decompress as unlzw
class MavReader:
''' Opens an AVNMAV file and returns a stream of data
'''
empty = re.compile(rb'\s+\n')
newline = re.compile(b'1\n')
def __init__(self, filepath, stations=False):
"""Open a avnmav file and returns text or a generator of stations
filepath: str or Path
location of avnmav file
stations: bool, default=False
True: generator of list of stations
False: stream of text
"""
self.filepath = Path(filepath)
if not self.filepath.exists():
raise FileNotFoundError(f"{self.filepath}")
self.stations = stations
self.stream = None
def __enter__(self):
if self.filepath.suffix == '.gz':
self.stream = gzip.open(self.filepath,'r')
elif self.filepath.suffix == '.Z':
file_obj = open(self.filepath, 'rb')
compressed_data = file_obj.read()
self.stream = io.BytesIO(unlzw(compressed_data))
file_obj.close()
else:
self.stream = open(self.filepath, 'rb')
if self.stations:
return self.get_stations()
return self.stream
def __exit__(self, exc_type, exc_value, exc_traceback):
self.stream.close()
def get_stations(self):
station = []
for line in self.stream:
if MavReader.empty.match(line):
yield station
station = []
elif MavReader.newline.match(line):
continue
else:
station.append(line.decode())
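# Usage sketch (added for illustration; the file path below is hypothetical).
# The class is a context manager; with stations=True it yields one list of
# decoded lines per station block.
#
# with MavReader('mav.t00z.Z', stations=True) as stations:
#     for station in stations:
#         print(''.join(station))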
|
StarcoderdataPython
|
3273973
|
<gh_stars>0
def fun(mystr):
return len(mystr)
list1 = ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday']
result = list(map(fun,list1))
print(result)
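# Expected output: [6, 7, 9, 8, 6, 8] (the length of each weekday name);
# an equivalent one-liner would be list(map(len, list1)).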
|
StarcoderdataPython
|
3284405
|
<filename>TweetIngest/send.py
import sys
import logging
import datetime
import time
import os
import enhancedjsonencoder
from azure.eventhub import EventHubClient, Sender, EventData
from telemetry import Telemetry
class EventHubSender(object):
def __init__(self, connectionString):
print("Initiating EventHubSender with "+connectionString)
if not connectionString:
raise ValueError("No EventHubs URL supplied.")
self.address=connectionString
self.client=EventHubClient.from_connection_string(connectionString)
self.sender=self.client.add_sender()
self.client.run()
self.encoder=enhancedjsonencoder.EnhancedJSONEncoder()
def __del__(self):
self.sender.close()
def close(self):
self.__del__()
def sendTweet(self,tweet):
try:
tweetjson= self.encoder.encode(tweet)
self.sender.send(EventData(tweetjson))
Telemetry.IncrementMessages()
except:
raise
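# Usage sketch (added for illustration): the connection string below is a
# placeholder, and `tweet` is assumed to be an object that EnhancedJSONEncoder
# can serialize.
#
# sender = EventHubSender("Endpoint=sb://<namespace>.servicebus.windows.net/;...")
# sender.sendTweet(tweet)
# sender.close()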
|
StarcoderdataPython
|
80829
|
<reponame>ealpizarp/climate_crawler
# Costa Rica Institute of Technology
# A web scraper that fetches information about the world climate from https://en.tutiempo.net/climate
# and stores it in a local output CSV file and in the Hadoop distributed file system
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import requests
from bs4 import BeautifulSoup
import os
from timeit import default_timer as timer
import threading
import pydoop.hdfs as hdfs
lock = threading.Lock()
baseUrl = 'https://en.tutiempo.net'
tableHeader = ["Year", "T", "TM", "Tm", "PP",
"V", "RA", "SN", "TS", "FG", "TN", "GR"]
archivo = None
archivoErrores = None
linksVisitados = set()
# This function receives all the rows of a table and writes them to a file
# Flushing that file to disk is not done in this function
def writeToFile(basicInfo, rows):
lock.acquire() # Lock the threads to avoid errors when writing to the file
stringToWrite = ""
for row in rows:
stringToWrite += basicInfo + ";" + ';'.join(row) + "\n"
archivo.write(stringToWrite)
lock.release()
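# Note (added): each written line has the form built from basicInfo plus the table row, i.e.
# <continent>;<country>;<station>;<Year>;<T>;<TM>;<Tm>;<PP>;<V>;<RA>;<SN>;<TS>;<FG>;<TN>;<GR>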
# This function looks up the continent, country and station tables, and returns the links they contain
# They are handled in a single function because these tables share the same CSS class
def getLinks(link, pos=-1):
listaLinks = []
completeLink = baseUrl + link
# This check exists because continents do not send a position, so the default value is -1
# The position is used to advance through the station pages (page 1, page 2, ..., page n)
if (pos != -1):
completeLink = completeLink + "/"+str(pos)+"/"
page = None
amountOfTries = 0
# To avoid losing information, the request is retried a maximum of 2 times if it fails
while not page and amountOfTries < 2:
amountOfTries += 1
try:
page = requests.get(completeLink)
except:
continue
if not page:
print('Error fetching table ', completeLink)
return None
soup = BeautifulSoup(page.content, 'html.parser')
# All the tables use this same class
tablas = soup.find_all("div", {"class": "mlistados"})
# Here all the links inside each table are collected
for tabla in tablas:
referencias = tabla.find_all("a")
for ref in referencias:
listaLinks += [[ref['href'], ref.string]]
return listaLinks
# This function fetches the climate table of a station
def getTableInfo(basicInfo, link):
tableRowsContent = []
completeLink = baseUrl + link
page = None
amountOfTries = 0
while not page and amountOfTries < 2:
amountOfTries += 1
try:
page = requests.get(completeLink)
except Exception as e:
print(e, completeLink)
continue
if not page:
print('Error fetching table with climate data ', completeLink)
return None
soup = BeautifulSoup(page.content, 'html.parser')
# Get the table and, from it, all of its rows
table = soup.find("div", {"class": "mt5 minoverflow"})
tableRows = table.find_all("tr")[1:]
for row in tableRows:
dataCells = row.find_all("td") # Take the cells
firstColumn = True
rowText = []
# Once all the rows are obtained, they are converted to text so they can be written out
# The first cell of each row is a special case because a "strong" tag must be looked up
for cell in dataCells:
if not firstColumn:
rowText += [cell.string]
else:
year = cell.find("strong")
rowText += [year.string]
firstColumn = False
tableRowsContent += [rowText]
writeToFile(basicInfo, tableRowsContent)
# Function to get the stations of a country and to fetch the climate table of each station
def getStations(nombreContinente, linkPais):
nombrePais = linkPais[1]
currentPage = 1
# Iterate over the station pages, since some countries have several
while True:
listaEstaciones = getLinks(linkPais[0], currentPage)
if not listaEstaciones:
break
currentPage += 1
for linkEstacion in listaEstaciones:
nombreEstacion = linkEstacion[1]
try:
getTableInfo(nombreContinente + ";" +
nombrePais + ";" + nombreEstacion, linkEstacion[0])
except AttributeError as e:
archivoErrores.write(str(e) + "Continente: " + nombreContinente + " Pais: " + nombrePais +
" Provincia: " + nombreEstacion + " Link: " + baseUrl + linkEstacion[0] + "\n")
# Once all the stations of a page have been written, flush the buffer and sync the file with the filesystem
archivo.flush()
os.fsync(archivo.fileno())
archivoErrores.flush()
os.fsync(archivoErrores.fileno())
# This function gets the countries of a continent
def getCountries(linkContinente):
nombreContinente = linkContinente[1]
listaPaises = getLinks(linkContinente[0])
# One thread is created per country, then we wait for all of them to finish
threadList = []
for linkPais in listaPaises:
threadList += [threading.Thread(target=getStations,
args=(nombreContinente, linkPais, ),
daemon=True)]
for thread in threadList:
thread.start()
for thread in threadList:
thread.join()
def getContinents():
listaContinentes = getLinks("/climate/")
threadList = []
for linkContinente in listaContinentes[:len(listaContinentes)-1]:
threadList += [threading.Thread(target=getCountries,
args=(linkContinente,),
daemon=True)]
threadList += [threading.Thread(target=getStations,
args=("Antartica", listaContinentes[len(
listaContinentes)-1], ),
daemon=True)]
for thread in threadList:
thread.start()
for thread in threadList:
thread.join()
def pruebas():
global archivo, archivoErrores
cantidadDePruebas = 10
for i in range(cantidadDePruebas):
archivo = open("out" + str(i+1) + ".txt", "w")
archivo.write("continente" + ";" + "provincia" + ";" + "pais" + ";" + ';'.join(
["Year", "T", "TM", "Tm", "PP", "V", "RA", "SN", "TS", "FG", "TN", "GR"]) + "\n")
archivoErrores = open("errores" + str(i+1) + ".txt", "w")
start = timer()
getContinents()
end = timer()
print("Duracion", end - start)
archivo.close()
archivoErrores.close()
def saveFile():
global archivo, archivoErrores
archivo = open("climate" + ".csv", "w")
archivoErrores = open("ERROR_LOG" + ".txt", "w")
start = timer()
getContinents()
end = timer()
print("Duracion", end - start)
archivo.close()
archivoErrores.close()
def saveToHadoop():
if hdfs.path.exists('/climate.csv'):
hdfs.rm('/climate.csv', True)
hdfs.put('./climate.csv', '/')
# Invoke the function in charge of running the whole scraping pipeline
saveFile()
# Save the resulting file to the Hadoop HDFS
saveToHadoop()
|
StarcoderdataPython
|
1712758
|
<reponame>KapJI/moonraker-telegram-bot
import logging
import time
from concurrent.futures import ThreadPoolExecutor
from apscheduler.schedulers.base import BaseScheduler
from telegram import ChatAction, Message, Bot
from configuration import ConfigWrapper
from camera import Camera
from klippy import Klippy
logger = logging.getLogger(__name__)
class Timelapse:
def __init__(self, config: ConfigWrapper, klippy: Klippy, camera: Camera, scheduler: BaseScheduler, bot: Bot, logging_handler: logging.Handler = None):
self._enabled: bool = config.timelapse.enabled and camera.enabled
self._mode_manual: bool = config.timelapse.mode_manual
self._height: float = config.timelapse.height
self._interval: int = config.timelapse.interval
self._target_fps: int = config.timelapse.target_fps
self._min_lapse_duration: int = config.timelapse.min_lapse_duration
self._max_lapse_duration: int = config.timelapse.max_lapse_duration
self._last_frame_duration: int = config.timelapse.last_frame_duration
# Todo: add to runtime params section!
self._after_lapse_gcode: str = config.timelapse.after_lapse_gcode
self._send_finished_lapse: bool = config.timelapse.send_finished_lapse
self._silent_progress: bool = config.telegram_ui.silent_progress
self._klippy = klippy
self._camera = camera
# push params to cameras instances
self._camera.target_fps = self._target_fps
self._camera.min_lapse_duration = self._min_lapse_duration
self._camera.max_lapse_duration = self._max_lapse_duration
self._camera.last_frame_duration = self._last_frame_duration
self._sched = scheduler
self._chat_id: int = config.bot.chat_id
self._bot: Bot = bot
self._running: bool = False
self._paused: bool = False
self._last_height: float = 0.0
self._executors_pool: ThreadPoolExecutor = ThreadPoolExecutor(2)
if logging_handler:
logger.addHandler(logging_handler)
if config.bot.debug:
logger.setLevel(logging.DEBUG)
@property
def enabled(self) -> bool:
return self._enabled
@enabled.setter
def enabled(self, new_value: bool):
self._enabled = new_value
@property
def manual_mode(self) -> bool:
return self._mode_manual
@manual_mode.setter
def manual_mode(self, new_value: bool):
self._mode_manual = new_value
@property
def interval(self) -> int:
return self._interval
@interval.setter
def interval(self, new_value: int):
if new_value == 0:
self._interval = new_value
self._remove_timelapse_timer()
elif new_value > 0:
self._interval = new_value
self._reschedule_timelapse_timer()
@property
def height(self) -> float:
return self._height
@height.setter
def height(self, new_value: float):
if new_value >= 0:
self._height = new_value
@property
def target_fps(self) -> int:
return self._target_fps
@target_fps.setter
def target_fps(self, new_value: int):
if new_value >= 1:
self._target_fps = new_value
self._camera.target_fps = new_value
@property
def min_lapse_duration(self) -> int:
return self._min_lapse_duration
@min_lapse_duration.setter
def min_lapse_duration(self, new_value: int):
if new_value >= 0:
if new_value <= self._max_lapse_duration and not new_value == 0:
logger.warning(f"Min lapse duration {new_value} is lower than max lapse duration {self._max_lapse_duration}")
self._min_lapse_duration = new_value
self._camera.min_lapse_duration = new_value
@property
def max_lapse_duration(self) -> int:
return self._max_lapse_duration
@max_lapse_duration.setter
def max_lapse_duration(self, new_value: int):
if new_value >= 0:
if new_value <= self._min_lapse_duration and not new_value == 0:
logger.warning(f"Max lapse duration {new_value} is lower than min lapse duration {self._min_lapse_duration}")
self._max_lapse_duration = new_value
self._camera.max_lapse_duration = new_value
@property
def last_frame_duration(self) -> int:
return self._last_frame_duration
@last_frame_duration.setter
def last_frame_duration(self, new_value: int):
if new_value >= 0:
self._last_frame_duration = new_value
self._camera.last_frame_duration = new_value
@property
def running(self) -> bool:
return self._running
@running.setter
def running(self, new_val: bool):
self._running = new_val
self._paused = False
if new_val:
self._add_timelapse_timer()
else:
self._remove_timelapse_timer()
@property
def paused(self) -> bool:
return self._paused
@paused.setter
def paused(self, new_val: bool):
self._paused = new_val
if new_val:
self._remove_timelapse_timer()
elif self._running:
self._add_timelapse_timer()
def take_lapse_photo(self, position_z: float = -1001, manually: bool = False):
if not self._enabled:
logger.debug(f"lapse is disabled")
return
elif not self._klippy.printing_filename:
logger.debug(f"lapse is inactive for file undefined")
return
elif not self._running:
logger.debug(f"lapse is not running at the moment")
return
elif self._paused and not manually:
logger.debug(f"lapse is paused at the moment")
return
elif not self._mode_manual and self._klippy.printing_duration <= 0.0:
logger.debug(f"lapse must not run with auto mode and zero print duration")
return
if 0.0 < position_z < self._last_height - self._height:
self._last_height = position_z
if self._height > 0.0 and round(position_z * 100) % round(self._height * 100) == 0 and position_z > self._last_height:
self._executors_pool.submit(self._camera.take_lapse_photo)
self._last_height = position_z
elif position_z < -1000:
self._executors_pool.submit(self._camera.take_lapse_photo)
def take_test_lapse_photo(self):
self._executors_pool.submit(self._camera.take_lapse_photo)
def clean(self):
self._camera.clean()
def _add_timelapse_timer(self):
if self._interval > 0 and not self._sched.get_job('timelapse_timer'):
self._sched.add_job(self.take_lapse_photo, 'interval', seconds=self._interval, id='timelapse_timer')
def _remove_timelapse_timer(self):
if self._sched.get_job('timelapse_timer'):
self._sched.remove_job('timelapse_timer')
def _reschedule_timelapse_timer(self):
if self._interval > 0 and self._sched.get_job('timelapse_timer'):
self._sched.add_job(self.take_lapse_photo, 'interval', seconds=self._interval, id='timelapse_timer', replace_existing=True)
def _send_lapse(self):
if not self._enabled or not self._klippy.printing_filename:
logger.debug(f"lapse is inactive for enabled {self.enabled} or file undefined")
else:
lapse_filename = self._klippy.printing_filename_with_time
gcode_name = self._klippy.printing_filename
info_mess: Message = self._bot.send_message(chat_id=self._chat_id, text=f"Starting time-lapse assembly for {gcode_name}", disable_notification=self._silent_progress)
if self._executors_pool._work_queue.qsize() > 0:
info_mess.edit_text(text="Waiting for the completion of tasks for photographing")
time.sleep(5)
while self._executors_pool._work_queue.qsize() > 0:
time.sleep(1)
self._bot.send_chat_action(chat_id=self._chat_id, action=ChatAction.RECORD_VIDEO)
(video_bio, thumb_bio, width, height, video_path, gcode_name) = self._camera.create_timelapse(lapse_filename, gcode_name, info_mess)
if self._send_finished_lapse:
info_mess.edit_text(text="Uploading time-lapse")
if video_bio.getbuffer().nbytes > 52428800:
info_mess.edit_text(text=f'Telegram bots have a 50mb filesize restriction, please retrieve the timelapse from the configured folder\n{video_path}')
else:
self._bot.send_video(self._chat_id, video=video_bio, thumb=thumb_bio, width=width, height=height, caption=f'time-lapse of {gcode_name}', timeout=120, disable_notification=self._silent_progress)
self._bot.delete_message(self._chat_id, message_id=info_mess.message_id)
else:
info_mess.edit_text(text="Time-lapse creation finished")
video_bio.close()
thumb_bio.close()
if self._after_lapse_gcode:
# Todo: add exception handling
self._klippy.save_data_to_marco(video_bio.getbuffer().nbytes, video_path, f'{gcode_name}.mp4')
self._klippy.execute_command(self._after_lapse_gcode.strip())
def send_timelapse(self):
self._sched.add_job(self._send_lapse, misfire_grace_time=None, coalesce=False, max_instances=1, replace_existing=False)
def stop_all(self):
self._remove_timelapse_timer()
self._running = False
self._paused = False
self._last_height = 0.0
def parse_timelapse_params(self, message: str):
mass_parts = message.split(sep=" ")
mass_parts.pop(0)
response = ''
for part in mass_parts:
try:
if 'enabled' in part:
self.enabled = bool(int(part.split(sep="=").pop()))
response += f"enabled={self.enabled} "
elif 'manual_mode' in part:
self.manual_mode = bool(int(part.split(sep="=").pop()))
response += f"manual_mode={self.manual_mode} "
elif 'height' in part:
self.height = float(part.split(sep="=").pop())
response += f"height={self.height} "
elif 'time' in part:
self.interval = int(part.split(sep="=").pop())
response += f"time={self.interval} "
elif 'target_fps' in part:
self.target_fps = int(part.split(sep="=").pop())
response += f"target_fps={self.target_fps} "
elif 'last_frame_duration' in part:
self.last_frame_duration = int(part.split(sep="=").pop())
response += f"last_frame_duration={self.last_frame_duration} "
elif 'min_lapse_duration' in part:
self.min_lapse_duration = int(part.split(sep="=").pop())
response += f"min_lapse_duration={self.min_lapse_duration} "
elif 'max_lapse_duration' in part:
self.max_lapse_duration = int(part.split(sep="=").pop())
response += f"max_lapse_duration={self.max_lapse_duration} "
else:
self._klippy.execute_command(f'RESPOND PREFIX="Timelapse params error" MSG="unknown param `{part}`"')
except Exception as ex:
self._klippy.execute_command(f'RESPOND PREFIX="Timelapse params error" MSG="Failed parsing `{part}`. {ex}"')
if response:
full_conf = f"enabled={self.enabled} " \
f"manual_mode={self.manual_mode} " \
f"height={self.height} " \
f"time={self.interval} " \
f"target_fps={self.target_fps} " \
f"last_frame_duration={self.last_frame_duration} " \
f"min_lapse_duration={self.min_lapse_duration} " \
f"max_lapse_duration={self.max_lapse_duration} "
self._klippy.execute_command(f'RESPOND PREFIX="Timelapse params" MSG="Changed timelapse params: {response}"')
self._klippy.execute_command(f'RESPOND PREFIX="Timelapse params" MSG="Full timelapse config: {full_conf}"')
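# Note (added): parse_timelapse_params expects a command-style message whose first
# token is dropped and whose remaining tokens are key=value pairs, e.g. a
# hypothetical "/tlapse_params enabled=1 manual_mode=0 height=0.2 time=15".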
|
StarcoderdataPython
|
1721365
|
<reponame>ayush-1506/dialogy
"""
.. _duration_entity:
Module provides access to an entity type (class) to handle durations.
Import classes:
- DurationEntity
"""
from typing import Any, Dict
import attr
from dialogy import constants as const
from dialogy.types.entity import BaseEntity
@attr.s
class DurationEntity(BaseEntity):
"""
This entity type expects a normalized attribute. This provides the duration normalized to seconds.
Helpful in cases where we wish to operate on time like:
"I want a booking in 2 days."
We can tell the time at which the sentence was said, but we need to make the booking after two days.
This entity parses this information and also provides us the number of seconds to add to the current timestamp
to get to a date that's 2 days ahead.
"""
normalized = attr.ib(type=Dict[str, Any], default=attr.Factory(dict))
_meta = attr.ib(type=Dict[str, str], default=attr.Factory(dict))
@classmethod
def reshape(cls, dict_: Dict[str, Any]) -> Dict[str, Any]:
"""
:type dict_: Dict[str, Any]
"""
match_start = dict_[const.EntityKeys.START]
match_end = dict_[const.EntityKeys.END]
dict_[const.EntityKeys.RANGE] = {
const.EntityKeys.START: match_start,
const.EntityKeys.END: match_end,
}
# ['body', 'start', 'value', 'end', 'dim', 'latent']
dict_[const.EntityKeys.TYPE] = dict_[const.EntityKeys.DIM]
# This piece is a preparation for multiple entity values.
# So, even though we are confident of the value found, we are still keeping the
# structure.
value = dict_[const.EntityKeys.VALUE][const.EntityKeys.NORMALIZED]
dict_[const.EntityKeys.VALUES] = [value]
del dict_[const.EntityKeys.START]
del dict_[const.EntityKeys.END]
del dict_[const.EntityKeys.VALUE]
return dict_
def set_value(self, value: Any = None) -> "BaseEntity":
return super().set_value(value=value)
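# Sketch of what reshape() does (exact key strings come from dialogy.constants):
# given a duckling-style payload with 'start', 'end', 'dim' and a 'value' holding
# the normalized duration, it nests start/end under a range key, copies 'dim' into
# the type key, wraps the normalized value into a single-element values list, and
# drops the original start/end/value keys.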
|
StarcoderdataPython
|
50293
|
<filename>pf_py_common/py_object_copier.py
class PyObjectCopier:
pass
|
StarcoderdataPython
|
3373732
|
<reponame>Arya07/SSM-Pytorch<gh_stars>0
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
"""Factory method for easily getting imdbs by name."""
__sets = {}
from datasets.pascal_voc import pascal_voc
from datasets.coco import coco
from datasets.icub import icub_dataset
import numpy as np
#icub transformation dataset
icub_manual_path = '/home/IIT.LOCAL/emaiettini/workspace/Repos/SSM/data/iCubWorld-Transformations_manual'
# icub_devkit_path = '/home/elisa/Repos/py-faster_icubworld/data/iCubWorld-Translation_devkit'
for t in ['train', 'test']:
for task in ['TASK2']:
for numObjs in ['30', '21']:
for supervision in ['supervised', 'unsupervised']:
split = '{}_{}_{}objs_{}'.format(t, task, numObjs, supervision)
name = '{}_{}_{}objs_{}'.format(t, task, numObjs, supervision)
__sets[name] = (lambda split=split: icub_dataset(split))
for t in ['train']:
for task in ['TASK2', 'TASK2_EAL']:
for numObjs in ['21']:
for color in ['white', 'pois_odd', 'green', 'full']:
split = '{}_{}_{}objs_{}'.format(t, task, numObjs, color)
name = '{}_{}_{}objs_{}'.format(t, task, numObjs, color)
__sets[name] = (lambda split=split: icub_dataset(split))
for t in ['test']:
for task in ['TASK2', 'TASK2_EAL']:
for numObjs in ['21']:
for color in ['white', 'pois_odd', 'green']:
split = '{}_{}_{}objs_{}'.format(t, task, numObjs, color)
name = '{}_{}_{}objs_{}'.format(t, task, numObjs, color)
__sets[name] = (lambda split=split: icub_dataset(split, devkit_path=icub_manual_path))
for t in ['train', 'test']:
for task in ['TASK2']:
for numObjs in ['21']:
split = '{}_{}_{}objs'.format(t, task, numObjs)
name = '{}_{}_{}objs'.format(t, task, numObjs)
__sets[name] = (lambda split=split: icub_dataset(split))
for t in ['train', 'test']:
for task in ['TASK2']:
for numObjs in ['21']:
for supervision in ['acquisition1', 'acquisition2']:
split = '{}_{}_{}objs_{}'.format(t, task, numObjs, supervision)
name = '{}_{}_{}objs_{}'.format(t, task, numObjs, supervision)
__sets[name] = (lambda split=split: icub_dataset(split, devkit_path=icub_manual_path))
for t in ['train']:
for task in ['TASK2', 'TASK2_sequence']:
for numObjs in ['30', '21', '5', '10']:
for supervision in ['TRA_supervised', 'NoTRA_unsupervised', 'NoTRA_unsupervised_ordered']:
split = '{}_{}_{}objs_{}'.format(t, task, numObjs, supervision)
name = '{}_{}_{}objs_{}'.format(t, task, numObjs, supervision)
__sets[name] = (lambda split=split: icub_dataset(split))
for t in ['train']:
for task in ['TASK2']:
for numObjs in ['30']:
for supervision in ['supervised', 'unsupervised']:
split = '{}_{}_{}objs_1over8_{}_fake'.format(t, task, numObjs, supervision)
name = '{}_{}_{}objs_1over8_{}_fake'.format(t, task, numObjs, supervision)
__sets[name] = (lambda split=split: icub_dataset(split))
for t in ['test']: # split would be the imageset
for task in ['TASK2']:
for numObjs in ['30', '5', '10']:
split = '{}_{}_{}objs_manual'.format(t, task, numObjs)
name = '{}_{}_{}objs_manual'.format(t, task, numObjs)
__sets[name] = (lambda split=split: icub_dataset(split, devkit_path=icub_manual_path))
# Set up voc_<year>_<split> using selective search "fast" mode
for year in ['2007', '2012', '0712']:
for split in ['train', 'val', 'trainval', 'test']:
name = 'voc_{}_{}'.format(year, split)
__sets[name] = (lambda split=split, year=year: pascal_voc(split, year))
# Set up coco_2014_<split>
for year in ['2014']:
for split in ['train', 'val', 'minival', 'valminusminival']:
name = 'coco_{}_{}'.format(year, split)
__sets[name] = (lambda split=split, year=year: coco(split, year))
# Set up coco_2015_<split>
for year in ['2015']:
for split in ['test', 'test-dev']:
name = 'coco_{}_{}'.format(year, split)
__sets[name] = (lambda split=split, year=year: coco(split, year))
def get_imdb(name):
"""Get an imdb (image database) by name."""
if name not in __sets:
raise KeyError('Unknown dataset: {}'.format(name))
return __sets[name]()
def list_imdbs():
"""List all registered imdbs."""
return __sets.keys()
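# Usage sketch (added): dataset names are built by the loops above, e.g.
# (assuming the datasets packages and their data paths are available):
# imdb = get_imdb('voc_2007_trainval')
# print(sorted(list_imdbs()))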
|
StarcoderdataPython
|
4800399
|
from numpy import Inf, linspace, meshgrid, reshape
from numpy.linalg import norm
from numpy.ma import masked_array
from numpy.random import rand
from .convex_body import ConvexBody
from ..util import arr_map
class Box(ConvexBody):
def sample(self, N):
return 2 * rand(N, self.dim) - 1
def is_member(self, xs):
return norm(xs, Inf, axis=1) <= 1
def meshgrid(self, N):
interval = linspace(-1, 1, N)
return meshgrid(*([interval] * self.dim), indexing='ij')
def uniform_grid(self, N):
grids = self.meshgrid(N)
return [masked_array(grid, mask=(False * grid)) for grid in grids]
def uniform_list(self, N):
grids = self.meshgrid(N)
return arr_map(lambda grid: reshape(grid, -1), grids).T
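# Usage sketch (added): the ConvexBody constructor is not shown here, so the
# call below assumes it takes the dimension and exposes it as self.dim.
# box = Box(2)
# points = box.sample(100)          # 100 points uniform in [-1, 1]^2
# inside = box.is_member(points)    # boolean mask via the infinity norm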
|
StarcoderdataPython
|
4824000
|
import inspect
import torch
import collections
import textwrap
import functools
import warnings
from typing import Dict, List, Set, Type
import torch._jit_internal as _jit_internal
from torch.jit.frontend import get_default_args, get_jit_def, get_class_properties
from torch.jit._builtins import _find_builtin
from torch.nn import Module
from torch._six import get_function_from_type, bind_method
ScriptMethodStub = collections.namedtuple('ScriptMethodStub', ('resolution_callback', 'def_', 'original_method'))
PropertyStub = collections.namedtuple('Property', ('resolution_callback', 'def_'))
# TODO: there should be a more principled way of doing this.
ignored_attributes = [
"_version",
"_parameters",
"_buffers",
"_modules",
"_initializing",
"_backward_hooks",
"_forward_hooks",
"_forward_pre_hooks",
"_state_dict_hooks",
"_load_state_dict_pre_hooks",
"dump_patches",
]
def make_stub(func, name):
rcb = _jit_internal.createResolutionCallbackFromClosure(func)
ast = get_jit_def(func, name, self_name="RecursiveScriptModule")
return ScriptMethodStub(rcb, ast, func)
def make_stub_from_method(nn_module, method_name):
func = getattr(nn_module, method_name)
if isinstance(func, ScriptMethodStub):
return func
# Make sure the name present in the resulting AST will match the name
# requested here. The only time they don't match is if you do something
# like:
# def _forward(self):
# pass
# forward = _forward
# In this case, the actual function object will have the name `_forward`,
# even though we requested a stub for `forward`.
return make_stub(func, method_name)
def make_stubs_from_exported_methods(mod):
stubs = []
for name in dir(mod):
item = getattr(mod, name, None)
if (
_jit_internal.get_torchscript_modifier(item)
is _jit_internal.FunctionModifiers.EXPORT
):
stubs.append(make_stub_from_method(mod, name))
return stubs
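# Note (added): methods decorated with @torch.jit.export carry the EXPORT modifier
# checked above, so they receive stubs even when they are not reachable from forward().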
# base types that can be constants
# in addition, tuples and lists of these base types are also considered constants
# If you edit this list, then you also need to edit the handlers in
# ConstantValue in jit/script/init.cpp
_constant_types = (bool, float, int, str, type(None), torch.device, torch.layout, torch.dtype)
def _get_valid_constant(attr, v, owner_type):
if isinstance(v, _constant_types):
return v
elif isinstance(v, tuple) or isinstance(v, list):
return tuple(_get_valid_constant(attr, x, owner_type) for x in v)
constants = ", ".join(torch.typename(typ) for typ in _constant_types)
raise TypeError(textwrap.dedent("""
'{}' object in attribute '{}.{}' is not a valid constant.
Valid constants are:
1. a nn.ModuleList
2. a value of type {{{}}}
3. a list or tuple of (2)
""".format(torch.typename(type(v)), owner_type, attr, constants)))
class SourceContext(torch._C._jit_tree_views.SourceRangeFactory):
def __init__(self, source, filename, file_lineno, leading_whitespace_len):
super(SourceContext, self).__init__(source, filename, file_lineno, leading_whitespace_len)
def infer_concrete_type_builder(nn_module, share_types=True):
"""
Build a ConcreteModuleTypeBuilder from an nn.Module. This
ConcreteModuleType doesn't have a JIT type associated with it yet, it
must be filled in by the caller.
"""
concrete_type_builder = torch._C.ConcreteModuleTypeBuilder(type(nn_module))
if isinstance(nn_module, (torch.nn.ModuleDict)):
concrete_type_builder.set_module_dict()
if isinstance(nn_module, (torch.nn.ModuleList, torch.nn.Sequential)):
concrete_type_builder.set_module_list()
class_annotations = getattr(nn_module, '__annotations__', {})
if isinstance(nn_module, (torch.quantization.QuantWrapper)):
class_annotations = {}
# Get user-annotated ignored attributes.
user_annotated_ignored_attributes = getattr(nn_module, "__jit_ignored_attributes__", list())
concrete_type_builder.add_ignored_attributes(user_annotated_ignored_attributes)
# try to infer the type from type annotation or from the object itself
def infer_type(name, item):
# The forward function from Module is special; never use these annotations; we
# need to infer type directly using JIT. I originally wanted to write
# this test as isinstance(class_annotations[name], Callable) but
# isinstance on typing things doesn't seem to work: isinstance(list, Callable)
# is also true!
inferred = False
if name in class_annotations and class_annotations[name] != torch.nn.Module.__annotations__["forward"]:
ann_to_type = torch.jit.annotations.ann_to_type(class_annotations[name], _jit_internal.fake_range())
attr_type = torch._C.InferredType(ann_to_type)
elif isinstance(item, torch.jit.Attribute):
ann_to_type = torch.jit.annotations.ann_to_type(item.type, _jit_internal.fake_range())
attr_type = torch._C.InferredType(ann_to_type)
else:
attr_type = torch._C._jit_try_infer_type(item)
inferred = True
return attr_type, inferred
added_names = set()
for name, item in nn_module._parameters.items():
if name in user_annotated_ignored_attributes:
continue
assert item is None or isinstance(item, torch.Tensor)
attr_type, _ = infer_type(name, item)
# We currently have the invariant in various places in our code
# that parameters must be Tensors. However, the nn.Module API also
# allows NoneType parameters. These parameters are not returned as
# part of `parameters()` and its variants, but are available
# through direct attribute access.
concrete_type_builder.add_attribute(name, attr_type.type(), True, False)
added_names.add(name)
for name, item in nn_module._buffers.items():
if name in user_annotated_ignored_attributes:
continue
assert item is None or isinstance(item, torch.Tensor)
attr_type, _ = infer_type(name, item)
concrete_type_builder.add_attribute(name, attr_type.type(), False, True)
added_names.add(name)
for name, item in nn_module._modules.items():
if name in user_annotated_ignored_attributes:
continue
attr_type, _ = infer_type(name, item)
if item is None:
# Modules can be None. We don't have direct support for optional
# Modules, so register it as a NoneType attribute instead.
concrete_type_builder.add_attribute(name, attr_type.type(), False, False)
continue
if attr_type.success():
assert attr_type.type().is_interface_type()
# if the type can be inferred, it should be a module interface type
sub_concrete_type = torch._C.ConcreteModuleType.from_jit_type(attr_type.type())
else:
# otherwise we get the concrete module type for item and add it to concrete_type
sub_concrete_type = get_module_concrete_type(item, share_types)
concrete_type_builder.add_module(name, sub_concrete_type)
added_names.add(name)
# populate constants_set
constants_set = getattr(nn_module, "__constants__", set())
# Constants annotated via `Final[T]` rather than being added to `__constants__`
for name, ann in class_annotations.items():
if torch._jit_internal.is_final(ann):
constants_set.add(name)
for name in constants_set:
if name in added_names:
# TODO: We should really error in this case, but it's bc-breaking so
# we need to warn for at least one release
if name in nn_module._modules:
hint = "submodule"
elif name in nn_module._buffers:
hint = "buffer"
elif name in nn_module._parameters:
hint = "parameter"
else:
raise AssertionError("added_names must be submodule, parameter, or buffer")
warnings.warn("'{}' was found in ScriptModule constants, "
" but it is a non-constant {}. Consider removing it.".format(name, hint))
continue
if not hasattr(nn_module, name):
# TODO: We should really error in this case, but it's bc-breaking so
# we need to warn for at least one release
warnings.warn("'{}' was found in ScriptModule constants, "
"but was not actually set in __init__. "
"Consider removing it.".format(name))
continue
value = getattr(nn_module, name)
concrete_type_builder.add_constant(name, _get_valid_constant(name, value, type(nn_module).__name__))
added_names.add(name)
# populate overloads
overloads = getattr(nn_module, "__overloads__", {})
# update with any annotated overloads
overloads.update(get_overload_name_mapping(get_overload_annotations(nn_module)))
for name, overloaded_names in overloads.items():
concrete_type_builder.add_overload(name, overloaded_names)
for name, value in nn_module.__dict__.items():
if name in ignored_attributes or name.startswith("__"):
# Python objects have lots of random attributes attached to them;
# PyTorch adds a few more. Prevent these from getting compiled.
continue
if name in user_annotated_ignored_attributes:
continue
if name in added_names:
# Don't re-add anything we already added
continue
# Handle Python function attributes
if inspect.isfunction(value):
try:
scripted_fn = torch.jit.script(value)
concrete_type_builder.add_function_attribute(
name,
torch._C._jit_try_infer_type(scripted_fn).type(),
value)
except Exception as e:
# If we fail to script the function, it isn't a hard error.
# Instead, we will add it to the list of attributes we failed
# to convert, with the compilation error.
hint = ("(This function exists as an attribute on the Python module, "
"but we failed to compile it to a TorchScript function. "
"\nThe error stack is reproduced here:\n{}").format(e)
concrete_type_builder.add_failed_attribute(name, hint)
pass
continue
# Handle calls to builtin functions (either bespoke builtins from torch.jit._builtins or
# a call to an aten function like torch.add)
builtin_symbol_name = _find_builtin(value)
if builtin_symbol_name:
concrete_type_builder.add_builtin_function(name, builtin_symbol_name)
continue
# Handle Script function attributes
if isinstance(value, torch.jit.ScriptFunction):
concrete_type_builder.add_function_attribute(
name,
torch._C._jit_try_infer_type(value).type(),
value)
continue
# If we got here, this is a regular "data" attribute. Add it to the concrete type.
attr_type, inferred = infer_type(name, value)
if attr_type.success():
concrete_type_builder.add_attribute(name, attr_type.type(), False, False)
else:
# TODO: could add more detail here. For example, what the user should do
# when the pytype is `list` or `NoneType`
inferred_msg = "Its type was inferred; try adding a type annotation for the attribute." if inferred else ""
additional_info = f"{attr_type.reason()}. {inferred_msg}"
hint = "(This attribute exists on the Python module, " \
f"but we failed to convert Python type: '{torch.typename(type(value))}' " \
f"to a TorchScript type. {additional_info})"
concrete_type_builder.add_failed_attribute(name, hint)
# add hooks to concrete type
for hook in nn_module._forward_hooks.values():
concrete_type_builder.add_forward_hook(hook)
for pre_hook in nn_module._forward_pre_hooks.values():
concrete_type_builder.add_forward_pre_hook(pre_hook)
return concrete_type_builder
class ConcreteTypeStore(object):
type_store: Dict[Type[Module], List[torch._C.ConcreteModuleType]]
methods_compiled: Set[torch._C.ConcreteModuleType]
def __init__(self):
# Python module type => List[ConcreteModuleType)]
self.type_store = {}
# ConcreteTypes that have had their methods already compiled
self.methods_compiled = set()
def get_or_create_concrete_type(self, nn_module):
"""
Infer a ConcreteType from this `nn.Module` instance. Underlying JIT
types are re-used if possible.
"""
concrete_type_builder = infer_concrete_type_builder(nn_module)
nn_module_type = type(nn_module)
if nn_module_type not in self.type_store:
self.type_store[nn_module_type] = []
# Search the type store for an already-available JIT type
known_types = self.type_store[nn_module_type]
for known_type in known_types:
if known_type.equals(concrete_type_builder):
return known_type
# We didn't find anything; generate a new JIT type from this concrete type
concrete_type = concrete_type_builder.build()
self.type_store[nn_module_type].append(concrete_type)
return concrete_type
concrete_type_store = ConcreteTypeStore()
def create_methods_and_properties_from_stubs(concrete_type, method_stubs, property_stubs):
method_defs = [m.def_ for m in method_stubs]
method_rcbs = [m.resolution_callback for m in method_stubs]
method_defaults = [get_default_args(m.original_method) for m in method_stubs]
property_defs = [p.def_ for p in property_stubs]
property_rcbs = [p.resolution_callback for p in property_stubs]
concrete_type._create_methods_and_properties(property_defs, property_rcbs, method_defs, method_rcbs, method_defaults)
def create_hooks_from_stubs(concrete_type, hook_stubs, pre_hook_stubs):
hook_defs = [h.def_ for h in hook_stubs]
hook_rcbs = [h.resolution_callback for h in hook_stubs]
pre_hook_defs = [h.def_ for h in pre_hook_stubs]
pre_hook_rcbs = [h.resolution_callback for h in pre_hook_stubs]
concrete_type._create_hooks(hook_defs, hook_rcbs, pre_hook_defs, pre_hook_rcbs)
def get_module_concrete_type(nn_module, share_types=True):
"""
Gets a concrete type for nn_modules. If share_types is True, the concrete
type is fetched from concrete_type_store. If it is False, a new concrete type
is created without first searching concrete_type_store.
Args:
nn_module: The original Python nn.Module that we are creating a ScriptModule for.
share_types: Whether to share underlying JIT types between modules (if possible).
Returns:
A concrete type for nn_module.
"""
assert isinstance(nn_module, Module)
if isinstance(nn_module, torch.jit.ScriptModule) and \
hasattr(nn_module, "_concrete_type"):
return nn_module._concrete_type
if share_types:
# Look into the store of cached JIT types
concrete_type = concrete_type_store.get_or_create_concrete_type(nn_module)
else:
# Get a concrete type directly, without trying to re-use an existing JIT
# type from the type store.
concrete_type_builder = infer_concrete_type_builder(nn_module, share_types)
concrete_type_builder.set_poisoned()
concrete_type = concrete_type_builder.build()
return concrete_type
def create_script_module(nn_module, stubs_fn, share_types=True):
"""
Creates a new ScriptModule from an nn.Module
Args:
nn_module: The original Python nn.Module that we are creating a ScriptModule for.
stubs_fn: Lambda that takes an nn.Module and generates a list of ScriptMethodStubs to compile.
share_types: Whether to share underlying JIT types between modules (if possible).
NOTE: Only set this to False when we cannot guarantee type sharing will work
correctly. This only happens today for traced modules, where the same
module can produce different traced methods depending on the inputs.
"""
assert not isinstance(nn_module, torch.jit.RecursiveScriptModule)
check_module_initialized(nn_module)
concrete_type = get_module_concrete_type(nn_module, share_types)
return create_script_module_impl(nn_module, concrete_type, stubs_fn)
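# Usage sketch (added): this is the entry point reached by recursive scripting,
# roughly torch.jit.script(my_module) -> create_script_module(my_module,
# infer_methods_to_compile). For example:
# scripted = torch.jit.script(torch.nn.Linear(4, 4))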
def create_script_module_impl(nn_module, concrete_type, stubs_fn):
"""
Convert an nn.Module to a RecursiveScriptModule.
Args:
nn_module: The original Python nn.Module that we are creating a ScriptModule for.
concrete_type: The fully initialized ConcreteType of the module.
stubs_fn: Lambda that takes an nn.Module and generates a list of ScriptMethodStubs to compile.
"""
cpp_module = torch._C._create_module_with_type(concrete_type.jit_type)
method_stubs = stubs_fn(nn_module)
property_stubs = get_property_stubs(nn_module)
hook_stubs, pre_hook_stubs = get_hook_stubs(nn_module)
def init_fn(script_module):
# Initialize the ScriptModule:
# 1. Copy the attributes/parameters/buffers from the original `nn_module` to the new ScriptModule.
for name, (attr_type, is_param) in concrete_type.get_attributes().items():
orig_value = getattr(nn_module, name)
orig_value = orig_value.value if isinstance(orig_value, torch.jit.Attribute) else orig_value
cpp_module.setattr(name, orig_value)
# 2. Copy the submodules from the original `nn_module` to the new ScriptModule,
# recursively scripting them.
for name, sub_concrete_type in concrete_type.get_modules():
orig_value = getattr(nn_module, name)
assert isinstance(orig_value, Module), "Expected Module but got {}".format(type(orig_value))
module_type = sub_concrete_type.jit_type
if isinstance(module_type, torch._C.InterfaceType):
# use the interface inference rule to compile the module
scripted = interface_script(module_type, orig_value)
elif isinstance(orig_value, torch.jit.ScriptModule):
scripted = orig_value
else:
# always reuse the provided stubs_fn to infer the methods to compile
scripted = create_script_module_impl(orig_value, sub_concrete_type, stubs_fn)
cpp_module.setattr(name, scripted)
script_module._modules[name] = scripted
# 3. Copy @ignored/@unused methods and attrs from the original `nn_module` to the new ScriptModule.
# This ensures we can access these Python methods on the ScriptModule.
for name in dir(nn_module):
item = getattr(nn_module, name, None)
if inspect.ismethod(item) and _jit_internal.is_ignored_fn(item):
unbound_function = getattr(type(nn_module), name)
bound_method = unbound_function.__get__(script_module)
setattr(script_module, name, bound_method)
elif concrete_type.is_ignored_attribute(name):
setattr(script_module, name, item)
# For convenience, attach the concrete type to the new ScriptModule
script_module._concrete_type = concrete_type
# Actually create the ScriptModule, initializing it with the function we just defined
script_module = torch.jit.RecursiveScriptModule._construct(cpp_module, init_fn)
# Compile methods if necessary
if concrete_type not in concrete_type_store.methods_compiled:
create_methods_and_properties_from_stubs(concrete_type, method_stubs, property_stubs)
# Create hooks after methods to ensure no name collisions between hooks and methods.
# If done before, hooks can overshadow methods that aren't exported.
create_hooks_from_stubs(concrete_type, hook_stubs, pre_hook_stubs)
torch._C._run_emit_module_hook(cpp_module)
concrete_type_store.methods_compiled.add(concrete_type)
# Copy the forward hooks and pre-hooks to the new ScriptModule
# to allow the hooks to be run from eager as ScriptFunctions
for idx, fn in enumerate(script_module._c._get_forward_pre_hooks()):
script_module._forward_pre_hooks[idx] = fn
for idx, fn in enumerate(script_module._c._get_forward_hooks()):
script_module._forward_hooks[idx] = fn
# Special handling so methods like __len__ work in script methods on classes derived from containers
if isinstance(nn_module, (torch.nn.ModuleList, torch.nn.Sequential, torch.nn.ModuleDict)) and \
'__len__' not in cpp_module._method_names():
script_module.define("def __len__(self):\n return {}\n".format(len(nn_module)))
if isinstance(nn_module, torch.nn.ModuleDict) and \
'__contains__' not in cpp_module._method_names():
if len(nn_module.keys()):
keys = repr(list(nn_module.keys()))
script_module.define("def __contains__(self, key: str):\n return key in {}\n".format(keys))
else:
script_module.define("def __contains__(self, key: str):\n return False\n")
# Make the compiled methods available to the Python ScriptModule class.
for method_stub in method_stubs:
if method_stub.original_method is None:
# define()'d methods don't have a Python original_method, so we
# don't need to do any Python re-wrapping stuff
continue
name = method_stub.original_method.__name__
if name != method_stub.def_.name().name:
# TODO: Why skip this? Because @torch.jit._overload_method will
# mangle the name of the function.
continue
script_method = cpp_module._get_method(name)
# Wrap the original to propagate docstrings and such.
# TODO: we don't currently do this for functions that are recursively
# compiled; we should.
wrapped_script_method = functools.wraps(method_stub.original_method)(script_method) # type: ignore
# Add the methods to the script_module directly. This ensures they will
# be found first when `name` is looked up (as opposed to the stubs or
# nn.Module.forward)
script_module.__dict__[name] = wrapped_script_method
# Make module properties available on the Python ScriptModule class.
for property_stub in property_stubs:
property_name = property_stub.def_.name().name
fget = cpp_module._get_method(property_stub.def_.getter_name().name)
# Setter is optional, so it may not exist.
setter_name = property_stub.def_.setter_name()
fset = cpp_module._get_method(setter_name.name) if setter_name else None
script_module.__dict__[property_name] = property(property_name, fget, fset) # type: ignore
# copy over python methods to script module if they aren't defined on the script module
# this is currently an internal api used only on module containers
for name in dir(nn_module):
item = getattr(nn_module, name, None)
if _jit_internal.get_torchscript_modifier(item) is _jit_internal.FunctionModifiers.COPY_TO_SCRIPT_WRAPPER:
add_python_attr_to_scripted_model(script_module, nn_module, name)
return script_module
# We define shims of certain attributes on the RecursiveScriptModule to support
# magic methods. To check if a script model defines an attribute we need
# to also check that the attribute is not the shim
def script_model_defines_attr(script_model, attr):
script_attr = getattr(script_model, attr, None)
if script_attr is None:
return False
default_attr = get_function_from_type(torch.jit.RecursiveScriptModule, attr)
if default_attr is None:
return False
return script_attr != default_attr
def add_python_attr_to_scripted_model(script_model, orig, attr):
if hasattr(orig, attr) and script_model_defines_attr(script_model, attr):
setattr(script_model, attr, getattr(orig, attr))
def get_overload_annotations(mod):
# original function => [(mangled overload name, overload function)]
overloads = {}
for name in dir(type(mod)):
item = getattr(mod, name, None)
if not callable(item):
continue
# builtin functions like repr() in python 2 do not have __module__ defined
if hasattr(item, "__module__") and item.__module__ is not None:
method_overloads = _jit_internal._get_overloaded_methods(item, mod.__class__)
if method_overloads is None:
continue
names = [name + "__" + str(i) for i in range(len(method_overloads))]
overloads[item] = list(zip(names, method_overloads))
return overloads
def get_overload_name_mapping(overload_info):
# Same format as __overloads__
# original function => [overload names]
overload_name_mappings: Dict[str, List[str]] = {}
for orig_fn, overloads in overload_info.items():
original_name = orig_fn.__name__
if original_name not in overload_name_mappings:
overload_name_mappings[original_name] = []
for overload_name, _ in overloads:
overload_name_mappings[original_name].append(overload_name)
return overload_name_mappings
def _check_no_signature(func):
signature = torch.jit.annotations.get_signature(func, None, _jit_internal.fake_range(), inspect.ismethod(func))
if signature is None:
qual_name = _jit_internal._qualified_name(func)
raise RuntimeError("Must explicitly add type annotations to overloaded functions: {}".format(qual_name))
def make_stubs_for_overloads(overload_info):
overload_stubs = []
for orig_fn, overloads in overload_info.items():
orig_ast = get_jit_def(orig_fn, orig_fn.__name__, self_name="RecursiveScriptModule")
for overload_name, overload_fn in overloads:
_check_no_signature(overload_fn)
over_ast = get_jit_def(overload_fn, overload_fn.__name__, self_name="RecursiveScriptModule")
new_ast = torch._C._replace_overloaded_method_decl(over_ast.decl(), orig_ast, overload_name)
_rcb = _jit_internal.createResolutionCallbackFromClosure(orig_fn)
overload_stubs.append(ScriptMethodStub(_rcb, new_ast, overload_fn))
return overload_stubs
def check_module_initialized(mod):
assert isinstance(mod, torch.nn.Module)
if not hasattr(mod, '_parameters'):
raise RuntimeError("'{}' has not been initialized, did you forget to call 'super()'?"
.format(torch.typename(type(mod))))
# This is to avoid importing torch.distributed.nn
if not hasattr(mod, 'remote_parameters'):
for name, param in mod._parameters.items():
if isinstance(param, torch.nn.parameter.UninitializedParameter):
raise RuntimeError("'{}' has uninitialized parameters {}. Did you forget to run a forward pass?"
.format(torch.typename(type(mod)), name))
def infer_methods_to_compile(nn_module):
"""
Implements the default rules for which methods should act as starting
points for compilation (TODO add a link when the rules are published).
"""
check_module_initialized(nn_module)
methods: List[str] = []
if hasattr(nn_module, 'forward') and not _jit_internal.is_ignored_fn(nn_module.forward):
forward_func = getattr(nn_module.forward, "__func__", None)
module_forward = get_function_from_type(torch.nn.Module, "forward")
if forward_func != module_forward:
methods = ['forward']
exported = []
for name in dir(nn_module):
item = getattr(nn_module, name, None)
if _jit_internal.get_torchscript_modifier(item) is _jit_internal.FunctionModifiers.EXPORT:
exported.append(name)
methods = methods + exported
overload_name_mappings = dict(getattr(nn_module, "__overloads__", {}))
overload_info = get_overload_annotations(nn_module)
overload_name_mappings.update(get_overload_name_mapping(overload_info))
overload_stubs = make_stubs_for_overloads(overload_info)
nn_module.__overloads__ = overload_name_mappings
# we shouldn't directly compile overloaded methods, just its overloads
def ignore_overloaded(method_name):
return method_name not in overload_name_mappings
filtered_methods = filter(ignore_overloaded, methods)
# Unique the methods. We don't want to use a set to store the methods because it
# introduces non-determinism to compile order.
uniquer: Set[str] = set()
uniqued_methods = []
for name in filtered_methods:
if name in uniquer:
continue
uniqued_methods.append(name)
uniquer.add(name)
stubs = []
for method in uniqued_methods:
stubs.append(make_stub_from_method(nn_module, method))
return overload_stubs + stubs
def get_hook_stubs(nn_module):
"""
Returns forward hook and pre_hook ScriptModuleStubs
"""
check_module_initialized(nn_module)
hook_map: Dict = {}
hook_stubs = []
for hook in nn_module._forward_hooks.values():
if hook.__name__ in hook_map:
if id(hook) != id(hook_map[hook.__name__]):
raise RuntimeError(
f"Hook '{hook.__name__}' on {type(nn_module).__name__} "
"has at least two different python definitions."
" Please use unique names for all hooks."
)
else:
hook_map[hook.__name__] = hook
hook_stubs.append(make_stub(hook, hook.__name__))
pre_hook_stubs = []
for pre_hook in nn_module._forward_pre_hooks.values():
if pre_hook.__name__ in hook_map:
if id(pre_hook) != id(hook_map[pre_hook.__name__]):
raise RuntimeError(
f"Pre-hook '{pre_hook.__name__}' on {type(nn_module).__name__} "
"has at least two different python definitions."
" Please use unique names for all hooks."
)
else:
hook_map[pre_hook.__name__] = pre_hook
pre_hook_stubs.append(make_stub(pre_hook, pre_hook.__name__))
return hook_stubs, pre_hook_stubs
def get_property_stubs(nn_module):
"""
Create property stubs for the properties of the module by creating method
stubs for the getter and setter.
"""
module_ty = type(nn_module)
properties_asts = get_class_properties(module_ty, self_name="RecursiveScriptModule")
rcbs = {}
for name in dir(module_ty):
item = getattr(module_ty, name, None)
if isinstance(item, property):
if not item.fget:
raise RuntimeError(f'Property {name} of {type(nn_module).__name__} must have a getter')
rcbs[name] = _jit_internal.createResolutionCallbackFromClosure(item.fget)
stubs = [PropertyStub(rcbs[ast.name().name], ast) for ast in properties_asts]
return stubs
def interface_script(mod_interface, nn_module):
"""
Makes a ScriptModule from an nn.Module, using the interface methods rule for
determining which methods to compile.
Args:
mod_interface: the interface type that the module have
nn_module: The original Python nn.Module that we are creating a ScriptModule for.
"""
if isinstance(nn_module, torch.jit.ScriptModule):
return nn_module
check_module_initialized(nn_module)
def infer_interface_methods_to_compile(nn_module):
"""
Rule to infer the methods from the interface type to know which
methods need to act as starting points for compilation.
"""
stubs = []
for method in mod_interface.getMethodNames():
stubs.append(make_stub_from_method(nn_module, method))
return stubs
return create_script_module(nn_module, infer_interface_methods_to_compile)
def try_compile_fn(fn, loc):
if _jit_internal.is_ignored_fn(fn):
# Don't do anything for @ignore'd functions
return None
if isinstance(fn, torch.nn.Module):
# Since modules are callable pybind recognizes them as functions, but
# don't do anything for them
return None
if not inspect.isfunction(fn) and not inspect.ismethod(fn):
raise RuntimeError("`{}` is not a function. Recursive scripting only supports "
"Python functions or methods currently.\n"
"Consider manually annotating `{}` with @torch.jit.script.".format(fn, fn))
# We don't have the actual scope where the function was defined, but we can
# extract the necessary info from the closed over variables on the function
# object
rcb = _jit_internal.createResolutionCallbackFromClosure(fn)
return torch.jit.script(fn, _rcb=rcb)
def wrap_cpp_module(cpp_module):
"""
Wrap this torch._C.ScriptModule in a Python ScriptModule, recursively for all submodules
"""
def init_fn(script_module):
for name, cpp_module in torch._C.ModuleDict(script_module._c).items():
setattr(script_module, name, wrap_cpp_module(cpp_module))
script_module._concrete_type = torch._C.ConcreteModuleType.from_jit_type(script_module._c._type())
for idx, fn in enumerate(script_module._c._get_forward_pre_hooks()):
script_module._forward_pre_hooks[idx] = fn
for idx, fn in enumerate(script_module._c._get_forward_hooks()):
script_module._forward_hooks[idx] = fn
return torch.jit.RecursiveScriptModule._construct(cpp_module, init_fn)
def compile_unbound_method(concrete_type, fn):
if _jit_internal.is_ignored_fn(fn):
return None
stub = make_stub(fn, fn.__name__)
with torch._jit_internal._disable_emit_hooks():
# We don't want to call the hooks here since the graph that is calling
# this function is not yet complete
create_methods_and_properties_from_stubs(concrete_type, (stub,), ())
return stub
def lazy_bind(concrete_type, unbound_method):
"""
Returns a function that lazily binds `unbound_method` to a provided
Module IValue, then invokes the method. We do this so that any Python
shenanigans that will poison type sharing are impossible at compile
time.
"""
def lazy_binding_method(cpp_module, *args):
def init_fn(script_module):
orig_class = concrete_type.py_class
# Copy @ignored/@unused methods from the original module to the new one.
# This ensures they are available during execution.
for name in dir(orig_class):
item = getattr(orig_class, name, None)
if _jit_internal.is_ignored_fn(item):
setattr(script_module, name, item)
# Copy constants over so they are available during execution.
for name, value in concrete_type.get_constants().items():
setattr(script_module, name, value)
script_module = torch.jit.RecursiveScriptModule._construct(cpp_module, init_fn)
method = bind_method(unbound_method, script_module, torch.jit.RecursiveScriptModule)
return method(*args)
# make the lazy binding method "look like" the original method
lazy_binding_method.original_fn = unbound_method # type: ignore
lazy_binding_method.__name__ = unbound_method.__name__
torch._jit_internal.copy_torchscript_modifier(unbound_method, lazy_binding_method)
return lazy_binding_method
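# Illustrative usage sketch (added; not part of the original module). Recursive
# scripting is normally driven through the public torch.jit.script entry point,
# which routes through the helpers defined above. The example module below is
# hypothetical.
if __name__ == "__main__":
    import torch

    class _ExampleModule(torch.nn.Module):
        def forward(self, x):
            return x.relu() + 1

    scripted = torch.jit.script(_ExampleModule())  # compiles forward() recursively
    print(scripted.graph)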
# Source repository: linherest/pgoapi
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/networking/responses/encounter_tutorial_complete_response.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pogoprotos.data import pokemon_data_pb2 as pogoprotos_dot_data_dot_pokemon__data__pb2
from pogoprotos.data.capture import capture_award_pb2 as pogoprotos_dot_data_dot_capture_dot_capture__award__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/networking/responses/encounter_tutorial_complete_response.proto',
package='pogoprotos.networking.responses',
syntax='proto3',
serialized_pb=_b('\nJpogoprotos/networking/responses/encounter_tutorial_complete_response.proto\x12\x1fpogoprotos.networking.responses\x1a\"pogoprotos/data/pokemon_data.proto\x1a+pogoprotos/data/capture/capture_award.proto\"\xad\x02\n!EncounterTutorialCompleteResponse\x12Y\n\x06result\x18\x01 \x01(\x0e\x32I.pogoprotos.networking.responses.EncounterTutorialCompleteResponse.Result\x12\x32\n\x0cpokemon_data\x18\x02 \x01(\x0b\x32\x1c.pogoprotos.data.PokemonData\x12<\n\rcapture_award\x18\x03 \x01(\x0b\x32%.pogoprotos.data.capture.CaptureAward\";\n\x06Result\x12\t\n\x05UNSET\x10\x00\x12\x0b\n\x07SUCCESS\x10\x01\x12\x19\n\x15\x45RROR_INVALID_POKEMON\x10\x02\x62\x06proto3')
,
dependencies=[pogoprotos_dot_data_dot_pokemon__data__pb2.DESCRIPTOR,pogoprotos_dot_data_dot_capture_dot_capture__award__pb2.DESCRIPTOR,])
_ENCOUNTERTUTORIALCOMPLETERESPONSE_RESULT = _descriptor.EnumDescriptor(
name='Result',
full_name='pogoprotos.networking.responses.EncounterTutorialCompleteResponse.Result',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSET', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SUCCESS', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERROR_INVALID_POKEMON', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=435,
serialized_end=494,
)
_sym_db.RegisterEnumDescriptor(_ENCOUNTERTUTORIALCOMPLETERESPONSE_RESULT)
_ENCOUNTERTUTORIALCOMPLETERESPONSE = _descriptor.Descriptor(
name='EncounterTutorialCompleteResponse',
full_name='pogoprotos.networking.responses.EncounterTutorialCompleteResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='pogoprotos.networking.responses.EncounterTutorialCompleteResponse.result', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pokemon_data', full_name='pogoprotos.networking.responses.EncounterTutorialCompleteResponse.pokemon_data', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='capture_award', full_name='pogoprotos.networking.responses.EncounterTutorialCompleteResponse.capture_award', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_ENCOUNTERTUTORIALCOMPLETERESPONSE_RESULT,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=193,
serialized_end=494,
)
_ENCOUNTERTUTORIALCOMPLETERESPONSE.fields_by_name['result'].enum_type = _ENCOUNTERTUTORIALCOMPLETERESPONSE_RESULT
_ENCOUNTERTUTORIALCOMPLETERESPONSE.fields_by_name['pokemon_data'].message_type = pogoprotos_dot_data_dot_pokemon__data__pb2._POKEMONDATA
_ENCOUNTERTUTORIALCOMPLETERESPONSE.fields_by_name['capture_award'].message_type = pogoprotos_dot_data_dot_capture_dot_capture__award__pb2._CAPTUREAWARD
_ENCOUNTERTUTORIALCOMPLETERESPONSE_RESULT.containing_type = _ENCOUNTERTUTORIALCOMPLETERESPONSE
DESCRIPTOR.message_types_by_name['EncounterTutorialCompleteResponse'] = _ENCOUNTERTUTORIALCOMPLETERESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
EncounterTutorialCompleteResponse = _reflection.GeneratedProtocolMessageType('EncounterTutorialCompleteResponse', (_message.Message,), dict(
DESCRIPTOR = _ENCOUNTERTUTORIALCOMPLETERESPONSE,
__module__ = 'pogoprotos.networking.responses.encounter_tutorial_complete_response_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.networking.responses.EncounterTutorialCompleteResponse)
))
_sym_db.RegisterMessage(EncounterTutorialCompleteResponse)
# @@protoc_insertion_point(module_scope)
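# Illustrative round-trip sketch (added; not part of the generated file). Field
# and enum names come from the descriptor above; serialization uses the standard
# protobuf Python API.
if __name__ == "__main__":
    response = EncounterTutorialCompleteResponse()
    response.result = EncounterTutorialCompleteResponse.SUCCESS
    wire_bytes = response.SerializeToString()
    decoded = EncounterTutorialCompleteResponse()
    decoded.ParseFromString(wire_bytes)
    assert decoded.result == EncounterTutorialCompleteResponse.SUCCESS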
# Source repository: carthage-college/django-djbeca
# -*- coding: utf-8 -*-
import datetime
from django import forms
from django.conf import settings
from django.contrib.auth.models import User
from djauth.managers import LDAPManager
from djbeca.core import choices
from djbeca.core.models import GenericChoice
from djbeca.core.models import Proposal
from djbeca.core.models import ProposalBudget
from djbeca.core.models import ProposalDocument
from djbeca.core.models import ProposalImpact
from djbeca.core.utils import get_proposals
from djimix.people.utils import get_peeps
from djtools.fields import BINARY_CHOICES
SUBCONTRACTS_CHOICES = GenericChoice.objects.filter(
tags__name__in=['Subcontracts'],
).filter(active=True).order_by('rank')
class ProposalForm(forms.ModelForm):
"""Proposal form for the data model."""
def __init__(self, department_choices, *args, **kwargs):
"""Set the department field choices."""
super(ProposalForm, self).__init__(*args, **kwargs)
self.fields['department'].choices = department_choices
# Investigator Information
# NOTE: we have name, email, ID from user profile data
department = forms.ChoiceField(
label='Department',
choices=(),
)
# NOTE "Co-Principal Investigators & Associated Institution"
# are GenericContact() Foreign Key relationships.
# Name, Institution fields [limit 5]
# Project Overview
start_date = forms.DateField(
label="Project start date",
)
end_date = forms.DateField(
label="Project end date",
)
class Meta:
"""Attributes about the form class."""
model = Proposal
exclude = (
'opened',
'closed',
'awarded',
'user',
'created_at',
'updated_at',
'email_approved',
'save_submit',
'decline',
'level3',
'comments',
)
    def clean_grant_agency_funding_source_other(self):
        """Ensure that the 'other' value is populated."""
cd = self.cleaned_data
other = cd.get('grant_agency_funding_source_other')
if cd.get('grant_agency_funding_source') == 'Other' and not other:
self.add_error(
'grant_agency_funding_source_other',
"""
Please provide additional information about the
funding source
""",
)
return other
    def clean_proposal_type_other(self):
        """Ensure that the 'other' value is populated."""
cd = self.cleaned_data
other = cd.get('proposal_type_other')
if cd.get('proposal_type') == 'Other' and not other:
self.add_error(
'proposal_type_other',
"Please provide additional information about the proposal type",
)
return other
    def clean_project_type_other(self):
        """Ensure that the 'other' value is populated."""
cd = self.cleaned_data
other = cd.get('project_type_other')
if cd.get('project_type') == 'Other' and not other:
self.add_error(
'project_type_other',
"Please provide additional information about the project type",
)
return other
class InvestigatorsForm(forms.Form):
    """Investigators form."""
name1 = forms.CharField(required=False)
name2 = forms.CharField(required=False)
name3 = forms.CharField(required=False)
name4 = forms.CharField(required=False)
name5 = forms.CharField(required=False)
institution1 = forms.CharField(required=False)
institution2 = forms.CharField(required=False)
institution3 = forms.CharField(required=False)
institution4 = forms.CharField(required=False)
institution5 = forms.CharField(required=False)
class BudgetForm(forms.ModelForm):
"""Proposal Budget form."""
class Meta:
"""Attributes about the form class."""
model = ProposalBudget
exclude = (
'proposal', 'created_at', 'updated_at',
)
class ImpactForm(forms.ModelForm):
"""Proposal impact form."""
institutional_funds = forms.TypedChoiceField(
label="""
Will institutional or departmental funds be used in this proposal?
""",
choices=BINARY_CHOICES,
widget=forms.RadioSelect(),
)
indirect_funds_solicitation = forms.TypedChoiceField(
label="Does the sponsor allow the inclusion of indirect in the budget?",
choices=BINARY_CHOICES,
widget=forms.RadioSelect(),
)
subcontracts = forms.ModelMultipleChoiceField(
label="Does your proposal include any of the following?",
queryset=SUBCONTRACTS_CHOICES,
widget=forms.CheckboxSelectMultiple(),
help_text="Check all that apply.",
required=False,
)
subaward_monitoring = forms.TypedChoiceField(
label="Sub Award Monitoring",
choices=choices.BINARY_CHOICES,
widget=forms.RadioSelect(),
)
human_subjects = forms.TypedChoiceField(
label="IRB (Human Subjects Research)",
choices=BINARY_CHOICES,
widget=forms.RadioSelect(),
)
animal_subjects = forms.TypedChoiceField(
label="IACUC (Animal Research)",
choices=BINARY_CHOICES,
widget=forms.RadioSelect(),
)
students_involved = forms.TypedChoiceField(
label="Student Employment or Work Study",
choices=choices.BINARY_CHOICES,
widget=forms.RadioSelect(),
)
students_stipends = forms.TypedChoiceField(
label="Student stipends",
choices=choices.BINARY_CHOICES,
widget=forms.RadioSelect(),
)
personnel_salary = forms.TypedChoiceField(
label="Job posting, hiring, salary/wage changes",
choices=choices.BINARY_CHOICES,
widget=forms.RadioSelect(),
)
marketing = forms.TypedChoiceField(
label="Brochures, PR, websites",
choices=choices.BINARY_CHOICES,
widget=forms.RadioSelect(),
)
contract_procurement = forms.TypedChoiceField(
label="Contract Review and Negotiation",
choices=choices.BINARY_CHOICES,
widget=forms.RadioSelect(),
)
data_management = forms.TypedChoiceField(
label="Institutional Data",
choices=choices.BINARY_CHOICES,
widget=forms.RadioSelect(),
)
new_hires = forms.TypedChoiceField(
label="Will this project create a new position at Carthage?",
choices=choices.BINARY_CHOICES,
widget=forms.RadioSelect(),
)
course_relief = forms.TypedChoiceField(
label="""
Will this project require that your department hire someone
to teach the courses you are scheduled to teach
or any other type of course relief?
""",
choices=BINARY_CHOICES,
widget=forms.RadioSelect(),
)
infrastructure_requirements = forms.TypedChoiceField(
label="Is new or renovated space required?",
choices=choices.BINARY_CHOICES,
widget=forms.RadioSelect(),
)
international = forms.TypedChoiceField(
label="International or off-campus studies",
choices=choices.BINARY_CHOICES,
widget=forms.RadioSelect(),
)
event_services = forms.TypedChoiceField(
label="Conferences and event services",
choices=choices.BINARY_CHOICES,
widget=forms.RadioSelect(),
)
financial_aid = forms.TypedChoiceField(
label="Financial aid / scholarships",
choices=choices.BINARY_CHOICES,
widget=forms.RadioSelect(),
)
tech_support = forms.TypedChoiceField(
label="Computer support, computer equipment, data management needs",
choices=choices.BINARY_CHOICES,
widget=forms.RadioSelect(),
)
purchase_equipment = forms.TypedChoiceField(
label="Equipment Purchases (over $5000)",
choices=choices.BINARY_CHOICES,
widget=forms.RadioSelect(),
)
admin_comments = forms.CharField(
widget=forms.Textarea,
required=False,
help_text="""
Provide any administrative comments that you might want
others to consider.
""",
)
disclosure_assurance = forms.BooleanField(required=True)
class Meta:
"""Attributes about the form class."""
model = ProposalImpact
exclude = (
'proposal',
'created_at',
'updated_at',
'level3',
'level2',
'level1',
)
def clean(self):
"""Form validation for various fields."""
cd = self.cleaned_data
for key in list(cd.keys()):
if '_detail' in key:
radio = cd.get(key.split('_detail')[0])
error = (
radio
and (radio == 'Yes' or 'Student' in radio)
and not cd.get(key)
)
if error:
self.add_error(key, "Please provide additional information")
return cd
class DocumentForm(forms.ModelForm):
"""Proposal documents form."""
def __init__(self, *args, **kwargs):
"""Add placeholder value to fields."""
super(DocumentForm, self).__init__(*args, **kwargs)
self.fields['name'].widget.attrs['placeholder'] = 'Name or short description'
class Meta:
"""Attributes about the form class."""
model = ProposalDocument
fields = ('name', 'phile')
class CommentsForm(forms.Form):
"""Proposal comments form."""
comments = forms.CharField(
widget=forms.Textarea,
required=False,
help_text="Provide any additional comments if need be",
)
class ProposalApproverForm(forms.Form):
"""Proposal approver form."""
def __init__(self, *args, **kwargs):
"""Set up choices for select field."""
user = kwargs.pop('user')
super(ProposalApproverForm, self).__init__(*args, **kwargs)
# populate the approvers select field with faculty/staff
facstaff = get_peeps('facstaff')
approvers = [('', '-----------')]
username = None
for fac in facstaff:
if username != fac['username']:
name = '{0}, {1}'.format(fac['lastname'], fac['firstname'])
approvers.append((fac['username'], name))
username = fac['username']
self.fields['user'].choices = approvers
# populate the proposals select field
proposals = get_proposals(user)
if proposals['objects']:
props = [('', '-----------')]
for prop in proposals['objects']:
title = '{0}: by {1}, {2}'.format(
prop.title, prop.user.last_name, prop.user.first_name,
)
props.append((prop.id, title))
self.fields['proposal'].choices = props
else:
self.fields['proposal'].widget.attrs['class'] = 'error'
user = forms.ChoiceField(label="Faculty/Staff", choices=())
proposal = forms.ChoiceField(label="Proposal", choices=())
def clean(self):
"""Check for a valid proposal, user, and if approver already exists."""
cd = self.cleaned_data
username = cd.get('user')
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
# create a new user
eldap = LDAPManager()
result_data = eldap.search(username, field='cn')
groups = eldap.get_groups(result_data)
user = eldap.dj_create(result_data, groups=groups)
proposal = Proposal.objects.filter(pk=cd.get('proposal')).first()
if proposal:
for approver in proposal.approvers.all():
if approver.user == user:
self.add_error('user', "That user is already an approver.")
else:
self.add_error('proposal', "That is not a valid proposal")
return cd
class EmailInvestigatorForm(forms.Form):
"""Send an email to investigator form."""
content = forms.CharField(widget=forms.Textarea, label="Email content")
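# Illustrative view-level usage sketch (added; not part of the original module).
# The department choices and view wiring are hypothetical; only the form API
# calls (is_valid, save) follow standard Django ModelForm usage.
def _example_proposal_view(request):
    department_choices = [('CHEM', 'Chemistry'), ('BIO', 'Biology')]  # placeholder values
    form = ProposalForm(department_choices, request.POST or None)
    if request.method == 'POST' and form.is_valid():
        proposal = form.save(commit=False)
        proposal.user = request.user  # excluded from the form, so set it here
        proposal.save()
    return form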
# __init__.py
__version__ = "0.1.0"
__author__ = "<NAME>"
"""
The :mod:`scifin.timeseries` module includes methods for time series analysis.
"""
from .timeseries import (Series, TimeSeries, CatTimeSeries,
get_list_timezones, build_from_csv, build_from_dataframe, build_from_list, build_from_lists,
linear_tvalue, bins_from_trend, tick_imbalance, imbalance, multi_plot, multi_plot_distrib)
from .randomseries import (constant, auto_regressive, random_walk, drift_random_walk, moving_average,
arma, rca, arch, garch, charma)
# Note: `census_df` is assumed to be a pandas DataFrame of county-level census
# data loaded earlier in the notebook (e.g. census_df = pd.read_csv('census.csv')).
def answer_five():
    # Return the state (STNAME) that contains the most counties.
    return census_df.groupby('STNAME')['COUNTY'].count().idxmax()
answer_five()
# Source repository: duttaprat/proteinGAN
"""The discriminator of WGAN."""
import tensorflow as tf
from common.model import ops
from model.ops import block
def discriminator_fully_connected(x, labels, df_dim, number_classes, kernel=(3, 3), strides=(2, 2), dilations=(1, 1),
pooling='avg', update_collection=None, act=tf.nn.relu, scope_name='Discriminator',
reuse=False):
with tf.variable_scope(scope_name) as scope:
if reuse:
scope.reuse_variables()
x = tf.layers.flatten(x)
x = tf.layers.dense(x, df_dim, name="dense_1")
x = tf.nn.leaky_relu(x)
tf.summary.histogram(x.name, x)
output = tf.layers.dense(x, 1, name="dense_2")
tf.summary.histogram(output.name, output)
return output
def original_discriminator(x, labels, df_dim, number_classes, kernel=(3, 3), strides=(2, 2), dilations=(1, 1),
pooling='avg', update_collection=None, act=tf.nn.relu, scope_name='Discriminator',
reuse=False):
with tf.variable_scope(scope_name) as scope:
if reuse:
scope.reuse_variables()
conv1 = tf.layers.conv2d(
inputs=x,
filters=df_dim / 4,
kernel_size=[3, 3],
strides=(2, 2),
padding="same",
activation=tf.nn.leaky_relu,
name="dconv1")
tf.summary.histogram(conv1.name, conv1)
# Convolutional Layer #2
conv2 = tf.layers.conv2d(
inputs=conv1,
filters=df_dim / 2,
kernel_size=[3, 3],
strides=(2, 2),
padding="same",
activation=tf.nn.leaky_relu,
name="dconv2")
tf.summary.histogram(conv2.name, conv2)
conv3 = tf.layers.conv2d(
inputs=conv2,
filters=df_dim,
kernel_size=[3, 3],
strides=(2, 2),
padding="same",
activation=tf.nn.leaky_relu,
name="dconv3")
tf.summary.histogram(conv3.name, conv3)
flat = tf.layers.flatten(conv3, name="dflat")
output = tf.layers.dense(inputs=flat,
activation=None,
units=1,
name="doutput")
output = tf.reshape(output, [-1])
tf.summary.histogram(output.name, output)
return output
def discriminator_resnet(x, labels, df_dim, number_classes, kernel=(3, 3), strides=(2, 2), dilations=(1, 1),
pooling='avg', update_collection=None, act=tf.nn.relu, scope_name='Discriminator',
reuse=False):
with tf.variable_scope(scope_name) as scope:
if reuse:
scope.reuse_variables()
h0 = block(x, df_dim, 'd_optimized_block1', act=act) # 12 * 12
h1 = block(h0, df_dim * 2, 'd_block2', act=act) # 6 * 6
h2 = block(h1, df_dim * 4, 'd_block3', act=act) # 3 * 3
tf.summary.histogram(h2.name, h2)
# h3 = block(h2, df_dim * 4, 'd_block4', update_collection, act=act) # 8 * 8 # 3*12
# h4 = block(h3, df_dim * 8, 'd_block5', update_collection, act=act) # 3*6
h5 = block(h2, df_dim * 8, 'd_block6', False, act=act)
h5_act = act(h5)
tf.summary.histogram(h5_act.name, h5_act)
h6 = tf.reduce_sum(h5_act, [1, 2])
output = ops.linear(h6, 1, scope='d_linear')
tf.summary.histogram(output.name, output)
return output
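# Minimal graph-building sketch (added; not part of the original module). Assumes
# a TensorFlow 1.x graph context, as implied by tf.layers/tf.variable_scope above;
# the input shape and df_dim below are illustrative only.
if __name__ == "__main__":
    fake_or_real = tf.placeholder(tf.float32, [None, 12, 12, 1], name="d_input")
    logits = discriminator_fully_connected(
        fake_or_real, labels=None, df_dim=64, number_classes=2)
    print(logits)  # Tensor of shape [batch, 1] holding the critic scores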
from utils.decorators import timer, debug
from utils.task import Task
def all_orientations():
orientations = []
for facing in range(6):
for rotation in range(4):
orientations.append((facing, rotation))
return orientations
def transform_position(pos, orientation, rotation):
# Credit to SwampThingTom
pos = orientate(pos, orientation)
pos = rotate(pos, rotation)
return pos
def rotate(pos, rotation):
x, y, z = pos
if rotation == 0:
return x, y, z
elif rotation == 1:
return z, y, -x
elif rotation == 2:
return -x, y, -z
elif rotation == 3:
return -z, y, x
def orientate(pos, orientation):
x, y, z = pos
if orientation == 0:
return x, y, z
elif orientation == 1:
# (0, -1, 0)
return x, -y, -z
elif orientation == 2:
# (1, 0, 0)
return y, x, -z
elif orientation == 3:
# (-1, 0, 0)
return y, -x, z
elif orientation == 4:
# (0, 0, 1)
return y, z, x
elif orientation == 5:
# (0, 0, -1)
return y, -z, -x
class Beacon:
def __init__(self, x: int, y: int, z: int):
# Position is relative to scanner that found this beacon
self.x = x
self.y = y
self.z = z
self.neighbor_distance = []
def __repr__(self):
return f"({self.x}, {self.y}, {self.z})"
def __eq__(self, other):
"""
Equality of absolute beacons
"""
return self.x == other.x and self.y == other.y and self.z == other.z
def add_neighbor(self, beacon):
"""
Add a neighbor, FOUND BY THE SAME SCANNER
:param beacon: Another beacon
"""
# distance = (self.x - beacon.x) ** 2 + (self.y - beacon.y) ** 2 + (self.z - beacon.z) ** 2
distance = [abs(self.x - beacon.x), abs(self.y - beacon.y), abs(self.z - beacon.z)]
self.neighbor_distance.append(distance)
def equal_beacons(self, other):
"""
Two beacons are considered equal if they have 11 or more similar distances
:param other: Another beacon
:return: True if the two beacons have 11 or more similar distances, False otherwise
"""
num_duplicates = 0
for distance in self.neighbor_distance:
for dist in other.neighbor_distance:
if self.equal_distance(dist, distance):
num_duplicates += 1
return num_duplicates > 10
def absolute_position(self, scanner: 'Scanner') -> tuple:
"""
:param scanner: Absolute scanner
:return:
"""
new_pos = transform_position((self.x, self.y, self.z), scanner.orientation[0], scanner.orientation[1])
x = scanner.x + new_pos[0]
y = scanner.y + new_pos[1]
z = scanner.z + new_pos[2]
return x, y, z
@staticmethod
def equal_distance(distance1: list, distance2: list) -> bool:
for dist in distance1:
if dist not in distance2:
return False
return True
# def angle_position(self, axis, orientation):
# """
# Return the value of the given axis (axis 0 = x, axis 1 = y, axis 2 = z) considering the given angle
# """
# index = [abs(val) for val in angle].index(axis)
# if index == 0:
# return self.x if angle[index] >= 0 else -self.x
# if index == 1:
# return self.y if angle[index] >= 0 else -self.y
# if index == 2:
# return self.z if angle[index] >= 0 else -self.z
class Scanner:
def __init__(self, number: int):
self.num = number
self.x = None
self.y = None
self.z = None
self.orientation = None
self.overlapping_scanners = {}
self.beacons = []
self.has_processed = False
def __repr__(self):
return f"<Scanner ({self.x}, {self.y}, {self.z}, {self.orientation}): {str(self.beacons)}>"
def set_position(self, x: int, y: int, z: int) -> None:
self.x = x
self.y = y
self.z = z
def add_beacon(self, x: int, y: int, z: int) -> None:
new_beacon = Beacon(x, y, z)
for beacon in self.beacons:
beacon.add_neighbor(new_beacon)
new_beacon.add_neighbor(beacon)
self.beacons.append(new_beacon)
def determine_overlap(self, other: 'Scanner') -> bool:
"""
Determine overlapping beacons by finding at least 12 common beacons in sensor range
Find patterns in beacons of one scanner that match patterns in the other scanner
        Patterns can be found by looking at the distances between beacons
if two beacons from each scanner have many similar distances, then they might be the same beacon
:param other: Another scanner
:return: True if there are at least 12 common beacons, False otherwise
"""
num_beacons = 0
stop = False
for i, beacon1 in enumerate(self.beacons):
# Time save, since this function takes the most time due to repeated looping
if stop or i > len(self.beacons) - 15:
break
for beacon2 in other.beacons:
if beacon1.equal_beacons(beacon2):
if other not in self.overlapping_scanners:
self.overlapping_scanners[other] = []
other.overlapping_scanners[self] = []
self.overlapping_scanners[other].append([beacon1, beacon2])
other.overlapping_scanners[self].append([beacon2, beacon1])
num_beacons += 1
if num_beacons == 2:
stop = True
break
if num_beacons > 0:
return True
return False
def determine_absolute_position(self, scanner: 'Scanner'):
"""
Assign absolute x, y and z values to the scanner, based on another absolute scanner and a common beacon
First, identify which way the scanner faces and its rotation
Then, identify its location
:param scanner: An absolute scanner
"""
solution = None
same_beacons = self.overlapping_scanners[scanner]
orientations = all_orientations()
for orientation in orientations:
pair1 = 0
pair2 = 1
# First pair produces possible positions of the scanner
unknown_beacon = same_beacons[pair1][0]
known_beacon = same_beacons[pair1][1]
loc = self.absolute_scanner_position(orientation, unknown_beacon, known_beacon, scanner)
# Check 2nd pair to find position
unknown_beacon = same_beacons[pair2][0]
known_beacon = same_beacons[pair2][1]
new_loc = self.absolute_scanner_position(orientation, unknown_beacon, known_beacon, scanner)
if loc == new_loc:
solution = (loc, orientation)
break
# print(f"Scanner {self.num} from scanner {scanner.num}: {solution}")
angle = solution[1]
coords = solution[0]
self.orientation = angle
self.x = coords[0]
self.y = coords[1]
self.z = coords[2]
@staticmethod
def absolute_scanner_position(orientation: tuple, beacon: Beacon, known_beacon: Beacon, known_scanner: 'Scanner'):
"""
        Compute the candidate position of the unknown scanner, based on a matched beacon pair and an orientation.
        The known beacon belongs to a scanner with known position and orientation.
:param orientation: The orientation used to orient the unknown beacon
:param beacon: The beacon used to determine the expected location of the unknown scanner
:param known_beacon: The corresponding beacon from a known scanner
:param known_scanner: The known scanner associated with the known beacon
:return:
"""
oriented_beacon = transform_position((beacon.x, beacon.y, beacon.z), orientation[0], orientation[1])
x = known_beacon.absolute_position(known_scanner)[0] - oriented_beacon[0]
y = known_beacon.absolute_position(known_scanner)[1] - oriented_beacon[1]
z = known_beacon.absolute_position(known_scanner)[2] - oriented_beacon[2]
return [x, y, z]
class Task19(Task):
# Task constants
YEAR = 2021
TASK_NUM = 19
def preprocess(self, data: list) -> list:
output = []
scanner = []
i = 0
for element in data:
if len(element) == 4:
scanner = Scanner(i)
i += 1
elif element == "":
output.append(scanner)
else:
x, y, z = element.split(',')
scanner.add_beacon(int(x), int(y), int(z))
output.append(scanner)
output[0].set_position(0, 0, 0)
output[0].orientation = (0, 0)
return output
@debug
@timer(YEAR, TASK_NUM)
def part_1(self, data: list) -> int:
# Find which scanners overlap
for i, scanner1 in enumerate(data):
for j, scanner2 in enumerate(data):
if i >= j:
continue
scanner1.determine_overlap(scanner2)
# Convert all scanners and beacons to absolute
absolute_beacons = []
scanners_to_do = [data[0]]
data[0].has_processed = True
while len(scanners_to_do) != 0:
scanner = scanners_to_do.pop()
# Add beacons of scanner to list of absolute beacons
for beacon in scanner.beacons:
x, y, z = beacon.absolute_position(scanner)
absolute = Beacon(x, y, z)
# print(f"Absolute beacon: {absolute}")
if absolute not in absolute_beacons:
absolute_beacons.append(absolute)
# Find absolute position of overlapping scanners and add in queue
for scan in scanner.overlapping_scanners:
if scan.has_processed is False:
scan.has_processed = True
scan.determine_absolute_position(scanner)
scanners_to_do.append(scan)
return len(absolute_beacons)
@debug
@timer(YEAR, TASK_NUM)
def part_2(self, data: list) -> int:
# Find which scanners overlap
for i, scanner1 in enumerate(data):
for j, scanner2 in enumerate(data):
if i >= j:
continue
scanner1.determine_overlap(scanner2)
# Convert all scanners and beacons to absolute
scanners_to_do = [data[0]]
data[0].has_processed = True
while len(scanners_to_do) != 0:
scanner = scanners_to_do.pop()
# Find absolute position of overlapping scanners and add in queue
for scan in scanner.overlapping_scanners:
if scan.has_processed is False:
scan.has_processed = True
scan.determine_absolute_position(scanner)
scanners_to_do.append(scan)
# Find largest manhattan distance between scanners
max_distance = 0
for scanner1 in data:
for scanner2 in data:
distance = self.manhattan_distance(scanner1, scanner2)
if distance > max_distance:
max_distance = distance
return max_distance
@staticmethod
def manhattan_distance(scanner1: Scanner, scanner2: Scanner) -> int:
return abs(scanner1.x - scanner2.x) + abs(scanner1.y - scanner2.y) + abs(scanner1.z - scanner2.z)
if __name__ == "__main__":
# Load task
t = Task19()
# Run task
t.run_all()
from loguru import logger
from fastapi import Request
from fastapi.responses import JSONResponse
from starlette import status
from utils.constant.ResponseCode import ResponseCodeType
class BaseDatabaseException(Exception):
pass
class TableCreateException(BaseDatabaseException):
pass
class DatabaseCreateException(BaseDatabaseException):
pass
async def table_create_exception_handler(request: Request, exc: TableCreateException):
logger.warning(str(exc))
return JSONResponse(status_code=status.HTTP_202_ACCEPTED,
content={"code": ResponseCodeType.DataBaseCreateError, "message": str(exc)})
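# Illustrative wiring sketch (added; not part of the original module): a FastAPI
# app registers the handler for the custom exception like this. The app object
# below is hypothetical.
if __name__ == "__main__":
    from fastapi import FastAPI
    app = FastAPI()
    app.add_exception_handler(TableCreateException, table_create_exception_handler)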
"""
A customer walks into a store. Do the steps to interact with them:
- Get *a* (not *the*) greeter
- Interact with them
Simple wired application:
- Settings that say what punctuation to use
- Registry
- Two factories that say hello, one for the FrenchCustomer context
- A default Customer and FrenchCustomer
"""
from dataclasses import dataclass
from wired import ServiceRegistry
@dataclass
class Customer:
name: str
@dataclass
class FrenchCustomer(Customer):
pass
@dataclass
class Settings:
punctuation: str
@dataclass
class Greeter:
punctuation: str
greeting: str = 'Hello'
def __call__(self, customer: Customer) -> str:
return f'{self.greeting} {customer.name} {self.punctuation}'
@dataclass
class FrenchGreeter(Greeter):
greeting: str = 'Bonjour'
def __call__(self, customer: Customer) -> str:
return f'{self.greeting} {customer.name} {self.punctuation}'
def setup(settings: Settings) -> ServiceRegistry:
# Make the registry
registry = ServiceRegistry()
# Make the greeter factories, using punctuation from settings
punctuation = settings.punctuation
# First the default greeter, no context
def default_greeter_factory(container) -> Greeter:
# Use the dataclass default for greeting
return Greeter(punctuation=punctuation)
# Register it as a factory using its class for the "key"
registry.register_factory(default_greeter_factory, Greeter)
# Now the French greeter, using context of FrenchCustomer
def french_greeter_factory(container) -> Greeter:
# Use the dataclass default for greeting
return FrenchGreeter(punctuation=punctuation)
# Register it as a factory using its class for the "key", but
# this time register with a "context"
registry.register_factory(
french_greeter_factory, Greeter, context=FrenchCustomer
)
return registry
def greet_customer(registry: ServiceRegistry, customer: Customer) -> str:
# A customer comes in, handle the steps in the greeting
# as a container.
container = registry.create_container()
# Get a Greeter using the customer as context. Use the Customer when
# generating the greeting.
greeter: Greeter = container.get(Greeter, context=customer)
greeting = greeter(customer)
return greeting
def main():
settings = Settings(punctuation='!!')
registry = setup(settings)
# *** Default Customer
# Make a Customer, pass into the "greet_customer" interaction,
# then test the result.
customer = Customer(name='Mary')
assert 'Hello Mary !!' == greet_customer(registry, customer)
# *** French Customer
# Make a FrenchCustomer, pass into the "greet_customer" interaction,
# then test the result.
french_customer = FrenchCustomer(name='Henri')
assert 'Bonjour Henri !!' == greet_customer(registry, french_customer)
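# Added runner (the original file defines main() but never calls it); guarded so
# the example only executes when run as a script.
if __name__ == '__main__':
    main()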
import hashlib
import bcrypt
from pymongo import MongoClient
from online_judge.db import db
user_collection = db['users']
class User(object):
@staticmethod
def exists(username):
return user_collection.find_one({'username': username}) is not None
def __init__(self, username, password=None, salt=None):
user = user_collection.find_one({'username': username})
if not user:
# Create
self.username = username
self.salt = bcrypt.gensalt()
            self.password = hashlib.sha512(password.encode("utf-8") + self.salt).hexdigest()  # encode so str + bytes salt works on Python 3
self.save()
else:
# Load
self.username = user['username']
self.salt = user['salt']
self.password = user['password']
def save(self):
user_collection.update_one({'username': self.username},
                                   {'$set': {'username': self.username, 'password': self.password, 'salt': self.salt}},
True)
def delete(self):
user_collection.delete_one({'username': self.username})
def verify(self, password):
        return self.password == hashlib.sha512(password.encode("utf-8") + self.salt).hexdigest()
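# Illustrative usage sketch (added; not part of the original module). Requires a
# reachable MongoDB instance behind online_judge.db; the username and password
# below are placeholders.
if __name__ == "__main__":
    if not User.exists("alice"):
        User("alice", password="correct horse battery staple")
    user = User("alice")
    print(user.verify("correct horse battery staple"))  # True when the password matches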
# Copyright 2018/2019 The RLgraph authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
from rlgraph.environments import Environment
from six.moves import queue
from threading import Thread
from rlgraph import get_distributed_backend
from rlgraph.agents import Agent
from rlgraph.execution.ray import RayValueWorker
from rlgraph.execution.ray.apex.ray_memory_actor import RayMemoryActor
from rlgraph.execution.ray.ray_executor import RayExecutor
from rlgraph.execution.ray.ray_util import create_colocated_ray_actors, RayTaskPool, RayWeight
if get_distributed_backend() == "ray":
import ray
class ApexExecutor(RayExecutor):
"""
Implements the distributed update semantics of distributed prioritized experience replay (Ape-X),
as described in:
https://arxiv.org/abs/1803.00933
"""
def __init__(self, environment_spec, agent_config, discard_queued_samples=False):
"""
Args:
environment_spec (dict, callable): Environment spec or callable creating
an environment. Each worker in the cluster will instantiate
an environment using this spec.
agent_config (dict): Config dict containing agent and execution specs.
discard_queued_samples (bool): If true, discard samples if the learner queue is full instead
of blocking until free.
"""
ray_spec = agent_config["execution_spec"].pop("ray_spec")
self.apex_replay_spec = ray_spec.pop("apex_replay_spec")
self.worker_spec = ray_spec.pop("worker_spec")
self.discard_queued_samples = discard_queued_samples
super(ApexExecutor, self).__init__(executor_spec=ray_spec.pop("executor_spec"),
environment_spec=environment_spec,
worker_spec=self.worker_spec)
# Must specify an agent type.
assert "type" in agent_config
self.agent_config = agent_config
# These are the Ray remote tasks which sample batches from the replay memory
# and pass them to the learner.
self.prioritized_replay_tasks = RayTaskPool()
self.replay_sampling_task_depth = self.executor_spec["replay_sampling_task_depth"]
self.replay_batch_size = self.agent_config["update_spec"]["batch_size"]
self.num_cpus_per_replay_actor = self.executor_spec.get("num_cpus_per_replay_actor",
self.replay_sampling_task_depth)
# How often weights are synced to remote workers.
self.weight_sync_steps = self.executor_spec["weight_sync_steps"]
# Necessary for target network updates.
self.weight_syncs_executed = 0
self.steps_since_weights_synced = {}
# These are the tasks actually interacting with the environment.
self.env_sample_tasks = RayTaskPool()
self.env_interaction_task_depth = self.executor_spec["env_interaction_task_depth"]
self.worker_sample_size = self.executor_spec["num_worker_samples"] + self.worker_spec["n_step_adjustment"] - 1
assert not ray_spec, "ERROR: ray_spec still contains items: {}".format(ray_spec)
self.logger.info("Setting up execution for Apex executor.")
self.setup_execution()
def setup_execution(self):
# Create local worker agent according to spec.
# Extract states and actions space.
environment = None
if isinstance(self.environment_spec, dict):
environment = Environment.from_spec(self.environment_spec)
elif hasattr(self.environment_spec, '__call__'):
environment = self.environment_spec()
self.agent_config["state_space"] = environment.state_space
self.agent_config["action_space"] = environment.action_space
# Start Ray cluster and connect to it.
self.local_agent = Agent.from_spec(self.agent_config)
# Set up worker thread for performing updates.
self.update_worker = UpdateWorker(
agent=self.local_agent,
in_queue_size=self.executor_spec["learn_queue_size"]
)
self.ray_init()
# Create remote sample workers based on ray cluster spec.
self.num_replay_workers = self.executor_spec["num_replay_workers"]
self.num_sample_workers = self.executor_spec["num_sample_workers"]
self.logger.info("Initializing {} local replay memories.".format(self.num_replay_workers))
# Update memory size for num of workers
shard_size = int(self.apex_replay_spec["memory_spec"]["capacity"] / self.num_replay_workers)
self.apex_replay_spec["memory_spec"]["capacity"] = shard_size
self.logger.info("Shard size per memory: {}".format(self.apex_replay_spec["memory_spec"]["capacity"]))
min_sample_size = self.apex_replay_spec["min_sample_memory_size"]
self.apex_replay_spec["min_sample_memory_size"] = int(min_sample_size / self.num_replay_workers)
self.logger.info("Sampling for learning starts at: {}".format( self.apex_replay_spec["min_sample_memory_size"]))
# Set sample batch size:
self.apex_replay_spec["sample_batch_size"] = self.agent_config["update_spec"]["batch_size"]
self.logger.info("Sampling batch size {}".format(self.apex_replay_spec["sample_batch_size"]))
self.ray_local_replay_memories = create_colocated_ray_actors(
cls=RayMemoryActor.as_remote(num_cpus=self.num_cpus_per_replay_actor),
config=self.apex_replay_spec,
num_agents=self.num_replay_workers
)
# Create remote workers for data collection.
self.worker_spec["worker_sample_size"] = self.worker_sample_size
self.logger.info("Initializing {} remote data collection agents, sample size: {}".format(
self.num_sample_workers, self.worker_spec["worker_sample_size"]))
self.ray_env_sample_workers = self.create_remote_workers(
RayValueWorker, self.num_sample_workers, self.agent_config,
# *args
self.worker_spec, self.environment_spec, self.worker_frame_skip
)
self.init_tasks()
def init_tasks(self):
# Start learner thread.
self.update_worker.start()
# Prioritized replay sampling tasks via RayAgents.
for ray_memory in self.ray_local_replay_memories:
for _ in range(self.replay_sampling_task_depth):
# This initializes remote tasks to sample from the prioritized replay memories of each worker.
self.prioritized_replay_tasks.add_task(ray_memory, ray_memory.get_batch.remote())
# Env interaction tasks via RayWorkers which each
# have a local agent.
weights = RayWeight(self.local_agent.get_weights())
for ray_worker in self.ray_env_sample_workers:
ray_worker.set_weights.remote(weights)
self.steps_since_weights_synced[ray_worker] = 0
self.logger.info("Synced worker {} weights, initializing sample tasks.".format(
self.worker_ids[ray_worker]))
for _ in range(self.env_interaction_task_depth):
self.env_sample_tasks.add_task(ray_worker, ray_worker.execute_and_get_with_count.remote())
def _execute_step(self):
"""
Executes a workload on Ray. The main loop performs the following
steps until the specified number of steps or episodes is finished:
- Retrieve sample batches via Ray from remote workers
- Insert these into the local memory
- Have a separate learn thread sample batches from the memory and compute updates
        - Sync weights to the shared model so remote workers can update their weights.
"""
# Env steps done during this rollout.
env_steps = 0
update_steps = 0
discarded = 0
queue_inserts = 0
weights = None
# 1. Fetch results from RayWorkers.
completed_sample_tasks = list(self.env_sample_tasks.get_completed())
sample_batch_sizes = ray.get([task[1][1] for task in completed_sample_tasks])
for i, (ray_worker, (env_sample_obj_id, sample_size)) in enumerate(completed_sample_tasks):
# Randomly add env sample to a local replay actor.
random.choice(self.ray_local_replay_memories).observe.remote(env_sample_obj_id)
sample_steps = sample_batch_sizes[i]
env_steps += sample_steps
self.steps_since_weights_synced[ray_worker] += sample_steps
if self.steps_since_weights_synced[ray_worker] >= self.weight_sync_steps:
if weights is None or self.update_worker.update_done:
self.update_worker.update_done = False
weights = ray.put(RayWeight(self.local_agent.get_weights()))
# self.logger.debug("Syncing weights for worker {}".format(self.worker_ids[ray_worker]))
# self.logger.debug("Weights type: {}, weights = {}".format(type(weights), weights))
ray_worker.set_weights.remote(weights)
self.weight_syncs_executed += 1
self.steps_since_weights_synced[ray_worker] = 0
# Reschedule environment samples.
self.env_sample_tasks.add_task(ray_worker, ray_worker.execute_and_get_with_count.remote())
# 2. Fetch completed replay priority sampling task, move to worker, reschedule.
for ray_memory, replay_remote_task in self.prioritized_replay_tasks.get_completed():
# Immediately schedule new batch sampling tasks on these workers.
self.prioritized_replay_tasks.add_task(ray_memory, ray_memory.get_batch.remote())
# Retrieve results via id.
# self.logger.info("replay task obj id {}".format(replay_remote_task))
if self.discard_queued_samples and self.update_worker.input_queue.full():
discarded += 1
else:
sampled_batch = ray.get(object_ids=replay_remote_task)
# Pass to the agent doing the actual updates.
# The ray worker is passed along because we need to update its priorities later in the subsequent
# task (see loop below).
# Copy due to memory leaks in Ray, see https://github.com/ray-project/ray/pull/3484/
self.update_worker.input_queue.put((ray_memory, sampled_batch and sampled_batch.copy()))
queue_inserts += 1
# 3. Update priorities on priority sampling workers using loss values produced by update worker.
while not self.update_worker.output_queue.empty():
ray_memory, indices, loss_per_item = self.update_worker.output_queue.get()
# self.logger.info('indices = {}'.format(batch["indices"]))
# self.logger.info('loss = {}'.format(loss_per_item))
ray_memory.update_priorities.remote(indices, loss_per_item)
# len of loss per item is update count.
update_steps += len(indices)
return env_steps, update_steps, discarded, queue_inserts
class UpdateWorker(Thread):
"""
    Executes learning separately from the main event loop, as described in the Ape-X paper.
Communicates with the main thread via a queue.
"""
def __init__(self, agent, in_queue_size):
"""
        Initializes the worker with a RLGraph agent and the size of its sample input queue.
        Args:
            agent (Agent): RLGraph agent used to execute local updates.
            in_queue_size (int): Maximum size of the input queue the worker polls sample
                batches from; results of local updates are pushed to the output queue.
"""
super(UpdateWorker, self).__init__()
# Agent to use for updating.
self.agent = agent
self.input_queue = queue.Queue(maxsize=in_queue_size)
self.output_queue = queue.Queue()
# Terminate when host process terminates.
self.daemon = True
# Flag for main thread.
self.update_done = False
def run(self):
while True:
self.step()
def step(self):
# Fetch input for update:
# Replay memory used.
memory_actor, sample_batch = self.input_queue.get()
if sample_batch is not None:
losses = self.agent.update(batch=sample_batch)
# Just pass back indices for updating.
self.output_queue.put((memory_actor, sample_batch["indices"], losses[1]))
self.update_done = True
# Source repository: QuantLet/spd_trading
import numpy as np
from sklearn.neighbors import KernelDensity
from ..utils.smoothing import bspline
def density_estimation(sample, X, h, kernel="epanechnikov"):
"""Kernel Density Estimation over the sample in domain X.
Routine for `sklearn.neighbors.KernelDensity`.
Args:
sample (np.array): Sample of observations. shape: (n_samples, n_features) List of n_features-dimensional data
points. Each row corresponds to a single data point.
X (np.array): Domain in which the density is estimated. An array of points to query. Last dimension should match
dimension of training data. shape: (n_estimates, n_features)
h (float): Bandwidth of the kernel. Needs to be chosen wisely or estimated. Sensitive parameter.
kernel (str, optional): The kernel to use for the estimation, so far only the Epanechnikov kernel is
implemented. Defaults to "epanechnikov".
Returns:
[np.array]: The array of log(density) evaluations. These are normalized to be probability densities, so values
will be low for high-dimensional data. shape: (n_estimates,)
"""
kde = KernelDensity(kernel=kernel, bandwidth=h).fit(sample.reshape(-1, 1))
log_dens = kde.score_samples(X.reshape(-1, 1))
density = np.exp(log_dens)
return density
def pointwise_density_trafo_K2M(K, q_K, S_vals, M_vals):
"""Pointwise density transformation from K (Strike Price) to M (Moneyness) domain. M = S/K
First, a spline has to be fitted to q_K, so that it is possible to extract the q_K-value at every point of
interest, not just at the known points K.
Then, it is iterated through the (M, S)-tuples and the density q_K is transformed to q_M.
Args:
        K (np.array): Strike Price values for which the density q_K is known.
        q_K (np.array): Density values in Strike Price domain.
        S_vals (array-like): Prices of underlying for the density points.
        M_vals (array-like): Moneyness values for the density points.
Returns:
[np.array]: Density values in Moneyness domain.
"""
_, q_K, _ = bspline(K, q_K, 15) # fit spline to q_K
num = len(M_vals)
q_pointsM = np.zeros(num)
# loop through (M, S)-tuples and calculate the q_M value at this point
for i, m, s in zip(range(num), M_vals, S_vals):
q_pointsM[i] = s / (m ** 2) * q_K(s / m)
return q_pointsM
def density_trafo_K2M(K, q_K, S):
"""Density transformation from K (Strike Price) to M (Moneyness) domain. M = S/K
First, a spline has to be fitted to q_K, so that it is possible to extract the q_K-value at every point of
interest, not just at the known points K.
Then, it is iterated through the (M, S)-tuples and the density q_K is transformed to q_M.
Args:
        K (np.array): Strike Price values for which the density q_K is known.
q_K (np.array): Density values in Strike Price domain.
S (array-like): Prices of underlying for the density points.
Returns:
[np.array]: Density values in Moneyness domain.
"""
_, q_K, _ = bspline(K, q_K, 30)
num = len(K)
M = np.linspace(0.5, 1.5, num)
q_M = np.zeros(num)
for i, m in enumerate(M):
q_M[i] = S / (m ** 2) * q_K(S / m)
return M, q_M
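# Minimal demo (added; not part of the original module): estimate a density from
# a synthetic normal sample. The bandwidth and grid below are illustrative.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    sample = rng.normal(loc=100.0, scale=10.0, size=500)
    X = np.linspace(60.0, 140.0, 200)
    q = density_estimation(sample, X, h=5.0)
    print(X[q.argmax()])  # the mode of the estimate should sit near loc=100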
from bancointer.bancointer import BancoInter
from decouple import config
cert = (config("PUBLIC_KEY"), config("PRIVATE_KEY"))
bi = BancoInter(config("CPFCNPJ_BENEF"), config("X-INTER-CONTA-CORRENTE"), cert)
response = bi.consulta(nosso_numero="00709421471")
print(response["situacao"])
import os
from flask import request, current_app, url_for
from flask_restful import Resource
from datetime import datetime
from flask_jwt_extended import (
jwt_required, current_user
)
from werkzeug.utils import secure_filename
from sqlalchemy.orm import aliased
from bigeye.models.user import UserRoles
from ..models.base import db
from ..models.challenge import ChallengeCategory, Challenge, ChallengeResolve
from ..schemas.schemas import challengecategory_schema, challenge_schema, challengeresolve_schema, challenges_schema, challengecategorysingle_schema
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in current_app.config['ALLOWED_EXTENSIONS']
class ChallengeCategoryListResource(Resource):
@jwt_required()
def get(self):
challenge_categories = ChallengeCategory.query.all()
categories = []
for category in challenge_categories:
category.total_challenges_resolved = ChallengeResolve.query.filter(ChallengeResolve.user_id == current_user.id).\
join(ChallengeResolve.challenge).\
filter(Challenge.category_id == category.id).count()
categories.append(category)
challenge_categories_dump = challengecategory_schema.dump(categories)
return challenge_categories_dump, 200
@jwt_required()
def post(self):
if current_user.role != UserRoles.ADMIN:
            return {'error': 'You cannot create a category. Insufficient privilege.'}, 403
dataForm = request.form
if not dataForm:
return {'error': 'No content provided'}, 400
errors = challengecategorysingle_schema.validate(dataForm)
if len(errors) > 0:
return {'error': errors}, 400
data = challengecategorysingle_schema.load(dataForm)
category = ChallengeCategory(
name=data['name']
)
db.session.add(category)
db.session.commit()
category = challengecategorysingle_schema.dump(category)
return category, 201
class ChallengeListResource(Resource):
@jwt_required()
def get(self, category_id):
category = ChallengeCategory.query.get(category_id)
if not category:
return {'error': 'Category not found'}, 404
challenges = Challenge.query.filter_by(category=category).order_by(Challenge.difficulty.asc())
challenge_list = []
for challenge in challenges:
challenge.is_resolved = ChallengeResolve.query.filter_by(user_id=current_user.id, challenge_id=challenge.id).count() > 0
challenge_list.append(challenge)
challenges_dump = challenges_schema.dump(challenge_list)
return challenges_dump, 200
@jwt_required()
def post(self, category_id):
if current_user.role != UserRoles.ADMIN:
            return {'error': 'You cannot create a challenge. Insufficient privilege.'}, 403
dataForm = request.form
if not dataForm:
return {'error': 'No content provided'}, 400
errors = challenge_schema.validate(dataForm)
if len(errors) > 0:
return {'error': errors}, 400
if 'file' not in request.files and 'link' not in dataForm:
return {'error': 'Please send a file or a link!'}, 400
if 'file' in request.files and 'link' in dataForm:
return {'error': 'Please send a file or a link, not both!'}, 400
data = challenge_schema.load(dataForm)
if 'file' in request.files:
file = request.files['file']
if file.filename == '':
return {'error': 'No file has been sent!'}, 400
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(current_app.config['UPLOAD_FOLDER'], filename))
if current_app.config['ENV'] == 'development':
resource_link = f'http://localhost:8000'
else:
resource_link = f'https://api.bigeye.codexus.fr'
resource_link += url_for('static', filename='challenges/' + filename)
else:
return {'error': 'The file extension is not allowed! Allowed: ' + ','.join(current_app.config['ALLOWED_EXTENSIONS'])}, 400
else:
resource_link = data['link']
category = ChallengeCategory.query.get(category_id)
if category is None:
return {'error': 'The given category is not found!'}, 404
challenge = Challenge(
title=data['title'],
description=data['description'],
difficulty=data['difficulty'],
flag=data['flag'],
category=category,
points=data['points'],
created_at=datetime.now(),
resource_link=resource_link,
hint=data['hint']
)
challenge.is_resolved = False
db.session.add(challenge)
db.session.commit()
challenge = challenge_schema.dump(challenge)
return challenge, 201
class ChallengeResource(Resource):
@jwt_required()
def get(self, challenge_id):
challenge = Challenge.query.get(challenge_id)
if not challenge:
return {'error': 'Challenge not found'}, 404
challenge.is_resolved = ChallengeResolve.query.filter_by(user_id=current_user.id, challenge_id=challenge.id).count() > 0
challenge_dump = challenge_schema.dump(challenge)
return challenge_dump, 200
class ChallengeResolveResource(Resource):
@jwt_required()
def post(self, challenge_id):
challenge = Challenge.query.get(challenge_id)
if not challenge:
return {'error': 'Challenge not found'}, 404
resolved = ChallengeResolve.query.filter_by(user_id=current_user.id, challenge_id=challenge.id).first()
if resolved is not None:
return {'error': 'You already solved this challenge!'}, 400
data = request.form
if not data:
return {'error': 'No flag provided'}, 400
flag = data.get('flag', None)
if not flag:
return {'error': 'No flag provided'}, 400
if flag != challenge.flag:
return {'error': 'Wrong flag! Don\'t get discouraged and persevere!'}, 400
resolved = ChallengeResolve(
user_id=current_user.id,
challenge_id=challenge.id,
points=challenge.points,
resolved_at=datetime.now()
)
db.session.add(resolved)
db.session.commit()
resolved = challengeresolve_schema.dump(resolved)
return resolved, 201
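# Illustrative route registration (added; not part of the original module). The
# URL patterns and app wiring are hypothetical; Api.add_resource is the standard
# flask_restful way these Resource classes get exposed.
if __name__ == "__main__":
    from flask import Flask
    from flask_restful import Api

    app = Flask(__name__)
    api = Api(app)
    api.add_resource(ChallengeCategoryListResource, '/categories')
    api.add_resource(ChallengeListResource, '/categories/<int:category_id>/challenges')
    api.add_resource(ChallengeResource, '/challenges/<int:challenge_id>')
    api.add_resource(ChallengeResolveResource, '/challenges/<int:challenge_id>/resolve')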
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import statistics
import igp_sync
import interfaces
import sessions
import neighbors
class rsvp(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-mpls-operational - based on the path /mpls-state/rsvp. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: MPLS RSVP Operational Information
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__protocol_status','__refresh_interval','__refresh_multiple','__transport_address','__delay_resv_sending','__backup_bandwidth_requirement','__msgid_epoch','__statistics','__igp_sync','__interfaces','__sessions','__neighbors',)
_yang_name = 'rsvp'
_rest_name = 'rsvp'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__neighbors = YANGDynClass(base=YANGListType("neighbor_ip_addr",neighbors.neighbors, yang_name="neighbors", rest_name="neighbors", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='neighbor-ip-addr', extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-neighbor', u'cli-suppress-show-path': None}}), is_container='list', yang_name="neighbors", rest_name="neighbors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-neighbor', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
self.__delay_resv_sending = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="delay-resv-sending", rest_name="delay-resv-sending", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
self.__transport_address = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="transport-address", rest_name="transport-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='inet:ipv4-address', is_config=False)
self.__msgid_epoch = YANGDynClass(base=TypedListType(allowed_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)), is_leaf=False, yang_name="msgid-epoch", rest_name="msgid-epoch", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
self.__sessions = YANGDynClass(base=YANGListType("dest_ip_addr src_ip_addr tunnel_id session_role",sessions.sessions, yang_name="sessions", rest_name="sessions", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dest-ip-addr src-ip-addr tunnel-id session-role', extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-session', u'cli-suppress-show-path': None}}), is_container='list', yang_name="sessions", rest_name="sessions", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-session', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
self.__interfaces = YANGDynClass(base=YANGListType("interface_index",interfaces.interfaces, yang_name="interfaces", rest_name="interfaces", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-index', extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-interface', u'cli-suppress-show-path': None}}), is_container='list', yang_name="interfaces", rest_name="interfaces", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-interface', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
self.__protocol_status = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="protocol-status", rest_name="protocol-status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
self.__refresh_multiple = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="refresh-multiple", rest_name="refresh-multiple", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint16', is_config=False)
self.__statistics = YANGDynClass(base=statistics.statistics, is_container='container', presence=False, yang_name="statistics", rest_name="statistics", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-statistics', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
self.__igp_sync = YANGDynClass(base=igp_sync.igp_sync, is_container='container', presence=False, yang_name="igp-sync", rest_name="igp-sync", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
self.__backup_bandwidth_requirement = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="backup-bandwidth-requirement", rest_name="backup-bandwidth-requirement", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
self.__refresh_interval = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="refresh-interval", rest_name="refresh-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint16', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'mpls-state', u'rsvp']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'mpls-state', u'rsvp']
def _get_protocol_status(self):
"""
Getter method for protocol_status, mapped from YANG variable /mpls_state/rsvp/protocol_status (boolean)
YANG Description: RSVP protocol status (enabled or disabled)
"""
return self.__protocol_status
def _set_protocol_status(self, v, load=False):
"""
Setter method for protocol_status, mapped from YANG variable /mpls_state/rsvp/protocol_status (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_protocol_status is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_protocol_status() directly.
YANG Description: RSVP protocol status (enabled or disabled)
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="protocol-status", rest_name="protocol-status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """protocol_status must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="protocol-status", rest_name="protocol-status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)""",
})
self.__protocol_status = t
if hasattr(self, '_set'):
self._set()
def _unset_protocol_status(self):
self.__protocol_status = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="protocol-status", rest_name="protocol-status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
def _get_refresh_interval(self):
"""
Getter method for refresh_interval, mapped from YANG variable /mpls_state/rsvp/refresh_interval (uint16)
YANG Description: Refresh interval (R)
"""
return self.__refresh_interval
def _set_refresh_interval(self, v, load=False):
"""
Setter method for refresh_interval, mapped from YANG variable /mpls_state/rsvp/refresh_interval (uint16)
If this variable is read-only (config: false) in the
source YANG file, then _set_refresh_interval is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_refresh_interval() directly.
YANG Description: Refresh interval (R)
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="refresh-interval", rest_name="refresh-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint16', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """refresh_interval must be of a type compatible with uint16""",
'defined-type': "uint16",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="refresh-interval", rest_name="refresh-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint16', is_config=False)""",
})
self.__refresh_interval = t
if hasattr(self, '_set'):
self._set()
def _unset_refresh_interval(self):
self.__refresh_interval = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="refresh-interval", rest_name="refresh-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint16', is_config=False)
def _get_refresh_multiple(self):
"""
Getter method for refresh_multiple, mapped from YANG variable /mpls_state/rsvp/refresh_multiple (uint16)
YANG Description: Refresh multiple (K)
"""
return self.__refresh_multiple
def _set_refresh_multiple(self, v, load=False):
"""
Setter method for refresh_multiple, mapped from YANG variable /mpls_state/rsvp/refresh_multiple (uint16)
If this variable is read-only (config: false) in the
source YANG file, then _set_refresh_multiple is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_refresh_multiple() directly.
YANG Description: Refresh multiple (K)
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="refresh-multiple", rest_name="refresh-multiple", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint16', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """refresh_multiple must be of a type compatible with uint16""",
'defined-type': "uint16",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="refresh-multiple", rest_name="refresh-multiple", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint16', is_config=False)""",
})
self.__refresh_multiple = t
if hasattr(self, '_set'):
self._set()
def _unset_refresh_multiple(self):
self.__refresh_multiple = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="refresh-multiple", rest_name="refresh-multiple", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint16', is_config=False)
def _get_transport_address(self):
"""
Getter method for transport_address, mapped from YANG variable /mpls_state/rsvp/transport_address (inet:ipv4-address)
YANG Description: Transport Address
"""
return self.__transport_address
def _set_transport_address(self, v, load=False):
"""
Setter method for transport_address, mapped from YANG variable /mpls_state/rsvp/transport_address (inet:ipv4-address)
If this variable is read-only (config: false) in the
source YANG file, then _set_transport_address is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_transport_address() directly.
YANG Description: Transport Address
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="transport-address", rest_name="transport-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='inet:ipv4-address', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """transport_address must be of a type compatible with inet:ipv4-address""",
'defined-type': "inet:ipv4-address",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="transport-address", rest_name="transport-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='inet:ipv4-address', is_config=False)""",
})
self.__transport_address = t
if hasattr(self, '_set'):
self._set()
def _unset_transport_address(self):
self.__transport_address = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="transport-address", rest_name="transport-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='inet:ipv4-address', is_config=False)
def _get_delay_resv_sending(self):
"""
Getter method for delay_resv_sending, mapped from YANG variable /mpls_state/rsvp/delay_resv_sending (boolean)
YANG Description: Delayed RESV sending
"""
return self.__delay_resv_sending
def _set_delay_resv_sending(self, v, load=False):
"""
Setter method for delay_resv_sending, mapped from YANG variable /mpls_state/rsvp/delay_resv_sending (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_delay_resv_sending is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_delay_resv_sending() directly.
YANG Description: Delayed RESV sending
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="delay-resv-sending", rest_name="delay-resv-sending", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """delay_resv_sending must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="delay-resv-sending", rest_name="delay-resv-sending", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)""",
})
self.__delay_resv_sending = t
if hasattr(self, '_set'):
self._set()
def _unset_delay_resv_sending(self):
self.__delay_resv_sending = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="delay-resv-sending", rest_name="delay-resv-sending", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
def _get_backup_bandwidth_requirement(self):
"""
Getter method for backup_bandwidth_requirement, mapped from YANG variable /mpls_state/rsvp/backup_bandwidth_requirement (boolean)
YANG Description: Backup bandwidth requirement's interpretation
"""
return self.__backup_bandwidth_requirement
def _set_backup_bandwidth_requirement(self, v, load=False):
"""
Setter method for backup_bandwidth_requirement, mapped from YANG variable /mpls_state/rsvp/backup_bandwidth_requirement (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_backup_bandwidth_requirement is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_backup_bandwidth_requirement() directly.
YANG Description: Backup bandwidth requirement's interpretation
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="backup-bandwidth-requirement", rest_name="backup-bandwidth-requirement", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """backup_bandwidth_requirement must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="backup-bandwidth-requirement", rest_name="backup-bandwidth-requirement", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)""",
})
self.__backup_bandwidth_requirement = t
if hasattr(self, '_set'):
self._set()
def _unset_backup_bandwidth_requirement(self):
self.__backup_bandwidth_requirement = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="backup-bandwidth-requirement", rest_name="backup-bandwidth-requirement", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)
def _get_msgid_epoch(self):
"""
Getter method for msgid_epoch, mapped from YANG variable /mpls_state/rsvp/msgid_epoch (uint32)
YANG Description: Message id epoch
"""
return self.__msgid_epoch
def _set_msgid_epoch(self, v, load=False):
"""
Setter method for msgid_epoch, mapped from YANG variable /mpls_state/rsvp/msgid_epoch (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_msgid_epoch is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_msgid_epoch() directly.
YANG Description: Message id epoch
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=TypedListType(allowed_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)), is_leaf=False, yang_name="msgid-epoch", rest_name="msgid-epoch", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """msgid_epoch must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=TypedListType(allowed_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)), is_leaf=False, yang_name="msgid-epoch", rest_name="msgid-epoch", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
})
self.__msgid_epoch = t
if hasattr(self, '_set'):
self._set()
def _unset_msgid_epoch(self):
self.__msgid_epoch = YANGDynClass(base=TypedListType(allowed_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)), is_leaf=False, yang_name="msgid-epoch", rest_name="msgid-epoch", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
def _get_statistics(self):
"""
Getter method for statistics, mapped from YANG variable /mpls_state/rsvp/statistics (container)
YANG Description: MPLS RSVP global statistics
"""
return self.__statistics
def _set_statistics(self, v, load=False):
"""
Setter method for statistics, mapped from YANG variable /mpls_state/rsvp/statistics (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_statistics is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_statistics() directly.
YANG Description: MPLS RSVP global statistics
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=statistics.statistics, is_container='container', presence=False, yang_name="statistics", rest_name="statistics", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-statistics', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """statistics must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=statistics.statistics, is_container='container', presence=False, yang_name="statistics", rest_name="statistics", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-statistics', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""",
})
self.__statistics = t
if hasattr(self, '_set'):
self._set()
def _unset_statistics(self):
self.__statistics = YANGDynClass(base=statistics.statistics, is_container='container', presence=False, yang_name="statistics", rest_name="statistics", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-statistics', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
def _get_igp_sync(self):
"""
Getter method for igp_sync, mapped from YANG variable /mpls_state/rsvp/igp_sync (container)
YANG Description: MPLS Rsvp IGP Synchronization information
"""
return self.__igp_sync
def _set_igp_sync(self, v, load=False):
"""
Setter method for igp_sync, mapped from YANG variable /mpls_state/rsvp/igp_sync (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_igp_sync is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_igp_sync() directly.
YANG Description: MPLS Rsvp IGP Synchronization information
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=igp_sync.igp_sync, is_container='container', presence=False, yang_name="igp-sync", rest_name="igp-sync", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """igp_sync must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=igp_sync.igp_sync, is_container='container', presence=False, yang_name="igp-sync", rest_name="igp-sync", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""",
})
self.__igp_sync = t
if hasattr(self, '_set'):
self._set()
def _unset_igp_sync(self):
self.__igp_sync = YANGDynClass(base=igp_sync.igp_sync, is_container='container', presence=False, yang_name="igp-sync", rest_name="igp-sync", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
def _get_interfaces(self):
"""
Getter method for interfaces, mapped from YANG variable /mpls_state/rsvp/interfaces (list)
YANG Description: MPLS RSVP interface information
"""
return self.__interfaces
def _set_interfaces(self, v, load=False):
"""
Setter method for interfaces, mapped from YANG variable /mpls_state/rsvp/interfaces (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_interfaces is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interfaces() directly.
YANG Description: MPLS RSVP interface information
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("interface_index",interfaces.interfaces, yang_name="interfaces", rest_name="interfaces", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-index', extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-interface', u'cli-suppress-show-path': None}}), is_container='list', yang_name="interfaces", rest_name="interfaces", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-interface', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interfaces must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("interface_index",interfaces.interfaces, yang_name="interfaces", rest_name="interfaces", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-index', extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-interface', u'cli-suppress-show-path': None}}), is_container='list', yang_name="interfaces", rest_name="interfaces", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-interface', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)""",
})
self.__interfaces = t
if hasattr(self, '_set'):
self._set()
def _unset_interfaces(self):
self.__interfaces = YANGDynClass(base=YANGListType("interface_index",interfaces.interfaces, yang_name="interfaces", rest_name="interfaces", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-index', extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-interface', u'cli-suppress-show-path': None}}), is_container='list', yang_name="interfaces", rest_name="interfaces", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-interface', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
def _get_sessions(self):
"""
Getter method for sessions, mapped from YANG variable /mpls_state/rsvp/sessions (list)
YANG Description: MPLS RSVP Sessions
"""
return self.__sessions
def _set_sessions(self, v, load=False):
"""
Setter method for sessions, mapped from YANG variable /mpls_state/rsvp/sessions (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_sessions is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sessions() directly.
YANG Description: MPLS RSVP Sessions
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("dest_ip_addr src_ip_addr tunnel_id session_role",sessions.sessions, yang_name="sessions", rest_name="sessions", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dest-ip-addr src-ip-addr tunnel-id session-role', extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-session', u'cli-suppress-show-path': None}}), is_container='list', yang_name="sessions", rest_name="sessions", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-session', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """sessions must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("dest_ip_addr src_ip_addr tunnel_id session_role",sessions.sessions, yang_name="sessions", rest_name="sessions", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dest-ip-addr src-ip-addr tunnel-id session-role', extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-session', u'cli-suppress-show-path': None}}), is_container='list', yang_name="sessions", rest_name="sessions", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-session', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)""",
})
self.__sessions = t
if hasattr(self, '_set'):
self._set()
def _unset_sessions(self):
self.__sessions = YANGDynClass(base=YANGListType("dest_ip_addr src_ip_addr tunnel_id session_role",sessions.sessions, yang_name="sessions", rest_name="sessions", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dest-ip-addr src-ip-addr tunnel-id session-role', extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-session', u'cli-suppress-show-path': None}}), is_container='list', yang_name="sessions", rest_name="sessions", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-session', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
def _get_neighbors(self):
"""
Getter method for neighbors, mapped from YANG variable /mpls_state/rsvp/neighbors (list)
YANG Description: RSVP Neighbor Operational Information
"""
return self.__neighbors
def _set_neighbors(self, v, load=False):
"""
Setter method for neighbors, mapped from YANG variable /mpls_state/rsvp/neighbors (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_neighbors is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_neighbors() directly.
YANG Description: RSVP Neighbor Operational Information
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("neighbor_ip_addr",neighbors.neighbors, yang_name="neighbors", rest_name="neighbors", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='neighbor-ip-addr', extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-neighbor', u'cli-suppress-show-path': None}}), is_container='list', yang_name="neighbors", rest_name="neighbors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-neighbor', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """neighbors must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("neighbor_ip_addr",neighbors.neighbors, yang_name="neighbors", rest_name="neighbors", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='neighbor-ip-addr', extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-neighbor', u'cli-suppress-show-path': None}}), is_container='list', yang_name="neighbors", rest_name="neighbors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-neighbor', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)""",
})
self.__neighbors = t
if hasattr(self, '_set'):
self._set()
def _unset_neighbors(self):
self.__neighbors = YANGDynClass(base=YANGListType("neighbor_ip_addr",neighbors.neighbors, yang_name="neighbors", rest_name="neighbors", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='neighbor-ip-addr', extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-neighbor', u'cli-suppress-show-path': None}}), is_container='list', yang_name="neighbors", rest_name="neighbors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-neighbor', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
protocol_status = __builtin__.property(_get_protocol_status)
refresh_interval = __builtin__.property(_get_refresh_interval)
refresh_multiple = __builtin__.property(_get_refresh_multiple)
transport_address = __builtin__.property(_get_transport_address)
delay_resv_sending = __builtin__.property(_get_delay_resv_sending)
backup_bandwidth_requirement = __builtin__.property(_get_backup_bandwidth_requirement)
msgid_epoch = __builtin__.property(_get_msgid_epoch)
statistics = __builtin__.property(_get_statistics)
igp_sync = __builtin__.property(_get_igp_sync)
interfaces = __builtin__.property(_get_interfaces)
sessions = __builtin__.property(_get_sessions)
neighbors = __builtin__.property(_get_neighbors)
_pyangbind_elements = {'protocol_status': protocol_status, 'refresh_interval': refresh_interval, 'refresh_multiple': refresh_multiple, 'transport_address': transport_address, 'delay_resv_sending': delay_resv_sending, 'backup_bandwidth_requirement': backup_bandwidth_requirement, 'msgid_epoch': msgid_epoch, 'statistics': statistics, 'igp_sync': igp_sync, 'interfaces': interfaces, 'sessions': sessions, 'neighbors': neighbors, }
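# A minimal usage sketch (assumption: pyangbind names this generated class
# `rsvp` after the YANG node /mpls-state/rsvp); backends populate the
# read-only operational leaves via the private setters documented above:
#
#   state = rsvp()
#   state._set_protocol_status(True)       # RSVP enabled
#   state._set_refresh_interval(30)        # refresh interval (R)
#   print(state.protocol_status, state.refresh_interval)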
|
StarcoderdataPython
|
3245966
|
<reponame>gitter-badger/mlmodels<filename>mlmodels/model_tch/vae/util.py
import os, sys
import numpy as np
import scipy as sci
import matplotlib.pyplot as plt
import pandas as pd
import cv2
"""
functionality: sine wave npz generation and image generation
"""
# default image shape 64x64x3
# default npz element size ([sample number], 64, 64)
data = {'resolution': 64, 'amplitude': 5, 'img_relative_folder':"/sinus_img/verbose", \
'npz_relative_folder':"/sinus_npz", 'npz_name': "sinus.npz", \
'img_cv_folder':"/sinus_img_cv/verbose", 'npz_cv_folder':"/sinus_npz_cv", 'npz_cv_name': "sinus_cv.npz"}
def set_resolution(resolution = 64):
data['resolution'] = resolution
def get_resolution():
return data['resolution']
# sinus: y = a * sin(w * t)
def generate_random_sin(n_rand_starts=100, amplitude=1, n_pis=4, omega=1, step=0.2):
r = np.random.randint(n_rand_starts)
x = np.arange(r, r + n_pis*np.pi, step)
y = amplitude * np.sin(omega * x)
return x,y
# cosinus: y = a * cos (w * t + b) + c
def generate_random_cos(n_rand_starts=1, a=1, w=1, b=0, c=0, x_upbound=1, x_downbound=-1, step=0.2):
r = np.random.randint(n_rand_starts)
x = np.arange(x_downbound*2*np.pi+r, x_upbound*2*np.pi+r, step)
y = a * np.cos(w * x + b) + c
return x,y
# opencv: create wave image as numpy array
def create_sin_2d_array_cv(x, y, resoltuion = data['resolution'],amp=data['amplitude']):
size = len(x), len(y), 3
linewidth = int(len(y)/resoltuion + 0.5)
vis = np.zeros(size, dtype=np.uint8)
new_y = y.copy()
# amplitude set here for plot
y_max = amp
y_min = -1*amp
border = 16
ratio = float((len(y)-border) /( y_max - y_min))
for i in range(len(y)):
new_y[i] = int(border/2+(len(y)-border)-1-(y[i]-y_min)*ratio)
pointList = []
for i in range(int(len(x))):
pointList.append((i,int(new_y[i])))
pointList = np.array(pointList)
cv2.polylines(vis, [pointList], False, (255,255,255), linewidth)
vis = cv2.resize(vis, (resoltuion, resoltuion), interpolation=cv2.INTER_CUBIC)
# threshold as 50
result = np.where(vis[:,:,0] > 50, 1, 0)
return result
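# Quick illustration (hedged; the parameter values here are arbitrary): pair a
# random cosine trace with the OpenCV rasteriser above to get one sample.
#
#   x, y = generate_random_cos(a=2.5, w=3.0, step=0.1)
#   sample = create_sin_2d_array_cv(x, y)   # 64x64 binary numpy array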
# opencv: create wave image and save it to disk as an image file
def plot_save_disk_cv(x, y, filename, xmax=data['resolution'], ymax=data['resolution'],amp=data['amplitude']):
size = len(x), len(y), 3
linewidth = int(len(y)/ymax + 0.5)
vis = np.ones(size, dtype=np.uint8)
vis = vis * 255
new_y = y.copy()
y_max = amp
y_min = -1*amp
border = 16
ratio = float((len(y)-border) /( y_max - y_min))
for i in range(len(y)):
new_y[i] = int(border/2+(len(y)-border)-1-(y[i]-y_min)*ratio)
pointList = []
for i in range(int(len(x))):
pointList.append((i,int(new_y[i])))
pointList = np.array(pointList)
cv2.polylines(vis, [pointList], False, (0,0,0), linewidth)
vis = cv2.resize(vis, (xmax, ymax), interpolation=cv2.INTER_CUBIC)
cv2.imwrite(filename, vis)
# matplotlib: create wave image as numpy array
def create_sin_2d_array_plt(x, y, xmax=data['resolution'], ymax=data['resolution'],amp=data['amplitude']):
plt.rcParams['axes.facecolor']='white'
plt.rcParams['savefig.facecolor']='white'
fig = plt.figure(frameon=False, figsize=(xmax, ymax), dpi=1)
ax = plt.Axes(fig, [0., 0., 1., 1.])
# amplitude set here for plot
ax.set_ylim([-1*amp,1*amp])
ax.set_axis_off()
fig.add_axes(ax)
plt.plot(x,y, c="black", linewidth=100)
fig.canvas.draw()
frame = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
frame = frame.reshape(fig.canvas.get_width_height()[::-1] + (3,))
plt.clf()
plt.close('all')
# frame is binary
result = np.where(frame[:,:,0] > 254, 0, 1)
return result
# matplotlib: create wave image and save it to disk as an image file
def plot_save_disk(x, y, filename, xmax=data['resolution'], ymax=data['resolution'], amp=data['amplitude']):
fig = plt.figure(frameon=False, figsize=(xmax, ymax), dpi=1)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_ylim([-1*amp,1*amp])
ax.set_axis_off()
fig.add_axes(ax)
plt.plot(x,y, c="black", linewidth=100)
fig.savefig(filename)
plt.clf()
plt.close('all')
# matplotlib: images are saved to /path/sinus_img/verbose/*.png
def generate_train_img(folder, N_type=1, amax=5, wmin=5, wmax=10, bmin=-2, bmax=2, cmin=-2 ,cmax=2 , step = 0.1, wfreq=0.5 ) :
folder_w_subfolder = folder + data['img_relative_folder']
os.makedirs(folder, exist_ok=True)
os.makedirs(folder_w_subfolder, exist_ok=True)
folder = os.path.abspath(folder_w_subfolder)
for type_i in range(N_type):
for amp_int in range(1, amax*2+1):
amp_i = amp_int*0.5
for omega_i in range(wmin, wmax, 1):
omega_ii = wfreq * omega_i
for b_i in range(bmin, bmax, 1):
for c_i in range(cmin, cmax, 1):
                        # use sinus to generate:
                        # x,y = generate_random_sin(N_type, amp_i, 4, omega_ii, step)
                        # use cosinus to generate:
x,y = generate_random_cos(n_rand_starts=N_type,a=amp_i, w=omega_ii, b = b_i, c = c_i, step = step)
filename = '{folder}/sin_{amp_i}-{omega_ii}-{b_i}-{c_i}-{type_i}'.format(folder=folder, amp_i=amp_i, omega_ii=omega_ii, b_i=b_i, c_i=c_i,type_i=type_i).replace(".","_")
filename = filename + ".png"
plot_save_disk(x,y, filename, xmax = data['resolution'], ymax = data['resolution'], amp = data['amplitude'])
# matplotlib: the npz archive is saved to /path/sinus_npz/sinus.npz
def generate_train_npz(folder, N_type=1, amax=5, wmin=5, wmax=10, bmin=-2, bmax=2, cmin=-2 ,cmax=2 , step = 0.1, wfreq=0.5, resoltuion = data['resolution'] ) :
folder_w_subfolder = folder + data['npz_relative_folder']
    # initialise with an empty numpy array (its initial contents are arbitrary)
generate_npy = [np.empty([resoltuion, resoltuion], dtype=int)]
os.makedirs(folder, exist_ok=True)
os.makedirs(folder_w_subfolder, exist_ok=True)
folder = os.path.abspath(folder)
is_inital = False
for type_i in range(N_type):
for amp_int in range(1, amax*2+1):
amp_i = amp_int*0.5
for omega_i in range(wmin, wmax, 1):
omega_ii = wfreq * omega_i
for b_i in range(bmin, bmax, 1):
for c_i in range(cmin, cmax, 1):
                        # use sinus to generate:
                        # x,y = generate_random_sin(N_type, amp_i, 4, omega_ii, step)
                        # use cosinus to generate:
x,y = generate_random_cos(n_rand_starts=N_type,a=amp_i, w=omega_ii, b = b_i, c = c_i, step = step)
if len(generate_npy) == 1 and is_inital == False:
                            # replace the placeholder array with the first sample; only done once
generate_npy = [create_sin_2d_array_plt(x,y)]
is_inital = True
else:
generate_npy=np.append(generate_npy, [create_sin_2d_array_plt(x,y)],axis=0)
np.savez(folder_w_subfolder+ "/"+ data['npz_name'], sinus=generate_npy)
print("npz saved")
# opencv version: the npz archive is saved to /path/sinus_npz_cv/sinus_cv.npz
def generate_train_npz_cv(folder, N_type=1, amax=5, wmin=5, wmax=10, bmin=-2, bmax=2, cmin=-2 ,cmax=2 , step = 0.1, wfreq=0.5, resoltuion = data['resolution'] ) :
folder_w_subfolder = folder + data['npz_cv_folder']
    # initialise with an empty numpy array (its initial contents are arbitrary)
generate_npy = [np.empty([resoltuion, resoltuion], dtype=int)]
os.makedirs(folder, exist_ok=True)
os.makedirs(folder_w_subfolder, exist_ok=True)
folder = os.path.abspath(folder)
is_inital = False
for type_i in range(N_type):
for amp_int in range(1, amax*2+1):
amp_i = amp_int*0.5
for omega_i in range(wmin, wmax, 1):
omega_ii = wfreq * omega_i
for b_i in range(bmin, bmax, 1):
for c_i in range(cmin, cmax, 1):
                        # use sinus to generate:
                        # x,y = generate_random_sin(N_type, amp_i, 4, omega_ii, step)
                        # use cosinus to generate:
x,y = generate_random_cos(n_rand_starts=N_type,a=amp_i, w=omega_ii, b = b_i, c = c_i, step = step)
if len(generate_npy) == 1 and is_inital == False:
                            # replace the placeholder array with the first sample; only done once
generate_npy = [create_sin_2d_array_cv(x,y)]
is_inital = True
else:
generate_npy=np.append(generate_npy, [create_sin_2d_array_cv(x,y)],axis=0)
np.savez(folder_w_subfolder+ "/"+ data['npz_cv_name'], sinus=generate_npy)
print("npz saved")
# opencv version: images are saved to /path/sinus_img_cv/verbose/*.png
def generate_train_img_cv(folder, N_type=1, amax=5, wmin=5, wmax=10, bmin=-2, bmax=2, cmin=-2 ,cmax=2 , step = 0.1, wfreq=0.5) :
folder_w_subfolder = folder + data['img_cv_folder']
os.makedirs(folder, exist_ok=True)
os.makedirs(folder_w_subfolder, exist_ok=True)
folder = os.path.abspath(folder_w_subfolder)
for type_i in range(N_type):
for amp_int in range(1, amax*2+1):
amp_i = amp_int*0.5
for omega_i in range(wmin, wmax, 1):
omega_ii = wfreq * omega_i
for b_i in range(bmin, bmax, 1):
for c_i in range(cmin, cmax, 1):
                        # use sinus to generate:
                        # x,y = generate_random_sin(N_type, amp_i, 4, omega_ii, step)
                        # use cosinus to generate:
x,y = generate_random_cos(n_rand_starts=N_type,a=amp_i, w=omega_ii, b = b_i, c = c_i, step = step)
filename = '{folder}/sin_{amp_i}-{omega_ii}-{b_i}-{c_i}-{type_i}'.format(folder=folder, amp_i=amp_i, omega_ii=omega_ii, b_i=b_i, c_i=c_i,type_i=type_i).replace(".","_")
filename = filename + ".png"
plot_save_disk_cv(x,y, filename, xmax = data['resolution'], ymax = data['resolution'])
print("loaded")
|
StarcoderdataPython
|
1728726
|
<reponame>DTenore/skulpt
import _sk_fail; _sk_fail._("ctypes")
|
StarcoderdataPython
|
4837916
|
<gh_stars>0
#!/usr/bin/python3
# ------------------------------------------------------------------------------
"""@package ble_lights.py
Sets the attached light values and notifies when they change.
"""
# ------------------------------------------------------------------------------
# <NAME> <EMAIL> 2020.
# ------------------------------------------------------------------------------
import pybleno as ble
import wiringpi
from typing import Dict
from sys import exit
from time import sleep
from colour_printer import ColourPrinter
class KrisCharacteristic(ColourPrinter, ble.Characteristic):
"""
Provides the base for debugging a characteristic
"""
def __init__(self, settings: Dict[str, any], name: str, colour: str) -> None:
"""
Initialises the object
"""
ble.Characteristic.__init__(self, settings)
ColourPrinter.__init__(self, colour, name)
class UnlockChar(KrisCharacteristic):
SECRET_KEY = 'abc123'
"""
Provides the characteristic for the UnlockChar
"""
def __init__(self, uuid: str) -> None:
"""
        Constructs the UnlockChar
"""
self._changeObservers = {}
KrisCharacteristic.__init__(self, {
'uuid': uuid,
'properties': ['write'],
'value': ''
},
'UnlockChar',
ColourPrinter.GREEN
)
self._value = ''
    def onWriteRequest(self, data, offset, withoutResponse, callback):
        """
        Handles the write request
        """
        value = data.decode()
        self.print(f'Write request received, data: {data}, offset: {offset}')
        self.print(f'Current value is {self._value}')
        # Compare the decoded string, not the raw bytes, with the stored value
        if value != self._value:
            self.print('The value has changed - Signal any listeners')
            for key, observer in self._changeObservers.items():
                self.print(f'Signalling observer {key}')
                observer(value)
            self._value = value
        # Acknowledge the write so the BLE client receives a result code
        callback(ble.Characteristic.RESULT_SUCCESS)
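    # Sketch (assumption): the commented-out `switch.addObserver('FF12', status.set)`
    # call near the bottom of this script suggests observers are registered by key;
    # these helpers are not part of the original interface, they only make that
    # call possible.
    def addObserver(self, key: str, observer) -> None:
        """
        Registers a callable invoked with the new value when it changes
        """
        self._changeObservers[key] = observer
    def removeObserver(self, key: str) -> None:
        """
        Removes a previously registered observer, if present
        """
        self._changeObservers.pop(key, None)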
def onNotify(self):
self.print('onNotify called... apparently')
class StatusChar(KrisCharacteristic):
"""
Provides the characteristic for an LED
"""
def __init__(self, uuid: str) -> None:
"""
Constructs the StatusChar
"""
# self._value = wiringpi.digitalRead(self._led)
KrisCharacteristic.__init__(self, {
'uuid': uuid,
'properties': ['notify'],
'value': ''
},
'StatusChar',
ColourPrinter.GOLD
)
self._value = 'Locked'
        self._updateValueCallback = None
def onSubscribe(self, maxValueSize: int, updateValueCallback) -> None:
"""
Sets the update value callback
"""
self.print('New subscriber added.')
self._updateValueCallback = updateValueCallback
def onUnsubscribe(self) -> None:
"""
Removes the update value callback
"""
self.print('Subscriber removed')
self._updateValueCallback = None
def onStateChange(state: str) -> None:
"""
The state change handler function
"""
global server
printer = ColourPrinter(ColourPrinter.BG_BLUE, 'StateChange')
printer.print(f'on -> State Change: {state}')
if state == 'poweredOn':
server.startAdvertising('Raspberry Pi Lock', ['FF10'])
else:
server.stopAdvertising()
def onAdvertisingStart(error: bool) -> None:
"""
The advertising handler function
"""
printer = ColourPrinter(ColourPrinter.BG_RED, 'AdvertisingStart')
printer.print(f'on -> Advertising Start: {error}')
if not error:
global server
status = StatusChar('FF12')
switch = UnlockChar('FF11')
# switch.addObserver('FF12', status.set)
server.setServices([
ble.BlenoPrimaryService({
'uuid': 'FF10',
'characteristics': [status, switch]
})
]
)
RED_GPIO = 0
GRN_GPIO = 2
BLU_GPIO = 3
LED_SEQUENCE = [RED_GPIO, GRN_GPIO, BLU_GPIO]
BTN_GPIO = 1
wiringpi.wiringPiSetup() # For GPIO pin numbering
for led in LED_SEQUENCE:
wiringpi.pinMode(led, 1)
wiringpi.digitalWrite(led, 0)
wiringpi.pinMode(BTN_GPIO, 0)
cp = ColourPrinter(ColourPrinter.BG_SILVER + ColourPrinter.GOLD, 'Script')
cp.print('Creating the server...')
server = ble.Bleno()
cp.print('Binding the onStateChange handler')
server.on('stateChange', onStateChange)
cp.print('Binding the onAdvertisingStart handler')
server.on('advertisingStart', onAdvertisingStart)
cp.print('Starting the server...')
server.start()
running = True
while running:
try:
sleep(0.1)
except KeyboardInterrupt:
cp.print('Polite exit.')
running = False
server.stopAdvertising()
server.disconnect()
cp.print('Exiting.')
|
StarcoderdataPython
|
3243102
|
<reponame>schallerdavid/perses
"""
Test analysis layer.
TODO:
* Write tests
"""
__author__ = '<NAME>'
################################################################################
# IMPORTS
################################################################################
import os
import os.path
import tempfile
from perses.analysis.analysis import Analysis
from unittest import skipIf
running_on_github_actions = os.environ.get('GITHUB_ACTIONS', None) == 'true'
################################################################################
# TEST ANALYSIS
################################################################################
@skipIf(running_on_github_actions, "Skip analysis test on GH Actions. Currently broken")
def test_analysis():
"""Test analysis tools.
"""
testsystem_names = ['ValenceSmallMoleculeLibraryTestSystem']
for testsystem_name in testsystem_names:
# Create storage.
tmpfile = tempfile.NamedTemporaryFile()
storage_filename = tmpfile.name
import perses.tests.testsystems
testsystem_class = getattr(perses.tests.testsystems, testsystem_name)
# Instantiate test system.
testsystem = testsystem_class(storage_filename=storage_filename)
# Alter settings
for environment in testsystem.environments:
testsystem.mcmc_samplers[environment].verbose = False
testsystem.mcmc_samplers[environment].nsteps = 5 # use fewer MD steps to speed things up
testsystem.exen_samplers[environment].verbose = False
# HBM this line is broken - ExpandedEnsembleSampler doesn't have attribute ncmc_engine
testsystem.exen_samplers[environment].ncmc_engine.nsteps = 5 # NCMC switching
testsystem.sams_samplers[environment].verbose = False
# Run test simulations.
niterations = 5 # just a few iterations
if testsystem.designer is not None:
# Run the designer
testsystem.designer.verbose = False
testsystem.designer.run(niterations=niterations)
else:
# Run the SAMS samplers.
for environment in testsystem.environments:
testsystem.sams_samplers[environment].run(niterations=niterations)
# Analyze file.
# TODO: Use temporary filenames
analysis = Analysis(storage_filename)
analysis.plot_ncmc_work('ncmc.pdf')
if __name__ == '__main__':
#analysis = Analysis('output-10000.nc')
#analysis.plot_ncmc_work('ncmc-10000.pdf')
test_analysis()
|
StarcoderdataPython
|
109210
|
<gh_stars>0
"""
*Temporal Number*
"""
from abc import ABCMeta
__all__ = ["TemporalNumber"]
class TemporalNumber(metaclass=ABCMeta):
    pass
|
StarcoderdataPython
|
4840591
|
import json
import os
import pyotp
import requests
from urllib import parse as url_parse
from cli_tasks import common
from lib.auth487 import common as acm
APP_PORT = int(os.getenv('APP_PORT', 8080))
AUTH_INFO_FILE = os.path.join(os.path.dirname(__file__), 'test_data', 'test-auth-info.json')
with open(AUTH_INFO_FILE) as fp:
AUTH_INFO_DATA = json.load(fp)
def make_app_request(handler, method='GET', data=None, headers=None, cookies=None, set_token=True):
if cookies is None:
cookies = {}
    auth_token = common.get_auth_token()
url = f'http://127.0.0.1:{APP_PORT}{handler}'
if set_token:
cookies[acm.AUTH_COOKIE_NAME] = auth_token
return requests.request(method, url, cookies=cookies, headers=headers, allow_redirects=False, data=data)
class TestIndexPage:
def test_no_auth(self):
res = make_app_request('/', set_token=False)
assert res.status_code == 200
assert res.headers['content-type'] == 'text/html; charset=utf-8'
assert res.headers['content-security-policy'] == (
"default-src 'none'; "
"style-src 'self'; "
"script-src 'self'; "
"img-src 'self';"
)
assert res.headers['x-frame-options'] == 'deny'
assert '<!-- Page: Auth form -->' in res.text
def test_main(self):
res = make_app_request('/')
assert res.status_code == 200
assert res.headers['content-type'] == 'text/html; charset=utf-8'
assert res.headers['content-security-policy'] == (
"default-src 'none'; "
"style-src 'self'; "
"script-src 'self'; "
"img-src 'self';"
)
assert res.headers['x-frame-options'] == 'deny'
assert '<!-- Page: User panel -->' in res.text
class TestLoginPage:
def test_main(self):
res = make_app_request('/login', method='POST', cookies={
acm.CSRF_COOKIE_NAME: common.get_csrf_token(),
}, data={
'login': 'test',
'password': '<PASSWORD>',
'return-path': 'http://foo',
acm.CSRF_FIELD_NAME: common.get_csrf_token(),
}, set_token=False)
assert res.status_code == 200
assert res.headers['content-type'] == 'text/html; charset=utf-8'
assert '<!-- Page: TOTP form -->' in res.text
def test_no_csrf_cookie(self):
res = make_app_request('/login', method='POST', data={
'login': 'test',
'password': '<PASSWORD>',
'return-path': 'http://foo',
acm.CSRF_FIELD_NAME: common.get_csrf_token(),
}, set_token=False)
assert res.status_code == 403
assert res.headers['content-type'] == 'text/html; charset=utf-8'
assert 'No CSRF token' in res.text
def test_no_csrf_field(self):
res = make_app_request('/login', method='POST', cookies={
acm.CSRF_COOKIE_NAME: common.get_csrf_token(),
}, data={
'login': 'test',
'password': '<PASSWORD>',
'return-path': 'http://foo',
}, set_token=False)
assert res.status_code == 403
assert res.headers['content-type'] == 'text/html; charset=utf-8'
assert 'No CSRF token' in res.text
def test_no_login(self):
res = make_app_request('/login', method='POST', cookies={
acm.CSRF_COOKIE_NAME: common.get_csrf_token(),
}, data={
'password': '<PASSWORD>',
'return-path': 'http://foo',
acm.CSRF_FIELD_NAME: common.get_csrf_token(),
}, set_token=False)
assert res.status_code == 400
assert res.headers['content-type'] == 'text/html; charset=utf-8'
assert 'No auth info' in res.text
def test_no_password(self):
res = make_app_request('/login', method='POST', cookies={
acm.CSRF_COOKIE_NAME: common.get_csrf_token(),
}, data={
'login': 'test',
'return-path': 'http://foo',
acm.CSRF_FIELD_NAME: common.get_csrf_token(),
}, set_token=False)
assert res.status_code == 400
assert res.headers['content-type'] == 'text/html; charset=utf-8'
assert 'No auth info' in res.text
def test_wrong_login(self):
res = make_app_request('/login', method='POST', cookies={
acm.CSRF_COOKIE_NAME: common.get_csrf_token(),
}, data={
'login': 'invalid-login',
'password': '<PASSWORD>',
'return-path': 'http://foo',
acm.CSRF_FIELD_NAME: common.get_csrf_token(),
}, set_token=False)
assert res.status_code == 403
assert res.headers['content-type'] == 'text/html; charset=utf-8'
assert 'User is not found' in res.text
def test_wrong_method(self):
res = make_app_request('/login', method='GET')
assert res.status_code == 405
assert res.headers['content-type'] == 'text/html; charset=utf-8'
assert '<title>405 Method Not Allowed</title>' in res.text
class TestOtpPage:
def test_auth_process(self):
auth_id = self._log_in()
res = make_app_request('/submit-totp', method='POST', cookies={
acm.CSRF_COOKIE_NAME: common.get_csrf_token(),
}, data={
'login': 'test',
            'password': self._get_otp(),
'auth_id': auth_id,
'return-path': 'http://foo',
acm.CSRF_FIELD_NAME: common.get_csrf_token(),
}, set_token=False)
assert res.status_code == 302
assert res.headers['content-type'] == 'text/html; charset=utf-8'
assert res.headers['location'] == 'http://foo'
assert 'Redirecting...' in res.text
message = url_parse.unquote(res.headers['x-login-message'])
assert 'New login success' in message
assert 'IP:' in message
assert 'Browser:' in message
def test_no_login(self):
auth_id = self._log_in()
res = make_app_request('/submit-totp', method='POST', cookies={
acm.CSRF_COOKIE_NAME: common.get_csrf_token(),
}, data={
            'password': self._get_otp(),
'auth_id': auth_id,
'return-path': 'http://foo',
acm.CSRF_FIELD_NAME: common.get_csrf_token(),
}, set_token=False)
assert res.status_code == 400
assert res.headers['content-type'] == 'text/html; charset=utf-8'
assert 'No auth info' in res.text
def test_no_auth_id(self):
self._log_in()
res = make_app_request('/submit-totp', method='POST', cookies={
acm.CSRF_COOKIE_NAME: common.get_csrf_token(),
}, data={
'login': 'test',
            'password': self._get_otp(),
'return-path': 'http://foo',
acm.CSRF_FIELD_NAME: common.get_csrf_token(),
}, set_token=False)
assert res.status_code == 400
assert res.headers['content-type'] == 'text/html; charset=utf-8'
assert 'No auth info' in res.text
def test_wrong_auth_id(self):
self._log_in()
res = make_app_request('/submit-totp', method='POST', cookies={
acm.CSRF_COOKIE_NAME: common.get_csrf_token(),
}, data={
'login': 'test',
'password': <PASSWORD>(),
'auth_id': 'wrong-auth-id',
'return-path': 'http://foo',
acm.CSRF_FIELD_NAME: common.get_csrf_token(),
}, set_token=False)
assert res.status_code == 400
assert res.headers['content-type'] == 'text/html; charset=utf-8'
assert 'No auth info' in res.text
def test_no_password(self):
auth_id = self._log_in()
res = make_app_request('/submit-totp', method='POST', cookies={
acm.CSRF_COOKIE_NAME: common.get_csrf_token(),
}, data={
'login': 'test',
'auth_id': auth_id,
'return-path': 'http://foo',
acm.CSRF_FIELD_NAME: common.get_csrf_token(),
}, set_token=False)
assert res.status_code == 400
assert res.headers['content-type'] == 'text/html; charset=utf-8'
assert 'No auth info' in res.text
def test_wrong_login(self):
auth_id = self._log_in()
res = make_app_request('/submit-totp', method='POST', cookies={
acm.CSRF_COOKIE_NAME: common.get_csrf_token(),
}, data={
'login': 'invalid-login',
'password': <PASSWORD>(),
'auth_id': auth_id,
'return-path': 'http://foo',
acm.CSRF_FIELD_NAME: common.get_csrf_token(),
}, set_token=False)
assert res.status_code == 403
assert res.headers['content-type'] == 'text/html; charset=utf-8'
assert 'User is not found' in res.text
def test_wrong_password(self):
auth_id = self._log_in()
res = make_app_request('/submit-totp', method='POST', cookies={
acm.CSRF_COOKIE_NAME: common.get_csrf_token(),
}, data={
'login': 'test',
'password': '<PASSWORD>',
'auth_id': auth_id,
'return-path': 'http://foo',
acm.CSRF_FIELD_NAME: common.get_csrf_token(),
}, set_token=False)
assert res.status_code == 403
assert res.headers['content-type'] == 'text/html; charset=utf-8'
assert 'Wrong OTP' in res.text
def test_no_csrf_cookie(self):
auth_id = self._log_in()
res = make_app_request('/submit-totp', method='POST', data={
'login': 'test',
'password': self._<PASSWORD>(),
'auth_id': auth_id,
'return-path': 'http://foo',
acm.CSRF_FIELD_NAME: common.get_csrf_token(),
}, set_token=False)
assert res.status_code == 403
assert res.headers['content-type'] == 'text/html; charset=utf-8'
assert 'No CSRF token' in res.text
def test_no_csrf_field(self):
auth_id = self._log_in()
res = make_app_request('/submit-totp', method='POST', cookies={
acm.CSRF_COOKIE_NAME: common.get_csrf_token(),
}, data={
'login': 'test',
'password': self._<PASSWORD>(),
'auth_id': auth_id,
'return-path': 'http://foo',
}, set_token=False)
assert res.status_code == 403
assert res.headers['content-type'] == 'text/html; charset=utf-8'
assert 'No CSRF token' in res.text
def test_wrong_method(self):
res = make_app_request('/submit-totp', method='GET')
assert res.status_code == 405
assert res.headers['content-type'] == 'text/html; charset=utf-8'
assert '<title>405 Method Not Allowed</title>' in res.text
def _log_in(self, login='test'):
res = make_app_request('/login', method='POST', cookies={
acm.CSRF_COOKIE_NAME: common.get_csrf_token(),
}, data={
'login': login,
'password': '<PASSWORD>',
'return-path': 'http://foo',
acm.CSRF_FIELD_NAME: common.get_csrf_token(),
}, set_token=False)
assert res.status_code == 200
auth_id = res.headers.get('x-auth-id')
assert auth_id is not None
return auth_id
def _get_otp(self, login='test'):
return pyotp.TOTP(AUTH_INFO_DATA[login]['totp_secret']).now()
class TestLogout:
def test_main(self):
res = make_app_request('/logout', method='POST', cookies={
acm.CSRF_COOKIE_NAME: common.get_csrf_token(),
}, data={
'login': 'test',
'password': '<PASSWORD>',
'return-path': 'http://foo',
acm.CSRF_FIELD_NAME: common.get_csrf_token(),
})
assert res.status_code == 302
assert res.headers['content-type'] == 'text/html; charset=utf-8'
assert res.headers['location'] == 'http://foo'
assert f'{acm.AUTH_COOKIE_NAME}=;' in res.headers['set-cookie']
assert 'Redirecting...' in res.text
class TestGetPublicKey:
def test_main(self):
res = make_app_request('/get-public-key', set_token=False)
assert res.status_code == 200
assert res.headers['content-type'] == 'text/plain; charset=utf-8'
assert '-----BEGIN RSA PUBLIC KEY-----' in res.text
assert '-----END RSA PUBLIC KEY-----' in res.text
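# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original suite): the tests above rely on
# a make_app_request() helper defined elsewhere. The sketch below shows one way
# such a helper could look using the `requests` library; the base URL and the
# common.get_auth_token() call are assumptions for illustration only.
import requests
def _example_make_app_request(path, method='GET', cookies=None, data=None,
                              set_token=True, base_url='http://localhost:8080'):
    """Minimal sketch of an app-request helper, under the assumptions above."""
    cookies = dict(cookies or {})
    if set_token:
        # Hypothetical: attach a pre-issued auth cookie so protected routes pass.
        cookies[acm.AUTH_COOKIE_NAME] = common.get_auth_token()
    return requests.request(method, base_url + path, cookies=cookies,
                            data=data, allow_redirects=False)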
|
StarcoderdataPython
|
1770312
|
from simple_ddl_parser import DDLParser
def test_no_unexpected_logs(capsys):
ddl = """
CREATE EXTERNAL TABLE test (
test STRING NULL COMMENT 'xxxx',
)
PARTITIONED BY (snapshot STRING, cluster STRING)
"""
parser = DDLParser(ddl)
out, err = capsys.readouterr()
assert out == ""
assert err == ""
parser.run(output_mode="hql", group_by_type=True)
out, err = capsys.readouterr()
assert out == ""
assert err == ""
|
StarcoderdataPython
|
1779738
|
<reponame>Guymer/fmc
def load_airport_list():
# Import standard modules ...
import csv
import os
# Create the empty list ...
airports = []
# Make database path ...
dbpath = f"{os.path.dirname(__file__)}/../openflights/data/airports.dat"
# Check that database is there ...
if not os.path.exists(dbpath):
print("INFO: The airport database is missing. It is included as a ")
print(" submodule in Git. If you did not clone this repository with")
print(" the \"--recursive\" option then you can still pull down the")
print(" submodule by running \"git submodule update --init\" now.")
raise Exception("the airport database is missing") from None
# Open database ...
with open(dbpath, "rt", encoding = "utf-8") as fobj:
# Loop over all airports ...
for row in csv.reader(fobj):
# Load string parameters ...
tmp = {
"Name" : row[ 1],
"City" : row[ 2],
"Country" : row[ 3],
"IATA" : row[ 4],
"ICAO" : row[ 5],
"DST-scheme" : row[10],
"TZ-name" : row[11]
}
# Try loading numeric parameter ...
try:
tmp["ID"] = int(row[0])
except ValueError:
pass
# Try loading numeric parameter ...
try:
tmp["Latitude"] = float(row[6]) # [°]
except ValueError:
pass
# Try loading numeric parameter ...
try:
tmp["Longitude"] = float(row[7]) # [°]
except ValueError:
pass
# Try loading numeric parameter ...
try:
tmp["Altitude"] = float(row[8]) # [ft]
except ValueError:
pass
# Try loading numeric parameter ...
try:
tmp["UTC-offset"] = float(row[9]) # [hr]
except ValueError:
pass
# Append dictionary to the list ...
airports.append(tmp)
# Return the full list ...
return airports
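# Hedged usage sketch (not part of the original module): one way to consume the
# list returned above, e.g. building an IATA-code lookup table. openflights uses
# the literal string "\N" for missing codes, which this filters out.
def example_iata_lookup():
    """Return a {IATA: airport-dict} mapping for airports with a real IATA code."""
    airports = load_airport_list()
    return {a["IATA"]: a for a in airports if a["IATA"] and a["IATA"] != "\\N"}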
|
StarcoderdataPython
|
83602
|
<filename>src/train/features/__init__.py
from sklearn.pipeline import FeatureUnion
from .features import (
Speed,
NetClearance,
DistanceFromSideline,
Depth,
PlayerDistanceTravelled,
PlayerImpactDepth,
PreviousDistanceFromSideline,
PreviousTimeToNet,
Hitpoint,
Out,
WeirdNetClearance,
DistanceTravelledRatio
)
FEATURES_LIST = [
Speed,
NetClearance,
DistanceFromSideline,
Depth,
PlayerDistanceTravelled,
PlayerImpactDepth,
PreviousDistanceFromSideline,
PreviousTimeToNet,
Hitpoint,
Out,
WeirdNetClearance,
DistanceTravelledRatio
]
FEATURES_STORE = [(f.name(), f()) for f in FEATURES_LIST]
features_generator = FeatureUnion(FEATURES_STORE)
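# Hedged usage sketch (illustration only, not part of the original package): the
# FeatureUnion above is typically fit on a dataframe of raw shot records; the
# column names each transformer expects are defined in .features and are not
# restated here, so `raw_df` is a stand-in.
def build_feature_matrix(raw_df):
    """Apply every registered feature transformer and return the stacked matrix."""
    return features_generator.fit_transform(raw_df)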
|
StarcoderdataPython
|
1725813
|
<filename>djexperience/service/admin.py
from django.contrib import admin
from .models import Service, TypeService, Protest
@admin.register(Service)
class ServiceAdmin(admin.ModelAdmin):
list_display = ('__str__', )
search_fields = ('title',)
@admin.register(TypeService)
class TypeServiceAdmin(admin.ModelAdmin):
list_display = ('__str__', 'service')
search_fields = ('title',)
@admin.register(Protest)
class ProtestAdmin(admin.ModelAdmin):
list_display = (
'__str__',
'service',
'type_service',
)
search_fields = (
'typeservice__service__title',
'typeservice__title',
)
list_filter = ('typeservice__service', )
def service(self, obj):
return obj.typeservice.service
service.admin_order_field = 'title'
service.short_description = 'Serviço'
def type_service(self, obj):
return obj.typeservice
type_service.admin_order_field = 'title'
type_service.short_description = 'Tipo de Serviço'
|
StarcoderdataPython
|
1734096
|
<gh_stars>0
from .models import QueueItem
def enqueue(queue_type='s'):
item = QueueItem.objects.create(item_type=queue_type)
return item.id
def peek(queue_type, queue_id, upto_first_n=1):
# check if job_id is one of the first N items from the head of queue
top_items = QueueItem.objects.filter(item_type=queue_type).order_by('pk')[:upto_first_n]
for item in top_items:
if queue_id == item.id:
return True
return False
def dequeue(queue_id):
QueueItem.objects.get(pk=queue_id).delete()
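# Hedged usage sketch (illustration only; assumes Django's ORM is configured):
# a job enqueues itself, checks whether it is near the head of its queue, and
# removes itself when finished.
def example_queue_roundtrip():
    job_id = enqueue(queue_type='s')
    try:
        if peek('s', job_id, upto_first_n=3):
            pass  # this job is among the first three items and may start work
    finally:
        dequeue(job_id)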
|
StarcoderdataPython
|
3236861
|
import numpy as np
import tensorflow as tf
import tensorflow.python.platform
from tensorflow.models.rnn import rnn
from tensorflow.models.rnn import rnn_cell
from bi_rnn import bi_rnn
from utils import *
###############################################
# NN creation functions #
###############################################
class Parameters:
def __init__(self, init={}, emb={}, w_c=False, b_c=False, w_p=False,
b_p=False, w_po=False, b_po=False):
self.init_dic = init
self.embeddings = emb
self.W_conv = w_c
self.b_conv = b_c
self.W_pred = w_p
self.b_pred = b_p
self.W_pot = w_po
self.b_pot = b_po
def device_for_node(n):
if n.type == "MatMul":
return "/gpu:0"
else:
return "/cpu:0"
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def weight_variable(shape, name='weight'):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial, name=name+'_W')
def bias_variable(shape, name='weight'):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial, name=name+'_b')
def feature_layer(in_layer, config, params, reuse=False):
in_features = config.input_features
features_dim = config.features_dim
batch_size = config.batch_size
num_steps = config.num_steps
feature_mappings = config.feature_maps
# inputs
num_features = len(in_features)
input_ids = in_layer
if reuse:
tf.get_variable_scope().reuse_variables()
param_vars = params.embeddings
# lookup layer
else:
param_dic = params.init_dic
param_vars = {}
for feat in in_features:
if feat in param_dic:
param_vars[feat] = \
tf.Variable(tf.convert_to_tensor(param_dic[feat],
dtype=tf.float32),
name=feat + '_embedding',
trainable=False)
else:
shape = [len(feature_mappings[feat]['reverse']), features_dim]
initial = tf.truncated_normal(shape, stddev=0.1)
param_vars[feat] = tf.Variable(initial,
name=feat + '_embedding')
params = [param_vars[feat] for feat in in_features]
input_embeddings = tf.nn.embedding_lookup(params, input_ids, name='lookup')
# add and return
embedding_layer = tf.reduce_sum(input_embeddings, 2)
return (embedding_layer, param_vars)
def bi_lstm_layer(in_layer, config, reuse=False, name='Bi_LSTM'):
num_units = config.rnn_hidden_units
output_size = config.rnn_output_size
batch_size = int(in_layer.get_shape()[0])
num_steps = int(in_layer.get_shape()[1])
input_size = int(in_layer.get_shape()[2])
initializer = tf.random_uniform_initializer(-0.1, 0.1)
lstm_cell_f = rnn_cell.LSTMCell(num_units, input_size, use_peepholes=True,
num_proj=output_size, cell_clip=1.0,
initializer=initializer)
lstm_cell_b = rnn_cell.LSTMCell(num_units, input_size, use_peepholes=True,
num_proj=output_size, cell_clip=1.0,
initializer=initializer)
initial_state_f = lstm_cell_f.zero_state(batch_size, tf.float32)
inputs_list = [tf.reshape(x, [batch_size, input_size])
for x in tf.split(1, num_steps, in_layer)]
rnn_out, rnn_states = bi_rnn(lstm_cell_f, lstm_cell_b, inputs_list,
initial_state=initial_state_f, scope=name,
reuse=reuse)
out_layer = tf.transpose(tf.pack(rnn_out), perm=[1, 0, 2])
return out_layer
def convo_layer(in_layer, config, params, reuse=False, name='Convo'):
conv_window = config.conv_window
output_size = config.conv_dim
batch_size = int(in_layer.get_shape()[0])
num_steps = int(in_layer.get_shape()[1])
input_size = int(in_layer.get_shape()[2])
if reuse:
tf.get_variable_scope().reuse_variables()
W_conv = params.W_conv
b_conv = params.b_conv
else:
W_conv = weight_variable([conv_window, 1, input_size, output_size],
name=name)
b_conv = bias_variable([output_size], name=name)
reshaped = tf.reshape(in_layer, [batch_size, num_steps, 1, input_size])
conv_layer = tf.nn.relu(tf.reshape(conv2d(reshaped, W_conv),
[batch_size, num_steps, output_size],
name=name) + b_conv)
return (conv_layer, W_conv, b_conv)
def predict_layer(in_layer, config, params, reuse=False, name='Predict'):
n_outcomes = config.n_outcomes
batch_size = int(in_layer.get_shape()[0])
num_steps = int(in_layer.get_shape()[1])
input_size = int(in_layer.get_shape()[2])
if reuse:
tf.get_variable_scope().reuse_variables()
W_pred = params.W_pred
b_pred = params.b_pred
else:
W_pred = weight_variable([input_size, n_outcomes], name=name)
b_pred = bias_variable([n_outcomes], name=name)
flat_input = tf.reshape(in_layer, [-1, input_size])
pre_scores = tf.nn.softmax(tf.matmul(flat_input, W_pred) + b_pred)
preds_layer = tf.reshape(pre_scores, [batch_size, num_steps, -1])
return (preds_layer, W_pred, b_pred)
def optim_outputs(outcome, targets, config, params):
batch_size = int(outcome.get_shape()[0])
num_steps = int(outcome.get_shape()[1])
n_outputs = int(outcome.get_shape()[2])
# We are currently using cross entropy as criterion
criterion = -tf.reduce_sum(targets * tf.log(outcome))
for feat in config.l1_list:
criterion += config.l1_reg * \
tf.reduce_sum(tf.abs(params.embeddings[feat]))
# We also compute the per-tag accuracy
correct_prediction = tf.equal(tf.argmax(outcome, 2), tf.argmax(targets, 2))
accuracy = tf.reduce_sum(tf.cast(correct_prediction,
"float") * tf.reduce_sum(targets, 2)) /\
tf.reduce_sum(targets)
return (criterion, accuracy)
class SequNN:
def __init__(self, config):
self.batch_size = config.batch_size
self.num_steps = config.num_steps
num_features = len(config.input_features)
# input_ids <- batch.features
self.input_ids = tf.placeholder(tf.int32, shape=[self.batch_size,
self.num_steps,
num_features])
# targets <- batch.tag_windows_one_hot
self.targets = tf.placeholder(tf.float32, shape=[self.batch_size,
self.num_steps,
config.n_outcomes])
def make(self, config, params, reuse=False, name='SequNN'):
with tf.variable_scope(name):
if reuse:
tf.get_variable_scope().reuse_variables()
(out_layer, embeddings) = feature_layer(self.input_ids, config,
params, reuse=reuse)
params.embeddings = embeddings
if config.verbose:
print('features layer done')
if config.use_rnn:
                out_layer = bi_lstm_layer(out_layer, config, reuse=reuse)
if config.verbose:
print('rnn layer done')
if config.use_convo:
(out_layer, W_conv, b_conv) = convo_layer(out_layer, config,
params, reuse=reuse)
params.W_conv = W_conv
params.b_conv = b_conv
if config.verbose:
print('convolution layer done')
self.out_layer = out_layer
(preds_layer, W_pred, b_pred) = predict_layer(out_layer, config,
params, reuse=reuse)
params.W_pred = W_pred
params.b_pred = b_pred
self.preds_layer = preds_layer
(criterion, accuracy) = optim_outputs(preds_layer, config, params)
if config.verbose:
print('output layer done')
self.criterion = criterion
self.accuracy = accuracy
def train_epoch(self, data, train_step, config, params):
batch_size = config.batch_size
        train_step = tf.train.AdagradOptimizer(config.learning_rate).minimize(self.criterion)
batch = Batch()
for i in range(len(data) / batch_size):
batch.read(data, i * batch_size, config)
f_dict = {self.input_ids: batch.features,
self.targets: batch.tag_windows_one_hot}
if i % 100 == 0:
train_accuracy = self.accuracy.eval(feed_dict=f_dict)
print("step %d of %d, training accuracy %f, Lemma_l1 %f" %
(i, len(data) / batch_size, train_accuracy,
tf.reduce_sum(tf.abs(params.embeddings['lemma'])).eval()))
train_step.run(feed_dict=f_dict)
def validate_accuracy(self, data, config):
batch_size = config.batch_size
batch = Batch()
total_accuracy = 0.
total = 0.
for i in range(len(data) / batch_size):
batch.read(data, i * batch_size, config)
f_dict = {self.input_ids: batch.features,
self.targets: batch.tag_windows_one_hot}
dev_accuracy = self.accuracy.eval(feed_dict=f_dict)
total_accuracy += dev_accuracy
total += 1
if i % 100 == 0:
print("%d of %d: \t:%f" % (i, len(data) / batch_size,
total_accuracy / total))
return total_accuracy / total
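# Hedged driver sketch (not part of the original module): the intended call
# sequence appears to be build -> train -> validate inside a TF1 session. The
# Config object and the data format come from utils.* and are assumptions here,
# so this is left as a comment rather than executable code.
#
#   config = Config()                     # hypothetical configuration object
#   params = Parameters()
#   model = SequNN(config)
#   model.make(config, params)
#   with tf.Session() as sess:
#       sess.run(tf.initialize_all_variables())
#       for epoch in range(num_epochs):   # num_epochs is an assumption
#           model.train_epoch(train_data, None, config, params)
#           print('dev accuracy:', model.validate_accuracy(dev_data, config))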
|
StarcoderdataPython
|
90748
|
from django import forms
from django.core.exceptions import ValidationError
from htmx_tutorial.clients.models import Client
from htmx_tutorial.clients.utils import get_max_order
class SimpleClientForm(forms.Form):
input_text = forms.CharField()
first_name = forms.CharField(required=False)
last_name = forms.CharField(required=False)
def clean(self):
cleaned_data = super(SimpleClientForm, self).clean()
input_text = cleaned_data['input_text'].title()
if ',' in input_text:
parts = input_text.split(',')
if len(parts) == 2:
cleaned_data['last_name'] = parts[0]
cleaned_data['first_name'] = parts[1]
else:
msg = 'More than one comma'
raise ValidationError(msg)
else:
parts = input_text.split(' ')
if len(parts) < 2:
msg = 'Need to include first and last name'
raise ValidationError(msg)
else:
cleaned_data['last_name'] = parts[-1]
cleaned_data['first_name'] = ' '.join(parts[:-1])
return cleaned_data
def save(self):
data = self.cleaned_data
if Client.objects.filter(first_name=data['first_name'], last_name=data['last_name']).exists():
client = Client.objects.get(first_name=data['first_name'], last_name=data['last_name'])
client.display_order = get_max_order()
client.is_active = True
client.save()
else:
client = Client.objects.create(first_name=data['first_name'], last_name=data['last_name'],
display_order=get_max_order())
# client, _ = Client.objects.get_or_create(first_name=data['first_name'], last_name=data['last_name'])
# if not client.is_active:
# client.is_active = True
# client.save()
return client
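# Hedged usage sketch (not part of the original module): how the form above is
# typically driven from a Django view. The template path and the redirect URL
# name are assumptions for illustration only.
from django.shortcuts import redirect, render
def example_add_client_view(request):
    """Bind, validate and save SimpleClientForm, then redirect on success."""
    form = SimpleClientForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        form.save()
        return redirect('clients:list')  # hypothetical URL name
    return render(request, 'clients/add_client.html', {'form': form})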
|
StarcoderdataPython
|
3232948
|
import Room
import Constants as c
class E24_R2(Room.Room):
def __init__(self):
super().__init__('Amnesia Totale', 'https://escaperoomromaexitus.com/quest/amnesiatotale/', 'E24_R2')
def get_prices(self):
p = ['2 GIOCATORI – € 25,00 a persona',
'3 GIOCATORI – € 20,00 a persona',
'4-8 GIOCATORI – € 18,00 a persona']
return c.SEPARATOR.join(p)
def get_availabilities(self):
return c.NO_RETR_AVAILS
|
StarcoderdataPython
|
1764789
|
# Code Author: <NAME>
# Uniform Preprocessing file for performing Out-painting for SML project
from segmentation_models import Unet, Nestnet, Xnet
import numpy as np
from keras import backend as K
from keras.models import Model
from keras.layers import Flatten, Dense, Dropout
from keras.layers import Conv2D
from keras.applications.resnet50 import ResNet50, preprocess_input
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.models import load_model
import os
import sys
from PIL import Image
from skimage.color import rgb2gray
import numpy as np
from matplotlib import pyplot as plt
import cv2
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
TRAIN_DATASET_PATH='/home/dhruv/Allprojects/NIH-XRAY/Train/'
VALID_DATASET_PATH='/home/dhruv/Allprojects/NIH-XRAY/Validation/'
IMAGE_SIZE = (256, 256)
CROP_LENGTH = 836
NUM_CLASSES = 2
BATCH_SIZE = 10 # try reducing batch size or freeze more layers if your GPU runs out of memory
FREEZE_LAYERS = 2 # freeze the first this many layers for training
NUM_EPOCHS = 3
WEIGHTS_FINAL = 'model-cropped-final.h5'
NUMBER=0
def searchForPatches(filename,lines,index):
for i in range(len(lines)):
line=lines[i][0:-1]
fields=line.split('#')
if(filename == fields[0]):
line=lines[i+index][0:-1]
# print('index=',index)
fields=line.split('#')
# print('Returning!!',filename)
return fields
fields=['',0,0,155,155]
return fields
def random_crop(img, random_crop_size, filename, index, lines):
# Note: image_data_format is 'channel_last'
# Now we need to search for the filename in the patchfile and extract
# the patches
fields=searchForPatches(filename,lines,index)
# print('filename',filename)
# print('index',index)
# line=lines[index][0:-1]
# fields=line.split('#')
# print(fields)
x=0
y=0
dx=0
dy=0
# if(filename == fields[0]):
x=int(fields[1])
y=int(fields[2])
dx=int(fields[3])
dy=int(fields[4])
img=img[y:(y+dy), x:(x+dx), :]
img = cv2.resize(img,(224,224))
img=img/255.0
# print(x,y,dx,dy)
# plt.imshow(img)
# plt.show()
# plt.imshow(img)
# plt.show()
# print(img)
# print('numbers=',NUMBER)
# NUMBER=NUMBER+1
return img
def crop(img, random_crop_size):
# Note: image_data_format is 'channel_last'
# assert img.shape[2] == 3
height, width = img.shape[0], img.shape[1]
dy0, dx0 = 836,836
x0 = 94
y0 = 45
img=img[y0:(y0+dy0), x0:(x0+dx0), :]
img=img/255
img = cv2.resize(img,(224,224))
return img
def crop_generator(batches, crop_length, lines):  # 224
"""Take as input a Keras ImageGen (Iterator) and generate random
crops from the image batches generated by the original iterator.
"""
filenames=((batches.filenames))
# for i in batches:
# # print(i)
# idx = (batches.batch_index - 1) * batches.batch_size
# print(batches.filenames[idx : idx + batches.batch_size])
while True:
batch_x= next(batches)
# print('batch_shape=',batch_x.shape)
# print('batch_names=',batch_x.filenames)
batch_crops_inp = np.zeros((4,batch_x.shape[0], 224, 224,3))#224
# batch_crops_tar = np.zeros((batch_x.shape[0], 224, 224,3))
# index=0
for i in range(batch_x.shape[0]):
for j in range(4):
batch_crops_inp[j][i] = random_crop(batch_x[i], (crop_length, crop_length),filenames[i],j,lines)
# index=index+1
batch_crops_inp=np.reshape(batch_crops_inp,(batch_crops_inp.shape[0]*batch_crops_inp.shape[1],224,224,3))
batch_crops_out=out_painting_mask(batch_crops_inp)
batch_crops_inp=rgb2gray(batch_crops_inp)
batch_crops_inp=np.reshape(batch_crops_inp,(batch_crops_inp.shape[0],224,224,1))
# print(batch_crops_inp.shape,'inp')
# plt.imshow(batch_crops_inp[1,:,:,0],cmap='gray',vmin=0,vmax=1)
# plt.show()
# print(batch_crops_out.shape,'out')
# plt.imshow(batch_crops_out[1,:,:,0],cmap='gray',vmin=0,vmax=1)
# plt.show()
# print(batch_crops_out.shape,'out')
# print(batch_crops_inp.shape,'inp')
# plt.imshow(batch_crops_inp[2,:,:,0],cmap='gray',vmin=0,vmax=1)
# plt.show()
# print(batch_crops_out.shape,'out')
# plt.imshow(batch_crops_out[2,:,:,0],cmap='gray',vmin=0,vmax=1)
# plt.show()
# print(batch_crops_out.shape,'out')
# print(batch_crops_inp.shape,'inp')
# plt.imshow(batch_crops_inp[3,:,:,0],cmap='gray',vmin=0,vmax=1)
# plt.show()
# print(batch_crops_out.shape,'out')
# plt.imshow(batch_crops_out[3,:,:,0],cmap='gray',vmin=0,vmax=1)
# plt.show()
# print(batch_crops_out.shape,'out')
# print(batch_crops_inp.shape,'inp')
# print(batch_crops_inp.shape,np.min(batch_crops_inp),np.max(batch_crops_inp))
# print(batch_crops_out.shape,np.min(batch_crops_out),np.max(batch_crops_out))
yield(batch_crops_out,batch_crops_inp)
# return batch_crops_inp
# yield (batch_crops_inp,batch_crops_tar)
def main():
# with open('/home/kunal/Desktop/Feature-Learning-for-Disease-Classification/temp_patch.txt') as f:
# lines = f.readlines()
with open('/home/dhruv/Allprojects/Feature-Learning-for-Disease-Classification/PatchFiles/train_sml.txt') as f1:
lines1 = f1.readlines()
with open('/home/dhruv/Allprojects/Feature-Learning-for-Disease-Classification/PatchFiles/validation_sml.txt') as f2:
lines2 = f2.readlines()
# print((lines1))
train_datagen = ImageDataGenerator()
train_batches = train_datagen.flow_from_directory(TRAIN_DATASET_PATH,
target_size=(1024,1024),
shuffle=True,
class_mode=None,
batch_size=BATCH_SIZE)
valid_datagen = ImageDataGenerator()
valid_batches = valid_datagen.flow_from_directory(VALID_DATASET_PATH ,
target_size=(1024,1024),
shuffle=False,
class_mode=None,
batch_size=BATCH_SIZE)
train_crops_orig = crop_generator(train_batches, CROP_LENGTH,lines1) #224
valid_crops_orig = crop_generator(valid_batches, CROP_LENGTH,lines2)
# batch_x_random_crop, batch_y_targeted_crop = next(train_crops)
# valid_x, valid_y = next(valid_crops)
# print(train_crops_orig.shape)
# train_crops_orig=np.reshape(train_crops_orig,(train_crops_orig.shape[0]*train_crops_orig.shape[1],224,224,3))
# print(train_crops_orig.shape)
# in_painted_x= out_painting_mask(train_crops_orig)
# valid_in_x=in_painting_mask(valid_x,valid_y)
# train_crops_1_ch=rgb2gray(train_crops_orig)
# train_crops_1_ch=np.reshape(train_crops_1_ch,(train_crops_1_ch.shape[0],224,224,1))
# valid_x=rgb2gray(valid_x)
# valid_x=np.reshape(valid_x,(valid_x.shape[0],224,224,1))
# model = Unet(backbone_name='resnet18', encoder_weights='imagenet', decoder_block_type='transpose') # build U-Net
model = Unet(backbone_name='resnet18', encoder_weights=None) # build U-Net
model.compile(optimizer='Adam', loss='mean_squared_error')
model.summary()
# print('inpaited',in_painted_x.shape)
# print('1 channel y',train_crops_1_ch.shape)
# print(in_painted_x.shape)
# print(train_crops_1_ch.shape)
callbacks = [EarlyStopping(monitor='val_loss', patience=2),
ModelCheckpoint(filepath='best_model.h5', monitor='val_loss', save_best_only=True),
TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True, write_images=True)]
model.fit_generator(generator=train_crops_orig,
steps_per_epoch=15,
validation_data=valid_crops_orig,
callbacks=callbacks,
validation_steps=15,
epochs=15)
model.save('outpaint.h5')
def out_painting_mask(batch_x):
#Creating random mask dimensions for inpainting
out_paint_x=np.zeros((batch_x.shape[0],224,224,3))
width=224
height=224
for i in range(0,batch_x.shape[0]):
mask=np.ones((224,224,3))
#choosing h and w such that it conserves 50% of the image
h=np.random.randint(0,224)
w=np.random.randint(0,224)
while not ((224-2*h)*(224-2*w) >=25088 and 2*h<224 and 2*w<224):
h=np.random.randint(0,224)
w=np.random.randint(0,224)
# print('h',h)
# print('w',w)
# print((224-2*h)*(224-2*w))
# print('*********************')
#locations of masks
mask[0:h,0:224,:]=0
mask[224-h:224,0:224,:]=0
mask[0:224,0:w,:]=0
mask[0:224,224-w:224,:]=0
# plt.imshow(batch_x[i])
# plt.show()
out_paint_x[i]=np.multiply(batch_x[i],mask)
# plt.imshow(out_paint_x[i])
# plt.show()
return out_paint_x
def in_painting_out_painting_mask(batch_x):
#Creating random mask dimensions for inpainting
in_paint_x=np.zeros((batch_x.shape[0],224,224,3))
width=224
height=224
for i in range(0,batch_x.shape[0]):
mask=np.zeros((224,224,3))
#random size of mask
dx= np.random.randint(70,224+1)
dy= np.random.randint(70,224+1)
#random location of mask
x = np.random.randint(0, 224 + 1)
y = np.random.randint(0, 224 + 1)
x_plus_dx=min(x+dx,224)
y_plus_dy=min(y+dy,224)
mask[x:(x_plus_dx),y:(y_plus_dy),:]=1
# plt.imshow(batch_x[i])
# plt.show()
in_paint_x[i]=np.multiply(batch_x[i],mask)
# plt.imshow(in_paint_x[i])
# plt.show()
return in_paint_x
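def _example_visualize_outpainting(batch_x):
    """Hedged illustration (not in the original script): show one input crop next
    to its out-painting-masked counterpart. Assumes batch_x is a float array in
    [0, 1] with shape (N, 224, 224, 3), as produced by the crop helpers above."""
    masked = out_painting_mask(batch_x)
    plt.subplot(1, 2, 1)
    plt.imshow(batch_x[0])
    plt.subplot(1, 2, 2)
    plt.imshow(masked[0])
    plt.show()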
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
170054
|
<gh_stars>10-100
"""Collect macro definitions from header files.
"""
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import re
from typing import Dict, Iterable, Iterator, List, Set
class PSAMacroEnumerator:
"""Information about constructors of various PSA Crypto types.
This includes macro names as well as information about their arguments
when applicable.
This class only provides ways to enumerate expressions that evaluate to
values of the covered types. Derived classes are expected to populate
the set of known constructors of each kind, as well as populate
`self.arguments_for` for arguments that are not of a kind that is
enumerated here.
"""
def __init__(self) -> None:
"""Set up an empty set of known constructor macros.
"""
self.statuses = set() #type: Set[str]
self.algorithms = set() #type: Set[str]
self.ecc_curves = set() #type: Set[str]
self.dh_groups = set() #type: Set[str]
self.key_types = set() #type: Set[str]
self.key_usage_flags = set() #type: Set[str]
self.hash_algorithms = set() #type: Set[str]
self.mac_algorithms = set() #type: Set[str]
self.ka_algorithms = set() #type: Set[str]
self.kdf_algorithms = set() #type: Set[str]
self.aead_algorithms = set() #type: Set[str]
# macro name -> list of argument names
self.argspecs = {} #type: Dict[str, List[str]]
# argument name -> list of values
self.arguments_for = {
'mac_length': [],
'min_mac_length': [],
'tag_length': [],
'min_tag_length': [],
} #type: Dict[str, List[str]]
def gather_arguments(self) -> None:
"""Populate the list of values for macro arguments.
Call this after parsing all the inputs.
"""
self.arguments_for['hash_alg'] = sorted(self.hash_algorithms)
self.arguments_for['mac_alg'] = sorted(self.mac_algorithms)
self.arguments_for['ka_alg'] = sorted(self.ka_algorithms)
self.arguments_for['kdf_alg'] = sorted(self.kdf_algorithms)
self.arguments_for['aead_alg'] = sorted(self.aead_algorithms)
self.arguments_for['curve'] = sorted(self.ecc_curves)
self.arguments_for['group'] = sorted(self.dh_groups)
@staticmethod
def _format_arguments(name: str, arguments: Iterable[str]) -> str:
"""Format a macro call with arguments.."""
return name + '(' + ', '.join(arguments) + ')'
_argument_split_re = re.compile(r' *, *')
@classmethod
def _argument_split(cls, arguments: str) -> List[str]:
return re.split(cls._argument_split_re, arguments)
def distribute_arguments(self, name: str) -> Iterator[str]:
"""Generate macro calls with each tested argument set.
If name is a macro without arguments, just yield "name".
If name is a macro with arguments, yield a series of
"name(arg1,...,argN)" where each argument takes each possible
value at least once.
"""
try:
if name not in self.argspecs:
yield name
return
argspec = self.argspecs[name]
if argspec == []:
yield name + '()'
return
argument_lists = [self.arguments_for[arg] for arg in argspec]
arguments = [values[0] for values in argument_lists]
yield self._format_arguments(name, arguments)
# Dear Pylint, enumerate won't work here since we're modifying
# the array.
# pylint: disable=consider-using-enumerate
for i in range(len(arguments)):
for value in argument_lists[i][1:]:
arguments[i] = value
yield self._format_arguments(name, arguments)
                arguments[i] = argument_lists[i][0]
except BaseException as e:
raise Exception('distribute_arguments({})'.format(name)) from e
def generate_expressions(self, names: Iterable[str]) -> Iterator[str]:
"""Generate expressions covering values constructed from the given names.
`names` can be any iterable collection of macro names.
For example:
* ``generate_expressions(['PSA_ALG_CMAC', 'PSA_ALG_HMAC'])``
generates ``'PSA_ALG_CMAC'`` as well as ``'PSA_ALG_HMAC(h)'`` for
every known hash algorithm ``h``.
* ``macros.generate_expressions(macros.key_types)`` generates all
key types.
"""
return itertools.chain(*map(self.distribute_arguments, names))
class PSAMacroCollector(PSAMacroEnumerator):
"""Collect PSA crypto macro definitions from C header files.
"""
def __init__(self, include_intermediate: bool = False) -> None:
"""Set up an object to collect PSA macro definitions.
Call the read_file method of the constructed object on each header file.
* include_intermediate: if true, include intermediate macros such as
PSA_XXX_BASE that do not designate semantic values.
"""
super().__init__()
self.include_intermediate = include_intermediate
self.key_types_from_curve = {} #type: Dict[str, str]
self.key_types_from_group = {} #type: Dict[str, str]
self.algorithms_from_hash = {} #type: Dict[str, str]
def is_internal_name(self, name: str) -> bool:
"""Whether this is an internal macro. Internal macros will be skipped."""
if not self.include_intermediate:
if name.endswith('_BASE') or name.endswith('_NONE'):
return True
if '_CATEGORY_' in name:
return True
return name.endswith('_FLAG') or name.endswith('_MASK')
def record_algorithm_subtype(self, name: str, expansion: str) -> None:
"""Record the subtype of an algorithm constructor.
Given a ``PSA_ALG_xxx`` macro name and its expansion, if the algorithm
is of a subtype that is tracked in its own set, add it to the relevant
set.
"""
# This code is very ad hoc and fragile. It should be replaced by
# something more robust.
if re.match(r'MAC(?:_|\Z)', name):
self.mac_algorithms.add(name)
elif re.match(r'KDF(?:_|\Z)', name):
self.kdf_algorithms.add(name)
elif re.search(r'0x020000[0-9A-Fa-f]{2}', expansion):
self.hash_algorithms.add(name)
elif re.search(r'0x03[0-9A-Fa-f]{6}', expansion):
self.mac_algorithms.add(name)
elif re.search(r'0x05[0-9A-Fa-f]{6}', expansion):
self.aead_algorithms.add(name)
elif re.search(r'0x09[0-9A-Fa-f]{2}0000', expansion):
self.ka_algorithms.add(name)
elif re.search(r'0x08[0-9A-Fa-f]{6}', expansion):
self.kdf_algorithms.add(name)
# "#define" followed by a macro name with either no parameters
# or a single parameter and a non-empty expansion.
# Grab the macro name in group 1, the parameter name if any in group 2
# and the expansion in group 3.
_define_directive_re = re.compile(r'\s*#\s*define\s+(\w+)' +
r'(?:\s+|\((\w+)\)\s*)' +
r'(.+)')
_deprecated_definition_re = re.compile(r'\s*MBEDTLS_DEPRECATED')
def read_line(self, line):
"""Parse a C header line and record the PSA identifier it defines if any.
This function analyzes lines that start with "#define PSA_"
(up to non-significant whitespace) and skips all non-matching lines.
"""
# pylint: disable=too-many-branches
m = re.match(self._define_directive_re, line)
if not m:
return
name, parameter, expansion = m.groups()
expansion = re.sub(r'/\*.*?\*/|//.*', r' ', expansion)
if parameter:
self.argspecs[name] = [parameter]
if re.match(self._deprecated_definition_re, expansion):
# Skip deprecated values, which are assumed to be
# backward compatibility aliases that share
# numerical values with non-deprecated values.
return
if self.is_internal_name(name):
# Macro only to build actual values
return
elif (name.startswith('PSA_ERROR_') or name == 'PSA_SUCCESS') \
and not parameter:
self.statuses.add(name)
elif name.startswith('PSA_KEY_TYPE_') and not parameter:
self.key_types.add(name)
elif name.startswith('PSA_KEY_TYPE_') and parameter == 'curve':
self.key_types_from_curve[name] = name[:13] + 'IS_' + name[13:]
elif name.startswith('PSA_KEY_TYPE_') and parameter == 'group':
self.key_types_from_group[name] = name[:13] + 'IS_' + name[13:]
elif name.startswith('PSA_ECC_FAMILY_') and not parameter:
self.ecc_curves.add(name)
elif name.startswith('PSA_DH_FAMILY_') and not parameter:
self.dh_groups.add(name)
elif name.startswith('PSA_ALG_') and not parameter:
if name in ['PSA_ALG_ECDSA_BASE',
'PSA_ALG_RSA_PKCS1V15_SIGN_BASE']:
# Ad hoc skipping of duplicate names for some numerical values
return
self.algorithms.add(name)
self.record_algorithm_subtype(name, expansion)
elif name.startswith('PSA_ALG_') and parameter == 'hash_alg':
if name in ['PSA_ALG_DSA', 'PSA_ALG_ECDSA']:
# A naming irregularity
tester = name[:8] + 'IS_RANDOMIZED_' + name[8:]
else:
tester = name[:8] + 'IS_' + name[8:]
self.algorithms_from_hash[name] = tester
elif name.startswith('PSA_KEY_USAGE_') and not parameter:
self.key_usage_flags.add(name)
else:
# Other macro without parameter
return
_nonascii_re = re.compile(rb'[^\x00-\x7f]+')
_continued_line_re = re.compile(rb'\\\r?\n\Z')
def read_file(self, header_file):
for line in header_file:
m = re.search(self._continued_line_re, line)
while m:
cont = next(header_file)
line = line[:m.start(0)] + cont
m = re.search(self._continued_line_re, line)
line = re.sub(self._nonascii_re, rb'', line).decode('ascii')
self.read_line(line)
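def _example_collect_psa_macros(header_path='include/psa/crypto_values.h'):
    """Hedged usage sketch (not part of the original module). The default header
    path is an assumption; real callers pass in the PSA crypto headers from the
    Mbed TLS source tree."""
    collector = PSAMacroCollector()
    with open(header_path, 'rb') as header_file:  # read_file expects byte lines
        collector.read_file(header_file)
    collector.gather_arguments()
    # Enumerate concrete expressions, e.g. PSA_ALG_HMAC(PSA_ALG_SHA_256).
    return list(collector.generate_expressions(collector.algorithms))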
|
StarcoderdataPython
|
4825776
|
<filename>tests/llvm/observation_spaces_test.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integrations tests for the LLVM CompilerGym environments."""
import os
import sys
from typing import Any, Dict, List
import gym
import networkx as nx
import numpy as np
import pytest
from flaky import flaky
from compiler_gym.envs.llvm.llvm_env import LlvmEnv
from compiler_gym.spaces import Box
from compiler_gym.spaces import Dict as DictSpace
from compiler_gym.spaces import Scalar, Sequence
from tests.pytest_plugins.common import ci_only
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_default_observation_space(env: LlvmEnv):
env.observation_space = "Autophase"
assert env.observation_space.shape == (56,)
assert env.observation_space_spec.id == "Autophase"
env.observation_space = None
assert env.observation_space is None
assert env.observation_space_spec is None
invalid = "invalid value"
with pytest.raises(LookupError, match=f"Observation space not found: {invalid}"):
env.observation_space = invalid
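# Hedged usage note (not part of the original suite): once a default observation
# space is set, reset()/step() return that observation directly. The action index
# below is arbitrary and for illustration only.
#
#   env.observation_space = "Autophase"
#   obs = env.reset("cbench-v1/crc32")
#   obs, reward, done, info = env.step(0)  # obs is a 56-dim Autophase vector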
def test_observation_spaces(env: LlvmEnv):
env.reset("cbench-v1/crc32")
assert set(env.observation.spaces.keys()) == {
"Autophase",
"AutophaseDict",
"Bitcode",
"BitcodeFile",
"Buildtime",
"CpuInfo",
"Inst2vec",
"Inst2vecEmbeddingIndices",
"Inst2vecPreprocessedText",
"InstCount",
"InstCountDict",
"InstCountNorm",
"InstCountNormDict",
"Ir",
"IrInstructionCount",
"IrInstructionCountO0",
"IrInstructionCountO3",
"IrInstructionCountOz",
"IrSha1",
"IsBuildable",
"IsRunnable",
"ObjectTextSizeBytes",
"ObjectTextSizeO0",
"ObjectTextSizeO3",
"ObjectTextSizeOz",
"Programl",
"ProgramlJson",
"Runtime",
"TextSizeBytes",
"TextSizeO0",
"TextSizeO3",
"TextSizeOz",
}
def test_ir_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "Ir"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
assert space.space.dtype == str
assert space.space.size_range == (0, np.iinfo(np.int64).max)
value: str = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, str)
assert space.space.contains(value)
assert space.deterministic
assert not space.platform_dependent
def test_ir_sha1_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "IrSha1"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
assert space.space.dtype == str
assert space.space.size_range == (40, 40)
value: str = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, str)
assert len(value) == 40
assert space.space.contains(value)
assert space.deterministic
assert not space.platform_dependent
def test_bitcode_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "Bitcode"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
assert space.space.dtype == np.int8
assert space.space.size_range == (0, np.iinfo(np.int64).max)
assert space.deterministic
assert not space.platform_dependent
value: str = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, np.ndarray)
assert value.dtype == np.int8
assert space.space.contains(value)
def test_bitcode_file_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "BitcodeFile"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
assert space.space.dtype == str
assert space.space.size_range == (0, 4096)
assert not space.deterministic
assert not space.platform_dependent
value: str = env.observation[key]
print(value) # For debugging in case of error.
try:
assert isinstance(value, str)
assert os.path.isfile(value)
assert space.space.contains(value)
finally:
os.unlink(value)
@pytest.mark.parametrize(
"benchmark_uri", ["cbench-v1/crc32", "cbench-v1/qsort", "cbench-v1/gsm"]
)
def test_bitcode_file_equivalence(env: LlvmEnv, benchmark_uri: str):
"""Test that LLVM produces the same bitcode as a file and as a byte array."""
env.reset(benchmark=benchmark_uri)
bitcode = env.observation.Bitcode()
bitcode_file = env.observation.BitcodeFile()
try:
with open(bitcode_file, "rb") as f:
bitcode_from_file = f.read()
assert bitcode.tobytes() == bitcode_from_file
finally:
os.unlink(bitcode_file)
# The Autophase feature vector for benchmark://cbench-v1/crc32 in its initial
# state.
AUTOPHASE_CBENCH_CRC32 = [
0,
0,
16,
12,
2,
16,
8,
2,
4,
8,
0,
0,
0,
29,
0,
24,
9,
2,
32,
44,
41,
14,
36,
16,
13,
0,
5,
26,
3,
5,
24,
20,
24,
33,
5,
10,
3,
51,
0,
1,
0,
5,
0,
0,
0,
42,
0,
1,
8,
5,
29,
242,
157,
15,
0,
103,
]
def test_autophase_observation_space_reset(env: LlvmEnv):
"""Test that the intial observation is returned on env.reset()."""
env.observation_space = "Autophase"
observation = env.reset("cbench-v1/crc32")
print(observation.tolist()) # For debugging on error.
np.testing.assert_array_equal(observation, AUTOPHASE_CBENCH_CRC32)
def test_instcount_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "InstCount"
space = env.observation.spaces[key]
assert isinstance(space.space, Box)
assert space.space.dtype == np.int64
assert space.space.shape == (70,)
assert space.deterministic
assert not space.platform_dependent
value: np.ndarray = env.observation[key]
print(value.tolist()) # For debugging in case of error.
expected_values = [
242,
29,
15,
5,
24,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
5,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
3,
0,
3,
1,
8,
26,
51,
42,
5,
0,
0,
0,
1,
5,
0,
0,
0,
0,
0,
0,
0,
0,
0,
20,
0,
0,
0,
10,
0,
0,
33,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
]
np.testing.assert_array_equal(value, expected_values)
assert value.dtype == np.int64
# The first value is the total number of instructions. This should equal the
# number of instructions.
assert sum(value[3:]) == value[0]
def test_instcount_dict_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "InstCountDict"
space = env.observation.spaces[key]
assert isinstance(space.space, DictSpace)
assert space.deterministic
assert not space.platform_dependent
value: Dict[str, int] = env.observation[key]
print(value) # For debugging in case of error.
assert len(value) == 70
def test_instcount_norm_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "InstCountNorm"
space = env.observation.spaces[key]
assert isinstance(space.space, Box)
assert space.space.dtype == np.float32
assert space.space.shape == (69,)
assert space.deterministic
assert not space.platform_dependent
value: np.ndarray = env.observation[key]
print(value.tolist()) # For debugging in case of error.
assert value.shape == (69,)
assert value.dtype == np.float32
# Assert that the normalized instruction counts sum to 1. Note that the
# first two features (#blocks and #funcs) must be excluded.
assert pytest.approx(sum(value[2:]), 1.0)
def test_instcount_norm_dict_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "InstCountNormDict"
space = env.observation.spaces[key]
assert isinstance(space.space, DictSpace)
assert space.deterministic
assert not space.platform_dependent
value: Dict[str, int] = env.observation[key]
print(value) # For debugging in case of error.
assert len(value) == 69
def test_autophase_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "Autophase"
space = env.observation.spaces[key]
assert isinstance(space.space, Box)
value: np.ndarray = env.observation[key]
print(value.tolist()) # For debugging in case of error.
assert isinstance(value, np.ndarray)
assert value.shape == (56,)
assert space.deterministic
assert not space.platform_dependent
np.testing.assert_array_equal(value, AUTOPHASE_CBENCH_CRC32)
assert space.space.contains(value)
def test_autophase_dict_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "AutophaseDict"
space = env.observation.spaces[key]
assert isinstance(space.space, DictSpace)
value: Dict[str, int] = env.observation[key]
print(value) # For debugging in case of error.
assert len(value) == 56
assert space.deterministic
assert not space.platform_dependent
def test_programl_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "Programl"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
graph: nx.MultiDiGraph = env.observation[key]
assert isinstance(graph, nx.MultiDiGraph)
assert graph.number_of_nodes() == 512
assert graph.number_of_edges() == 907
assert graph.nodes[0] == {
"block": 0,
"function": 0,
"text": "[external]",
"type": 0,
}
assert space.deterministic
assert not space.platform_dependent
def test_programl_json_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "ProgramlJson"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
graph: Dict[str, Any] = env.observation[key]
assert isinstance(graph, dict)
def test_cpuinfo_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "CpuInfo"
space = env.observation.spaces[key]
assert isinstance(space.space, DictSpace)
value: Dict[str, Any] = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, dict)
# Test each expected key, removing it as we go.
assert isinstance(value.pop("name"), str)
assert isinstance(value.pop("cores_count"), int)
assert isinstance(value.pop("l1i_cache_size"), int)
assert isinstance(value.pop("l1i_cache_count"), int)
assert isinstance(value.pop("l1d_cache_size"), int)
assert isinstance(value.pop("l1d_cache_count"), int)
assert isinstance(value.pop("l2_cache_size"), int)
assert isinstance(value.pop("l2_cache_count"), int)
assert isinstance(value.pop("l3_cache_size"), int)
assert isinstance(value.pop("l3_cache_count"), int)
assert isinstance(value.pop("l4_cache_size"), int)
assert isinstance(value.pop("l4_cache_count"), int)
# Anything left in the JSON dictionary now is an unexpected key.
assert not value
invalid = "invalid value"
with pytest.raises(KeyError) as ctx:
_ = env.observation[invalid]
assert str(ctx.value) == f"'{invalid}'"
assert space.deterministic
assert space.platform_dependent
@pytest.fixture
def cbench_crc32_inst2vec_embedding_indices() -> List[int]:
"""The expected inst2vec embedding indices for cbench-v1/crc32."""
# The linux/macOS builds of clang produce slightly different bitcodes.
if sys.platform.lower().startswith("linux"):
return [
8564,
8564,
8564,
8564,
5,
46,
46,
40,
8564,
13,
8,
8564,
1348,
178,
286,
214,
182,
235,
697,
1513,
192,
8564,
182,
182,
395,
1513,
2298,
8564,
289,
291,
3729,
3729,
8564,
178,
289,
289,
200,
1412,
1412,
8564,
3032,
180,
3032,
293,
3032,
205,
415,
205,
213,
8564,
8564,
8564,
204,
8564,
213,
215,
364,
364,
216,
8564,
216,
8564,
8564,
8564,
311,
634,
204,
8564,
415,
182,
640,
214,
182,
295,
675,
697,
1513,
192,
8564,
182,
182,
395,
1513,
214,
216,
8564,
8564,
8564,
634,
204,
8564,
213,
215,
415,
205,
216,
8564,
8564,
8564,
182,
961,
214,
415,
214,
364,
364,
216,
8564,
293,
3032,
180,
3032,
8564,
3032,
295,
257,
8564,
291,
178,
178,
200,
214,
180,
3032,
205,
216,
8564,
182,
977,
204,
8564,
182,
213,
235,
697,
1513,
192,
8564,
182,
182,
395,
1513,
214,
216,
8564,
182,
420,
214,
213,
8564,
200,
216,
8564,
182,
961,
180,
3032,
2298,
8564,
289,
8564,
289,
178,
178,
289,
311,
594,
311,
180,
3032,
180,
3032,
293,
3032,
364,
216,
8564,
295,
431,
311,
425,
204,
8564,
597,
8564,
594,
213,
8564,
295,
653,
311,
295,
634,
204,
8564,
182,
182,
597,
213,
8564,
216,
8564,
216,
8564,
295,
634,
612,
293,
3032,
180,
3032,
180,
3032,
257,
8564,
289,
289,
8564,
8564,
178,
178,
289,
364,
311,
594,
8564,
3032,
8564,
180,
3032,
180,
3032,
8564,
8564,
8564,
204,
8564,
8564,
8564,
364,
364,
216,
8564,
8564,
8564,
8564,
8564,
205,
216,
8564,
182,
182,
488,
204,
8564,
295,
597,
182,
640,
182,
540,
612,
8564,
216,
8564,
182,
640,
214,
216,
8564,
364,
364,
216,
8564,
180,
3032,
180,
3032,
8564,
3032,
295,
257,
]
elif sys.platform.lower().startswith("darwin"):
return [
8564,
8564,
5,
46,
46,
40,
8564,
13,
8,
8564,
1348,
178,
286,
214,
182,
235,
697,
1513,
192,
8564,
182,
182,
395,
1513,
2298,
8564,
289,
291,
3729,
3729,
8564,
178,
289,
289,
200,
1412,
1412,
8564,
3032,
180,
3032,
293,
3032,
205,
415,
205,
213,
8564,
8564,
5666,
204,
8564,
213,
215,
364,
364,
216,
8564,
216,
8564,
5665,
8564,
311,
634,
204,
8564,
415,
182,
640,
214,
182,
295,
675,
697,
1513,
192,
8564,
182,
182,
395,
1513,
214,
216,
8564,
5665,
8564,
634,
204,
8564,
213,
215,
415,
205,
216,
8564,
5665,
8564,
182,
961,
214,
415,
214,
364,
364,
216,
8564,
293,
3032,
180,
3032,
8564,
3032,
295,
257,
8564,
291,
178,
178,
200,
214,
180,
3032,
205,
216,
8564,
182,
977,
204,
8564,
182,
213,
235,
697,
1513,
192,
8564,
182,
182,
395,
1513,
214,
216,
8564,
182,
420,
214,
213,
8564,
200,
216,
8564,
182,
961,
180,
3032,
2298,
8564,
289,
8564,
289,
178,
178,
289,
311,
594,
311,
180,
3032,
180,
3032,
293,
3032,
364,
216,
8564,
295,
431,
311,
425,
204,
8564,
597,
8564,
594,
213,
8564,
295,
653,
311,
295,
634,
204,
8564,
182,
182,
597,
213,
8564,
216,
8564,
216,
8564,
295,
634,
612,
293,
3032,
180,
3032,
180,
3032,
257,
8564,
289,
289,
8564,
8564,
178,
178,
289,
364,
311,
594,
8564,
3032,
8564,
180,
3032,
180,
3032,
8564,
8564,
5666,
204,
8564,
5391,
8564,
364,
364,
216,
8564,
5665,
8564,
5665,
8564,
205,
216,
8564,
182,
182,
488,
204,
8564,
295,
597,
182,
640,
182,
540,
612,
8564,
216,
8564,
182,
640,
214,
216,
8564,
364,
364,
216,
8564,
180,
3032,
180,
3032,
8564,
3032,
295,
257,
]
else:
raise NotImplementedError(f"Unknown platform: {sys.platform}")
def test_inst2vec_preprocessed_observation_space(
env: LlvmEnv, cbench_crc32_inst2vec_embedding_indices: List[int]
):
env.reset("cbench-v1/crc32")
key = "Inst2vecPreprocessedText"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
value: List[str] = env.observation[key]
assert isinstance(value, list)
for item, idx in zip(value, cbench_crc32_inst2vec_embedding_indices):
assert isinstance(item, str)
unk = env.inst2vec.vocab["!UNK"]
indices = [env.inst2vec.vocab.get(item, unk) for item in value]
print(indices) # For debugging in case of error.
assert indices == cbench_crc32_inst2vec_embedding_indices
assert space.deterministic
assert not space.platform_dependent
def test_inst2vec_embedding_indices_observation_space(
env: LlvmEnv, cbench_crc32_inst2vec_embedding_indices: List[int]
):
env.reset("cbench-v1/crc32")
key = "Inst2vecEmbeddingIndices"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
value: List[int] = env.observation[key]
print(value) # For debugging in case of error.
print(value)
assert isinstance(value, list)
for item in value:
assert isinstance(item, int)
assert value == cbench_crc32_inst2vec_embedding_indices
assert space.deterministic
assert not space.platform_dependent
def test_inst2vec_observation_space(
env: LlvmEnv, cbench_crc32_inst2vec_embedding_indices: List[int]
):
env.reset("cbench-v1/crc32")
key = "Inst2vec"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
value: np.ndarray = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, np.ndarray)
assert value.dtype == np.float32
height, width = value.shape
assert width == len(env.inst2vec.embeddings[0])
assert height == len(cbench_crc32_inst2vec_embedding_indices)
# Check a handful of values.
np.testing.assert_array_almost_equal(
value.tolist(),
[
env.inst2vec.embeddings[idx]
for idx in cbench_crc32_inst2vec_embedding_indices
],
)
assert space.deterministic
assert not space.platform_dependent
def test_ir_instruction_count_observation_spaces(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "IrInstructionCount"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert not space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value == 242
key = "IrInstructionCountO0"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert not space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value == 242
key = "IrInstructionCountO3"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert not space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value == 164
key = "IrInstructionCountOz"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert not space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value == 114
def test_object_text_size_observation_spaces(env: LlvmEnv):
env.reset("cbench-v1/crc32")
# Expected .text sizes for this benchmark: -O0, -O3, -Oz.
crc32_code_sizes = {"darwin": [1171, 3825, 3289], "linux": [1183, 3961, 3286]}
key = "ObjectTextSizeBytes"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value == crc32_code_sizes[sys.platform][0]
key = "ObjectTextSizeO0"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value == crc32_code_sizes[sys.platform][0]
key = "ObjectTextSizeO3"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value == crc32_code_sizes[sys.platform][1]
key = "ObjectTextSizeOz"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value == crc32_code_sizes[sys.platform][2]
def test_text_size_observation_spaces(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "TextSizeBytes"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
key = "TextSizeO0"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value > 0 # Exact value is system dependent, see below.
key = "TextSizeO3"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value > 0 # Exact value is system dependent, see below.
key = "TextSizeOz"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value > 0 # Exact value is system dependent, see below.
# NOTE(cummins): The exact values here depend on the system toolchain and
# libraries, so only run this test on the GitHub CI runner environment where we
# can hardcode the values. If this test starts to fail, it may be because the CI
# runner environment has changed.
@ci_only
def test_text_size_observation_space_values(env: LlvmEnv):
env.reset("cbench-v1/crc32")
# Expected .text sizes for this benchmark: -O0, -O3, -Oz.
crc32_code_sizes = {"darwin": [16384, 16384, 16384], "linux": [2850, 5652, 4980]}
# For debugging in case of error.
print(env.observation["TextSizeO0"])
print(env.observation["TextSizeO3"])
print(env.observation["TextSizeOz"])
assert env.observation.TextSizeO0() == crc32_code_sizes[sys.platform][0]
assert env.observation.TextSizeO0() == crc32_code_sizes[sys.platform][0]
assert env.observation.TextSizeO3() == crc32_code_sizes[sys.platform][1]
assert env.observation.TextSizeOz() == crc32_code_sizes[sys.platform][2]
@flaky # Runtimes can timeout
def test_runtime_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "Runtime"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
value: np.ndarray = env.observation[key]
print(value.tolist()) # For debugging in case of error.
assert isinstance(value, np.ndarray)
assert env.runtime_observation_count == 1
assert value.shape == (1,)
assert not space.deterministic
assert space.platform_dependent
assert space.space.contains(value)
for buildtime in value:
assert buildtime > 0
@flaky # Runtimes can timeout
def test_runtime_observation_space_different_observation_count(env: LlvmEnv):
"""Test setting a custom observation count for LLVM runtimes."""
env.reset("cbench-v1/crc32")
env.runtime_observation_count = 3
value: np.ndarray = env.observation["Runtime"]
print(value.tolist()) # For debugging in case of error.
assert value.shape == (3,)
env.reset()
value: np.ndarray = env.observation["Runtime"]
print(value.tolist()) # For debugging in case of error.
assert value.shape == (3,)
env.runtime_observation_count = 5
value: np.ndarray = env.observation["Runtime"]
print(value.tolist()) # For debugging in case of error.
assert value.shape == (5,)
@flaky # Runtimes can timeout
def test_runtime_observation_space_invalid_observation_count(env: LlvmEnv):
"""Test setting an invalid custom observation count for LLVM runtimes."""
env.reset("cbench-v1/crc32")
val = env.runtime_observation_count
with pytest.raises(
ValueError, match="runtimes_per_observation_count must be >= 1. Received: -5"
):
env.runtime_observation_count = -5
assert env.runtime_observation_count == val # unchanged
def test_runtime_observation_space_not_runnable(env: LlvmEnv):
env.reset("chstone-v0/gsm")
key = "Runtime"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
assert env.observation[key] is None
@flaky # Build can timeout
def test_buildtime_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "Buildtime"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
assert not space.deterministic
assert space.platform_dependent
value: np.ndarray = env.observation[key]
print(value) # For debugging in case of error.
assert value.shape == (1,)
assert space.space.contains(value)
assert value[0] >= 0
def test_buildtime_observation_space_not_runnable(env: LlvmEnv):
env.reset("chstone-v0/gsm")
key = "Buildtime"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
assert not space.deterministic
assert space.platform_dependent
assert env.observation[key] is None
def test_is_runnable_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "IsRunnable"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value == 1
def test_is_runnable_observation_space_not_runnable(env: LlvmEnv):
env.reset("chstone-v0/gsm")
key = "IsRunnable"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value == 0
def test_is_buildable_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "IsBuildable"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value == 1
def test_is_buildable_observation_space_not_buildable(env: LlvmEnv):
env.reset("chstone-v0/gsm")
key = "IsBuildable"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value == 0
def test_add_derived_space(env: LlvmEnv):
env.reset()
with pytest.deprecated_call(
match="Use the derived_observation_spaces argument to CompilerEnv constructor."
):
env.observation.add_derived_space(
id="IrLen",
base_id="Ir",
space=Box(name="IrLen", low=0, high=float("inf"), shape=(1,), dtype=int),
translate=lambda base: [15],
)
value = env.observation["IrLen"]
assert isinstance(value, list)
assert value == [15]
# Repeat the above test using the generated bound method.
value = env.observation.IrLen()
assert isinstance(value, list)
assert value == [15]
def test_derived_space_constructor():
"""Test that derived observation space can be specified at construction
time.
"""
with gym.make("llvm-v0") as env:
env.observation_space = "AutophaseDict"
a = env.reset()
with gym.make("llvm-v0", observation_space="AutophaseDict") as env:
b = env.reset()
assert a == b
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1689761
|
<reponame>NickolausDS/deriva-action-provider
import os
import json
import csv
from deriva.core import urlquote
from deriva.core.ermrest_config import tag
from . import tableschema2erm
# we'll use this utility function later...
def topo_sorted(depmap):
"""Return list of items topologically sorted.
depmap: { item: [required_item, ...], ... }
Raises ValueError if a required_item cannot be satisfied in any order.
The per-item required_item iterables must allow revisiting on
multiple iterations.
"""
ordered = [item for item, requires in depmap.items() if not requires]
depmap = {item: set(requires) for item, requires in depmap.items() if requires}
satisfied = set(ordered)
while depmap:
additions = []
for item, requires in list(depmap.items()):
if requires.issubset(satisfied):
additions.append(item)
satisfied.add(item)
del depmap[item]
if not additions:
raise ValueError(("unsatisfiable", depmap))
ordered.extend(additions)
additions = []
return ordered
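# Example (minimal sketch with a hypothetical dependency map):
#   topo_sorted({"a": [], "b": ["a"], "c": ["a", "b"]})  ->  ["a", "b", "c"]
# An unsatisfiable map such as {"a": ["b"], "b": ["a"]} raises ValueError(("unsatisfiable", ...)).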
class CfdeDataPackage(object):
# the translation stores frictionless table resource metadata under this annotation
resource_tag = 'tag:isrd.isi.edu,2019:table-resource'
    # the translation leaves extraneous table-schema stuff under this annotation
# (i.e. stuff that perhaps wasn't translated to deriva equivalents)
schema_tag = 'tag:isrd.isi.edu,2019:table-schema-leftovers'
def __init__(self, filename, verbose=True):
self.filename = filename
self.dirname = os.path.dirname(self.filename)
self.catalog = None
self.model_root = None
self.cfde_schema = None
self.verbose = verbose
with open(self.filename, 'r') as f:
tableschema = json.loads(f.read())
self.model_doc = tableschema2erm.convert_tableschema(tableschema, 'CFDE', True)
if set(self.model_doc['schemas']) != {'CFDE'}:
raise NotImplementedError('Unexpected schema set in data package: '
'%s' % (self.model_doc['schemas'],))
def set_catalog(self, catalog):
self.catalog = catalog
self.get_model()
def get_model(self):
self.model_root = self.catalog.getCatalogModel()
self.cfde_schema = self.model_root.schemas.get('CFDE')
def provision(self):
if 'CFDE' not in self.model_root.schemas:
# blindly load the whole model on an apparently empty catalog
self.catalog.post('/schema', json=self.model_doc).raise_for_status()
else:
# do some naively idempotent model definitions on existing catalog
# adding missing tables and missing columns
need_tables = []
need_columns = []
hazard_fkeys = {}
for tname, tdoc in self.model_doc['schemas']['CFDE']['tables'].items():
if tname in self.cfde_schema.tables:
table = self.cfde_schema.tables[tname]
for cdoc in tdoc['column_definitions']:
if cdoc['name'] in table.column_definitions.elements:
column = table.column_definitions.elements[cdoc['name']]
# TODO: check existing columns for compatibility?
else:
cdoc.update({'table_name': tname, 'nullok': True})
need_columns.append(cdoc)
# TODO: check existing table keys/foreign keys for compatibility?
else:
tdoc['schema_name'] = 'CFDE'
need_tables.append(tdoc)
if need_tables:
if self.verbose:
print("Added tables %s" % ([tdoc['table_name'] for tdoc in need_tables]))
self.catalog.post('/schema', json=need_tables).raise_for_status()
for cdoc in need_columns:
self.catalog.post(
'/schema/CFDE/table/%s/column' % urlquote(cdoc['table_name']),
json=cdoc
).raise_for_status()
if self.verbose:
print("Added column %s.%s" % (cdoc['table_name'], cdoc['name']))
self.get_model()
def apply_acls(self, acls):
self.get_model()
self.model_root.acls.update(acls)
# set custom chaise configuration values for this catalog
self.model_root.annotations[tag.chaise_config] = {
# hide system metadata by default in tabular listings, to focus on CFDE-specific content
"SystemColumnsDisplayCompact": [],
}
# apply the above ACL and annotation changes to server
self.model_root.apply(self.catalog)
self.get_model()
@classmethod
def make_row2dict(cls, table, header):
"""Pickle a row2dict(row) function for use with a csv reader"""
numcols = len(header)
missingValues = set(table.annotations[cls.schema_tag].get("missingValues", []))
for cname in header:
if cname not in table.column_definitions.elements:
raise ValueError("header column %s not found in table %s" % (cname, table.name))
def row2dict(row):
"""Convert row tuple to dictionary of {col: val} mappings."""
return dict(zip(
header,
[None if x in missingValues else x for x in row]
))
return row2dict
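    # Example (sketch, hypothetical values): with header ["id", "name"] and the table's
    # missingValues containing "NA", row2dict(["7", "NA"]) -> {"id": "7", "name": None}.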
def data_tnames_topo_sorted(self):
def target_tname(fkey):
return fkey.referenced_columns[0]["table_name"]
tables_doc = self.model_doc['schemas']['CFDE']['tables']
return topo_sorted({
table.name: [
target_tname(fkey)
for fkey in table.foreign_keys
if target_tname(fkey) != table.name and target_tname(fkey) in tables_doc
]
for table in self.cfde_schema.tables.values()
if table.name in tables_doc
})
def load_data_files(self):
tables_doc = self.model_doc['schemas']['CFDE']['tables']
for tname in self.data_tnames_topo_sorted():
# we are doing a clean load of data in fkey dependency order
table = self.model_root.table("CFDE", tname)
resource = tables_doc[tname]["annotations"].get(self.resource_tag, {})
if "path" in resource:
fname = "%s/%s" % (self.dirname, resource["path"])
with open(fname, "r") as f:
# translate TSV to python dicts
reader = csv.reader(f, delimiter="\t")
raw_rows = list(reader)
row2dict = self.make_row2dict(table, raw_rows[0])
dict_rows = [row2dict(row) for row in raw_rows[1:]]
self.catalog.post("/entity/CFDE:%s" % urlquote(table.name), json=dict_rows)
if self.verbose:
print("Table %s data loaded from %s." % (table.name, fname))
|
StarcoderdataPython
|
1686887
|
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Prints "1" if Chrome targets should be built with hermetic Xcode.
Prints "2" if Chrome targets should be built with hermetic Xcode, but the OS
version does not meet the minimum requirements of the hermetic version of Xcode.
Otherwise prints "0".
Usage:
python should_use_hermetic_xcode.py <target_os>
"""
import os
import sys
_THIS_DIR_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
_BUILD_PATH = os.path.join(_THIS_DIR_PATH, os.pardir)
sys.path.insert(0, _BUILD_PATH)
import mac_toolchain
def _IsCorpMachine():
return os.path.isdir('/Library/GoogleCorpSupport/')
def main():
allow_corp = sys.argv[1] == 'mac' and _IsCorpMachine()
if os.environ.get('FORCE_MAC_TOOLCHAIN') or allow_corp:
if not mac_toolchain.PlatformMeetsHermeticXcodeRequirements(sys.argv[1]):
return "2"
return "1"
else:
return "0"
if __name__ == '__main__':
print(main())
sys.exit(0)
|
StarcoderdataPython
|
1604339
|
<gh_stars>0
s = input()
t = input()
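# Rotate s by one character per pass and print "Yes" if any rotation matches t, else "No".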
u = list(s)
v = list(t)
com1 = []
com1.append(u[0])
p = 0
for i in range(len(u)):
for j in range(len(u)):
if j == len(u) - 1:
u[0] = com1[j]
else:
com1.append(u[j + 1])
u[j + 1] = com1[j]
if u == v:
print("Yes")
p = 1
break
com1 = []
com1.append(u[0])
if p == 0:
print("No")
|
StarcoderdataPython
|
1787889
|
<filename>Chapter 07/Chap07_Example7.141.py<gh_stars>0
def my_generator_function():
num = 1
print('Printing first')
yield num
num += 1
print('Printing second')
yield num
num += 1
print('Printing third')
yield num
num += 1
print('Printing at last')
yield num
mynum = my_generator_function() # G0
print(type(mynum)) # G1
print(next(mynum)) # G2
print(next(mynum)) # G3
print(next(mynum)) # G4
print(next(mynum)) # G5
print(next(mynum)) # G6
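# Note: the generator defines only four yield statements, so the fifth next() call at G6
# raises StopIteration and stops the script. A for loop (sketch) consumes the same
# generator without raising:
#   for value in my_generator_function():
#       print(value)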
|
StarcoderdataPython
|
1775253
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import platform
from selenium import webdriver
from bs4 import BeautifulSoup
# Check Python version
# print(platform.python_version())
# Using the right PhantomJS for the corresponding OS
if platform.system() == "Windows":
PHANTOMJS_EXE = "./PhantomJS/phantomjs.exe"
else:
PHANTOMJS_EXE = "./PhantomJS/phantomjs"
def main():
# Use PhantomJS to browse the page, alternatively we can use
# browser = webdriver.Firefox()
browser = webdriver.PhantomJS(PHANTOMJS_EXE)
browser.get('http://www.scoreboard.com/en/tennis/atp-singles/us-open-2015/results/')
# Parse the html source
soup = BeautifulSoup(browser.page_source, "html.parser")
# Get all the games
games = soup.find_all('tr', {'class': 'stage-finished'})
# Print out the html for the first game
print(games[0].prettify())
if __name__ == "__main__":
main()
# print("This Python script was started as the main program.")
else:
print("This Python script is to be called as main(), not as the module ", __name__ + ".")
|
StarcoderdataPython
|
3367145
|
import pytest
from floodlight.io.utils import get_and_convert
# Test get_and_convert function
@pytest.mark.unit
def test_get_and_convert() -> None:
sample_dict = {"foo": "1"}
# get
assert get_and_convert(sample_dict, "foo", int) == 1
# convert
assert type(get_and_convert(sample_dict, "foo", int)) is int
# missing entry
assert get_and_convert(sample_dict, "bar", int) is None
# fallback if failed type conversion
assert get_and_convert(sample_dict, "foo", dict) == "1"
# custom default with failed conversion
assert get_and_convert(sample_dict, "bar", int, "default") == "default"
|
StarcoderdataPython
|
167420
|
<gh_stars>0
import numpy as np
from g2p.data import DoubleBets
def mean_score(eval_func, pred, label_seq):
return np.mean([
eval_func(p, DoubleBets.arpabet.unwrap_iseq(l))
for p, l in zip(pred, label_seq.t())
])
|
StarcoderdataPython
|
3246994
|
from .database import *
from .LyceumGroup import *
from .LyceumUser import *
from .Settings import *
from .Student import *
from .Task import *
from .ActiveTop import *
|
StarcoderdataPython
|
3383410
|
import os
from django.core.management.base import BaseCommand
from django.conf import settings
from crawler import search_changelog, _parse_changelog_text
from allmychanges.models import Repo
from allmychanges.utils import cd, get_package_metadata, download_repo
class Command(BaseCommand):
help = u"""Updates single project."""
def handle(self, *args, **options):
for path in args:
self._update(path)
def _update(self, url):
if '://' in url or url.startswith('git@'):
path = download_repo(url)
else:
path = url
with cd(path):
changelog_filename = search_changelog()
if changelog_filename:
fullfilename = os.path.normpath(
os.path.join(os.getcwd(), changelog_filename))
with open(fullfilename) as f:
changes = _parse_changelog_text(f.read())
if changes:
repo, created = Repo.objects.get_or_create(
url=url,
title=get_package_metadata('.', 'Name'))
repo.versions.all().delete()
for change in changes:
version = repo.versions.create(name=change['version'])
for section in change['sections']:
item = version.items.create(text=section['notes'])
for section_item in section['items']:
item.changes.create(type='new', text=section_item)
|
StarcoderdataPython
|
193629
|
<filename>rlbench/tasks/stack_chairs.py
from pyrep.objects.proximity_sensor import ProximitySensor
from pyrep.objects.object import Object
from pyrep.objects.shape import Shape
from rlbench.backend.conditions import DetectedCondition, NothingGrasped, Condition
from rlbench.backend.spawn_boundary import SpawnBoundary
from rlbench.backend.task import Task
from rlbench.const import colors
from typing import List
import numpy as np
class ChairsOrientedCondition(Condition):
def __init__(self, objs: List[Object], error: float):
self.objs = objs
self.error = error
def condition_met(self):
for obj in self.objs[:-1]:
x, y, z = obj.get_orientation(self.objs[-1])
if abs(x) > self.error or abs(y) > self.error or abs(z) > self.error:
return False, False
return True, False
class StackChairs(Task):
def init_task(self) -> None:
self.chair1 = Shape('chair1')
self.chair2 = Shape('chair2')
self.chair3 = Shape('chair3')
self.cushions1 = Shape('cushions1')
self.cushions2 = Shape('cushions2')
self.cushions3 = Shape('cushions3')
self.boundary = SpawnBoundary([Shape('boundary')])
self.detector = ProximitySensor('success')
self.register_graspable_objects([self.chair1, self.chair2, self.chair3])
self.register_success_conditions([
DetectedCondition(self.chair1, self.detector),
DetectedCondition(self.chair2, self.detector),
NothingGrasped(self.robot.gripper),
ChairsOrientedCondition([self.chair1, self.chair2, self.chair3], error = 0.25)
])
def init_episode(self, index: int) -> List[str]:
indices = np.random.choice(np.arange(len(colors)), size = 2, replace = False)
color1, rgb1 = colors[index]
color2, rgb2 = colors[indices[0]]
color3, rgb3 = colors[indices[1]] # target chair color
self.cushions1.set_color(rgb1)
self.cushions2.set_color(rgb2)
self.cushions3.set_color(rgb3)
self.boundary.clear()
self.boundary.sample(self.chair1, min_distance=0.1,
min_rotation=(0, 0, 0), max_rotation=(0, 0, 0))
self.boundary.sample(self.chair2, min_distance=0.1,
min_rotation=(0, 0, 0), max_rotation=(0, 0, 0))
self.boundary.sample(self.chair3, min_distance=0.1,
min_rotation=(0, 0, 0), max_rotation=(0, 0, 0))
return [f'stack the other chairs on top of the {color1} chair',
f'put the remaining two chairs on top of the {color1} chair',
f'pick up and set the chairs down onto the {color1} chair',
f'create a stack of chairs with the {color1} chair at its base',
f'keeping the {color1} chair on the table, stack the other two onto it']
def variation_count(self) -> int:
return len(colors)
|
StarcoderdataPython
|
95685
|
<reponame>Basdanso/reimbursementApi
from unittest import TestCase
from daos.account_dao_postgres import AccountDaoPostgres
from entities.account import Account
account_dao = AccountDaoPostgres()
#account_dao = AccountDaoLocal()
test_account = Account(0, "Bas", "<EMAIL>", "password", "<PASSWORD>", 2000)
def test_create_account():
acc = account_dao.create_account(test_account)
print('test_create_account')
print(test_account)
print(acc)
assert test_account.id != 0
def test_get_account_by_id():
account = account_dao.find_account_by_id(test_account.id)
print('test_get_account_by_id')
print(account)
print(test_account)
TestCase().assertDictEqual(account.as_json_dict(), test_account.as_json_dict())
def test_update_account():
test_account.available = True
update_account = account_dao.update_account(test_account)
print('update_account.id')
print(update_account)
assert update_account.id == test_account.id
def test_delete_account():
result = account_dao.delete_account(test_account.id)
assert result == True
|
StarcoderdataPython
|
1734223
|
<reponame>aruymgaart/AMATH
import numpy as np
from scipy.signal import convolve2d
from skimage.color import rgb2grey
import matplotlib.pyplot as plt
import pickle, copy
def getCanImg(MX,s,fr,x,y,w=10,h=15,offy=1): return MX[s][fr][y+offy-h:y+offy+h,x-w:x+w,:]
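# rgbDifference (below) slides the smaller template im2 over im1, summing absolute RGB
# differences at each offset and normalising by the largest sum, so lower values mean a better match.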
def rgbDifference(im1, im2):
ret = np.ones(im1.shape) * 99999.0
nI, nJ, mxd = im2.shape[0], im2.shape[1], 0.0
for i in range(im1.shape[0] - nI):
for j in range(im1.shape[1] - nJ):
d = np.sum(np.abs(im1[i:i+nI, j:j+nJ] - im2))
ret[i,j] = d
if d > mxd: mxd = d
ret[ret==99999] = mxd
return ret/mxd
def imageSetToObjectCoordinates(M, objImg, bConv=False, name='set1', maxD=10.0):
coords = []
try: nObj = len(objImg)
except: objImg = [objImg]
for k,A in enumerate(M):
D, pLast, pNew = [], None, None
for j in range(len(A)):
dAll,diffs = [],[]
for no, ob in enumerate(objImg):
if bConv:
                    R = convolve2d(rgb2grey(A[j]), rgb2grey(ob))  # correlate against the current template, in greyscale
m = np.unravel_index(np.argmax(R), R.shape)
else:
R = rgbDifference(A[j], ob)
m = np.unravel_index(np.argmin(R), R.shape)
dAll.append( [j, m[1]+ob.shape[1]/2, m[0]+ob.shape[0]/2, no] )
diffs.append(R[m])
if pLast is None:
pLast = dAll[np.argmin(np.array(diffs))]
pNew = pLast
else:
pNew = None
for k, ind in enumerate(np.argsort(np.array(diffs))):
dist = np.linalg.norm( np.array(dAll[ind])[1:3] - np.array(pLast)[1:3] )
if dist < maxD:
pNew = dAll[ind]
print('\t UPDATING pLast', dAll[ind])
pLast = copy.copy(dAll[ind])
break
else:
print('skipping index', k, 'dist', dist, 'skipped img', ind)
if pNew is None:
pNew = dAll[np.argmin(np.array(diffs))]
dist = np.linalg.norm( np.array(pNew)[1:3] - np.array(pLast)[1:3] )
pLast = pNew
print('* no close match, setting', pNew, 'dist', dist )
D.append(pNew)
coords.append(np.array(D))
f = open('%s_coords.dict' % (name), "wb")
pickle.dump(coords,f)
f.close()
def pca(A, nv=3, t=True):
As = np.zeros(A.shape)
for k in range(A.shape[0]): As[k,:] = A[k,:] - np.average(A[k,:])
As /= np.sqrt(float(A.shape[1]-1))
M = As.T if t else As
[U,S,Vt] = np.linalg.svd(M, full_matrices=False)
Ured = U[:,0:nv]
P = np.matmul( np.diag(S), Vt ).T # = np.matmul(M.T, Ured) # same
return Ured,S,As,P
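# Example usage (sketch): rows of A are measurements (e.g. x/y tracks), columns are frames.
#   Ured, S, As, P = pca(np.vstack([xs, ys]), nv=2)
# where xs and ys are hypothetical equal-length 1-D coordinate arrays.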
def plotCanRot(can1,can2,can3,can4,can5,can6,can7,can8,can9,can10,can11,can12,can13,can14,can15,can16,fname=''):
fig, ax = plt.subplots(4,4)
for i in range(4):
for j in range(4): ax[i,j].axis('off')
ax[0,0].imshow(can1), ax[0,1].imshow(can2), ax[0,2].imshow(can3), ax[0,3].imshow(can4)
ax[1,0].imshow(can5), ax[1,1].imshow(can6), ax[1,2].imshow(can7), ax[1,3].imshow(can8)
ax[2,0].imshow(can9), ax[2,1].imshow(can10),ax[2,2].imshow(can11),ax[2,3].imshow(can12)
ax[3,0].imshow(can13),ax[3,1].imshow(can14),ax[3,2].imshow(can15),ax[3,3].imshow(can16)
if fname == '': plt.show()
else: plt.savefig(fname, figsize=(8, 6), dpi=300, bbox_inches='tight')
def plotTrendMatrix(A, names, ttl='', fname=''):
for j in range(len(A)): plt.plot(A[j], label=names[j])
if ttl != '': plt.title(ttl)
plt.xlabel('Frame nr.')
plt.legend()
if fname == '': plt.show()
else: plt.savefig(fname, figsize=(8, 6), dpi=300, bbox_inches='tight')
def plotInvPcProjection(Rs, A, names, nrPC, ttl='Coordinate trajectories recoverd from projection onto first PC'):
fig, ax = plt.subplots(len(names),1, sharex=True)
plt.subplots_adjust(wspace=0, hspace=0)
ax[0].set_title(ttl)
for k in range(len(A)):
ax[k].plot(Rs[k], label=names[k]+(' %dPC' % (nrPC))), ax[k].plot(A[k], label=names[k])
ax[k].set_yticks([0]), ax[k].legend(loc='right')
ax[len(names)-1].set_xlabel('Frame nr.')
plt.legend()
plt.show()
def plotSV(S, fname=''):
fig, ax = plt.subplots()
plt.title('Singular values')
if False: ax.bar(np.arange(len(S)), np.log(S))
else: ax.bar(np.arange(len(S)), S, color=(0.2, 0.4, 0.6, 0.6), width=0.25)
plt.savefig('images/exp2_sing_values.png', figsize=(8, 6), dpi=300, bbox_inches='tight')
xlabs = []
for j in range(len(S)): xlabs.append('PC%d' % (j+1))
ax.set_xticks(np.arange(len(S)))
ax.set_xticklabels(xlabs)
if fname == '': plt.show()
else:
plt.savefig(fname, figsize=(8, 6), dpi=300, bbox_inches='tight')
plt.clf()
|
StarcoderdataPython
|
86453
|
#!/usr/bin/env python
import os
import glob
HERE = os.path.dirname(__file__)
files = glob.glob(os.path.join(HERE, '../data/json/*.json'))
from codetalker import testing
import codetalker.contrib.json as json
parse_rule = testing.parse_rule(__name__, json.grammar)
def make_parse(fname):
text = open(fname).read()
def meta():
if os.path.basename(fname).startswith('fail'):
try:
res = json.loads(text)
except:
pass
else:
raise Exception('JSON parsing of %s should have failed: %s' % (fname, text))
else:
res = json.loads(text)
return meta
for fname in files:
globals()['test_json "%s"' % fname] = make_parse(fname)
parse_rule(json.dict_, (
'{}',
'{"one": 3}',
'{"one": 4, "two": "hi"}',
), (
'{,}',
'{',
))
parse_rule(json.list_, (
'[]',
'[1,2]',
'[1,2,]',
'["hi"]',
'[1,"hi"]',
), (
'[,]',
'[',
))
# vim: et sw=4 sts=4
|
StarcoderdataPython
|
3279777
|
import sys
K,L,R = map(int ,input().split(' '))
bad_apples = set()
for line in sys.stdin:
y,x = map(lambda x: int(x) - 1 ,line.split(' '))
bad_apples.add((y,x))
new_bad_apples = set()
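# Each of the R rounds spreads the rot from every bad apple to its four orthogonal
# neighbours that lie inside the K x L grid.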
for i in range(R):
for apple in bad_apples:
if apple[0] + 1 < K:
new_bad_apples.add((apple[0]+1, apple[1]))
if apple[0] - 1 >= 0:
new_bad_apples.add((apple[0]-1, apple[1]))
if apple[1] + 1 < L:
new_bad_apples.add((apple[0], apple[1]+1))
if apple[1] - 1 >= 0:
new_bad_apples.add((apple[0], apple[1]-1))
bad_apples.update(new_bad_apples)
new_bad_apples = set()
print(K*L-len(bad_apples))
|
StarcoderdataPython
|
122078
|
from .Badge import Badge as BadgeDBModel
from .BadgeCondition import BadgeCondition as BadgeConditionDBModel
from .BadgeType import BadgeType as BadgeTypeDBModel
from .Organization import Organization as OrganizationDBModel
from .OrganizationType import OrganizationType as OrganizationTypeDBModel
from .OrgPos import OrgPos as OrgPosDBModel
from .People import People as PeopleDBModel
from .PeopleCredential import PeopleCredential as PeopleCredentialDBModel
from .PeoplePosRole import PeoplePosRole as PeoplePosRoleDBModel
from .Phone import Phone as PhoneDBModel
from .PhoneType import PhoneType as PhoneTypeDBModel
from .Position import Position as PositionDBModel
from .PreSkill import PreSkill as PreSkillDBModel
from .Role import Role as RoleDBModel
from .RoleSkillBlock import RoleSkillBlock as RoleSkillBlockDBModel
from .Sex import Sex as SexDBModel
from .Skill import Skill as SkillDBModel
from .SkillBlock import SkillBlock as SkillBlockDBModel
from .SkillBlockScore import SkillBlockScore as SkillBlockScoreDBModel
from .SkillBlockSkill import SkillBlockSkill as SkillBlockSkillDBModel
from .SkillGroup import SkillGroup as SkillGroupDBModel
from .SkillGroupSkillBlock import SkillGroupSkillBlock as SkillGroupSkillBlockDBModel
from .SkillScore import SkillScore as SkillScoreDBModel
from .SkillScoreHistory import SkillScoreHistory as SkillScoreHistoryDBModel
from .Status import Status as StatusDBModel
from .Structure import Structure as StructureDBModel
|
StarcoderdataPython
|
1783248
|
<filename>util/correlation/get_hw_stats.py
#!/usr/bin/env python
from optparse import OptionParser
import os
import subprocess
this_directory = os.path.dirname(os.path.realpath(__file__)) + "/"
import sys
sys.path.insert(0,os.path.join(this_directory,"..","job_launching"))
import common
import re
import shutil
import glob
import datetime
import yaml
parser = OptionParser()
parser.add_option("-B", "--benchmark_list", dest="benchmark_list",
help="a comma seperated list of benchmark suites to run. See apps/define-*.yml for " +\
"the benchmark suite names.",
default="rodinia_2.0-ft")
parser.add_option("-d", "--directory", dest="directory",
help="directory in run_hw",
default="")
parser.add_option("-D", "--device_num", dest="device_num",
help="CUDA device number",
default="0")
parser.add_option("-m", "--metrics", dest="metrics",
help="nsight metrics to find",
default="Kernel Name,l1tex__t_sectors_pipe_lsu_mem_global_op_ld.sum,l1tex__t_sectors_pipe_lsu_mem_local_op_ld.sum,l1tex__t_sectors_pipe_lsu_mem_global_op_st.sum,l1tex__t_sectors_pipe_lsu_mem_local_op_st.sum,l1tex__t_sectors_pipe_lsu_mem_global_op_ld_lookup_hit.sum,l1tex__t_sectors_pipe_lsu_mem_local_op_ld_lookup_hit.sum,l1tex__t_sectors_pipe_lsu_mem_global_op_st_lookup_hit.sum,l1tex__t_sectors_pipe_lsu_mem_local_op_st_lookup_hit.sum")
parser.add_option("-k", "--kernels", dest="kernels",
help="kernels to compute",
default="kernel_ProducerCell_create_car,kernel_traffic_light_step,kernel_Car_step_prepare_path,kernel_Car_step_move,DeviceScanInitKernel,DeviceScanKernel,kernel_compact_initialize,kernel_compact_cars,kernel_compact_swap_pointers,candidate_prepare,alive_prepare,candidate_update,alive_update,kernel_AnchorPullNode_pull,kernel_Spring_compute_force,kernel_Node_move,kernel_NodeBase_initialize_bfs,kernel_NodeBase_bfs_visit,kernel_NodeBase_bfs_set_delete_flags,kernel_Spring_bfs_delete,alive_update,prepare,update,ConnectedComponent,BFS,PageRank,render,Body_compute_force,Body_update,Body_initialize_merge,Body_prepare_merge,Body_update_merge,Body_delete_merged,parallel_do")
parser.add_option("-c", "--cycle", dest="cycle", action="store_true",
help="Just get kernel duration stats")
parser.add_option("-p", "--disable_nvprof", dest="disable_nvprof", action="store_true",
help="do not use nvprof (decrecated in Turing+)")
parser.add_option("-C", "--cache", dest="cache", action="store_true",
help="Get comprehensive cache hit rate")
(options, args) = parser.parse_args()
common.load_defined_yamls()
metrics_set = set(options.metrics.split(","))
kernel_set = set(options.kernels.split(","))
nominator_set = set("l1tex__t_sectors_pipe_lsu_mem_global_op_ld_lookup_hit.sum,l1tex__t_sectors_pipe_lsu_mem_local_op_ld_lookup_hit.sum,l1tex__t_sectors_pipe_lsu_mem_global_op_st_lookup_hit.sum,l1tex__t_sectors_pipe_lsu_mem_local_op_st_lookup_hit.sum".split(","))
denominator_set = set("l1tex__t_sectors_pipe_lsu_mem_global_op_ld.sum,l1tex__t_sectors_pipe_lsu_mem_local_op_ld.sum,l1tex__t_sectors_pipe_lsu_mem_global_op_st.sum,l1tex__t_sectors_pipe_lsu_mem_local_op_st.sum".split(","))
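# With --cache, the L1 hit rate written to the summary is sum(hit sectors) / sum(total sectors),
# i.e. the nominator_set metrics divided by the denominator_set metrics, accumulated over the selected kernels.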
benchmarks = []
benchmarks = common.gen_apps_from_suite_list(options.benchmark_list.split(","))
cuda_version = common.get_cuda_version( this_directory )
foutput_name = "hw_stats.csv"
fsum_name = "hw_summary.csv"
foutput = open(foutput_name, "w")
fsum = open(fsum_name, "w")
base = 0
base_cycle = 0
num_tech = 5
cycles = dict()
for i in range(num_tech):
cycles[i] = [["", 0.0]]
for bench in benchmarks:
edir, ddir, exe, argslist = bench
specific_ddir = os.path.join(this_directory,ddir)
for args in argslist:
run_name = os.path.join( exe, common.get_argfoldername( args ) )
this_run_dir = os.path.join(this_directory, "..", "..", "run_hw", options.directory, "device-" + options.device_num, cuda_version, run_name)
if not options.disable_nvprof:
#nvprof get stats
this_pattern = this_run_dir + '/*.csv.elapsed_cycles_sm.0'
for fname in glob.glob(this_pattern):
start = 0
kernel_name_idx = 0
exe_cycles = 0
flist = open(fname, "r")
lines = flist.readlines()
# kernel name needs wildcard match
kernel_pattern = options.kernels.replace(",","|")
for line in lines:
csv_line = line.split(',')
if start == 0 and len(csv_line) > 5 and csv_line[0].replace("\"", "") == "Device":
start = 1
for idx,element in enumerate(csv_line):
if element.replace("\"", "") == "Kernel":
kernel_name_idx = idx
elif start == 1:
if re.search(kernel_pattern, csv_line[kernel_name_idx]):
exe_cycles += float(csv_line[-1].replace(",", ""))
if exe_cycles == 0:
                    print('{} returned zero cycles, could you send this file: \'{}\'?'.format(exe, fname))
else:
cycles[base] = [exe, exe_cycles]
base = (base + 1) % num_tech
if base == 3:
base_cycle = exe_cycles
if base == 0:
for i in range(num_tech):
print('{} {}'.format(cycles[i][0], base_cycle/cycles[i][1]))
else:
# nsight get stats
if options.cycle:
options.metrics="Kernel Name,Metric Value"
metrics_set = set(options.metrics.split(","))
this_pattern = this_run_dir + '/*.csv.gpc__cycles_elapsed.0'
for fname in glob.glob(this_pattern):
print(fname)
fsum.write(fname)
fsum.write("\n")
foutput.write(fname)
foutput.write("\n")
flist = open(fname, "r")
lines = flist.readlines()
start = 0
kernel_idx = 0
cycle_idx = 0
total_cycles = 0.0
exe_cycles = 0.0
metrics_idx = []
for line in lines:
csv_line = line.split("\"")
if start == 0 and len(csv_line) > 10 and csv_line[1] == "ID":
start = 1
metric_line = csv_line
for idx,element in enumerate(csv_line):
if element in metrics_set:
if element == "Metric Value":
cycle_idx = idx
elif element == "Kernel Name":
kernel_idx = idx
metrics_idx.append(idx)
foutput.write("\"")
foutput.write(csv_line[idx])
foutput.write("\"")
foutput.write(",")
foutput.write("\n")
elif start == 1:
if csv_line[kernel_idx] in kernel_set:
exe_cycles += float(csv_line[cycle_idx].replace(",", ""))
total_cycles += float(csv_line[cycle_idx].replace(",", ""))
for idx in metrics_idx:
#print(csv_line[idx])
foutput.write("\"")
foutput.write(csv_line[idx])
foutput.write("\"")
foutput.write(",")
foutput.write("\n")
flist.close()
fsum.writelines('\"{}\",\"{}\"\n'.format("exe_cycles",exe_cycles))
fsum.writelines('\"{}\",\"{}\"\n'.format("total_cycles",total_cycles))
print('\"{}\",\"{}\"'.format("exe_cycles",exe_cycles))
print('\"{}\",\"{}\"'.format("total_cycles",total_cycles))
else:
this_pattern = this_run_dir + '/*.csv.nsight'
for fname in glob.glob(this_pattern):
print(fname)
fsum.write(fname)
fsum.write("\n")
foutput.write(fname)
foutput.write("\n")
flist = open(fname, "r")
lines = flist.readlines()
start = 0
metrics_dict = dict()
metric_line = []
metrics_idx = []
nominator = 0.0
denominator = 0.0
for line in lines:
csv_line = line.split("\"")
if start == 1 and csv_line[0] == "": # unit line
start = 2
for idx in metrics_idx:
if csv_line[idx] == "sector" or csv_line[idx] == "cycle":
metrics_dict[idx] = 0
elif start == 0 and len(csv_line) > 10 and csv_line[1] == "ID": # metric line
start = 1
metric_line = csv_line
for idx,element in enumerate(csv_line):
if element in metrics_set:
metrics_idx.append(idx)
foutput.write("\"")
foutput.write(csv_line[idx])
foutput.write("\"")
foutput.write(",")
foutput.write("\n")
elif start == 2: # print with metric line, unit line and data line
for idx in metrics_idx:
if metric_line[idx] == "Kernel Name":
# print csv_line[idx]
if not csv_line[idx] in kernel_set:
print('Not match {}'.format(csv_line[idx]))
continue
if idx in metrics_dict.keys():
metrics_dict[idx] += float(csv_line[idx].replace(",", ""))
if options.cache:
if metric_line[idx] in denominator_set:
denominator += int(csv_line[idx].replace(",", ""))
elif metric_line[idx] in nominator_set:
nominator += int(csv_line[idx].replace(",", ""))
#print(csv_line[idx])
foutput.write("\"")
foutput.write(csv_line[idx])
foutput.write("\"")
foutput.write(",")
foutput.write("\n")
for idx,values in metrics_dict.items():
fsum.writelines('\"{}\",\"{}\"\n'.format(metric_line[idx],values))
print('\"{}\",\"{}\"'.format(metric_line[idx],values))
if options.cache:
fsum.writelines('\"{}\",\"{:.2f}%\"\n'.format("L1_hit_rate",(nominator/denominator * 100)))
flist.close()
foutput.close()
fsum.close()
|
StarcoderdataPython
|
3266511
|
<filename>src/resources/python/grab_team.py
import requests
import pandas as pd
import json
import time
import os, sys
import pickle
import datetime as dt
import constants
from nba_api.stats.library import data
from nba_api.stats.endpoints import (franchisehistory, commonteamroster, leaguegamelog,
    leaguedashteamstats, leaguedashteamclutch, leaguedashptstats, leaguedashptteamdefend,
    leaguedashteamptshot, leaguedashteamshotlocations, leaguehustle)
class team_games(object):
# Add Regular Season, Playoffs and Pre-Season Indicator to DataFrame
return fact_team_games
class franchise(object):
return fact_franchise
class teamroster(object):
return fact_teamroster
class coaches(object):
return fact_coaches
class team_stats(object):
return fact_team_stats
class team_clutch_stats(object):
return fact_team_clutch_stats
class team_tracking_stats(object):
return fact_team_tracking_stats
class team_defensive_stats(object):
return fact_defensive_stats
class team_shooting_stats(object):
return fact_team_shooting_stats
class team_shot_location_stats(object):
return fact_team_shot_location_stats
class team_defensiveshot_location_stats(object):
return fact_team_defensiveshot_location_stats
class team_hustle_stats(object):
return team_hustle_stats
|
StarcoderdataPython
|
44047
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
from bms_state_machine import BMSChargeStateMachine, BMSChargeModel, BMSChargeController
# 48V 16S LiFePO4 Battery
# Absorption: 58V (56.4 for longer life)
# Float: 54.4V
# Restart bulk voltage: Float-0.8 (max of 54V)
# Inverter Cut-off: 42.8V-48V (depending on size of load and voltage drop etc)
bms_controller = BMSChargeController(charge_bulk_current=160, charge_absorb_voltage=58.4, \
charge_float_voltage=54.4, time_min_absorb=0.5, rebulk_voltage=53.6) # or 30 seconds for the simulation
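# The loop below feeds simulated battery voltages to the controller; the expected progression
# is bulk -> absorb (held for at least time_min_absorb) -> float, and a sag below
# rebulk_voltage should send the charger back into bulk.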
ret = bms_controller.start_charging()
print ("{0}, Start Charging: {1}".format(bms_controller, ret))
# simulated battery voltage
bat_voltage = 42.8
counter = 0
while (True):
charge_current = 0.0
is_state_changed = bms_controller.update_battery_data(bat_voltage, charge_current)
state = bms_controller.get_state()
charge_current = bms_controller.get_charge_current()
print ("Battery Voltage: {0}, Charge Current: {1}, Charge State: {2}, State Changed: {3}".format(bat_voltage, charge_current, state, is_state_changed))
time.sleep(1)
# update simulated values
if (is_state_changed):
if (state == "absorb_chg"):
bat_voltage = 58.2
elif (state == "float_chg"):
bat_voltage = 56.1
if (state == "bulk_chg"):
bat_voltage += 1.8
elif (state == "absorb_chg"):
if (charge_current > 0):
bat_voltage += charge_current * 0.1
elif (charge_current == 0):
bat_voltage -= 0.01
if (counter > 5):
counter += 1
if (counter > 15):
bat_voltage = 54
counter = 0
elif (state == "float_chg"):
counter += 1
if (counter > 5) :
bat_voltage = 53
if (charge_current > 0):
bat_voltage += charge_current * 0.1
elif (charge_current == 0):
bat_voltage -= 0.03
|
StarcoderdataPython
|
1653676
|
import os
from typing import Any
import ftputil.error
from PyQt5.QtCore import QMutex
from backend_file import BackendFile
class FTPBackendFile(BackendFile):
_file: Any
_file_mutex: QMutex = QMutex()
_host: ftputil.FTPHost
_path: str
_pos: int
_size: int
def __init__(self, host: ftputil.FTPHost, path: str):
self._file = None
self._host = host
self._path = path
self._size = 0
self._pos = 0
stats: Any = host.stat(path)
self._size = stats.st_size
def _get_file(self, rest=None):
if self._file is None:
self._file_mutex.lock()
self._file = self._host.open(self._path, "rb", rest=rest)
self._file_mutex.unlock()
if rest:
self._pos = rest
else:
self._pos = 0
return self._file
def read(self, length: int = -1) -> bytes:
if length == 0:
return b""
if length < 0 or self._pos + length > self._size:
length = self._size - self._pos
file: Any = self._get_file()
data: bytes = file.read(length)
self._pos += len(data)
return data
def close(self) -> None:
if self._file is not None:
try:
self._file.close()
except ftputil.error.FTPIOError:
pass
self._file = None
self._pos = 0
def tell(self) -> int:
return self._pos
def seek(self, offset: int, whence: int = 0) -> int:
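        # Translate SEEK_CUR / SEEK_END into an absolute offset; if it differs from the current
        # position, close the data connection and reopen it with REST pointing at that byte.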
if whence == os.SEEK_CUR:
offset = self._pos + offset
elif whence == os.SEEK_END:
offset = self._size + offset - 1
if offset == self._pos:
return self._pos
self.close()
self._get_file(offset)
return self._pos
|
StarcoderdataPython
|
1742459
|
from weblab.core.coordinator.clients.weblabdeusto import WebLabDeustoClient
import os
import time
from weblab.core.reservations import WaitingReservation, ConfirmedReservation, WaitingConfirmationReservation
from weblab.data.command import Command
from weblab.data.experiments import ExperimentId
import threading
import config
threads = {}
users_in = 0
def user_t():
do_full_experiment_use()
def main():
print "Starting all threads... "
for i in range(config.NUM_USERS):
t = threading.Thread(None, user_t)
threads[t] = t
t.start()
print " done."
while True:
print "Users in the experiment: %d" % users_in
time.sleep(5)
for t in threads.values():
if t.is_alive(): break
else:
break
print "GOING OUT."
def do_full_experiment_use():
"""
    Uses the configured experiment, trying to resemble the way a human would use it.
This method will block for a while.
:return:
"""
wc = WebLabDeustoClient(config.WEBLAB_BASE_URL)
sessionid = wc.login(config.LOGIN, config.PASSWORD)
if not sessionid: raise Exception("Wrong login")
# Reserve the flash dummy experiment.
experiment_id = ExperimentId(config.EXP_NAME, config.EXP_CATEGORY)
waiting = wc.reserve_experiment(sessionid, experiment_id, "{}", "{}", None)
# print "Reserve response: %r" % waiting
reservation_id = waiting.reservation_id
while True:
status = wc.get_reservation_status(reservation_id)
# print "Reservation status: %r" % status
if type(status) is WaitingReservation:
time.sleep(0.5)
elif type(status) is ConfirmedReservation:
break
elif type(status) is WaitingConfirmationReservation:
time.sleep(0.5)
else:
print "Unknown reservation status."
print "Experiment reserved."
global users_in
users_in += 1
# Send some commands.
for i in range(config.COMMANDS_PER_USER):
# What's commandstring actually for??
cmd = Command(config.COMMAND)
result = wc.send_command(reservation_id, cmd)
if not result.commandstring.startswith("Received command"):
raise Exception("Unrecognized command response")
# print "Command result: %r" % result
time.sleep(config.TIME_BETWEEN_COMMANDS)
users_in -= 1
result = wc.logout(sessionid)
print "Logout result: %r" % result
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1753268
|
<filename>tests/test_data/test_pipelines/test_random_degradations.py
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
from mmedit.datasets.pipelines import (DegradationsWithShuffle, RandomBlur,
RandomJPEGCompression, RandomNoise,
RandomResize)
def test_random_noise():
results = {}
results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
# Gaussian noise
model = RandomNoise(
params=dict(
noise_type=['gaussian'],
noise_prob=[1],
gaussian_sigma=[0, 50],
gaussian_gray_noise_prob=1),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
# Poisson noise
model = RandomNoise(
params=dict(
noise_type=['poisson'],
noise_prob=[1],
poisson_scale=[0, 1],
poisson_gray_noise_prob=1),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
# skip degradations with prob < 1
params = dict(
noise_type=['gaussian'],
noise_prob=[1],
gaussian_sigma=[0, 50],
gaussian_gray_noise_prob=1,
prob=0)
model = RandomNoise(params=params, keys=['lq'])
assert model(results) == results
assert repr(model) == model.__class__.__name__ + f'(params={params}, ' \
+ "keys=['lq'])"
def test_random_jpeg_compression():
results = {}
results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
model = RandomJPEGCompression(params=dict(quality=[5, 50]), keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
# skip degradations with prob < 1
params = dict(quality=[5, 50], prob=0)
model = RandomJPEGCompression(params=params, keys=['lq'])
assert model(results) == results
assert repr(model) == model.__class__.__name__ + f'(params={params}, ' \
+ "keys=['lq'])"
def test_random_resize():
results = {}
results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
# upscale
model = RandomResize(
params=dict(
resize_mode_prob=[1, 0, 0],
resize_scale=[0.5, 1.5],
resize_opt=['bilinear', 'area', 'bicubic'],
resize_prob=[1 / 3., 1 / 3., 1 / 3.]),
keys=['lq'])
results = model(results)
assert results['lq'].shape[0] >= 8 and results['lq'].shape[1] >= 8
# downscale
results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
model = RandomResize(
params=dict(
resize_mode_prob=[0, 1, 0],
resize_scale=[0.5, 1.5],
resize_opt=['bilinear', 'area', 'bicubic'],
resize_prob=[1 / 3., 1 / 3., 1 / 3.]),
keys=['lq'])
results = model(results)
assert results['lq'].shape[0] <= 8 and results['lq'].shape[1] <= 8
# keep size
results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
model = RandomResize(
params=dict(
resize_mode_prob=[0, 0, 1],
resize_scale=[0.5, 1.5],
resize_opt=['bilinear', 'area', 'bicubic'],
resize_prob=[1 / 3., 1 / 3., 1 / 3.]),
keys=['lq'])
results = model(results)
assert results['lq'].shape[0] == 8 and results['lq'].shape[1] == 8
# given target_size
results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
model = RandomResize(
params=dict(
resize_mode_prob=[0, 0, 1],
resize_scale=[0.5, 1.5],
resize_opt=['bilinear', 'area', 'bicubic'],
resize_prob=[1 / 3., 1 / 3., 1 / 3.],
target_size=(16, 16)),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (16, 16, 3)
    # skip degradation
model = RandomResize(
params=dict(
resize_mode_prob=[1, 0, 0],
resize_scale=[0.5, 1.5],
resize_opt=['bilinear', 'area', 'bicubic'],
resize_prob=[1 / 3., 1 / 3., 1 / 3.],
prob=0),
keys=['lq'])
assert model(results) == results
with pytest.raises(NotImplementedError):
params = dict(
resize_mode_prob=[1],
resize_scale=[1],
resize_opt=['abc'],
resize_prob=[1])
model = RandomResize(params=params, keys=['lq'])
results = model(results)
assert repr(model) == model.__class__.__name__ + f'(params={params}, ' \
+ "keys=['lq'])"
def test_random_blur():
results = {}
results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
# isotropic Gaussian
model = RandomBlur(
params=dict(
kernel_size=[41],
kernel_list=['iso'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416]),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
# anisotropic Gaussian
model = RandomBlur(
params=dict(
kernel_size=[41],
kernel_list=['aniso'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416]),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
# isotropic generalized Gaussian
model = RandomBlur(
params=dict(
kernel_size=[41],
kernel_list=['generalized_iso'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416]),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
# anisotropic generalized Gaussian
model = RandomBlur(
params=dict(
kernel_size=[41],
kernel_list=['generalized_aniso'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416]),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
# isotropic plateau Gaussian
model = RandomBlur(
params=dict(
kernel_size=[41],
kernel_list=['plateau_iso'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416]),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
# anisotropic plateau Gaussian
model = RandomBlur(
params=dict(
kernel_size=[41],
kernel_list=['plateau_aniso'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416]),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
# sinc (kernel size < 13)
model = RandomBlur(
params=dict(
kernel_size=[11],
kernel_list=['sinc'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416]),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
# sinc (kernel size >= 13)
model = RandomBlur(
params=dict(
kernel_size=[15],
kernel_list=['sinc'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416]),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
# sinc (given omega)
model = RandomBlur(
params=dict(
kernel_size=[15],
kernel_list=['sinc'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416],
omega=[0.1, 0.1]),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
# skip degradation
params = dict(
kernel_size=[15],
kernel_list=['sinc'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416],
prob=0)
model = RandomBlur(params=params, keys=['lq'])
assert model(results) == results
assert repr(model) == model.__class__.__name__ + f'(params={params}, ' \
+ "keys=['lq'])"
def test_degradations_with_shuffle():
results = {}
results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
# shuffle all
model = DegradationsWithShuffle(
degradations=[
dict(
type='RandomBlur',
params=dict(
kernel_size=[15],
kernel_list=['sinc'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416],
omega=[0.1, 0.1])),
dict(
type='RandomResize',
params=dict(
resize_mode_prob=[0, 0, 1],
resize_scale=[0.5, 1.5],
resize_opt=['bilinear', 'area', 'bicubic'],
resize_prob=[1 / 3., 1 / 3., 1 / 3.],
target_size=(16, 16))),
[
dict(
type='RandomJPEGCompression',
params=dict(quality=[5, 10])),
dict(
type='RandomJPEGCompression',
params=dict(quality=[15, 20]))
]
],
keys=['lq'],
shuffle_idx=None)
model(results)
# shuffle last 2
degradations = [
dict(
type='RandomBlur',
params=dict(
kernel_size=[15],
kernel_list=['sinc'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416],
omega=[0.1, 0.1])),
dict(
type='RandomResize',
params=dict(
resize_mode_prob=[0, 0, 1],
resize_scale=[0.5, 1.5],
resize_opt=['bilinear', 'area', 'bicubic'],
resize_prob=[1 / 3., 1 / 3., 1 / 3.],
target_size=(16, 16))),
[
dict(type='RandomJPEGCompression', params=dict(quality=[5, 10])),
dict(type='RandomJPEGCompression', params=dict(quality=[15, 20]))
]
]
model = DegradationsWithShuffle(
degradations=degradations, keys=['lq'], shuffle_idx=(1, 2))
model(results)
assert repr(model) == model.__class__.__name__ \
+ f'(degradations={degradations}, ' \
+ "keys=['lq'], " \
+ 'shuffle_idx=(1, 2))'
|
StarcoderdataPython
|
191381
|
<reponame>Arbupa/DAS_Sistemas<filename>Ene-Jun-2019/Ejemplos/Code Golf/albertos-solution.py
for j in [6,11,4,19,61,100,1001,5001,55556,777778]:
print(' '.join([str(i) for i in range(1,j)]),end='' if j==777778 else '\n')
|
StarcoderdataPython
|
1629537
|
<gh_stars>0
from __future__ import absolute_import
from .message import Message, ExceptionMessage
from . import log, parallel_backend_loaded, remote_import
from ..util.check_deleted import check_deleted
import pynbody
import gc
import six.moves.cPickle as pickle
import numpy as np
from six.moves import zip
import time
class ConfirmLoadPynbodySnapshot(Message):
pass
class ObjectSpecification(object):
def __init__(self, object_number, object_typetag='halo'):
self.object_number = object_number
self.object_typetag = object_typetag
def __repr__(self):
return "ObjectSpecification(%d,%r)"%(self.object_number, self.object_typetag)
def __eq__(self, other):
if not isinstance(other, ObjectSpecification):
return False
return self.object_number==other.object_number and self.object_typetag==other.object_typetag
def __hash__(self):
return hash((self.object_number, self.object_typetag))
class PynbodySnapshotQueue(object):
def __init__(self):
self.timestep_queue = []
self.handler_queue = []
self.load_requester_queue = []
self.current_timestep = None
self.current_snapshot = None
self.current_subsnap_cache = {}
self.current_handler = None
self.in_use_by = []
def add(self, handler, filename, requester):
log.logger.debug("Pynbody server: client %d requests access to %r", requester, filename)
if filename==self.current_timestep:
self._notify_available(requester)
self.in_use_by.append(requester)
elif filename in self.timestep_queue:
queue_position = self.timestep_queue.index(filename)
self.load_requester_queue[queue_position].append(requester)
assert self.handler_queue[queue_position] == handler
else:
self.timestep_queue.append(filename)
self.handler_queue.append(handler)
self.load_requester_queue.append([requester])
self._load_next_if_free()
def free(self, requester):
self.in_use_by.remove(requester)
log.logger.debug("Pynbody server: client %d is now finished with %r", requester, self.current_timestep)
self._free_if_unused()
self._load_next_if_free()
def get_subsnap(self, filter_or_object_spec, fam):
if (filter_or_object_spec, fam) in self.current_subsnap_cache:
log.logger.debug("Pynbody server: cache hit for %r (fam %r)",filter_or_object_spec, fam)
return self.current_subsnap_cache[(filter_or_object_spec, fam)]
else:
log.logger.debug("Pynbody server: cache miss for %r (fam %r)",filter_or_object_spec, fam)
subsnap = self.get_subsnap_uncached(filter_or_object_spec, fam)
self.current_subsnap_cache[(filter_or_object_spec, fam)] = subsnap
return subsnap
def get_subsnap_uncached(self, filter_or_object_spec, fam):
snap = self.current_snapshot
if isinstance(filter_or_object_spec, pynbody.filt.Filter):
snap = snap[filter_or_object_spec]
elif isinstance(filter_or_object_spec, ObjectSpecification):
snap = self.current_handler.load_object(self.current_timestep, filter_or_object_spec.object_number,
filter_or_object_spec.object_typetag)
else:
raise TypeError("filter_or_object_spec must be either a pynbody filter or an ObjectRequestInformation object")
if fam is not None:
snap = snap[fam]
return snap
def _free_if_unused(self):
if len(self.in_use_by)==0:
log.logger.debug("Pynbody server: all clients are finished with the current snapshot; freeing.")
with check_deleted(self.current_snapshot):
self.current_snapshot = None
self.current_timestep = None
self.current_subsnap_cache = {}
self.current_handler = None
def _notify_available(self, node):
log.logger.debug("Pynbody server: notify %d that snapshot is now available", node)
ConfirmLoadPynbodySnapshot(type(self.current_snapshot)).send(node)
def _load_next_if_free(self):
if len(self.timestep_queue)==0:
return
if self.current_handler is None:
# TODO: Error handling
self.current_timestep = self.timestep_queue.pop(0)
self.current_handler = self.handler_queue.pop(0)
self.current_snapshot = self.current_handler.load_timestep(self.current_timestep)
self.current_snapshot.physical_units()
log.logger.info("Pynbody server: loaded %r", self.current_timestep)
notify = self.load_requester_queue.pop(0)
self.in_use_by = notify
for n in notify:
self._notify_available(n)
else:
log.logger.info("The currently loaded snapshot is still required and so other clients will have to wait")
log.logger.info("(Currently %d snapshots are in the queue to be loaded later)", len(self.timestep_queue))
_server_queue = PynbodySnapshotQueue()
class RequestLoadPynbodySnapshot(Message):
def process(self):
_server_queue.add(self.contents[0], self.contents[1], self.source)
class ReleasePynbodySnapshot(Message):
def process(self):
_server_queue.free(self.source)
class ReturnPynbodyArray(Message):
@classmethod
def deserialize(cls, source, message):
from . import backend
contents = backend.receive_numpy_array(source=source)
if message!="":
contents = contents.view(pynbody.array.SimArray)
contents.units = pickle.loads(message)
obj = ReturnPynbodyArray(contents)
obj.source = source
return obj
def serialize(self):
assert isinstance(self.contents, np.ndarray)
if hasattr(self.contents, 'units'):
serialized_info = pickle.dumps(self.contents.units)
else:
serialized_info = ""
return serialized_info
def send(self, destination):
# send envelope
super(ReturnPynbodyArray, self).send(destination)
# send contents
from . import backend
backend.send_numpy_array(self.contents.view(np.ndarray), destination)
class RequestPynbodyArray(Message):
def __init__(self, filter_or_object_spec, array, fam=None):
self.filter_or_object_spec = filter_or_object_spec
self.array = array
self.fam = fam
@classmethod
def deserialize(cls, source, message):
obj = RequestPynbodyArray(*message)
obj.source = source
return obj
def serialize(self):
return (self.filter_or_object_spec, self.array, self.fam)
def process(self):
start_time = time.time()
try:
log.logger.debug("Receive request for array %r from %d",self.array,self.source)
subsnap = _server_queue.get_subsnap(self.filter_or_object_spec, self.fam)
with subsnap.immediate_mode, subsnap.lazy_derive_off:
if subsnap._array_name_implies_ND_slice(self.array):
raise KeyError("Not transferring a single slice %r of a ND array"%self.array)
if self.array=='remote-index-list':
subarray = subsnap.get_index_list(subsnap.ancestor)
else:
subarray = subsnap[self.array]
assert isinstance(subarray, pynbody.array.SimArray)
array_result = ReturnPynbodyArray(subarray)
except Exception as e:
array_result = ExceptionMessage(e)
array_result.send(self.source)
del array_result
gc.collect()
log.logger.debug("Array sent after %.2fs"%(time.time()-start_time))
class ReturnPynbodySubsnapInfo(Message):
def __init__(self, families, sizes, properties, loadable_keys, fam_loadable_keys):
super(ReturnPynbodySubsnapInfo, self).__init__()
self.families = families
self.sizes = sizes
self.properties = properties
self.loadable_keys = loadable_keys
self.fam_loadable_keys = fam_loadable_keys
def serialize(self):
return self.families, self.sizes, self.properties, self.loadable_keys, self.fam_loadable_keys
@classmethod
def deserialize(cls, source, message):
obj = cls(*message)
obj.source = source
return obj
class RequestPynbodySubsnapInfo(Message):
def __init__(self, filename, filter_):
super(RequestPynbodySubsnapInfo, self).__init__()
self.filename = filename
self.filter_or_object_spec = filter_
@classmethod
def deserialize(cls, source, message):
obj = cls(*message)
obj.source = source
return obj
def serialize(self):
return (self.filename, self.filter_or_object_spec)
def process(self):
start_time = time.time()
assert(_server_queue.current_timestep == self.filename)
log.logger.debug("Received request for subsnap info, spec %r", self.filter_or_object_spec)
obj = _server_queue.get_subsnap(self.filter_or_object_spec, None)
families = obj.families()
fam_lengths = [len(obj[fam]) for fam in families]
fam_lkeys = [obj.loadable_keys(fam) for fam in families]
lkeys = obj.loadable_keys()
ReturnPynbodySubsnapInfo(families, fam_lengths, obj.properties, lkeys, fam_lkeys).send(self.source)
log.logger.debug("Subsnap info sent after %.2f",(time.time()-start_time))
class RemoteSubSnap(pynbody.snapshot.SimSnap):
def __init__(self, connection, filter_or_object_spec):
super(RemoteSubSnap, self).__init__()
self.connection = connection
self._filename = connection.identity
self._server_id = connection._server_id
RequestPynbodySubsnapInfo(connection.filename, filter_or_object_spec).send(self._server_id)
info = ReturnPynbodySubsnapInfo.receive(self._server_id)
index = 0
for fam, size in zip(info.families, info.sizes):
self._family_slice[fam] = slice(index, index+size)
index+=size
self._num_particles = index
self.properties.update(info.properties)
self._loadable_keys = info.loadable_keys
self._fam_loadable_keys = {fam: lk for fam, lk in zip(info.families, info.fam_loadable_keys)}
self._filter_or_object_spec = filter_or_object_spec
def _find_deriving_function(self, name):
cl = self.connection.underlying_pynbody_class
if cl in self._derived_quantity_registry \
and name in self._derived_quantity_registry[cl]:
return self._derived_quantity_registry[cl][name]
else:
return super(RemoteSubSnap, self)._find_deriving_function(name)
def _load_array(self, array_name, fam=None):
RequestPynbodyArray(self._filter_or_object_spec, array_name, fam).send(self._server_id)
try:
start_time=time.time()
log.logger.debug("Send array request")
data = ReturnPynbodyArray.receive(self._server_id).contents
log.logger.debug("Array received; waited %.2fs",time.time()-start_time)
except KeyError:
raise IOError("No such array %r available from the remote"%array_name)
if fam is None:
self[array_name] = data
else:
self[fam][array_name] = data
_connection_active = False
class RemoteSnapshotConnection(object):
def __init__(self, input_handler, ts_extension, server_id=0):
global _connection_active
from ..input_handlers import pynbody
assert isinstance(input_handler, pynbody.PynbodyInputHandler)
if _connection_active:
raise RuntimeError("Each client can only have one remote snapshot connection at any time")
_connection_active = True
super(RemoteSnapshotConnection, self).__init__()
self._server_id = server_id
self._input_handler = input_handler
self.filename = ts_extension
self.identity = "%d: %s"%(self._server_id, ts_extension)
# ensure server knows what our messages are about
remote_import.ImportRequestMessage(__name__).send(self._server_id)
log.logger.debug("Pynbody client: attempt to connect to remote snapshot %r", ts_extension)
RequestLoadPynbodySnapshot((input_handler, ts_extension)).send(self._server_id)
self.underlying_pynbody_class = ConfirmLoadPynbodySnapshot.receive(self._server_id).contents
self.connected = True
log.logger.info("Pynbody client: connected to remote snapshot %r", ts_extension)
def get_view(self, filter_or_object_spec):
"""Return a RemoteSubSnap that contains either the pynbody filtered region, or the specified object from a catalogue
filter_or_object_spec is either an instance of pynbody.filt.Filter, or a tuple containing
(typetag, number), which are respectively the object type tag and object number to be loaded
"""
return RemoteSubSnap(self, filter_or_object_spec)
def disconnect(self):
global _connection_active
if not self.connected:
return
ReleasePynbodySnapshot(self.filename).send(self._server_id)
_connection_active = False
self.connected = False
def __del__(self):
self.disconnect()
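# Hedged usage sketch (not part of the original module): one way a client process might
# open a remote snapshot and pull a single array through the message classes above. The
# ("halo", 1) object spec follows the tuple form documented in get_view(); the input
# handler, timestep extension and the 'mass' array name are illustrative assumptions.
def _example_remote_fetch(input_handler, ts_extension, server_id=0):
    connection = RemoteSnapshotConnection(input_handler, ts_extension, server_id=server_id)
    try:
        view = connection.get_view(("halo", 1))
        masses = view['mass']  # triggers a RequestPynbodyArray/ReturnPynbodyArray round trip
        return masses
    finally:
        connection.disconnect()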
|
StarcoderdataPython
|
1694071
|
#
# This file is part of pysmi software.
#
# Copyright (c) 2015-2020, <NAME> <<EMAIL>>
# License: http://snmplabs.com/pysmi/license.html
#
import sys
try:
import unittest2 as unittest
except ImportError:
import unittest
from pysmi.parser.smi import parserFactory
from pysmi.codegen.pysnmp import PySnmpCodeGen
from pysmi.codegen.symtable import SymtableCodeGen
from pysnmp.smi.builder import MibBuilder
class TrapTypeTestCase(unittest.TestCase):
"""
TEST-MIB DEFINITIONS ::= BEGIN
IMPORTS
TRAP-TYPE
FROM RFC-1215
OBJECT-TYPE
FROM RFC1155-SMI;
testId OBJECT IDENTIFIER ::= { 1 3 }
testObject OBJECT-TYPE
SYNTAX INTEGER
MAX-ACCESS accessible-for-notify
STATUS current
DESCRIPTION "Test object"
::= { 1 3 }
testTrap TRAP-TYPE
ENTERPRISE testId
VARIABLES { testObject }
DESCRIPTION
"Test trap"
::= 1
END
"""
def setUp(self):
ast = parserFactory()().parse(self.__class__.__doc__)[0]
mibInfo, symtable = SymtableCodeGen().genCode(ast, {}, genTexts=True)
self.mibInfo, pycode = PySnmpCodeGen().genCode(ast, {mibInfo.name: symtable}, genTexts=True)
codeobj = compile(pycode, 'test', 'exec')
mibBuilder = MibBuilder()
mibBuilder.loadTexts = True
self.ctx = {'mibBuilder': mibBuilder}
exec(codeobj, self.ctx, self.ctx)
def testTrapTypeSymbol(self):
self.assertTrue(
'testTrap' in self.ctx,
'symbol not present'
)
def testTrapTypeName(self):
self.assertEqual(
self.ctx['testTrap'].getName(),
(1, 3, 0, 1),
'bad name'
)
def testTrapTypeDescription(self):
self.assertEqual(
self.ctx['testTrap'].getDescription(),
'Test trap\n',
'bad DESCRIPTION'
)
def testTrapTypeClass(self):
self.assertEqual(
self.ctx['testTrap'].__class__.__name__,
'NotificationType',
'bad SYNTAX class'
)
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite)
|
StarcoderdataPython
|
4806850
|
<filename>Spread_Strat/Inflation_Spread.py<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 25 03:00:21 2020
Compare the gold, silver and copper spreads against the 10Y bond price to gauge inflation or deflation pressure.
@author: <NAME>
@contact: <EMAIL>
"""
import DB.dbFetch as dbFetch
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
def gen_INF_spread(start,end):
    # Generate gold/silver/copper spread comparisons against the 10Y bond price
myFmt = mdates.DateFormatter('%y/%m') # Dates format
Ticker_1 = "AU"
Ticker_2 = "AG"
Ticker_3 = "T"
Ticker_4 = "CU"
#Get raw market data
index_data = pd.DataFrame(dbFetch.get_index_all()).set_index("Dates")
inv_df = pd.DataFrame(dbFetch.get_historical_inventory()).set_index("Dates")
market_1 = index_data[index_data["name"]==Ticker_1][start:end]
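    # Use the open/close midpoint as the price series and normalise each series to its first
    # value so that contracts quoted on very different price scales can be compared directly.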
market_1["mid_price"] = 0.5*(market_1["open"]+market_1["close"])
market_1["mid_price"] = market_1["mid_price"]/market_1["mid_price"][0]
market_1 = market_1[["mid_price","r1"]]
Inventory = inv_df.loc[inv_df["Product"].str.upper()==Ticker_1][start:end]["INV"]
market_1 = market_1.join( Inventory, how="left" ).fillna(0)
market_2 = index_data[index_data["name"]==Ticker_2][start:end]
market_2["mid_price"] = 0.5*(market_2["open"]+market_2["close"])
market_2["mid_price"] = market_2["mid_price"]/market_2["mid_price"][0]
market_2 = market_2[["mid_price","r1"]]
Inventory = inv_df.loc[inv_df["Product"].str.upper()==Ticker_2][start:end]["INV"]
market_2 = market_2.join(Inventory,how="left").fillna(0)
market_3 = index_data[index_data["name"]==Ticker_3][start:end]
market_3["T_price"] = 0.5*(market_3["open"]+market_3["close"])
market_3["T_price"] = market_3["T_price"]/market_3["T_price"][0]
market_3 = market_3[["T_price","r1"]]
market_1 = market_1.join(market_3[["T_price"]],how="left").fillna(method="ffill")
market_4 = index_data[index_data["name"]==Ticker_4][start:end]
market_4["mid_price"] = 0.5*(market_4["open"]+market_4["close"])
market_4["mid_price"] = market_4["mid_price"]/market_4["mid_price"][0]
market_4 = market_4[["mid_price","r1"]]
Inventory = inv_df.loc[inv_df["Product"].str.upper()==Ticker_4][start:end]["INV"]
market_4 = market_4.join(Inventory,how="left").fillna(0)
# Start plotting
spread_name = Ticker_1+"_"+Ticker_2+"_Spread"
fig, axes = plt.subplots(nrows=3, ncols=2,figsize=(10,12))
market_1[spread_name] = market_1["mid_price"]-market_2["mid_price"]
axes[0,0].plot(market_1[spread_name],color='C0', label=Ticker_1+"-"+Ticker_2)
ax2 = axes[0,0].twinx()
ax2.plot(market_1["T_price"],color='C1', label="T_Bond")
axes[0,0].legend()
axes[0,0].xaxis.set_major_formatter(myFmt)
axes[0,1].hist(market_1[spread_name],bins=50,color='C1', label=spread_name)
axes[0,1].axvline(market_1[spread_name][-1], color='k', linestyle='dashed', linewidth=3)
bottom, top = axes[0,1].get_ylim()
pct_rank = market_1[spread_name].sort_values().values.searchsorted(market_1[spread_name][-1])/len(market_1[spread_name])
axes[0,1].text(market_1[spread_name][-1], top*0.9, 'Current:{:.1},\nPct:{:.1%}'.format(market_1[spread_name][-1],pct_rank))
spread_name = Ticker_1+"_"+Ticker_4+"_Spread"
market_1[spread_name] = market_1["mid_price"]-market_4["mid_price"]
axes[1,0].plot(market_1[spread_name],color='C0', label=Ticker_1+"-"+Ticker_4)
ax2 = axes[1,0].twinx()
ax2.plot(market_3["T_price"],color='C1', label="T_Bond")
axes[1,0].legend()
axes[1,0].xaxis.set_major_formatter(myFmt)
axes[1,1].hist(market_1[spread_name],bins=50,color='C1', label=spread_name)
axes[1,1].axvline(market_1[spread_name][-1], color='k', linestyle='dashed', linewidth=3)
bottom, top = axes[1,1].get_ylim()
pct_rank = market_1[spread_name].sort_values().values.searchsorted(market_1[spread_name][-1])/len(market_1[spread_name])
axes[1,1].text(market_1[spread_name][-1], top*0.9, 'Current:{:.1},\nPct:{:.1%}'.format(market_1[spread_name][-1],pct_rank))
spread_name = Ticker_2+"_"+Ticker_4+"_Spread"
market_2[spread_name] = market_2["mid_price"]-market_4["mid_price"]
axes[2,0].plot(market_2[spread_name],color='C0', label=Ticker_2+"-"+Ticker_4)
ax2 = axes[2,0].twinx()
ax2.plot(market_3["T_price"],color='C1', label="T_Bond")
axes[2,0].legend()
axes[2,0].xaxis.set_major_formatter(myFmt)
axes[2,1].hist(market_2[spread_name],bins=50,color='C1', label=spread_name)
axes[2,1].axvline(market_2[spread_name][-1], color='k', linestyle='dashed', linewidth=3)
bottom, top = axes[2,1].get_ylim()
pct_rank = market_2[spread_name].sort_values().values.searchsorted(market_2[spread_name][-1])/len(market_2[spread_name])
axes[2,1].text(market_2[spread_name][-1], top*0.9, 'Current:{:.1},\nPct:{:.1%}'.format(market_2[spread_name][-1],pct_rank))
fig.suptitle("Inflation Spread Strat",y=0.9)
return fig
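if __name__ == "__main__":
    # Hedged usage sketch (not in the original script): render the spread dashboard for a
    # recent window and save it to disk. The date range and output filename are assumptions.
    start_date = "2018-01-01"
    end_date = dt.datetime.today().strftime("%Y-%m-%d")
    figure = gen_INF_spread(start_date, end_date)
    figure.savefig("inflation_spread.png", bbox_inches="tight")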
|
StarcoderdataPython
|
3227134
|
from itertools import count
import logging
import networkx
import ailment
from claripy.utils.orderedset import OrderedSet
from ...utils.graph import dfs_back_edges, subgraph_between_nodes, dominates, shallow_reverse
from .. import Analysis, register_analysis
from .utils import replace_last_statement
from .structurer_nodes import MultiNode, ConditionNode
from .graph_region import GraphRegion
from .condition_processor import ConditionProcessor
l = logging.getLogger(name=__name__)
# an ever-incrementing counter
CONDITIONNODE_ADDR = count(0xff000000)
class RegionIdentifier(Analysis):
"""
Identifies regions within a function.
"""
def __init__(self, func, cond_proc=None, graph=None):
self.function = func
self.cond_proc = cond_proc if cond_proc is not None else ConditionProcessor()
self._graph = graph if graph is not None else self.function.graph
self.region = None
self._start_node = None
self._loop_headers = None
self._analyze()
@staticmethod
def slice_graph(graph, node, frontier, include_frontier=False):
"""
Generate a slice of the graph from the head node to the given frontier.
:param networkx.DiGraph graph: The graph to work on.
:param node: The starting node in the graph.
:param frontier: A list of frontier nodes.
:param bool include_frontier: Whether the frontier nodes are included in the slice or not.
:return: A subgraph.
:rtype: networkx.DiGraph
"""
subgraph = subgraph_between_nodes(graph, node, frontier, include_frontier=include_frontier)
if not list(subgraph.nodes):
# HACK: FIXME: for infinite loop nodes, this would return an empty set, so we include the loop body itself
# Make sure this makes sense (EDG thinks it does)
if (node, node) in graph.edges:
subgraph.add_edge(node, node)
return subgraph
def _analyze(self):
# make a copy of the graph
graph = networkx.DiGraph(self._graph)
# preprocess: make it a super graph
self._make_supergraph(graph)
self._start_node = self._get_start_node(graph)
# preprocess: find loop headers
self._loop_headers = self._find_loop_headers(graph)
self.region = self._make_regions(graph)
@staticmethod
def _get_start_node(graph):
try:
return next(n for n in graph.nodes() if graph.in_degree(n) == 0)
except StopIteration:
return None
def _test_reducibility(self):
# make a copy of the graph
graph = networkx.DiGraph(self._graph)
# preprocess: make it a super graph
self._make_supergraph(graph)
while True:
changed = False
# find a node with a back-edge, remove the edge (deleting the loop), and replace it with a MultiNode
changed |= self._remove_self_loop(graph)
# find a node that has only one predecessor, and merge it with its predecessor (replace them with a
# MultiNode)
changed |= self._merge_single_entry_node(graph)
if not changed:
# a fixed-point is reached
break
def _make_supergraph(self, graph):
while True:
for src, dst, data in graph.edges(data=True):
type_ = data.get('type', None)
if type_ == 'fake_return':
if len(list(graph.successors(src))) == 1 and len(list(graph.predecessors(dst))) == 1:
self._merge_nodes(graph, src, dst, force_multinode=True)
break
elif type_ == 'call':
graph.remove_node(dst)
break
else:
break
def _find_loop_headers(self, graph):
return OrderedSet(sorted((t for _,t in dfs_back_edges(graph, self._start_node)), key=lambda x: x.addr))
def _find_initial_loop_nodes(self, graph, head):
# TODO optimize
latching_nodes = { s for s,t in dfs_back_edges(graph, self._start_node) if t == head }
loop_subgraph = self.slice_graph(graph, head, latching_nodes, include_frontier=True)
nodes = set(loop_subgraph.nodes())
return nodes
def _refine_loop(self, graph, head, initial_loop_nodes, initial_exit_nodes):
refined_loop_nodes = initial_loop_nodes.copy()
refined_exit_nodes = initial_exit_nodes.copy()
idom = networkx.immediate_dominators(graph, self._start_node)
new_exit_nodes = refined_exit_nodes
while len(refined_exit_nodes) > 1 and new_exit_nodes:
new_exit_nodes = set()
for n in list(refined_exit_nodes):
if all(pred in refined_loop_nodes for pred in graph.predecessors(n)) and dominates(idom, head, n):
refined_loop_nodes.add(n)
refined_exit_nodes.remove(n)
for u in (set(graph.successors(n)) - refined_loop_nodes):
new_exit_nodes.add(u)
refined_exit_nodes |= new_exit_nodes
refined_loop_nodes = refined_loop_nodes - refined_exit_nodes
return refined_loop_nodes, refined_exit_nodes
def _remove_self_loop(self, graph):
r = False
while True:
for node in graph.nodes():
if node in graph[node]:
# found a self loop
self._remove_node(graph, node)
r = True
break
else:
break
return r
def _merge_single_entry_node(self, graph):
r = False
while True:
for node in networkx.dfs_postorder_nodes(graph):
                preds = list(graph.predecessors(node))
if len(preds) == 1:
# merge the two nodes
self._absorb_node(graph, preds[0], node)
r = True
break
else:
break
return r
def _make_regions(self, graph):
structured_loop_headers = set()
new_regions = [ ]
# FIXME: _get_start_node() will fail if the graph is just a loop
# Find all loops
while True:
restart = False
self._start_node = self._get_start_node(graph)
# Start from loops
for node in self._loop_headers:
if node in structured_loop_headers:
continue
region = self._make_cyclic_region(node, graph)
if region is not None:
l.debug("Structured a loop region %r.", region)
new_regions.append(region)
structured_loop_headers.add(node)
restart = True
break
if restart:
continue
break
new_regions.append(GraphRegion(self._get_start_node(graph), graph, None, None, False))
l.debug("Identified %d loop regions.", len(structured_loop_headers))
l.debug("No more loops left. Start structuring acyclic regions.")
# No more loops left. Structure acyclic regions.
while new_regions:
region = new_regions.pop(0)
head = region.head
subgraph = region.graph
failed_region_attempts = set()
while self._make_acyclic_region(head, subgraph, region.graph_with_successors, failed_region_attempts,
region.cyclic):
if head not in subgraph:
# update head
head = next(iter(n for n in subgraph.nodes() if n.addr == head.addr))
head = next(iter(n for n in subgraph.nodes() if n.addr == head.addr))
region.head = head
if len(graph.nodes()) == 1 and isinstance(list(graph.nodes())[0], GraphRegion):
return list(graph.nodes())[0]
# create a large graph region
new_head = self._get_start_node(graph)
region = GraphRegion(new_head, graph, None, None, False)
return region
#
# Cyclic regions
#
def _make_cyclic_region(self, head, graph):
l.debug("Found cyclic region at %#08x", head.addr)
initial_loop_nodes = self._find_initial_loop_nodes(graph, head)
l.debug("Initial loop nodes %s", self._dbg_block_list(initial_loop_nodes))
# Make sure there is no other loop contained in the current loop
if {n for n in initial_loop_nodes if n.addr != head.addr}.intersection(self._loop_headers):
return None
normal_entries = {n for n in graph.predecessors(head) if n not in initial_loop_nodes}
abnormal_entries = set()
for n in initial_loop_nodes:
if n == head:
continue
preds = set(graph.predecessors(n))
abnormal_entries |= (preds - initial_loop_nodes)
l.debug("Normal entries %s", self._dbg_block_list(normal_entries))
l.debug("Abnormal entries %s", self._dbg_block_list(abnormal_entries))
initial_exit_nodes = set()
for n in initial_loop_nodes:
succs = set(graph.successors(n))
initial_exit_nodes |= (succs - initial_loop_nodes)
l.debug("Initial exit nodes %s", self._dbg_block_list(initial_exit_nodes))
refined_loop_nodes, refined_exit_nodes = self._refine_loop(graph, head, initial_loop_nodes,
initial_exit_nodes)
l.debug("Refined loop nodes %s", self._dbg_block_list(refined_loop_nodes))
l.debug("Refined exit nodes %s", self._dbg_block_list(refined_exit_nodes))
if len(refined_exit_nodes) > 1:
# self._get_start_node(graph)
node_post_order = list(networkx.dfs_postorder_nodes(graph, head))
sorted_exit_nodes = sorted(list(refined_exit_nodes), key=node_post_order.index)
normal_exit_node = sorted_exit_nodes[0]
abnormal_exit_nodes = set(sorted_exit_nodes[1:])
else:
normal_exit_node = next(iter(refined_exit_nodes)) if len(refined_exit_nodes) > 0 else None
abnormal_exit_nodes = set()
region = self._abstract_cyclic_region(graph, refined_loop_nodes, head, normal_entries, abnormal_entries,
normal_exit_node, abnormal_exit_nodes)
if len(region.successors) > 1:
# multi-successor region. refinement is required
self._refine_loop_successors(region, graph)
return region
def _refine_loop_successors(self, region, graph):
"""
If there are multiple successors of a loop, convert them into conditional gotos. Eventually there should be
only one loop successor.
:param GraphRegion region: The cyclic region to refine.
:param networkx.DiGraph graph: The current graph that is being structured.
:return: None
"""
if len(region.successors) <= 1:
return
# recover reaching conditions
self.cond_proc.recover_reaching_conditions(region, with_successors=True)
successors = list(region.successors)
condnode_addr = next(CONDITIONNODE_ADDR)
# create a new successor
cond = ConditionNode(
condnode_addr,
None,
self.cond_proc.reaching_conditions[successors[0]],
successors[0],
false_node=None,
)
for succ in successors[1:]:
cond = ConditionNode(condnode_addr,
None,
self.cond_proc.reaching_conditions[succ],
succ,
false_node=cond,
)
g = region.graph_with_successors
# modify region in place
region.successors = {cond}
for succ in successors:
for src, _, data in list(g.in_edges(succ, data=True)):
removed_edges = [ ]
for src2src, _, data_ in list(g.in_edges(src, data=True)):
removed_edges.append((src2src, src, data_))
g.remove_edge(src2src, src)
g.remove_edge(src, succ)
# TODO: rewrite the conditional jumps in src so that it goes to cond-node instead.
# modify the last statement of src so that it jumps to cond
replaced_any_stmt = False
last_stmts = self.cond_proc.get_last_statements(src)
for last_stmt in last_stmts:
if isinstance(last_stmt, ailment.Stmt.ConditionalJump):
if last_stmt.true_target.value == succ.addr:
new_last_stmt = ailment.Stmt.ConditionalJump(
last_stmt.idx,
last_stmt.condition,
ailment.Expr.Const(None, None, condnode_addr, self.project.arch.bits),
last_stmt.false_target,
ins_addr=last_stmt.ins_addr,
)
elif last_stmt.false_target.value == succ.addr:
new_last_stmt = ailment.Stmt.ConditionalJump(
last_stmt.idx,
last_stmt.condition,
last_stmt.true_target,
ailment.Expr.Const(None, None, condnode_addr, self.project.arch.bits),
ins_addr=last_stmt.ins_addr,
)
else:
# none of the two branches is jumping out of the loop
continue
else:
new_last_stmt = ailment.Stmt.Jump(
last_stmt.idx,
ailment.Expr.Const(None, None, condnode_addr, self.project.arch.bits),
ins_addr=last_stmt.ins_addr,
)
replace_last_statement(src, last_stmt, new_last_stmt)
replaced_any_stmt = True
if not replaced_any_stmt:
l.warning("No statement was replaced. Is there anything wrong?")
# raise Exception()
# add src back
for src2src, _, data_ in removed_edges:
g.add_edge(src2src, src, **data_)
g.add_edge(src, cond, **data)
# modify graph
graph.add_edge(region, cond)
for succ in successors:
edge_data = graph.get_edge_data(region, succ)
graph.remove_edge(region, succ)
graph.add_edge(cond, succ, **edge_data)
#
# Acyclic regions
#
def _make_acyclic_region(self, head, graph, secondary_graph, failed_region_attempts, cyclic):
# pre-processing
# we need to create a copy of the original graph if
# - there are in edges to the head node, or
# - there are more than one end nodes
head_inedges = list(graph.in_edges(head))
if head_inedges:
# we need a copy of the graph to remove edges coming into the head
graph_copy = networkx.DiGraph(graph)
# remove any in-edge to the head node
for src, _ in head_inedges:
graph_copy.remove_edge(src, head)
else:
graph_copy = graph
endnodes = [node for node in graph_copy.nodes() if graph_copy.out_degree(node) == 0]
if len(endnodes) == 0:
# sanity check: there should be at least one end node
l.critical("No end node is found in a supposedly acyclic graph. Is it really acyclic?")
return False
if len(endnodes) > 1:
# we need a copy of the graph!
graph_copy = networkx.DiGraph(graph_copy)
# if this graph has multiple end nodes: create a single end node
dummy_endnode = None
if len(endnodes) > 1:
dummy_endnode = "DUMMY_ENDNODE"
for endnode in endnodes:
graph_copy.add_edge(endnode, dummy_endnode)
endnodes = [ dummy_endnode ]
else:
dummy_endnode = None
# compute dominator tree
doms = networkx.immediate_dominators(graph_copy, head)
# compute post-dominator tree
inverted_graph = shallow_reverse(graph_copy)
postdoms = networkx.immediate_dominators(inverted_graph, endnodes[0])
# dominance frontiers
df = networkx.algorithms.dominance_frontiers(graph_copy, head)
# visit the nodes in post-order
for node in networkx.dfs_postorder_nodes(graph_copy, source=head):
if node is dummy_endnode:
# skip the dummy endnode
continue
if cyclic and node is head:
continue
out_degree = graph_copy.out_degree[node]
if out_degree == 0:
# the root element of the region hierarchy should always be a GraphRegion,
# so we transform it into one, if necessary
if graph_copy.in_degree(node) == 0 and not isinstance(node, GraphRegion):
subgraph = networkx.DiGraph()
subgraph.add_node(node)
self._abstract_acyclic_region(graph, GraphRegion(node, subgraph, None, None, False), [],
secondary_graph=secondary_graph)
continue
# test if this node is an entry to a single-entry, single-successor region
levels = 0
postdom_node = postdoms.get(node, None)
while postdom_node is not None:
if (node, postdom_node) not in failed_region_attempts:
if self._check_region(graph_copy, node, postdom_node, doms, df):
frontier = [ postdom_node ]
region = self._compute_region(graph_copy, node, frontier, dummy_endnode=dummy_endnode)
if region is not None:
# l.debug("Walked back %d levels in postdom tree.", levels)
l.debug("Node %r, frontier %r.", node, frontier)
# l.debug("Identified an acyclic region %s.", self._dbg_block_list(region.graph.nodes()))
self._abstract_acyclic_region(graph, region, frontier, dummy_endnode=dummy_endnode,
secondary_graph=secondary_graph)
# assert dummy_endnode not in graph
return True
failed_region_attempts.add((node, postdom_node))
if not dominates(doms, node, postdom_node):
break
if postdom_node is postdoms.get(postdom_node, None):
break
postdom_node = postdoms.get(postdom_node, None)
levels += 1
# l.debug("Walked back %d levels in postdom tree and did not find anything for %r. Next.", levels, node)
return False
@staticmethod
def _check_region(graph, start_node, end_node, doms, df):
"""
:param graph:
:param start_node:
:param end_node:
:param doms:
:param df:
:return:
"""
# if the exit node is the header of a loop that contains the start node, the dominance frontier should only
# contain the exit node.
if not dominates(doms, start_node, end_node):
frontier = df.get(start_node, set())
for node in frontier:
if node is not start_node and node is not end_node:
return False
# no edges should enter the region.
for node in df.get(end_node, set()):
if dominates(doms, start_node, node) and node is not end_node:
return False
# no edges should leave the region.
for node in df.get(start_node, set()):
if node is start_node or node is end_node:
continue
if node not in df.get(end_node, set()):
return False
for pred in graph.predecessors(node):
if dominates(doms, start_node, pred) and not dominates(doms, end_node, pred):
return False
return True
@staticmethod
def _compute_region(graph, node, frontier, include_frontier=False, dummy_endnode=None):
subgraph = networkx.DiGraph()
frontier_edges = [ ]
queue = [ node ]
traversed = set()
while queue:
node_ = queue.pop()
if node_ in frontier:
continue
traversed.add(node_)
subgraph.add_node(node_)
for succ in graph.successors(node_):
edge_data = graph.get_edge_data(node_, succ)
if node_ in frontier and succ in traversed:
if include_frontier:
# if frontier nodes are included, do not keep traversing their successors
# however, if it has an edge to an already traversed node, we should add that edge
subgraph.add_edge(node_, succ, **edge_data)
else:
frontier_edges.append((node_, succ, edge_data))
continue
if succ is dummy_endnode:
continue
if succ in frontier:
if not include_frontier:
# skip all frontier nodes
frontier_edges.append((node_, succ, edge_data))
continue
subgraph.add_edge(node_, succ, **edge_data)
if succ in traversed:
continue
queue.append(succ)
if dummy_endnode is not None:
frontier = { n for n in frontier if n is not dummy_endnode }
if subgraph.number_of_nodes() > 1:
subgraph_with_frontier = networkx.DiGraph(subgraph)
for src, dst, edge_data in frontier_edges:
if dst is not dummy_endnode:
subgraph_with_frontier.add_edge(src, dst, **edge_data)
# assert dummy_endnode not in frontier
# assert dummy_endnode not in subgraph_with_frontier
return GraphRegion(node, subgraph, frontier, subgraph_with_frontier, False)
else:
return None
def _abstract_acyclic_region(self, graph, region, frontier, dummy_endnode=None, secondary_graph=None):
in_edges = self._region_in_edges(graph, region, data=True)
out_edges = self._region_out_edges(graph, region, data=True)
nodes_set = set()
for node_ in list(region.graph.nodes()):
nodes_set.add(node_)
if node_ is not dummy_endnode:
graph.remove_node(node_)
graph.add_node(region)
for src, _, data in in_edges:
if src not in nodes_set:
graph.add_edge(src, region, **data)
for _, dst, data in out_edges:
if dst not in nodes_set:
graph.add_edge(region, dst, **data)
if frontier:
for frontier_node in frontier:
if frontier_node is not dummy_endnode:
graph.add_edge(region, frontier_node)
if secondary_graph is not None:
self._abstract_acyclic_region(secondary_graph, region, { })
@staticmethod
def _abstract_cyclic_region(graph, loop_nodes, head, normal_entries, abnormal_entries, normal_exit_node,
abnormal_exit_nodes):
region = GraphRegion(head, None, None, None, True)
subgraph = networkx.DiGraph()
region_outedges = [ ]
graph.add_node(region)
for node in loop_nodes:
subgraph.add_node(node)
in_edges = list(graph.in_edges(node, data=True))
out_edges = list(graph.out_edges(node, data=True))
for src, dst, data in in_edges:
if src in normal_entries:
graph.add_edge(src, region, **data)
elif src in abnormal_entries:
data['region_dst_node'] = dst
graph.add_edge(src, region, **data)
elif src in loop_nodes:
subgraph.add_edge(src, dst, **data)
elif src is region:
subgraph.add_edge(head, dst, **data)
else:
assert 0
for src, dst, data in out_edges:
if dst in loop_nodes:
subgraph.add_edge(src, dst, **data)
elif dst is region:
subgraph.add_edge(src, head, **data)
elif dst is normal_exit_node:
region_outedges.append((node, dst))
graph.add_edge(region, dst, **data)
elif dst in abnormal_exit_nodes:
region_outedges.append((node, dst))
# data['region_src_node'] = src
graph.add_edge(region, dst, **data)
else:
assert 0
subgraph_with_exits = networkx.DiGraph(subgraph)
for src, dst in region_outedges:
subgraph_with_exits.add_edge(src, dst)
region.graph = subgraph
region.graph_with_successors = subgraph_with_exits
if normal_exit_node is not None:
region.successors = [normal_exit_node]
else:
region.successors = [ ]
region.successors += list(abnormal_exit_nodes)
for node in loop_nodes:
graph.remove_node(node)
return region
@staticmethod
def _region_in_edges(graph, region, data=False):
return list(graph.in_edges(region.head, data=data))
@staticmethod
def _region_out_edges(graph, region, data=False):
out_edges = [ ]
for node in region.graph.nodes():
out_ = graph.out_edges(node, data=data)
for _, dst, data_ in out_:
if dst in region.graph:
continue
out_edges.append((region, dst, data_))
return out_edges
def _remove_node(self, graph, node): # pylint:disable=no-self-use
in_edges = [ (src, dst, data) for (src, dst, data) in graph.in_edges(node, data=True) if not src is node ]
out_edges = [ (src, dst, data) for (src, dst, data) in graph.out_edges(node, data=True) if not dst is node ]
if len(in_edges) <= 1 and len(out_edges) <= 1:
# it forms a region by itself :-)
new_node = None
else:
new_node = MultiNode([ node ])
graph.remove_node(node)
if new_node is not None:
for src, _, data in in_edges:
graph.add_edge(src, new_node, **data)
for _, dst, data in out_edges:
graph.add_edge(new_node, dst, **data)
def _merge_nodes(self, graph, node_a, node_b, force_multinode=False): # pylint:disable=no-self-use
in_edges = list(graph.in_edges(node_a, data=True))
out_edges = list(graph.out_edges(node_b, data=True))
if not force_multinode and len(in_edges) <= 1 and len(out_edges) <= 1:
# it forms a region by itself :-)
new_node = None
else:
new_node = MultiNode([ node_a, node_b ])
graph.remove_node(node_a)
graph.remove_node(node_b)
if new_node is not None:
graph.add_node(new_node)
for src, _, data in in_edges:
if src is node_b:
src = new_node
graph.add_edge(src, new_node, **data)
for _, dst, data in out_edges:
if dst is node_a:
dst = new_node
graph.add_edge(new_node, dst, **data)
assert not node_a in graph
assert not node_b in graph
def _absorb_node(self, graph, node_mommy, node_kiddie, force_multinode=False): # pylint:disable=no-self-use
in_edges_mommy = graph.in_edges(node_mommy, data=True)
out_edges_mommy = graph.out_edges(node_mommy, data=True)
out_edges_kiddie = graph.out_edges(node_kiddie, data=True)
if not force_multinode and len(in_edges_mommy) <= 1 and len(out_edges_kiddie) <= 1:
# it forms a region by itself :-)
new_node = None
else:
new_node = MultiNode([node_mommy, node_kiddie])
graph.remove_node(node_mommy)
graph.remove_node(node_kiddie)
if new_node is not None:
graph.add_node(new_node)
for src, _, data in in_edges_mommy:
if src == node_kiddie:
src = new_node
graph.add_edge(src, new_node, **data)
for _, dst, data in out_edges_mommy:
if dst == node_kiddie:
continue
if dst == node_mommy:
dst = new_node
graph.add_edge(new_node, dst, **data)
for _, dst, data in out_edges_kiddie:
if dst == node_mommy:
dst = new_node
graph.add_edge(new_node, dst, **data)
assert not node_mommy in graph
assert not node_kiddie in graph
@staticmethod
def _dbg_block_list(blocks):
return [(hex(b.addr) if hasattr(b, 'addr') else repr(b)) for b in blocks]
register_analysis(RegionIdentifier, 'RegionIdentifier')
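# Hedged usage sketch (not part of the original module): once registered, the analysis is
# normally reached through an angr project's analysis hub. The binary path and function
# name below are placeholders.
#
#   import angr
#   proj = angr.Project("/path/to/binary", auto_load_libs=False)
#   proj.analyses.CFGFast(normalize=True)
#   func = proj.kb.functions["main"]
#   ri = proj.analyses.RegionIdentifier(func)
#   top_region = ri.region  # GraphRegion describing the function's region hierarchy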
|
StarcoderdataPython
|
1742542
|
from django.test import TestCase
from dali.gallery.models import Gallery, Picture, _get_viewable_size, _get_thumbnail_size
from dali.gallery.tests.utils import create_picture, get_image, get_temp_name
class GalleryTestCase(TestCase):
fixtures = ['gallery.json']
def setUp(self):
self.gallery = Gallery.objects.get(slug='TestGallery')
def test_count_no_pictures(self):
count = self.gallery.picture_count()
self.assertEquals(0, count)
def test_count_with_pictures(self):
expected = 4
for i in range(expected):
create_picture(self.gallery).save()
actual = self.gallery.picture_count()
self.assertEquals(expected, actual)
class PictureTestCase(TestCase):
fixtures = ['gallery.json']
def setUp(self):
self.gallery = Gallery.objects.get(slug='TestGallery')
self.picture = create_picture(self.gallery)
def test_generate_on_new_picture(self):
self.assert_(self.picture.save())
def test_no_generate_on_save(self):
self.assert_(self.picture.save())
self.failIf(self.picture.save())
def test_generate_on_save_with_new_image(self):
pic = self.picture
self.assert_(pic.save())
pic.original = get_image(get_temp_name())
self.assert_(pic.save())
def test_get_viewable_size_landscape(self):
w, h = _get_viewable_size(1000, 800)
self.assertEquals(320, h, 'The height is not correct')
self.assertEquals(400, w, 'The width is not correct')
def test_get_viewable_size_portrait(self):
w, h = _get_viewable_size(800, 1000)
self.assertEquals(400, h, 'The height is not correct')
self.assertEquals(320, w, 'The width is not correct')
def test_get_viewable_size_square(self):
w, h = _get_viewable_size(832, 832)
self.assertEquals(w, h, 'The width is not equal to the height')
def test_get_thumbnail_size_landscape(self):
w, h = _get_thumbnail_size(1000, 800)
self.assertEquals(75, h, 'The height is not correct')
self.assertEquals(93, w, 'The width is not correct')
def test_get_thumbnail_size_portrait(self):
w, h = _get_thumbnail_size(800, 1000)
self.assertEquals(93, h, 'The height is not correct')
self.assertEquals(75, w, 'The width is not correct')
def test_get_thumbnail_size_square(self):
w, h = _get_thumbnail_size(1232, 1232)
self.assertEquals(w, h, 'The width is not equal to the height')
|
StarcoderdataPython
|
3258317
|
<gh_stars>10-100
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
# model file: example-models/misc/moving-avg/stochastic-volatility-optimized.stan
import torch
import pyro
import pyro.distributions as dist
def init_vector(name, dims=None):
return pyro.sample(name, dist.Normal(torch.zeros(dims), 0.2 * torch.ones(dims)).to_event(1))
def validate_data_def(data):
assert 'T' in data, 'variable not found in data: key=T'
assert 'y' in data, 'variable not found in data: key=y'
# initialize data
T = data["T"]
y = data["y"]
def init_params(data):
params = {}
# initialize data
T = data["T"]
y = data["y"]
# assign init values for parameters
params["phi"] = pyro.sample("phi", dist.Uniform(-(1), 1))
return params
def model(data, params):
# initialize data
T = data["T"]
y = data["y"]
# init parameters
phi = params["phi"]
# initialize transformed parameters
h = init_vector("h", dims=(T)) # vector
# model block
sigma = pyro.sample("sigma", dist.HalfCauchy(5.))
mu = pyro.sample("mu", dist.Cauchy(0., 10.))
h_std = pyro.sample("h_std", dist.Normal(0., 1.).expand([T]))
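    # Build the log-volatility path h from the standardised draws: scale by sigma, anchor the
    # first state at its stationary scale, shift by mu, then apply the AR(1) recursion
    # h[t] = mu + phi * (h[t-1] - mu) + sigma * h_std[t].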
with torch.no_grad():
h = h_std * sigma
h[0] = h[0] / torch.sqrt(1. - phi * phi)
h = h + mu
for t in range(1, T):
h[t] = h[t] + phi * (h[t-1] - mu);
    y = pyro.sample("y", dist.Normal(0., (h / 2.).exp()), obs=y)
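if __name__ == "__main__":
    # Hedged usage sketch (not in the original file): evaluate the model once on a tiny
    # synthetic return series; the data values are made up purely for illustration.
    example_data = {"T": 5, "y": torch.randn(5) * 0.02}
    example_params = init_params(example_data)
    model(example_data, example_params)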
|
StarcoderdataPython
|
154226
|
<filename>train.py<gh_stars>1-10
from Trainer import *
from TrainerOptions import *
opt = TrainerOptions()
opt.parse_args()
trainer = Trainer(opt)
trainer.train()
|
StarcoderdataPython
|
4808267
|
import sys
with open(sys.argv[1], "rb") as fin:
with open(sys.argv[2], "w") as fout:
fout.write("package %s\n" % sys.argv[3])
fout.write("var %s = []byte{" % sys.argv[4])
while True:
chunk = fin.read(1024)
if not chunk:
break
for c in chunk:
# Python 2 requires explicit conversion to an integer.
try:
c = ord(c)
except TypeError:
pass
fout.write("%d," % c)
fout.write("}")
|
StarcoderdataPython
|
3311556
|
import unittest
from fearquantlib.wavelib import __max_successive_series_len as max_len
class TestMxSuccSeriesLen(unittest.TestCase):
def test_fn(self):
arr = [1,7,3,4,5,2,4,5,6,1,0,4]
l = max_len(arr, asc=False)
self.assertEqual(3,l)# 6,1,0
l2 = max_len(arr) # 2,4,5,6,
self.assertEqual(4, l2)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
3263290
|
<filename>src/modules/geodesy/src/geodesy_conversion_UTM.py
#!/usr/bin/env python
"""Converts geodetic coordinate system to and from UTM"""
from __future__ import print_function
from __future__ import division
import math
import utm
from geodesy import Geodesy
class GeodesyConverterUTM(Geodesy):
def __init__(self, latitudesData, longitudesData):
self.latitudesData = latitudesData
self.longitudesData = longitudesData
def geodetic_data_to_UTM_data(self):
eastings = []
northings = []
zoneNumbers = []
zoneLetters = []
for i in range(len(self.latitudesData)):
easting, northing, zoneNumber, zoneLetter = utm.from_latlon(self.latitudesData[i], self.longitudesData[i])
eastings.append(easting)
northings.append(northing)
            zoneNumbers.append(zoneNumber)
zoneLetters.append(zoneLetter)
return eastings, northings, zoneNumbers, zoneLetters
def global_to_relative_UTM(self, eastings, northings):
"""Convert global UTM coordinates to relative coordinates at a given index"""
globalEastingInitial = eastings[0]
globalNorthingInitial = northings[0]
relativeEastings = []
relativeNorthings = []
for i in range(len(eastings)):
relativeEastings.append(eastings[i] - globalEastingInitial)
relativeNorthings.append(northings[i] - globalNorthingInitial)
return relativeEastings, relativeNorthings
###############################
##### Self Implementation #####
###############################
# """Reference: https://en.wikipedia.org/wiki/Universal_Transverse_Mercator_coordinate_system#From_latitude,_longitude_(%CF%86,_%CE%BB)_to_UTM_coordinates_(E,_N)
# NOTE: Lengths are in kilometers
# """
# TODO: Self-implemented fast geodetic to UTM conversion
# # Flattening coefficient
# flatteningCoeff = Geodesy.f
# N_0 = 0.0 # for Northern Hemisphere, in kilometers
# k_0 = 0.9996
# E_0 = 500
# def geodetic_to_UTM(lat, lng):
# """Converts input geodetic latitude/longitude to UTM"""
# # Calculate preliminary values
# n = flatteningCoeff / (2-flatteningCoeff)
    # A = ( a / (1 + n) ) * (1 + ((n**2)/4) + ((n**4)/64))
# alpha1 = (1/2)*n - (2/3)*(n**2) + (5/16)*(n**3)
# alpha2 = (13/48)*(n**2) - (3/5)*(n**3)
# alpha3 = (61/240)*(n**3)
    # beta1 = (1/2)*n - (2/3)*(n**2) + (37/96)*(n**3)
    # beta2 = (1/48)*(n**2) + (1/15)*(n**3)
    # beta3 = (17/480)*(n**3)
# return
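if __name__ == "__main__":
    # Hedged usage sketch (not in the original module): convert two illustrative fixes to UTM
    # and then to coordinates relative to the first point. The latitude/longitude values are
    # made-up sample data.
    sample_lats = [37.4275, 37.4280]
    sample_lngs = [-122.1697, -122.1690]
    converter = GeodesyConverterUTM(sample_lats, sample_lngs)
    eastings, northings, zone_numbers, zone_letters = converter.geodetic_data_to_UTM_data()
    relative_eastings, relative_northings = converter.global_to_relative_UTM(eastings, northings)
    print(relative_eastings, relative_northings)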
|
StarcoderdataPython
|
45524
|
'''
The function reverses a string and converts all of its letters to uppercase:
'''
def fazAlgo(string):
pos = len(string)-1
string = string.upper()
while pos >= 0:
print(string[pos], end="")
pos = pos - 1
fazAlgo("amora")
|
StarcoderdataPython
|
1638423
|
"""Place of record for the package version"""
__version__ = "2.0.0"
|
StarcoderdataPython
|
3345885
|
<reponame>ian-chong/bucketlist-backend
""" Package to manage the API configurations """
|
StarcoderdataPython
|
196056
|
# -*- coding: utf-8 -*-
"""
@author: <NAME>
@contact: <EMAIL>
@time: 2022/05/05 1:06 PM
"""
import sys
if './' not in sys.path: sys.path.append('./')
from screws.freeze.base import FrozenOnly
class _2nCSCG_CellTypeWr2Metric_Base(FrozenOnly):
"""
A base for all cell types w.r.t. metric. For each type of cscg mesh element, we can classify its
sub-cells into different types. These types are all coded here.
"""
@property
def mark(self):
"""
        A mark is a key that identifies the cell metric. If the marks of two cells are the same,
        they have the same metric; otherwise, their metrics are different. A mark is normally a
        string, but for a chaotic cell it is an int: the id of the object.
:return:
"""
# noinspection PyUnresolvedReferences
return self._mark_
def __eq__(self, other):
"""We ask that the marks to be the same."""
        # The latter check is to make sure we are not comparing against some other object that merely has a mark property
return self.mark == other.mark and other.___IS_2nCSCG_CellTypeWr2Metric___
@property
def ___IS_2nCSCG_CellTypeWr2Metric___(self):
return True
if __name__ == "__main__":
# mpiexec -n 4 python
pass
|
StarcoderdataPython
|
54592
|
from django.contrib import admin
from safe.models import PublicKey, Credential, UserSecret
class PublicKeyAdmin(admin.ModelAdmin):
raw_id_fields = ['user']
readonly_fields = ['created', 'modified']
list_display = ['user', 'created', 'modified']
class UserSecretInline(admin.StackedInline):
model = UserSecret
extra = 0
raw_id_fields = ['user']
readonly_fields = ['encrypted_secret', 'created', 'modified']
class CredentialAdmin(admin.ModelAdmin):
inlines = [UserSecretInline]
list_display = ['title', 'slug', 'tags', 'login_name', 'created', 'modified']
readonly_fields = ['created', 'modified']
admin.site.register(PublicKey, PublicKeyAdmin)
admin.site.register(Credential, CredentialAdmin)
|
StarcoderdataPython
|
1610465
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import logging
import itertools
import numpy as np
from allel.util import asarray_ndim, check_dim0_aligned, ensure_dim1_aligned
from allel.model.ndarray import GenotypeArray
from allel.stats.window import windowed_statistic, moving_statistic
from allel.stats.diversity import mean_pairwise_difference, \
mean_pairwise_difference_between
from allel.stats.misc import jackknife
from allel.chunked import get_blen_array
logger = logging.getLogger(__name__)
debug = logger.debug
def weir_cockerham_fst(g, subpops, max_allele=None, chunked=False, blen=None):
"""Compute the variance components from the analyses of variance of
allele frequencies according to <NAME> (1984).
Parameters
----------
g : array_like, int, shape (n_variants, n_samples, ploidy)
Genotype array.
subpops : sequence of sequences of ints
Sample indices for each subpopulation.
max_allele : int, optional
The highest allele index to consider.
chunked : bool, optional
If True, use a block-wise implementation to avoid loading the entire
input array into memory.
blen : int, optional
Block length to use for chunked implementation.
Returns
-------
a : ndarray, float, shape (n_variants, n_alleles)
Component of variance between populations.
b : ndarray, float, shape (n_variants, n_alleles)
Component of variance between individuals within populations.
c : ndarray, float, shape (n_variants, n_alleles)
Component of variance between gametes within individuals.
Examples
--------
Calculate variance components from some genotype data::
>>> import allel
>>> g = [[[0, 0], [0, 0], [1, 1], [1, 1]],
... [[0, 1], [0, 1], [0, 1], [0, 1]],
... [[0, 0], [0, 0], [0, 0], [0, 0]],
... [[0, 1], [1, 2], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [0, 1], [-1, -1]]]
>>> subpops = [[0, 1], [2, 3]]
>>> a, b, c = allel.stats.weir_cockerham_fst(g, subpops)
>>> a
array([[ 0.5 , 0.5 , 0. ],
[ 0. , 0. , 0. ],
[ 0. , 0. , 0. ],
[ 0. , -0.125, -0.125],
[-0.375, -0.375, 0. ]])
>>> b
array([[ 0. , 0. , 0. ],
[-0.25 , -0.25 , 0. ],
[ 0. , 0. , 0. ],
[ 0. , 0.125 , 0.25 ],
[ 0.41666667, 0.41666667, 0. ]])
>>> c
array([[ 0. , 0. , 0. ],
[ 0.5 , 0.5 , 0. ],
[ 0. , 0. , 0. ],
[ 0.125 , 0.25 , 0.125 ],
[ 0.16666667, 0.16666667, 0. ]])
Estimate the parameter theta (a.k.a., Fst) for each variant
and each allele individually::
>>> fst = a / (a + b + c)
>>> fst
array([[ 1. , 1. , nan],
[ 0. , 0. , nan],
[ nan, nan, nan],
[ 0. , -0.5, -0.5],
[-1.8, -1.8, nan]])
Estimate Fst for each variant individually (averaging over alleles)::
>>> fst = (np.sum(a, axis=1) /
... (np.sum(a, axis=1) + np.sum(b, axis=1) + np.sum(c, axis=1)))
>>> fst
array([ 1. , 0. , nan, -0.4, -1.8])
Estimate Fst averaging over all variants and alleles::
>>> fst = np.sum(a) / (np.sum(a) + np.sum(b) + np.sum(c))
>>> fst
-4.3680905886891398e-17
Note that estimated Fst values may be negative.
"""
# check inputs
if not hasattr(g, 'shape') or not hasattr(g, 'ndim'):
g = GenotypeArray(g, copy=False)
if g.ndim != 3:
raise ValueError('g must have three dimensions')
if g.shape[2] != 2:
raise NotImplementedError('only diploid genotypes are supported')
# determine highest allele index
if max_allele is None:
max_allele = g.max()
if chunked:
# use a block-wise implementation
blen = get_blen_array(g, blen)
n_variants = g.shape[0]
shape = (n_variants, max_allele + 1)
a = np.zeros(shape, dtype='f8')
b = np.zeros(shape, dtype='f8')
c = np.zeros(shape, dtype='f8')
for i in range(0, n_variants, blen):
j = min(n_variants, i+blen)
gb = g[i:j]
ab, bb, cb = _weir_cockerham_fst(gb, subpops, max_allele)
a[i:j] = ab
b[i:j] = bb
c[i:j] = cb
else:
a, b, c = _weir_cockerham_fst(g, subpops, max_allele)
return a, b, c
# noinspection PyPep8Naming
def _weir_cockerham_fst(g, subpops, max_allele):
# check inputs
g = GenotypeArray(g, copy=False)
n_variants, n_samples, ploidy = g.shape
n_alleles = max_allele + 1
# number of populations sampled
r = len(subpops)
n_populations = r
debug('r: %r', r)
# count alleles within each subpopulation
ac = [g.count_alleles(subpop=s, max_allele=max_allele) for s in subpops]
# stack allele counts from each sub-population into a single array
ac = np.dstack(ac)
assert ac.shape == (n_variants, n_alleles, n_populations)
debug('ac: %s, %r', ac.shape, ac)
# count number of alleles called within each population by summing
# allele counts along the alleles dimension
an = np.sum(ac, axis=1)
assert an.shape == (n_variants, n_populations)
debug('an: %s, %r', an.shape, an)
# compute number of individuals sampled from each population
n = an // 2
assert n.shape == (n_variants, n_populations)
debug('n: %s, %r', n.shape, n)
# compute the total number of individuals sampled across all populations
n_total = np.sum(n, axis=1)
assert n_total.shape == (n_variants,)
debug('n_total: %s, %r', n_total.shape, n_total)
# compute the average sample size across populations
n_bar = np.mean(n, axis=1)
assert n_bar.shape == (n_variants,)
debug('n_bar: %s, %r', n_bar.shape, n_bar)
# compute the term n sub C incorporating the coefficient of variation in
# sample sizes
n_C = (n_total - (np.sum(n**2, axis=1) / n_total)) / (r - 1)
assert n_C.shape == (n_variants,)
debug('n_C: %s, %r', n_C.shape, n_C)
# compute allele frequencies within each population
p = ac / an[:, np.newaxis, :]
assert p.shape == (n_variants, n_alleles, n_populations)
debug('p: %s, %r', p.shape, p)
# compute the average sample frequency of each allele
ac_total = np.sum(ac, axis=2)
an_total = np.sum(an, axis=1)
p_bar = ac_total / an_total[:, np.newaxis]
assert p_bar.shape == (n_variants, n_alleles)
debug('p_bar: %s, %r', p_bar.shape, p_bar)
# add in some extra dimensions to enable broadcasting
n_bar = n_bar[:, np.newaxis]
n_C = n_C[:, np.newaxis]
n = n[:, np.newaxis, :]
p_bar = p_bar[:, :, np.newaxis]
# compute the sample variance of allele frequencies over populations
s_squared = (
np.sum(n * ((p - p_bar) ** 2),
axis=2) /
(n_bar * (r - 1))
)
assert s_squared.shape == (n_variants, n_alleles)
debug('s_squared: %s, %r', s_squared.shape, s_squared)
# remove extra dimensions for correct broadcasting
p_bar = p_bar[:, :, 0]
# compute the average heterozygosity over all populations
# N.B., take only samples in subpops of interest
gs = g.take(list(itertools.chain(*subpops)), axis=1)
h_bar = [gs.count_het(allele=allele, axis=1) / n_total
for allele in range(n_alleles)]
h_bar = np.column_stack(h_bar)
assert h_bar.shape == (n_variants, n_alleles)
debug('h_bar: %s, %r', h_bar.shape, h_bar)
# now comes the tricky bit...
# component of variance between populations
a = ((n_bar / n_C) *
(s_squared -
((1 / (n_bar - 1)) *
((p_bar * (1 - p_bar)) -
((r - 1) * s_squared / r) -
(h_bar / 4)))))
assert a.shape == (n_variants, n_alleles)
# component of variance between individuals within populations
b = ((n_bar / (n_bar - 1)) *
((p_bar * (1 - p_bar)) -
((r - 1) * s_squared / r) -
(((2 * n_bar) - 1) * h_bar / (4 * n_bar))))
assert b.shape == (n_variants, n_alleles)
# component of variance between gametes within individuals
c = h_bar / 2
assert c.shape == (n_variants, n_alleles)
return a, b, c
def hudson_fst(ac1, ac2, fill=np.nan):
"""Calculate the numerator and denominator for Fst estimation using the
method of Hudson (1992) elaborated by Bhatia et al. (2013).
Parameters
----------
ac1 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the first population.
ac2 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the second population.
fill : float
Use this value where there are no pairs to compare (e.g.,
all allele calls are missing).
Returns
-------
num : ndarray, float, shape (n_variants,)
Divergence between the two populations minus average
of diversity within each population.
den : ndarray, float, shape (n_variants,)
Divergence between the two populations.
Examples
--------
Calculate numerator and denominator for Fst estimation::
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 0], [1, 1], [1, 1]],
... [[0, 1], [0, 1], [0, 1], [0, 1]],
... [[0, 0], [0, 0], [0, 0], [0, 0]],
... [[0, 1], [1, 2], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [0, 1], [-1, -1]]])
>>> subpops = [[0, 1], [2, 3]]
>>> ac1 = g.count_alleles(subpop=subpops[0])
>>> ac2 = g.count_alleles(subpop=subpops[1])
>>> num, den = allel.stats.hudson_fst(ac1, ac2)
>>> num
array([ 1. , -0.16666667, 0. , -0.125 , -0.33333333])
>>> den
array([ 1. , 0.5 , 0. , 0.625, 0.5 ])
Estimate Fst for each variant individually::
>>> fst = num / den
>>> fst
array([ 1. , -0.33333333, nan, -0.2 , -0.66666667])
Estimate Fst averaging over variants::
>>> fst = np.sum(num) / np.sum(den)
>>> fst
0.1428571428571429
""" # flake8: noqa
# check inputs
ac1 = asarray_ndim(ac1, 2)
ac2 = asarray_ndim(ac2, 2)
check_dim0_aligned(ac1, ac2)
ac1, ac2 = ensure_dim1_aligned(ac1, ac2)
# calculate these once only
an1 = np.sum(ac1, axis=1)
an2 = np.sum(ac2, axis=1)
# calculate average diversity (a.k.a. heterozygosity) within each
# population
within = (mean_pairwise_difference(ac1, an1, fill=fill) +
mean_pairwise_difference(ac2, an2, fill=fill)) / 2
# calculate divergence (a.k.a. heterozygosity) between each population
between = mean_pairwise_difference_between(ac1, ac2, an1, an2, fill=fill)
# define numerator and denominator for Fst calculations
num = between - within
den = between
return num, den
def patterson_fst(aca, acb):
"""Estimator of differentiation between populations A and B based on the
F2 parameter.
Parameters
----------
aca : array_like, int, shape (n_variants, 2)
Allele counts for population A.
acb : array_like, int, shape (n_variants, 2)
Allele counts for population B.
Returns
-------
num : ndarray, shape (n_variants,), float
Numerator.
den : ndarray, shape (n_variants,), float
Denominator.
Notes
-----
See Patterson (2012), Appendix A.
TODO check if this is numerically equivalent to Hudson's estimator.
"""
from allel.stats.admixture import patterson_f2, h_hat
num = patterson_f2(aca, acb)
den = num + h_hat(aca) + h_hat(acb)
return num, den
def windowed_weir_cockerham_fst(pos, g, subpops, size=None, start=None,
stop=None, step=None, windows=None,
fill=np.nan, max_allele=None):
"""Estimate average Fst in windows over a single chromosome/contig,
following the method of Weir and Cockerham (1984).
Parameters
----------
pos : array_like, int, shape (n_items,)
Variant positions, using 1-based coordinates, in ascending order.
g : array_like, int, shape (n_variants, n_samples, ploidy)
Genotype array.
subpops : sequence of sequences of ints
Sample indices for each subpopulation.
size : int
The window size (number of bases).
start : int, optional
The position at which to start (1-based).
stop : int, optional
The position at which to stop (1-based).
step : int, optional
The distance between start positions of windows. If not given,
defaults to the window size, i.e., non-overlapping windows.
windows : array_like, int, shape (n_windows, 2), optional
Manually specify the windows to use as a sequence of (window_start,
window_stop) positions, using 1-based coordinates. Overrides the
size/start/stop/step parameters.
fill : object, optional
The value to use where there are no variants within a window.
max_allele : int, optional
The highest allele index to consider.
Returns
-------
fst : ndarray, float, shape (n_windows,)
Average Fst in each window.
windows : ndarray, int, shape (n_windows, 2)
The windows used, as an array of (window_start, window_stop) positions,
using 1-based coordinates.
counts : ndarray, int, shape (n_windows,)
Number of variants in each window.
"""
# compute values per-variant
a, b, c = weir_cockerham_fst(g, subpops, max_allele=max_allele)
# define the statistic to compute within each window
def average_fst(wa, wb, wc):
return np.nansum(wa) / (np.nansum(wa) + np.nansum(wb) + np.nansum(wc))
# calculate average Fst in windows
fst, windows, counts = windowed_statistic(pos, values=(a, b, c),
statistic=average_fst,
size=size, start=start,
stop=stop, step=step,
windows=windows, fill=fill)
return fst, windows, counts
def windowed_hudson_fst(pos, ac1, ac2, size=None, start=None, stop=None,
step=None, windows=None, fill=np.nan):
"""Estimate average Fst in windows over a single chromosome/contig,
following the method of Hudson (1992) elaborated by Bhatia et al. (2013).
Parameters
----------
pos : array_like, int, shape (n_items,)
Variant positions, using 1-based coordinates, in ascending order.
ac1 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the first population.
ac2 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the second population.
size : int, optional
The window size (number of bases).
start : int, optional
The position at which to start (1-based).
stop : int, optional
The position at which to stop (1-based).
step : int, optional
The distance between start positions of windows. If not given,
defaults to the window size, i.e., non-overlapping windows.
windows : array_like, int, shape (n_windows, 2), optional
Manually specify the windows to use as a sequence of (window_start,
window_stop) positions, using 1-based coordinates. Overrides the
size/start/stop/step parameters.
fill : object, optional
The value to use where there are no variants within a window.
Returns
-------
fst : ndarray, float, shape (n_windows,)
Average Fst in each window.
windows : ndarray, int, shape (n_windows, 2)
The windows used, as an array of (window_start, window_stop) positions,
using 1-based coordinates.
counts : ndarray, int, shape (n_windows,)
Number of variants in each window.
"""
# compute values per-variants
num, den = hudson_fst(ac1, ac2)
# define the statistic to compute within each window
def average_fst(wn, wd):
return np.nansum(wn) / np.nansum(wd)
# calculate average Fst in windows
fst, windows, counts = windowed_statistic(pos, values=(num, den),
statistic=average_fst,
size=size, start=start,
stop=stop, step=step,
windows=windows, fill=fill)
return fst, windows, counts
def windowed_patterson_fst(pos, ac1, ac2, size=None, start=None, stop=None,
step=None, windows=None, fill=np.nan):
"""Estimate average Fst in windows over a single chromosome/contig,
following the method of Patterson (2012).
Parameters
----------
pos : array_like, int, shape (n_items,)
Variant positions, using 1-based coordinates, in ascending order.
ac1 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the first population.
ac2 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the second population.
size : int, optional
The window size (number of bases).
start : int, optional
The position at which to start (1-based).
stop : int, optional
The position at which to stop (1-based).
step : int, optional
The distance between start positions of windows. If not given,
defaults to the window size, i.e., non-overlapping windows.
windows : array_like, int, shape (n_windows, 2), optional
Manually specify the windows to use as a sequence of (window_start,
window_stop) positions, using 1-based coordinates. Overrides the
size/start/stop/step parameters.
fill : object, optional
The value to use where there are no variants within a window.
Returns
-------
fst : ndarray, float, shape (n_windows,)
Average Fst in each window.
windows : ndarray, int, shape (n_windows, 2)
The windows used, as an array of (window_start, window_stop) positions,
using 1-based coordinates.
counts : ndarray, int, shape (n_windows,)
Number of variants in each window.
"""
# compute values per-variants
num, den = patterson_fst(ac1, ac2)
# define the statistic to compute within each window
def average_fst(wn, wd):
return np.nansum(wn) / np.nansum(wd)
# calculate average Fst in windows
fst, windows, counts = windowed_statistic(pos, values=(num, den),
statistic=average_fst,
size=size, start=start,
stop=stop, step=step,
windows=windows, fill=fill)
return fst, windows, counts
def blockwise_weir_cockerham_fst(g, subpops, blen, max_allele=None):
"""Estimate average Fst and standard error using the block-jackknife.
Parameters
----------
g : array_like, int, shape (n_variants, n_samples, ploidy)
Genotype array.
subpops : sequence of sequences of ints
Sample indices for each subpopulation.
blen : int
Block size (number of variants).
max_allele : int, optional
The highest allele index to consider.
Returns
-------
fst : float
Estimated value of the statistic using all data.
se : float
Estimated standard error.
vb : ndarray, float, shape (n_blocks,)
Value of the statistic in each block.
vj : ndarray, float, shape (n_blocks,)
Values of the statistic from block-jackknife resampling.
"""
# calculate per-variant values
a, b, c = weir_cockerham_fst(g, subpops, max_allele=max_allele)
# calculate overall estimate
a_sum = np.nansum(a)
b_sum = np.nansum(b)
c_sum = np.nansum(c)
fst = a_sum / (a_sum + b_sum + c_sum)
# compute the numerator and denominator within each block
num_bsum = moving_statistic(a, statistic=np.nansum, size=blen)
den_bsum = moving_statistic(a + b + c, statistic=np.nansum, size=blen)
# calculate the statistic values in each block
vb = num_bsum / den_bsum
# estimate standard error
_, se, vj = jackknife((num_bsum, den_bsum),
statistic=lambda n, d: np.sum(n) / np.sum(d))
return fst, se, vb, vj
def blockwise_hudson_fst(ac1, ac2, blen):
"""Estimate average Fst between two populations and standard error using
the block-jackknife.
Parameters
----------
ac1 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the first population.
ac2 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the second population.
blen : int
Block size (number of variants).
Returns
-------
fst : float
Estimated value of the statistic using all data.
se : float
Estimated standard error.
vb : ndarray, float, shape (n_blocks,)
Value of the statistic in each block.
vj : ndarray, float, shape (n_blocks,)
Values of the statistic from block-jackknife resampling.
"""
# calculate per-variant values
num, den = hudson_fst(ac1, ac2, fill=np.nan)
# calculate overall estimate
fst = np.nansum(num) / np.nansum(den)
# compute the numerator and denominator within each block
num_bsum = moving_statistic(num, statistic=np.nansum, size=blen)
den_bsum = moving_statistic(den, statistic=np.nansum, size=blen)
# calculate the statistic values in each block
vb = num_bsum / den_bsum
# estimate standard error
_, se, vj = jackknife((num_bsum, den_bsum),
statistic=lambda n, d: np.sum(n) / np.sum(d))
return fst, se, vb, vj
def blockwise_patterson_fst(ac1, ac2, blen):
"""Estimate average Fst between two populations and standard error using
the block-jackknife.
Parameters
----------
ac1 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the first population.
ac2 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the second population.
blen : int
Block size (number of variants).
Returns
-------
fst : float
Estimated value of the statistic using all data.
se : float
Estimated standard error.
vb : ndarray, float, shape (n_blocks,)
Value of the statistic in each block.
vj : ndarray, float, shape (n_blocks,)
Values of the statistic from block-jackknife resampling.
"""
# calculate per-variant values
num, den = patterson_fst(ac1, ac2)
# calculate overall estimate
fst = np.nansum(num) / np.nansum(den)
# compute the numerator and denominator within each block
num_bsum = moving_statistic(num, statistic=np.nansum, size=blen)
den_bsum = moving_statistic(den, statistic=np.nansum, size=blen)
# calculate the statistic values in each block
vb = num_bsum / den_bsum
# estimate standard error
_, se, vj = jackknife((num_bsum, den_bsum),
statistic=lambda n, d: np.sum(n) / np.sum(d))
return fst, se, vb, vj
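# Minimal usage sketch for the block-jackknife estimators above (toy random
# allele counts; block length chosen only for illustration):
#
#     import numpy as np
#     ac1 = np.random.randint(0, 5, size=(1000, 2))
#     ac2 = np.random.randint(0, 5, size=(1000, 2))
#     fst, se, vb, vj = blockwise_hudson_fst(ac1, ac2, blen=100)
#     # fst is the ratio-of-sums estimate over all variants, se its block-jackknife
#     # standard error, vb the per-block values, vj the leave-one-block-out values.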
|
StarcoderdataPython
|
1607594
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
author="<NAME>",
author_email="<EMAIL>",
name="temporal-infinities",
version="0.1.0",
description="Infinities for datetime, date and timedelta.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/alexhill/temporal-infinities",
py_modules=["temporal_infinities"],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
],
)
|
StarcoderdataPython
|
4814913
|
<reponame>rh01/Deep-reinforcement-learning-with-pytorch
import argparse
import pickle
from collections import namedtuple
import os
import numpy as np
import matplotlib.pyplot as plt
import torch
def discount(sequence, Gamma = 0.99):
R = 0
reward = []
for r in sequence[::-1]:
R = r + Gamma * R
reward.insert(0, R)
return reward
def makedir():
if not os.path.exists('./exp'):
os.makedirs('./exp/model')
os.makedirs('./exp/logs')
def save_model(model, iteration_time):
path = './exp/model/model'+str(iteration_time)+'.pkl'
torch.save(model.state_dict(), path)
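# Worked example for discount() above (hand-checked with Gamma=0.5):
# discount([1, 0, 2], Gamma=0.5) == [1.5, 1.0, 2.0]
# because R_2 = 2, R_1 = 0 + 0.5*2 = 1.0, and R_0 = 1 + 0.5*1.0 = 1.5.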
|
StarcoderdataPython
|
3380917
|
import logging
from django.contrib import messages
from django.shortcuts import redirect
from django.utils.translation import ugettext_lazy as _
from django.views.generic import FormView
from pretix.base.models import Order
from pretix.base.services.mail import mail
from pretix.control.permissions import EventPermissionRequiredMixin
from . import forms
logger = logging.getLogger('pretix.plugins.sendmail')
class SenderView(EventPermissionRequiredMixin, FormView):
template_name = 'pretixplugins/sendmail/send_form.html'
permission = 'can_change_orders'
form_class = forms.MailForm
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['event'] = self.request.event
return kwargs
def form_valid(self, form):
orders = Order.objects.current.filter(
event=self.request.event, status__in=form.cleaned_data['sendto']
).select_related("user")
users = set([o.user for o in orders])
for u in users:
mail(u, form.cleaned_data['subject'], form.cleaned_data['message'],
None, self.request.event)
messages.success(self.request, _('Your message will be sent to the selected users.'))
return redirect(
'plugins:sendmail:send',
event=self.request.event.slug,
organizer=self.request.event.organizer.slug
)
|
StarcoderdataPython
|
1767802
|
from utils.solution_base import SolutionBase
class Solution(SolutionBase):
def solve(self, part_num: int):
self.test_runner(part_num)
func = getattr(self, f"part{part_num}")
result = func(self.data)
return result
def test_runner(self, part_num):
test_inputs = self.get_test_input()
test_results = self.get_test_result(part_num)
test_counter = 1
func = getattr(self, f"part{part_num}")
for i, r in zip(test_inputs, test_results):
if len(r):
if (tr := str(func(i))) == r[0]:
print(f"test {test_counter} passed")
else:
print(f"your result: {tr}")
print(f"test answer: {r[0]}")
print(f"test {test_counter} NOT passed")
test_counter += 1
print()
def part1(self, data):
points = [tuple(map(int, line.split(", "))) for line in data]
areas = [0] * len(points)
edges = set()
x_range = range(min(points, key=lambda p: p[0])[0], max(points, key=lambda p: p[0])[0] + 1)
y_range = range(min(points, key=lambda p: p[1])[1], max(points, key=lambda p: p[1])[1] + 1)
for y in y_range:
for x in x_range:
distances = [abs(x - p[0]) + abs(y - p[1]) for p in points]
min_distance = min(distances)
if distances.count(min_distance) == 1:
closest_point = distances.index(min_distance)
areas[closest_point] += 1
if x in [min(x_range), max(x_range)] or y in [min(y_range), max(y_range)]:
edges.add(closest_point)
for p in edges:
areas[p] = -1
return max(areas)
def part2(self, data):
points = [tuple(map(int, line.split(", "))) for line in data]
safes = set()
safe_range = 32 if len(data) == 6 else 10000
x_range = range(min(points, key=lambda p: p[0])[0], max(points, key=lambda p: p[0])[0] + 1)
y_range = range(min(points, key=lambda p: p[1])[1], max(points, key=lambda p: p[1])[1] + 1)
for y in y_range:
for x in x_range:
distances = sum([abs(x - p[0]) + abs(y - p[1]) for p in points])
if distances < safe_range:
safes.add((x, y))
return len(safes)
|
StarcoderdataPython
|
4815445
|
<reponame>Wikunia/hakank
#!/usr/bin/python -u
# -*- coding: latin-1 -*-
#
# Coins grid problem in Z3
#
# Problem from
# <NAME>: "A coin puzzle - SVOR-contest 2007"
# http://www.svor.ch/competitions/competition2007/AsroContestSolution.pdf
# '''
# In a quadratic grid (or a larger chessboard) with 31x31 cells, one should
# place coins in such a way that the following conditions are fulfilled:
# 1. In each row exactly 14 coins must be placed.
# 2. In each column exactly 14 coins must be placed.
# 3. The sum of the quadratic horizontal distance from the main diagonal
# of all cells containing a coin must be as small as possible.
# 4. In each cell at most one coin can be placed.
# The description says to place 14x31 = 434 coins on the chessboard each row
# containing 14 coins and each column also containing 14 coins.
# '''
#
# Original problem is:
# n = 7 # 31 # The grid size
# c = 4 # 14 # Number of coins per row/column
# which a traditional MIP solver solves in millis.
#
#
# Note that using Optimize() is much slower than using Solver():
# for n=7, c=4:
# Optimize takes 13.5s
# Solver takes 0.45s!
#
# Tested with SolverFor("LIA") and it's faster:
# 7,4: 0.45s (about the same as for Solver())
# 10,6: 0.69s
# 15,10: 3.1s
#
# This Z3 model was written by <NAME> (<EMAIL>)
# See also my Z3 page: http://hakank.org/z3/
#
#
from z3_utils_hakank import *
# Using Optimize(): slow
def coins_grid_optimize(n=7, c= 4):
# set_option('smt.arith.solver', 3)
# set_option("smt.arith.euclidean_solver",True)
sol = Optimize()
# sol = OptimizeFor("LIA")
# n = 7 # 31 # The grid size
# c = 4 # 14 # Number of coins per row/column
x = {}
for i in range(n):
for j in range(n):
x[(i,j)] = Int("x[%i,%i]" % (i,j))
sol.add(x[(i,j)] >= 0)
sol.add(x[(i,j)] <= 1)
# rows and columns == c
for i in range(n):
sol.add(c == Sum([x[(i,j)] for j in range(n)]))
sol.add(c == Sum([x[(j,i)] for j in range(n)]))
z = Int("z")
    # quadratic horizontal distance var
sol.add(z == Sum([x[(i, j)] * (i - j) * (i - j) for i in range(n) for j in range(n)]))
sol.minimize(z)
if sol.check() == sat:
mod = sol.model()
print("diff=",mod.evaluate(z))
for i in range(n):
for j in range(n):
print(mod.evaluate(x[(i, j)]),end=" ")
print()
print()
else:
print("No solution")
#
# Using Solver() and handling the optimization step is _much_ faster.
#
def coins_grid_solver(n=7,c=4):
# sol = Solver()
sol = SolverFor("LIA") # This is much faster still,
x = {}
for i in range(n):
for j in range(n):
x[(i,j)] = Int("x[%i,%i]" % (i,j))
sol.add(x[(i,j)] >= 0)
sol.add(x[(i,j)] <= 1)
# rows and columns == c
for i in range(n):
sol.add(c == Sum([x[(i,j)] for j in range(n)]))
sol.add(c == Sum([x[(j,i)] for j in range(n)]))
z = Int("z")
    # quadratic horizontal distance var
sol.add(z == Sum([x[(i, j)] * (i - j) * (i - j) for i in range(n) for j in range(n)]))
while sol.check() == sat:
mod = sol.model()
print("diff=",mod.evaluate(z))
for i in range(n):
for j in range(n):
print(mod.evaluate(x[(i, j)]),end=" ")
print()
print()
getLessSolution(sol,mod,z)
coins_grid_optimize(7,4)
# coins_grid_solver(7,4)
# coins_grid_solver(31,14) # still too slow
# coins_grid_solver(10,6)
# coins_grid_solver(15,10)
|
StarcoderdataPython
|
3236018
|
<gh_stars>1000+
# Copyright 2021 Huawei, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from neutron_lib.api.definitions import local_ip as local_ip_apidef
from neutron_lib.api import extensions as api_extensions
from neutron_lib.plugins import directory
from neutron_lib.services import base as service_base
from neutron.api import extensions
from neutron.api.v2 import base
PLUGIN_TYPE = 'LOCAL_IP'
class Local_ip(api_extensions.APIExtensionDescriptor):
"""Extension class supporting Local IPs."""
api_definition = local_ip_apidef
@classmethod
def get_resources(cls):
plugin = directory.get_plugin(PLUGIN_TYPE)
collection_name = local_ip_apidef.COLLECTION_NAME.replace('_', '-')
params = local_ip_apidef.RESOURCE_ATTRIBUTE_MAP.get(
local_ip_apidef.COLLECTION_NAME, dict())
controller = base.create_resource(collection_name,
local_ip_apidef.RESOURCE_NAME,
plugin, params,
allow_bulk=True,
allow_pagination=True,
allow_sorting=True)
ext = extensions.ResourceExtension(collection_name, controller,
attr_map=params)
resources = [ext]
for collection_name in local_ip_apidef.SUB_RESOURCE_ATTRIBUTE_MAP:
resource_name = local_ip_apidef.LOCAL_IP_ASSOCIATION
parent = local_ip_apidef.SUB_RESOURCE_ATTRIBUTE_MAP[
collection_name].get('parent')
params = local_ip_apidef.SUB_RESOURCE_ATTRIBUTE_MAP[
collection_name].get('parameters')
controller = base.create_resource(collection_name, resource_name,
plugin, params,
allow_bulk=True,
parent=parent,
allow_pagination=True,
allow_sorting=True)
resource = extensions.ResourceExtension(
collection_name,
controller, parent,
attr_map=params)
resources.append(resource)
return resources
class LocalIPPluginBase(service_base.ServicePluginBase, metaclass=abc.ABCMeta):
@classmethod
def get_plugin_type(cls):
return PLUGIN_TYPE
def get_plugin_description(self):
return "Local IP Service Plugin"
@abc.abstractmethod
def create_local_ip(self, context, local_ip):
pass
@abc.abstractmethod
def update_local_ip(self, context, lip_id, local_ip):
pass
@abc.abstractmethod
def get_local_ip(self, context, lip_id, fields=None):
pass
@abc.abstractmethod
def get_local_ips(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
pass
@abc.abstractmethod
def delete_local_ip(self, context, lip_id):
pass
@abc.abstractmethod
def create_local_ip_port_association(self, context, local_ip_id,
port_association):
pass
@abc.abstractmethod
def get_local_ip_port_association(self, context, fixed_port_id,
local_ip_id, fields=None):
pass
@abc.abstractmethod
def get_local_ip_port_associations(self, context, local_ip_id,
filters=None, fields=None,
sorts=None, limit=None,
marker=None, page_reverse=False):
pass
@abc.abstractmethod
def delete_local_ip_port_association(self, context, fixed_port_id,
local_ip_id):
pass
|
StarcoderdataPython
|
1667802
|
<gh_stars>1-10
from rest_framework import serializers
from .models import NotificationUser , Notification
from push_notifications.models import GCMDevice
class NotificationUserSerializer(serializers.ModelSerializer):
class Meta:
model = NotificationUser
fields = '__all__'
class NotificationUserRelatedField(serializers.RelatedField):
def get_queryset(self):
return NotificationUser.objects.all()
def to_internal_value(self , data):
user , created = NotificationUser.objects.get_or_create(username = data['username'] , fcmkey = data['fcmkey'] ,
email = data['email'],first_name = data['first_name'] ,
last_name = data['last_name'])
if created:
fcm_device , created = GCMDevice.objects.get_or_create(registration_id=user.fcmkey , name=user.device_name ,device_id=user.device_id , cloud_message_type="FCM" , user=user)
return user
def to_representation(self, value):
serializer = NotificationUserSerializer(value)
return serializer.data
class NotificationSerializer(serializers.ModelSerializer):
title = serializers.CharField(required=False)
body = serializers.CharField(required=False)
extra = serializers.JSONField()
createdAt = serializers.SerializerMethodField(method_name='get_created_at')
updatedAt = serializers.SerializerMethodField(method_name='get_updated_at')
userList = NotificationUserRelatedField(many=True , required=False , source='users')
class Meta:
model = Notification
fields = (
'title',
'body',
'extra',
'createdAt',
'updatedAt',
'userList'
)
def create(self , validated_data):
title = validated_data['title']
body = validated_data['body']
extra = validated_data['extra']
users = validated_data['users']
print(validated_data)
notification = Notification.objects.create(title=title , body=body , extra=extra)
if users is not None:
for user in users:
notification.users.add(user.pk)
return notification
def get_created_at(self , instance):
return instance.created_at.isoformat()
def get_updated_at(self, instance):
return instance.updated_at.isoformat()
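# Hypothetical request payload for the serializer above (all field values are
# invented; only the keys read by to_internal_value() and create() are shown):
#
#     payload = {
#         "title": "Build finished",
#         "body": "Pipeline passed",
#         "extra": {"level": "info"},
#         "userList": [{"username": "alice", "fcmkey": "fcm-token-123",
#                       "email": "alice@example.com", "first_name": "Alice",
#                       "last_name": "Example"}],
#     }
#     serializer = NotificationSerializer(data=payload)
#     if serializer.is_valid():
#         notification = serializer.save()   # dispatches to create() above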
|
StarcoderdataPython
|
40159
|
<reponame>KaroliShp/Quantumformatics
import pytest
from pytest_mock import mocker
from hamcrest import *
import numpy as np
from src.dirac_notation.ket import Ket
from src.dirac_notation.bra import Bra
from src.dirac_notation.matrix import Matrix
@pytest.mark.parametrize('input_1,input_2,expected_output_1,expected_output_2', [
(
Bra([1,1]),
[
Bra([1,0]), Bra([0,1])
],
True,
[
1, 1
]
), (
Bra([1,1]),
[
Bra([1,0]), Bra([2,0])
],
False, 0
)
])
def test_linear_combination(input_1, input_2, expected_output_1, expected_output_2):
output = input_1.linear_combination(input_2)
assert_that(output[0], equal_to(expected_output_1))
np.testing.assert_array_equal(output[1], expected_output_2)
# Magic methods
@pytest.mark.parametrize('input_1,input_2,expected_output', [
(
Bra([1, 0]), Ket([1, 0]), 1
), (
Bra([0, 1]), Ket([1, 0]), 0
),(
Bra([1, 0]), Matrix([[1, 0],[0, 1]]), Bra([1, 0])
)
])
def test_mul(input_1, input_2, expected_output):
output = input_1 * input_2
assert_that(output, equal_to(expected_output))
@pytest.mark.parametrize('input_1,input_2,expected_output', [
(
Ket([1,0]), Bra([0, 1]), Matrix([[0, 1],[0, 0]])
)
])
def test_rmul(input_1, input_2, expected_output):
output = input_1 * input_2
assert_that(output, equal_to(expected_output))
|
StarcoderdataPython
|
1650680
|
import numpy as np
from abc import ABC, abstractmethod
from .model import Model
from ..util.metrics import mse, mse_prime
class Layer(ABC):
def __init__(self):
self.input = None
self.output = None
@abstractmethod
def forward(self, input):
raise NotImplementedError
@abstractmethod
    def backward(self, output_error, learning_rate):
raise NotImplementedError
class Dense(Layer):
def __init__(self, input_size, output_size):
"""Fully Connected layer"""
self.weights = np.random.rand(input_size, output_size) - 0.5
self.bias = np.zeros((1, output_size))
def set_weights(self, weights, bias):
if weights.shape != self.weights.shape:
raise ValueError(f"Shapes mismatch {weights.shape} and {self.weights.shape}")
if bias.shape != self.bias.shape:
raise ValueError(f"Shapes mismatch {weights.shape} and {self.weights.shape}")
self.weights = weights
self.bias = bias
def forward(self, input_data):
self.input = input_data
self.output = np.dot(self.input, self.weights) + self.bias
return self.output
def backward(self, output_error, learning_rate):
"""
        Computes dE/dW, dE/dB for a given output_error=dE/dY.
        Returns input_error=dE/dX to feed the previous layer.
"""
        # Compute the weights error dE/dW = X.T * dE/dY
weights_error = np.dot(self.input.T, output_error)
# Compute the bias error dE/dB = dE/dY
bias_error = np.sum(output_error, axis=0)
# Error dE/dX to pass on to the previous layer
input_error = np.dot(output_error, self.weights.T)
# Update parameters
self.weights -= learning_rate*weights_error
self.bias -= learning_rate*bias_error
return input_error
class Activation(Layer):
def __init__(self, activation):
self.activation = activation
    def forward(self, input_data):
self.input = input_data
self.output = self.activation(self.input)
return self.output
def backward(self, output_error, learning_rate):
        # learning_rate is not used because there are no learnable parameters.
        # Only pass the error back to the previous layer.
return np.multiply(self.activation.prime(self.input), output_error)
class NN(Model):
def __init__(self, epochs=1000, lr=0.001, verbose=True):
self.epochs = epochs
self.lr = lr
self.verbose = verbose
self.layers = []
self.loss = mse
self.loss_prime = mse_prime
def fit(self, dataset):
X, y = dataset.getXy()
self.dataset = dataset
self.history = dict()
for epoch in range(self.epochs):
output = X
# forward propagation
for layer in self.layers:
output = layer.forward(output)
# backward propagation
error = self.loss_prime(y, output)
for layer in reversed(self.layers):
error = layer.backward(error, self.lr)
# calculate average error
err = self.loss(y, output)
self.history[epoch] = err
if self.verbose:
print(f'epoch {epoch + 1}/{self.epochs} error={err}')
if not self.verbose:
print(f'error={err}')
self.is_fitted = True
def add(self, layer):
self.layers.append(layer)
def predict(self, x):
self.is_fitted = True
output = x
for layer in self.layers:
output = layer.forward(output)
return output
def cost(self, X=None, y=None):
assert self.is_fitted, 'Model must be fit before predict'
X = X if X is not None else self.dataset.X
y = y if y is not None else self.dataset.Y
output = self.predict(X)
return self.loss(y, output)
class Conv2D:
...
class Pooling2D(Layer):
def __init__(self, size=2, stride=2):
self.size = size
self.stride = stride
def pool(self):
pass
def dpool(self):
pass
def forward(self, input):
self.X_shape = input.shape
n, h, w, d = input.shape
        h_out = (h - self.size) / self.stride + 1
        w_out = (w - self.size) / self.stride + 1
        if not w_out.is_integer() or not h_out.is_integer():
            raise Exception('Invalid output dimension!')
h_out, w_out = int(h_out), int(w_out)
X_reshaped = input.reshape(n*d, h, w, 1)
self.X_col = im2col(X_reshaped, self.size, padding=0, stride=self.stride)
out, self.max_idx = self.pool(self.X_col)
out = out.reshape(h_out, w_out, n, d)
out = out.transpose(3, 2, 0, 1)
return out
    def backward(self, output_error, learning_rate):
        n, h, w, d = self.X_shape
dX_col = np.zeros_like(self.X_col)
dout_col = output_error.transpose(1, 2, 3, 0).ravel()
dX = self.dpool(dX_col, dout_col, self.max_idx)
dX = self.col2im(dX, (n*d, h, w, 1),
self.size, self.size, padding=0, stride=self.stride)
dX = dX.reshape(self.X_shape)
return dX
class MaxPooling(Pooling2D):
def __init__(self, size=2, stride=2):
self.size = size
self.stride = stride
def pool(self):
pass
def dpool(self):
pass
def forward(self, input):
pass
    def backward(self, output_error, learning_rate):
pass
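# Minimal usage sketch for the layers above (a hand-rolled hypothetical setup:
# it assumes a dataset object exposing getXy() as used by NN.fit, and an
# activation object exposing prime() as used by Activation.backward):
#
#     net = NN(epochs=500, lr=0.01, verbose=False)
#     net.add(Dense(input_size=2, output_size=4))
#     net.add(Activation(sigmoid))          # hypothetical activation with .prime()
#     net.add(Dense(input_size=4, output_size=1))
#     net.fit(dataset)                      # hypothetical Dataset with getXy()
#     preds = net.predict(dataset.X)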
|
StarcoderdataPython
|
3347023
|
"""
This top-level module conditionally imports some other sub modules in a way that
tracks their third party deps
"""
# Local
from . import setup_tools
from .import_tracker import track_module
from .lazy_import_errors import lazy_import_errors
from .lazy_module import LazyModule
|
StarcoderdataPython
|
1781945
|
<gh_stars>0
import hashlib
def hash_text(text):
return hashlib.md5(text.encode('ascii')).hexdigest()
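# Example (well-known MD5 test vector): hash_text("abc") returns
# '900150983cd24fb0d6963f7d28e17f72'.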
|
StarcoderdataPython
|
1648886
|
<gh_stars>0
# Import necessary modules
import json
import urllib.request, urllib.parse
# Class for the Geocoder API
class Geocoder():
    # Constructor of the class
def __init__(self):
# Initialize variables
self.google_api_key_ = '<KEY>'
self.here_app_id_ = 'Mnswcv5a6ivjZ2XGDR4s'
self.here_app_code_ = 'EZ1qqKVhs452aUBKdUe1Gg'
self.google_geocode_url_ = "https://maps.googleapis.com/maps/api/geocode/json"
self.google_places_url_ = "https://maps.googleapis.com/maps/api/place/details/json"
self.here_geocode_url_ = "https://geocoder.cit.api.here.com/6.2/geocode.json"
self.here_places_url_ = "https://wego.here.com/?x=ep&map="
    # Function that takes a search string and returns the response
def googleGeocoder(self, search=""):
# Create a dictionary to store the parameters used for constructing the request
values = dict()
values['address'] = search
values['key'] = self.google_api_key_
# Parse the parameters
payload = urllib.parse.urlencode(values)
# Create complete URL
complete_url = self.google_geocode_url_ + '?' + payload
# Get the response
response = urllib.request.urlopen(complete_url)
# Read the data
data = response.read()
# Define the encoding
encoding = response.info().get_content_charset('utf-8')
# Decode the JSON data
response_json = json.loads(data.decode(encoding))
return response_json
# Function that takes in the unique place_id and returns the response
def googlePlace(self, place_id=""):
# Create a dictionary to store the parameters used for constructing the request
values = dict()
values['key'] = self.google_api_key_
values['place_id'] = place_id
# Parse the parameters
payload = urllib.parse.urlencode(values)
# Create complete URL
complete_url = self.google_places_url_ + '?' + payload
# Get the response
response = urllib.request.urlopen(complete_url)
# Read the data
data = response.read()
# Define the encoding
encoding = response.info().get_content_charset('utf-8')
# Decode the JSON data
response_json = json.loads(data.decode(encoding))
return response_json
    # Function that takes a search string and returns the response
def hereGeocoder(self, search=""):
# Create a dictionary to store the parameters used for constructing the request
values = dict()
values['app_id'] = self.here_app_id_
values['app_code'] = self.here_app_code_
values['searchtext'] = search
# Parse the parameters
payload = urllib.parse.urlencode(values)
# Create complete URL
complete_url = self.here_geocode_url_ + '?' + payload
# Get the response
response = urllib.request.urlopen(complete_url)
# Read the data
data = response.read()
# Define the encoding
encoding = response.info().get_content_charset('utf-8')
# Decode the JSON data
response_json = json.loads(data.decode(encoding))
return response_json
def herePlace(self, lat="", lng=""):
# Create the complete URL by combining lat and lng values
complete_url = self.here_places_url_ + lat + ',' + lng + ',' + '14'
return complete_url
# Execute the following section if this file is ran
if __name__ == "__main__":
# Create the object
geocdr = Geocoder()
# Define query
query = "San Francisco"
# Call the googleGeocder function
# NOTE: To get position from JSON object follow convention "<response>['results'][0]['geometry']['location']"
# while using Google Maps
response_json = geocdr.googleGeocoder(str(query))
position = response_json['results'][0]['geometry']['location']
# Retrieve the unique place_id
place_id = response_json["results"][0]["place_id"]
# Pass the place_id to googlePlace function
url = geocdr.googlePlace(str(place_id))["result"]["url"]
print("\n{} is located at: ".format(query))
print(position)
print("Visit the URL below to open maps:")
print(url)
print("Found using Google geocoder API.\n")
# Call the hereGeocoder function
# NOTE: To get position from JSON object follow convention "<response>['Response']['View'][0]['Result'][0]['Location']['DisplayPosition']"
# while using Here geocoder
response_json = geocdr.hereGeocoder(str(query))
position = response_json['Response']['View'][0]['Result'][0]['Location']['DisplayPosition']
# Get the latitude and longitude values
lat = position['Latitude']
lng = position['Longitude']
# Pass the coordinates to herePlace function
url = geocdr.herePlace(lat=str(lat), lng=str(lng))
print(position)
print("Visit the URL below to open maps:")
print(url)
print("Found using Here geocoder API.")
|
StarcoderdataPython
|
120458
|
<reponame>benrdavison/brd_mod
import pandas as pd
import sys
import os
from brd_mod.brdstats import *
from brd_mod.brdgeo import *
from brd_mod.brdecon import *
if __name__ == "__main__":
print("test")
|
StarcoderdataPython
|
1632834
|
<reponame>bopopescu/conpaas-1
import unittest
from core import test_agent
from core import test_git
from core import test_clouds
suites = [
unittest.TestLoader().loadTestsFromTestCase(test_agent.TestAgent),
unittest.TestLoader().loadTestsFromTestCase(test_git.TestGit),
unittest.TestLoader().loadTestsFromTestCase(test_clouds.TestCloudsBase),
unittest.TestLoader().loadTestsFromTestCase(test_clouds.TestCloudDummy),
]
alltests = unittest.TestSuite(suites)
alltests.run(unittest.result.TestResult())
|
StarcoderdataPython
|
157137
|
<reponame>bunjdo/bunjgames
from common.consumers import Consumer
from whirligig.models import Game
from whirligig.serializers import GameSerializer
class WhirligigConsumer(Consumer):
@property
def routes(self):
return dict(
next_state=lambda game, from_state: game.next_state(from_state),
change_score=lambda game, connoisseurs_score, viewers_score: game.change_score(
connoisseurs_score, viewers_score),
change_timer=lambda game, paused: game.change_timer(paused),
answer_correct=lambda game, is_correct: game.answer_correct(is_correct),
extra_time=lambda game: game.extra_time(),
)
@property
def game_name(self):
return 'whirligig'
def get_game(self, token):
return Game.objects.get(token=token)
def serialize_game(self, game):
return GameSerializer().to_representation(game)
|
StarcoderdataPython
|
54063
|
<filename>final/160401069/sunucu.py<gh_stars>1-10
#<NAME> - 160401069
import socket
import sys
import datetime
import pickle
host = "127.0.0.1"
port = 142
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host, port))
print("Baglama Basarili")
except :
print("Baglanti hatasi")
sys.exit()
greenwich=datetime.datetime.utcnow()
timee=datetime.datetime.now()
baslangic=[timee.hour,timee.minute,timee.second,timee.microsecond]
utc=timee.hour-greenwich.hour
utc="UTC+" + str(utc)
s.listen(5)
conn, addr = s.accept()
suan=datetime.datetime.now()
liste=[suan,utc]
zaman=pickle.dumps(liste)
conn.send(zaman)
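# Hedged client-side sketch matching the server above (not part of the original
# assignment file; it assumes the same host/port and the pickled
# [datetime, "UTC+N"] payload sent by conn.send above):
#
#     import socket, pickle
#     c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     c.connect(("127.0.0.1", 142))
#     suan, utc = pickle.loads(c.recv(4096))
#     print(suan, utc)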
|
StarcoderdataPython
|
97542
|
from collections.abc import Collection
import regex as re
import numpy as np
from bn.values.array_val import ArrayVal
from bn.values.boolean_val import BooleanVal
from bn.values.double_val import DoubleVal
from bn.values.none_val import NoneVal
from bn.values.relational_val import RelationalVal
from bn.values.set_val import SetVal
from bn.values.string_val import StringVal
from bn.values.custom_val import CustomVal
from bn.values.value import Value
from datastructs.graph import Graph
from utils.py_utils import get_class, Singleton
import logging
from multipledispatch import dispatch
from settings import Settings
dispatch_namespace = dict()
class ValueFactory:
"""
Factory for creating variable values.
"""
_none_value = NoneVal()
_double_pattern = re.compile(r'^[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?$')
_array_pattern = re.compile(r'\[([-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?,\s*)*([-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?)\]')
_set_pattern = re.compile(r'[/\w\-_\.\^\=\s]*([\[\(][/\w\-_,\.\^\=\s\(]+\)*[\]\)])?')
_custom_class_pattern = re.compile(r'^@[^\(\)]*$')
_custom_function_pattern = re.compile(r'^@[^\(\)]+\(.*\)$')
# logger
log = logging.getLogger('PyOpenDial')
@staticmethod
@dispatch(str, namespace=dispatch_namespace)
def create(value):
"""
Creates a new value based on the provided string representation. If the string
contains a numeric value, "true", "false", "None", or opening and closing
brackets, convert it to the appropriate values. Else, returns a string value.
:param value: the string representation for the value
:return: the resulting value
"""
        if value is None:
return NoneVal()
if ValueFactory._double_pattern.search(value):
return DoubleVal(float(value))
elif value.lower() == 'true':
return BooleanVal(True)
elif value.lower() == 'false':
return BooleanVal(False)
elif value.lower() == 'none':
return ValueFactory._none_value
elif ValueFactory._array_pattern.match(value):
value_list = list()
value_str_list = value[1:-1].split(',')
for value_str_item in value_str_list:
value_list.append(float(value_str_item))
return ArrayVal(np.array(value_list))
elif value.startswith('[') and value.endswith(']'):
if Graph.is_relational(value):
relation_value = RelationalVal(value)
if not relation_value.is_empty():
return relation_value
sub_values = list()
for match in ValueFactory._set_pattern.finditer(value[1:-1]):
sub_value = match.group(0).strip()
if len(sub_value) > 0:
sub_values.append(ValueFactory.create(sub_value))
return SetVal(sub_values)
elif ValueFactory._custom_class_pattern.match(value):
class_name = value[1:]
custom_value = get_class(class_name)()
if isinstance(custom_value, Singleton):
return CustomVal(custom_value)
else:
raise ValueError("Custom class should inherit utils.py_utils.Singleton")
elif ValueFactory._custom_function_pattern.match(value):
function_name = value.split("(")[0][1:]
params = value.split("(")[1][:-1].split(",")
params.remove('')
if function_name in Settings._functions:
func = Settings._functions[function_name]
func_result = func(*params)
if isinstance(func_result, float):
return DoubleVal(func_result)
elif isinstance(func_result, bool):
return BooleanVal(func_result)
elif func_result is None:
return ValueFactory._none_value
elif isinstance(func_result, np.ndarray):
return ArrayVal(func_result)
elif isinstance(func_result, set):
return SetVal(func_result)
elif isinstance(func_result, str):
return StringVal(func_result)
else:
raise ValueError("Not supported return type %s" % type(func_result))
else:
raise ValueError("Function %s is not defined." % function_name)
else:
return StringVal(value)
@staticmethod
@dispatch(float, namespace=dispatch_namespace)
def create(value):
"""
Returns a double value given the double
:param value: the float
:return: the value
"""
return DoubleVal(value)
@staticmethod
@dispatch(bool, namespace=dispatch_namespace)
def create(value):
"""
Returns the boolean value given the boolean
:param value: the boolean
:return: the boolean value
"""
return BooleanVal(value)
@staticmethod
@dispatch((list, Collection), namespace=dispatch_namespace)
def create(values):
"""
Returns the set value given the values
:param values: the values
:return: the set value
"""
if len(values) == 0 or isinstance(next(iter(values)), Value):
return SetVal(values)
if isinstance(values[0], float):
return ArrayVal(np.array(values))
@staticmethod
@dispatch(namespace=dispatch_namespace)
def none():
"""
Returns the none value.
:return: the none value
"""
return ValueFactory._none_value
@staticmethod
@dispatch(Value, Value, namespace=dispatch_namespace)
def concatenate(v1, v2):
"""
Returns the concatenation of the two values.
:param v1: the value
:param v2: the value
:return: the concatenation of the two values
"""
if isinstance(v1, StringVal) and isinstance(v2, StringVal):
return str(v1) + ' ' + str(v2)
elif isinstance(v1, NoneVal):
return v2
elif isinstance(v2, NoneVal):
return v1
else:
ValueFactory.log.warning("concatenation not implemented for %s + %s" % (v1, v2))
return NoneVal()
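# Illustrative behaviour of the string-based factory above (toy inputs):
#
#     ValueFactory.create("3.14")        # -> DoubleVal(3.14)
#     ValueFactory.create("true")        # -> BooleanVal(True)
#     ValueFactory.create("None")        # -> NoneVal()
#     ValueFactory.create("[1.0, 2.0]")  # -> ArrayVal(np.array([1.0, 2.0]))
#     ValueFactory.create("hello")       # -> StringVal("hello")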
|
StarcoderdataPython
|
116029
|
# -*- coding: utf-8 -*-
from .compras import (
AdicionarOrcamentoCompraView,
AdicionarPedidoCompraView,
OrcamentoCompraListView,
OrcamentoCompraVencidosListView,
OrcamentoCompraVencimentoHojeListView,
PedidoCompraListView,
PedidoCompraAtrasadosListView,
PedidoCompraEntregaHojeListView,
EditarOrcamentoCompraView,
EditarPedidoCompraView,
GerarPedidoCompraView,
CancelarOrcamentoCompraView,
CancelarPedidoCompraView,
GerarCopiaOrcamentoCompraView,
GerarCopiaPedidoCompraView,
ReceberPedidoCompraView,
GerarPDFOrcamentoCompra,
GerarPDFPedidoCompra
)
from .ajax_views import InfoCompra
|
StarcoderdataPython
|
1689075
|
from .menulistener import MenuListener
__red_end_user_data_statement__ = "No personal data is stored."
def setup(bot):
n = MenuListener(bot)
bot.add_cog(n)
bot.loop.create_task(n.reload())
|
StarcoderdataPython
|
60464
|
from django.contrib.contenttypes.models import ContentType
from nautobot.dcim.models import Site
from nautobot.extras.choices import CustomFieldTypeChoices
from nautobot.extras.jobs import Job
from nautobot.extras.models import CustomField
class TestCreateSiteWithCustomField(Job):
class Meta:
name = "Site and Custom Field Creation"
description = "Site with a custom field"
def run(self, data, commit):
obj_type = ContentType.objects.get_for_model(Site)
cf = CustomField.objects.create(name="cf1", type=CustomFieldTypeChoices.TYPE_TEXT, default="-")
cf.content_types.set([obj_type])
self.log_success(obj=cf, message="CustomField created successfully.")
site_1 = Site.objects.create(name="Test Site One", slug="test-site-one")
site_1.cf[cf.name] = "some-value"
site_1.save()
self.log_success(obj=site_1, message="Created a new site")
site_2 = Site.objects.create(name="Test Site Two", slug="test-site-two")
self.log_success(obj=site_2, message="Created another new site")
return "Job completed."
|
StarcoderdataPython
|
3380036
|
<filename>catkin_ws/src/ros_cap/src/cosa.py
#! /usr/bin/env python3
import copy
import cv2
import numpy as np
from keras.models import load_model
from phue import Bridge
import pygame
import time
import rospy
# General Settings
prediction = ''
action = ''
score = 0
img_counter = 500
# pygame.event.wait()
# Turn on/off the ability to save images
save_images, selected_gesture = False, 'peace'
gesture_names = {0: 'Fist',
1: 'L',
2: 'Okay',
3: 'Palm',
4: 'Peace'}
model = load_model('/home/camilo/Descargas/VGG_cross_validated.h5')
def predict_rgb_image_vgg(image):
image = np.array(image, dtype='float32')
image /= 255
pred_array = model.predict(image)
print(f'pred_array: {pred_array}')
result = gesture_names[np.argmax(pred_array)]
print(f'Result: {result}')
print(max(pred_array[0]))
score = float("%0.2f" % (max(pred_array[0]) * 100))
print(result)
return result, score
# parameters
cap_region_x_begin = 0.5 # start point/total width
cap_region_y_end = 0.8 # start point/total width
threshold = 60 # binary threshold
blurValue = 41 # GaussianBlur parameter
bgSubThreshold = 50
learningRate = 0
# variables
isBgCaptured = 0 # bool, whether the background captured
triggerSwitch = False # if true, keyboard simulator works
def remove_background(frame):
fgmask = bgModel.apply(frame, learningRate=learningRate)
kernel = np.ones((3, 3), np.uint8)
fgmask = cv2.erode(fgmask, kernel, iterations=1)
res = cv2.bitwise_and(frame, frame, mask=fgmask)
return res
# Camera
camera = cv2.VideoCapture(0)
camera.set(10, 200)
def prediccion():
    # These names are module-level state shared with remove_background() and the
    # on-screen text; declare them global since they are reassigned inside this
    # function (otherwise the first read would raise UnboundLocalError).
    global prediction, action, score, img_counter, isBgCaptured, triggerSwitch, bgModel
    while camera.isOpened():
ret, frame = camera.read()
frame = cv2.bilateralFilter(frame, 5, 50, 100) # smoothing filter
frame = cv2.flip(frame, 1) # flip the frame horizontally
cv2.rectangle(frame, (int(cap_region_x_begin * frame.shape[1]), 0),
(frame.shape[1], int(cap_region_y_end * frame.shape[0])), (255, 0, 0), 2)
cv2.imshow('original', frame)
# Run once background is captured
if isBgCaptured == 1:
img = remove_background(frame)
img = img[0:int(cap_region_y_end * frame.shape[0]),
int(cap_region_x_begin * frame.shape[1]):frame.shape[1]] # clip the ROI
# cv2.imshow('mask', img)
# convert the image into binary image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (blurValue, blurValue), 0)
# cv2.imshow('blur', blur)
ret, thresh = cv2.threshold(blur, threshold, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# Add prediction and action text to thresholded image
# cv2.putText(thresh, f"Prediction: {prediction} ({score}%)", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255))
# cv2.putText(thresh, f"Action: {action}", (50, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255)) # Draw the text
# Draw the text
cv2.putText(thresh, f"Prediction: {prediction} ({score}%)", (50, 30), cv2.FONT_HERSHEY_SIMPLEX, 1,
(255, 255, 255))
cv2.putText(thresh, f"Action: {action}", (50, 80), cv2.FONT_HERSHEY_SIMPLEX, 1,
(255, 255, 255)) # Draw the text
cv2.imshow('ori', thresh)
# get the contours
thresh1 = copy.deepcopy(thresh)
_, contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
length = len(contours)
maxArea = -1
if length > 0:
for i in range(length): # find the biggest contour (according to area)
temp = contours[i]
area = cv2.contourArea(temp)
if area > maxArea:
maxArea = area
ci = i
res = contours[ci]
hull = cv2.convexHull(res)
drawing = np.zeros(img.shape, np.uint8)
cv2.drawContours(drawing, [res], 0, (0, 255, 0), 2)
cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 3)
cv2.imshow('output', drawing)
# Keyboard OP
k = cv2.waitKey(10)
if k == 27: # press ESC to exit all windows at any time
break
elif k == ord('b'): # press 'b' to capture the background
bgModel = cv2.createBackgroundSubtractorMOG2(0, bgSubThreshold)
b.set_light(6, on_command)
time.sleep(2)
isBgCaptured = 1
print('Background captured')
elif k == ord('r'): # press 'r' to reset the background
time.sleep(1)
bgModel = None
triggerSwitch = False
isBgCaptured = 0
print('Reset background')
            # will anything happen if we remove this?
elif k == 32:
# If space bar pressed
cv2.imshow('original', frame)
# copies 1 channel BW image to all 3 RGB channels
target = np.stack((thresh,) * 3, axis=-1)
target = cv2.resize(target, (224, 224))
target = target.reshape(1, 224, 224, 3)
prediction, score = predict_rgb_image_vgg(target)
            if prediction == 'Palm':  # it is not detected reliably
pass
elif prediction == 'Fist':
action = 'Adelante'
elif prediction == 'L':
action = 'Izquierda'
elif prediction == 'Okay':
action = 'Derecha'
elif prediction == 'Peace':
action = 'Atrás'
else:
pass
if save_images:
img_name = f"./frames/drawings/drawing_{selected_gesture}_{img_counter}.jpg".format(
img_counter)
cv2.imwrite(img_name, drawing)
print("{} written".format(img_name))
img_name2 = f"./frames/silhouettes/{selected_gesture}_{img_counter}.jpg".format(
img_counter)
cv2.imwrite(img_name2, thresh)
print("{} written".format(img_name2))
img_name3 = f"./frames/masks/mask_{selected_gesture}_{img_counter}.jpg".format(
img_counter)
cv2.imwrite(img_name3, img)
print("{} written".format(img_name3))
img_counter += 1
elif k == ord('t'):
print('Tracker turned on.')
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
# Select Region of Interest (ROI)
r = cv2.selectROI(frame)
# Crop image
imCrop = frame[int(r[1]):int(r[1] + r[3]), int(r[0]):int(r[0] + r[2])]
# setup initial location of window
r, h, c, w = 250, 400, 400, 400
track_window = (c, r, w, h)
# set up the ROI for tracking
roi = imCrop
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
# Setup the termination criteria, either 10 iteration or move by at least 1 pt
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
while (1):
ret, frame = cap.read()
if ret == True:
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
# apply meanshift to get the new location
ret, track_window = cv2.CamShift(dst, track_window, term_crit)
# Draw it on image
pts = cv2.boxPoints(ret)
pts = np.int0(pts)
img2 = cv2.polylines(frame, [pts], True, (0, 255, 0), 2)
cv2.imshow('img2', img2)
k = cv2.waitKey(60) & 0xff
if k == 27: # if ESC key
break
else:
cv2.imwrite(chr(k) + ".jpg", img2)
else:
break
cv2.destroyAllWindows()
return action
cap.release()
|
StarcoderdataPython
|