id (stringlengths 1-7) | text (stringlengths 6-1.03M) | dataset_id (stringclasses: 1 value) |
---|---|---|
1634269
|
from django.contrib.auth import views as auth_views
from django.contrib.auth.forms import PasswordResetForm
from django.core import mail
from django.core.urlresolvers import reverse
from django.urls import resolve
from django.test import TestCase
from django.contrib.auth.tokens import default_token_generator
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from django.contrib.auth.forms import SetPasswordForm
from ..models import User
class PasswordResetTests(TestCase):
def setUp(self):
url = reverse('password_reset')
self.response = self.client.get(url)
def test_status_code(self):
self.assertEquals(self.response.status_code, 200)
def test_view_function(self):
view = resolve('/reset/')
self.assertEquals(view.func.view_class, auth_views.PasswordResetView)
def test_csrf(self):
self.assertContains(self.response, 'csrfmiddlewaretoken')
def test_contains_form(self):
form = self.response.context.get('form')
self.assertIsInstance(form, PasswordResetForm)
def test_form_inputs(self):
"""The view must contain two inputs: csrf and email."""
self.assertContains(self.response, '<input', 2)
self.assertContains(self.response, 'type="email"', 1)
class SuccessfulPasswordResetTests(TestCase):
def setUp(self):
email = '<EMAIL>'
User.objects.create_user(email=email, password='<PASSWORD>')
url = reverse('password_reset')
self.response = self.client.post(url, {'email': email})
def test_redirection(self):
"""A valid form submission should redirect the user to `password_reset_done` view."""
url = reverse('password_reset_done')
self.assertRedirects(self.response, url)
def test_send_password_reset_email(self):
self.assertEqual(1, len(mail.outbox))
class InvalidPasswordResetTests(TestCase):
def setUp(self):
url = reverse('password_reset')
self.response = self.client.post(
url, {'email': '<EMAIL>'})
def test_redirection(self):
"""Even invalid emails in the database should redirect
the user to `password_reset_done` view.
"""
url = reverse('password_reset_done')
self.assertRedirects(self.response, url)
def test_no_reset_email_sent(self):
self.assertEqual(0, len(mail.outbox))
class PasswordResetDoneTests(TestCase):
def setUp(self):
url = reverse('password_reset_done')
self.response = self.client.get(url)
def test_status_code(self):
self.assertEquals(self.response.status_code, 200)
def test_view_function(self):
view = resolve('/reset/done/')
self.assertEquals(view.func.view_class,
auth_views.PasswordResetDoneView)
class PasswordResetConfirmTests(TestCase):
def setUp(self):
user = User.objects.create_user(
email='<EMAIL>', password='<PASSWORD>')
"""
Create a valid password reset token based on how django creates the token internally:
https://github.com/django/django/blob/1.11.5/django/contrib/auth/forms.py#L280
"""
self.uid = urlsafe_base64_encode(force_bytes(user.pk)).decode()
self.token = default_token_generator.make_token(user)
url = reverse('password_reset_confirm', kwargs={
'uidb64': self.uid, 'token': self.token})
self.response = self.client.get(url, follow=True)
def test_status_code(self):
self.assertEquals(self.response.status_code, 200)
def test_view_function(self):
view = resolve(
'/reset/{uidb64}/{token}/'.format(uidb64=self.uid, token=self.token))
self.assertEquals(view.func.view_class,
auth_views.PasswordResetConfirmView)
def test_csrf(self):
self.assertContains(self.response, 'csrfmiddlewaretoken')
def test_contains_form(self):
form = self.response.context.get('form')
self.assertIsInstance(form, SetPasswordForm)
def test_form_inputs(self):
"""The view must contain two inputs: csrf and two password fields."""
self.assertContains(self.response, '<input', 3)
self.assertContains(self.response, 'type="password"', 2)
class PasswordResetCompleteTests(TestCase):
def setUp(self):
url = reverse('password_reset_complete')
self.response = self.client.get(url)
def test_status_code(self):
self.assertEquals(self.response.status_code, 200)
def test_view_function(self):
view = resolve('/reset/complete/')
self.assertEquals(view.func.view_class,
auth_views.PasswordResetCompleteView)
|
StarcoderdataPython
|
1715629
|
<filename>adam/perception/visual_perception.py
import json
from pathlib import Path
from typing import Sequence, Union, List, Mapping, Dict, Optional, Any
from attr import attrs, attrib
from attr.validators import instance_of, deep_iterable, deep_mapping, optional
from immutablecollections import ImmutableSet, ImmutableDict, immutabledict
from immutablecollections.converter_utils import _to_immutableset, _to_immutabledict
from adam.math_3d import Point
from adam.ontology import OntologyNode
from adam.ontology.phase1_ontology import WHITE, BLACK, RED, GREEN, BLUE
from adam.perception import PerceptualRepresentationFrame
from adam.perception.perception_graph_nodes import (
GraphNode,
RgbColorNode,
CategoricalNode,
ContinuousNode,
ObjectStroke,
)
CATEGORY_PROPERTY_KEYS: List[str] = ["texture"]
CONTINUOUS_PROPERTY_KEYS: List[str] = []
STROKE_PROPERTY_KEYS: List[str] = ["mean", "std"]
@attrs(slots=True, frozen=True)
class ClusterPerception:
"""Class to hold an object cluster's perception properties."""
cluster_id: int = attrib(validator=instance_of(int))
viewpoint_id: int = attrib(validator=instance_of(int))
sub_object_id: int = attrib(validator=instance_of(int)) # 0 means not a sub-object
strokes: ImmutableSet[ObjectStroke] = attrib(
validator=deep_iterable(instance_of(ObjectStroke)), converter=_to_immutableset
)
adjacent_strokes: ImmutableDict[
ObjectStroke, ImmutableDict[ObjectStroke, bool]
] = attrib(
validator=deep_mapping(
instance_of(ObjectStroke),
deep_mapping(instance_of(ObjectStroke), instance_of(bool)),
),
converter=_to_immutabledict,
)
properties: ImmutableSet[Union[GraphNode, OntologyNode]] = attrib(
validator=deep_iterable(instance_of((GraphNode, OntologyNode))),
converter=_to_immutableset,
)
centroid_x: Optional[float] = attrib(
validator=optional(instance_of(float)), default=None
)
centroid_y: Optional[float] = attrib(
validator=optional(instance_of(float)), default=None
)
def color_as_category(color_properties: Sequence[int]) -> OntologyNode:
"""Convert RGB values into color categories."""
red = color_properties[0]
green = color_properties[1]
blue = color_properties[2]
if red > 240 and green > 240 and blue > 240:
return WHITE
if red > 128 and green < 128 and blue < 128:
return RED
if red < 128 and green > 128 and blue < 128:
return GREEN
if red < 128 and green < 128 and blue > 128:
return BLUE
return BLACK
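# Illustrative check (the RGB triples are made up, not from the original source):
# color_as_category([250, 245, 248]) returns WHITE because all channels exceed 240,
# while [200, 40, 60] returns RED since only the red channel exceeds 128.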
@attrs(slots=True, frozen=True, repr=False)
class VisualPerceptionFrame(PerceptualRepresentationFrame):
"""
A static snapshot of a visually processed representation of an image.
    This is the default perceptual representation for phase 3 of the ADAM project.
"""
clusters: Sequence[ClusterPerception] = attrib(
validator=deep_iterable(instance_of(ClusterPerception))
)
@staticmethod
def from_mapping(
json_perception: Mapping[str, Any], *, color_is_rgb: bool = False
) -> "VisualPerceptionFrame":
clusters = []
for cluster_map in json_perception["objects"]:
color_property = cluster_map["color"]
strokes_map = cluster_map["stroke graph"]
strokes = [
ObjectStroke(normalized_coordinates=[Point(x, y, 0) for x, y in stroke])
for stroke in strokes_map["strokes normalized coordinates"]
]
adjacency_matrix: Dict[
ObjectStroke, ImmutableDict[ObjectStroke, bool]
] = dict()
for stroke_id, column in enumerate(strokes_map["adjacency matrix"]):
adjacency_matrix[strokes[stroke_id]] = immutabledict(
(strokes[stroke_id_2], bool(val))
for stroke_id_2, val in enumerate(column)
)
properties: List[Union[GraphNode, OntologyNode]] = [
RgbColorNode(
red=color_property[0],
green=color_property[1],
blue=color_property[2],
weight=1.0,
)
if color_is_rgb
else color_as_category(color_property)
]
properties.extend(
CategoricalNode(value=cluster_map[entry], weight=1.0)
for entry in CATEGORY_PROPERTY_KEYS
)
properties.extend(
ContinuousNode(label=entry, value=cluster_map[entry], weight=1.0)
for entry in CONTINUOUS_PROPERTY_KEYS
)
properties.extend(
ContinuousNode(
label=f"stroke-{entry}", value=strokes_map[entry], weight=1.0
)
for entry in STROKE_PROPERTY_KEYS
)
clusters.append(
ClusterPerception(
cluster_id=cluster_map["object name"],
viewpoint_id=cluster_map["viewpoint id"],
sub_object_id=cluster_map["sub-object id"],
strokes=strokes,
adjacent_strokes=adjacency_matrix,
properties=properties,
)
)
return VisualPerceptionFrame(clusters=clusters)
@staticmethod
def from_json_str(
json_str: str, *, color_is_rgb: bool = False
) -> "VisualPerceptionFrame":
json_perception = json.loads(json_str)
return VisualPerceptionFrame.from_mapping(
json_perception, color_is_rgb=color_is_rgb
)
@staticmethod
def from_json(
json_path: Path, *, color_is_rgb: bool = False
) -> "VisualPerceptionFrame":
with open(json_path, encoding="utf-8") as json_file:
json_perception = json.load(json_file)
return VisualPerceptionFrame.from_mapping(
json_perception, color_is_rgb=color_is_rgb
)
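# Minimal usage sketch (the file path below is hypothetical, not part of the
# original module): a phase-3 perception JSON file can be loaded directly.
#
#   frame = VisualPerceptionFrame.from_json(Path("situation_0/perception.json"))
#   for cluster in frame.clusters:
#       print(cluster.cluster_id, len(cluster.strokes))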
|
StarcoderdataPython
|
3208167
|
# Exercise 5.7
from Tkinter import *
root = Tk()
radius = 50
c = Canvas(root, width=400, height=265, bg='gray')
c.pack()
def myDrawOval(x, y, color):
c.create_oval(x, y, x + radius, y + radius,
fill="", outline=color, width=5)
myDrawOval(50, 58, 'blue')
myDrawOval(116, 58, 'black')
myDrawOval(182, 58, 'red')
myDrawOval(84, 90, 'yellow')
myDrawOval(151, 90, 'green')
root.mainloop()
|
StarcoderdataPython
|
3375771
|
##
# Copyright 2015 TFMT UG (haftungsbeschränkt).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from django.conf.urls import include, url
from netboot import views
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^dist/', include('dist.urls', namespace='dist')),
url(r'^menu\.cfg$', views.MenuCfgView.as_view(), name='menu_cfg'),
url(r'^menu\.ipxe$', views.MenuIPXEView.as_view(), name='menu_ipxe'),
url(r'^my/', include('my.urls', namespace='my')),
]
|
StarcoderdataPython
|
1735765
|
<filename>assignments/01/word2vec_utils.py
from collections import Counter
import random
import os
import sys
sys.path.append('..')  # add the parent directory before importing shared helpers
import utils
import zipfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
def read_data(file_path):
""" Read data into a list of tokens
There should be 17,005,207 tokens
"""
with zipfile.ZipFile(file_path) as f:
words = tf.compat.as_str(f.read(f.namelist()[0])).split()
return words
def build_vocab(words, vocab_size, visual_fld):
""" Build vocabulary of VOCAB_SIZE most frequent words and write it to
visualization/vocab.tsv
"""
utils.safe_mkdir(visual_fld)
file = open(os.path.join(visual_fld, 'vocab.tsv'), 'w')
dictionary = dict()
count = [('UNK', -1)]
index = 0
count.extend(Counter(words).most_common(vocab_size - 1))
for word, _ in count:
dictionary[word] = index
index += 1
file.write(word + '\n')
index_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
file.close()
return dictionary, index_dictionary
def convert_words_to_index(words, dictionary):
""" Replace each word in the dataset with its index in the dictionary """
return [dictionary[word] if word in dictionary else 0 for word in words]
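# e.g. with a (hypothetical) dictionary {'UNK': 0, 'the': 1, 'quick': 2},
# convert_words_to_index(['the', 'quick', 'fox'], dictionary) -> [1, 2, 0],
# since out-of-vocabulary words map to the UNK index 0.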
def generate_around_words(index_words, context_window_size):
for index, center in enumerate(index_words[context_window_size: -context_window_size], start=context_window_size):
context_words = index_words[index - context_window_size: index] + \
index_words[index + 1: index + context_window_size + 1]
# assert len(context_words) == 2 * context_window_size
yield (context_words, center)
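# Illustrative example: with index_words = [4, 7, 9, 2, 5] and
# context_window_size = 1, the generator yields ([4, 9], 7), ([7, 2], 9) and
# ([9, 5], 2) -- one (context, center) pair per interior position.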
def most_common_words(visual_fld, num_visualize):
""" create a list of num_visualize most frequent words to visualize on TensorBoard.
saved to visualization/vocab_[num_visualize].tsv
"""
words = open(os.path.join(visual_fld, 'vocab.tsv'), 'r').readlines()[:num_visualize]
words = [word for word in words]
file = open(os.path.join(visual_fld, 'vocab_' + str(num_visualize) + '.tsv'), 'w')
for word in words:
file.write(word)
file.close()
def batch_gen(download_url, expected_byte, vocab_size, batch_size,
skip_window, visual_fld):
local_dest = '../../examples/data/text8.zip'
utils.download_one_file(download_url, local_dest, expected_byte)
words = read_data(local_dest)
dictionary, _ = build_vocab(words, vocab_size, visual_fld)
index_words = convert_words_to_index(words, dictionary)
del words # to save memory
single_gen = generate_around_words(index_words, skip_window)
while True:
around_batch = np.zeros([batch_size, 2 * skip_window], dtype=np.int32)
center_batch = np.zeros([batch_size, 1])
for index in range(batch_size):
around_batch[index], center_batch[index] = next(single_gen)
yield around_batch, center_batch
|
StarcoderdataPython
|
57651
|
<reponame>jdashg/misc
#! /usr/bin/env python3
assert __name__ == '__main__'
print(__file__)
import http.server
import pathlib
import ssl
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--bind', '-b', default='localhost', metavar='ADDRESS',
                    help='Specify alternate bind address '
                         '[default: localhost]')
parser.add_argument('--cache', '-c', action='store',
default=0, type=int,
help='Allow caching [default: 0]')
parser.add_argument('port', action='store',
default=4443, type=int,
nargs='?',
help='Specify alternate port [default: 4443]')
args = parser.parse_args()
class CustomRequestHandler(http.server.SimpleHTTPRequestHandler):
def end_headers(self):
if not args.cache:
self.send_header("Cache-Control", "no-cache, no-store, must-revalidate")
self.send_header("Pragma", "no-cache")
self.send_header("Expires", "0")
super().end_headers()
CERT_FILE = str(pathlib.PurePath(__file__).with_name('server.pem'))
httpd = http.server.ThreadingHTTPServer((args.bind, args.port), CustomRequestHandler)
try:
httpd.socket = ssl.wrap_socket(httpd.socket, certfile=CERT_FILE, server_side=True)
except FileNotFoundError:
print(f'''{CERT_FILE} not found.
Try `openssl req -new -x509 -keyout server.pem -out {CERT_FILE} -days 365 -nodes`''')
exit(1)
print(f'Serving at {httpd.socket.getsockname()}...')
httpd.serve_forever()
|
StarcoderdataPython
|
168683
|
import os
import tempfile
from contextlib import (ExitStack,
contextmanager)
from functools import partial
from typing import (Any,
Dict,
Iterable,
Optional)
import click
import pytest
import strictyaml
from hypothesis import given
from monty import monty
from tests import strategies
from tests.utils import Secured
@given(strategies.settings,
strategies.templates_directories_paths,
strategies.template_repositories_names,
strategies.temporary_directories,
strategies.github_access_tokens)
def test_main(settings: Dict[str, str],
templates_directory_path: str,
template_repository_name: Optional[str],
temporary_directory: tempfile.TemporaryDirectory,
github_access_token: Secured) -> None:
with ExitStack() as stack:
output_dir = stack.enter_context(temporary_directory)
settings_path = stack.enter_context(write_settings(settings))
command = partial(monty.main.callback,
version=False,
settings_path=settings_path,
templates_dir=templates_directory_path,
output_dir=output_dir,
github_access_token=github_access_token.value,
template_repo=template_repository_name)
files_count_before = capacity(monty.files_paths(output_dir))
command(overwrite=False)
template_directory_files_count = capacity(monty.files_paths(
templates_directory_path))
files_count_after = capacity(monty.files_paths(output_dir))
command(overwrite=True)
files_count_after_overwrite = capacity(monty.files_paths(output_dir))
assert files_count_after == (files_count_before
+ template_directory_files_count)
assert files_count_after_overwrite == files_count_after
if template_directory_files_count:
with pytest.raises(click.BadOptionUsage):
command(overwrite=False)
@contextmanager
def write_settings(settings: Dict[str, str]) -> Any:
file = tempfile.NamedTemporaryFile(mode='w',
encoding='utf8',
delete=False)
file.write(strictyaml.as_document(settings).as_yaml())
file.close()
result = file.name
try:
yield result
finally:
os.unlink(result)
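# Sketch of intended use (the settings dict here is illustrative):
#
#   with write_settings({'project': 'demo'}) as path:
#       ...  # `path` names a temporary YAML file, removed again on exit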
def capacity(elements: Iterable[Any]) -> int:
return sum(1 for _ in elements)
|
StarcoderdataPython
|
104687
|
<gh_stars>10-100
import Tkinter
import shmooze.settings as sets
def splash(fsg,text,bg=sets.bg_color,fg=sets.fg_color,font="Helvetica",size=72):
c=Tkinter.Canvas(fsg,width=fsg.width,height=fsg.height,highlightthickness=0,bg=bg)
c.pack()
coord = fsg.center()
arc = c.create_text(coord, text=text, fill=fg, justify=Tkinter.CENTER, font=(font,size))
def paragraph(fsg,text,bg=sets.bg_color,fg=sets.fg_color,font="Mono",size=32,padx=10,pady=10):
fsg.config(bg=bg)
textbox = Tkinter.Text(fsg,
font=(font, size),
wrap=Tkinter.WORD,
highlightthickness=0,
relief=Tkinter.FLAT,
fg=fg,
bg=bg,
)
textbox.insert(Tkinter.END, text)
textbox.pack(side=Tkinter.TOP, fill=Tkinter.BOTH, padx=padx, pady=pady)
def email(fsg,text,bg=sets.bg_color,fg=sets.fg_color,font="Arial",sender_size=36,subject_size=48,body_size=32,padx=10,pady=0,scroll_beginning_dead_time=18,scroll_end_dead_time=-3):
sender = text['sender']
subject = text['subject']
body = text['body']
fsg.configure(bg=bg)
widget_from = Tkinter.Label(fsg,
font=(font, sender_size),
wraplength=fsg.width,
text=sender,
fg=fg,
bg=bg,
)
widget_from.pack(padx=padx,pady=pady)
widget_subj = Tkinter.Label(fsg,
font=(font,subject_size),
wraplength=fsg.width,
text=subject,
fg=fg,
bg=bg,
)
widget_subj.pack(padx=padx,pady=pady)
widget_body = Tkinter.Text(fsg,
font=(font,body_size),
wrap=Tkinter.WORD,
highlightthickness=0,
relief=Tkinter.FLAT,
fg=fg,
bg=bg,
)
widget_body.insert(Tkinter.END, body)
widget_body.pack(side=Tkinter.TOP, fill=Tkinter.BOTH, padx=padx, pady=pady)
def calc_pos(t,dur):
if t < scroll_beginning_dead_time:
fraction = 0
elif t < dur - scroll_end_dead_time:
fraction = (t - scroll_beginning_dead_time) / (dur - scroll_end_dead_time - scroll_beginning_dead_time)
else:
fraction = 1
return fraction
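    # calc_pos maps elapsed play time to a scroll fraction: 0 during the
    # initial dead time, a linear ramp afterwards, and 1 once t passes
    # dur - scroll_end_dead_time; do_scroll below feeds it into yview_moveto.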
def do_scroll():
if fsg.vlc_duration is not None:
pos=calc_pos(fsg.play_time()-fsg.vlc_time_started,fsg.vlc_duration)
widget_body.yview_moveto(pos)
fsg.after_playing(10,do_scroll)
fsg.sync(do_scroll)
|
StarcoderdataPython
|
3389178
|
<reponame>ojss/c3lr
data_path = './data'
# miniImageNetFullSize_path = '/home/ojas/projects/unsupervised-meta-learning/data/untarred/miniImagenetFullSize/'
miniImageNetFullSize_path = "/home/user/unsupervised-meta-learning/data/miniImagenetFullSize"
EuroSAT_path = "/home/ojas/projects/unsupervised-meta-learning/data/untarred/EuroSat"
|
StarcoderdataPython
|
1700784
|
<reponame>qua-platform/qua-libs<filename>examples/Workshops/CQE/6. leakage-reduction/configuration.py
import numpy as np
gauss_len = 4
def IQ_imbalance(g, phi):
c = np.cos(phi)
s = np.sin(phi)
N = 1 / ((1 - g ** 2) * (2 * c ** 2 - 1))
return [float(N * x) for x in [(1 - g) * c, (1 + g) * s, (1 - g) * s, (1 + g) * c]]
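# Sanity check (computed from the formula above): with no gain or phase
# imbalance the correction matrix is the identity,
# IQ_imbalance(0.0, 0.0) == [1.0, 0.0, 0.0, 1.0].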
readout_len = 400
qubit_IF = 100e6
manual_ssb_IF = 100e6
use_manual_ssb = False
rr_IF = 0
qubit_LO = 5117.22e6
rr_LO = 5117.22e6
config = {
"version": 1,
"controllers": {
"con1": {
"type": "opx1",
"analog_outputs": {
1: {"offset": +0.0}, # qubit 1-I
2: {"offset": +0.0}, # qubit 1-Q
3: {"offset": +0.0}, # Readout resonator
4: {"offset": +0.0}, # Readout resonator
},
"digital_outputs": {
1: {},
},
"analog_inputs": {
1: {"offset": +0.0},
2: {"offset": +0.0},
},
}
},
"elements": {
"qubit": {
"mixInputs": {
"I": ("con1", 1),
"Q": ("con1", 2),
"lo_frequency": qubit_LO,
"mixer": "mixer_qubit",
},
"intermediate_frequency": qubit_IF,
"operations": {
"X/2": "DRAG_PULSE_X/2",
"-X/2": "DRAG_PULSE_-X/2",
"Y/2": "DRAG_PULSE_Y/2",
"-Y/2": "DRAG_PULSE_-Y/2",
"X": "DRAG_PULSE_X",
"Y": "DRAG_PULSE_Y",
"random_clifford_seq": "random_sequence",
},
},
"rr": {
"mixInputs": {
"I": ("con1", 3),
"Q": ("con1", 4),
"lo_frequency": rr_LO,
"mixer": "mixer_RR",
},
"intermediate_frequency": rr_IF,
"operations": {
"readout": "readout_pulse",
},
"outputs": {"out1": ("con1", 1)},
"time_of_flight": 28,
"smearing": 0,
},
},
"pulses": {
"DRAG_PULSE_X/2": {
"operation": "control",
"length": None,
"waveforms": {"I": "DRAG_gauss_wf_X/2", "Q": "DRAG_gauss_der_wf_X/2"},
},
"DRAG_PULSE_Y/2": {
"operation": "control",
"length": None,
"waveforms": {"I": "DRAG_gauss_wf_Y/2", "Q": "DRAG_gauss_der_wf_Y/2"},
},
"DRAG_PULSE_-X/2": {
"operation": "control",
"length": None,
"waveforms": {"I": "DRAG_gauss_wf_-X/2", "Q": "DRAG_gauss_der_wf_-X/2"},
},
"DRAG_PULSE_-Y/2": {
"operation": "control",
"length": None,
"waveforms": {"I": "DRAG_gauss_wf_-Y/2", "Q": "DRAG_gauss_der_wf_-Y/2"},
},
"DRAG_PULSE_X": {
"operation": "control",
"length": None,
"waveforms": {"I": "DRAG_gauss_wf_X", "Q": "DRAG_gauss_der_wf_X"},
},
"DRAG_PULSE_Y": {
"operation": "control",
"length": None,
"waveforms": {"I": "DRAG_gauss_wf_Y", "Q": "DRAG_gauss_der_wf_Y"},
},
"random_sequence": {
"operation": "control",
"length": None,
"waveforms": {"I": "random_I", "Q": "random_Q"},
},
"readout_pulse": {
"operation": "measurement",
"length": readout_len,
"waveforms": {"I": "readout_wf", "Q": "zero_wf"},
"integration_weights": {
"integW1": "integW1",
"integW2": "integW2",
},
"digital_marker": "ON",
},
},
"waveforms": {
"zero_wf": {"type": "constant", "sample": 0.0},
"DRAG_gauss_wf_X": {"type": "arbitrary", "samples": []},
"DRAG_gauss_der_wf_X": {"type": "arbitrary", "samples": []},
"DRAG_gauss_wf_Y": {"type": "arbitrary", "samples": []},
"DRAG_gauss_der_wf_Y": {"type": "arbitrary", "samples": []},
"DRAG_gauss_wf_X/2": {"type": "arbitrary", "samples": []},
"DRAG_gauss_der_wf_X/2": {"type": "arbitrary", "samples": []},
"DRAG_gauss_wf_-X/2": {"type": "arbitrary", "samples": []},
"DRAG_gauss_der_wf_-X/2": {"type": "arbitrary", "samples": []},
"DRAG_gauss_wf_Y/2": {"type": "arbitrary", "samples": []},
"DRAG_gauss_der_wf_Y/2": {"type": "arbitrary", "samples": []},
"DRAG_gauss_wf_-Y/2": {"type": "arbitrary", "samples": []},
"DRAG_gauss_der_wf_-Y/2": {"type": "arbitrary", "samples": []},
"readout_wf": {"type": "arbitrary", "samples": []},
"random_I": {
"type": "arbitrary",
"samples": [],
},
"random_Q": {
"type": "arbitrary",
"samples": [],
},
},
"digital_waveforms": {
"ON": {"samples": [(1, 0)]},
},
"integration_weights": {
"integW1": {
"cosine": [1.0] * readout_len,
"sine": [0.0] * readout_len,
},
"integW2": {
"cosine": [0.0] * readout_len,
"sine": [1.0] * readout_len,
},
},
"mixers": {
"mixer_qubit": [
{
"intermediate_frequency": qubit_IF,
"lo_frequency": qubit_LO,
"correction": IQ_imbalance(0.0, 0.0),
}
],
"mixer_RR": [
{
"intermediate_frequency": rr_IF,
"lo_frequency": rr_LO,
"correction": IQ_imbalance(0.0, 0.0),
}
],
},
}
|
StarcoderdataPython
|
1783306
|
<filename>app/migrations/0003_auto_20181014_2227.py<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-10-14 19:27
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0002_auto_20181014_2221'),
]
operations = [
migrations.RenameField(
model_name='profile',
old_name='pub_date_created',
new_name='pub_date',
),
]
|
StarcoderdataPython
|
18021
|
<gh_stars>0
from django.db import models
class TaskB_table(models.Model):
img = models.ImageField(upload_to='taskB/', default='defo')
    pred_price = models.FloatField()
|
StarcoderdataPython
|
1623155
|
from Environments import ChessEnvironment
from collections import defaultdict
from constants import N_ACTIONS
from dummy import generate_action_dict
import numpy as np
import time
from threading import Thread
# TODO: Tidy this up
a2m, m2a = generate_action_dict()
C_PUCT = 2
class MasterNode():
""" A placeholder node the root node statistics
"""
def __init__(self):
self.parent = None
self.child_n = defaultdict(float)
self.child_w = defaultdict(float)
self.child_lock = defaultdict(float)
class MctsNode():
""" MCTS Node class
Args:
f_action: edge connecting node to this parent
parent: the parent node
env: a chess environment
"""
def __init__(self, f_action, parent, env=None, is_root_node=False):
self.parent = parent
self.child_n = np.zeros(N_ACTIONS, dtype=np.int)
self.child_w = np.zeros(N_ACTIONS, dtype=np.float32)
self.child_lock = np.zeros(N_ACTIONS, dtype=np.bool)
self.f_action = f_action
self.is_expanded = False
self.is_root_node = is_root_node
self.locked = False
# Root Node always copy the environment
if self.is_root_node:
self.env = env.copy()
def select(self):
""" Select a child node
"""
Q = -self.child_w / (self.child_n + 1)
# TODO: Optimise
U = C_PUCT * self.child_p * (np.sqrt(self.parent.child_n[self.f_action]) / (self.child_n + 1))
L = - 999 * self.illegal_actions # Illegal actions are punished
K = - 999 * self.child_lock # child who are locked are punished
score = Q + U + L + K
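        # Q is negated because child_w is stored from the child's perspective;
        # the large negative L and K terms ensure illegal or locked children
        # never win the argmax below.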
a = np.argmax(score)
return a
def prep_env(self):
# Prep environment for evaluation
if not self.is_root_node:
self.env = self.parent.env.copy()
self.env.act(a2m[self.f_action])
def expand(self, child_prior):
assert not self.is_expanded
# only expand legal nodes
legal_actions_arr, la_list = get_legal_actions(self.env)
self.illegal_actions = np.invert(legal_actions_arr)
self.child = {a: MctsNode(a, self) for a in la_list}
# mask prior by legal moves and normalise
self.child_p = child_prior * legal_actions_arr
self.child_p = self.child_p * (1 / self.child_p.sum())
self.is_expanded = True
def backup(self, value):
self.parent.child_n[self.f_action] += 1
self.parent.child_w[self.f_action] += value
if not self.is_root_node:
self.parent.backup(-value) # backup in the perspective of the parent
def _apply_loss(self, loss):
self.parent.child_n[self.f_action] += loss
if not self.is_root_node:
self.parent.backup(-loss)
def lock(self):
self.locked = True
# virtual loss
# self._apply_loss(-1)
self.parent.child_lock[self.f_action] = True
def unlock(self):
self._apply_loss(1)
self.locked = False
self.parent.child_lock[self.f_action] = False
@property
def child_q(self):
return self.child_w / (self.child_n + 1)
@property
def _child_u(self):
return C_PUCT * self.child_p * (np.sqrt(self.parent.child_n[self.f_action]) / (self.child_n + 1))
def get_legal_actions(env):
legal_actions = [m2a[m.uci()] for m in env.legal_moves]
action_array = np.zeros(N_ACTIONS, dtype=np.bool)
action_array[legal_actions] = True
return action_array, legal_actions
def lap_timer(last_tic):
now = time.perf_counter()
lap = now - last_tic
return lap, now
lock_count = 0
class MctsThread(Thread):
def __init__(self, root_node, eval_fn):
Thread.__init__(self)
self.root_node = root_node
self.eval_fn = eval_fn
def run(self):
        current_node = self.root_node  # start each simulation from this thread's root
# 1. Select
while current_node.is_expanded:
a = current_node.select()
current_node = current_node.child[a]
# abandon if locked
if current_node.locked:
# print('LOCKED!')
global lock_count
lock_count += 1
return
# 2. Evaluate
current_node.lock()
current_node.prep_env()
value, priors = self.eval_fn(current_node.env)
# 3. Expand
current_node.expand(priors)
current_node.unlock()
current_node.backup(value)
def mcts_search_new(root_node, eval_fn, n_sim=800):
statistics = {'select_op': 0,
'max_depth': 0,
'select_time': 0,
'evaluate_time': 0,
'expand_time': 0,
'backup_time': 0
}
threads = []
while root_node.parent.child_n[None] < n_sim:
t = MctsThread(root_node, eval_fn)
t.start()
threads.append(t)
for t in threads:
t.join()
return statistics
# Old search
def mcts_search_classic(root_node, eval_fn, n_sim=800):
statistics = {'select_op': 0,
'max_depth': 0,
'select_time': 0,
'evaluate_time': 0,
'expand_time': 0,
'backup_time': 0
}
for n in range(n_sim):
# print('Search:', n)
current_node = root_node
depth = 0
# select
tic = time.perf_counter()
while current_node.is_expanded:
# print('Select')
a = current_node.select()
depth += 1
current_node = current_node.child[a]
statistics['select_op'] += 1
statistics['max_depth'] = max(depth, statistics['max_depth'])
statistics['select_time'] += time.perf_counter() - tic
# expand, evaluate, and backup
# print('_Prep')
tic = time.perf_counter()
current_node.prep_env()
value, priors = eval_fn(current_node.env)
statistics['evaluate_time'] += time.perf_counter() - tic
tic = time.perf_counter()
# print('Expand\n', current_node.env)
current_node.expand(priors)
statistics['expand_time'] += time.perf_counter() - tic
tic = time.perf_counter()
# print('Backup:', value)
current_node.backup(value)
statistics['backup_time'] += time.perf_counter() - tic
return statistics
# Helper function
def mcts_setup(env):
master_node = MasterNode()
root_node = MctsNode(f_action=None, parent=master_node, env=env, is_root_node=True)
return root_node, master_node
def calc_improved_policy(root_node):
# only basic calculation for now
    total = root_node.parent.child_n[None]
return root_node.child_n / total
if __name__ == "__main__":
from dummy import dummy_material_net
N_SIM = 1200
board = ChessEnvironment("r3k2r/8/8/8/8/8/8/R3K2R w KQkq - 0 1")
root_node, master_node = mcts_setup(board)
tic = time.perf_counter()
stats = mcts_search_classic(root_node, dummy_material_net, N_SIM)
# stats = mcts_search_new(root_node, dummy_material_net, N_SIM)
toc = time.perf_counter() - tic
print('INFO: Search statistics')
for k, v in stats.items():
print(k, v)
print('INFO: Main loop time {:.2f}s'.format(toc))
print('Total visits based on rt', root_node.child_n.sum())
print('Master:', master_node.child_n[None])
pi = root_node.child_n / master_node.child_n[None]
p = np.argsort(pi)[::-1]
from IPython import embed
for i in range(10):
# embed()
action = p[i]
prob = pi[action]
move = a2m[action]
print(i, move, prob)
for m in ('a1a8', 'h1h8'):
a = m2a[m]
print(m, a)
print('N', root_node.child_n[a])
print('W', root_node.child_w[a])
print('q', root_node.child_q[a])
print('p', root_node.child_p[a])
print('u', root_node._child_u[a])
print('s', root_node._child_u[a] - root_node.child_q[a])
# embed()
print('locked', lock_count)
|
StarcoderdataPython
|
1688448
|
<filename>c2logic/compiler.py
import os
import sysconfig
import dataclasses
from dataclasses import dataclass
from pycparser import c_ast, parse_file
from pycparser.c_ast import (
Compound, Constant, DeclList, Enum, FileAST, FuncDecl, Struct, TypeDecl, Typename
)
from .consts import builtins, draw_funcs, func_binary_ops, func_unary_ops
from .instructions import (
BinaryOp, Draw, DrawFlush, Enable, End, FunctionCall, GetLink, Goto, Instruction, JumpCondition,
Print, PrintFlush, Radar, RawAsm, Read, RelativeJump, Return, Sensor, Set, Shoot, UnaryOp, Write
)
@dataclass
class Function():
name: str
params: list
instructions: list = dataclasses.field(default_factory=list)
locals: list = dataclasses.field(init=False)
start: int = dataclasses.field(default=None, init=False)
callees: set = dataclasses.field(init=False, default_factory=set)
callers: set = dataclasses.field(init=False, default_factory=set)
labels: dict = dataclasses.field(init=False, default_factory=dict)
def __post_init__(self):
self.locals = self.params[:]
@dataclass
class Loop():
start: int
end_jumps: list = dataclasses.field(default_factory=list)
"""
@dataclass
class Variable():
type: str
name: str
"""
class Compiler(c_ast.NodeVisitor):
def __init__(self, opt_level=0):
self.opt_level = opt_level
self.functions: dict = None
self.curr_function: Function = None
self.globals: list = None
#TODO replace this with "blocks" attr on Function
self.loops: list = None
self.loop_end: int = None
self.special_vars: dict = None
def compile(self, filename: str):
self.functions = {}
self.curr_function = None
self.globals = []
self.loops = []
self.loop_end = None
self.special_vars = {}
ast = parse_file(filename, use_cpp=True, cpp_args=["-I", get_include_path()])
self.visit(ast)
#remove uncalled functions
if self.opt_level >= 2:
self.functions["main"].callers.add("__start")
self.remove_uncalled_funcs()
init_call = FunctionCall("main")
if self.opt_level >= 3:
if len(self.functions) == 1:
preamble = []
else:
preamble = [init_call]
else:
preamble = [Set("__retaddr_main", "2"), init_call, End()]
offset = len(preamble)
#set function starts
for function in self.functions.values():
function.start = offset
offset += len(function.instructions)
#rewrite relative jumps and func calls
init_call.func_start = self.functions["main"].start
for function in self.functions.values():
instructions = function.instructions
for instruction in instructions:
if isinstance(instruction, RelativeJump):
instruction.func_start = function.start
elif isinstance(instruction, FunctionCall):
instruction.func_start = self.functions[instruction.func_name].start
elif isinstance(instruction, Goto):
instruction.offset = function.labels[instruction.label]
instruction.func_start = function.start
elif isinstance(instruction, Set) and instruction.dest.startswith("__retaddr"):
instruction.src += function.start
out = []
if preamble:
out.append("\n".join(map(str, preamble)))
out.extend(
"\n".join(map(str, function.instructions)) for function in self.functions.values()
)
return "\n".join(out)
def remove_uncalled_funcs(self):
to_remove = set()
for name, function in list(self.functions.items()):
if name in to_remove:
continue
callers = set()
if not self.is_called(function, callers):
to_remove.add(name)
to_remove |= callers
for name in to_remove:
del self.functions[name]
def is_called(self, function, callers):
if function.name in callers: #avoid infinite loops
return False
for func_name in function.callers:
if func_name == "__start":
return True
callers.add(function.name)
if self.is_called(self.functions[func_name], callers):
return True
return False
#utilities
def push(self, instruction: Instruction):
self.curr_function.instructions.append(instruction)
def pop(self):
return self.curr_function.instructions.pop()
def peek(self):
return self.curr_function.instructions[-1]
def curr_offset(self):
return len(self.curr_function.instructions) - 1
def get_varname(self, varname):
if varname in self.curr_function.locals:
return f"_{varname}_{self.curr_function.name}"
elif varname not in self.globals:
raise NameError(f"Unknown variable {varname}")
return varname
def get_special_var(self, varname):
#avoids special variables clobbering each other
if varname not in self.special_vars:
self.special_vars[varname] = -1
self.special_vars[varname] += 1
#print(f"create {varname}_{self.special_vars[varname]}")
return f"{varname}_{self.special_vars[varname]}"
def delete_special_var(self, varname):
name, _, num = varname.rpartition("_")
try:
if int(num) != self.special_vars[name]:
#print(varname, self.special_vars[name])
return
#raise ValueError(f"{varname} was attempted to be deleted when self.special_vars[{name}] was {num}")
#print(f"delete {name}_{self.special_vars[name]}")
self.special_vars[name] -= 1
except (ValueError, KeyError): # not deleting a special var, this is normal
pass
def can_avoid_indirection(self, var="__rax"):
top = self.peek()
return self.opt_level >= 1 and isinstance(top, Set) and top.dest == var
def set_to_rax(self, varname: str):
top = self.peek()
if self.opt_level >= 1 and hasattr(top, "dest") and top.dest == "__rax":
#avoid indirection through __rax
self.curr_function.instructions[-1].dest = varname
else:
self.push(Set(varname, "__rax"))
def push_body_jump(self):
""" jump over loop/if body when cond is false """
if self.opt_level >= 1 and isinstance(self.peek(), BinaryOp):
try:
self.push(RelativeJump(None, JumpCondition.from_binaryop(self.pop().inverse())))
except KeyError:
self.push(RelativeJump(None, JumpCondition("==", "__rax", "0")))
else:
self.push(RelativeJump(None, JumpCondition("==", "__rax", "0")))
def start_loop(self, cond):
self.loops.append(Loop(self.curr_offset() + 1))
self.visit(cond)
self.push_body_jump()
self.loops[-1].end_jumps = [self.curr_offset()] # also used for breaks
def end_loop(self):
loop = self.loops.pop()
self.push(RelativeJump(loop.start, JumpCondition.always))
self.loop_end = self.curr_offset() + 1
for offset in loop.end_jumps:
self.curr_function.instructions[offset].offset = self.loop_end
def push_ret(self):
#TODO make retaddr and local variables use get_special_var and delete_special_var
if self.opt_level >= 3 and self.curr_function.name == "main":
top = self.peek()
if isinstance(top, Set) and top.dest == "__rax":
self.pop()
self.push(End())
else:
self.push(Return(self.curr_function.name))
def optimize_builtin_args(self, args):
if self.opt_level >= 1:
for i, arg in reversed(list(enumerate(args))):
if self.can_avoid_indirection(arg):
args[i] = self.pop().src
self.delete_special_var(arg)
else:
break
return args
def get_unary_builtin_arg(self, args):
self.visit(args[0])
if self.can_avoid_indirection():
return self.pop().src
else:
return "__rax"
def get_binary_builtin_args(self, args, name):
left_name = self.get_special_var(f"__{name}_arg0")
self.visit(args[0])
self.set_to_rax(left_name)
self.visit(args[1])
left = left_name
right = "__rax"
if self.can_avoid_indirection():
self.delete_special_var(right)
right = self.pop().src
if self.can_avoid_indirection(left_name):
self.delete_special_var(left)
left = self.pop().src
return left, right
def get_multiple_builtin_args(self, args, name):
argnames = []
for i, arg in enumerate(args):
self.visit(arg)
argname = self.get_special_var(f"__{name}_arg{i}")
self.set_to_rax(argname)
argnames.append(argname)
return self.optimize_builtin_args(argnames)
#visitors
def visit_FuncDef(self, node): # function definitions
func_name = node.decl.name
if func_name in self.functions:
self.curr_function = self.functions[func_name]
else:
func_decl = node.decl.type
if func_decl.args is None or isinstance(func_decl.args.params[0], Typename):
params = []
else:
params = [param_decl.name for param_decl in func_decl.args.params]
self.curr_function = Function(func_name, params)
self.visit(node.body)
#implicit return
#needed if loop/if body is at end of function or hasn't returned yet
if self.loop_end == self.curr_offset() + 1 or not isinstance(self.peek(), Return):
self.push(Set("__rax", "null"))
self.push_ret()
self.functions[func_name] = self.curr_function
def visit_Decl(self, node):
if isinstance(node.type, TypeDecl): # variable declaration
#TODO fix local/global split
varname = node.name
if self.curr_function is None: # globals
self.globals.append(varname)
else:
self.curr_function.locals.append(varname)
varname = f"_{varname}_{self.curr_function.name}"
if node.init is not None:
self.visit(node.init)
self.set_to_rax(varname)
elif isinstance(node.type, FuncDecl):
if node.name not in builtins + func_unary_ops + func_binary_ops:
#create placeholder function for forward declarations
func_decl = node.type
if func_decl.args is None or isinstance(func_decl.args.params[0], Typename):
params = []
else:
params = [param_decl.name for param_decl in func_decl.args.params]
self.functions[node.name] = Function(node.name, params)
elif isinstance(node.type, Struct):
if node.type.name != "MindustryObject":
#TODO structs
raise NotImplementedError(node)
elif isinstance(node.type, Enum):
#TODO enums
raise NotImplementedError(node)
else:
raise NotImplementedError(node)
def visit_Assignment(self, node):
self.visit(node.rvalue)
varname = self.get_varname(node.lvalue.name)
if node.op == "=": #normal assignment
self.set_to_rax(varname)
else: #augmented assignment(+=,-=,etc)
if self.can_avoid_indirection():
#avoid indirection through __rax
self.push(BinaryOp(varname, varname, self.pop().src, node.op[:-1]))
else:
self.push(BinaryOp(varname, varname, "__rax", node.op[:-1]))
if self.opt_level < 3:
self.push(Set("__rax", varname))
def visit_Constant(self, node): # literals
self.push(Set("__rax", node.value))
def visit_ID(self, node): # identifier
varname = node.name
if varname not in self.functions:
varname = self.get_varname(varname)
if varname in ("links", "ipt", "counter", "time"):
varname = "@" + varname
self.push(Set("__rax", varname))
def visit_BinaryOp(self, node):
self.visit(node.left)
left = self.get_special_var("__rbx")
self.set_to_rax(left)
self.visit(node.right)
right = "__rax"
if self.can_avoid_indirection():
right = self.pop().src
if self.can_avoid_indirection(left):
self.delete_special_var(left)
left = self.pop().src
self.push(BinaryOp("__rax", left, right, node.op))
self.delete_special_var(left)
def visit_UnaryOp(self, node):
if node.op == "p++" or node.op == "p--": #postincrement/decrement
varname = self.get_varname(node.expr.name)
if self.opt_level < 3:
self.push(Set("__rax", varname))
self.push(BinaryOp(varname, varname, "1", node.op[1]))
elif node.op == "++" or node.op == "--":
varname = self.get_varname(node.expr.name)
self.push(BinaryOp(varname, varname, "1", node.op[0]))
if self.opt_level < 3:
self.push(Set("__rax", varname))
elif node.op == "!":
self.visit(node.expr)
if self.opt_level >= 1 and isinstance(self.peek(), BinaryOp):
try:
self.push(self.pop().inverse())
except KeyError:
self.push(BinaryOp("__rax", "__rax", "0", "=="))
else:
self.push(BinaryOp("__rax", "__rax", "0", "=="))
else:
self.visit(node.expr)
self.push(UnaryOp("__rax", "__rax", node.op))
def visit_For(self, node):
self.visit(node.init)
self.start_loop(node.cond)
self.visit(node.stmt) # loop body
self.visit(node.next)
self.end_loop()
def visit_While(self, node):
self.start_loop(node.cond)
self.visit(node.stmt)
self.end_loop()
def visit_DoWhile(self, node):
        #jump over the condition on the first iteration
self.push(RelativeJump(None, JumpCondition.always))
init_jump_offset = self.curr_offset()
self.start_loop(node.cond)
self.curr_function.instructions[init_jump_offset].offset = len(
self.curr_function.instructions
)
self.visit(node.stmt)
self.end_loop()
def visit_If(self, node):
self.visit(node.cond)
self.push_body_jump()
cond_jump_offset = self.curr_offset()
self.visit(node.iftrue)
#jump over else body from end of if body
if node.iffalse is not None:
self.push(RelativeJump(None, JumpCondition.always))
cond_jump_offset2 = self.curr_offset()
self.curr_function.instructions[cond_jump_offset].offset = len(
self.curr_function.instructions
)
if node.iffalse is not None:
self.visit(node.iffalse)
self.curr_function.instructions[cond_jump_offset2].offset = len(
self.curr_function.instructions
)
def visit_Break(self, node): #pylint: disable=unused-argument
self.push(RelativeJump(None, JumpCondition.always))
self.loops[-1].end_jumps.append(self.curr_offset())
def visit_Continue(self, node): #pylint: disable=unused-argument
self.push(RelativeJump(self.loops[-1].start, JumpCondition.always))
def visit_Return(self, node):
if node.expr is None:
self.push(Set("__rax","null"))
else:
self.visit(node.expr)
self.push_ret()
def visit_Label(self, node):
self.curr_function.labels[node.name] = self.curr_offset() + 1
self.visit(node.stmt)
def visit_Goto(self, node):
self.push(Goto(node.name))
def visit_FuncCall(self, node):
name = node.name.name
if node.args is not None:
args = node.args.exprs
else:
args = []
#TODO avoid duplication in builtin calls
builtins_dict = {
"print": Print,
"printd": Print,
"printflush": PrintFlush,
"enable": Enable,
"shoot": Shoot,
"get_link": lambda index: GetLink("__rax", index),
"read": lambda cell, index: Read("__rax", cell, index),
"write": Write,
"drawflush": DrawFlush
}
if name in builtins_dict:
argnames = self.get_multiple_builtin_args(args, name)
self.push(builtins_dict[name](*argnames))
for argname in argnames:
if argname.startswith(f"__{name}_arg"):
self.delete_special_var(argname)
elif name == "asm":
arg = args[0]
if not isinstance(arg, Constant) or arg.type != "string":
raise TypeError("Non-string argument to asm", node)
self.push(RawAsm(arg.value[1:-1]))
elif name == "radar":
argnames = []
for i, arg in enumerate(args):
if 1 <= i <= 4:
if not isinstance(arg, Constant) or arg.type != "string":
raise TypeError("Non-string argument to radar", node)
self.push(Set("__rax", arg.value[1:-1]))
else:
self.visit(arg)
argname = self.get_special_var(f"__radar_arg{i}")
self.set_to_rax(argname)
argnames.append(argname)
argnames = self.optimize_builtin_args(argnames)
self.push(Radar("__rax", *argnames)) #pylint: disable=no-value-for-parameter
for argname in argnames:
if argname.startswith("__radar_arg"):
self.delete_special_var(argname)
elif name == "sensor":
self.visit(args[0])
left = self.get_special_var("__sensor_arg0")
self.set_to_rax(left)
arg = args[1]
if not isinstance(arg, Constant) or arg.type != "string":
raise TypeError("Non-string argument to sensor", node)
self.push(Set("__rax", arg.value[1:-1]))
right = "__rax"
if self.can_avoid_indirection():
right = self.pop().src
if self.can_avoid_indirection(left):
self.delete_special_var(left)
left = self.pop().src
self.push(Sensor("__rax", left, right))
if left.startswith("__sensor_arg0"):
self.delete_special_var(left)
elif name == "end":
self.push(End())
elif name in draw_funcs:
argnames = self.get_multiple_builtin_args(args, name)
cmd = draw_funcs[name]
self.push(Draw(cmd, *argnames))
for argname in argnames:
if argname.startswith(f"__{name}_arg"):
self.delete_special_var(argname)
elif name in func_binary_ops:
left, right = self.get_binary_builtin_args(args, name)
self.push(BinaryOp("__rax", left, right, name))
if left.startswith(f"__{name}_arg"):
self.delete_special_var(left)
elif name in func_unary_ops:
self.push(UnaryOp("__rax", self.get_unary_builtin_arg(args), name))
else:
try:
func = self.functions[name]
except KeyError:
raise ValueError(f"{name} is not a function")
if self.opt_level >= 2:
self.curr_function.callees.add(name)
func.callers.add(self.curr_function.name)
for param, arg in zip(func.params, args):
self.visit(arg)
self.set_to_rax(f"_{param}_{name}")
self.push(Set("__retaddr_" + name, self.curr_offset() + 3))
self.push(FunctionCall(name))
def generic_visit(self, node):
if isinstance(node, (FileAST, Compound, DeclList)):
super().generic_visit(node)
else:
raise NotImplementedError(node)
def get_include_path():
if os.name == "posix":
return sysconfig.get_path("include", "posix_user")
elif os.name == "nt":
return sysconfig.get_path("include", "nt")
else:
raise ValueError(f"Unknown os {os.name}")
def main(file, optimization = 0):
print(Compiler(optimization).compile(file), file=open(file+".masm", "w+"))
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3283719
|
from otree.api import (
models,
widgets,
BaseConstants,
BaseSubsession,
BaseGroup,
BasePlayer,
Currency as c,
currency_range,
)
author = 'Your name here'
doc = """
Your app description
"""
class Constants(BaseConstants):
name_in_url = 'inst_tsim'
players_per_group = None
num_rounds = 1
payoff_win = c(4)
payoff_lose = c(0.5)
completion_fee = c(2.5)
beliefs_payoff = c(0.5)
class Subsession(BaseSubsession):
pass
class Group(BaseGroup):
pass
class Player(BasePlayer):
q1_simult = models.IntegerField(
choices=[
[1, 'Both would get {}.'.format(Constants.payoff_lose)],
[2, 'Participant A would get {} and Participant B would get {}.'.format(Constants.payoff_lose,
Constants.payoff_win)],
[3, 'Participant A would get {} and Participant B would get {}.'.format(Constants.payoff_win,
Constants.payoff_lose)],
[4, 'Both would get {}.'.format(Constants.payoff_win)]
],
widget=widgets.RadioSelect,
)
def q1_simult_error_message(self, value):
print('value is', value)
if not value == 4:
return 'Recall: if any of you reports Orange, then both would get {}.'.format(Constants.payoff_win)
q2_simult = models.IntegerField(
choices=[
[1, 'Both would get {}.'.format(Constants.payoff_lose)],
[2, 'Participant A would get {} and Participant B would get {}.'.format(Constants.payoff_lose,
Constants.payoff_win)],
[3, 'Participant A would get {} and Participant B would get {}.'.format(Constants.payoff_win,
Constants.payoff_lose)],
[4, 'Both would get {}.'.format(Constants.payoff_win)]
],
widget=widgets.RadioSelect,
)
def q2_simult_error_message(self, value):
print('value is', value)
if not value == 4:
return 'Recall: if any of you reports Orange, then both would get {}.'.format(Constants.payoff_win)
q3_simult = models.IntegerField(
choices=[
[1, 'Both would get {}.'.format(Constants.payoff_lose)],
[2, 'Participant A would get {} and Participant B would get {}.'.format(Constants.payoff_lose,
Constants.payoff_win)],
[3, 'Participant A would get {} and Participant B would get {}.'.format(Constants.payoff_win,
Constants.payoff_lose)],
[4, 'Both would get {}.'.format(Constants.payoff_win)]
],
widget=widgets.RadioSelect,
)
def q3_simult_error_message(self, value):
print('value is', value)
if not value == 1:
return 'Recall: if both participants report black cards, then both would get {}.'.format(
Constants.payoff_lose)
q4_simult = models.IntegerField(
choices=[
[1, 'Both would get {}.'.format(Constants.payoff_lose)],
[2, 'Participant A would get {} and Participant B would get {}.'.format(Constants.payoff_lose,
Constants.payoff_win)],
[3, 'Participant A would get {} and Participant B would get {}.'.format(Constants.payoff_win,
Constants.payoff_lose)],
[4, 'Both would get {}.'.format(Constants.payoff_win)]
],
widget=widgets.RadioSelect,
)
def q4_simult_error_message(self, value):
print('value is', value)
if not value == 4:
return 'Recall: if both participants report orange cards, then both would get {}.'.format(
Constants.payoff_win)
q5_simult = models.IntegerField(
choices=[
[1, 'Before Participant B reports her or his own card’s color.'],
[2, 'After Participant B reports her or his own card’s color.'],
],
widget=widgets.RadioSelect,
)
def q5_simult_error_message(self, value):
print('value is', value)
if not value == 2:
return 'Recall: Participants learn the report of the other at the end of the experiment.'
|
StarcoderdataPython
|
57588
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from preggy.assertions.types.boolean import *
from preggy.assertions.types.classes import *
from preggy.assertions.types.errors import *
from preggy.assertions.types.file import *
from preggy.assertions.types.function import *
from preggy.assertions.types.nullable import *
from preggy.assertions.types.numeric import *
from preggy.assertions.types.regexp import *
|
StarcoderdataPython
|
1641193
|
<reponame>thread/django-lightweight-queue
from django.conf.urls import url
from . import views
app_name = 'django_lightweight_queue'
urlpatterns = (
url(r'^debug/django-lightweight-queue/debug-run$', views.debug_run, name='debug-run'),
)
|
StarcoderdataPython
|
1656437
|
# Copyright 2016 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This package manipulates OCI image configuration metadata."""
from collections import namedtuple
import copy
import json
import os
import os.path
import sys
from tools.build_defs.docker import utils
from third_party.py import gflags
gflags.DEFINE_string('base', None, 'The parent image')
gflags.DEFINE_string('output', None, 'The output file to generate')
gflags.MarkFlagAsRequired('output')
gflags.DEFINE_multistring('layer', [],
'Layer sha256 hashes that make up this image')
gflags.DEFINE_list('entrypoint', None,
'Override the "Entrypoint" of the previous image')
gflags.DEFINE_list('command', None, 'Override the "Cmd" of the previous image')
gflags.DEFINE_string('user', None, 'The username to run commands under')
gflags.DEFINE_list('labels', None, 'Augment the "Label" of the previous image')
gflags.DEFINE_list('ports', None,
'Augment the "ExposedPorts" of the previous image')
gflags.DEFINE_list('volumes', None,
'Augment the "Volumes" of the previous image')
gflags.DEFINE_string('workdir', None, 'Set the working directory for the image')
gflags.DEFINE_list('env', None, 'Augment the "Env" of the previous image')
FLAGS = gflags.FLAGS
_ConfigOptionsT = namedtuple('ConfigOptionsT', ['layers', 'entrypoint', 'cmd',
'env', 'labels', 'ports',
'volumes', 'workdir', 'user'])
class ConfigOptions(_ConfigOptionsT):
"""Docker image configuration options."""
def __new__(cls,
layers=None,
entrypoint=None,
cmd=None,
user=None,
labels=None,
env=None,
ports=None,
volumes=None,
workdir=None):
"""Constructor."""
return super(ConfigOptions, cls).__new__(cls,
layers=layers,
entrypoint=entrypoint,
cmd=cmd,
user=user,
labels=labels,
env=env,
ports=ports,
volumes=volumes,
workdir=workdir)
_PROCESSOR_ARCHITECTURE = 'amd64'
_OPERATING_SYSTEM = 'linux'
def Resolve(value, environment):
"""Resolves environment variables embedded in the given value."""
outer_env = os.environ
try:
os.environ = environment
return os.path.expandvars(value)
finally:
os.environ = outer_env
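# For example (illustrative values): Resolve('$PATH:/extra', {'PATH': '/bin'})
# returns '/bin:/extra', because expansion only sees the supplied environment.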
def DeepCopySkipNull(data):
"""Do a deep copy, skipping null entry."""
if isinstance(data, dict):
return dict((DeepCopySkipNull(k), DeepCopySkipNull(v))
for k, v in data.items() if v is not None)
return copy.deepcopy(data)
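# e.g. DeepCopySkipNull({'Cmd': None, 'Env': ['A=1']}) -> {'Env': ['A=1']}:
# None-valued entries are simply dropped from the copy.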
def KeyValueToDict(pair):
"""Converts an iterable object of key=value pairs to dictionary."""
d = dict()
for kv in pair:
(k, v) = kv.split('=', 1)
d[k] = v
return d
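# e.g. KeyValueToDict(['PATH=/bin:/usr/bin', 'LANG=C']) ->
# {'PATH': '/bin:/usr/bin', 'LANG': 'C'}; only the first '=' splits key/value.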
def CreateImageConfig(data, options):
"""Create an image config possibly based on an existing one.
Args:
data: A dict of Docker image config to base on top of.
options: Options specific to this image which will be merged with any
existing data
Returns:
Image config for the new image
"""
defaults = DeepCopySkipNull(data)
  # don't propagate non-spec keys
output = dict()
output['created'] = '0001-01-01T00:00:00Z'
output['author'] = 'Bazel'
output['architecture'] = _PROCESSOR_ARCHITECTURE
output['os'] = _OPERATING_SYSTEM
output['config'] = defaults.get('config', {})
if options.entrypoint:
output['config']['Entrypoint'] = options.entrypoint
if options.cmd:
output['config']['Cmd'] = options.cmd
if options.user:
output['config']['User'] = options.user
def Dict2ConfigValue(d):
return ['%s=%s' % (k, d[k]) for k in sorted(d.keys())]
if options.env:
# Build a dictionary of existing environment variables (used by Resolve).
environ_dict = KeyValueToDict(output['config'].get('Env', []))
# Merge in new environment variables, resolving references.
for k, v in options.env.items():
# Resolve handles scenarios like "PATH=$PATH:...".
environ_dict[k] = Resolve(v, environ_dict)
output['config']['Env'] = Dict2ConfigValue(environ_dict)
# TODO(babel-team) Label is currently docker specific
if options.labels:
label_dict = KeyValueToDict(output['config'].get('Label', []))
for k, v in options.labels.items():
label_dict[k] = v
output['config']['Label'] = Dict2ConfigValue(label_dict)
if options.ports:
if 'ExposedPorts' not in output['config']:
output['config']['ExposedPorts'] = {}
for p in options.ports:
if '/' in p:
# The port spec has the form 80/tcp, 1234/udp
# so we simply use it as the key.
output['config']['ExposedPorts'][p] = {}
else:
# Assume tcp
output['config']['ExposedPorts'][p + '/tcp'] = {}
if options.volumes:
if 'Volumes' not in output['config']:
output['config']['Volumes'] = {}
for p in options.volumes:
output['config']['Volumes'][p] = {}
if options.workdir:
output['config']['WorkingDir'] = options.workdir
# diff_ids are ordered from bottom-most to top-most
diff_ids = defaults.get('rootfs', {}).get('diff_ids', [])
layers = options.layers if options.layers else []
diff_ids += ['sha256:%s' % l for l in layers]
output['rootfs'] = {
'type': 'layers',
'diff_ids': diff_ids,
}
# history is ordered from bottom-most layer to top-most layer
history = defaults.get('history', [])
# docker only allows the child to have one more history entry than the parent
history += [{
'created': '0001-01-01T00:00:00Z',
'created_by': 'bazel build ...',
'author': 'Bazel'}]
output['history'] = history
return output
def main(unused_argv):
base_json = '{}'
manifest = utils.GetLatestManifestFromTar(FLAGS.base)
if manifest:
config_file = manifest['Config']
base_json = utils.GetTarFile(FLAGS.base, config_file)
data = json.loads(base_json)
layers = []
for layer in FLAGS.layer:
layers.append(utils.ExtractValue(layer))
labels = KeyValueToDict(FLAGS.labels)
for label, value in labels.items():
if value.startswith('@'):
with open(value[1:], 'r') as f:
labels[label] = f.read()
output = CreateImageConfig(data,
ConfigOptions(layers=layers,
entrypoint=FLAGS.entrypoint,
cmd=FLAGS.command,
user=FLAGS.user,
labels=labels,
env=KeyValueToDict(FLAGS.env),
ports=FLAGS.ports,
volumes=FLAGS.volumes,
workdir=FLAGS.workdir))
with open(FLAGS.output, 'w') as fp:
json.dump(output, fp, sort_keys=True)
fp.write('\n')
if __name__ == '__main__':
main(FLAGS(sys.argv))
|
StarcoderdataPython
|
3298982
|
"""Types used in the library."""
import enum
import typing
if typing.TYPE_CHECKING:
from genshin.models.model import Unique
__all__ = ["Game", "Region"]
UniqueT = typing.TypeVar("UniqueT", bound="Unique")
class Region(str, enum.Enum):
"""Region to get data from."""
OVERSEAS = "os"
"""Applies to all overseas APIs."""
CHINESE = "cn"
"""Applies to all chinese mainland APIs."""
class Game(str, enum.Enum):
"""Hoyoverse game."""
GENSHIN = "genshin"
"""Genshin Impact"""
HONKAI = "honkai3rd"
"""Honkai Impact 3rd"""
IDOr = typing.Union[int, UniqueT]
"""Allows partial objects."""
|
StarcoderdataPython
|
3201586
|
<reponame>Robert-Ma/foal<filename>src/foal/__init__.py
from foal import search
from foal import sort
from foal import linear_algebra
from foal import dynamic_programming
from foal import tree
from foal import graph
|
StarcoderdataPython
|
1778589
|
#!/usr/bin/env python
from __future__ import print_function
import tensorflow as tf
import cv2
import sys
sys.path.append("../common")
from common.netconstruct import weight_variable,bias_variable, conv2d,max_pool_2x2
import random
import numpy as np
from collections import deque
OBSERVE_LENGTH = 1000
EXPLORE_LENGTH = 30000
FINAL_EPSILON = 0.0001
INITIAL_EPSILON = 0.1
GAMMA = 0.8 # decay rate of past observations
# OBSERVE = 100000. # timesteps to observe before training
# EXPLORE = 2000000. # frames over which to anneal epsilon
# FINAL_EPSILON = 0.0001 # final value of epsilon
# INITIAL_EPSILON = 0.0001 # starting value of epsilon
REPLAY_MEMORY_LENGTH = 50000 # number of previous transitions to remember
BATCH = 32 # size of minibatch
FRAME_PER_ACTION = 3
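# Hedged sketch (not part of the original script): a typical linear epsilon-annealing
# schedule over EXPLORE_LENGTH steps, built only from the constants above. The training
# loop that would call it is assumed to live elsewhere.
def anneal_epsilon(t):
    """Return the exploration rate for timestep t (constant while observing, then linear decay)."""
    if t <= OBSERVE_LENGTH:
        return INITIAL_EPSILON
    fraction = min(1.0, float(t - OBSERVE_LENGTH) / EXPLORE_LENGTH)
    return INITIAL_EPSILON - (INITIAL_EPSILON - FINAL_EPSILON) * fraction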
def createNetwork(ACTIONS):
# network weights
W_conv1 = weight_variable([8, 8, 4, 32])
b_conv1 = bias_variable([32])
W_conv2 = weight_variable([4, 4, 32, 64])
b_conv2 = bias_variable([64])
W_conv3 = weight_variable([3, 3, 64, 64])
b_conv3 = bias_variable([64])
W_fc1 = weight_variable([1600, 512])
b_fc1 = bias_variable([512])
W_fc2 = weight_variable([512, ACTIONS])
b_fc2 = bias_variable([ACTIONS])
# input layer
s = tf.placeholder("float", [None, 80, 80, 4])
# hidden layers
h_conv1 = tf.nn.relu(conv2d(s, W_conv1, 4) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2, 2) + b_conv2)
#h_pool2 = max_pool_2x2(h_conv2)
h_conv3 = tf.nn.relu(conv2d(h_conv2, W_conv3, 1) + b_conv3)
#h_pool3 = max_pool_2x2(h_conv3)
#h_pool3_flat = tf.reshape(h_pool3, [-1, 256])
h_conv3_flat = tf.reshape(h_conv3, [-1, 1600])
h_fc1 = tf.nn.relu(tf.matmul(h_conv3_flat, W_fc1) + b_fc1)
# readout layer
readout = tf.matmul(h_fc1, W_fc2) + b_fc2
return s, readout, h_fc1
class DQN(object):
def __init__(self,ACTIONS):
self.ACTIONS = ACTIONS
self.status, self.readout,self.h_fc1 = createNetwork(ACTIONS)
self.actions = tf.placeholder("float", [None, ACTIONS])
self.y = tf.placeholder("float", [None])
self.readout_action = tf.reduce_sum(tf.multiply(self.readout, self.actions), reduction_indices=1)
cost = tf.reduce_mean(tf.square(self.y - self.readout_action))
self.train_step = tf.train.AdamOptimizer(1e-6).minimize(cost)
self.model_saver = tf.train.Saver()
self.replay_memory = deque()
self.replay_memory_terminal = deque()
# store the previous observations in replay memory
self.sample_batch = BATCH
self.t = 0
self.saver = tf.train.Saver()
def train(self, s_t, a_t, r_t, s_t1, terminal):
if terminal:
self.replay_memory_terminal.append((s_t, a_t, r_t, s_t1, terminal))
else:
self.replay_memory.append((s_t, a_t, r_t, s_t1, terminal))
if len(self.replay_memory) > REPLAY_MEMORY_LENGTH:
self.replay_memory.popleft()
if len(self.replay_memory_terminal) > REPLAY_MEMORY_LENGTH:
self.replay_memory_terminal.popleft()
if self.t > OBSERVE_LENGTH:
# sample a minibatch to train on
minibatch = random.sample(self.replay_memory, self.sample_batch)
for i in range(BATCH):
minibatch.append(random.choice(self.replay_memory_terminal))
# get the batch variables
s_j_batch = [d[0] for d in minibatch]
a_batch = [d[1] for d in minibatch]
r_batch = [d[2] for d in minibatch]
s_j1_batch = [d[3] for d in minibatch]
y_batch = []
# readout_j1_batch = self.readout.eval(feed_dict={s: s_j1_batch})
readout_j1_batch = self.readout.eval(feed_dict={self.status: s_j1_batch})
for i in range(0, len(minibatch)):
terminal_t = minibatch[i][4]
# if terminal, only equals reward
if terminal_t:
y_batch.append(r_batch[i])
else:
y_batch.append(r_batch[i] + GAMMA * np.max(readout_j1_batch[i]))
# perform gradient step
self.train_step.run(feed_dict={
self.y: y_batch,
self.actions: a_batch,
self.status: s_j_batch}
)
def predict_epsilon_greedy(self, status, epsilon):
if random.random() <= epsilon:
print("----------Random Action----------")
readout_t = self.readout.eval(feed_dict={self.status: [status]})[0]
action_index = random.randrange(self.ACTIONS)
return action_index, readout_t
else:
return self.predict(status)
def predict(self, status):
readout_t = self.readout.eval(feed_dict={self.status: [status]})[0]
action_index = np.argmax(readout_t)
return action_index, readout_t
def load(self, tf_sess, model_path):
# sess.run(tf.initialize_all_variables())
checkpoint = tf.train.get_checkpoint_state(model_path)
if checkpoint and checkpoint.model_checkpoint_path:
self.saver.restore(tf_sess, checkpoint.model_checkpoint_path)
print("Successfully loaded:", checkpoint.model_checkpoint_path)
else:
print("Could not find old network weights")
def save(self, tf_sess, save_path, global_step):
# sess.run(tf.initialize_all_variables())
self.saver.save(tf_sess, save_path, global_step=global_step)
def main():
model = DQN(4)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
85064
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Import
import os
from genenetweaver.gene_net_weaver import GeneNetWeaver
import numpy as np
import argparse
def argument_parser():
parser = argparse.ArgumentParser(
description='Run GeneNetWeaver (GNW) to simulate gene expression data '
'from which gene regulatory networks can be inferred. '
'Please provide the following arguments:')
parser.add_argument('--path_to_jar',
required=True,
help='Enter the path to the GNW jar.')
parser.add_argument('--network',
required=True,
type=str,
help=(
'Choose between "e.coli" and "yeast".' +
'If you prefer to use a custom network,' +
'please provide the path.'
))
parser.add_argument('--path_to_network',
help='Enter the path to the custom network file.',
default=None)
parser.add_argument('--n_samples',
required=True,
type=int,
help='Enter how many samples GNW should simulate.')
parser.add_argument('--n_entities',
required=True,
type=int,
help='Enter the size of the simulated network.')
parser.add_argument('--output_directory',
required=True,
help='Please enter the path to the output directory.')
parser.add_argument('--noise_type',
help=(
'Choose between: "normal",' +
'"lognormal" or "microarray".'
),
default='normal')
parser.add_argument('--simulation_type',
help='Choose between: "ode" and "sde"',
default='ode')
return parser.parse_args()
def return_network_string(name, path):
if name == 'yeast':
# Returns Yeast network provided by GeneNetWeaver
return 'yeast_transcriptional_network_Balaji2006.tsv'
elif name == 'e.coli':
# Returns E.coli network provided by GeneNetWeaver
return 'ecoli_transcriptional_network_regulonDB_6_7.tsv'
else:
# Upload example network
# TODO: Write function to include custom network into file.
return path
def simulate(n_samples, n_entities, gnw_jar, noise, simulation_type,
input_network, outdir):
# Simulation
print('Simulation of network with {} samples and {} entities.'
.format(n_samples, n_entities))
gnw = GeneNetWeaver(gnw_jar=gnw_jar, output_path=outdir)
network, rna, protein = gnw.generate_data_set(
n_samples,
n_entities,
noise_type=noise,
simulation_type=simulation_type,
input_network_string=input_network,
remove_temporary_directory=True
)
# Write files
directory = outdir + 'simulations/sim_net_{}samples_{}entities'.format(
n_samples, n_entities
)
if not os.path.exists(directory):
os.makedirs(directory)
print('Write simulation results to {}.'.format(directory))
with open(directory + '/true_network.tsv', 'w') as network_file:
network.to_csv(network_file, sep='\t', index=True, header=True)
with open(directory + '/rna_data.tsv', 'w') as rna_file:
rna.to_csv(rna_file, sep='\t', index=True, header=True)
with open(directory + '/protein_data.tsv', 'w') as protein_file:
protein.to_csv(protein_file, sep='\t', index=True, header=True)
if __name__ == '__main__':
# Get arguments
args = argument_parser()
# Simulation settings
n_samples = args.n_samples
n_entities = args.n_entities
simulation_type = args.simulation_type
noise_type = args.noise_type
gnw_jar = os.path.abspath(args.path_to_jar)
if not os.path.exists(gnw_jar):
error = '{} does not exist.'.format(gnw_jar)
raise OSError(error)
input_network_string = return_network_string(
args.network, args.path_to_network
)
# Run simulation
simulate(n_samples, n_entities, gnw_jar, noise_type, simulation_type,
input_network_string, args.output_directory)
|
StarcoderdataPython
|
190272
|
from typing import Any, Dict, List, Tuple
from src.db.models.event import Event
from src.db.models.match import Match
from src.db.models.team import Team
from src.db.models.team_event import TeamEvent
from src.db.models.team_match import TeamMatch
from src.db.models.team_year import TeamYear
from src.db.models.year import Year
def create_team_obj(data: Dict[str, Any]) -> Team:
data["wins"] = 0
data["losses"] = 0
data["ties"] = 0
data["count"] = 0
return Team.from_dict(data)
def create_year_obj(data: Dict[str, Any]) -> Year:
return Year.from_dict(data)
def create_team_year_obj(data: Dict[str, Any]) -> TeamYear:
data["wins"] = 0
data["losses"] = 0
data["ties"] = 0
data["count"] = 0
return TeamYear.from_dict(data)
def create_event_obj(data: Dict[str, Any]) -> Event:
return Event.from_dict(data)
def create_team_event_obj(data: Dict[str, Any]) -> TeamEvent:
data["wins"] = 0
data["losses"] = 0
data["ties"] = 0
data["count"] = 0
return TeamEvent.from_dict(data)
def create_match_obj(data: Dict[str, Any]) -> Tuple[Match, List[TeamMatch]]:
data["playoff"] = data["comp_level"] != "qm"
data["red_auto"] = data["red_score_breakdown"]["auto"]
data["red_auto_movement"] = data["red_score_breakdown"]["auto_movement"]
data["red_auto_1"] = data["red_score_breakdown"]["auto_1"]
data["red_auto_2"] = data["red_score_breakdown"]["auto_2"]
data["red_teleop_1"] = data["red_score_breakdown"]["teleop_1"]
data["red_teleop_2"] = data["red_score_breakdown"]["teleop_2"]
data["red_1"] = data["red_score_breakdown"]["1"]
data["red_2"] = data["red_score_breakdown"]["2"]
data["red_teleop"] = data["red_score_breakdown"]["teleop"]
data["red_endgame"] = data["red_score_breakdown"]["endgame"]
data["red_no_fouls"] = data["red_score_breakdown"]["no_fouls"]
data["red_fouls"] = data["red_score_breakdown"]["fouls"]
data["red_rp_1"] = data["red_score_breakdown"]["rp1"]
data["red_rp_2"] = data["red_score_breakdown"]["rp2"]
data["blue_auto"] = data["blue_score_breakdown"]["auto"]
data["blue_auto_movement"] = data["blue_score_breakdown"]["auto_movement"]
data["blue_auto_1"] = data["blue_score_breakdown"]["auto_1"]
data["blue_auto_2"] = data["blue_score_breakdown"]["auto_2"]
data["blue_teleop_1"] = data["blue_score_breakdown"]["teleop_1"]
data["blue_teleop_2"] = data["blue_score_breakdown"]["teleop_2"]
data["blue_1"] = data["blue_score_breakdown"]["1"]
data["blue_2"] = data["blue_score_breakdown"]["2"]
data["blue_teleop"] = data["blue_score_breakdown"]["teleop"]
data["blue_endgame"] = data["blue_score_breakdown"]["endgame"]
data["blue_no_fouls"] = data["blue_score_breakdown"]["no_fouls"]
data["blue_fouls"] = data["blue_score_breakdown"]["fouls"]
data["blue_rp_1"] = data["blue_score_breakdown"]["rp1"]
data["blue_rp_2"] = data["blue_score_breakdown"]["rp2"]
match = Match.from_dict(data)
team_matches: List[TeamMatch] = []
new_data = {"match": data["key"], **data}
for alliance in ["red", "blue"]:
new_data["alliance"] = alliance
for team in data[alliance].split(","):
new_data["team"] = int(team)
team_matches.append(create_team_match_obj(new_data))
return (match, team_matches)
def create_team_match_obj(data: Dict[str, Any]) -> TeamMatch:
return TeamMatch.from_dict(data)
|
StarcoderdataPython
|
177219
|
from http.server import HTTPServer, BaseHTTPRequestHandler
class HelloHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header("Content-Type", "text/html")
self.end_headers()
self.wfile.write("<h1>Hello World!</h1>\n".encode("UTF-8"))
self.wfile.write(" Rainy day today".encode("UTF-8"))
PORT = 8080
if __name__ == "__main__":
server_address = ("", PORT)
server = HTTPServer(server_address, HelloHandler)
server.serve_forever()
|
StarcoderdataPython
|
3201952
|
<reponame>TangJiahui/AC215-Advanced_Practical_Data_Science
from fastapi import APIRouter
# Define Router
router = APIRouter()
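# Hedged usage example (not part of the original snippet): routes can be registered on
# this router and the router later mounted on an application, e.g.:
#
#   @router.get("/status")
#   async def status():
#       return {"status": "ok"}
#
#   from fastapi import FastAPI
#   app = FastAPI()
#   app.include_router(router)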
|
StarcoderdataPython
|
111071
|
<gh_stars>1-10
BASE_HELIX_URL = "https://api.twitch.tv/helix/"
# token url will return a 404 if trailing slash is added
BASE_AUTH_URL = "https://id.twitch.tv/oauth2/token"
TOKEN_VALIDATION_URL = "https://id.twitch.tv/oauth2/validate"
WEBHOOKS_HUB_URL = "https://api.twitch.tv/helix/webhooks/hub"
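# Hedged usage sketch (assumes the third-party `requests` package and valid credentials;
# not part of the original module): an app access token can be requested from
# BASE_AUTH_URL with the OAuth client-credentials grant, e.g.:
#
#   import requests
#   resp = requests.post(BASE_AUTH_URL, params={
#       "client_id": "<client id>",
#       "client_secret": "<client secret>",
#       "grant_type": "client_credentials",
#   })
#   access_token = resp.json()["access_token"]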
|
StarcoderdataPython
|
166809
|
# -*- coding:utf-8 -*-
# Copyright 2015 NEC Corporation. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
from org.o3project.odenos.core.component.network.flow.ofpflow.ofp_flow_action_group_action\
import OFPFlowActionGroupAction
import unittest
class OFPFlowActionGroupActionTest(unittest.TestCase):
def setUp(self):
self.target = OFPFlowActionGroupAction("OFPFlowActionGroupAction",
1234)
def tearDown(self):
self.target = None
def test_constractor(self):
self.assertEqual(self.target._body[self.target.TYPE],
"OFPFlowActionGroupAction")
self.assertEqual(self.target._body[self.target.GROUP_ID],
1234)
def test_group_id(self):
self.assertEqual(self.target.group_id, 1234)
def test_create_from_packed(self):
self.value = {self.target.TYPE: "OFPFlowActionGroupAction",
self.target.GROUP_ID: 4321}
self.result = OFPFlowActionGroupAction.create_from_packed(self.value)
self.assertEqual(self.result._body[self.target.TYPE],
"OFPFlowActionGroupAction")
self.assertEqual(self.result._body[self.target.GROUP_ID],
4321)
def test_packed_object(self):
self.result = self.target.packed_object()
self.assertEqual(self.result[self.target.TYPE],
"OFPFlowActionGroupAction")
self.assertEqual(self.result[self.target.GROUP_ID],
1234)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
1750834
|
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A generator for initializing_coclass.h, which contains a bunch of
repeated code that can't be produced through the preprocessor."""
import sys
from string import Template
HEADER_ = """\
// This file is autogenerated by initializing_coclass.py, do not edit.
#ifndef CEEE_COMMON_INITIALIZING_COCLASS_GEN_INL_
#define CEEE_COMMON_INITIALIZING_COCLASS_GEN_INL_
"""
FOOTER_ = """\
#endif // CEEE_COMMON_INITIALIZING_COCLASS_GEN_INL_
"""
# This template is used to generate a pair of template functions for N
# initialization parameters.
TEMPLATE_ = Template('''\
// Creates a new instance of ImplClass
// @returns S_OK on success, an appropriate error on failure.
// @param new_instance on success returns the newly created and initialized
// ImplClass instance.
// @note *new_instance has zero references, and as a result the use of this
// function is highly discouraged as the existence of *new_instance
// on return of this function is quite brittle.
template <${templ_params}>
static HRESULT CreateInstance(${params}, T** new_instance) {
TImpl* instance;
HRESULT hr = TImpl::CreateInstance(&instance);
if (FAILED(hr))
return hr;
instance->InternalFinalConstructAddRef();
hr = instance->Initialize(${args});
instance->InternalFinalConstructRelease();
if (FAILED(hr)) {
delete instance;
instance = NULL;
}
*new_instance = instance;
return hr;
}
template <class I, ${templ_params}>
static HRESULT CreateInitialized(${params}, I** instance) {
T* new_instance;
HRESULT hr = CreateInstance(${args}, &new_instance);
if (FAILED(hr))
return hr;
hr = new_instance->QueryInterface(__uuidof(I),
reinterpret_cast<void**>(instance));
if (FAILED(hr))
delete new_instance;
return hr;
}
template <class I, ${templ_params}>
static HRESULT CreateInitializedIID(${params}, REFIID iid, I** instance) {
T* new_instance;
HRESULT hr = CreateInstance(${args}, &new_instance);
if (FAILED(hr))
return hr;
hr = new_instance->QueryInterface(iid, reinterpret_cast<void**>(instance));
if (FAILED(hr))
delete new_instance;
return hr;
}
''')
def Emit(outfile, templ, num_args):
"""Emits a template function for num_args arguments"""
mapping = {
'templ_params':
", ".join(["class A%d" % d for d in xrange(1, num_args + 1)]),
'params':
", ".join(["const A%d &a%d" % (d, d) for d in xrange(1, num_args + 1)]),
'args': ", ".join(["a%d" % d for d in xrange(1, num_args + 1)]),
}
outfile.write(templ.substitute(mapping))
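# Hedged illustration (not in the original generator): for num_args=2 the mapping above
# substitutes
#   templ_params -> "class A1, class A2"
#   params       -> "const A1 &a1, const A2 &a2"
#   args         -> "a1, a2"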
NUM_ARGUMENTS = 10
def Main(outfile):
"""Emits the contents of initializing_coclass-inl.h."""
outfile.write(HEADER_)
# do one to NUM_ARGUMENTS arguments
for num_args in xrange(1, 1 + NUM_ARGUMENTS):
Emit(outfile, TEMPLATE_, num_args)
outfile.write(FOOTER_)
if (__name__ == "__main__"):
outfile = open(sys.argv[1], 'w')
Main(outfile)
|
StarcoderdataPython
|
99154
|
<filename>what/utils/logger.py
import logging
def get_logger(name, level=logging.INFO):
logging.basicConfig()
logger = logging.getLogger(name)
logger.handlers = [] # This is the key thing for the question!
# Start defining and assigning your handlers here
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("[%(levelname)s] %(asctime)s %(name)s: %(message)s"))
handler.setLevel(level)
logger.handlers = [handler]
logger.propagate = False
logger.setLevel(level)
return logger
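# Hedged usage example (not part of the original module):
#
#   log = get_logger("what.example", level=logging.DEBUG)
#   log.debug("goes to the stream handler with the custom format")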
|
StarcoderdataPython
|
1619188
|
<reponame>model-checking/cbmc-viewer<filename>tests/bin/arguments.py<gh_stars>0
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Methods for common command-line argument parsing."""
import argparse
import logging
def create_parser(options=None, description=None):
"""Create a parser for command line arguments."""
options = options or []
description = description or ""
flags = [option.get('flag') for option in options]
if '--verbose' not in flags:
options.append({'flag': '--verbose', 'action': 'store_true', 'help': 'Verbose output'})
if '--debug' not in flags:
options.append({'flag': '--debug', 'action': 'store_true', 'help': 'Debug output'})
parser = argparse.ArgumentParser(description=description)
for option in options:
flag = option.pop('flag')
parser.add_argument(flag, **option)
return parser
def configure_logging(args):
"""Configure logging level based on command line arguments."""
# Logging is configured by first invocation of basicConfig
fmt = '%(levelname)s: %(message)s'
if args.debug:
logging.basicConfig(level=logging.DEBUG, format=fmt)
return
if args.verbose:
logging.basicConfig(level=logging.INFO, format=fmt)
return
logging.basicConfig(format=fmt)
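# Hedged usage sketch (not part of the original module); the extra '--report' option is
# purely illustrative:
#
#   parser = create_parser(
#       options=[{'flag': '--report', 'help': 'Path to the report directory'}],
#       description='Example tool')
#   args = parser.parse_args(['--report', 'out', '--verbose'])
#   configure_logging(args)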
|
StarcoderdataPython
|
110042
|
import datetime as _dt
import json as _json
import hashlib as _hashlib
class Blockchain:
def __init__(self) -> None:
self.chain = list()
genesis_block = self._create_block(
data="genesis block", proof=1, previous_hash="0", index=0
)
self.chain.append(genesis_block)
def _to_digest(
self, new_proof: int, previous_proof: int, index: int
) -> bytes:
to_digest = str(new_proof**2 - previous_proof + index)
return to_digest.encode()
def _proof_of_work(self, previous_proof: int, index: int, data: str) -> int:
new_proof = 1
check_proof = False
while not check_proof:
to_digest = self._to_digest(
new_proof=new_proof,
previous_proof=previous_proof,
index=index,
)
hash_value = _hashlib.sha256(to_digest).hexdigest()
if hash_value[:4] == "0000":
check_proof = True
else:
new_proof += 1
return new_proof
def get_previous_block(self) -> dict:
return self.chain[-1]
def mine_block(self, data: str) -> dict:
previous_block = self.get_previous_block()
previous_proof = previous_block["proof"]
index = previous_block["index"] + 1
proof = self._proof_of_work(previous_proof, index, data)
previous_hash = self._hash(block=previous_block)
block = self._create_block(
data=data, proof=proof, previous_hash=previous_hash, index=index
)
self.chain.append(block)
return block
def _hash(self, block: dict):
encoded_block = _json.dumps(block, sort_keys=True).encode()
return _hashlib.sha256(encoded_block).hexdigest()
def _create_block(self, data: str, proof: int, previous_hash: str, index: int) -> dict:
block = {
"index": index,
"timestamp": str(_dt.datetime.utcnow()),
"data": data,
"proof": proof,
"previous_hash": previous_hash,
}
return block
def _is_chain_valid(self) -> bool:
current_block = self.chain[0]
block_index = 1
while block_index < len(self.chain):
next_block = self.chain[block_index]
if next_block["previous_hash"] != self._hash(current_block):
return False
current_proof = current_block["proof"]
next_index = next_block["index"]
next_proof = next_block["proof"]
hash_value = _hashlib.sha256(
self._to_digest(
new_proof=next_proof,
previous_proof=current_proof,
index=next_index
)
).hexdigest()
if hash_value[:4] != "0000":
return False
current_block = next_block
block_index += 1
return True
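# Hedged usage sketch (not part of the original module): mine a couple of blocks and
# validate the chain end to end.
if __name__ == "__main__":
    chain = Blockchain()
    chain.mine_block(data="first block")
    chain.mine_block(data="second block")
    print(_json.dumps(chain.get_previous_block(), indent=2))
    print("chain valid:", chain._is_chain_valid())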
|
StarcoderdataPython
|
3237462
|
"""
This python code generates figure S3 (appendix) in the paper.
"""
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import random
import math
import os
import glob
from collections import Counter
import itertools
# building information: floor 1-25, pax destination 2-25
numFloor = 24 # total number of pax destinations
numPax = 2750 # total number of pax during rush hour
timeInterval = 10 # we update the system every 10 seconds
# info for pax arrivals
# number of simulations
numFile = 100
# floor ranges of each queue if we are queue splitting, default []
queueDest = []
# can be anything from 0 to 100 - usually we use 0, 20, 40, 60, 80, 100
WtW = 0
# create arrival files, will be stored in the same folder we run the code
# if we want different random simulations, uncomment the lines below
"""
createOtherArrivalFiles(numPax,numFloor,WtW,numFile)
fileName = str(numPax)+"_"+str(numFloor)+"_"+str(WtW)+"_"
"""
# name of the files used for the simulations,
# these files will yield exactly the figures in the paper
# if we want new random files, comment the line below
# get current working directory
parent_dir = str(os.getcwd())
# get csv files in subdirectory for this setup
all_files = glob.glob(parent_dir+'/data/2750pax_large/*')
# put together all pax configurations
paxInfo = [all_files,numFile,queueDest,WtW]
# elevator info
# total number of elevators
elevNumTotal = 14
# speed of elevator to traverse one floor
elevSpeed = 1.4
# multiplier to account for intermediate pax entering and leaving
elevSpeedMultiplier = 1.3
# time to board an elevator, depends on num of pax entering the elev
# if there is one pax, they take 15s to board + 2s for every additional pax
elevBoardTime = [15, 17, 19, 21]
# time to deboard an elevator, depends on num of pax exiting the elev
# if there is one pax, they take 15s to deboard + 2s for every additional pax
elevStopTime = [15, 17, 19, 21]
# the range of floors each elevator can serve; the first 7 elevators serve floors 2-13
elevServiceRange = dict.fromkeys(range(7),list(range(2,14,1)))
# the next 7 elevators serve floors 14-25
for elev in range(7,14,1):
elevServiceRange[elev] = list(range(14,26,1))
# capacity of the elevators
elevCap = 4
# put together all elev configurations
elevInfo = [numFloor, elevNumTotal,elevSpeed,elevSpeedMultiplier, elevBoardTime, elevStopTime, elevServiceRange, elevCap]
"Compute results of 4 Queue Split- equal split"
intervention = "FCFSQueueSplit"
# set the floor ranges for each queue, here it is 2 queue split
queueDest = [list(range(2,8,1)),list(range(8,14,1)),list(range(14,20,1)),list(range(20,26,1))]
# update paxInfo
paxInfo[2] = queueDest
# run the intervention
(waitTime,avgqueue,timequeue,load,tripTime,buttonPresses,avgEachQueue,timeEachQueue) = run_InterventionOnMultipleFiles(paxInfo, elevInfo, intervention)
# create list of wait time
ResultWaitTime = list(waitTime)
# round wait time to nearest second.
waitTime = [int(np.rint(number)) for number in waitTime]
# round trip time to the nearest second.
tripTime = [int(np.rint(number)) for number in tripTime]
# store results
waitTime_FCFSQueueSpliteven_4Q = waitTime
avgqueue_FCFSQueueSpliteven_4Q = avgqueue
timequeue_FCFSQueueSpliteven_4Q = timequeue
timeEachQueue_FCFSQueueSpliteven_4Q = timeEachQueue
"Compute results of 4 Queue Split- unequal split"
intervention = "FCFSQueueSplit"
# set the floor ranges for each queue, here it is 2 queue split
queueDest = [list(range(2,8,1)),list(range(8,14,1)),list(range(14,22,1)),list(range(22,26,1))]
# update paxInfo
paxInfo[2] = queueDest
# run the intervention
(waitTime,avgqueue,timequeue,load,tripTime,buttonPresses,avgEachQueue,timeEachQueue) = run_InterventionOnMultipleFiles(paxInfo, elevInfo, intervention)
# create list of wait time
ResultWaitTime = list(waitTime)
# round wait time to nearest second.
waitTime = [int(np.rint(number)) for number in waitTime]
# round trip time to the nearest second.
tripTime = [int(np.rint(number)) for number in tripTime]
# store results
waitTime_FCFSQueueSplitnoteven_4Q = waitTime
avgqueue_FCFSQueueSplitnoteven_4Q = avgqueue
timequeue_FCFSQueueSplitnoteven_4Q = timequeue
timeEachQueue_FCFSQueueSplitnoteven_4Q = timeEachQueue
"Plots"
"Length of diff queues if floor ranges split evenly"
# queue length of each queue in the lobby vs time
fig = plt.figure(figsize=(15, 5))
ax = plt.subplot(1,2,1)
timeInterval=10
d=timeEachQueue_FCFSQueueSpliteven_4Q
queueDestLabels=['Floors 2-7', 'Floors 8-13','Floors 14-19','Floors 20-25']
colorList = ['orange','green','maroon','magenta']
for j in range(4):
ax.plot(list(range(0,timeInterval*len(d[j]), timeInterval)), list(d[j]),label=queueDestLabels[j],color=colorList[j])
# plt.xticks(np.arange(0,9000,1800))
plt.xticks([0,1800,3600,5400,7100],['8:00 AM', '8:30 AM','9:00 AM','9:30 AM','10:00 AM'])
plt.xlim(-50,7101)
plt.xlabel('Time')
plt.ylabel('Queue Length in the lobby')
#plt.title('Length of Each Queue', y = -0.25)
plt.ylim(ymin = 0)
ax.text(-0.1, 1.1,'A', transform=ax.transAxes,
size=20, weight='bold')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
plt.legend(loc='upper left', bbox_to_anchor=(0,1),fontsize='x-large',shadow=True, fancybox=True)
# Put a legend to the right of the current axis
#ax.legend(loc='center left', bbox_to_anchor=(1, 0.5),fontsize='x-large')
"Plots"
"Length of diff queues if floor ranges split UNevenly"
# queue length of each queue in the lobby vs time
ax = plt.subplot(1,2,2)
timeInterval=10
d=timeEachQueue_FCFSQueueSplitnoteven_4Q
queueDestLabels=['Floors 2-7', 'Floors 8-13','Floors 14-21','Floors 22-25']
colorList = ['orange','green','maroon','magenta']
for j in range(4):
ax.plot(list(range(0,timeInterval*len(d[j]), timeInterval)), list(d[j]),label=queueDestLabels[j],color=colorList[j])
# plt.xticks(np.arange(0,9000,1800))
plt.xticks([0,1800,3600,5400,7100],['8:00 AM', '8:30 AM','9:00 AM','9:30 AM','10:00 AM'])
plt.xlim(-50,7101)
plt.xlabel('Time')
plt.ylabel('Queue Length in the lobby')
#plt.title('Length of Each Queue', y = -0.25)
plt.ylim(ymin = 0,ymax=64)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.text(-0.1, 1.1,'B', transform=ax.transAxes,
size=20, weight='bold')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# Put a legend to the right of the current axis
#ax.legend(loc='center left', bbox_to_anchor=(1, 0.5),fontsize='x-large')
plt.legend(loc='upper left', bbox_to_anchor=(0,1),fontsize='x-large',shadow=True, fancybox=True)
# "Compute results of FCFS"
# intervention = "FCFS"
# # run the intervention
# (waitTime,avgqueue,timequeue,load,tripTime,buttonPresses,avgEachQueue,timeEachQueue) = run_InterventionOnMultipleFiles(paxInfo, elevInfo, intervention)
# # create list of wait time
# ResultWaitTime = list(waitTime)
# # round wait time to nearest second.
# waitTime = [int(np.rint(number)) for number in waitTime]
# # round trip time to the nearest second.
# tripTime = [int(np.rint(number)) for number in tripTime]
# # store results
# waitTime_FCFS = waitTime
# avgqueue_FCFS = avgqueue
# timequeue_FCFS = timequeue
# "Compute results of Cohorting"
# intervention = "CohortFCFS"
# # run the intervention
# (waitTime,avgqueue,timequeue,load,tripTime,buttonPresses,avgEachQueue,timeEachQueue) = run_InterventionOnMultipleFiles(paxInfo, elevInfo, intervention)
# # create list of wait time
# ResultWaitTime = list(waitTime)
# # round wait time to nearest second.
# waitTime = [int(np.rint(number)) for number in waitTime]
# # round trip time to the nearest second.
# tripTime = [int(np.rint(number)) for number in tripTime]
# # store results
# waitTime_CohortFCFS = waitTime
# avgqueue_CohortFCFS = avgqueue
# timequeue_CohortFCFS = timequeue
# timeInterval=10 # we update the system every 10 seconds
# d = list(timequeue_FCFS)
# plt.plot(list(range(0,timeInterval*len(d), timeInterval)), d,color='black',label='Default FCFS')
# d = list(timequeue_CohortFCFS)
# plt.plot(list(range(0,timeInterval*len(d), timeInterval)), d,color='red',label ='Cohorting')
# # d = list(timequeue_FCFSQueueSplit)
# # plt.plot(list(range(0,timeInterval*len(d), timeInterval)), d,color='blue',label='2 Queue Split')
# plt.xticks([0,1800,3600,5400,7100],['8:00 AM', '8:30 AM','9:00 AM','9:30 AM','10:00 AM'])
# plt.xlim(-50,7101)
# plt.xlabel('Time')
# plt.ylabel('Queue Length')
# plt.title('Queue Length vs Time')
# plt.legend(loc='upper left', bbox_to_anchor=(0,1),fontsize='x-large', shadow=True, fancybox=True)
# plt.show()
|
StarcoderdataPython
|
1748445
|
"""Platform Models."""
from marshmallow import fields, Schema
from marshmallow.validate import OneOf
from ..enums import *
from ..models.BaseSchema import BaseSchema
from .TagSourceSchema import TagSourceSchema
class TagSchema(BaseSchema):
# Content swagger.json
name = fields.Str(required=False)
url = fields.Str(required=False)
type = fields.Str(required=False)
sub_type = fields.Str(required=False)
_id = fields.Str(required=False)
position = fields.Str(required=False)
attributes = fields.Dict(required=False)
content = fields.Str(required=False)
__source = fields.Nested(TagSourceSchema, required=False)
|
StarcoderdataPython
|
1632021
|
<reponame>Photon26/wrs-main-210414
import numpy as np
from . import util
from . import transformations
def sample_surface(mesh, count):
"""
Sample the surface of a mesh, returning the specified number of points
For individual triangle sampling uses this method:
http://mathworld.wolfram.com/TrianglePointPicking.html
:param mesh: a Trimesh instance
:param count: number of points to return
:return:
author: revised by weiwei
date: 20200120
"""
# len(mesh.faces) float array of the areas of each face of the mesh
area = mesh.area_faces
# total area (float)
area_sum = np.sum(area)
# cumulative area (len(mesh.faces))
area_cum = np.cumsum(area)
face_pick = np.random.random(count) * area_sum
face_index = np.searchsorted(area_cum, face_pick)
# pull triangles into the form of an origin + 2 vectors
tri_origins = mesh.triangles[:, 0]
tri_vectors = mesh.triangles[:, 1:].copy()
tri_vectors -= np.tile(tri_origins, (1, 2)).reshape((-1, 2, 3))
# pull the vectors for the faces we are going to sample from
tri_origins = tri_origins[face_index]
tri_vectors = tri_vectors[face_index]
# randomly generate two 0-1 scalar components to multiply edge vectors by
random_lengths = np.random.random((len(tri_vectors), 2, 1))
# points will be distributed on a quadrilateral if we use 2 0-1 samples
# if the two scalar components sum less than 1.0 the point will be
# inside the triangle, so we find vectors longer than 1.0 and
# transform them to be inside the triangle
random_test = random_lengths.sum(axis=1).reshape(-1) > 1.0
random_lengths[random_test] -= 1.0
random_lengths = np.abs(random_lengths)
# multiply triangle edge vectors by the random lengths and sum
points_vector = (tri_vectors * random_lengths).sum(axis=1)
# finally, offset by the origin to generate
# (n,3) points in space on the triangle
points = points_vector + tri_origins
return points, face_index
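# Hedged self-check (not part of the original module): the fold-back trick used above can
# be verified in isolation with plain numpy; after the transform every pair of barycentric
# scalars sums to at most 1.0, so sampled points always stay inside their triangle.
def _check_fold_back(count=1000):
    lengths = np.random.random((count, 2, 1))
    outside = lengths.sum(axis=1).reshape(-1) > 1.0
    lengths[outside] -= 1.0
    lengths = np.abs(lengths)
    return bool((lengths.sum(axis=1) <= 1.0).all())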
# def sample_surface_withfaceid(mesh, count):
# '''
# Sample the surface of a mesh, returning the specified number of points
#
# For individual triangle sampling uses this method:
# http://mathworld.wolfram.com/TrianglePointPicking.html
#
# Arguments
# ---------
# mesh: Trimesh object
# count: number of points to return
#
# Returns
# ---------
# samples: (count,3) points in space on the surface of mesh
#
# '''
#
# # len(mesh.faces) float array of the areas of each face of the mesh
# area = mesh.area_faces
# # total area (float)
# area_sum = np.sum(area)
# # cumulative area (len(mesh.faces))
# area_cum = np.cumsum(area)
# face_pick = np.random.random(count) * area_sum
# face_index = np.searchsorted(area_cum, face_pick)
#
# # pull triangles into the form of an origin + 2 vectors
# tri_origins = mesh.triangles[:, 0]
# tri_vectors = mesh.triangles[:, 1:].copy()
# tri_vectors -= np.tile(tri_origins, (1, 2)).reshape((-1, 2, 3))
#
# # pull the vectors for the faces we are going to sample from
# tri_origins = tri_origins[face_index]
# tri_vectors = tri_vectors[face_index]
#
# # randomly generate two 0-1 scalar components to multiply edge vectors by
# random_lengths = np.random.random((len(tri_vectors), 2, 1))
#
# # points will be distributed on a quadrilateral if we use 2 0-1 samples
# # if the two scalar components sum less than 1.0 the point will be
# # inside the triangle, so we find vectors longer than 1.0 and
# # transform them to be inside the triangle
# random_test = random_lengths.sum(axis=1).reshape(-1) > 1.0
# random_lengths[random_test] -= 1.0
# random_lengths = np.abs(random_lengths)
#
# # multiply triangle edge vectors by the random lengths and sum
# sample_vector = (tri_vectors * random_lengths).sum(axis=1)
#
# # finally, offset by the origin to generate
# # (n,3) points in space on the triangle
# samples = sample_vector + tri_origins
#
# return samples, face_index
def sample_volume(mesh, count):
"""
Use rejection sampling to produce points randomly
distributed in the volume of a mesh
:param mesh:
:param count:
:return:
author: revised by weiwei
date: 20210120
"""
points = (np.random.random((count, 3)) * mesh.extents) + mesh.bounds[0]
contained = mesh.contains(points)
samples = points[contained][:count]
return samples
def sample_box_volume(extents,
count,
transform=None):
"""
Use rejection sampling to produce points randomly
distributed in the volume of a given box
:param extents: 1x3 nparray
:param count: npoints
:param transform: homogeneous transformation matrix
:return: nx3 points in the requested volume
author: revised by weiwei
date: 20210120
"""
samples = np.random.random((count, 3)) - .5
samples *= extents
if transform is not None:
samples = transformations.transform_points(samples,
transform)
return samples
def sample_surface_even(mesh, count, radius=None):
"""
Sample the surface of a mesh, returning samples which are
approximately evenly spaced.
Note that since it is using rejection sampling it may return
fewer points than requested (i.e. n < count). If this is the
case a log.warning will be emitted.
:param mesh:
:param count:
:param radius:
:return:
author: revised by weiwei
date: 20210120
"""
from .points import remove_close
# guess radius from area
if radius is None:
radius = np.sqrt(mesh.area / (3 * count))
# get points on the surface
points, index = sample_surface(mesh, count * 3)
# remove the points closer than radius
points, mask = remove_close(points, radius)
# we got all the samples we expect
if len(points) >= count:
return points[:count], index[mask][:count]
# warn if we didn't get all the samples we expect
# util.log.warning('only got {}/{} samples!'.format(len(points), count)) TODO
return points, index[mask]
def sample_surface_sphere(count):
"""
Correctly pick random points on the surface of a unit sphere
Uses this method:
http://mathworld.wolfram.com/SpherePointPicking.html
:param count:
:return nx3 points on a unit sphere
"""
# get random values 0.0-1.0
u, v = np.random.random((2, count))
# convert to two angles
theta = np.pi * 2 * u
phi = np.arccos((2 * v) - 1)
# convert spherical coordinates to cartesian
points = util.spherical_to_vector(np.column_stack((theta, phi)))
return points
|
StarcoderdataPython
|
1733739
|
<gh_stars>1-10
'''This is a reproduction of the IRNN experiment
with pixel-by-pixel sequential MNIST in
"A Simple Way to Initialize Recurrent Networks of Rectified Linear Units"
by <NAME>, <NAME>, <NAME>
arXiv:1504.00941v2 [cs.NE] 7 Apr 2015
http://arxiv.org/pdf/1504.00941v2.pdf
Optimizer is replaced with RMSprop which yields more stable and steady
improvement.
Reaches 0.93 train/test accuracy after 900 epochs
(which roughly corresponds to 1687500 steps in the original paper.)
'''
from __future__ import print_function
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import SimpleRNN
from keras.initializations import normal, identity
from keras.optimizers import RMSprop
from keras.utils import np_utils
batch_size = 32
nb_classes = 10
nb_epochs = 200
hidden_units = 100
learning_rate = 1e-6
clip_norm = 1.0
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(X_train.shape[0], -1, 1)
X_test = X_test.reshape(X_test.shape[0], -1, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
print('Evaluate IRNN...')
model = Sequential()
model.add(SimpleRNN(output_dim=hidden_units,
init=lambda shape, name: normal(shape, scale=0.001, name=name),
inner_init=lambda shape, name: identity(shape, scale=1.0, name=name),
activation='relu',
input_shape=X_train.shape[1:]))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
rmsprop = RMSprop(lr=learning_rate)
model.compile(loss='categorical_crossentropy',
optimizer=rmsprop,
metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epochs,
verbose=1, validation_data=(X_test, Y_test))
scores = model.evaluate(X_test, Y_test, verbose=0)
print('IRNN test score:', scores[0])
print('IRNN test accuracy:', scores[1])
|
StarcoderdataPython
|
119680
|
from django.contrib.auth.mixins import UserPassesTestMixin
from django.urls import reverse_lazy
class UserIsObjectUserMixIn(UserPassesTestMixin):
def test_func(self):
object = self.get_object()
return object.user == self.request.user
|
StarcoderdataPython
|
3273084
|
from ptrlib import *
def alloc(size, data):
sock.recvuntil("> ")
sock.sendline("1")
sock.recvuntil("> ")
sock.sendline(str(size))
sock.recvuntil("> ")
sock.sendline(data)
def free():
sock.recvuntil("> ")
sock.sendline("2")
def secret():
sock.recvuntil("> ")
sock.sendline("3")
elf = ELF("./aria-writer")
libc = ELF("./libc-2.27.so")
#sock = Process("./aria-writer")
sock = Socket("pwn.hsctf.com", 2222)
plt_puts = 0x400750
# name
sock.recvuntil("> ")
sock.sendline("/bin/sh")
# double free for shell
alloc(0x38, "A")
free()
free()
alloc(0x38, p64(elf.got("write")))
alloc(0x38, "")
# double free for libc leak
alloc(0x28, "B")
free()
free()
alloc(0x28, p64(elf.symbol("global")))
alloc(0x28, "")
alloc(0x18, "C")
free()
free()
alloc(0x18, p64(elf.got("free")))
alloc(0x18, "")
# free@got = puts@plt
alloc(0x18, p64(plt_puts))
# global = puts@got
alloc(0x28, p64(elf.got("puts")))
# libc leak
free()
sock.recvline()
addr_puts = u64(sock.recvline().rstrip())
libc_base = addr_puts - libc.symbol("puts")
logger.info("libc base = " + hex(libc_base))
# write@got = one gadget
one_gadget = libc_base + 0x4f322
alloc(0x38, p64(one_gadget))
# get the shell!
secret()
sock.interactive()
|
StarcoderdataPython
|
76784
|
<reponame>npsand/ElevenClock
# INSTRUCTIONS
# Translate the text and write it between the "
# EXAMPLE: original -> "This text is in english: value {0}"
# translation -> "Aquest text està en anglès: valor {0}"
# If you see sth like {0}, {1}, maintain it on the translated sentence
# Meke special attention to elements like ":", etc.
lang_3_5_0 = {
"Tooltip Appearance:": "",
"Tooltip's font, font size, font color and background": "",
"Disable tooltip's blurry background": "",
"Sync time with the internet": "",
"Internet date and time": "",
"Select internet time provider, change sync frequency": "",
"Enable internet time sync": "",
"Paste a URL from the world clock api or equivalent": "",
"Help": "",
"Internet sync frequency": "",
"10 minutes": "",
"30 minutes": "",
"1 hour": "",
"2 hours": "",
"4 hours": "",
"10 hours": "",
"24 hours": "",
}
lang_3_4_0 = lang_3_5_0 | {
"Show calendar": "",
"Disabled": "",
"Open quick settings": "",
"Show desktop": "",
"Open run dialog": "",
"Open task manager": "",
"Open start menu": "",
"Open search menu": "",
"Change task": "",
"Change the action done when the clock is clicked": "",
}
lang_3_3_2 = lang_3_4_0 | {
"ElevenClock Updater": "",
"ElevenClock is downloading updates": "",
"ElevenClock has updated to version {0} successfully\nPlease see GitHub for the changelog": "",
"Customize the clock on Windows 11": "",
"Disable the new instance checker method": "",
"Import settings from a local file": "",
"Export settings to a local file": "",
"Export": "",
"Import": "",
}
lang_3_3_1 = lang_3_3_2 | {
"Invalid time format\nPlease follow the\nC 1989 Standards": "",
"Nothing to preview": "",
"Invalid time format\nPlease modify it\nin the settings": "",
"Disable the tooltip shown when the clock is hovered": ""
}
lang_3_3 = lang_3_3_1 | {
"Custom format rules:": "",
"Any text can be placed here. To place items such as date and time, please use the 1989 C standard. More info on the following link": "",
"Python date and time formats": "",
"To disable the zero-padding effect, add a # in between the % and the code: non-zero-padded hours would be %#H, and zero-padded hours would be %H": "", # Here please don't modify the %H and %#H values
"Click on Apply to apply and preview the format": "",
"Apply": "",
"If you don't understand what is happening, please uncheck the checkbox over the text area": "",
"Set a custom date and time format": "",
"(for advanced users only)": "",
"Move this clock to the left": "",
"Move this clock to the top": "",
"Move this clock to the right": "",
"Move this clock to the bottom": "",
"Restore horizontal position": "",
"Restore vertical position": "",
}
lang_3_2_1 = lang_3_3 | {
"Open online help to troubleshoot problems": "Apri la guida in linea per risolvere i problemi",
"Reset ElevenClock preferences to defaults": "Ripristina le preferenze di ElevanClock per i valori predefiniti",
"Specify a minimum width for the clock": "Specificare una larghezza minima per l'orologio",
"Search on the settings": "Cerca sulle impostazioni",
"No results were found": "Nessun risultato trovato",
}
lang_3_2 = lang_3_2_1 | {
"Use system accent color as background color": "Utilizzare il colore di accento del sistema come colore di sfondo",
"Check only the focused window on the fullscreen check": "Controllare solo la finestra focalizzata sul controllo completo",
"Clock on monitor {0}": "Orologio sul monitor {0}",
"Move to the left": "Passa a sinistra",
"Show this clock on the left": "Mostra questo orologio a sinistra",
"Show this clock on the right": "Mostra questo orologio a destra",
"Restore clock position": "Ripristina la posizione dell'orologio",
}
lang_3_1 = lang_3_2 | {
# The initial of the word week in your language: W for week, S for setmana, etc.
"W": "S",
"Disable the notification badge": "Disabilita il distintivo della notifica",
"Override clock default height": "Override Orologio Altezza predefinita",
"Adjust horizontal clock position": "Regola la posizione dell'orologio orizzontale",
"Adjust vertical clock position": "Regola la posizione dell'orologio verticale",
"Export log as a file": "Esporta log come file",
"Copy log to clipboard": "Copia log negli Appunti",
"Announcements:": "Annunci:",
"Fetching latest announcement, please wait...": "Recuperando l'ultimo annuncio, per favore aspetta ...",
"Couldn't load the announcements. Please try again later": "Non poteva caricare gli annunci. Per favore riprova più tardi",
"ElevenClock's log": "Log ElevenClock",
"Pick a color": "Scegli un colore"
}
lang_3 = lang_3_1 | {
"Hide the clock during 10 seconds when clicked": "Nascondi l'orologio durante 10 secondi quando cliccato",
"Enable low-cpu mode": "Abilita la modalità a bassa cpu",
"You might lose functionalities, like the notification counter or the dynamic background": "Potresti perdere funzionalità, come il contatore di notifica o lo sfondo dinamico",
"Clock position and size:": "Posizione e dimensione dell'orologio:",
"Clock size preferences, position offset, clock at the left, etc.": "Preferenze di dimensioni dell'orologio, offset di posizione, orologio a sinistra, ecc.",
"Reset monitor blacklisting status": "Reset Monitor Blacklisting Status",
"Reset": "Ripristina",
"Third party licenses": "Licenze di terze parti",
"View": "Visualizza",
"ElevenClock": "ElevenClock",
"Monitor tools": "Monitor strumenti",
"Blacklist this monitor": "Blacklist questo monitor.",
"Third Party Open-Source Software in Elevenclock {0} (And their licenses)": "",
"ElevenClock is an Open-Source application made with the help of other libraries made by the community:": "",
"Ok": "",
"More Info": "",
"About Qt": "",
"Success": "",
"The monitors were unblacklisted successfully.": "",
"Now you should see the clock everywhere": "",
"Ok": "",
"Blacklist Monitor": "",
"Blacklisting a monitor will hide the clock on this monitor permanently.": "",
"This action can be reverted from the settings window, under <b>Clock position and size</b>": "",
"Are you sure do you want to blacklist the monitor \"{0}\"?": "",
"Yes": "",
"No": "",
}
lang_2_9_2 = lang_3 | {
"Reload log": "Ricarica log",
"Do not show the clock on secondary monitors": "Non mostrare l'orologio sui monitor secondari",
"Disable clock taskbar background color (make clock transparent)": "Disabilita il colore dello sfondo della barra delle applicazioni dell'orologio (crea orologio trasparente)",
"Open the welcome wizard": "Apri il wizard di benvenuto",
" (ALPHA STAGE, MAY NOT WORK)": "(Fase alfa, potrebbe non funzionare)",
"Welcome to ElevenClock": "Benvenuti in ElevenClock.",
"Skip": "Salta",
"Start": "Inizio",
"Next": "Prossima",
"Finish": "Fine",
}
lang_2_9 = lang_2_9_2 | {
"Task Manager": "Task Manager",
"Change date and time": "Cambia data e ora",
"Notification settings": "Impostazioni di notifica",
"Updates, icon tray, language": "Aggiornamenti, Icona Vassoio, Lingua",
"Hide extended options from the clock right-click menu (needs a restart to be applied)": "Nascondi opzioni estese dal menu del tasto destro del mouse dell'orologio (ha bisogno di un riavvio da applicare)",
"Fullscreen behaviour, clock position, 1st monitor clock, other miscellanious settings": "Comportamento a schermo intero, posizione dell'orologio, 1 ° monitor clock, altre impostazioni miscellanee",
'Add the "Show Desktop" button on the left corner of every clock': 'Aggiungi il pulsante "Mostra desktop" sull\'angolo sinistro di ogni orologio',
'You might need to set a custom background color for this to work. More info <a href="{0}" style="color:DodgerBlue">HERE</a>': '',
"Clock's font, font size, font color and background, text alignment": "Carattere dell'orologio, dimensione del carattere, colore del carattere e sfondo, allineamento del testo",
"Date format, Time format, seconds,weekday, weeknumber, regional settings": "Formato data, formato orario, secondi, giorni feriali, Numero della settimana, Impostazioni regionali",
"Testing features and error-fixing tools": "Caratteristiche di prova e strumenti di fissaggio degli errori",
"Language pack author(s), help translating ElevenClock": "Autore del pacchetto di lingue, aiuto per la traduzioneElevenClock ",
"Info, report a bug, submit a feature request, donate, about": "Info, segnala un bug, inviare una richiesta di funzionalità, donare, circa",
"Log, debugging information": "Log, informazioni di debug",
}
lang_2_8 = lang_2_9 | {
"Force the clock to be at the top of the screen": "Forza l'orologio ad essere nella parte superiore dello schermo",
"Show the clock on the primary screen": "Mostra l'orologio sullo schermo principale",
"Use a custom font color": "Usa un colore personalizzato per il font",
"Use a custom background color": "Usa un colore personalizzato per lo sfondo",
"Align the clock text to the center": "Allinea il testo dell'orologio al centro",
"Select custom color": "Seleziona un colore personalizzato",
"Hide the clock when a program occupies all screens": "Nascondi l'orologio quando un programma occupa tutte le schermate",
}
lang2_7_bis = lang_2_8 | {
"Use a custom font": "Usa un font personalizzato",
"Use a custom font size": "Usa una dimensione del font personalizzata",
"Enable hide when multi-monitor fullscreen apps are running": "Abilita Nascondi quando sono in esecuzione app a schermo intero multi-monitor",
"<b>{0}</b> needs to be enabled to change this setting": "<b>{0}</b> deve essere abilitato per modificare questa impostazione",
"<b>{0}</b> needs to be disabled to change this setting": "<b>{0}</b> deve essere disabilitato per modificare questa impostazione",
}
lang2_7 = lang2_7_bis | {
" (This feature has been disabled because it should work by default. If it is not, please report a bug)": "(Questa funzionalità è stata disabilitata perchè dovrebbe funzionare di default. Se non funziona, segnala un bug)",
"ElevenClock's language": "Lingua di ElevenClock"
}
lang2_6 = lang2_7 | {
"About Qt6 (PySide6)": "Circa Qt6 (PySide6)",
"About": "Circa",
"Alternative non-SSL update server (This might help with SSL errors)": "Server di aggiornamento non SSL alternativo (potrebbe aiutare con gli errori SSL)",
"Fixes and other experimental features: (Use ONLY if something is not working)": "Correzioni e altre funzionalità sperimentali: (Usale SOLO se qualcosa non funziona)",
"Show week number on the clock": "Mostra il numero della settimana",
}
lang2_5 = lang2_6 | {
"Hide the clock when RDP Client or Citrix Workspace are running": "Nascondi l'orologio quando RDP Client o Citrix Workspace sono in esecuzione",
"Clock Appearance:": "Aspetto dell'orologio",
"Force the clock to have black text": "Forza l'orologio ad avere il testo scuro",
" - It is required that the Dark Text checkbox is disabled": "È richiesto che la casella Testo Scuro sia disabilitata",
"Debbugging information:": "Informazioni di debug",
"Open ElevenClock's log": "Apri i log di ElevenClock",
}
lang2_4 = lang2_5 | {
# Added text in version 2.4
"Show the clock on the primary screen (Useful if clock is set on the left)": "Mostra l'orologio sullo schermo principale (Utile se l'orologio è impostato a sinistra)",
"Show weekday on the clock": "Visualizza il giorno della settimana",
}
lang2_3 = lang2_4 | {
# Context menu
"ElevenClock Settings": "Impostazioni ElevenClock", # Also settings title
"Reload Clocks": "Ricarica",
"ElevenClock v{0}": "ElevenClock v{0}",
"Restart ElevenClock": "Riavvia ElevenClock",
"Hide ElevenClock": "Nascondi ElevenClock",
"Quit ElevenClock": "Esci",
# General settings section
"General Settings:": "Impostazioni generali:",
"Automatically check for updates": "Rileva automaticamente gli aggiornamenti",
"Automatically install available updates": "Installa automaticamente gli aggiornamenti",
"Enable really silent updates": "Abilita aggiornamenti silenziosi",
"Bypass update provider authenticity check (NOT RECOMMENDED, AT YOUR OWN RISK)": "Ignora il controllo di autenticità del provider di aggiornamento (NON RACCOMANDATO, A TUO RISCHIO)",
"Show ElevenClock on system tray": "Visualizza ElevenClock sulla barra di sistema",
"Alternative clock alignment (may not work)": "Allineamento alternativo dell'orologio (potrebbe non funzionare)",
"Change startup behaviour": "Cambia il comportamento in avvio",
"Change": "Cambia",
"<b>Update to the latest version!</b>": "<b>Aggiorna all'ultima versione!</b>",
"Install update": "Installa l'aggiornamento",
# Clock settings
"Clock Settings:": "Impostazioni orologio:",
"Hide the clock in fullscreen mode": "Nascondi l'orologio in modalità a schermo intero",
"Hide the clock when RDP client is active": "Nascondi l'orologio quando il client RDP è attivo",
"Force the clock to be at the bottom of the screen": "Forza l'orologio ad essere al fondo dello schermo",
"Show the clock when the taskbar is set to hide automatically": "Visualizza l'orologio quando la barra delle applicazioni è impostata a Nascondi",
"Fix the hyphen/dash showing over the month": "Corregge la visualizzazione del trattino sul mese",
"Force the clock to have white text": "Forza l'orologio ad usare testo bianco",
"Show the clock at the left of the screen": "Visualizza l'orologio alla sinistra dello schermo",
# Date & time settings
"Date & Time Settings:": "Impostazioni data e Ora:",
"Show seconds on the clock": "Visualizza i secondi",
"Show date on the clock": "Visualizza la data",
"Show time on the clock": "Visualizza l'ora",
"Change date and time format (Regional settings)": "Cambia il formato di visualizzazione della data e dell'ora (Impostazioni regionali)",
"Regional settings": "Impostazioni regionali",
# About the language pack
"About the language pack:": "Informazioni sulla traduzione",
# Here, make sute to give you some credits: Translated to LANGUAGE by USER/NAME/PSEUDONYM/etc.
"Translated to English by martinet101": "Tradotto in Italiano da Parapongo, zuidstroopwafel",
"Translate ElevenClock to your language": "Traduci ElevenClock nella tua lingua",
"Get started": "Inizia",
# About ElevenClock
"About ElevenClock version {0}:": "Informazioni sulla versione {0} di ElevenClock",
"View ElevenClock's homepage": "Visualizza l'homepage di ElevenClock",
"Open": "Apri",
"Report an issue/request a feature": "Segnala un problema/richiedi una nuova funzionalità",
"Report": "Segnala",
"Support the dev: Give me a coffee☕": "Supporta lo sviluppatore: donami un caffè☕",
"Open page": "Apri la pagina",
# Here, the word "Icons8" should not be translated
"Icons by Icons8": "Icone tratte da Icons8",
"Webpage": "Pagina Web",
"Close settings": "Chiudi le impostazioni",
"Close": "Chiudi",
}
lang = lang2_3
|
StarcoderdataPython
|
3269146
|
<gh_stars>1-10
# Functions and variable scope in Python
# https://tinyurl.com/popo-namespace
# main namespace: "__main__"
# function namespaces are separate
some_value = 12
some_list = [1, 2]
def function_1():
print(some_value)
print(some_list)
def function_2():
some_value = "Other value" # zmienna rodzaju immutable
some_list = "Other list" # zmienna rodzaju immutable, bo przypisanie innej wartości
print(some_value)
print(some_list)
def function_3(param_1, param_2):
print(f"Parameter 1 value: {param_1}")
print(f"Parameter 2 value: {param_2}")
def function_4():
some_list.append(3) # mutable-type variable: we append an element to the existing list
print(some_value)
print(some_list)
function_1()
function_2()
function_3(some_value, some_list)
function_4()
|
StarcoderdataPython
|
166424
|
<gh_stars>100-1000
"""
YOLO-format data: crop the central region of each image to generate a new batch of data.
"""
import cv2
import os
from tqdm import tqdm
def plot_bbox(img, gt=None ,line_thickness=None):
# visualization test: draw the ground-truth boxes on the image
colorlist = []
# 5^3种颜色。
for i in range(30,256,50):
for j in range(40,256,50):
for k in range(50,256,50):
colorlist.append((i,j,k))
height, width,_ = img.shape
tl = line_thickness or round(0.002 * (width + height) / 2) + 1 # line/font thickness
font = cv2.FONT_HERSHEY_SIMPLEX
tf = max(tl - 1, 1) # font thickness
with open(gt,'r') as f:
annotations = f.readlines()
# print(annotations)
for ann in annotations:
ann = list(map(float,ann.split()))
ann[0] = int(ann[0])
# print(ann)
cls,x,y,w,h = ann
color = colorlist[cls]
c1, c2 = (int((x-w/2)*width),int((y-h/2)*height)), (int((x+w/2)*width), int((y+h/2)*height))
cv2.rectangle(img, c1, c2, color, thickness=tl*2, lineType=cv2.LINE_AA)
return img
def parse_label(gt, crop_ratio=0.25,):
scale_ratio = 1/(1-crop_ratio*2)
out_str = ''
with open(gt,'r') as f:
annotations = f.readlines()
# print(annotations)
for ann in annotations:
ann = list(map(float,ann.split()))
# print(ann)
if crop_ratio < ann[1] < 1-crop_ratio and crop_ratio < ann[2] < 1-crop_ratio:
# center point in the specified area
# print(ann)
out_l = [int(ann[0]), ann[1]-crop_ratio, ann[2]-crop_ratio, ann[3], ann[4]]
out_l[1:] = [out*scale_ratio for out in out_l[1:]]
out_l = list(map(str,out_l))
out_str += ' '.join(out_l) +'\n'
return out_str
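# Worked example of the rescaling above (illustrative, not from the original file):
# with crop_ratio=0.25 the kept region spans [0.25, 0.75] on both axes, so
# scale_ratio = 1/(1 - 2*0.25) = 2. A label line "0 0.5 0.5 0.2 0.2" (class x y w h)
# keeps its centre at (0.5, 0.5) and doubles in relative size: "0 0.5 0.5 0.4 0.4".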
if __name__ == '__main__':
origin_root_dir = '/home/winner/chenwf/yolov5/data/pedestrian/train'
save_dir = '/home/winner/chenwf/yolov5/data/pedestrian/train_crop'
if not os.path.exists(save_dir):
os.makedirs(os.path.join(save_dir,'images'))
os.makedirs(os.path.join(save_dir,'labels'))
img_dir = os.path.join(origin_root_dir,'images')
label_dir = os.path.join(origin_root_dir,'labels')
img_names = os.listdir(img_dir)
# crop ratio
crop_ratio = 0.25
for img_name in tqdm(img_names):
label_name = img_name.replace('jpg','txt')
img_path = os.path.join(img_dir, img_name)
label_path = os.path.join(label_dir, label_name)
img = cv2.imread(img_path)
height,width,_ = img.shape
out_str = parse_label(label_path, crop_ratio)
if out_str:
            # empty samples are not added
with open(os.path.join(save_dir, 'labels', label_name),'w') as f:
# write
f.write(out_str)
# crop
crop_img = img[int(height*crop_ratio):height-int(height*crop_ratio),int(width*crop_ratio):width-int(width*crop_ratio),:]
# plot_bbox(crop_img, os.path.join(save_dir, 'labels', label_name)) # visualize the bbox
cv2.imwrite(os.path.join(save_dir, 'images', img_name),crop_img)
|
StarcoderdataPython
|
3342652
|
import json
import os
import socket
from typing import List, Optional
from abejacli import config
from abejacli.config import RESERVED_ENV_VAR, RUN_LOCAL_COMMAND_V1
from abejacli.docker.utils import get_home_path
# ==========================
# Environment Variable Keys
# ==========================
SERVICE_TYPE_HTTP = 'HTTP'
ABEJA_PLATFORM_USER_ID = 'ABEJA_PLATFORM_USER_ID'
ABEJA_PLATFORM_PERSONAL_ACCESS_TOKEN = 'ABEJA_PLATFORM_PERSONAL_ACCESS_TOKEN'
# ==========================
# Defined Default Path
# ==========================
DEFAULT_STORAGE_BASE_DIR = '/cache'
DEFAULT_STORAGE_DIR = '{}/.abeja/.cache'.format(DEFAULT_STORAGE_BASE_DIR)
DEFAULT_WORKING_DIR = '/srv/app'
DEFAULT_ARTIFACT_DIR = '/output'
PYTHONUNBUFFERED_OPTION = 'x'
# ==========================
# Defined Docker Parameters
# ==========================
DOCKER_PARAMETER_SETTING = {
'x86_cpu': {
'privileged': False,
'volume': [],
'runtime': None
},
'x86_gpu': {
'privileged': False,
'volume': [],
'runtime': 'nvidia'
},
'jetson_tx2': {
'privileged': True,
'volume': [
{
'host': '/usr/lib/aarch64-linux-gnu/tegra',
'guest': {'bind': '/usr/lib/aarch64-linux-gnu/tegra', 'mode': 'ro'}
}],
'runtime': None
},
'raspberry3': {
'privileged': False,
'volume': [{'host': '/opt', 'guest': {'bind': '/opt', 'mode': 'ro'}}],
'runtime': None
}
}
def get_free_tcp_port() -> int:
"""
get free tcp port in dynamic port range
which is defined by IANA
cf. https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml
:returns: free port number
"""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(('', 0))
_address, port = s.getsockname()
return port
def build_volumes(volume_params):
volumes = {}
for host_path, container_path in volume_params:
volumes.update(build_volume(host_path, container_path))
return volumes
def build_volume(host_path, container_path, mode='rw'):
return {
host_path: {
'bind': container_path,
'mode': mode
}
}
def get_storage_volume() -> Optional[dict]:
home_dir = get_home_path()
if home_dir:
return build_volume(home_dir, DEFAULT_STORAGE_BASE_DIR)
return None
def get_default_volume() -> dict:
current_dir = '{}'.format(os.getcwd())
volume = build_volume(current_dir, DEFAULT_WORKING_DIR)
storage_volume = get_storage_volume()
if storage_volume:
volume.update(storage_volume)
return volume
def add_default_env_vars(env_vars: dict) -> dict:
"""add default environment variables commonly used in both model and train"""
    # shallow copy so the caller's dict is not mutated
update_env_vars = {**env_vars}
PYTHONUNBUFFERED = RESERVED_ENV_VAR['python_unbufferd']
if PYTHONUNBUFFERED not in update_env_vars:
update_env_vars[PYTHONUNBUFFERED] = PYTHONUNBUFFERED_OPTION
ABEJA_STORAGE_DIR_PATH = RESERVED_ENV_VAR['abeja_storage_dir_path']
if ABEJA_STORAGE_DIR_PATH not in update_env_vars:
update_env_vars[ABEJA_STORAGE_DIR_PATH] = DEFAULT_STORAGE_DIR
return update_env_vars
class RunCommand:
def __init__(
self, image: str, working_dir: str=None,
environment: List[str]=None, volumes: dict=None, ports: dict=None,
command: List[str]=None, remove: bool=True, detach: bool=True,
privileged: bool=False,
stderr: bool=True, runtime: str=None) -> None:
self.image = image
self.working_dir = working_dir
self.environment = environment
if ports is None:
ports = {}
self.ports = ports
self.volumes = volumes
self.command = command
self.remove = remove
self.detach = detach
self.stderr = stderr
self.runtime = runtime
self.privileged = privileged
def get_port(self) -> Optional[dict]:
ports = list(self.ports.values())
return ports[0] if ports else None
def to_dict(self) -> dict:
d = {
'image': self.image,
'remove': self.remove,
'detach': self.detach,
'stderr': self.stderr
}
if self.working_dir:
d['working_dir'] = self.working_dir
if self.environment:
d['environment'] = self.environment
if self.ports:
d['ports'] = self.ports
if self.volumes:
d['volumes'] = self.volumes
if self.command:
d['command'] = self.command
if self.runtime:
d['runtime'] = self.runtime
if self.privileged:
d['privileged'] = self.privileged
return d
class TrainRunCommand(RunCommand):
@classmethod
def create(
cls, image: str, handler: str, datasets: dict=None,
runtime: str=None, env_vars: dict=None, volume: dict=None,
platform_user_id: str=None, platform_personal_access_token: str=None,
platform_organization_id: str=None, command: list = None,
remove=True) -> 'TrainRunCommand':
if volume is None:
volume = {}
if env_vars is None:
env_vars = {}
env_vars = add_default_env_vars(env_vars)
environment = ['{}={}'.format(k, v) for k, v in env_vars.items()]
if handler:
environment.append('{}={}'.format(RESERVED_ENV_VAR['handler'], handler))
if datasets:
environment.append('{}={}'.format(
RESERVED_ENV_VAR['datasets'], str(json.dumps(datasets))))
if platform_user_id:
environment.append('{}={}'.format(
ABEJA_PLATFORM_USER_ID, platform_user_id))
if platform_personal_access_token:
environment.append('{}={}'.format(
ABEJA_PLATFORM_PERSONAL_ACCESS_TOKEN, platform_personal_access_token))
if platform_organization_id:
environment.append('{}={}'.format(
RESERVED_ENV_VAR['organization_id'], platform_organization_id))
if command is None:
command = ['/bin/sh', '-c', 'abeja-model train']
return TrainRunCommand(
image=image, working_dir=DEFAULT_WORKING_DIR, environment=environment,
command=command, volumes=volume, runtime=runtime, remove=remove)
class ModelRunCommand(RunCommand):
@classmethod
def create(
cls, image: str, handler: str, device_type: str, port: int = None,
command: List[str] = None, env_vars: dict = None,
organization_id: str = None) -> 'ModelRunCommand':
volume = get_default_volume()
privileged = DOCKER_PARAMETER_SETTING[device_type]['privileged']
runtime = DOCKER_PARAMETER_SETTING[device_type]['runtime']
for v in DOCKER_PARAMETER_SETTING[device_type]['volume']:
volume[v['host']] = v['guest']
if port is None:
port = get_free_tcp_port()
ports = {'5000/tcp': port}
if env_vars is None:
env_vars = {}
env_vars = add_default_env_vars(env_vars)
if RESERVED_ENV_VAR['abeja_training_result_dir'] not in env_vars:
env_vars[RESERVED_ENV_VAR['abeja_training_result_dir']] = '.'
env_vars[RESERVED_ENV_VAR['service_type']] = SERVICE_TYPE_HTTP
if ABEJA_PLATFORM_USER_ID not in env_vars:
env_vars[ABEJA_PLATFORM_USER_ID] = config.ABEJA_PLATFORM_USER_ID
if ABEJA_PLATFORM_PERSONAL_ACCESS_TOKEN not in env_vars:
env_vars[ABEJA_PLATFORM_PERSONAL_ACCESS_TOKEN] = config.ABEJA_PLATFORM_TOKEN
if handler:
env_vars[RESERVED_ENV_VAR['handler']] = handler
if organization_id:
# NOTE: organization_id from arg takes priority over one passed as env.
env_vars[RESERVED_ENV_VAR['organization_id']] = organization_id
environment = ['{}={}'.format(k, v) for k, v in env_vars.items()]
if command is None:
command = RUN_LOCAL_COMMAND_V1
return ModelRunCommand(
image=image, working_dir=DEFAULT_WORKING_DIR, ports=ports,
environment=environment, volumes=volume, privileged=privileged,
runtime=runtime, command=command)
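# Hedged usage sketch (not part of the original module); the image name and
# handler below are placeholders chosen purely for illustration:
#
#   cmd = ModelRunCommand.create(
#       image='my-registry/my-model:latest', handler='main:handler',
#       device_type='x86_cpu')
#   run_kwargs = cmd.to_dict()   # dict of container run parameters (port is auto-assigned)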
|
StarcoderdataPython
|
1775591
|
import logging
import os
# TODO Create proper logging.
# TODO Create proper testing.
# TODO Create folder structure
def logger(default_level='INFO', file='logs.log'):
    logger = logging.getLogger(__name__)
    level = getattr(logging, default_level.upper(), logging.INFO)
    logger.setLevel(level)
    fh = logging.FileHandler(file)
    fh.setLevel(level)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
SCHEMAS_FOLDER = '{}\\schema\\'.format(os.getcwd())
DATABASE_CONFIG = {
'host': 'localhost',
'port' : 27017,
'database' : 'curri',
}
|
StarcoderdataPython
|
3301162
|
<reponame>Nukesor/stasibot<gh_stars>0
# Get your telegram api-key from @botfather
TELEGRAM_API_KEY = None
CHANNEL = 8
TARGET_FOLDER = 'guest@server:videos'
NAME = 'LolBot'
TEMP_FOLDER = 'camvideos' # relative to home directory
USERNAME = 'User'
USER_ID = 12345678
|
StarcoderdataPython
|
1703824
|
from openbiolink.graph_creation.file_downloader.fileDownloader import FileDownloader
|
StarcoderdataPython
|
1790616
|
<filename>qgate/simulator/cudaruntime.py
try :
from . import cudaext
except :
import sys
if sys.version_info[0] == 2 :
del cudaext
raise
import numpy as np
import weakref
from .native_qubit_processor import NativeQubitProcessor
from .native_qubit_states import NativeQubitStates
from .native_qubits_states_getter import NativeQubitsStatesGetter
from .native_sampling_pool import NativeSamplingPool
from . import glue
import sys
this = sys.modules[__name__]
# initialization flag.
this.initialized = False
# dictionary that holds native instances.
this.native_instances = weakref.WeakValueDictionary()
def set_preference(device_ids = [], max_po2idx_per_chunk = -1, memory_store_size = -1) :
if this.initialized :
raise RuntimeError('already initialized.')
if len(device_ids) != 0 :
this.device_ids = device_ids
if max_po2idx_per_chunk != -1 :
this.max_po2idx_per_chunk = max_po2idx_per_chunk
if memory_store_size != -1 :
this.memory_store_size = memory_store_size
def reset_preference(device_ids = [], max_po2idx_per_chunk = -1, memory_store_size = -1) :
this.device_ids = []
this.max_po2idx_per_chunk = -1
this.memory_store_size = -1
def create_qubit_states(dtype) :
if not this.initialized :
module_init()
# create qubit_processor
qproc = NativeQubitProcessor(dtype, cudaext.qubit_processor_new(dtype))
this.native_instances[id(qproc)] = qproc
# create qubit states
ptr = cudaext.qubit_states_new(dtype)
qstates = NativeQubitStates(ptr, qproc)
this.native_instances[id(qstates)] = qstates
return qstates
def create_qubits_states_getter(dtype) :
ptr = cudaext.qubits_states_getter_new(dtype)
return CUDAQubitsStatesGetter(dtype, ptr)
class CUDAQubitsStatesGetter(NativeQubitsStatesGetter) :
def __init__(self, dtype, ptr) :
NativeQubitsStatesGetter.__init__(self, dtype, ptr)
def create_sampling_pool(self, qreg_ordering,
n_lanes, n_hidden_lanes, lane_trans, empty_lanes,
sampling_pool_factory = None) :
return self._create_sampling_pool(qreg_ordering, n_lanes, n_hidden_lanes, lane_trans,
empty_lanes, True, sampling_pool_factory)
def module_init() :
cudaext.devices_initialize(this.device_ids, this.max_po2idx_per_chunk, this.memory_store_size)
this.initialized = True
def module_finalize() :
instances = this.native_instances.values()
for ptr in instances :
ptr.delete()
if this.initialized :
cudaext.devices_clear()
this.initialized = False
import atexit
atexit.register(module_finalize)
# set default preference
this.reset_preference()
|
StarcoderdataPython
|
3360511
|
<gh_stars>0
# Copyright (c) 2020 Broadcom.
# The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
#
# This program and the accompanying materials are made
# available under the terms of the Eclipse Public License 2.0
# which is available at https://www.eclipse.org/legal/epl-2.0/
#
# SPDX-License-Identifier: EPL-2.0
#
# Contributors:
# Broadcom, Inc. - initial API and implementation
def get_line_content(cls, line_number, trim=False, editor=None):
return cls.get_line_content_with_number(line_number, trim=trim, editor=editor)
def get_active_editor(cls):
return cls.get_editor_element()
def focus_in(cls, editor):
return cls.set_focus(editor)
def get_editor_id(cls, editor):
return cls.get_element_id(editor)
def get_hostname_from_id(cls, id):
return cls.get_hostname_from_id_value(id)
def get_dataset_member_from_id(cls, id):
return cls.get_dataset_member_from_id_value(id)
def get_active_editor_tab(cls):
return cls.get_active_tab()
def get_active_tab(cls):
return cls.get_active_tab_element()
def get_tab_title(cls, tab):
return cls.get_tab_element_title(tab)
def get_active_tab_id(cls, tab):
return cls.get_element_id(tab)
def wait_for_tab(cls, dataset, member):
return cls.wait_for_tab_with_dataset_member(dataset, member)
def wait_for_tab_with_title(cls, title):
return cls.wait_for_tab_exist(title)
def wait_for_sub_tab_with_title(cls, title):
return cls.wait_for_sub_tab_exist(title)
def focus_in_user_preferences(cls):
return cls.set_focus_in_user_preferences()
def find_lines_with_errors(cls, editor, error_should_exist=True):
return cls.find_errors_on_screen(editor=editor, error_should_exist=error_should_exist)
def find_lines_with_warnings(cls, editor, error_should_exist=True):
return cls.find_errors_on_screen(editor=editor, error_type="info", error_should_exist=error_should_exist)
def mark_lines(cls, line_numbers_list, editor=None):
return cls.mark_lines_with_numbers(line_numbers_list, editor=editor)
def show_error_tooltip_for_line_number(cls, line_number, editor=None):
return cls.show_error_tooltip(line_number, editor=editor)
def show_info_tooltip_for_line_number(cls, line_number, editor=None):
return cls.show_info_tooltip(line_number, editor=editor)
def close_tab(cls, tab_element, save=False):
return cls.close_tab_element(tab_element, save)
def close_tab_with_member(cls, dataset, member):
return cls.close_tab_with_dataset_member(dataset, member)
def scroll_editor_for_line_number(cls, direction, line_number, active_editor):
return cls.scroll_editor_till_line_number(line_number, direction, active_editor)
def select_line(cls, line_number):
return cls.select_line_element(line_number)
def append_spaces_to_line(cls, line_element):
return cls.append_ending_spaces_to_line(line_element)
def add_new_line(cls):
return cls.press_enter_here()
def call_auto_complete(cls):
return cls.call_auto_complete_here()
def call_find_all_references(cls):
return cls.call_find_all_references_here()
def wait_for_autocomplete_suggest(cls):
return cls.wait_for_suggest_widget_is_visible()
def choose_from_suggestion(cls, suggest_widget, element):
return cls.choose_item_from_suggestion(suggest_widget, element)
def fill_snippets_with(cls, values):
return cls.fill_snippets_with_values(values)
def click_inside_definition(cls, line_number, text, editor=None):
return cls.click_on_text(line_number, text, editor=editor)
def control_click(cls, element):
return cls.control_left_click(element)
def get_current_line_number(cls, content=None, editor=None):
return cls.get_current_line_num(content, editor)
def get_current_line_number_in_widget_zone(cls, content=None, editor=None):
return cls.get_current_line_num_inside_widget_zone(content, editor)
def close_zone_if_exists(cls, editor=None):
return cls.close_zone_widget_if_exists(editor)
def get_number_of_references(cls, editor=None):
return cls.get_references_num(editor)
def get_widget_zone_rows(cls, editor=None):
return cls.get_references_in_widget_zone(editor)
def navigate_to_reference(cls, reference_num, list_rows=None, content=None, editor=None):
return cls.get_line_num_for_reference(reference_num, list_rows, content, editor)
def expand_references_tree(cls, editor=None):
return cls.expand_tree_of_references(editor)
def set_breakpoint_at_line_editor(cls, line_number, editor=None):
return cls.set_breakpoint_at_line(line_number, editor)
def remove_all_breakpoints(cls):
cls.clear_breakpoints()
def find_breakpoint_lines(cls, editor=None):
return cls.find_lines_with_breakpoints(editor)
|
StarcoderdataPython
|
3314448
|
# coding=utf-8
from django.conf import settings
from django.conf.urls import patterns, url, include
from django.conf.urls.static import static
from django.core.urlresolvers import reverse_lazy
from django.views.generic import TemplateView, DeleteView
from media.forms import VideoAjaxUploadForm, YoutubeVideoAjaxUploadForm
from media.models import Video, YoutubeVideo
from .views import (ListMediaItem, CreateMedia, DetailMedia,
UpdateMedia, CreateMultipleYoutubeVideos, ShowMultipleYoutubeVideos)
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# ADMIN URLS
url(r'^admin/', include(admin.site.urls)),
# media urls
url(
r'^media/', include('media.urls')
),
url(
r'^test/$',
TemplateView.as_view(template_name='test/test.html'),
name="test"
),
# list
url(
r'^test/video/list/$',
ListMediaItem.as_view(
template_name='test/list.html',
model=Video
),
name="list_video"
),
url(
r'^test/youtubevideo/list/$',
ListMediaItem.as_view(
template_name='test/list.html',
model=YoutubeVideo
),
name="list_youtubevideo"
),
# create
url(
r'^test/video/create/$',
CreateMedia.as_view(
template_name='test/create_update.html',
model=Video,
form_class=VideoAjaxUploadForm,
success_url=reverse_lazy('list_video')
),
name="create_video"
),
url(
r'^test/youtubevideo/create/$',
CreateMedia.as_view(
template_name='test/create_update.html',
model=YoutubeVideo,
form_class=YoutubeVideoAjaxUploadForm,
success_url=reverse_lazy('list_youtubevideo')
),
name="create_youtubevideo"
),
# update
url(
r'^test/video/update/(?P<pk>\d+)/$',
UpdateMedia.as_view(
template_name='test/create_update.html',
model=Video,
form_class=VideoAjaxUploadForm,
success_url=reverse_lazy('list_video')
),
name="update_video"
),
url(
r'^test/youtubevideo/update/(?P<pk>\d+)/$',
UpdateMedia.as_view(
template_name='test/create_update.html',
model=YoutubeVideo,
form_class=YoutubeVideoAjaxUploadForm,
success_url=reverse_lazy('list_youtubevideo')
),
name="update_youtubevideo"
),
# detail
url(
r'^test/video/detail/(?P<pk>\d+)/$',
DetailMedia.as_view(
model=Video,
template_name='test/video/detail.html'
),
name="detail_video"
),
url(
r'^test/youtubevideo/detail/(?P<pk>\d+)/$',
DetailMedia.as_view(
model=YoutubeVideo,
template_name='test/youtube/detail.html'
),
name="detail_youtubevideo"
),
# delete
url(
r'^test/video/delete/(?P<pk>\d+)/$',
DeleteView.as_view(
model=Video,
success_url=reverse_lazy('list_video')
),
name="delete_video"
),
url(
r'^test/youtubevideo/delete/(?P<pk>\d+)/$',
DeleteView.as_view(
model=YoutubeVideo,
success_url=reverse_lazy('list_youtubevideo')
),
name="delete_youtubevideo"
),
###########################################################
url(
r'^test/youtubevideo/multiple/create/$',
CreateMultipleYoutubeVideos.as_view(
template_name='test/multi_videos/create_update.html',
model=YoutubeVideo,
form_class=YoutubeVideoAjaxUploadForm,
success_url=reverse_lazy('list_youtubevideo')
),
name="create_youtubevideo_multi"
),
url(
r'^test/youtubevideo/multiple/detail/$',
ShowMultipleYoutubeVideos.as_view(
template_name='test/multi_videos/detail.html',
),
name="detail_youtubevideo_multi"
),
)
# FOR UPLOADED MEDIA
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
StarcoderdataPython
|
3273825
|
<reponame>isaaccorley/contrastive-surface-image-pretraining
"""Inspired by OpenAI's CLIP https://github.com/openai/CLIP."""
import numpy as np
import pytorch_lightning as pl
import timm
import torch
import torch.nn as nn
import torch.nn.functional as F
class NTXent(nn.Module):
def forward(self, z1, z2, t):
batch_size = z1.shape[0]
device = z1.device
z1 = F.normalize(z1, dim=-1)
z2 = F.normalize(z2, dim=-1)
similarity = torch.matmul(z1, z2.T)
similarity = similarity * torch.exp(t)
targets = torch.arange(batch_size, device=device)
loss = F.cross_entropy(similarity, targets)
return loss
class Encoder(nn.Sequential):
def __init__(self, backbone, num_channels, pretrained=True):
model = timm.create_model(
backbone, in_chans=num_channels, num_classes=0, pretrained=pretrained
)
self.num_features = model.num_features
super().__init__(model)
class DualEncoders(pl.LightningModule):
def __init__(
self,
backbone="resnet18",
proj_dim=256,
num_channels_a=3,
num_channels_b=1,
pretrained=True,
):
super().__init__()
self.save_hyperparameters()
self.encoder_a = Encoder(backbone, num_channels_a, pretrained)
self.encoder_b = Encoder(backbone, num_channels_b, pretrained)
self.proj_a = nn.Linear(self.encoder_a.num_features, proj_dim)
self.proj_b = nn.Linear(self.encoder_b.num_features, proj_dim)
self.t = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
self.loss_fn = NTXent()
def configure_optimizers(self):
return torch.optim.AdamW(self.parameters(), lr=1e-3)
def forward(self, x_a, x_b):
return self.encoder_a(x_a), self.encoder_b(x_b)
def training_step(self, batch, batch_idx):
x_a, x_b = batch["x_a"], batch["x_b"]
e_a, e_b = self(x_a, x_b)
z_a, z_b = self.proj_a(e_a), self.proj_b(e_b)
loss = self.loss_fn(z_a, z_b, self.t)
self.log("train_loss", loss)
return loss
@torch.no_grad()
def embed_a(self, x):
return F.normalize(self.encoder_a(x), dim=-1)
@torch.no_grad()
def embed_b(self, x):
return F.normalize(self.encoder_b(x), dim=-1)
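if __name__ == "__main__":
    # Hedged usage sketch (not in the original file). Tensor shapes and
    # pretrained=False are assumptions made only so the example runs offline.
    model = DualEncoders(backbone="resnet18", num_channels_a=3,
                         num_channels_b=1, pretrained=False)
    x_a = torch.randn(2, 3, 224, 224)  # e.g. an RGB image batch
    x_b = torch.randn(2, 1, 224, 224)  # e.g. a single-channel surface batch
    e_a, e_b = model(x_a, x_b)
    print(e_a.shape, e_b.shape)  # both (2, 512) for a resnet18 backbone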
|
StarcoderdataPython
|
3281462
|
<filename>setup.py
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='torrent-crawler',
description='Library to search any torrents',
version='1.0.0',
url='https://github.com/Alxspb/torrent-crawler',
download_url='https://github.com/Alxspb/torrent-crawler/releases',
author='Alxspb',
author_email='<EMAIL>',
long_description=long_description,
long_description_content_type='text/markdown',
packages=find_packages(),
keywords=['pip', 'torrent', 'crawler'],
classifiers=[
"Intended Audience :: Developers",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
entry_points={
'console_scripts': [
'search-torrent=torrent_crawler.search:main',
],
},
install_requires=[
'bs4',
'requests',
'html5lib'
]
)
|
StarcoderdataPython
|
1724081
|
from isserviceup.models.favorite import Favorite
from isserviceup.services import SERVICES
def get_favorite_services(user_id):
favs = Favorite.objects(user_id=user_id)
res = []
for fav in favs:
res.append(SERVICES[fav.service_id])
return res
def update_favorite_status(user_id, service_id, status):
if status: # added favorite
Favorite(user_id=user_id, service_id=service_id).save()
else: # removed favorite
Favorite.objects(user_id=user_id, service_id=service_id).delete()
|
StarcoderdataPython
|
3238668
|
#
# Copyright 2020 Australian National University
#
# Please see the LICENSE.txt file for details.
import os
import shutil
from pathlib import Path
config_template = """
## WARNING: THIS IS AN AUTO-GENERATED FILE. MANUAL CHANGES WILL BE
## OVERWRITTEN.
#
# This file is managed by mdserver - changes should be made through the
# mdserver config most likely in /etc/mdserver.
user={user}
leasefile-ro
strict-order
expand-hosts
pid-file={run_dir}/{net_name}.pid
except-interface=lo
bind-dynamic
interface={interface}
dhcp-range={mds_gateway},static
dhcp-no-override
dhcp-lease-max={lease_len}
dhcp-hostsdir={dhcp_hostsdir}
dhcp-optsfile={dhcp_optsfile}
hostsdir={dns_hostsdir}
"""
opts_template = """
option:classless-static-route,{listen_addr}/32,{mds_gateway},0.0.0.0/0,{mds_gateway}
249,{listen_addr}/32,{mds_gateway},0.0.0.0/0,{mds_gateway}
option:router,{mds_gateway}
"""
class Dnsmasq(object):
"""Manage dnsmasq configuration.
This code is primarily concerned with managing host data for dnsmasq's
DHCP and DNS configuration, as well as creating a configuration file that
makes use of that host data.
"""
def __init__(self, config):
self.config = config
for option in config:
if option.startswith('dnsmasq.'):
name = option.split('.')[1]
setattr(self, name, config[option])
self.base_dir = Path(self.base_dir).resolve().as_posix()
def gen_dhcp_hosts(self, db):
"""Create a dnsmasq DHCP hosts file.
All host data is pulled from the database, and is written to a single
file, overwriting any previous data.
"""
lease = self.lease_len
dirname = os.path.join(self.base_dir, "dhcp")
Path(dirname).mkdir(mode=0o777, parents=True, exist_ok=True)
name = self.net_name + ".dhcp-hosts"
hostsfile = os.path.join(dirname, name)
# note that this truncates the file before writing
with open(hostsfile, "w") as hf:
for entry in db:
mac = entry['mds_mac']
ipv4 = entry['mds_ipv4']
ipv6 = entry['mds_ipv6']
hname = entry['domain_name']
if ipv4 is not None:
line = "%s,id:*,%s,%s,%d\n" % (mac, ipv4, hname, lease)
hf.write(line)
if ipv6 is not None:
line = "%s,id:*,[%s],%s,%d\n" % (mac, ipv6, hname, lease)
hf.write(line)
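    # Example of a generated dhcp-hosts line (illustrative values only):
    #   52:54:00:12:34:56,id:*,10.100.0.5,myvm,300
    # i.e. "<mac>,id:*,<address>,<hostname>,<lease seconds>", matching the
    # format strings written above.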
def gen_dns_hosts(self, db):
"""Create a dnsmasq DNS hosts file.
All host data is pulled from the database, and is written to a single
file, overwriting any previous data.
"""
order = self.entry_order
order = [o.strip().lower() for o in order.split(',')]
prefix = self.prefix
domain = self.domain
dirname = os.path.join(self.base_dir, "dns")
Path(dirname).mkdir(mode=0o777, parents=True, exist_ok=True)
name = self.net_name + ".dns-hosts"
hostsfile = os.path.join(dirname, name)
        # note that this truncates the file before writing
with open(hostsfile, "w") as hf:
for entry in db:
ipv4 = entry['mds_ipv4']
ipv6 = entry['mds_ipv6']
hname = entry['domain_name']
prefixed = hname
if prefix:
prefixed = prefix + hname
fqdn = False
if domain:
fqdn = prefixed + "." + domain
names = []
for o in order:
if o.startswith('base'):
names.append(hname)
elif o.startswith('prefix'):
if prefix:
names.append(prefixed)
elif o == 'domain' or o == 'fqdn':
if domain:
names.append(fqdn)
if len(names) > 0:
if ipv4 is not None:
line = "%s %s\n" % (ipv4, " ".join(names))
hf.write(line)
if ipv6 is not None:
line = "%s %s\n" % (ipv6, " ".join(names))
hf.write(line)
def gen_dnsmasq_config(self):
"""Create a dnsmasq config file, set up to make use of generated host
data, along with other relevant configuration options.
"""
# make basedir
Path(self.base_dir).mkdir(mode=0o775, parents=False, exist_ok=True)
try:
shutil.chown(self.base_dir, user=None, group=self.user)
except PermissionError:
pass
# make dhcp and dns dirs
confname = self.net_name + ".conf"
conffile = os.path.join(self.base_dir, confname)
optsname = self.net_name + ".opts"
optsfile = os.path.join(self.base_dir, optsname)
dhcp_dir = os.path.join(self.base_dir, "dhcp")
Path(dhcp_dir).mkdir(mode=0o775, parents=True, exist_ok=True)
try:
shutil.chown(dhcp_dir, user=self.user, group=self.user)
except PermissionError:
pass
dns_dir = os.path.join(self.base_dir, "dns")
Path(dns_dir).mkdir(mode=0o775, parents=True, exist_ok=True)
try:
shutil.chown(dns_dir, user=self.user, group=self.user)
except PermissionError:
pass
# make run dir
Path(self.run_dir).mkdir(mode=0o775, parents=False, exist_ok=True)
try:
shutil.chown(self.base_dir, user=self.user, group=self.user)
except PermissionError:
pass
config_strings = {
'user': self.user,
'net_name': self.net_name,
'interface': self.interface,
'lease_len': self.lease_len,
'run_dir': self.run_dir,
'dhcp_hostsdir': dhcp_dir,
'dns_hostsdir': dns_dir,
'dhcp_optsfile': optsfile,
'mds_gateway': self.gateway,
}
opts_strings = {
'mds_gateway': self.gateway,
'listen_addr': self.config['mdserver.listen_address'],
}
config_formatted = config_template.format(**config_strings)
opts_formatted = opts_template.format(**opts_strings)
if self.config['dnsmasq.domain'] is not None:
opts_formatted += "domain=%s" % (self.domain)
if self.config['dnsmasq.use_dns']:
config_formatted += "option:dns-server,%s" % (self.gateway)
        with open(conffile, 'w') as cf:
            cf.write(config_formatted)
        with open(optsfile, 'w') as of:
            of.write(opts_formatted)
|
StarcoderdataPython
|
1761218
|
<reponame>hyperbrowser/conglomerate<filename>src/pycolocstats/core/types.py<gh_stars>1-10
from __future__ import absolute_import, division, print_function, unicode_literals
__metaclass__ = type
class PathStr(str):
pass
class PathStrList(list):
pass
class SingleResultValue(object):
def __init__(self, numericResult, textualResult):
self.numericResult = numericResult
self.textualResult = textualResult
def __repr__(self):
return self.textualResult
class TrackFile(object):
def __init__(self, path, title):
assert path, 'Path missing for track %s' % title
        assert title, 'Title missing for track %s' % path
self.path = path
self.title = title
def __repr__(self):
return 'TrackFile(%s:%s)' % (str(self.title), str(self.path))
|
StarcoderdataPython
|
1702271
|
<reponame>carmenchilson/BirdRoostDetection
"""Read in csv and create train, test, and validation splits for ML."""
import BirdRoostDetection.LoadSettings as settings
import os
import pandas
def ml_splits_by_date(csv_input_path,
csv_output_path,
k=5):
"""Split labeled data for k-fold cross validation.
For machine learning, you need a training, validation, and test set. This
method will read in a csv from csv_input_path. This data should be formatted
like the ml_labels_example file. It will then create k splits of the data.
Each time the data is used for training, k - 2 splits will be used for
training, 1 split will be used for testing, and 1 split will be used for
validating. This method will split the data by date (to avoid cross
contamination of the datasets) and then write out a csv used to look up
which file belongs to which split.
Args:
        csv_input_path: The input file location. Formatted like
example_labels.csv, a string.
csv_output_path: The output csv location path, a string. The output csv
will be saved to this location.
k: The size of k for k fold cross validation.
"""
pd = pandas.read_csv(csv_input_path)
basenames = {}
file_list = list(pd['AWS_file'])
is_roost_list = list(pd['Roost'])
fold_images = [[] for split_index in range(k)]
index = 0
for i, file_name in enumerate(file_list):
basename = file_name[4:12]
if basename not in basenames:
basenames[basename] = index
            index = (index + 1) % k
hash = basenames[basename]
for split_index in range(k):
if hash == split_index:
fold_images[split_index].append([file_name, is_roost_list[i]])
output = []
for split_index in range(k):
for file_name in fold_images[split_index]:
output.append({
'split_index': split_index,
'AWS_file': file_name[0], 'Roost': file_name[1]})
output_pd = pandas.DataFrame.from_dict(output)
output_pd.to_csv(csv_output_path, index=False)
def main():
ml_splits_by_date(csv_input_path=settings.LABEL_CSV,
csv_output_path=settings.ML_SPLITS_DATA,
k=5)
if __name__ == "__main__":
os.chdir(settings.WORKING_DIRECTORY)
main()
|
StarcoderdataPython
|
3311994
|
import numbers
import functools
from vedmath import VDigit, int_to_digits, digits_from_vdigits
class VInt:
'''
An experimental class for vedic integers.
'''
def __init__(self, n:int):
'''
Initialise a VInt from an integer n.
'''
self.ds = [VDigit(d) for d in int_to_digits(n)]
@classmethod
def fromints(cls, ds):
'''
Return a VInt from a list of digits as ints.
'''
ans = cls(0)
ans.ds = [VDigit(d) for d in ds]
return ans
@classmethod
def fromvdigits(cls, ds):
'''
Return a VInt from a list of digits as VDigits.
'''
ans = cls(0)
ans.ds = ds
return ans
def __repr__(self):
'''
Provide a string representation of a VInt.
'''
return f"VInt({self.ds})"
def __len__(self):
'''Return the length of the VInt.'''
return len(self.ds)
def __getitem__(self, index):
'''Return the item specified by index.'''
cls = type(self)
if isinstance(index, slice):
return cls.fromvdigits(self.ds[index])
elif isinstance(index, numbers.Integral):
return self.ds[index]
else:
msg = '{cls.__name__} indices must be integers'
raise TypeError(msg.format(cls=cls))
def __int__(self):
'''Return a VInt transformed to int.'''
return functools.reduce(lambda x, y: 10 * x + y, self.get_digits(), 0)
def get_digits(self):
'''
Returns a list of the digits as ints
'''
return digits_from_vdigits(self.ds)
def __eq__(self, other):
return int(self) == int(other)
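# Illustrative usage (doctest-style, added for clarity; assumes int_to_digits /
# digits_from_vdigits yield the decimal digits most-significant first):
#   >>> n = VInt(1234)
#   >>> len(n), int(n)
#   (4, 1234)
#   >>> n.get_digits()
#   [1, 2, 3, 4]
#   >>> int(n[1:3])   # slicing returns a new VInt
#   23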
|
StarcoderdataPython
|
4833757
|
<reponame>Mahdi-Asaly/Coursera-SDN-Assignments<gh_stars>0
from random import choice
from pyretic.lib.corelib import *
from pyretic.lib.std import *
from pyretic.lib.query import *
from pyretic.kinetic.fsm_policy import *
from pyretic.kinetic.drivers.json_event import JSONEvent
from pyretic.kinetic.smv.model_checker import *
from pyretic.kinetic.apps.mac_learner import *
#####################################################################################################
# * App launch
# - pyretic.py pyretic.kinetic.apps.server_lb
#
# * Mininet Generation (in "~/pyretic/pyretic/kinetic" directory)
# - sudo mn --controller=remote,ip=127.0.0.1 --mac --arp --switch ovsk --link=tc --custom mininet_topos/example_topos.py --topo=server_lb
#
# * Start ping from h1 to h2
# - mininet> h1 ping h2
#
# * Events (in "~/pyretic/pyretic/kinetic" directory)
# - python json_sender.py -n lb -l True --flow="{srcip=10.0.0.1,dstip=10.0.0.2}" -a 127.0.0.1 -p 50001
#
#####################################################################################################
class serverlb(DynamicPolicy):
def __init__(self):
# Server list.
self.servers = {'10.0.0.3': '00:00:00:00:00:03',
'10.0.0.4': '00:00:00:00:00:04',
'10.0.0.5': '00:00:00:00:00:05'}
# Randmoly choose a server from the list
def randomly_choose_server(servermap):
return server_i_policy(choice(servermap.keys()))
# Forward to i-th server
def server_i_policy(i):
ip_list = self.servers.keys()
ip_str = str(i)
mac_str = self.servers[ip_str]
public_ip = IP('10.0.0.100')
client_ips = [IP('10.0.0.1'), IP('10.0.0.2')]
receive_ip = [IP(ip_str)]*len(client_ips)
rewrite_ip_policy = rewrite(zip(client_ips, receive_ip), public_ip)
rewrite_mac_policy = if_(match(dstip=IP(ip_str),ethtype=2048),
modify(dstmac=MAC(mac_str)),passthrough)
return rewrite_ip_policy >> rewrite_mac_policy
# Rewrite IP address.
def rewrite(d,p):
return intersection([subs(c,r,p) for c,r in d])
# subroutine of rewrite()
def subs(c,r,p):
c_to_p = match(srcip=c,dstip=p)
r_to_c = match(srcip=r,dstip=c)
return ((c_to_p >> modify(dstip=r))+(r_to_c >> modify(srcip=p))+(~r_to_c >> ~c_to_p))
### DEFINE THE FLEC FUNCTION
def lpec(f):
return match(srcip=f['srcip'])
## SET UP TRANSITION FUNCTIONS
@transition
def server(self):
self.case(occurred(self.event),self.event)
@transition
def policy(self):
self.servers = {'10.0.0.3': '00:00:00:00:00:03',
'10.0.0.4': '00:00:00:00:00:04',
'10.0.0.5': '00:00:00:00:00:05'}
self.case(is_true(V('server')),C(randomly_choose_server(self.servers)))
self.default(C(server_i_policy(self.servers.keys()[1])))
### SET UP THE FSM DESCRIPTION
self.fsm_def = FSMDef(
server=FSMVar(type=BoolType(),
init=False,
trans=server),
policy=FSMVar(type=Type(Policy,set([server_i_policy(i) for i in self.servers])),
init=server_i_policy(choice(self.servers.keys())),
trans=policy))
# Instantiate FSMPolicy, start/register JSON handler.
fsm_pol = FSMPolicy(lpec, self.fsm_def)
json_event = JSONEvent()
json_event.register_callback(fsm_pol.event_handler)
super(serverlb,self).__init__(fsm_pol)
def main():
pol = serverlb()
# For NuSMV
smv_str = fsm_def_to_smv_model(pol.fsm_def)
mc = ModelChecker(smv_str,'server_lb')
## Add specs
mc.save_as_smv_file()
mc.verify()
return pol >> mac_learner()
|
StarcoderdataPython
|
1747172
|
0
1
2
3
12
123
1234
1234999
|
StarcoderdataPython
|
3256597
|
<reponame>PSSTools/py-pss-parser<gh_stars>1-10
'''
Created on May 1, 2020
@author: ballance
'''
from pssparser.model.expr_id import ExprId
from pssparser.model.data_type import DataType
class CovergroupPort(object):
def __init__(self,
name : ExprId,
data_type : DataType):
self.name = name
self.data_type = data_type
def accept(self, v):
v.visit_covergroup_port(self)
|
StarcoderdataPython
|
1633645
|
import warnings
from pyramid.compat import urlparse
from pyramid.interfaces import (
IRequest,
IRouteRequest,
IRoutesMapper,
PHASE2_CONFIG,
)
from pyramid.exceptions import ConfigurationError
from pyramid.registry import predvalseq
from pyramid.request import route_request_iface
from pyramid.urldispatch import RoutesMapper
from pyramid.config.util import (
action_method,
as_sorted_tuple,
)
import pyramid.config.predicates
class RoutesConfiguratorMixin(object):
@action_method
def add_route(self,
name,
pattern=None,
permission=None,
factory=None,
for_=None,
header=None,
xhr=None,
accept=None,
path_info=None,
request_method=None,
request_param=None,
traverse=None,
custom_predicates=(),
use_global_views=False,
path=None,
pregenerator=None,
static=False,
**predicates):
""" Add a :term:`route configuration` to the current
configuration state, as well as possibly a :term:`view
configuration` to be used to specify a :term:`view callable`
that will be invoked when this route matches. The arguments
to this method are divided into *predicate*, *non-predicate*,
and *view-related* types. :term:`Route predicate` arguments
        narrow the circumstances in which a route will match a
request; non-predicate arguments are informational.
Non-Predicate Arguments
name
The name of the route, e.g. ``myroute``. This attribute is
required. It must be unique among all defined routes in a given
application.
factory
A Python object (often a function or a class) or a :term:`dotted
Python name` which refers to the same object that will generate a
:app:`Pyramid` root resource object when this route matches. For
example, ``mypackage.resources.MyFactory``. If this argument is
not specified, a default root factory will be used. See
:ref:`the_resource_tree` for more information about root factories.
traverse
If you would like to cause the :term:`context` to be
something other than the :term:`root` object when this route
matches, you can spell a traversal pattern as the
``traverse`` argument. This traversal pattern will be used
as the traversal path: traversal will begin at the root
object implied by this route (either the global root, or the
object returned by the ``factory`` associated with this
route).
The syntax of the ``traverse`` argument is the same as it is
for ``pattern``. For example, if the ``pattern`` provided to
``add_route`` is ``articles/{article}/edit``, and the
``traverse`` argument provided to ``add_route`` is
``/{article}``, when a request comes in that causes the route
to match in such a way that the ``article`` match value is
``'1'`` (when the request URI is ``/articles/1/edit``), the
traversal path will be generated as ``/1``. This means that
the root object's ``__getitem__`` will be called with the
name ``'1'`` during the traversal phase. If the ``'1'`` object
exists, it will become the :term:`context` of the request.
:ref:`traversal_chapter` has more information about
traversal.
If the traversal path contains segment marker names which
are not present in the ``pattern`` argument, a runtime error
will occur. The ``traverse`` pattern should not contain
segment markers that do not exist in the ``pattern``
argument.
A similar combining of routing and traversal is available
when a route is matched which contains a ``*traverse``
remainder marker in its pattern (see
:ref:`using_traverse_in_a_route_pattern`). The ``traverse``
argument to add_route allows you to associate route patterns
with an arbitrary traversal path without using a
``*traverse`` remainder marker; instead you can use other
match information.
Note that the ``traverse`` argument to ``add_route`` is
ignored when attached to a route that has a ``*traverse``
remainder marker in its pattern.
pregenerator
This option should be a callable object that implements the
:class:`pyramid.interfaces.IRoutePregenerator` interface. A
:term:`pregenerator` is a callable called by the
:meth:`pyramid.request.Request.route_url` function to augment or
replace the arguments it is passed when generating a URL for the
route. This is a feature not often used directly by applications,
it is meant to be hooked by frameworks that use :app:`Pyramid` as
a base.
use_global_views
When a request matches this route, and view lookup cannot
find a view which has a ``route_name`` predicate argument
that matches the route, try to fall back to using a view
that otherwise matches the context, request, and view name
(but which does not match the route_name predicate).
static
If ``static`` is ``True``, this route will never match an incoming
request; it will only be useful for URL generation. By default,
``static`` is ``False``. See :ref:`static_route_narr`.
.. versionadded:: 1.1
Predicate Arguments
pattern
The pattern of the route e.g. ``ideas/{idea}``. This
argument is required. See :ref:`route_pattern_syntax`
for information about the syntax of route patterns. If the
pattern doesn't match the current URL, route matching
continues.
.. note::
For backwards compatibility purposes (as of :app:`Pyramid` 1.0), a
``path`` keyword argument passed to this function will be used to
represent the pattern value if the ``pattern`` argument is
``None``. If both ``path`` and ``pattern`` are passed, ``pattern``
wins.
xhr
This value should be either ``True`` or ``False``. If this
value is specified and is ``True``, the :term:`request` must
possess an ``HTTP_X_REQUESTED_WITH`` (aka
``X-Requested-With``) header for this route to match. This
is useful for detecting AJAX requests issued from jQuery,
Prototype and other Javascript libraries. If this predicate
returns ``False``, route matching continues.
request_method
A string representing an HTTP method name, e.g. ``GET``, ``POST``,
``HEAD``, ``DELETE``, ``PUT`` or a tuple of elements containing
HTTP method names. If this argument is not specified, this route
will match if the request has *any* request method. If this
predicate returns ``False``, route matching continues.
.. versionchanged:: 1.2
The ability to pass a tuple of items as ``request_method``.
Previous versions allowed only a string.
path_info
This value represents a regular expression pattern that will
be tested against the ``PATH_INFO`` WSGI environment
variable. If the regex matches, this predicate will return
``True``. If this predicate returns ``False``, route
matching continues.
request_param
This value can be any string. A view declaration with this
argument ensures that the associated route will only match
when the request has a key in the ``request.params``
dictionary (an HTTP ``GET`` or ``POST`` variable) that has a
name which matches the supplied value. If the value
supplied as the argument has a ``=`` sign in it,
e.g. ``request_param="foo=123"``, then the key
(``foo``) must both exist in the ``request.params`` dictionary, and
the value must match the right hand side of the expression (``123``)
for the route to "match" the current request. If this predicate
returns ``False``, route matching continues.
header
This argument represents an HTTP header name or a header
name/value pair. If the argument contains a ``:`` (colon),
it will be considered a name/value pair
(e.g. ``User-Agent:Mozilla/.*`` or ``Host:localhost``). If
the value contains a colon, the value portion should be a
regular expression. If the value does not contain a colon,
the entire value will be considered to be the header name
(e.g. ``If-Modified-Since``). If the value evaluates to a
header name only without a value, the header specified by
the name must be present in the request for this predicate
to be true. If the value evaluates to a header name/value
pair, the header specified by the name must be present in
the request *and* the regular expression specified as the
value must match the header value. Whether or not the value
represents a header name or a header name/value pair, the
case of the header name is not significant. If this
predicate returns ``False``, route matching continues.
accept
This value represents a match query for one or more
mimetypes in the ``Accept`` HTTP request header. If this
value is specified, it must be in one of the following
forms: a mimetype match token in the form ``text/plain``, a
wildcard mimetype match token in the form ``text/*`` or a
match-all wildcard mimetype match token in the form ``*/*``.
If any of the forms matches the ``Accept`` header of the
request, or if the ``Accept`` header isn't set at all in the
request, this predicate will be true. If this predicate
returns ``False``, route matching continues.
effective_principals
If specified, this value should be a :term:`principal` identifier or
a sequence of principal identifiers. If the
:attr:`pyramid.request.Request.effective_principals` property
indicates that every principal named in the argument list is present
in the current request, this predicate will return True; otherwise it
will return False. For example:
``effective_principals=pyramid.security.Authenticated`` or
``effective_principals=('fred', 'group:admins')``.
.. versionadded:: 1.4a4
custom_predicates
.. deprecated:: 1.5
This value should be a sequence of references to custom
predicate callables. Use custom predicates when no set of
predefined predicates does what you need. Custom predicates
can be combined with predefined predicates as necessary.
Each custom predicate callable should accept two arguments:
``info`` and ``request`` and should return either ``True``
or ``False`` after doing arbitrary evaluation of the info
and/or the request. If all custom and non-custom predicate
callables return ``True`` the associated route will be
considered viable for a given request. If any predicate
callable returns ``False``, route matching continues. Note
that the value ``info`` passed to a custom route predicate
is a dictionary containing matching information; see
:ref:`custom_route_predicates` for more information about
``info``.
predicates
Pass a key/value pair here to use a third-party predicate
registered via
:meth:`pyramid.config.Configurator.add_view_predicate`. More than
one key/value pair can be used at the same time. See
:ref:`view_and_route_predicates` for more information about
third-party predicates.
.. versionadded:: 1.4
"""
if custom_predicates:
warnings.warn(
('The "custom_predicates" argument to Configurator.add_route '
'is deprecated as of Pyramid 1.5. Use '
'"config.add_route_predicate" and use the registered '
'route predicate as a predicate argument to add_route '
'instead. See "Adding A Third Party View, Route, or '
'Subscriber Predicate" in the "Hooks" chapter of the '
'documentation for more information.'),
DeprecationWarning,
stacklevel=3
)
# these are route predicates; if they do not match, the next route
# in the routelist will be tried
if request_method is not None:
request_method = as_sorted_tuple(request_method)
factory = self.maybe_dotted(factory)
if pattern is None:
pattern = path
if pattern is None:
raise ConfigurationError('"pattern" argument may not be None')
# check for an external route; an external route is one which is
# is a full url (e.g. 'http://example.com/{id}')
parsed = urlparse.urlparse(pattern)
external_url = pattern
if parsed.hostname:
pattern = parsed.path
original_pregenerator = pregenerator
def external_url_pregenerator(request, elements, kw):
if '_app_url' in kw:
raise ValueError(
'You cannot generate a path to an external route '
'pattern via request.route_path nor pass an _app_url '
'to request.route_url when generating a URL for an '
'external route pattern (pattern was "%s") ' %
(pattern,)
)
if '_scheme' in kw:
scheme = kw['_scheme']
elif parsed.scheme:
scheme = parsed.scheme
else:
scheme = request.scheme
kw['_app_url'] = '{0}://{1}'.format(scheme, parsed.netloc)
if original_pregenerator:
elements, kw = original_pregenerator(
request, elements, kw)
return elements, kw
pregenerator = external_url_pregenerator
static = True
elif self.route_prefix:
pattern = self.route_prefix.rstrip('/') + '/' + pattern.lstrip('/')
mapper = self.get_routes_mapper()
introspectables = []
intr = self.introspectable('routes',
name,
'%s (pattern: %r)' % (name, pattern),
'route')
intr['name'] = name
intr['pattern'] = pattern
intr['factory'] = factory
intr['xhr'] = xhr
intr['request_methods'] = request_method
intr['path_info'] = path_info
intr['request_param'] = request_param
intr['header'] = header
intr['accept'] = accept
intr['traverse'] = traverse
intr['custom_predicates'] = custom_predicates
intr['pregenerator'] = pregenerator
intr['static'] = static
intr['use_global_views'] = use_global_views
if static is True:
intr['external_url'] = external_url
introspectables.append(intr)
if factory:
factory_intr = self.introspectable('root factories',
name,
self.object_description(factory),
'root factory')
factory_intr['factory'] = factory
factory_intr['route_name'] = name
factory_intr.relate('routes', name)
introspectables.append(factory_intr)
def register_route_request_iface():
request_iface = self.registry.queryUtility(IRouteRequest, name=name)
if request_iface is None:
if use_global_views:
bases = (IRequest,)
else:
bases = ()
request_iface = route_request_iface(name, bases)
self.registry.registerUtility(
request_iface, IRouteRequest, name=name)
def register_connect():
pvals = predicates.copy()
pvals.update(
dict(
xhr=xhr,
request_method=request_method,
path_info=path_info,
request_param=request_param,
header=header,
accept=accept,
traverse=traverse,
custom=predvalseq(custom_predicates),
)
)
predlist = self.get_predlist('route')
_, preds, _ = predlist.make(self, **pvals)
route = mapper.connect(
name, pattern, factory, predicates=preds,
pregenerator=pregenerator, static=static
)
intr['object'] = route
return route
# We have to connect routes in the order they were provided;
# we can't use a phase to do that, because when the actions are
# sorted, actions in the same phase lose relative ordering
self.action(('route-connect', name), register_connect)
# But IRouteRequest interfaces must be registered before we begin to
# process view registrations (in phase 3)
self.action(('route', name), register_route_request_iface,
order=PHASE2_CONFIG, introspectables=introspectables)
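    # Illustrative (non-original) use of the API documented above; the route
    # names, patterns and predicate values are placeholders:
    #
    #   config.add_route('idea', 'ideas/{idea}', request_method='GET')
    #   config.add_route('api_ping', '/ping', xhr=True)
    #   config.add_route('blog', 'http://blog.example.com/{post}')  # external (static) route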
@action_method
def add_route_predicate(self, name, factory, weighs_more_than=None,
weighs_less_than=None):
""" Adds a route predicate factory. The view predicate can later be
named as a keyword argument to
:meth:`pyramid.config.Configurator.add_route`.
``name`` should be the name of the predicate. It must be a valid
Python identifier (it will be used as a keyword argument to
``add_view``).
``factory`` should be a :term:`predicate factory` or :term:`dotted
Python name` which refers to a predicate factory.
See :ref:`view_and_route_predicates` for more information.
.. versionadded:: 1.4
"""
self._add_predicate(
'route',
name,
factory,
weighs_more_than=weighs_more_than,
weighs_less_than=weighs_less_than
)
def add_default_route_predicates(self):
p = pyramid.config.predicates
for (name, factory) in (
('xhr', p.XHRPredicate),
('request_method', p.RequestMethodPredicate),
('path_info', p.PathInfoPredicate),
('request_param', p.RequestParamPredicate),
('header', p.HeaderPredicate),
('accept', p.AcceptPredicate),
('effective_principals', p.EffectivePrincipalsPredicate),
('custom', p.CustomPredicate),
('traverse', p.TraversePredicate),
):
self.add_route_predicate(name, factory)
def get_routes_mapper(self):
""" Return the :term:`routes mapper` object associated with
this configurator's :term:`registry`."""
mapper = self.registry.queryUtility(IRoutesMapper)
if mapper is None:
mapper = RoutesMapper()
self.registry.registerUtility(mapper, IRoutesMapper)
return mapper
|
StarcoderdataPython
|
2141
|
hiddenimports = ['sip', 'PyQt4.QtGui', 'PyQt4._qt']
from PyInstaller.hooks.hookutils import qt4_plugins_binaries
def hook(mod):
mod.binaries.extend(qt4_plugins_binaries('phonon_backend'))
return mod
|
StarcoderdataPython
|
127842
|
<filename>1-second-data-summary/one_second_data_summary_functions.py
import glob
import os
import collections
import datetime
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['figure.figsize'] = (10.0, 10.0)
import matplotlib.pyplot as plt
import matplotlib.mlab
import numpy as np
import scipy
import scipy.signal
import h5py
from rq import Queue
from rq import get_current_job
def calibrate_offset(f, average_frequency):
if 'voltage' not in list(f):
return np.zeros(f['voltage1'].shape), np.zeros(f['voltage1'].shape)
length = len(f['voltage'])
period_length = round(average_frequency / 50)
remainder = divmod(length, period_length)[1]
if remainder == 0:
remainder = period_length
offset = np.pad(f['voltage'][:], (0, period_length - remainder), 'constant', constant_values=0).reshape(-1, period_length).mean(axis=1)
x = np.linspace(1, length, length // period_length, dtype=np.int)
new_x = np.linspace(1, length, length - period_length, dtype=np.int)
offset = scipy.interpolate.interp1d(x, offset)(new_x)
offset = np.concatenate((np.repeat([offset[0]], period_length / 2), offset, np.repeat([offset[-1]], period_length / 2)))
return offset, offset * 0.7
def compute_rms(f, j, seconds_per_file, average_frequency, offset_voltage, offset_current, name):
"""
Root-Mean-Square'd values per second.
First we square the quantity, then we calculate the mean and finally, the
square-root of the mean of the squares.
"""
rms = dict()
for name in list(f):
signal = f[name][:] * 1.0
if name == 'voltage' and offset_voltage is not None:
signal -= offset_voltage
elif 'current' in name and offset_current is not None:
signal -= offset_current
signal = np.multiply(signal, f[name].attrs['calibration_factor'])
signal -= np.mean(signal)
signal = scipy.signal.medfilt(signal, 15)
key = name.replace('current', 'current_rms').replace('voltage', 'voltage_rms')
rms[key] = np.sqrt(np.mean(np.square(signal)[:seconds_per_file * int(average_frequency)].reshape(-1, int(average_frequency)), axis=1))
return rms
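# Quick sanity check for the RMS idea used above (numpy-only, illustrative):
# a pure sine of amplitude A has an RMS value of A / sqrt(2), so a mains-like
# 325 V peak signal should come out at roughly 230 V RMS.
#   >>> t = np.linspace(0, 1, 50000, endpoint=False)
#   >>> x = 325.0 * np.sin(2 * np.pi * 50 * t)
#   >>> round(float(np.sqrt(np.mean(np.square(x)))), 1)
#   229.8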
def compute_real_power(f, j, seconds_per_file, average_frequency, offset_voltage, offset_current):
"""
Real power is the average of instantaneous power.
First we calculate the instantaneous power by multiplying the instantaneous
voltage measurement by the instantaneous current measurement. We sum the
instantaneous power measurement over a given number of samples and divide by
that number of samples.
"""
cs = [n for n in list(f) if 'current' in n]
real_power = dict()
for cs_i, _ in enumerate(cs):
if 'voltage' in list(f):
voltage_name = 'voltage'
else:
voltage_name = 'voltage{}'.format(cs_i + 1)
voltage_signal = f[voltage_name][:] * 1.0
if offset_voltage is not None:
voltage_signal -= offset_voltage
voltage_signal *= f[voltage_name].attrs['calibration_factor']
voltage_signal -= np.mean(voltage_signal)
voltage_signal = scipy.signal.medfilt(voltage_signal, 15)
current_signal = f['current{}'.format(cs_i + 1)][:] * 1.0
if offset_current is not None:
current_signal -= offset_current
current_signal *= f['current{}'.format(cs_i + 1)].attrs['calibration_factor']
current_signal -= np.mean(current_signal)
current_signal = scipy.signal.medfilt(current_signal, 15)
real_power_name = 'real_power{}'.format(cs_i + 1)
current = current_signal[:seconds_per_file * int(average_frequency)].reshape(-1, int(average_frequency))
voltage = voltage_signal[:seconds_per_file * int(average_frequency)].reshape(-1, int(average_frequency))
real_power[real_power_name] = np.mean(current * voltage, axis=1)
return real_power
def compute_apparent_power(f, j, seconds_per_file, values):
"""
Apparent power is the product of the voltage RMS and the current RMS.
"""
cs = [n for n in list(f) if 'current' in n]
apparent_power = dict()
for cs_i, _ in enumerate(cs):
if 'voltage' in list(f):
voltage_name = 'voltage_rms'
else:
voltage_name = 'voltage_rms{}'.format(cs_i + 1)
current_rms_name = 'current_rms{}'.format(cs_i + 1)
apparent_power_name = 'apparent_power{}'.format(cs_i + 1)
voltage_rms = values[voltage_name][j:j + seconds_per_file]
current_rms = values[current_rms_name][j:j + seconds_per_file]
apparent_power[apparent_power_name] = voltage_rms * current_rms
return apparent_power
def compute_power_factor(f, j, seconds_per_file, values):
"""
Power factor is the ratio of real power to apparent power.
"""
cs = [n for n in list(f) if 'current' in n]
power_factor = dict()
for cs_i, _ in enumerate(cs):
real_power_name = 'real_power{}'.format(cs_i + 1)
apparent_power_name = 'apparent_power{}'.format(cs_i + 1)
power_factor_name = 'power_factor{}'.format(cs_i + 1)
real_power = values[real_power_name][j:j + seconds_per_file]
apparent_power = values[apparent_power_name][j:j + seconds_per_file]
power_factor[power_factor_name] = real_power / apparent_power
return power_factor
def compute_mains_frequency(f, j, seconds_per_file, frequency, average_frequency, offset_voltage):
"""
Mains frequency is calculated by counting zero-crossings in the voltage.
To get a cleaner value, we take the average across all phases.
"""
vs = [n for n in list(f) if 'voltage' in n]
mains_freq = np.zeros((len(vs), seconds_per_file))
for cs_i, name in enumerate(vs):
voltage_signal = (f[name][:] * 1.0 - offset_voltage) * f[name].attrs['calibration_factor']
voltage_signal = scipy.signal.medfilt(voltage_signal, 15)
for i, start in enumerate(range(0, len(voltage_signal), frequency)):
voltage_slice = voltage_signal[start:(start + frequency)]
indices = np.flatnonzero((voltage_slice[1:] >= 0) & (voltage_slice[:-1] < 0))
crossings = [k - voltage_slice[k] / (voltage_slice[k + 1] - voltage_slice[k]) for k in indices]
mains_freq[cs_i, i] = frequency / np.mean(np.diff(crossings)) * (average_frequency / frequency)
return np.mean(mains_freq, axis=0)
def compute_average_frequency(start_file, end_file, files_path, files_length, seconds_per_file):
"""
Estimate the average sampling rate per day.
Calculate the difference between the first and last sample of a day based on
the timestamps of the files.
"""
with h5py.File(start_file, 'r', driver='core') as f:
frequency = int(f.attrs['frequency'])
start_timestamp = datetime.datetime(
year=int(f.attrs['year']),
month=int(f.attrs['month']),
day=int(f.attrs['day']),
hour=int(f.attrs['hours']),
minute=int(f.attrs['minutes']),
second=int(f.attrs['seconds']),
microsecond=int(f.attrs['microseconds']),
tzinfo=datetime.timezone(datetime.timedelta(hours=int(f.attrs['timezone'][1:4]), minutes=int(f.attrs['timezone'][4:]))),
)
next_folder = datetime.date(start_timestamp.year, start_timestamp.month, start_timestamp.day) + datetime.timedelta(days=1)
next_files_path = files_path.replace(
'/{:04d}-{:02d}-{:02d}/'.format(start_timestamp.year, start_timestamp.month, start_timestamp.day),
'/{:04d}-{:02d}-{:02d}/'.format(next_folder.year, next_folder.month, next_folder.day))
next_files = sorted(glob.glob(next_files_path))
if next_files:
end_file = next_files[0]
duration = files_length * seconds_per_file
else:
end_file = end_file
duration = (files_length - 1) * seconds_per_file
with h5py.File(end_file, 'r', driver='core') as f:
end_timestamp = datetime.datetime(
year=int(f.attrs['year']),
month=int(f.attrs['month']),
day=int(f.attrs['day']),
hour=int(f.attrs['hours']),
minute=int(f.attrs['minutes']),
second=int(f.attrs['seconds']),
microsecond=int(f.attrs['microseconds']),
tzinfo=datetime.timezone(datetime.timedelta(hours=int(f.attrs['timezone'][1:4]), minutes=int(f.attrs['timezone'][4:]))),
)
return duration / (end_timestamp - start_timestamp).total_seconds() * frequency
def make_hdf5_file(hdf5_file, year, month, day, name, values, delay_after_midnight, frequency, average_frequency):
with h5py.File(hdf5_file, 'w', driver='core') as f:
f.attrs.create('year', year, dtype='uint32')
f.attrs.create('month', month, dtype='uint32')
f.attrs.create('day', day, dtype='uint32')
f.attrs.create('name', bytes(name, 'ASCII'))
f.attrs.create('frequency', frequency, dtype='uint64')
f.attrs.create('average_frequency', average_frequency, dtype='float')
f.attrs.create('delay_after_midnight', delay_after_midnight, dtype='int32')
for k in sorted(values.keys()):
v = values[k]
f.create_dataset(
k,
data=v,
shape=v.shape,
dtype='f',
fletcher32=True,
compression='gzip',
compression_opts=9,
shuffle=True,
)
def make_plots(hdf5_file, year, month, day, name, delay_after_midnight):
with h5py.File(hdf5_file, 'r', driver='core') as f:
powers = [f[n][:] for n in list(f) if 'apparent_power' in n]
max_power = np.max([np.max(p) for p in powers])
if max_power <= 150:
max_power = 150
elif max_power <= 200:
max_power = 200
elif max_power <= 300:
max_power = 300
elif max_power <= 1000:
max_power = 1000
elif max_power <= 2000:
max_power = 2000
elif max_power <= 3000:
max_power = 3000
is_dst_affected = len(powers[0]) > 60 * 60 * 24 + 300 # longer than a full day plus a bit extra
plt.figure()
f, axarr = plt.subplots(len(powers), sharex=True)
for j, power in enumerate(powers):
power = np.pad(power, ((-len(power)) % 5, 0), 'constant', constant_values=0)
time_scale = np.arange(delay_after_midnight, delay_after_midnight + len(power), 5) / (60 * 60)
p = np.median(power.reshape(-1, 5), axis=1)
axarr[j].plot(time_scale, p, linewidth=0.8)
axarr[j].set_ylim(0, max_power)
axarr[j].set_ylabel('Power #{} [W]'.format(j + 1))
axarr[0].set_title("{} - Apparent Power - {:04d}-{:02d}-{:02d}".format(name, year, month, day))
plt.xticks(list(range(0, 26, 1)), ['{}:00'.format(j) for j in range(0, 26, 1)], rotation='vertical')
axarr[-1].set_xlim(0, 25.25 if is_dst_affected else 24.25)
axarr[-1].set_xlabel('Time of the day')
filename = "summary-{:04d}-{:02d}-{:02d}-{}.pdf".format(year, month, day, name)
plt.savefig(os.path.join(os.path.dirname(hdf5_file), filename))
plt.close()
def compute_one_second_data_summary(folder, path_prefix, results_folder):
dataset_folder = folder.split('/')[0]
files_path = os.path.expanduser(os.path.join(path_prefix, folder, '*.hdf5'))
files = sorted(glob.glob(files_path))
if len(files) == 0:
raise ValueError("No files found: " + files_path)
if folder == 'BLOND-50/2016-10-18/clear':
# CLEAR had a brief interruption that day.
len_files = 288
else:
len_files = len(files)
with h5py.File(files[0], 'r', driver='core') as f:
name = f.attrs['name'].decode()
year = f.attrs['year']
month = f.attrs['month']
day = f.attrs['day']
frequency = int(f.attrs['frequency'])
length = len(f[list(f)[0]])
seconds_per_file = length // frequency
delay_after_midnight = int(f.attrs['hours']) * 60 * 60 + int(f.attrs['minutes']) * 60 + round(int(f.attrs['seconds']) + int(f.attrs['microseconds']) * 1e-6)
if folder == 'BLOND-50/2016-10-18/clear':
# CLEAR had a brief interruption that day.
average_frequency = 49952.355
else:
average_frequency = compute_average_frequency(files[0], files[-1], files_path, len_files, seconds_per_file)
seconds_per_file = length / average_frequency
seconds_per_file = int(5 * round(float(seconds_per_file) / 5))
j = 0
values = collections.defaultdict(lambda: np.zeros(len_files * seconds_per_file))
for file in files:
try:
with h5py.File(file, 'r', driver='core') as f:
offset_voltage, offset_current = calibrate_offset(f, average_frequency)
if folder == 'BLOND-50/2016-10-18/clear' and f.attrs['sequence'] == 0:
# CLEAR had a brief interruption that day.
# We need to create a gap to align the next data file correctly.
j += 8367
for k, v in compute_rms(f, j, seconds_per_file, average_frequency, offset_voltage, offset_current, name).items():
values[k][j:j + seconds_per_file] = v
for k, v in compute_real_power(f, j, seconds_per_file, average_frequency, offset_voltage, offset_current).items():
values[k][j:j + seconds_per_file] = v
for k, v in compute_apparent_power(f, j, seconds_per_file, values).items():
values[k][j:j + seconds_per_file] = v
for k, v in compute_power_factor(f, j, seconds_per_file, values).items():
values[k][j:j + seconds_per_file] = v
values['mains_frequency'][j:j + seconds_per_file] = compute_mains_frequency(f, j, seconds_per_file, frequency, average_frequency, offset_voltage)
except IOError:
pass
j += seconds_per_file
filename = 'summary-{:04d}-{:02d}-{:02d}-{}.hdf5'.format(year, month, day, name)
folder = os.path.expanduser(os.path.join(results_folder, dataset_folder, '{:04d}-{:02d}-{:02d}'.format(year, month, day), name))
os.makedirs(folder, exist_ok=True)
hdf5_file = os.path.join(folder, filename)
make_hdf5_file(hdf5_file, year, month, day, name, values, delay_after_midnight, frequency, average_frequency)
make_plots(hdf5_file, year, month, day, name, delay_after_midnight)
job = get_current_job()
results_q = Queue(connection=job.connection, name='results')
results_q.enqueue(print, folder)
|
StarcoderdataPython
|
3347674
|
<reponame>ToReforge/djforge-redis-multitokens
try:
from unittest.mock import patch
except ImportError:
from mock import patch
from django.contrib.auth import get_user_model
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from .utils import (
create_test_user,
MockedLibrarySettings,
MockedSettings,
SetupTearDownForMultiTokenTests,
)
from djforge_redis_multitokens.tokens_auth import MultiToken, TOKENS_CACHE
from djforge_redis_multitokens.utils import parse_full_token
User = get_user_model()
class TestCreateToken(SetupTearDownForMultiTokenTests, TestCase):
def test_new_token_has_attributes_required_by_DRF(self):
self.assertTrue(hasattr(self.token, 'key'))
self.assertTrue(hasattr(self.token, 'user'))
self.assertEqual(self.token.user.pk, self.user.pk)
def test_first_token_for_user_is_flagged_correctly_as_first_device_getting_a_token(self):
self.assertTrue(self.first_device)
def test_second_token_for_user_is_flagged_correctly_as_not_the_first_device_getting_a_token(self):
second_token, first_device = MultiToken.create_token(self.user)
self.assertFalse(first_device)
def test_token_is_saved_correctly_in_redis(self):
self.assertIsNotNone(TOKENS_CACHE.get(self.user.pk))
hashes = TOKENS_CACHE.get(self.user.pk)
self.assertEqual(len(hashes), 1)
self.assertIsNotNone(TOKENS_CACHE.get(hashes[0]))
def test_only_token_hash_is_saved_in_redis(self):
hash = TOKENS_CACHE.get(self.user.pk)[0]
self.assertIsNotNone(TOKENS_CACHE.get(hash))
self.assertIsNone(TOKENS_CACHE.get(self.token.key))
def test_second_hash_is_saved_in_redis_alongside_the_first_one(self):
first_hash = TOKENS_CACHE.get(self.user.pk)[0]
second_token, first_device = MultiToken.create_token(self.user)
second_hash = TOKENS_CACHE.get(self.user.pk)[1]
self.assertEqual(len(TOKENS_CACHE.get(self.user.pk)), 2)
self.assertIn(first_hash, TOKENS_CACHE.get(self.user.pk))
self.assertIn(second_hash, TOKENS_CACHE.get(self.user.pk))
self.assertIsNotNone(TOKENS_CACHE.get(first_hash))
self.assertIsNotNone(TOKENS_CACHE.get(second_hash))
class TestGetUserFromTokenMethod(SetupTearDownForMultiTokenTests, TestCase):
def test_correct_user_is_found_for_correct_token(self):
user = MultiToken.get_user_from_token(self.token.key)
self.assertEqual(user.pk, self.user.pk)
def test_exception_is_raised_for_wrong_token(self):
wrong_token = self.token.key[:-1]
self.assertRaises(User.DoesNotExist, MultiToken.get_user_from_token, wrong_token)
wrong_token = self.token.key[1:]
self.assertRaises(User.DoesNotExist, MultiToken.get_user_from_token, wrong_token)
class TestExpireTokenMethod(SetupTearDownForMultiTokenTests, TestCase):
def test_token_is_removed_from_redis_when_user_has_only_one_token(self):
self.assertIsNone(MultiToken.expire_token(self.token))
self.assertEqual(len(TOKENS_CACHE.get(self.user.pk)), 0)
_, hash = parse_full_token(self.token.key)
self.assertIsNone(TOKENS_CACHE.get(hash))
def test_token_is_removed_from_redis_when_user_has_multiple_tokens(self):
second_token, first_device = MultiToken.create_token(self.user)
MultiToken.expire_token(self.token)
self.assertEqual(len(TOKENS_CACHE.get(self.user.pk)), 1)
_, hash = parse_full_token(self.token.key)
self.assertIsNone(TOKENS_CACHE.get(hash))
self.assertEqual(TOKENS_CACHE.get(self.user.pk)[0], parse_full_token(second_token.key)[1])
self.assertIsNotNone(TOKENS_CACHE.get(parse_full_token(second_token.key)[1]))
def test_other_users_tokens_are_not_affected(self):
second_user = create_test_user('tester2')
second_token, _ = MultiToken.create_token(second_user)
MultiToken.expire_token(self.token)
self.assertIsNotNone(TOKENS_CACHE.get(second_user.pk))
self.assertIsNotNone(TOKENS_CACHE.get(parse_full_token(second_token.key)[1]))
class TestExpireAllTokenMethod(SetupTearDownForMultiTokenTests, TestCase):
def test_all_tokens_from_user_are_removed_when_user_has_only_one_token(self):
self.assertIsNone(MultiToken.expire_token(self.token))
self.assertEqual(len(TOKENS_CACHE.get(self.user.pk)), 0)
_, hash = parse_full_token(self.token.key)
self.assertIsNone(TOKENS_CACHE.get(hash))
def test_all_tokens_from_user_are_removed_when_user_has_multiple_tokens(self):
second_token, first_device = MultiToken.create_token(self.user)
MultiToken.expire_token(self.token)
self.assertEqual(len(TOKENS_CACHE.get(self.user.pk)), 1)
_, hash = parse_full_token(self.token.key)
self.assertIsNone(TOKENS_CACHE.get(hash))
self.assertEqual(TOKENS_CACHE.get(self.user.pk)[0], parse_full_token(second_token.key)[1])
self.assertIsNotNone(TOKENS_CACHE.get(parse_full_token(second_token.key)[1]))
def test_other_users_are_not_affected(self):
second_user = create_test_user('tester2')
second_token, _ = MultiToken.create_token(second_user)
MultiToken.expire_all_tokens(self.user)
self.assertIsNotNone(TOKENS_CACHE.get(second_user.pk))
self.assertIsNotNone(TOKENS_CACHE.get(parse_full_token(second_token.key)[1]))
class TestSetValueInCacheMethod(SetupTearDownForMultiTokenTests, TestCase):
@patch('djforge_redis_multitokens.tokens_auth.settings', new=MockedSettings(timeout=None))
def test_default_timeout_for_cache_db_is_used_when_timeout_is_not_provided(self):
MultiToken._set_key_value('key', 'value')
self.assertIsNone(TOKENS_CACHE.ttl('key'))
@patch('djforge_redis_multitokens.tokens_auth.settings', new=MockedSettings(timeout=1000))
def test_token_ttl_is_correct_when_user_provides_cache_db_timeout_parameter(self):
MultiToken._set_key_value('key', 'value')
self.assertIsNotNone(TOKENS_CACHE.ttl('key'))
self.assertAlmostEquals(TOKENS_CACHE.ttl('key'), 1000)
class TestResetTokensTTLMethod(SetupTearDownForMultiTokenTests, TestCase):
@patch('djforge_redis_multitokens.tokens_auth.settings', new=MockedSettings(timeout=1000))
def test_users_immortal_tokens_get_limited_ttl_when_OVERWRITE_NONE_TTL_setting_is_True(self):
hash = TOKENS_CACHE.get(self.user.pk)[0]
self.assertIsNone(TOKENS_CACHE.ttl(self.user.pk))
self.assertIsNone(TOKENS_CACHE.ttl(hash))
MultiToken.reset_tokens_ttl(self.user.pk)
self.assertIsNotNone(TOKENS_CACHE.ttl(self.user.pk))
self.assertIsNotNone(TOKENS_CACHE.ttl(hash))
@patch('djforge_redis_multitokens.tokens_auth.settings', new=MockedSettings(timeout=None))
@patch('djforge_redis_multitokens.tokens_auth.drt_settings', new=MockedLibrarySettings(overwrite_ttl=False))
def test_users_immortal_tokens_stay_immortal_when_OVERWRITE_NONE_TTL_setting_is_False(self):
hash = TOKENS_CACHE.get(self.user.pk)[0]
MultiToken.reset_tokens_ttl(self.user.pk)
self.assertIsNone(TOKENS_CACHE.ttl(self.user.pk))
self.assertIsNone(TOKENS_CACHE.ttl(hash))
@patch('djforge_redis_multitokens.tokens_auth.settings', new=MockedSettings(timeout=1000))
def test_other_users_tokens_are_not_affected(self):
second_user = create_test_user('tester2')
second_token, _ = MultiToken.create_token(second_user)
import time
time.sleep(1)
MultiToken.reset_tokens_ttl(self.user.pk)
self.assertEqual(TOKENS_CACHE.ttl(self.user.pk), 1000)
self.assertNotEqual(TOKENS_CACHE.ttl(second_user.pk), 1000)
hash = TOKENS_CACHE.get(second_user.pk)[0]
self.assertNotEqual(TOKENS_CACHE.ttl(hash), 1000)
@patch('djforge_redis_multitokens.tokens_auth.settings', new=MockedSettings(timeout=1000))
@patch('djforge_redis_multitokens.tokens_auth.drt_settings', new=MockedLibrarySettings())
def test_correct_ttl_is_set_for_renewed_tokens(self):
hash = TOKENS_CACHE.get(self.user.pk)[0]
MultiToken.reset_tokens_ttl(self.user.pk)
self.assertAlmostEquals(TOKENS_CACHE.ttl(self.user.pk), 1000)
self.assertAlmostEquals(TOKENS_CACHE.ttl(hash), 1000)
@patch('djforge_redis_multitokens.tokens_auth.settings', new=MockedSettings())
def test_immortal_tokens_stay_immortal_when_user_doesnt_provide_timeout(self):
hash = TOKENS_CACHE.get(self.user.pk)[0]
self.assertIsNone(TOKENS_CACHE.ttl(self.user.pk))
self.assertIsNone(TOKENS_CACHE.ttl(hash))
MultiToken.reset_tokens_ttl(self.user.pk)
self.assertIsNone(TOKENS_CACHE.ttl(self.user.pk))
self.assertIsNone(TOKENS_CACHE.ttl(hash))
@patch('djforge_redis_multitokens.tokens_auth.settings', new=MockedSettings(timeout=None))
def test_immortal_tokens_stay_immortal_when_user_provided_timeout_is_None(self):
hash = TOKENS_CACHE.get(self.user.pk)[0]
self.assertIsNone(TOKENS_CACHE.ttl(self.user.pk))
self.assertIsNone(TOKENS_CACHE.ttl(hash))
MultiToken.reset_tokens_ttl(self.user.pk)
self.assertIsNone(TOKENS_CACHE.ttl(self.user.pk))
self.assertIsNone(TOKENS_CACHE.ttl(hash))
@patch('djforge_redis_multitokens.tokens_auth.settings')
def test_token_with_ttl_becomes_immortal_when_user_changes_timeout_to_None(self, mocked_settings):
hash = TOKENS_CACHE.get(self.user.pk)[0]
TOKENS_CACHE.expire(self.user.pk, 1000)
TOKENS_CACHE.expire(hash, 1000)
settings = MockedSettings(timeout=None)
mocked_settings.CACHES.__getitem__.return_value = settings.CACHES['default']
MultiToken.reset_tokens_ttl(self.user.pk)
self.assertIsNone(TOKENS_CACHE.ttl(self.user.pk))
self.assertIsNone(TOKENS_CACHE.ttl(hash))
@patch('djforge_redis_multitokens.tokens_auth.settings')
def test_token_with_ttl_gets_new_ttl_when_user_changes_timeout_to_2000(self, mocked_settings):
hash = TOKENS_CACHE.get(self.user.pk)[0]
TOKENS_CACHE.expire(self.user.pk, 1000)
TOKENS_CACHE.expire(hash, 1000)
settings = MockedSettings(timeout=2000)
mocked_settings.CACHES.__getitem__.return_value = settings.CACHES['default']
MultiToken.reset_tokens_ttl(self.user.pk)
self.assertEqual(TOKENS_CACHE.ttl(self.user.pk), 2000)
self.assertEqual(TOKENS_CACHE.ttl(hash), 2000)
class TestCachedTokenAuthentication(SetupTearDownForMultiTokenTests, TestCase):
header_prefix = 'Token '
def test_auth_for_user_without_token_fails(self):
TOKENS_CACHE.clear()
client = APIClient(enforce_csrf_checks=True)
response = client.post('/token/', {'username': self.user.username}, format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_auth_for_user_with_token_succeeds(self):
client = APIClient(enforce_csrf_checks=True)
auth = self.header_prefix + self.token.key
response = client.post('/token/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_auth_with_wrong_token_fails(self):
client = APIClient(enforce_csrf_checks=True)
auth = self.header_prefix + self.token.key + 'blah'
response = client.post('/token/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
@patch('djforge_redis_multitokens.tokens_auth.MultiToken.reset_tokens_ttl')
def test_successful_auth_renews_token(self, mock_reset_ttl_method):
client = APIClient(enforce_csrf_checks=True)
auth = self.header_prefix + self.token.key
response = client.post('/token/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
mock_reset_ttl_method.assert_called_once_with(self.user.pk)
|
StarcoderdataPython
|
144796
|
<reponame>rdo-management/ceilometer
#
# Copyright 2014 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Rest alarm notifier with trusted authentication."""
from keystoneclient.v3 import client as keystone_client
from oslo_config import cfg
from six.moves.urllib import parse
from ceilometer.alarm.notifier import rest
cfg.CONF.import_opt('http_timeout', 'ceilometer.service')
cfg.CONF.import_group('service_credentials', 'ceilometer.service')
class TrustRestAlarmNotifier(rest.RestAlarmNotifier):
"""Notifier supporting keystone trust authentication.
This alarm notifier is intended to be used to call an endpoint using
keystone authentication. It uses the ceilometer service user to
authenticate using the trust ID provided.
The URL must be in the form trust+http://trust-id@host/action.
"""
@staticmethod
def notify(action, alarm_id, alarm_name, severity, previous, current,
reason, reason_data):
trust_id = action.username
auth_url = cfg.CONF.service_credentials.os_auth_url.replace(
"v2.0", "v3")
client = keystone_client.Client(
username=cfg.CONF.service_credentials.os_username,
password=cfg.CONF.service_credentials.os_password,
cacert=cfg.CONF.service_credentials.os_cacert,
auth_url=auth_url,
region_name=cfg.CONF.service_credentials.os_region_name,
insecure=cfg.CONF.service_credentials.insecure,
timeout=cfg.CONF.http_timeout,
trust_id=trust_id)
# Remove the fake user
netloc = action.netloc.split("@")[1]
# Remove the trust prefix
scheme = action.scheme[6:]
action = parse.SplitResult(scheme, netloc, action.path, action.query,
action.fragment)
headers = {'X-Auth-Token': client.auth_token}
rest.RestAlarmNotifier.notify(
action, alarm_id, alarm_name, severity, previous, current, reason,
reason_data, headers)
|
StarcoderdataPython
|
151682
|
import numpy as np
from skimage.morphology import label
from scipy.sparse import csr_matrix
from scipy.spatial import cKDTree as KDTree
import pandas as pd
import itertools
from tqdm import tqdm
def compute_M(data):
cols = np.arange(data.size)
return csr_matrix((cols, (data.ravel(), cols)),
shape=(data.max() + 1, data.size))
def get_indices_sparse(data):
M = compute_M(data)
return [np.unravel_index(row.data, data.shape) for row in M]
def Execute_Correspondences_CreateInputs(candidates,normalized_images,im_th,cycle,channels,nbit):
print(str(cycle)+" "+str(channels))
inputs_df=pd.DataFrame(columns=['cycle','ch','x','y','z','Intensities_window_5x5'])
max_df=pd.DataFrame(columns=['I_T','I_G','I_C','I_A','x_T','y_T','z_T','x_G','y_G','z_G','x_C','y_C','z_C','x_A','y_A','z_A','cycle'])
cc, n_c = label(np.amax(candidates[cycle,2:channels,:,:,:],axis=0),return_num=True,connectivity=1)
conn_components = np.zeros((4,candidates.shape[-3],candidates.shape[-2],candidates.shape[-1]))
for ch in range(4):
conn_components[ch,:,:,:] = np.multiply(cc,candidates[cycle,ch+2,:,:,:]).astype(np.uint16) #presume to not have more than 65535 cc to save memory
conn_components = get_indices_sparse(conn_components.astype(np.uint16))
for i in tqdm(range(1,n_c+1)):
ch,z,y,x = conn_components[i]
kdT_tmp = KDTree(np.array([z,x,y]).T)
if len(list(itertools.combinations(np.arange(len(x)),2)))==len(kdT_tmp.query_pairs(2,p=1)): # all voxels are mutually close, so treat the component as a single signal; otherwise it likely covers several signals and is split below
df=pd.Series(data={ 'I_T':np.nan,'I_G':np.nan,'I_C':np.nan,'I_A':np.nan,'x_T':np.nan,'y_T':np.nan,'z_T':np.nan,'x_G':np.nan,'y_G':np.nan,'z_G':np.nan,'x_C':np.nan,'y_C':np.nan,'z_C':np.nan,'x_A':np.nan,'y_A':np.nan,'z_A':np.nan,'cycle':cycle})
df=df[['I_T','I_G','I_C','I_A','x_T','y_T','z_T','x_G','y_G','z_G','x_C','y_C','z_C','x_A','y_A','z_A','cycle']]
for j in range(len(x)):
df.iloc[ch[j]] = im_th[cycle,ch[j]+2,z[j],y[j],x[j]]
df.iloc[ch[j]*3+4]= x[j]
df.iloc[ch[j]*3+4+1]= y[j]
df.iloc[ch[j]*3+4+2]= z[j]
I=df['I_T':'I_A']
col=I[I==np.nanmax(I)].index[0] #retrieving the column
tomove=df.index.get_loc(col) #column index to reach the correct columns coordinates
x_ch=int(df[tomove*3+4])
y_ch=int(df[tomove*3+4+1])
z_ch=int(df[tomove*3+4+2])
ch_idx=tomove
cycle=int(df['cycle'])
rect=normalized_images[cycle,ch_idx+2,z_ch,y_ch-2:y_ch+3,x_ch-2:x_ch+3]
if not rect.size==0:
rect=(rect-np.amin(rect))/(np.amax(rect)-np.amin(rect))
rect=rect-np.mean(rect)
row=pd.Series(data={'cycle':cycle,'ch':ch_idx+2,'x':x_ch,'y':y_ch,'z':z_ch,'Intensities_window_5x5':rect})
inputs_df=inputs_df.append(row,ignore_index=True)
max_df=max_df.append(df,ignore_index=True)
else:
coords = np.vstack((z,x,y))
coords_unique = np.unique(coords,axis=1)
for j in range(coords_unique.shape[-1]):
coords_tmp = coords_unique[:,j][:, np.newaxis]
coords_idx = np.argwhere(np.all(coords==coords_tmp,axis=0)).reshape((-1,))
df=pd.Series(data={ 'I_T':np.nan,'I_G':np.nan,'I_C':np.nan,'I_A':np.nan,'x_T':np.nan,'y_T':np.nan,'z_T':np.nan,'x_G':np.nan,'y_G':np.nan,'z_G':np.nan,'x_C':np.nan,'y_C':np.nan,'z_C':np.nan,'x_A':np.nan,'y_A':np.nan,'z_A':np.nan,'cycle':cycle})
df=df[['I_T','I_G','I_C','I_A','x_T','y_T','z_T','x_G','y_G','z_G','x_C','y_C','z_C','x_A','y_A','z_A','cycle']]
for k in coords_idx:
df.iloc[ch[k]] = im_th[cycle,ch[k]+2,z[k],y[k],x[k]]
df.iloc[ch[k]*3+4]= x[k]
df.iloc[ch[k]*3+4+1]= y[k]
df.iloc[ch[k]*3+4+2]= z[k]
I=df['I_T':'I_A']
col=I[I==np.nanmax(I)].index[0] #retrieving the column
tomove=df.index.get_loc(col) #column index to reach the correct columns coordinates
x_ch=int(df[tomove*3+4])
y_ch=int(df[tomove*3+4+1])
z_ch=int(df[tomove*3+4+2])
ch_idx=tomove
cycle=int(df['cycle'])
rect=normalized_images[cycle,ch_idx+2,z_ch,y_ch-2:y_ch+3,x_ch-2:x_ch+3]
if not rect.size==0:
rect=(rect-np.amin(rect))/(np.amax(rect)-np.amin(rect))
rect=rect-np.mean(rect)
row=pd.Series(data={'cycle':cycle,'ch':ch_idx+2,'x':x_ch,'y':y_ch,'z':z_ch,'Intensities_window_5x5':rect})
inputs_df=inputs_df.append(row,ignore_index=True)
max_df=max_df.append(df,ignore_index=True)
return {'max_df':max_df, 'inputs_df':inputs_df}
|
StarcoderdataPython
|
3207719
|
<reponame>gokhangg/Uncertainix
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 7 17:02:31 2020
@author: ghngu
"""
from ExpSettings.EnvBase import EnvBase
import sys, time,os
_selfPath = os.path.dirname(__file__)
class Environment(EnvBase):
def __init__(self, rootDirectory):
self.__rootDictionary = {}
#Root Dir where the results to be saved
self["rootDir"] = rootDirectory
self["experimentsRootDir"] = self["rootDir"] + "/ExperimentResults/TestMethod"
#Settings file for PCE execution model
self["pceModelSettingsFile"] = _selfPath + "/PceSettings.json"
def GetRootEnvironmentDict(self):
return self.__rootDictionary
def GetEnvironmentDictForDataset(self, datasetIndex, expSize, el = {}):
envDict = self.__rootDictionary
envDict.update({"finalResultFile": envDict["experimentsRootDir"] + "/Result.mhd"})
return envDict
def __getitem__(self, key):
if key in self.__rootDictionary:
return self.__rootDictionary[key]
return None
def __setitem__(self, key, val):
self.__rootDictionary.update({key : val})
|
StarcoderdataPython
|
3296370
|
##
# Copyright (c) 2005-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from cStringIO import StringIO
from twext.python.log import Logger
from twisted.logger import LogLevel, LogPublisher, textFileLogObserver
from twisted.trial import unittest
class LogPublisherTests(unittest.TestCase):
"""
Tests for L{Logger}.
"""
def test_old_style(self):
"""
L{Logger} handles old style log strings.
"""
observer = LogPublisher()
observed = []
observer.addObserver(observed.append)
sio = StringIO()
observer.addObserver(textFileLogObserver(sio))
logger = Logger(observer=observer)
index = 0
logger.info("test")
self.assertEqual(observed[index]["log_level"], LogLevel.info)
self.assertEqual(observed[index]["log_format"], u"{msg}")
self.assertEqual(observed[index]["msg"], u"test")
self.assertEqual(sio.getvalue().splitlines()[index].split("#info] ")[1], "test")
index += 1
logger.info("test {}")
self.assertEqual(observed[index]["log_level"], LogLevel.info)
self.assertEqual(observed[index]["log_format"], u"{msg}")
self.assertEqual(observed[index]["msg"], u"test {}")
self.assertEqual(sio.getvalue().splitlines()[index].split("#info] ")[1], "test {}")
index += 1
logger.info("test {foo}")
self.assertEqual(observed[index]["log_level"], LogLevel.info)
self.assertEqual(observed[index]["log_format"], u"{msg}")
self.assertEqual(observed[index]["msg"], u"test {foo}")
self.assertEqual(sio.getvalue().splitlines()[index].split("#info] ")[1], "test {foo}")
def test_utf8(self):
"""
L{Logger} handles utf8 log strings and format args.
"""
observer = LogPublisher()
observed = []
observer.addObserver(observed.append)
sio = StringIO()
observer.addObserver(textFileLogObserver(sio))
logger = Logger(observer=observer)
index = 0
logger.info("t\xc3\xa9st")
self.assertEqual(observed[index]["log_level"], LogLevel.info)
self.assertEqual(observed[index]["log_format"], u"{msg}")
self.assertEqual(observed[index]["msg"], u"t\xe9st")
self.assertEqual(sio.getvalue().splitlines()[index].split("#info] ")[1], "t\xc3\xa9st")
index += 1
logger.info("{str}", str="t\xc3\xa9st")
self.assertEqual(observed[index]["log_level"], LogLevel.info)
self.assertEqual(observed[index]["log_format"], u"{str}")
self.assertEqual(observed[index]["str"], u"t\xe9st")
self.assertEqual(sio.getvalue().splitlines()[index].split("#info] ")[1], "t\xc3\xa9st")
index += 1
logger.info("T\xc3\xa9st {str}", str="t\xc3\xa9st")
self.assertEqual(observed[index]["log_level"], LogLevel.info)
self.assertEqual(observed[index]["log_format"], u"T\xe9st {str}")
self.assertEqual(observed[index]["str"], u"t\xe9st")
self.assertEqual(sio.getvalue().splitlines()[index].split("#info] ")[1], "T\xc3\xa9st t\xc3\xa9st")
|
StarcoderdataPython
|
1642898
|
<reponame>florian-ionescu/python-learning
with open('../file1.txt', 'w') as f:
f.write('some string'.split()[-1])
|
StarcoderdataPython
|
79663
|
from sys import platform
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
import numpy
ext_modules = [
Extension(
"src.libs.cutils",
["src/libs/cutils.pyx"],
extra_compile_args=['/openmp' if platform == "win32" else '-fopenmp']
)
]
setup(
ext_modules=cythonize(ext_modules),
include_dirs=[numpy.get_include()],
)
|
StarcoderdataPython
|
41406
|
<gh_stars>0
from django.contrib.auth import get_user_model
_user = get_user_model()
class EmailAuthBackend(object):
"""
Email Authentication Backend
Allows a user to sign in using an email/password pair rather than
a username/password pair.
"""
def authenticate(self, username=None, password=None):
""" Authenticate a user based on email address as the user name. """
try:
user = _user.objects.get(email=username)
if user.check_password(password):
return user
except _user.DoesNotExist:
return None
def get_user(self, user_id):
""" Get a _user object from the user_id. """
try:
return _user.objects.get(pk=user_id)
except _user.DoesNotExist:
return None
|
StarcoderdataPython
|
134422
|
import requests
import re
#data = requests.get('http://dbpedia.org/data/Alice_and_Bob.json').json()
#print(data)
def getNumberOfLinks(url):
ret = 0
try:
url_json = str(url)+".json"
data = requests.get(url_json).json()
datastr = str(data);
# print(datastr)
ret = datastr.count("uri")
return ret
except:
pass
return ret
def heuristicValue(i):
# 1. open ith file
filename = "csv/tuple_"+str(i)+".csv"
file = open(filename,"r")
fileContent = file.readlines()
setA = set()
setB = set()
setT = set()
mapA = {}
mapB = {}
for lines in fileContent:
try:
lines = lines.replace( "resource", "data")
lines = lines.replace( "\"", "")
setT.add(lines)
wordList = re.sub(",", " ", lines).split()
u = wordList[0]
v = wordList[2]
setA.add(u);
setB.add(v);
for ai in setA:
n_ai = getNumberOfLinks(ai)
mapA[ai] = n_ai;
for bi in setB:
n_bi = getNumberOfLinks(bi)
mapB[bi] = n_bi
except:
pass
file.close()
# print(setA)
# print(setB)
return
if __name__ == "__main__":
heuristicValue(1)
nA = getNumberOfLinks("http://dbpedia.org/data/Alice_and_Bob")
print(nA)
pass
|
StarcoderdataPython
|
1787558
|
#!/usr/bin/env python
import os, sys
if(os.getenv("I3_BUILD") == None):
print("I3_BUILD not set.")
sys.exit()
from os.path import expandvars
from I3Tray import *
from icecube import dataclasses, dataio, ppc
def particle(f):
p = dataclasses.I3Particle(dataclasses.I3Position(0,0,0),
dataclasses.I3Direction(0,0,1), 0,
dataclasses.I3Particle.Cascade, 0)
p.type = dataclasses.I3Particle.ParticleType.EMinus
p.location_type = dataclasses.I3Particle.LocationType.InIce
p.energy = 1.e5*I3Units.GeV
t = dataclasses.I3MCTree()
t.add_primary(p)
f["particle"] = t
tray = I3Tray()
gcdfile=expandvars("$I3_TESTDATA/sim/GeoCalibDetectorStatus_IC86.55697_corrected_V2.i3.gz")
tray.AddModule("I3InfiniteSource", "muxer")(
# ("Prefix", gcdfile),
("Stream", icetray.I3Frame.DAQ)
)
if(True): # ppc part
os.putenv("OGPU", "1")
os.putenv("PPCTABLESDIR", expandvars("$I3_BUILD/ppc/resources/ice"))
if(True): # flasher simulation
tray.AddModule("i3ppc", "ppc")(
("nph", 1.e9),
("fla", OMKey(63, 20))
)
else: # particle simulation
tray.AddModule(particle, "particle", Streams=[icetray.I3Frame.DAQ])
tray.AddModule("i3ppc", "ppc")
tray.AddModule("I3Writer", "writer")(
("streams", [icetray.I3Frame.DAQ]),
("filename", "out.i3")
)
tray.Execute(10)
del tray
|
StarcoderdataPython
|
12463
|
<filename>corehq/form_processor/migrations/0049_case_attachment_props.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import models, migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('form_processor', '0048_attachment_content_length_blob_id'),
]
operations = [
migrations.AddField(
model_name='xformattachmentsql',
name='properties',
field=jsonfield.fields.JSONField(default=dict),
preserve_default=True,
),
migrations.AddField(
model_name='caseattachmentsql',
name='attachment_from',
field=models.TextField(null=True),
preserve_default=True,
),
migrations.AddField(
model_name='caseattachmentsql',
name='properties',
field=jsonfield.fields.JSONField(default=dict),
preserve_default=True,
),
migrations.AddField(
model_name='caseattachmentsql',
name='attachment_src',
field=models.TextField(null=True),
preserve_default=True,
),
migrations.AddField(
model_name='caseattachmentsql',
name='identifier',
field=models.CharField(default='', max_length=255),
preserve_default=False,
),
]
|
StarcoderdataPython
|
3362496
|
# Pimoroni Bearable(s) library for CircuitPython
# The MIT License (MIT)
# Copyright (c) 2018 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
`Bearable`
====================================================
Driver for the Pimoroni Bearable(s) bear and fox badges using i2c pads.
* Author(s): <NAME>
Implementation Notes
--------------------
Inspired by Pimoroni and:
* Phil Underwood hacking the Bareables Badge: https://lorrainbow.wordpress.com/2017/11/18/guest-blogger-phil-underwood-hacking-the-bareables-badge/
* Raspberry Pi bearables library: https://github.com/sandyjmacdonald/bearables
**Hardware:**
* Pimoroni Bearables Bear kit: https://shop.pimoroni.com/products/bearables-bear-kit
* Pimoroni Bearables Fox kit: https://shop.pimoroni.com/products/bearables-fox-kit
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
* Adafruit's Bus Device library: https://github.com/adafruit/Adafruit_CircuitPython_BusDevice
"""
# imports
import time
from adafruit_bus_device.i2c_device import I2CDevice
from micropython import const
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/kevinjwalters/CircuitPython_Bearable.git"
# i2c commands extracted from
# https://github.com/sandyjmacdonald/bearables/blob/master/library/bearables/__init__.py
# pylint: disable=bad-whitespace
_BEAR_SET_MODE = const(0x00)
_BEAR_LED_PATTERN_MODE = const(0x10)
_BEAR_LED_DIRECT_MODE = const(0x11)
_BEAR_SET_LEDS = const(0x01)
_BEAR_GET_ANALOGUE1 = const(0x07)
_BEAR_SET_PATTERN = const(0x08)
# pylint: enable=bad-whitespace
class Bearable:
"""
Driver for the Pimoroni Bearable(s) bear and fox badges using i2c pads.
Throws exceptions for any i2c errors after retries.
:param ~busio.I2C i2c: the busio.I2C object.
:param int n: The number of leds on the badge, defaults to 12.
:param int address: The i2c address of the badge, defaults to 0x15.
:param float brightness: Global brightness of the pixels between 0.0 and 1.0, defaults to 1.0.
:param bool auto_write: True if the LEDs should immediately change when
set. If False, `show` must be called explicitly. Defaults to True.
:param int retries: The number of retries for i2c communication errors, defaults to 3.
:param float retrypause: The pause between any retries in seconds, defaults to 0.001 (1ms).
"""
def __init__(self, i2c, *, n=12, address=0x15, brightness=1.0, auto_write=True, retries=3, retrypause=0.001):
if brightness < 0.0 or brightness > 1.0:
raise ValueError("brightness out of range")
self._i2c = i2c
self._n = n # 12 LEDs on bear and fox badges
self._address = address
self._brightness = brightness
self._auto_write = auto_write
self._attempts = retries + 1
self._retrypause = retrypause
self._device = I2CDevice(i2c, address)
# red LED bellow chin is index 0 and order is counter-clockwise
self._leds = [0.0] * self._n
# empirical testing suggests this is 8
# see https://github.com/sandyjmacdonald/bearables/issues/1
self._maxbrightness = 8
self._convertfactor = self._maxbrightness * self._brightness
self._mode = None
self._pattern = None
def _set_mode(self, mode):
"""Sends an i2c command to set the badge's mode if that mode has not already been set.
:param int mode: either _BEAR_LED_DIRECT_MODE or _BEAR_LED_PATTERN_MODE.
"""
if mode == self._mode:
return
cmd = bytes([_BEAR_SET_MODE, mode])
self._i2cwrite(cmd)
self._mode = mode
def __setitem__(self, index, val):
if isinstance(index, slice):
start, stop, step = index.indices(self._n)
length = stop - start
if step != 0:
length = (length + step - 1) // step
if len(val) != length:
raise ValueError("Slice and input sequence size do not match")
for val_i, in_i in enumerate(range(start, stop, step)):
self._set_item(in_i, val[val_i])
else:
self._set_item(index, val)
if self._auto_write:
self.show()
def __getitem__(self, index):
"""Returns the set value between 0.0 and 1.0, not the quantized value."""
return self._leds[index]
def __len__(self):
return self._n
def _set_item(self, index, value):
self._leds[index] = value
def fill(self, value):
"""
Set all the pixels to the same value:
:param float value: brightness between 0.0 and 1.0.
"""
for i in range(self._n):
self._leds[i] = value
if self._auto_write:
self.show()
def read_pin(self):
"""Read the value of the analogue input pin.
Return value is an int between 0 and 255.
Acorn badge is 0 when lying flat and around 45 when vertical.
A finger pressed across contacts firmly is 1.
A 2k2 resistor is 208, a 4k4 resistor is 177.
YMMV.
"""
self._set_mode(_BEAR_LED_DIRECT_MODE)
errors = 0
cmd = bytes([_BEAR_GET_ANALOGUE1])
buf = bytearray(1)
for attempt in range(self._attempts):
try:
with self._device:
self._device.write(cmd)
self._device.readinto(buf)
break # if here then no exception -> success!
except Exception as e:
errors += 1
if errors == self._attempts:
raise RuntimeError("i2c last exception after retries: " + repr(e))
else:
time.sleep(self._retrypause)
return buf[0]
def maxbrightness(self):
"""Returns the number of brightness levels the hardware supports (ignoring all off)."""
return self._maxbrightness
def _pack_leds(self):
return [(int(self._leds[i] * self._convertfactor) & 0b1111) << 4
| (int(self._leds[i + 1] * self._convertfactor) & 0b1111) for i in range(0, self._n, 2)]
def show(self):
"""Displays any set pixels.
"""
self._set_mode(_BEAR_LED_DIRECT_MODE)
cmd = bytes([_BEAR_SET_LEDS] + self._pack_leds())
# print('bear in the woods')
# print(' '.join([hex(i) for i in cmd]))
self._i2cwrite(cmd)
def pattern(self, pattern):
"""Puts bear into pattern mode and sets the pattern (0-11).
"""
self._set_mode(_BEAR_LED_PATTERN_MODE)
cmd = bytes([_BEAR_SET_PATTERN, pattern])
# print('bear likes a pattern {:d}'.format(pattern))
# print(' '.join([hex(i) for i in cmd]))
self._i2cwrite(cmd)
self._pattern = pattern
def _i2cwrite(self, data, *, attempts=None):
errors = 0
if attempts is None: attempts = self._attempts
for attempt in range(attempts):
try:
with self._device:
self._device.write(data)
break # if here then no exception -> success!
except Exception as e:
errors += 1
if errors == attempts:
raise RuntimeError("i2c last exception after retries: " + repr(e))
else:
time.sleep(self._retrypause)
def _i2cwriteread(self, wdata, rdata, *, attempts=None):
"""Send a command down i2c bus and immediately read reply.
:param bytes wdata: data to write.
:param bytearray rdata: size must match reply size.
"""
errors = 0
if attempts is None: attempts = self._attempts
for attempt in range(attempts):
try:
with self._device:
self._device.write(wdata)
self._device.readinto(rdata)
break # if here then no exception -> success!
except Exception as e:
errors += 1
if errors == attempts:
raise RuntimeError("i2c last exception after retries: " + repr(e))
else:
time.sleep(self._retrypause)
|
StarcoderdataPython
|
4814487
|
<gh_stars>10-100
#!/usr/bin/env python3
# encoding: utf-8
import os
from setuptools import setup, find_packages
def files(package, paths):
skip = len(package)+1
for path in paths:
for dirpath, dirnames, filenames in os.walk(os.path.join(package, path)):
for filename in filenames:
yield os.path.join(dirpath, filename)[skip:]
setup(
name = "fedora-software",
version = '0.1',
description = "Web interface to Fedora Software database",
author = "<NAME> & <NAME>",
author_email = "<EMAIL>, <EMAIL>",
url = "https://github.com/misli/fedora-software",
packages = find_packages(),
package_data = {
'fedora_software': list(files('fedora_software', ['templates', 'static'])),
},
scripts = ['bin/fedora-software-manage'],
data_files = [
('/etc/bash_completion.d', ['conf/bash_completion.d/fedora-software.bash']),
('/usr/lib/fedora-software/httpd', ['conf/httpd/fedora-software.conf']),
('/var/lib/fedora-software/data', []),
('/var/lib/fedora-software/htdocs', ['fedora_software/wsgi.py']),
('/var/lib/fedora-software/htdocs/static', []),
('/var/lib/fedora-software/htdocs/media', []),
],
)
|
StarcoderdataPython
|
3343673
|
<filename>vulnerabilities/tests/test_rust.py
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# VulnerableCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/vulnerablecode for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import os
from unittest import TestCase
from packageurl import PackageURL
from univers.version_range import VersionRange
from vulnerabilities.importer import AdvisoryData
from vulnerabilities.importer import Reference
from vulnerabilities.importers.rust import RustImporter
from vulnerabilities.importers.rust import categorize_versions
from vulnerabilities.importers.rust import get_advisory_data
from vulnerabilities.package_managers import CratesVersionAPI
from vulnerabilities.package_managers import Version
from vulnerabilities.utils import AffectedPackage
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
TEST_DATA = os.path.join(BASE_DIR, "test_data/rust")
MOCKED_CRATES_API_VERSIONS = CratesVersionAPI(
cache={
"bitvec": {Version("0.10.0"), Version("0.12.0"), Version("0.18.0")},
"bumpalo": {Version("2.8.0"), Version("3.0.1"), Version("3.2.5")},
"cbox": {Version("0.10.0"), Version("0.12.0"), Version("0.18.0")},
"flatbuffers": {Version("0.3.0"), Version("0.5.0"), Version("0.6.5")},
"hyper": {Version("0.10.0"), Version("0.12.0"), Version("0.13.0")},
"byte_struct": {Version("0.6.1"), Version("0.6.0"), Version("1.0.0")},
}
)
def test_categorize_versions():
flatbuffers_versions = MOCKED_CRATES_API_VERSIONS.get("flatbuffers").valid_versions
unaffected_ranges = [VersionRange.from_scheme_version_spec_string("semver", "< 0.4.0")]
affected_ranges = [
VersionRange.from_scheme_version_spec_string("semver", ">= 0.4.0"),
VersionRange.from_scheme_version_spec_string("semver", "<= 0.6.0"),
]
resolved_ranges = [VersionRange.from_scheme_version_spec_string("semver", ">= 0.6.1")]
unaffected_versions, affected_versions = categorize_versions(
set(flatbuffers_versions),
unaffected_ranges,
affected_ranges,
resolved_ranges,
)
assert len(unaffected_versions) == 2
assert "0.3.0" in unaffected_versions
assert "0.6.5" in unaffected_versions
assert len(affected_versions) == 1
assert "0.5.0" in affected_versions
def test_categorize_versions_without_affected_ranges():
all_versions = {"1.0", "1.1", "2.0", "2.1", "3.0", "3.1"}
unaffected_ranges = [VersionRange.from_scheme_version_spec_string("semver", "< 1.2")]
affected_ranges = []
resolved_ranges = [VersionRange.from_scheme_version_spec_string("semver", ">= 3.0")]
unaffected_versions, affected_versions = categorize_versions(
all_versions,
unaffected_ranges,
affected_ranges,
resolved_ranges,
)
assert len(unaffected_versions) == 4
assert "1.0" in unaffected_versions
assert "1.1" in unaffected_versions
assert "3.0" in unaffected_versions
assert "3.1" in unaffected_versions
assert len(affected_versions) == 2
assert "2.0" in affected_versions
assert "2.1" in affected_versions
def test_categorize_versions_with_only_affected_ranges():
all_versions = {"1.0", "1.1", "2.0", "2.1", "3.0", "3.1"}
unaffected_ranges = []
affected_ranges = [
VersionRange.from_scheme_version_spec_string("semver", "> 1.2"),
VersionRange.from_scheme_version_spec_string("semver", "<= 2.1"),
]
resolved_ranges = []
unaffected_versions, affected_versions = categorize_versions(
all_versions,
unaffected_ranges,
affected_ranges,
resolved_ranges,
)
assert len(unaffected_versions) == 4
assert "1.0" in unaffected_versions
assert "1.1" in unaffected_versions
assert "3.0" in unaffected_versions
assert "3.1" in unaffected_versions
assert len(affected_versions) == 2
assert "2.0" in affected_versions
assert "2.1" in affected_versions
def test_categorize_versions_without_any_ranges():
all_versions = {"1.0", "1.1", "2.0", "2.1", "3.0", "3.1"}
unaffected, affected = categorize_versions(all_versions, [], [], [])
assert len(unaffected) == 0
assert len(affected) == 0
class RustImportTest(TestCase):
@classmethod
def setUpClass(cls) -> None:
data_source_cfg = {
"repository_url": "",
}
cls.data_src = RustImporter(1, config=data_source_cfg)
cls.data_src._crates_api = MOCKED_CRATES_API_VERSIONS
def test_load_advisory(self):
md_path = os.path.join(TEST_DATA, "RUSTSEC-2021-0032.md")
data = self.data_src._load_advisory(md_path)
expected_data = AdvisoryData(
summary="",
vulnerability_id="CVE-2021-28033",
affected_packages=[
AffectedPackage(
vulnerable_package=PackageURL(
type="cargo",
name="byte_struct",
version="0.6.0",
),
patched_package=PackageURL(
type="cargo",
name="byte_struct",
version="0.6.1",
),
)
],
references=[
Reference(
reference_id="",
url="https://github.com/wwylele/byte-struct-rs/issues/1",
severities=[],
),
Reference(
reference_id="RUSTSEC-2021-0032",
url="https://rustsec.org/advisories/RUSTSEC-2021-0032.html",
severities=[],
),
],
)
assert expected_data == data
def test_load_toml_from_md(self):
md_path = os.path.join(TEST_DATA, "CVE-2019-16760.md")
loaded_data = get_advisory_data(md_path)
expected_data = {
"advisory": {
"aliases": ["GHSA-phjm-8x66-qw4r"],
"date": "2019-09-30",
"id": "CVE-2019-16760",
"package": "cargo",
"url": "https://groups.google.com/forum/#!topic/rustlang-security-announcements/rVQ5e3TDnpQ", # nopep8
},
"versions": {"patched": [">= 1.26.0"]},
}
assert loaded_data == expected_data
|
StarcoderdataPython
|
4820500
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
# Copyright (c) <NAME> - <EMAIL>
# All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
##############################################################################
import os
import tensorflow as tf
__all__ = ['data_loading_fn']
def data_loading_fn(
data_dir,
input_shape=(572, 572, 1),
mask_shape=(388, 388, 1),
training=True,
batch_size=32,
num_threads=32,
use_gpu_prefetch=False,
seed=None
):
shuffle_buffer_size = 10000
def decode_csv(line):
input_image_name, image_mask_name, label = tf.decode_csv(
line,
record_defaults=[[""], [""], [0]],
field_delim=','
)
def decode_image(filepath, resize_shape):
image_content = tf.read_file(filepath)
# image = tf.image.decode_image(image_content, channels=resize_shape[-1])
image = tf.image.decode_png(
contents=image_content,
channels=resize_shape[-1],
dtype=tf.uint8
)
image = tf.image.resize_images(
image,
size=resize_shape[:2],
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR, # [BILINEAR, NEAREST_NEIGHBOR, BICUBIC, AREA]
align_corners=False,
preserve_aspect_ratio=True
)
image.set_shape(resize_shape)
return image
input_image = decode_image(
filepath=tf.strings.join([image_dir, input_image_name], separator='/'),
resize_shape=input_shape
)
mask_image = tf.cond(
tf.equal(image_mask_name, ""),
true_fn=lambda: tf.zeros(mask_shape, dtype=tf.uint8),
false_fn=lambda: decode_image(tf.strings.join([mask_image_dir, image_mask_name], separator='/'), resize_shape=mask_shape),
)
return input_image, mask_image, label
if training:
csv_file = os.path.join(data_dir, "train_list.csv")
image_dir = os.path.join(data_dir, "Train")
else:
csv_file = os.path.join(data_dir, "test_list.csv")
image_dir = os.path.join(data_dir, "Test")
mask_image_dir = os.path.join(image_dir, "Label")
dataset = tf.data.TextLineDataset(csv_file)
dataset = dataset.skip(1) # Skip CSV Header
dataset = dataset.cache()
if training:
dataset = dataset.apply(tf.data.experimental.shuffle_and_repeat(
buffer_size=shuffle_buffer_size,
seed=seed
))
else:
dataset = dataset.repeat()
dataset = dataset.apply(
tf.data.experimental.map_and_batch(
map_func=decode_csv,
num_parallel_calls=num_threads,
batch_size=batch_size,
drop_remainder=True,
)
)
dataset = dataset.prefetch(buffer_size=tf.contrib.data.AUTOTUNE)
if use_gpu_prefetch:
dataset.apply(tf.data.experimental.prefetch_to_device(
device="/gpu:0",
buffer_size=batch_size*8
))
return dataset
if __name__ == "__main__":
'''
Data Loading Benchmark Usage:
python data_loading.py \
--data_dir="/data/dagm2007/" \
--batch_size=64 \
--warmup_steps=200 \
--benchmark_steps=2000 \
--class_id=1
'''
import time
import argparse
import numpy as np
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
parser = argparse.ArgumentParser(description="DAGM2007_data_loader_benchmark")
parser.add_argument(
'--data_dir',
required=True,
type=str,
help="Directory path which contains the preprocessed DAGM 2007 dataset"
)
parser.add_argument(
'--batch_size',
default=64,
type=int,
required=True,
help="""Batch size used to measure performance."""
)
parser.add_argument(
'--warmup_steps',
default=200,
type=int,
required=True,
help="""Number of steps considered as warmup and not taken into account for performance measurements."""
)
parser.add_argument(
'--benchmark_steps',
default=200,
type=int,
required=True,
help="""Number of steps considered as warmup and not taken into account for performance measurements."""
)
parser.add_argument(
'--class_id',
default=1,
choices=range(1, 11), # between 1 and 10
type=int,
required=True,
help="""Class ID used for benchmark."""
)
FLAGS, unknown_args = parser.parse_known_args()
if len(unknown_args) > 0:
for bad_arg in unknown_args:
print("ERROR: Unknown command line arg: %s" % bad_arg)
raise ValueError("Invalid command line arg(s)")
DATA_DIR = os.path.join(FLAGS.data_dir, "raw_images/private/Class1")
BURNIN_STEPS = FLAGS.warmup_steps
TOTAL_STEPS = FLAGS.warmup_steps + FLAGS.benchmark_steps
BATCH_SIZE = FLAGS.batch_size
if not os.path.exists(DATA_DIR):
raise FileNotFoundError("The directory `%s` does not exists ..." % DATA_DIR)
# Build the data input
dataset = data_loading_fn(
data_dir=DATA_DIR,
input_shape=(572, 572, 1),
mask_shape=(388, 388, 1),
batch_size=BATCH_SIZE,
training=True,
num_threads=64,
use_gpu_prefetch=True,
seed=None
)
dataset_iterator = dataset.make_one_shot_iterator()
input_images, mask_images, labels = dataset_iterator.get_next()
print("Input Image Shape: %s - Dtype: %s" % (input_images.get_shape(), input_images.dtype))
print("Mask Image Shape: %s - Dtype: %s" % (mask_images.get_shape(), mask_images.dtype))
print("Label Shape: %s - Dtype: %s" % (labels.get_shape(), labels.dtype))
with tf.device("/gpu:0"):
input_images = tf.identity(input_images)
mask_images = tf.identity(mask_images)
labels = tf.identity(labels)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.log_device_placement = True
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
total_files_processed = 0
img_per_sec_arr = []
processing_time_arr = []
processing_start_time = time.time()
for step in range(TOTAL_STEPS):
start_time = time.time()
img_batch, mask_batch, lbl_batch = sess.run([input_images, mask_images, labels])
batch_size = img_batch.shape[0]
total_files_processed += batch_size
elapsed_time = (time.time() - start_time) * 1000
imgs_per_sec = (batch_size / elapsed_time) * 1000
if (step + 1) > BURNIN_STEPS:
processing_time_arr.append(elapsed_time)
img_per_sec_arr.append(imgs_per_sec)
if (step + 1) % 20 == 0 or (step + 1) == TOTAL_STEPS:
print("[STEP %04d] # Files: %03d - Time: %03d msecs - Speed: %6d img/s" % (
step + 1,
batch_size,
elapsed_time,
imgs_per_sec
))
processing_time = time.time() - processing_start_time
avg_processing_speed = np.mean(img_per_sec_arr)
print("\n###################################################################")
print("*** Data Loading Performance Metrics ***\n")
print("\t=> Number of Steps: %d" % (step + 1))
print("\t=> Batch Size: %d" % BATCH_SIZE)
print("\t=> Files Processed: %d" % total_files_processed)
print("\t=> Total Execution Time: %d secs" % processing_time)
print("\t=> Median Time per step: %3d msecs" % np.median(processing_time_arr))
print("\t=> Median Processing Speed: %d images/secs" % np.median(img_per_sec_arr))
print("\t=> Median Processing Time: %.2f msecs/image" % (1 / float(np.median(img_per_sec_arr)) * 1000))
print("\n*** Debug Shape Information:")
print("\t[*] Batch Shape: %s" % str(img_batch.shape))
print("\t[*] Mask Shape: %s" % str(mask_batch.shape))
print("\t[*] Label Shape: %s" % str(lbl_batch.shape))
|
StarcoderdataPython
|
3320906
|
from django.urls import path, include
from school import views
urlpatterns = [
# Used to retrieve own details of student
path('student/<int:pk>/', views.StudentsRetrieveViewSet.as_view(), name = 'student-view'),
# Used to create and list students
path('teacher/', views.StudentCreateListViewSet.as_view(), name='teacher-view'),
# Used to create and list teachers
path('superuser/', views.TeacherCreateListViewSet.as_view(), name='superuser-view'),
]
|
StarcoderdataPython
|
1728442
|
<reponame>lucasrdrgs/RsaEncDec
# Sorry for the ugly code, I wrote this in a hurry.
# Are you mad? Submit a pull request, loser.
import os
import sys
import tempfile
import datetime
ERRORS_ = [
'''Invalid syntax.\n\nHere is how it works:
If you ARE NOT generating a keypair:
$ rsaencdec OPTIONS input [outputFilePath if OPTIONS has F] key
If you ARE generating a keypair:
$ rsaencdec g publicPath privatePath bits
These are the options:
e: encrypt
d: decrypt
g: generate key pair
f: input as file
p: input as plain text
b: input as base64 plain text
F: output as file
P: output as plain text
B: output as base64 plain text
Examples:
$ rsaencdec epF "Hello, World!" result.txt public.pem
$ rsaencdec dfP result.txt private.pem
$ rsaencdec g PubKey.key PrivKey.key 2048''',
'Input file doesn\'t exist.',
'Key doesn\'t exist.',
'Wrong key for operation (try the other one).',
'Key size is too small (try 512 or greater).'
]
def err_(code):
print(ERRORS_[code - 1])
# print('Argument count: ' + str(len(sys.argv)))
return code
class Modes:
ENC = 0
DEC = 1
INP = 2
INP64 = 3
INF = 4
OUP = 5
OUP64 = 6
OUF = 7
def tmpName():
return os.path.join(tempfile.mkdtemp(), 'rsaencdec')
def main():
if len(sys.argv) not in [4, 5]: return err_(1)
# Arguments:
# Options In Out Key
options = list(sys.argv[1])
if (len(options) != 3 or len(set(list(options))) != 3) and (len(options) != 1 or len(set(list(options))) != 1):
return err_(1)
if 'g' not in options:
if 'e' not in options and 'd' not in options:
return err_(1)
if 'p' not in options and 'f' not in options and 'b' not in options:
return err_(1)
if 'P' not in options and 'F' not in options and 'B' not in options:
return err_(1)
else:
try:
publk = sys.argv[2]
privk = sys.argv[3]
keysz = sys.argv[4]
try:
keysz = int(keysz)
except: return err_(1)
if keysz < 512:
return err_(5)
if keysz < 2048:
print('This key size is not very secure. Generating anyway.')
if os.path.isfile(publk):
new = '.'.join(publk.split('.')[:-1])
ext = publk.split('.')[-1]
accum = 1
while os.path.isfile(new + '_' + str(accum) + '.' + ext):
accum += 1
publk = new + '_' + str(accum) + '.' + ext
if os.path.isfile(privk):
new = '.'.join(privk.split('.')[:-1])
ext = privk.split('.')[-1]
accum = 1
while os.path.isfile(new + '_' + str(accum) + '.' + ext):
accum += 1
privk = new + '_' + str(accum) + '.' + ext
os.system('openssl genpkey -algorithm RSA -out ' + privk + ' -pkeyopt rsa_keygen_bits:' + str(keysz))
os.system('openssl rsa -pubout -in ' + privk + ' -out ' + publk)
return 0
except Exception as e:
print(e)
return err_(1)
encmode = None
if 'e' in options: encmode = Modes.ENC
else: encmode = Modes.DEC
inmode = None
if 'p' in options: inmode = Modes.INP
elif 'b' in options: inmode = Modes.INP64
else: inmode = Modes.INF
outmode = None
if 'P' in options: outmode = Modes.OUP
elif 'B' in options: outmode = Modes.OUP64
else: outmode = Modes.OUF
tmpFName1 = tmpName(); open(tmpFName1, 'w')
tmpFName2 = tmpName(); open(tmpFName2, 'w')
tmpF1 = tmpFName1
tmpF2 = tmpFName2
if inmode == Modes.INP:
with open(tmpF1, 'w') as f:
f.write(sys.argv[2] + '\n')
elif inmode == Modes.INP64:
with open(tmpF1, 'w') as f:
os.system('echo ' + sys.argv[2] + ' | base64 -d > ' + tmpF1)
else:
tmpF1 = sys.argv[2]
        if not os.path.isfile(tmpF1):
            return err_(2)
KEY_FILE_INDEX = 3
if outmode == Modes.OUF:
tmpF2 = sys.argv[3]
if os.path.isfile(tmpF2):
print('File already exists. Press Y and hit enter to overwrite it. Press any other key to create a file with a different name.')
confirm = input('> ').lower()
if confirm != 'y':
tmpF2new = '.'.join(tmpF2.split('.')[:-1])
extens = tmpF2.split('.')[-1]
accum = 1
while os.path.isfile(tmpF2new + '_' + str(accum) + '.' + extens):
accum += 1
tmpF2 = tmpF2new + '_' + str(accum) + '.' + extens
KEY_FILE_INDEX += 1
sameFile = False
if tmpF2 == tmpF1:
sameFile = True
tmpFName2 = tmpName(); open(tmpFName2, 'w')
tmpF2 = tmpFName2
keyPath = sys.argv[KEY_FILE_INDEX]
cmd = 'openssl rsautl -'
cmd += 'encrypt ' if encmode == Modes.ENC else 'decrypt '
cmd += '-in ' + tmpF1 + ' -out ' + tmpF2
cmd += ' -pubin ' if encmode == Modes.ENC else ' '
cmd += '-inkey ' + keyPath
    try:
        with open(keyPath, 'r') as f:
            if encmode == Modes.ENC:
                if '-----BEGIN PUBLIC KEY-----' not in f.readlines()[0]:
                    return err_(4)
            else:
                if '-----BEGIN PRIVATE KEY-----' not in f.readlines()[0]:
                    return err_(4)
    except:
        return err_(3)
os.system(cmd)
if sameFile:
os.system('mv ' + tmpF2 + ' ' + tmpF1)
if(outmode == Modes.OUP):
os.system('cat ' + tmpF2)
elif(outmode == Modes.OUP64):
os.system('cat ' + tmpF2 + ' | base64 -w 0 && echo')
# openssl rsautl -encrypt -in file.txt -out res.txt -pubin -inkey public.pem
# openssl rsautl -decrypt -in enc.txt -out plain.txt -inkey private.pem
try:
os.remove(tmpFName1)
os.remove(tmpFName2)
except:
pass
return 0
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3201924
|
# MIT License
#
# Copyright (c) 2019 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Low-level, core functionality for DayDream"""
__all__ = ['DayDreamError', 'Reference', 'Aggregator']
from copy import copy, deepcopy
from functools import reduce
from operator import add
from typing import Any, AbstractSet, Set, Optional, Union
class DayDreamError(Exception):
"""Error for package-specific issues."""
class Reference:
"""Creates a reference to an attribute present in a parent class.
:param name: name of the referenced attribute
:param target: type or name of the object with the referenced
attribute
:param modifier: this is added to the dereferenced value
"""
def dereference(self, instance: Any) -> Any:
"""Dereference an attribute on the instance.
:param instance: object whose attribute is referenced
:return: value of the referenced attribute
"""
result = self._dereference_name(instance)
try:
modifier = self._modifier.dereference(instance) # type: ignore
except (AttributeError, TypeError):
if result is self:
raise TypeError('Instance type is not referenced')
if self._modifier is not None:
result = result + self._modifier
else:
if result is self:
result = deepcopy(result)
# pylint: disable=protected-access
result._modifier = modifier
else:
result = result + modifier
return result
@property
def name(self) -> str:
"""Returns the name of the referenced attribute."""
return self._name
def __init__(self,
name: str,
target: Union[type, str],
modifier: Any = None) -> None:
self._name = name
self._target = target
self._modifier = modifier
def __repr__(self) -> str:
return (type(self).__name__
+ f'({repr(self._name)}, {self._type_name()}, '
+ f'{repr(self._modifier)})')
def __eq__(self, other: Any) -> bool:
if isinstance(other, type(self)):
# pylint: disable-msg=protected-access
result = (self._name == other._name
and self._target == other._target
and self._modifier == other._modifier)
else:
result = NotImplemented
return result
def __add__(self, other: Any) -> 'Reference':
if self._modifier is None:
modifier = other
else:
modifier = self._modifier + other
return type(self)(self._name, self._target, modifier)
__radd__ = __add__
def _refers_to(self, instance: Any) -> bool:
if not isinstance(instance, type):
instance = type(instance)
if isinstance(self._target, type):
result = issubclass(instance, self._target)
elif isinstance(self._target, str):
result = instance.__name__ == self._target
else:
raise NotImplementedError('Internal state is unexpected.')
return result
def _dereference_name(self, instance):
if self._refers_to(instance):
result = getattr(instance, self._name)
else:
result = self
return result
def _type_name(self):
if isinstance(self._target, str):
type_name = repr(self._target)
else:
type_name = self._target.__name__
return type_name
def _is_private(name: str) -> bool:
"""Return true if the name is private.
:param name: name of an attribute
"""
return name.startswith('_')
def _is_public(name: str) -> bool:
"""Return true if the name is not private, i.e. is public.
:param name: name of an attribute
"""
return not name.startswith('_')
class Aggregator:
"""Aggregate values from all objects attached to an instance.
This overrides attribute look up and allows combining (through
addition) values defined both on a particular instance and on its
attributes. This is used to allow multiple different sources to
modify a value.
For example, a character's strength is partially inherent and is
possibly modified by their race and levels in particular classes.
This class defines the interface by which all of these can be made
aware of each other with minimal boilerplate. To manage this, a
character will be an aggregator and have its own `strength`
attribute (i.e. the inherent part of its strength) and will add to
this value any attribute that also defines a `strength` attribute.
In this way, a class and a race that both define `strength` will
be added to the strength present on the character yielding a total
for that ability score.
"""
def __init_subclass__(cls,
ignore: Optional[AbstractSet[str]] = None) -> None:
"""Setup attributes to access directly."""
super().__init_subclass__()
if ignore is None:
cls._ignore: Set[str] = set()
else:
cls._ignore = set(ignore)
cls._instance_names: Set[str] = {k for k, v in vars(cls).items()
if isinstance(v, property)}
def __init__(self) -> None:
"""Initialize attribute name tracker."""
super().__init__()
self._instance_names = copy(self._instance_names)
def __setattr__(self, name: str, value: Any) -> None:
"""Track any attributes that are added to an instance."""
if _is_public(name) and name not in self._known_names:
self._instance_names.add(name)
super().__setattr__(name, value)
def __getattribute__(self, name: str) -> Any:
"""Aggregate value from each attribute if allowed."""
if _is_private(name) or name in super().__getattribute__('_ignore'):
result = super().__getattribute__(name)
else:
values = []
try:
values.append(super().__getattribute__(name))
except AttributeError:
pass
for name_other in super().__getattribute__('_instance_names'):
if name_other != name:
attribute = super().__getattribute__(name_other)
if hasattr(attribute, name):
values.append(getattr(attribute, name))
if values:
result = reduce(add, values)
else:
raise AttributeError(f'The desired attribute {name} could not'
f' be found')
try:
result = result.dereference(self)
except (AttributeError, TypeError):
pass
return result
def __delattr__(self, name: str) -> None:
"""Remove deleted attributes from tracker."""
super().__delattr__(name)
self._instance_names.remove(name)
@property
def _known_names(self) -> Set[str]:
"""Return all known names."""
return self._ignore | self._instance_names
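# --- Illustrative usage sketch (added; not part of the original module) ---
# A minimal example of the aggregation behaviour described in the Aggregator
# docstring: a hypothetical Character sums its inherent `strength` with the
# `strength` contributed by an attached Race object. The Character and Race
# names are assumptions for illustration only.
if __name__ == '__main__':
    class Race:
        def __init__(self) -> None:
            self.strength = 2  # racial bonus

    class Character(Aggregator):
        def __init__(self) -> None:
            super().__init__()
            self.strength = 10   # inherent part of the score
            self.race = Race()   # attached object that also defines `strength`

    character = Character()
    # Attribute lookup aggregates the inherent value and the race bonus: 10 + 2.
    print(character.strength)  # expected: 12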
|
StarcoderdataPython
|
4813343
|
<gh_stars>0
import pymysql
myconn=pymysql.connect(host='localhost',user='root',password='<PASSWORD>', database="mydatabase")
cur=myconn.cursor()
sql="insert into employee1(name,empid,salary)values(%s,%s,%s)"
val=[("john",102,25000),
("david",104,45000),
("nick",105,50000)]
cur.executemany(sql,val)
print(cur.rowcount,"record inserted!")
myconn.commit()
myconn.close()
|
StarcoderdataPython
|
3397746
|
<reponame>bionicles/pytorch-dnc
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import torch.nn as nn
import torch as T
from torch.autograd import Variable as var
import torch.nn.functional as F
from torch.nn.utils import clip_grad_norm_
import torch.optim as optim
import sys
import os
import math
import time
import functools
sys.path.insert(0, '.')
from pyflann import *
from dnc.flann_index import FLANNIndex
def test_indexes():
n = 30
cell_size=20
nr_cells=1024
K=10
probes=32
d = T.ones(n, cell_size)
q = T.ones(1, cell_size)
for gpu_id in (-1, -1):
i = FLANNIndex(cell_size=cell_size, nr_cells=nr_cells, K=K, probes=probes, gpu_id=gpu_id)
d = d if gpu_id == -1 else d.cuda(gpu_id)
i.add(d)
dist, labels = i.search(q*7)
assert dist.size() == T.Size([1,K])
assert labels.size() == T.Size([1, K])
|
StarcoderdataPython
|
4822606
|
<reponame>lar-deeufba/potential_fields
#!/usr/bin/env python
# Code available in
# https://github.com/neobotix/neo_simulation
import rospy
import tf
import rospkg
from gazebo_msgs.srv import SpawnModel, GetModelState
import time
from geometry_msgs.msg import *
from gazebo_msgs.msg import ModelState, ModelStates
import os
from os.path import expanduser
from pathlib import Path
from tf import TransformListener
from tf.transformations import quaternion_from_euler
rospack = rospkg.RosPack()
Home = rospack.get_path('custom_codes')
path = Home + '/models/box/model.sdf'
class Moving():
def __init__(self, model_name, Spawning1, y_pose, x_pose, z_pose, oriFinal):
self.pub_model = rospy.Publisher('gazebo/set_model_state', ModelState, queue_size=1)
self.model_name = model_name
self.rate = rospy.Rate(10)
self.x_model_pose = x_pose
self.y_model_pose = y_pose
self.z_model_pose = z_pose
self.Spawning1 = Spawning1
self.orientation = oriFinal
def spawning(self,):
with open(path) as f:
product_xml = f.read()
item_name = "product_{0}_0".format(0)
print("Spawning model:%s", self.model_name)
# X and Y positions are somewhat in an incorrect order in Gazebo
item_pose = Pose(Point(x=self.y_model_pose, y=self.x_model_pose,z=self.z_model_pose),
Quaternion(self.orientation[0], self.orientation[1], self.orientation[2], self.orientation[3]))
self.Spawning1(self.model_name, product_xml, "", item_pose, "world")
def moving_goal(self):
obstacle = ModelState()
ptFinal, oriFinal = tf.lookupTransform("base_link", "ar_marker_0", rospy.Time(0))
obstacle.model_name = self.model_name
obstacle.pose = model.pose[i]
obstacle.twist = Twist()
obstacle.twist.linear.y = 1.3
obstacle.twist.angular.z = 0
self.pub_model.publish(obstacle)
def main():
rospy.init_node('Spawning_APF_Goal')
Spawning1 = rospy.ServiceProxy("gazebo/spawn_sdf_model", SpawnModel)
rospy.wait_for_service("gazebo/spawn_sdf_model")
model_coordinates = rospy.ServiceProxy(
'/gazebo/get_model_state', GetModelState)
object_coordinates = model_coordinates("robot", "")
z_position = object_coordinates.pose.position.z
y_position = object_coordinates.pose.position.y
x_position = object_coordinates.pose.position.x
print "X, Y, Z: ", x_position, y_position, z_position
# This is the position of the object spawned in gazebo relative to the base_link
ptFinal = [-0.5, 0.1, 0.05]
oriFinal = quaternion_from_euler(0.0, 0.0, -0.6)
moving1 = Moving("custom_box", Spawning1, x_position - ptFinal[1], y_position + ptFinal[0], z_position + ptFinal[2], oriFinal)
moving1.spawning()
# while not rospy.is_shutdown():
# moving.moving_goal()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
106957
|
<reponame>murufeng/awesome_lightweight_networks
from .mobile_vit import *
from .levit import *
from .ConvNeXt import *
|
StarcoderdataPython
|
151126
|
#!/usr/bin/env python3
#
# A PyMol extension script to test extrusion of a hub from a single module's
# c-term
#
def main():
"""main"""
raise RuntimeError('This module should not be executed as a script')
if __name__ =='__main__':
main()
in_pymol = False
try:
import pymol
in_pymol = True
except ImportError as ie:
main()
if in_pymol:
from pymol import cmd
from elfinpy import utilities
import os
@cmd.extend
def extrude_hub_at_single_c(single_name=None, hub_name=None, component_id=None):
"""Extrudes a hub at the c-terminus of a single module.
Args:
- single_name - string
- hub_name - string
- component_id - string, indicating which module component inside the hub
to extend into
"""
if single_name is None or \
hub_name is None or \
component_id is None:
print(extrude_hub_at_single_c.__doc__)
else:
double_name = '-'.join([single_name, single_name])
pdb_dir = os.getcwd() + '/../../resources/pdb_aligned/'
cmd.load(pdb_dir + '/singles/' + single_name + '.pdb')
cmd.set_name(single_name, 'single')
cmd.load(pdb_dir + '/doubles/' + double_name + '.pdb')
cmd.set_name(double_name, 'double')
cmd.load(pdb_dir + '/hubs/' + hub_name + '.pdb')
cmd.set_name(hub_name, 'hub')
xdb=utilities.read_json(os.getcwd() + '/../../resources/xdb.json')
hub_comp_info = xdb['hub_data'][hub_name]['component_info']
comp_a_cc = hub_comp_info[component_id]['n_connections'][single_name]
tx('hub', rot=comp_a_cc['rot'], tran_after=comp_a_cc['tran'])
noclip()
print('Extruded Hub {} Component {} at Single {}\'s C-Term'.\
format(hub_name, component_id, single_name))
@cmd.extend
def extrude_hub_at_single_c_example():
extrude_hub_at_single_c(single_name='D4', hub_name='D4_C3_02', component_id='B')
print('Extrude Hub At Single Loaded')
|
StarcoderdataPython
|
110426
|
#!/usr/bin/python3
"""Init file of the tests.measures.sample module."""
import brfast.measures.sample
|
StarcoderdataPython
|
3200884
|
from models import QuoteModel
from ingestors.ingestor_interface import IngestorInterface
class TextIngestor(IngestorInterface):
    @classmethod
    def parse(cls, path):
        with open(path, "r", encoding="utf-8-sig") as file:
            lines = file.readlines()
        return [QuoteModel(*quote.rstrip("\n").split(" - ")) for quote in lines]
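# Illustrative note (added): parse() expects a plain-text file with one quote
# per line in the form "<body> - <author>", for example:
#     "Keep digging" - Rex
# Each line is split on " - " and unpacked into QuoteModel, which presumably
# takes (body, author); lines without the separator would fail the unpacking.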
|
StarcoderdataPython
|
3251603
|
import tornado.ioloop
import tornado.web
import socket
import os
import re
import glob
class HelpHandler(tornado.web.RequestHandler):
def get(self):
print ('-->HelpHandler.get...' + self.request.uri)
self.render("help.html")
|
StarcoderdataPython
|
1753631
|
<filename>tests/portfolio/test_portfolio.py<gh_stars>0
import unittest
from unittest.mock import Mock
import numpy as np
from core.instrument import Instrument
from core.trade import Trade
from core.events import TradeExecutedEvent
from portfolio.portfolio import Portfolio
class TestPortfolio(unittest.TestCase):
"""
Unittest to test implementation of portfolio class
"""
def setUp(self):
self._instruments = [
Instrument(name='btc_usd_perp', instrument_id='BTC-PERP', tick_size=1, size_unit=0.0001),
Instrument(name='eth_usd_perp', instrument_id='ETH-PERP', tick_size=0.01, size_unit=0.001),
Instrument(name='ltc_usd_perp', instrument_id='LTC-PERP', tick_size=0.01, size_unit=0.01)
]
def test_init(self):
portfolio = Portfolio(self._instruments)
current_position = portfolio.get_current_position()
self.assertEqual(len(current_position), 3, 'Length of position array is not equal number of specified instruments.')
self.assertEqual(current_position.index.tolist(), ['btc_usd_perp', 'eth_usd_perp', 'ltc_usd_perp'])
self.assertEqual(current_position.tolist(), [0, 0, 0])
def test_handle_execution(self):
portfolio = Portfolio(self._instruments)
trade = Trade(self._instruments[0], size=1.0, client=Mock(), execution_callback=Mock())
trade_executed_event = TradeExecutedEvent(trade)
portfolio.handle_execution(trade_executed_event)
self.assertEqual(portfolio.get_current_position()[self._instruments[0].name], 1.0)
trade = Trade(self._instruments[1], -1.0, client=Mock(), execution_callback=Mock())
trade_executed_event = TradeExecutedEvent(trade)
portfolio.handle_execution(trade_executed_event)
self.assertEqual(portfolio.get_current_position()[self._instruments[1].name], -1.0)
trade = Trade(self._instruments[0], -1.0, client=Mock(), execution_callback=Mock())
trade_executed_event = TradeExecutedEvent(trade)
portfolio.handle_execution(trade_executed_event)
self.assertEqual(portfolio.get_current_position()[self._instruments[0].name], 0.0)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
1776809
|
import lz4.frame
import pytest
test_data = [
(b'a' * 1024 * 1024),
]
@pytest.fixture(
params=test_data,
ids=[
'data' + str(i) for i in range(len(test_data))
]
)
def data(request):
return request.param
def test_frame_decompress_mem_usage(data):
tracemalloc = pytest.importorskip('tracemalloc')
tracemalloc.start()
compressed = lz4.frame.compress(data)
prev_snapshot = None
for i in range(1000):
decompressed = lz4.frame.decompress(compressed) # noqa: F841
if i % 100 == 0:
snapshot = tracemalloc.take_snapshot()
if prev_snapshot:
stats = snapshot.compare_to(prev_snapshot, 'lineno')
assert stats[0].size_diff < (1024 * 4)
prev_snapshot = snapshot
def test_frame_decompress_chunk_mem_usage(data):
tracemalloc = pytest.importorskip('tracemalloc')
tracemalloc.start()
compressed = lz4.frame.compress(data)
prev_snapshot = None
for i in range(1000):
context = lz4.frame.create_decompression_context()
decompressed = lz4.frame.decompress_chunk( # noqa: F841
context, compressed
)
if i % 100 == 0:
snapshot = tracemalloc.take_snapshot()
if prev_snapshot:
stats = snapshot.compare_to(prev_snapshot, 'lineno')
assert stats[0].size_diff < (1024 * 10)
prev_snapshot = snapshot
def test_frame_open_decompress_mem_usage(data):
tracemalloc = pytest.importorskip('tracemalloc')
tracemalloc.start()
with lz4.frame.open('test.lz4', 'w') as f:
f.write(data)
prev_snapshot = None
for i in range(1000):
with lz4.frame.open('test.lz4', 'r') as f:
decompressed = f.read() # noqa: F841
if i % 100 == 0:
snapshot = tracemalloc.take_snapshot()
if prev_snapshot:
stats = snapshot.compare_to(prev_snapshot, 'lineno')
assert stats[0].size_diff < (1024 * 10)
prev_snapshot = snapshot
# TODO: add many more memory usage tests along the lines of this one
# for other funcs
def test_dummy_always_pass():
# If pytest finds all tests are skipped, then it exits with code 5 rather
# than 0, which tox sees as an error. Here we add a dummy test that always passes.
assert True
|
StarcoderdataPython
|
1756812
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time
import rclpy
import ujson
import subprocess
from typing import Optional
from rclpy.node import Node
from example_interfaces.msg import String, Bool
from rcl_interfaces.srv import GetParameters
from python_pkg.volume_percent import get_init_volume_percent
# play music/the-second.mp3
# 3611660238
# play /home/felix/music/the-second.mp3 trim '=80.5' jump to pos
# amixer -c 0 cget numid=1 // read current volume information
# amixer -c 0 cset numid=1 63% // set audio to specific volume percent
DEFAULT_TRACK = "/home/felix/music/Wavecont-Inspiring-Full.mp3"
class AudioLoopNode(Node):
def __init__(self):
super().__init__("audio_loop")
        self.card_id: Optional[str] = None
self.music_started = False
self.pid = None
self.logger_info("Audio loop started")
self.volume_percent = get_init_volume_percent()
self.start_time = 0
self.running_seconds = 0
self.track_paused = False
self.track = ""
self.subscriber_mfrc = self.create_subscription(
String, "mfrc_connection", self.callback_mfrc_connection, qos_profile=1
)
self.subscriber_volume_down = self.create_subscription(
Bool, "volume_down", self.callback_volume_down, qos_profile=1
)
self.subscriber_volume_up = self.create_subscription(
Bool, "volume_up", self.callback_volume_up, qos_profile=1
)
self.subscriber_pause = self.create_subscription(
Bool, "pause_track", self.callback_pause_track, qos_profile=1
)
def logger_info(self, text: str):
self.get_logger().info(text)
def logger_error(self, text: str):
self.get_logger().error(text)
def set_audio_volume(self):
if 0 <= self.volume_percent <= 100:
subprocess.Popen(
["amixer", "-c", "0", "cset", "numid=1", f"{self.volume_percent}%"]
)
def callback_pause_track(self, msg: Bool):
if msg.data:
if self.track_paused:
process = subprocess.Popen(
["play", self.track, "trim", f"={self.running_seconds}"]
)
self.pid = process.pid
self.music_started = True
self.track_paused = False
self.start_time = time.time()
else:
self.running_seconds += time.time() - self.start_time
self.stop_music()
self.track_paused = True
self.music_started = True
def callback_volume_down(self, msg: Bool):
if msg.data:
if 0 <= self.volume_percent - 5:
self.volume_percent -= 5
self.set_audio_volume()
def callback_volume_up(self, msg: Bool):
# amixer -D pulse sset Master 5%+
if msg.data:
if self.volume_percent + 5 <= 100:
self.volume_percent += 5
self.set_audio_volume()
def get_track(self):
client = self.create_client(GetParameters, "get_music_path")
if not client.service_is_ready():
self.track = DEFAULT_TRACK
future = client.call_async(
# First value for the corresponding track, second for validation
GetParameters.Request(names=[self.card_id, self.card_id])
)
future.add_done_callback(self.callback_get_track)
def callback_get_track(self, future):
try:
results = future.result().values
self.logger_info(f"{results = }")
if results[1].string_value == str(self.card_id):
self.track = future.result().values[0].string_value
self.logger_info(f"Received track {self.track}")
else:
self.get_track()
except Exception as err:
self.logger_error(f"{err}")
def play_music(self):
if self.card_id and self.music_started:
return
elif self.card_id and self.track and not self.music_started:
self.logger_info(f"start track {self.track} for {self.card_id = }")
process = subprocess.Popen(["play", self.track])
self.start_time = time.time()
self.running_seconds = 0
self.pid = process.pid
self.logger_info(f"{self.pid = }")
self.music_started = True
elif self.card_id is None and self.music_started:
print("Music stopped")
self.stop_music()
def stop_music(self):
if self.pid is not None:
subprocess.Popen(["kill", str(self.pid)])
self.pid = None
self.music_started = False
print("Music stopped. New card detected.")
def clean_current_audio(self, stop_music=True):
if stop_music:
self.stop_music()
self.card_id = None
self.track_paused = False
self.track = ""
def callback_mfrc_connection(self, msg: String):
data = ujson.loads(msg.data)
if data["status"] == "ok":
card_id = data.get("card_id")
if card_id != self.card_id:
self.clean_current_audio()
self.card_id = card_id
if not self.track:
self.get_track()
else:
self.clean_current_audio()
self.play_music()
def main(args=None):
rclpy.init(args=args)
node = AudioLoopNode()
try:
rclpy.spin(node) # will hold/keep alive node
finally:
rclpy.shutdown()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3303930
|
import unittest
import platform
class SysfontModuleTest(unittest.TestCase):
def todo_test_create_aliases(self):
self.fail()
def todo_test_initsysfonts(self):
self.fail()
@unittest.skipIf('Darwin' not in platform.platform(), 'Not mac we skip.')
def test_initsysfonts_darwin(self):
import pygame.sysfont
self.assertTrue(len(pygame.sysfont.get_fonts()) > 10)
def test_sysfont(self):
import pygame.font
pygame.font.init()
arial = pygame.font.SysFont('Arial', 40)
def todo_test_initsysfonts_unix(self):
self.fail()
def todo_test_initsysfonts_win32(self):
self.fail()
################################################################################
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
3287883
|
# # -*- coding:utf-8 -*-
# from torcms.model.info_hist_model import MInfoHist
#
# def Test():
# assert MInfoHist()
#
#
|
StarcoderdataPython
|
3223971
|
<reponame>devasia1000/anti_adblock
from libmproxy.protocol.http import decoded
log_file = ''
filter_list = []
def string_matching_boyer_moore_horspool(text='', pattern=''):
"""
Returns positions where pattern is found in text
See http://en.wikipedia.org/wiki/Boyer%E2%80%93Moore%E2%80%93Horspool_algorithm for an explanation on how
this algorithm works.
O(n)
Performance: ord() is slow so we shouldn't use it here
Example: text = 'ababbababa', pattern = 'aba'
string_matching_boyer_moore_horspool(text, pattern) returns [0, 5, 7]
@param text text to search inside
@param pattern string to search for
@return list containing offsets (shifts) where pattern is found inside text
"""
m = len(pattern)
n = len(text)
offsets = []
if m > n:
return offsets
skip = []
for k in range(256):
skip.append(m)
for k in range(m-1):
skip[ord(pattern[k])] = m - k - 1
skip = tuple(skip)
k = m - 1
while k < n:
j = m - 1; i = k
while j >= 0 and text[i] == pattern[j]:
j -= 1
i -= 1
if j == -1:
offsets.append(i + 1)
k += skip[ord(text[k])]
return offsets
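# Illustrative check (added): mirrors the docstring example above.
#     string_matching_boyer_moore_horspool('ababbababa', 'aba')  ->  [0, 5, 7]
# i.e. the pattern 'aba' starts at offsets 0, 5 and 7 of the text.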
def start(context, argv):
global log_file
global filter_list
log_file = open("log.txt", "w")
fs = open("easylist.txt")
for line in fs:
filter_list.append(line.rstrip())
def response(context, flow):
global log_file
global filter_list
with decoded(flow.response):
for rule in filter_list:
#print 'Trying out ' + rule
result = string_matching_boyer_moore_horspool(flow.response.content, rule)
for res in result:
#log_file.write(str(res) + '\n')
#log_file.flush()
print str(flow.response.content)[res:res+10]
|
StarcoderdataPython
|
1792044
|
import json
class NotSet:
pass
class PlayerShip:
def __init__(self, id: str):
self.id = id
self.target_x = None
self.target_y = None
self.metadata = {}
def to_dict(self) -> dict:
return dict(
id=self.id,
target_x=self.target_x,
target_y=self.target_y,
metadata=self.metadata
)
@classmethod
def from_dict(cls, data: dict):
c = cls(data["id"])
c.target_x = data["target_x"]
c.target_y = data["target_y"]
c.metadata = data.get("metadata", {})
return c
def set_attribute(self, attribute: str, value):
try:
json.dumps(value)
except(TypeError, OverflowError):
raise ValueError("Attribute values MUST be serializable!")
self.metadata[attribute] = value
def get_attribute(self, attribute: str, default=NotSet):
        if default is NotSet:
if attribute not in self.metadata:
raise AttributeError("Ship does not have attribute '{}'!".format(attribute))
return self.metadata.get(attribute)
else:
return self.metadata.get(attribute, default)
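# Illustrative round-trip sketch (added; the attribute names are assumptions):
if __name__ == "__main__":
    ship = PlayerShip("ship-1")
    ship.target_x, ship.target_y = 10.0, -4.5
    ship.set_attribute("hull", 75)  # values must be JSON-serializable
    restored = PlayerShip.from_dict(ship.to_dict())
    assert restored.get_attribute("hull") == 75
    assert restored.get_attribute("shield", default=0) == 0  # missing -> default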
|
StarcoderdataPython
|
3307067
|
<filename>gauged/config.py
"""
Gauged
https://github.com/chriso/gauged (MIT Licensed)
Copyright 2014 (c) <NAME> <<EMAIL>>
"""
from .writer import Writer
from .utilities import to_bytes, Time
DEFAULTS = {
'namespace': 0,
'block_size': Time.DAY,
'resolution': Time.SECOND,
'writer_name': 'default',
'overwrite_blocks': False,
'key_overflow': Writer.ERROR,
'key_whitelist': None,
'flush_seconds': 0,
'append_only_violation': Writer.ERROR,
'gauge_nan': Writer.ERROR,
'key_cache_size': 64 * 1024,
'max_interval_steps': 31 * 24,
'min_cache_interval': Time.HOUR,
'max_look_behind': Time.WEEK,
'defaults': {
'namespace': None,
'limit': 10,
'offset': None,
'prefix': None,
'start': None,
'end': None,
'interval': Time.DAY,
'cache': True,
'key': None,
'aggregate': None,
'percentile': 50
}
}
class Config(object):
def __init__(self, **kwargs):
self.block_arrays = None
self.defaults = None
self.key_whitelist = None
self.block_size = None
self.resolution = None
self.update(**kwargs)
def update(self, **kwargs):
for key in kwargs.iterkeys():
if key not in DEFAULTS:
raise ValueError('Unknown configuration key: ' + key)
for key, default in DEFAULTS.iteritems():
if key == 'defaults':
defaults = DEFAULTS['defaults'].copy()
if 'defaults' in kwargs:
for key, value in kwargs['defaults'].iteritems():
if key not in defaults:
raise ValueError('Unknown default key: ' + key)
defaults[key] = value
self.defaults = defaults
else:
setattr(self, key, kwargs.get(key, default))
if self.block_size % self.resolution != 0:
raise ValueError('`block_size` must be a multiple of `resolution`')
self.block_arrays = self.block_size // self.resolution
if self.key_whitelist is not None:
self.key_whitelist = {to_bytes(key) for key in self.key_whitelist}
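# Illustrative note (added): block_arrays is the number of resolution-sized
# slots per block. With the defaults above (block_size=Time.DAY,
# resolution=Time.SECOND -- presumably 86400 s and 1 s), each block holds
# 86400 // 1 = 86400 slots; update() rejects any block_size that is not an
# exact multiple of the resolution.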
|
StarcoderdataPython
|
28250
|
# Incorrect Regex "https://www.hackerrank.com/challenges/incorrect-regex/problem"
# Enter your code here. Read input from STDIN. Print output to STDOUT
import re
for i in range(int(input())):
    try:
        re.compile(input())
        print("True")
    except re.error:
        print("False")
|
StarcoderdataPython
|
1692467
|
<gh_stars>0
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from time import time
from argparse import ArgumentParser
from bigdl.orca import init_orca_context, stop_orca_context
from bigdl.friesian.feature import FeatureTable
LABEL_COL = 0
INT_COLS = ["_c{}".format(i) for i in list(range(1, 14))]
CAT_COLS = ["_c{}".format(i) for i in list(range(14, 40))]
conf = {"spark.network.timeout": "10000000",
"spark.sql.broadcastTimeout": "7200",
"spark.sql.shuffle.partitions": "2000",
"spark.locality.wait": "0s",
"spark.sql.hive.filesourcePartitionFileCacheSize": "4096000000",
"spark.sql.crossJoin.enabled": "true",
"spark.serializer": "org.apache.spark.serializer.KryoSerializer",
"spark.kryo.unsafe": "true",
"spark.kryoserializer.buffer.max": "1024m",
"spark.task.cpus": "1",
"spark.executor.heartbeatInterval": "200s",
"spark.driver.maxResultSize": "40G",
# Default replication is 3. You may adjust according your cluster settings.
"spark.hadoop.dfs.replication": "1"}
def _parse_args():
parser = ArgumentParser()
parser.add_argument("--cluster_mode", type=str, default="local",
help="The cluster mode, such as local, yarn, standalone or spark-submit.")
parser.add_argument("--master", type=str, default=None,
help="The master url, only used when the cluster_mode is standalone.")
parser.add_argument("--cores", type=int, default=48,
help="The number of cores to use on each node.")
parser.add_argument("--memory", type=str, default="240g",
help="The amount of memory to allocate on each node.")
parser.add_argument("--num_nodes", type=int, default=8,
help="The number of nodes to use in the cluster.")
parser.add_argument("--driver_cores", type=int, default=4,
help="The number of cores to use for the driver.")
parser.add_argument("--driver_memory", type=str, default="36g",
help="The amount of memory to allocate for the driver.")
parser.add_argument("--days", type=str, default="0-23",
help="The day range for data preprocessing, such as 0-23, 0-1.")
parser.add_argument("--frequency_limit", type=int, default=15,
help="Categories with frequency below this value will be "
"omitted from encoding.")
parser.add_argument("--input_folder", type=str, required=True,
help="The path to the folder of parquet files, "
"either a local path or an HDFS path.")
parser.add_argument("--output_folder", type=str,
help="The path to save the preprocessed data and "
"the generated string indices to parquet files. "
"HDFS path is recommended.")
args = parser.parse_args()
start, end = args.days.split("-")
args.day_range = list(range(int(start), int(end) + 1))
args.days = len(args.day_range)
return args
def preprocess_and_save(data_tbl, models, mode, save_path):
data_tbl = data_tbl.encode_string(CAT_COLS, models) \
.fillna(0, INT_COLS + CAT_COLS).log(INT_COLS)
data_tbl = data_tbl.ordinal_shuffle_partition()
if save_path:
if mode == "train":
save_path = os.path.join(save_path, "saved_data")
elif mode == "test":
save_path = os.path.join(save_path, "saved_data_test")
else:
raise ValueError("mode should be either train or test")
print("Saving {} data files to {}".format(mode, save_path))
data_tbl.write_parquet(save_path)
else:
data_tbl.compute()
return data_tbl
if __name__ == "__main__":
args = _parse_args()
if args.cluster_mode == "local":
init_orca_context("local", cores=args.cores, memory=args.memory)
elif args.cluster_mode == "standalone":
init_orca_context("standalone", master=args.master, cores=args.cores,
num_nodes=args.num_nodes, memory=args.memory,
driver_cores=args.driver_cores, driver_memory=args.driver_memory,
conf=conf)
elif args.cluster_mode == "yarn":
init_orca_context("yarn-client", cores=args.cores,
num_nodes=args.num_nodes, memory=args.memory,
driver_cores=args.driver_cores, driver_memory=args.driver_memory,
conf=conf)
elif args.cluster_mode == "spark-submit":
init_orca_context("spark-submit")
else:
raise ValueError(
"cluster_mode should be one of 'local', 'yarn', 'standalone' and 'spark-submit'"
", but got " + args.cluster_mode)
time_start = time()
paths = [os.path.join(args.input_folder, "day_%d.parquet" % i) for i in args.day_range]
tbl = FeatureTable.read_parquet(paths)
idx_list = tbl.gen_string_idx(CAT_COLS, freq_limit=args.frequency_limit)
if args.days == 24: # Full Criteo dataset
# Exclude the last path day_23.parquet since the first half of day_23 is separated for test.
train_data = FeatureTable.read_parquet(paths[:-1])
train_preprocessed = preprocess_and_save(train_data, idx_list, "train", args.output_folder)
test_data = FeatureTable.read_parquet(
os.path.join(args.input_folder, "day_23_test.parquet"))
test_preprocessed = preprocess_and_save(test_data, idx_list, "test", args.output_folder)
else:
train_data = FeatureTable.read_parquet(paths)
train_preprocessed = preprocess_and_save(train_data, idx_list, "train", args.output_folder)
time_end = time()
print("Total preprocessing time: ", time_end - time_start)
train_preprocessed.show(5)
if args.output_folder:
for idx in idx_list:
idx.write_parquet(args.output_folder)
print("Finished")
stop_orca_context()
|
StarcoderdataPython
|
3331733
|
<gh_stars>10-100
import sys
from argparse import ArgumentParser
from configparser import ConfigParser
from distutils.util import strtobool
def arg_parse(args):
# setting the log level on the root logger must happen BEFORE any output
# parse values from a configuration file if provided and use those as the
# default values for the argparse arguments
config_argparse = ArgumentParser(prog=__file__, add_help=False)
config_argparse.add_argument('-c', '--config-file',
help='path to configuration file', required=True)
config_args, _ = config_argparse.parse_known_args(args)
defaults = {}
if config_args.config_file:
try:
config_parser = ConfigParser()
with open(config_args.config_file) as f:
config_parser.read_file(f)
config_parser.read(config_args.config_file)
except OSError as err:
sys.exit(1)
defaults.update(dict(config_parser.items('options')))
# parse the program's main arguments using the dictionary of defaults and
# the previous parsers as "parent' parsers
parsers = [config_argparse]
main_parser = ArgumentParser(prog=__file__, parents=parsers)
main_parser.set_defaults(**defaults)
main_parser.add_argument('-s', '--data_dir')
main_parser.add_argument('-r', '--res_dir')
main_parser.add_argument('-nums', '--num_epochs_s', type=int)
main_parser.add_argument('-numa', '--num_epochs_a', type=int)
main_parser.add_argument('-batch_size', '--batch_size', type=int)
main_parser.add_argument('-lr_a', '--learning_rate_a', type=float)
main_parser.add_argument('-lr_s', '--learning_rate_s', type=float)
main_parser.add_argument('-seed', '--seed', type=int)
main_parser.add_argument('-p_val', '--p_val', type=float)
main_parser.add_argument('-p_test', '--p_test', type=float)
main_parser.add_argument('-all', '--all_patient', type=lambda x:bool(strtobool(x)))
main_parser.add_argument('-k', '--num_k', type=int)
main_args = main_parser.parse_args(args)
return main_args
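# Illustrative example (added): the two-stage parsing above reads its defaults
# from an INI-style file with an [options] section, e.g. a hypothetical
# train.cfg passed as `-c train.cfg`:
#
#     [options]
#     data_dir = /data/eeg
#     res_dir = /results
#     num_epochs_s = 50
#     batch_size = 32
#
# Values supplied on the command line still override the file's defaults.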
def inference_parse(args):
# setting the log level on the root logger must happen BEFORE any output
# parse values from a configuration file if provided and use those as the
# default values for the argparse arguments
config_argparse = ArgumentParser(prog=__file__, add_help=False)
config_argparse.add_argument('-c', '--config-file',
help='path to configuration file', required=True)
config_args, _ = config_argparse.parse_known_args(args)
defaults = {}
if config_args.config_file:
try:
config_parser = ConfigParser()
with open(config_args.config_file) as f:
config_parser.read_file(f)
config_parser.read(config_args.config_file)
except OSError as err:
sys.exit(1)
defaults.update(dict(config_parser.items('options')))
# parse the program's main arguments using the dictionary of defaults and
# the previous parsers as "parent' parsers
parsers = [config_argparse]
main_parser = ArgumentParser(prog=__file__, parents=parsers)
main_parser.set_defaults(**defaults)
main_parser.add_argument('-s', '--data_dir', type=str)
main_parser.add_argument('-r', '--res_dir', type=str)
main_parser.add_argument('-m', '--model_dir', type=str)
main_parser.add_argument('-d', '--device', type=str)
main_parser.add_argument('-l', '--long_eeg',type=lambda x:bool(strtobool(x)))
main_args = main_parser.parse_args(args)
return main_args
def arg_parse90(args):
# setting the log level on the root logger must happen BEFORE any output
# parse values from a configuration file if provided and use those as the
# default values for the argparse arguments
config_argparse = ArgumentParser(prog=__file__, add_help=False)
config_argparse.add_argument('-c', '--config-file',
help='path to configuration file', required=True)
config_args, _ = config_argparse.parse_known_args(args)
defaults = {}
if config_args.config_file:
try:
config_parser = ConfigParser()
with open(config_args.config_file) as f:
config_parser.read_file(f)
config_parser.read(config_args.config_file)
except OSError as err:
sys.exit(1)
defaults.update(dict(config_parser.items('options')))
# parse the program's main arguments using the dictionary of defaults and
# the previous parsers as "parent' parsers
parsers = [config_argparse]
main_parser = ArgumentParser(prog=__file__, parents=parsers)
main_parser.set_defaults(**defaults)
main_parser.add_argument('-s', '--data_dir')
main_parser.add_argument('-r', '--res_dir')
main_parser.add_argument('-nums', '--num_epochs_s', type=int)
main_parser.add_argument('-batch_size', '--batch_size', type=int)
main_parser.add_argument('-lr_s', '--learning_rate_s', type=float)
main_parser.add_argument('-seed', '--seed', type=int)
main_parser.add_argument('-p_val', '--p_val', type=float)
main_parser.add_argument('-p_test', '--p_test', type=float)
main_parser.add_argument('-w', '--weight', type=float)
main_parser.add_argument('-all', '--all_patient', type=lambda x:bool(strtobool(x)))
main_args = main_parser.parse_args(args)
return main_args
if __name__ == "__main__":
arg_parse(sys.argv[1:])
|
StarcoderdataPython
|
3320619
|
import sys, wx
sys.path.append('../../')
from sciwx.mesh import Canvas3D, MCanvas3D
from sciapp.util import surfutil, meshutil
from sciapp.object import Scene, Mesh, Surface2d, Surface3d, TextSet, Volume3d
from sciwx.mesh import Canvas3DFrame, Canvas3DNoteBook, Canvas3DNoteFrame
import scipy.ndimage as ndimg
from skimage.data import moon, camera
import numpy as np
def dem_test():
cnf = Canvas3DFrame(None)
cnf.add_obj('dem', Surface2d(img=moon(), sample=1, sigma=1, k=0.3, cmap='jet'))
cnf.Show()
def ball_test():
cnf = Canvas3DFrame(None)
vts, fs = meshutil.create_ball((100,100,100), 1)
cnf.add_obj('ball', Mesh(vts, fs, colors=(1,0,0)))
cnf.add_obj('line', TextSet(texts=['TEXT'], verts=[(101,100,100)], size=256))
cnf.Show()
def random_ball_test():
cnf = Canvas3DFrame(None)
os = np.random.rand(30).reshape((-1,3))
rs = np.random.rand(10)/7+0.05
cs = np.random.rand(10)
vts_b, fs_b, cs_b = meshutil.create_balls(os, rs, cs)
cnf.add_obj('balls', Mesh(verts=vts_b, faces=fs_b, colors=cs_b, cmap='jet'))
cnf.Show()
def line_test():
cnf = Canvas3DFrame(None)
vts = np.array([(0,0,0),(1,1,0),(2,1,0),(1,0,0)], dtype=np.float32)
fs = np.array([(0,1,2),(1,2,3)], dtype=np.uint32)
ns = np.ones((4,3), dtype=np.float32)
n_mer, n_long = 6, 11
pi = np.pi
dphi = pi / 1000.0
phi = np.arange(0.0, 2 * pi + 0.5 * dphi, dphi)
mu = phi * n_mer
x = np.cos(mu) * (1 + np.cos(n_long * mu / n_mer) * 0.5)
y = np.sin(mu) * (1 + np.cos(n_long * mu / n_mer) * 0.5)
z = np.sin(n_long * mu / n_mer) * 0.5
vts = np.array([x, y, z]).T.astype(np.float32)
fs = np.arange(len(x), dtype=np.uint32)
fs = np.array([fs[:-1], fs[1:]]).T
# cs[:] = geoutil.auto_lookup(vts[:,2], geoutil.linear_color('jet'))/255
cnf.add_obj('ball', Mesh(vts, fs, colors=vts[:,2], mode='grid', cmap='jet'))
cnf.Show()
def mesh_test():
cnf = Canvas3DFrame(None)
dphi, dtheta = np.pi/16.0, np.pi/16.0
[phi,theta] = np.mgrid[0:np.pi+dphi*1.5:dphi,0:2*np.pi+dtheta*1.5:dtheta]
m0 = 4; m1 = 3; m2 = 2; m3 = 3; m4 = 6; m5 = 2; m6 = 6; m7 = 4;
r = np.sin(m0*phi)**m1 + np.cos(m2*phi)**m3 + np.sin(m4*theta)**m5 + np.cos(m6*theta)**m7
x = r*np.sin(phi)*np.cos(theta)
y = r*np.cos(phi)
z = r*np.sin(phi)*np.sin(theta)
vts, fs = meshutil.create_grid_mesh(x, y, z)
mesh = Mesh(vts, fs.astype(np.uint32), vts[:,2], mode='grid', cmap='jet')
cnf.add_obj('ball', mesh)
cnf.Show()
def ball_ring_test():
cnf = Canvas3DFrame(None)
os = np.random.rand(30).reshape((-1,3))
rs = np.random.rand(10)/7 + 0.05
cs = np.random.rand(10)
vts_b, fs_b, cs_b = meshutil.create_balls(os, rs, cs)
cnf.add_obj('balls', Mesh(verts=vts_b, faces=fs_b, colors=cs_b, cmap='jet'))
vts_l, fs_l = meshutil.create_line(*os.T)
cnf.add_obj('line', Mesh(verts=vts_l, faces=fs_l, colors=cs, cmap='jet', mode='grid'))
# vts_c, fs_c, ns_c, cs_c = geoutil.build_cube((0,0,0), (1,1,1))
vts_c, ls_c = meshutil.create_bound((0,0,0), (1,1,1), 3, 3, 3)
cnf.add_obj('box', Mesh(verts=vts_c, faces=ls_c, ))
cnf.Show()
def balls_mark_rest():
cnf = Canvas3DFrame(None)
os = np.random.rand(30).reshape((-1,3))
rs = np.random.rand(10)/7+0.05
cs = np.random.rand(10)
vts_b, fs_b, cs_b = meshutil.create_balls(os, rs, cs)
cont = ['ID:%s'%i for i in range(10)]
# vtss, fss, pps, h, color = surfutil.build_marks(cont, os, rs, 0.05, (1,1,1))
# cnf.add_obj('balls', Mesh(verts=vts_b.astype(np.float32), faces=fs_b.astype(np.uint32), colors=cs_b, cmap='jet'))
cnf.add_obj('line', TextSet(texts=a, verts=b, size=1600, colors=c))
cnf.Show()
def surface2d_test():
cnf = Canvas3DFrame(None)
x, y = np.ogrid[-2:2:20j, -2:2:20j]
z = x * np.exp( - x**2 - y**2)
vts, fs = meshutil.create_surface2d(z, sample=1, k=10)
dem = Mesh(verts=vts, faces=fs.astype(np.uint32), colors=z.ravel(), cmap='jet')
cnf.add_obj('dem', dem)
cnf.Show()
def arrow_test():
cnf = Canvas3DFrame(None)
v1, v2 = np.array([[[0,0,0],[5,5,5]],[[0,15,5],[2,8,3]]], dtype=np.float32)
vts, fs, ns, cs = meshutil.build_arrows(v1, v2, 1, 1, 1, 1, (1,0,0))
# vts, fs = meshutil.create_arrow(15, 15)
cnf.add_obj('arrow', Mesh(vts, fs, colors=(1,0,0)))
cnf.Show()
def cube_test():
cnf = Canvas3DFrame(None)
vts, fs, ls = meshutil.create_cube()
cnf.add_obj('box', Mesh(vts, ls, colors=(1,0,0), mode='grid'))
cnf.Show()
def cube_surf_test():
cnf = Canvas3DFrame(None)
lut = np.zeros((256,3), dtype=np.uint8)
lut[:,0] = np.arange(256)
imgs = np.array([camera()[:300,::]]*256)
vts, fs, ns, cs = geoutil.build_img_cube(imgs)
obj = cnf.add_surf('cube', vts, fs, ns, cs)
vts, fs, ns, cs = geoutil.build_img_box(imgs)
cnf.add_surf('box', vts, fs, ns, cs, mode='grid')
cnf.Show()
def isosurface_test():
cnf = Canvas3DFrame(None)
cube = np.zeros((100,100,100), dtype=np.float32)
x,y,z = np.random.randint(10,90,900).reshape(3,-1)
cube[x,y,z] = 1000
surf3d = Surface3d(cube, level=1.5, sigma=3, step=2, colors=(1,0,0))
cnf.add_obj('volume', surf3d)
cnf.Show()
def volume_test():
cnf = Canvas3DFrame(None)
cube = np.zeros((100,100,100), dtype=np.float32)
x,y,z = np.random.randint(10,90,900).reshape(3,-1)
cube[x,y,z] = 1000
surf3d = Volume3d(cube, level=1.5, step=2, cmap='gray')
cnf.add_obj('volume', surf3d)
cnf.Show()
if __name__ == '__main__':
app = wx.App()
# balls_mark_rest()
# dem_test()
# ball_test()
# random_ball_test()
# line_test()
# mesh_test()
# ball_ring_test()
# balls_mark_rest()
# surface2d_test()
# arrow_test() # bad
# cube_test()
# cube_surf_test() # bad
# isosurface_test()
volume_test()
app.MainLoop()
|
StarcoderdataPython
|
103420
|
from pandac.PandaModules import *
from direct.gui.DirectGui import *
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from toontown.hood import ZoneUtil
import random
LOADING_SCREEN_SORT_INDEX = 4000
class ToontownLoadingScreen:
defaultTex = 'phase_3.5/maps/loading/default.jpg'
zone2picture = {
ToontownGlobals.GoofySpeedway : 'phase_3.5/maps/loading/gs.jpg',
ToontownGlobals.ToontownCentral : 'phase_3.5/maps/loading/ttc.jpg',
ToontownGlobals.SillyStreet : 'phase_3.5/maps/loading/ttc_ss.jpg',
ToontownGlobals.LoopyLane : 'phase_3.5/maps/loading/ttc_ll.jpg',
ToontownGlobals.PunchlinePlace : 'phase_3.5/maps/loading/ttc_pp.jpg',
ToontownGlobals.DonaldsDock : 'phase_3.5/maps/loading/dd.jpg',
ToontownGlobals.BarnacleBoulevard : 'phase_3.5/maps/loading/dd_bb.jpg',
ToontownGlobals.SeaweedStreet : 'phase_3.5/maps/loading/dd_ss.jpg',
ToontownGlobals.LighthouseLane : 'phase_3.5/maps/loading/dd_ll.jpg',
ToontownGlobals.DaisyGardens : 'phase_3.5/maps/loading/dg.jpg',
ToontownGlobals.ElmStreet : 'phase_3.5/maps/loading/dg_es.jpg',
ToontownGlobals.MapleStreet : 'phase_3.5/maps/loading/dg_ms.jpg',
ToontownGlobals.OakStreet : 'phase_3.5/maps/loading/dg_os.jpg',
ToontownGlobals.MinniesMelodyland : 'phase_3.5/maps/loading/mml.jpg',
ToontownGlobals.AltoAvenue : 'phase_3.5/maps/loading/mml_aa.jpg',
ToontownGlobals.BaritoneBoulevard : 'phase_3.5/maps/loading/mml_bb.jpg',
ToontownGlobals.TenorTerrace : 'phase_3.5/maps/loading/mml_tt.jpg',
ToontownGlobals.TheBrrrgh : 'phase_3.5/maps/loading/tb.jpg',
ToontownGlobals.WalrusWay : 'phase_3.5/maps/loading/tb_ww.jpg',
ToontownGlobals.SleetStreet : 'phase_3.5/maps/loading/tb_ss.jpg',
ToontownGlobals.PolarPlace : 'phase_3.5/maps/loading/tb_pp.jpg',
ToontownGlobals.DonaldsDreamland : 'phase_3.5/maps/loading/ddl.jpg',
ToontownGlobals.LullabyLane : 'phase_3.5/maps/loading/ddl_ll.jpg',
ToontownGlobals.PajamaPlace : 'phase_3.5/maps/loading/ddl_pp.jpg',
ToontownGlobals.OutdoorZone : 'phase_3.5/maps/loading/oz.jpg',
ToontownGlobals.GolfZone : 'phase_3.5/maps/loading/gz.jpg',
ToontownGlobals.SellbotHQ : 'phase_3.5/maps/loading/sbhq.jpg',
ToontownGlobals.CashbotHQ : 'phase_3.5/maps/loading/cbhq.jpg',
ToontownGlobals.LawbotHQ : 'phase_3.5/maps/loading/lbhq.jpg',
ToontownGlobals.BossbotHQ : 'phase_3.5/maps/loading/bbhq.jpg'
}
def __init__(self):
self.__expectedCount = 0
self.__count = 0
self.gui = loader.loadModel('phase_3/models/gui/progress-background.bam')
self.title = DirectLabel(guiId='ToontownLoadingScreenTitle', parent=self.gui, relief=None, pos=(base.a2dRight/5, 0, 0.235), text='', textMayChange=1, text_scale=0.08, text_fg=(0.03, 0.83, 0, 1), text_align=TextNode.ALeft, text_font=ToontownGlobals.getSignFont())
self.tip = DirectLabel(guiId='ToontownLoadingScreenTip', parent=self.gui, relief=None, pos=(0.0, 0, .55), text='', textMayChange=1, text_scale=0.06, text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), text_wordwrap=25, text_align=TextNode.ACenter, text_font=ToontownGlobals.getSignFont())
self.waitBar = DirectWaitBar(guiId='ToontownLoadingScreenWaitBar', parent=self.gui, frameSize=(base.a2dLeft+(base.a2dRight/4.95), base.a2dRight-(base.a2dRight/4.95), -0.03, 0.03), pos=(0, 0, 0.15), text='')
logoScale = 0.5625 # Scale for our locked aspect ratio (2:1).
self.logo = OnscreenImage(
image='phase_3/maps/toontown-logo.png',
scale=(logoScale * 2.0, 1, logoScale))
self.logo.reparentTo(hidden)
self.logo.setTransparency(TransparencyAttrib.MAlpha)
scale = self.logo.getScale()
# self.logo.setPos(scale[0], 0, -scale[2])
self.logo.setPos(0, 0, -scale[2])
self.toon = None
def destroy(self):
self.tip.destroy()
self.title.destroy()
self.gui.removeNode()
if self.toon:
self.toon.delete()
self.logo.removeNode()
def getTip(self, tipCategory):
return TTLocalizer.TipTitle + '\n' + random.choice(TTLocalizer.TipDict.get(tipCategory))
def begin(self, range, label, gui, tipCategory, zoneId):
self.waitBar['range'] = range
self.title['text'] = label
loadingScreenTex = self.zone2picture.get(ZoneUtil.getBranchZone(zoneId), self.defaultTex)
self.background = loader.loadTexture(loadingScreenTex)
self.__count = 0
self.__expectedCount = range
if gui:
if base.localAvatarStyle:
from toontown.toon import Toon
bored = {'emote':'bored', 'frame':135} #must define before list
run = {'emote':'run', 'frame':7}
victory = {'emote':'victory', 'frame':10}
applause = {'emote':'applause', 'frame':23}
dust = {'emote':'sprinkle-dust', 'frame':40}
hypno = {'emote':'hypnotize', 'frame':25}
cringe = {'emote':'cringe', 'frame':25}
wave = {'emote':'wave', 'frame':25}
shrug = {'emote':'shrug', 'frame':30}
duck = {'emote':'duck', 'frame':40}
up = {'emote':'up', 'frame':60}
pushup = {'emote':'down', 'frame':23}
bow = {'emote':'bow', 'frame':45}
emotelist = [bored, run, victory, applause, dust,
hypno, cringe, wave, shrug, duck,
up, pushup, bow]
emotechosen = random.choice(emotelist)
self.toon = Toon.Toon()
self.toon.setDNA(base.localAvatarStyle)
self.toon.pose(emotechosen['emote'], emotechosen['frame'])
self.toon.getGeomNode().setDepthWrite(1)
self.toon.getGeomNode().setDepthTest(1)
self.toon.setHpr(205, 0, 0)
self.toon.setScale(0.18)
self.toon.setPos(base.a2dBottomRight.getX()/1.25, 0, -0.034)
self.toon.reparentTo(self.waitBar)
self.waitBar['frameSize'] = (base.a2dLeft+(base.a2dRight/8.15), base.a2dRight-(base.a2dRight/2.57), -0.03, 0.03)
self.title.reparentTo(base.a2dpBottomLeft, LOADING_SCREEN_SORT_INDEX)
self.title.setPos(0.24, 0, 0.23)
self.tip['text'] = self.getTip(tipCategory)
self.gui.setPos(0, -0.1, 0)
self.gui.reparentTo(aspect2d, LOADING_SCREEN_SORT_INDEX)
self.gui.setTexture(self.background, 1)
if loadingScreenTex == self.defaultTex:
self.logo.reparentTo(base.a2dpTopCenter, LOADING_SCREEN_SORT_INDEX)
else:
self.title.reparentTo(base.a2dpBottomLeft, LOADING_SCREEN_SORT_INDEX)
self.gui.reparentTo(hidden)
self.logo.reparentTo(hidden)
self.tip.reparentTo(base.a2dpBottomCenter, LOADING_SCREEN_SORT_INDEX)
self.waitBar.reparentTo(base.a2dpBottomCenter, LOADING_SCREEN_SORT_INDEX)
self.waitBar.update(self.__count)
def end(self):
self.waitBar.finish()
self.waitBar.reparentTo(self.gui)
self.title.reparentTo(self.gui)
self.tip.reparentTo(self.gui)
self.gui.reparentTo(hidden)
if self.toon:
self.toon.reparentTo(hidden)
self.logo.reparentTo(hidden)
return (self.__expectedCount, self.__count)
def abort(self):
self.gui.reparentTo(hidden)
def tick(self):
self.__count = self.__count + 1
self.waitBar.update(self.__count)
|
StarcoderdataPython
|
3229395
|
from sanic import Sanic
from sanic.response import text
app = Sanic('App')
@app.get("/")
async def hello_world(request):
return text('Hello, world.')
if __name__ == '__main__':
app.go_fast(host='0.0.0.0')
|
StarcoderdataPython
|