| id (string, 1-7 chars) | text (string, 6-1.03M chars) | dataset_id (1 class) |
| --- | --- | --- |
71022
|
<reponame>BirgittM/COSCO-1<gh_stars>10-100
from .Scheduler import *
import numpy as np
from copy import deepcopy
class MADMCRScheduler(Scheduler):
def __init__(self):
super().__init__()
self.utilHistory = []
        self.utilHistoryContainer = []
def updateUtilHistoryContainer(self):
containerUtil = [(cid.getBaseIPS() if cid else 0) for cid in self.env.containerlist]
self.utilHistoryContainer.append(containerUtil)
def updateUtilHistory(self):
hostUtils = []
for host in self.env.hostlist:
hostUtils.append(host.getCPU())
self.utilHistory.append(hostUtils)
def selection(self):
self.updateUtilHistoryContainer()
selectedHostIDs = self.ThresholdHostSelection()
        selectedVMIDs = self.MaxCorContainerSelection(selectedHostIDs, self.utilHistoryContainer)
return selectedVMIDs
def placement(self, containerIDs):
return self.RandomPlacement(containerIDs)
|
StarcoderdataPython
|
3353713
|
import abc
import logging
from typing import TypeVar, Type, List, Tuple, Any, Callable
__all__ = ('Config', 'Argument', 'Arguments', 'Task', 'Message', 'Fence', 'TaskLogger',
'State', 'Router', 'LoggerService', 'Broker', 'App')
Config = TypeVar('Config')
Argument = TypeVar('Argument')
Arguments = TypeVar('Arguments')
Message = TypeVar('Message')
Fence = TypeVar('Fence')
TaskLogger = TypeVar('TaskLogger')
CLOSERS = {'"': '"', "'": "'", '[': ']', '{': '}', '(': ')'}
class MsgRepr:
__slots__ = ('m',)
def __init__(self, m):
self.m = m
def __str__(self):
"""Short representation"""
return "{'id': %r, 'task': %r}" % (self.m.get('id'), self.m.get('task'))
# noinspection PyDefaultArgument
def __repr__(self, _closers=CLOSERS):
"""Full representation"""
ret = []
for k, v in self.m.items():
v = repr(v)
if len(v) > 100:
v = v[:100] + ' ...'
if v[0] in _closers:
v += _closers[v[0]]
ret.append('%r: %s' % (k, v))
return '{' + ', '.join(ret) + '}'
class State:
PENDING = 'pending'
RUNNING = 'running'
class LoggerService(abc.ABC):
@abc.abstractmethod
def get_logger(self, name) -> logging.Logger:
raise NotImplementedError()
class Router(abc.ABC):
@abc.abstractmethod
def get_queue(self, task_name: str) -> str:
raise NotImplementedError()
class Broker(abc.ABC):
@property
@abc.abstractmethod
def BrokerError(self):
raise NotImplementedError()
@abc.abstractmethod
def set_node_id(self, node_id: str):
raise NotImplementedError()
@abc.abstractmethod
def get_nodes(self) -> List[Tuple[int, str]]:
raise NotImplementedError()
@abc.abstractmethod
def setup(self, consumer_id: str, queues: List[str]):
raise NotImplementedError()
@abc.abstractmethod
def close(self):
raise NotImplementedError()
@abc.abstractmethod
def get_messages(self,
timeout: int = 0):
raise NotImplementedError()
@abc.abstractmethod
def ack(self, key):
raise NotImplementedError()
@abc.abstractmethod
def send_message(self, message: dict, reply_back: bool = False):
raise NotImplementedError()
@abc.abstractmethod
def send_reply(self, consumer: str, message: dict):
raise NotImplementedError()
@abc.abstractmethod
def set_result(self, result_key: str, result: dict, expires_in: int):
raise NotImplementedError()
@abc.abstractmethod
def get_result(self, result_key: str, timeout: int = 0):
raise NotImplementedError()
@abc.abstractmethod
def set_state(self, task_id: str, state: Any):
raise NotImplementedError()
@abc.abstractmethod
def run_gc(self):
raise NotImplementedError()
class App(abc.ABC):
settings: dict = None
@abc.abstractmethod
def set_hooks(self,
on_request: Callable = None,
on_response: Callable = None):
raise NotImplementedError()
@abc.abstractmethod
def get_context(self) -> dict:
raise NotImplementedError()
@abc.abstractmethod
def set_context(self, **kwargs):
raise NotImplementedError()
@abc.abstractmethod
def inject(self, funcs, args=None, cache=True):
raise NotImplementedError()
@abc.abstractmethod
def serve_message(self, message: dict, fence: Fence = None):
raise NotImplementedError()
@abc.abstractmethod
def send_message(self, message: dict):
raise NotImplementedError()
@abc.abstractmethod
def result(self, result_key: str):
raise NotImplementedError()
class Task(abc.ABC):
throws: Tuple[Type[Exception], ...] = ()
ignore_result: bool = False
@property
@abc.abstractmethod
def handler(self) -> Callable:
raise NotImplementedError()
@property
@abc.abstractmethod
def name(self) -> str:
raise NotImplementedError()
@staticmethod
@abc.abstractmethod
def get_arguments(*args, **kwargs) -> dict:
raise NotImplementedError()
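# A minimal sketch of implementing two of the interfaces above; the class and
# queue names are illustrative assumptions, not part of this module's API.
if __name__ == '__main__':
    class StdLoggerService(LoggerService):
        def get_logger(self, name) -> logging.Logger:
            return logging.getLogger(name)

    class SingleQueueRouter(Router):
        def get_queue(self, task_name: str) -> str:
            # route every task to one shared queue
            return 'default'

    demo_logger = StdLoggerService().get_logger('demo')
    demo_logger.warning('task %r routed to queue %r',
                        'example.task', SingleQueueRouter().get_queue('example.task'))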
|
StarcoderdataPython
|
114110
|
# Copyright 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from gearman import GearmanAdminClient as AdminClient
from service import Service, Server
class AdminService(Service):
def __init__(self):
self.client = AdminClient(Server.hosts)
self.server = self.client.ping_server()
self.status = self.client.get_status()
self.version = self.client.get_version()
self.workers = self.client.get_workers()
def pretty_print(self):
print("-" * 40)
print("Server alive {}".format(self.version))
print("It took {}s to pingback".format(self.server))
print("-" * 40)
print("Tasks:")
for s in self.status:
func = s.get("task")
workers = s.get("workers")
running = s.get("running")
queued = s.get("queued")
print("=> Task func name: {}".format(func))
print(" Status (q/r/w): {}/{}/{}".format(queued, running, workers))
print("-" * 40)
if len(self.workers) > 1:
print("Workers:")
for w in self.workers:
worker = w.get("client_id")
if worker == "-" or len(worker) == 0:
continue
ipaddr = w.get("ip")
tasks = w.get("tasks")
print("=> Worker ID: {}".format(worker))
print(" Worker IP: {}".format(ipaddr))
if len(tasks) >= 1:
print(" Installed: {}".format(", ".join(tasks)))
else:
print(" No tasks installed...")
else:
print("No workers")
print("-" * 40)
@classmethod
def run(cls, *args):
c = cls()
c.pretty_print()
|
StarcoderdataPython
|
1648239
|
# -*- coding: utf-8 -*-
from math import sqrt, pi
class SoftConfidenceWeighted(object):
MIN_CONFIDENCE = 0.0
MAX_CONFIDENCE = 1.0
MIN_AGGRESSIVENESS = 0.0
VALID_LABEL = [1, -1]
ERF_ORDER = 30
def __init__(self, confidence=0.7, aggressiveness=1.0):
if confidence < self.MIN_CONFIDENCE:
confidence = self.MIN_CONFIDENCE
if confidence > self.MAX_CONFIDENCE:
confidence = self.MAX_CONFIDENCE
if aggressiveness < self.MIN_AGGRESSIVENESS:
aggressiveness = self.MIN_AGGRESSIVENESS
self.aggressiveness = aggressiveness
self.phi = self.__probit(confidence)
self.psi = 1.0 + self.phi * self.phi / 2.0
self.zeta = 1.0 + self.phi * self.phi
self.mu = {}
self.sigma = {}
def classify(self, data):
margin = 0.0
        for feature, weight in data.items():
if feature in self.mu:
margin += self.mu[feature] * weight
return 1 if margin > 0.0 else -1
def update(self, data, label):
if not (label in self.VALID_LABEL):
return False
sigma_x = self.__get_sigma_x(data)
(mean, variance) = self.__get_margin_mean_and_variance(
label, data, sigma_x)
if (self.phi * sqrt(variance)) <= mean:
return True
(alpha, beta) = self.__get_alpha_and_beta(mean, variance)
if alpha == 0.0 or beta == 0.0:
return True
        for feature, weight in sigma_x.items():
self.mu[feature] += alpha * label * weight
self.sigma[feature] -= beta * weight * weight
return True
def __get_sigma_x(self, data):
sigma_x = {}
        for feature, weight in data.items():
            if feature not in self.sigma:
self.sigma[feature] = 1.0
sigma_x[feature] = self.sigma[feature] * weight
return sigma_x
def __get_margin_mean_and_variance(self, label, data, sigma_x):
mean = 0.0
variance = 0.0
        for feature, weight in data.items():
            if feature not in self.mu:
self.mu[feature] = 0.0
mean += self.mu[feature] * weight
variance += sigma_x[feature] * weight
mean *= label
return mean, variance
def __get_alpha_and_beta(self, mean, variance):
alpha_den = variance * self.zeta
if alpha_den == 0.0:
return 0.0, 0.0
term1 = mean * self.phi / 2.0
alpha = (
-1.0 * mean * self.psi +
self.phi * sqrt(term1 * term1 + alpha_den)
) / alpha_den
if alpha <= 0.0:
return 0.0, 0.0
if alpha >= self.aggressiveness:
alpha = self.aggressiveness
beta_num = alpha * self.phi
term2 = variance * beta_num
beta_den = term2 + (
-1.0 * term2 + sqrt(term2 * term2 + 4.0 * variance)
) / 2.0
if beta_den == 0.0:
return 0.0, 0.0
return alpha, beta_num / beta_den
def __probit(self, p):
return sqrt(2.0) * self.__erf_inv(2.0 * p - 1.0)
def __erf_inv(self, z):
value = 1.0
term = 1.0
c_memo = [1.0]
for n in range(1, self.ERF_ORDER+1):
term *= (pi * z * z / 4.0)
c = 0.0
for m in range(0, n):
c += (c_memo[m] * c_memo[n - 1 - m] /
(m + 1.0) / (2.0 * m + 1.0))
c_memo.append(c)
value += (c * term / (2.0 * n + 1.0))
return (sqrt(pi) * z * value / 2.0)
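# A minimal usage sketch; the toy feature dictionaries below are illustrative only.
if __name__ == '__main__':
    scw = SoftConfidenceWeighted(confidence=0.7, aggressiveness=1.0)
    training = [({'good': 1.0, 'great': 1.0}, 1),
                ({'bad': 1.0, 'awful': 1.0}, -1)]
    for features, label in training:
        scw.update(features, label)
    print(scw.classify({'good': 1.0}))   # expected: 1
    print(scw.classify({'awful': 1.0}))  # expected: -1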
|
StarcoderdataPython
|
1776761
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'main.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
from joblib import load
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(441, 220)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(330, 200, 111, 16))
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(180, 140, 101, 31))
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.label_2.setFont(font)
self.label_2.setTextFormat(QtCore.Qt.PlainText)
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setObjectName("label_2")
self.widget = QtWidgets.QWidget(self.centralwidget)
self.widget.setGeometry(QtCore.QRect(10, 10, 421, 123))
self.widget.setObjectName("widget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.widget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.textEdit = QtWidgets.QLineEdit(self.widget)
self.textEdit.setObjectName("textEdit")
self.verticalLayout.addWidget(self.textEdit)
self.pushButton = QtWidgets.QPushButton(self.widget)
self.pushButton.setObjectName("pushButton")
self.verticalLayout.addWidget(self.pushButton)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
btn = self.pushButton
btn.clicked.connect(self.detecting_fake_news)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate(
"MainWindow", "Predict whether news is Fake or Real"))
self.label.setText(_translate("MainWindow", "create by : @ryasy_d"))
# self.label_2.setText(_translate("MainWindow", "..."))
self.pushButton.setText(_translate("MainWindow", "CHECK"))
# function to run for prediction
def detecting_fake_news(self):
        # retrieve the saved joblib model for prediction
        print('Ready')
        with open('predict.joblib', 'rb') as model_file:
            load_model = load(model_file)
prediction = load_model.predict([self.textEdit.text()])
self.label_2.setText(prediction[0])
print(prediction[0])
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
# EXAMPLE NEWS
# Climate change: Heatwave temperature threshold raised in England by Met Office
# Heatwave temperature threshold raised in England by Met Office
# CMA investigates Google and Meta over ad tech concerns
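# A possible way to produce a compatible 'predict.joblib' (a sketch; the
# TfidfVectorizer + PassiveAggressiveClassifier choice and the training data
# are assumptions, not taken from this file):
#
#     from sklearn.pipeline import Pipeline
#     from sklearn.feature_extraction.text import TfidfVectorizer
#     from sklearn.linear_model import PassiveAggressiveClassifier
#     from joblib import dump
#
#     texts = ["a real headline", "a fake headline"]   # hypothetical corpus
#     labels = ["REAL", "FAKE"]
#     pipe = Pipeline([("tfidf", TfidfVectorizer()),
#                      ("clf", PassiveAggressiveClassifier())])
#     pipe.fit(texts, labels)
#     dump(pipe, "predict.joblib")   # detecting_fake_news() above can then load it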
|
StarcoderdataPython
|
4812732
|
#!/usr/bin/env python3
"""Advent of Code 2015, Day 11: Corporate Policy"""
import string
import aoc
import pytest
def validate_password(password):
"""Check password against the puzzle’s requirements
Passwords:
* must include one increasing straight of at least three
letters, like abc, bcd, cde, and so on, up to xyz.
They cannot skip letters; abd doesn't count.
* may not contain the letters i, o, or l
* must contain at least two different, non-overlapping
pairs of letters, like aa, bb, or zz.
Args:
password (str): The password to validate
Returns:
bool: True if the password satisfies all requirements
"""
windowed = ("".join(t) for t in zip(password, password[1:], password[2:]))
contains_straight = any(w in string.ascii_lowercase for w in windowed)
no_invalid_chars = not any(char in password for char in "iol")
pair_chars = {a for a, b in zip(password, password[1:]) if a == b}
enough_unique_pairs = len(pair_chars) >= 2
return contains_straight and no_invalid_chars and enough_unique_pairs
def clean_bad_letters(password):
"""Return a candidate password after checking for invalid characters
If password doesn't contain the characters i, o, or l it is returned
immediately.
If it does, the string returned is the next potentially valid password
after short-circuiting and skipping passwords containing the invalid
letter in that particular position.
For example:
xi -> xj
xix -> xja
xixyz -> xjaaa
"""
search_results = (password.find(char) for char in "iol")
bad_chars = [x for x in search_results if x != -1]
if not bad_chars:
return password
cut_pos = min(bad_chars)
new_letter = increment_letter(password[cut_pos])
count_a_to_add = len(password[cut_pos:]) - 1
return password[:cut_pos] + new_letter + "a" * count_a_to_add
def increment_letter(letter):
"""Return the character after `letter` in a restricted circular alphabet
This increments a single letter at a time: a becomes b,
z becomes a and so on.
i, o and l are excluded from the alphabet used as they are
    not allowed to appear in valid passwords according to the
problem description.
It is, however, safe to increment those restricted letters
using this function as a special case is made for them.
"""
restricted_dict = {"i": "j", "l": "m", "o": "p"}
if letter in restricted_dict:
return restricted_dict[letter]
ok_letters = "abcdefghjkmnpqrstuvwxyz"
current_index = ok_letters.index(letter)
is_final_index = current_index == len(ok_letters) - 1
new_index = 0 if is_final_index else current_index + 1
return ok_letters[new_index]
def increment_password(current_pw, index=None):
"""Create a new password by advancing letters in a circular fashion
Only the final letter is incremented (a -> b, z -> a), but earlier
letters will also be incremented if the final one wraps around
(from z to a). This is done by recursively calling increment_password,
with `index` the position to change.
See increment_letter for details on the (restricted) alphabet used.
"""
pw_list = list(current_pw)
increment_index = len(pw_list) - 1 if index is None else index
new_letter = increment_letter(pw_list[increment_index])
pw_list[increment_index] = new_letter
candidate = "".join(pw_list)
if new_letter == "a" and increment_index > 0:
candidate = increment_password(candidate, index=increment_index - 1)
return candidate
def new_password(current_password):
"""Find the next new password starting at current_password
Only valid passwords are returned, with the requirements being:
* must include one increasing straight of at least three
letters, like abc, bcd, cde, and so on, up to xyz.
They cannot skip letters; abd doesn't count.
* may not contain the letters i, o, or l
* must contain at least two different, non-overlapping
pairs of letters, like aa, bb, or zz.
Passwords must also be exactly eight letters long, but the
clear assumption in the problem is that existing passwords
are only ever that length, so there is no specific check
to maintain the eight-character limit (as there is no
specified response).
"""
candidate = clean_bad_letters(current_password)
if candidate == current_password:
candidate = increment_password(candidate)
while not validate_password(candidate):
candidate = increment_password(candidate)
return candidate
@pytest.mark.parametrize(
"invalid_pass",
[
"<PASSWORD>",
"<PASSWORD>",
"<PASSWORD>",
],
)
def test_invalid_password(invalid_pass):
assert not validate_password(invalid_pass)
@pytest.mark.parametrize(
"valid_pass",
[
"<PASSWORD>",
"<PASSWORD>",
],
)
def test_valid_password(valid_pass):
assert validate_password(valid_pass)
@pytest.mark.parametrize(
"old,new",
[
("abcdefgh", "abcdffaa"),
("ghijklmn", "ghjaabcc"),
],
)
def test_new_password(old, new):
assert new_password(old) == new
if __name__ == "__main__":
# Part one
puzzle_input = "vzbxkghb"
part_one_pw = new_password(puzzle_input)
print(part_one_pw)
# Part two
print(new_password(part_one_pw))
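# Worked examples, consistent with the tests above:
#     >>> validate_password("abcdefgh")
#     False          # has a straight ("abc") but no two pairs
#     >>> validate_password("abcdffaa")
#     True           # straight "abc", pairs "ff" and "aa", no i/o/l
#     >>> increment_password("xyz")
#     'xza'          # the final z wraps to a and carries into the middle letter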
|
StarcoderdataPython
|
160007
|
import re
#from random import randrange
from model.contact import Contact
def test_contact_all(app, db):
ui_list = app.contact.get_contact_list()
def clean(contact):
return Contact(id=contact.id, name=contact.name.strip(), surname=contact.surname.strip(), address=contact.address,
all_emails_from_home_page=merge_emails_like_on_home_page(contact), all_phones_from_home_page=merge_phones_like_on_home_page(contact))
db_list = map(clean, db.get_contact_list())
assert sorted(ui_list, key=Contact.id_or_max) == sorted(db_list, key=Contact.id_or_max)
#def test_contact_all(app):
#old_contacts = app.contact.get_contact_list()
#index = randrange(len(old_contacts))
#contact_from_home_page = app.contact.get_contact_list()[index]
#contact_from_edit_page = app.contact.get_contact_info_from_edit_page(index)
#assert contact_from_home_page.surname == contact_from_edit_page.surname
#assert contact_from_home_page.name == contact_from_edit_page.name
#assert clear_probel(contact_from_home_page.address) == clear_probel(contact_from_edit_page.address)
#assert clear_probel(contact_from_home_page.all_emails_from_home_page) == merge_emails_like_on_home_page(contact_from_edit_page)
#assert contact_from_home_page.all_phones_from_home_page == merge_phones_like_on_home_page(contact_from_edit_page)
def clear(s):
return re.sub("[() -]", "", s)
def clear_probel(s):
return re.sub("[ ]", "", s)
def merge_phones_like_on_home_page(contact):
return "\n".join(filter(lambda x: x != "",
map(lambda x: clear(x),
filter(lambda x: x is not None,
[contact.homephone, contact.mobilephone, contact.workphone, contact.secondaryphone]))))
def merge_emails_like_on_home_page(contact):
return "\n".join(filter(lambda x: x != "",
map(lambda x: clear_probel(x),
filter(lambda x: x is not None,
[contact.email, contact.email2, contact.email3]))))
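# Worked examples of the helpers above:
#     clear("+7 (495) 123-45-67")     -> "+74951234567"
#     clear_probel("Main street  12") -> "Mainstreet12"
#     merge_emails_like_on_home_page(contact) joins the non-empty
#     email/email2/email3 fields with newlines, matching the home-page layout.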
|
StarcoderdataPython
|
1752474
|
"""Unit test class for GraphTransformer objects."""
from tqdm.auto import tqdm
from unittest import TestCase
from embiggen.edge_prediction import edge_prediction_evaluation
from embiggen import get_available_models_for_edge_prediction, get_available_models_for_node_embedding
from embiggen.edge_prediction.edge_prediction_model import AbstractEdgePredictionModel
from embiggen.embedders import SPINE
from ensmallen.datasets.linqs import Cora, get_words_data
from embiggen.edge_prediction import DecisionTreeEdgePrediction
class TestEvaluateEdgePrediction(TestCase):
"""Unit test class for GraphTransformer objects."""
def setUp(self):
"""Setup objects for running tests on GraphTransformer objects class."""
self._graph, _ = get_words_data(Cora())
self._graph = self._graph.remove_singleton_nodes()
self._graph_without_node_types = self._graph.remove_node_types()
self._subgraph_of_interest = self._graph.filter_from_names(
source_node_type_name_to_keep=["Neural_Networks"]
)
self._number_of_holdouts = 2
def test_model_recreation(self):
df = get_available_models_for_edge_prediction()
for _, row in df.iterrows():
model = AbstractEdgePredictionModel.get_model_from_library(
model_name=row.model_name,
task_name=AbstractEdgePredictionModel.task_name(),
library_name=row.library_name
)()
AbstractEdgePredictionModel.get_model_from_library(
model_name=row.model_name,
task_name=AbstractEdgePredictionModel.task_name(),
library_name=row.library_name
)(**model.parameters())
def test_evaluate_edge_prediction(self):
df = get_available_models_for_edge_prediction()
holdouts = edge_prediction_evaluation(
holdouts_kwargs=dict(train_size=0.8),
models=df.model_name,
library_names=df.library_name,
node_features=SPINE(embedding_size=5),
evaluation_schema="Connected Monte Carlo",
graphs=[self._graph, self._graph_without_node_types],
number_of_holdouts=self._number_of_holdouts,
verbose=True,
smoke_test=True
)
self.assertEqual(
holdouts.shape[0], self._number_of_holdouts*2*2*df.shape[0])
def test_evaluate_edge_prediction_in_subgraph(self):
df = get_available_models_for_edge_prediction()
holdouts = edge_prediction_evaluation(
holdouts_kwargs=dict(train_size=0.8),
models=df.model_name,
library_names=df.library_name,
node_features=SPINE(embedding_size=5),
graphs=self._graph,
number_of_holdouts=self._number_of_holdouts,
verbose=True,
smoke_test=True,
subgraph_of_interest=self._subgraph_of_interest
)
self.assertEqual(holdouts.shape[0],
self._number_of_holdouts*2*df.shape[0])
def test_tree_with_cosine(self):
for evaluation_schema in AbstractEdgePredictionModel.get_available_evaluation_schemas():
holdouts = edge_prediction_evaluation(
holdouts_kwargs=dict(train_size=0.8),
models=DecisionTreeEdgePrediction(
edge_embedding_method="CosineSimilarity"),
node_features=SPINE(embedding_size=10),
evaluation_schema=evaluation_schema,
graphs="CIO",
number_of_holdouts=self._number_of_holdouts,
verbose=True,
smoke_test=True,
validation_unbalance_rates=(1.0, 2.0,),
)
self.assertEqual(holdouts.shape[0], self._number_of_holdouts*2*2)
self.assertTrue(set(holdouts.validation_unbalance_rate)
== set((1.0, 2.0)))
def test_all_embedding_models_as_feature(self):
df = get_available_models_for_node_embedding()
bar = tqdm(
df.iterrows(),
total=df.shape[0],
leave=False,
desc="Testing embedding methods"
)
for _, row in bar:
if row.requires_edge_weights:
graph_name = "Usair97"
repository = "networkrepository"
else:
graph_name = "CIO"
repository = "kgobo"
bar.set_description(
f"Testing embedding model {row.model_name} from library {row.library_name}")
edge_prediction_evaluation(
holdouts_kwargs=dict(train_size=0.8),
models="Perceptron",
node_features=row.model_name,
evaluation_schema="Connected Monte Carlo",
graphs=graph_name,
repositories=repository,
number_of_holdouts=self._number_of_holdouts,
verbose=False,
smoke_test=True,
)
|
StarcoderdataPython
|
3233833
|
<filename>datasets/seq2seq/en-ta-parallel-v2/tokenizer.py
import nltk
def combinations(word, blacklist):
combs = []
for i in range(len(word)):
if word[i] not in blacklist:
for j in range(i+2,len(word)+1):
combs.append(word[i:j])
return combs
def greedy_combinations(word):
    # return every prefix of the word, longest first
    wlen = len(word)
    return [word[:wlen - i] for i in range(wlen)]
def freq_combinations(lines, blacklist):
symbols = []
for line in lines:
for word in line.split(' '):
symbols.extend(combinations(word, blacklist))
return nltk.FreqDist(symbols)
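# Worked examples of the helpers above:
#     combinations("abc", blacklist=set())  -> ['ab', 'abc', 'bc']
#     greedy_combinations("abc")            -> ['abc', 'ab', 'a']
#     freq_combinations(["abc abc"], set()) -> FreqDist counting each substring twice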
|
StarcoderdataPython
|
3390128
|
"""Methods for image transformations/augmentations."""
from random import choice
from typing import Any, Callable, Tuple
import cv2
import numpy as np
from PIL import Image
from scipy.ndimage import gaussian_filter
def transform(
field: np.ndarray,
mask: np.ndarray,
translation: Callable[..., Tuple[np.ndarray, np.ndarray]],
t_idx: int,
noise: Callable[..., Tuple[np.ndarray, np.ndarray]],
n_idx: int,
) -> Tuple[np.ndarray, np.ndarray]:
"""Transform the field and mask using a translation and noise function."""
assert translation.__name__ in ("t_linear", "t_quartile")
assert noise.__name__ in ("t_linear", "t_rotation", "t_flip", "t_blur", "t_gamma")
field, mask = translation(field, mask, t_idx)
return noise(field, mask, n_idx)
def t_linear(
field: np.ndarray,
mask: np.ndarray,
_: int = 0,
) -> Tuple[np.ndarray, np.ndarray]:
"""Apply a linear (i.e. no) transformation and save."""
field_t = np.asarray(Image.fromarray(np.uint8(field), "RGB"))
mask_t = np.asarray(Image.fromarray(np.uint8(mask), "L"))
return field_t, mask_t
def t_quartile(
field: np.ndarray,
mask: np.ndarray,
idx: int,
) -> Tuple[np.ndarray, np.ndarray]:
"""Divide the information into four quarters."""
assert idx in range(0, 3 + 1)
x, y = [(0, 0), (0, 1), (1, 0), (1, 1)][idx]
width, height = mask.shape # 2d array
# Slice and recover shape
field_slice = field[
(width // 2) * x : (width // 2) * (x + 1), (height // 2) * y : (height // 2) * (y + 1)
]
field_slice = field_slice.repeat(2, axis=0).repeat(2, axis=1)
mask_slice = mask[
(width // 2) * x : (width // 2) * (x + 1), (height // 2) * y : (height // 2) * (y + 1)
]
mask_slice = mask_slice.repeat(2, axis=0).repeat(2, axis=1)
# Normalise masking values
values = sorted(set(np.unique(mask_slice)) - {0})
for idx, v in enumerate(values):
mask_slice[mask_slice == v] = idx + 1
return field_slice, mask_slice
def t_rotation(
field: np.ndarray,
mask: np.ndarray,
rot: int,
) -> Tuple[np.ndarray, np.ndarray]:
"""Rotate the data."""
assert rot in range(0, 3 + 1)
for _ in range(rot):
field = np.rot90(field)
mask = np.rot90(mask)
return field, mask
def t_flip(
field: np.ndarray,
mask: np.ndarray,
idx: int,
) -> Tuple[np.ndarray, np.ndarray]:
"""Flip the data."""
assert idx in range(0, 2 + 1)
if idx == 0: # Diagonal
field = np.rot90(np.fliplr(field))
mask = np.rot90(np.fliplr(mask))
if idx == 1: # Horizontal
field = np.flip(field, axis=0)
mask = np.flip(mask, axis=0)
if idx == 2: # Vertical
field = np.flip(field, axis=1)
mask = np.flip(mask, axis=1)
return field, mask
def t_blur(
field: np.ndarray,
mask: np.ndarray,
sigma: int,
) -> Tuple[np.ndarray, np.ndarray]:
"""Blur the image by applying a Gaussian filter."""
assert 0 <= sigma <= 10
sigma_f = 1.0 + (sigma / 10)
field = np.copy(field)
for i in range(3):
field[:, :, i] = gaussian_filter(field[:, :, i], sigma=sigma_f)
return field, mask
def t_gamma(
field: np.ndarray,
mask: np.ndarray,
gamma: int,
) -> Tuple[np.ndarray, np.ndarray]:
"""Apply gamma correction to the image."""
assert gamma in range(5, 15 + 1)
inv_gamma = 1 / (gamma / 10)
table = np.array([((i / 255.0) ** inv_gamma) * 255 for i in np.arange(0, 256)]).astype("uint8")
field = cv2.LUT(field, table)
return field, mask
NOISE = [
(t_linear, (0, 0)),
(t_rotation, (0, 3)),
(t_flip, (0, 2)),
(t_blur, (0, 10)),
(t_gamma, (8, 12)),
]
def get_random_noise() -> Tuple[Callable[..., Any], int]:
"""Get a random noise augmentation."""
f, (a, b) = choice(NOISE) # noqa S311
return f, choice(range(a, b + 1)) # type: ignore # noqa S311
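# A minimal usage sketch; the 96x128 field/mask shapes below are assumptions
# chosen only for illustration.
if __name__ == "__main__":
    demo_field = np.random.randint(0, 256, size=(96, 128, 3), dtype=np.uint8)
    demo_mask = np.zeros((96, 128), dtype=np.uint8)
    noise_fn, n_idx = get_random_noise()
    field_t, mask_t = transform(demo_field, demo_mask, t_quartile, 0, noise_fn, n_idx)
    print(field_t.shape, mask_t.shape, noise_fn.__name__, n_idx)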
|
StarcoderdataPython
|
1751854
|
import pytest
import reflectivipy
from .ReflectivityExample import ReflectivityExample
from reflectivipy import MetaLink
@pytest.fixture(autouse=True)
def setup():
reflectivipy.uninstall_all()
def test_original_ast_preservation():
example = ReflectivityExample()
link = MetaLink(example, 'tag_exec', 'after', ['node'])
rf_ast = reflectivipy.reflective_method_for(ReflectivityExample, 'example_while')
original_body = rf_ast.original_ast.body[0].body
node = original_body[1].test.left
number_of_nodes = len(original_body)
original_left_id = node.id
reflectivipy.link(link, node)
new_body = rf_ast.original_ast.body[0].body
new_left = new_body[1].test.left
reflective_ast_body = rf_ast.reflective_ast.body[0].body
ReflectivityExample().example_while()
assert example.tag is node
assert new_body is original_body
assert len(new_body) == number_of_nodes
assert original_left_id == new_left.id
assert reflective_ast_body[1] is not new_body[1]
assert len(reflective_ast_body) > number_of_nodes
def test_restore_original():
example = ReflectivityExample()
link = MetaLink(example, 'tag_exec', 'after', ['node'])
rf_ast = reflectivipy.reflective_method_for(ReflectivityExample, 'example_while')
original_body = rf_ast.original_ast.body[0].body
node = original_body[1].test.left
reflectivipy.link(link, node)
example.tag = None
ReflectivityExample().example_while()
assert example.tag is node
reflectivipy.uninstall_all()
example.tag = None
ReflectivityExample().example_while()
assert example.tag is None
def test_uninstall_all():
pass
def test_metalinks_count():
example = ReflectivityExample()
link = MetaLink(example, 'tag_exec_', 'before', [])
rf_method = reflectivipy.reflective_method_for(ReflectivityExample, 'example_assign')
node = rf_method.original_ast.body[0].body[0]
assert len(reflectivipy.metalinks) == 0
reflectivipy.link(link, node)
    assert len(reflectivipy.metalinks) == 1
assert reflectivipy.metalinks.pop() is link
|
StarcoderdataPython
|
160105
|
#!/usr/bin/env python3
#
# Copyright 2019 <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import csv
import os
header1 = '''From: <NAME> <<EMAIL>>
'''
header3 = '''Subject: Outreachy - remote, paid internships
'''
body = '''I'm <NAME>, and we met when you stopped by the Outreachy booth at the Tapia conference last September. I'd like to invite you to apply for Outreachy internships. If this opportunity isn't right for you, please pass it along to a friend or other students.
https://www.outreachy.org/apply/
Outreachy internships are fully remote. You'll be working with a remote mentor from a free and open source software community. Projects may include programming, user experience, documentation, illustration, graphical design, or data science.
You'll be paid a $5,500 USD stipend for three months of work. You'll also have a $500 USD travel stipend to attend conferences or events.
Outreachy internships run from May 20 to August 20. Our application period is open now through April 2. Applicants need to make a contribution to a project to be eligible for the internship. It typically takes 1-2 weeks to make a contribution, so apply early!
Outreachy internships are open to applicants around the world who meet our eligibility rules:
https://www.outreachy.org/apply/eligibility/
Outreachy expressly invites women (both cis and trans), trans men, and genderqueer people to apply. We also expressly invite applications from residents and nationals of the United States of any gender who are Black/African American, Hispanic/Latin@, Native American/American Indian, Alaska Native, Native Hawaiian, or Pacific Islander. Anyone who faces under-representation, systemic bias, or discrimination in the technology industry of their country is invited to apply.
The current list of Outreachy internship projects is available at:
https://www.outreachy.org/apply/project-selection/
New Outreachy projects will be added until March 12.
Please reply to this email with your questions. I hope you'll apply!
<NAME>
Outreachy Organizer
'''
promote_body = '''I'm <NAME>, and we met when you stopped by the Outreachy booth at the Tapia conference last September. You said you would be willing to help us spread the word about Outreachy internships to students at your university. Applications for the May to August Outreachy internships are now open!
https://www.outreachy.org/apply/
Outreachy internships are fully remote. Outreachy interns work with a remote mentor from a free and open source software community. Projects may include programming, user experience, documentation, illustration, graphical design, or data science.
Outreachy interns are paid a $5,500 USD stipend for three months of work. Interns also have a $500 USD travel stipend to attend conferences or events.
Outreachy internships run from May 20 to August 20. Our application period is open now through April 2. Applicants need to make a contribution to a project to be eligible for the internship. It typically takes 1-2 weeks to make a contribution, so we encourage people to apply early!
Outreachy internships are open to applicants around the world who meet our eligibility rules:
https://www.outreachy.org/apply/eligibility/
Outreachy expressly invites women (both cis and trans), trans men, and genderqueer people to apply. We also expressly invite applications from residents and nationals of the United States of any gender who are Black/African American, Hispanic/Latin@, Native American/American Indian, Alaska Native, Native Hawaiian, or Pacific Islander. Anyone who faces under-representation, systemic bias, or discrimination in the technology industry of their country is invited to apply.
The current list of Outreachy internship projects is available at:
https://www.outreachy.org/apply/project-selection/
New Outreachy projects will be added until March 12.
Thanks for your help passing this opportunity along!
<NAME>
Outreachy Organizer
'''
def write_email(outdir, index, contact, body):
with open(os.path.join(outdir, str(index) + '.txt'), 'w') as email:
email.write(header1)
email.write('To: ' + contact + '\n')
email.write(header3)
email.write(body)
def main():
parser = argparse.ArgumentParser(description='Send an email to people who stopped by the Outreachy booth at Tapia')
parser.add_argument('outdir', help='Directory to create form emails in')
parser.add_argument('csv', help='CSV file of people who stopped by the booth')
args = parser.parse_args()
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
applicants = []
promoter = []
with open(args.csv, 'r') as csvFile:
freader = csv.DictReader(csvFile, delimiter=';', quotechar='"')
for row in freader:
# Only send email to people interested in the May to August round
if row['Email'] and row['Which Outreachy round do you want to apply for?,May 2019 to August 2019'] == '1':
applicants.append('"' + row['Name'].strip() + '" <' + row['Email'].strip() + '>')
elif row['Email'] and row["Do you want to help promote Outreachy to students at your university?"] == '1':
            promoter.append('"' + row['Name'].strip() + '" <' + row['Email'].strip() + '>')
for index, contact in enumerate(applicants):
write_email(args.outdir, index, contact, body)
for index, contact in enumerate(promoter):
write_email(args.outdir, index, contact, promote_body)
    print('Wrote', len(applicants + promoter), 'form emails to', args.outdir)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1660404
|
#Majority Classifier
import numpy as np
# return array with features full of the most frequent class
def predictMaj(features):
    (values, counts) = np.unique(features, return_counts=True)
    most_frequent = values[np.argmax(counts)]
    return np.full(shape=len(features), fill_value=most_frequent, dtype=int)
def main():
arr = np.array([0,0,0,0,0,0,0,0,0,0, 0, -2, 1, -2, 0, 4, 4, -6, -1])
    print(predictMaj(arr))
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3386784
|
<filename>tests/src/python/test_qgsprocessingalgrunner.py
# -*- coding: utf-8 -*-
"""QGIS Unit tests for Processing algorithm runner(s).
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '<NAME>'
__date__ = '2019-02'
__copyright__ = 'Copyright 2019, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '176c06ceefb5f555205e72b20c962740cc0ec183'
import re
from qgis.PyQt.QtCore import QCoreApplication
from qgis.testing import start_app, unittest
from qgis.core import QgsProcessingAlgRunnerTask
from processing.core.Processing import Processing
from processing.core.ProcessingConfig import ProcessingConfig
from qgis.analysis import QgsNativeAlgorithms
from qgis.core import (
QgsApplication,
QgsSettings,
QgsProcessingContext,
QgsProcessingAlgRunnerTask,
QgsProcessingAlgorithm,
QgsProject,
QgsProcessingFeedback,
)
start_app()
class ConsoleFeedBack(QgsProcessingFeedback):
_error = ''
def reportError(self, error, fatalError=False):
self._error = error
print(error)
class CrashingProcessingAlgorithm(QgsProcessingAlgorithm):
"""
Wrong class in factory createInstance()
"""
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
def tr(self, string):
return QCoreApplication.translate('Processing', string)
def createInstance(self):
"""Wrong!"""
return ExampleProcessingAlgorithm()
def name(self):
return 'mycrashingscript'
def displayName(self):
return self.tr('My Crashing Script')
def group(self):
return self.tr('Example scripts')
def groupId(self):
return 'examplescripts'
def shortHelpString(self):
return self.tr("Example algorithm short description")
def initAlgorithm(self, config=None):
pass
def processAlgorithm(self, parameters, context, feedback):
return {self.OUTPUT: 'an_id'}
class TestQgsProcessingAlgRunner(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Run before all tests"""
QCoreApplication.setOrganizationName("QGIS_Test")
QCoreApplication.setOrganizationDomain(
"QGIS_TestPyQgsProcessingInPlace.com")
QCoreApplication.setApplicationName("QGIS_TestPyQgsProcessingInPlace")
QgsSettings().clear()
Processing.initialize()
QgsApplication.processingRegistry().addProvider(QgsNativeAlgorithms())
cls.registry = QgsApplication.instance().processingRegistry()
def test_bad_script_dont_crash(self): # spellok
"""Test regression #21270 (segfault)"""
context = QgsProcessingContext()
context.setProject(QgsProject.instance())
feedback = ConsoleFeedBack()
task = QgsProcessingAlgRunnerTask(CrashingProcessingAlgorithm(), {}, context=context, feedback=feedback)
self.assertTrue(task.isCanceled())
self.assertIn('name \'ExampleProcessingAlgorithm\' is not defined', feedback._error)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
1645557
|
from kivymd.uix.screen import MDScreen
class IntroScreen(MDScreen):
def goto_exercise(self):
self.manager.goto_exercise()
print("IntroScreen: goto_exercise")
def goto_choose(self):
self.manager.goto_choose()
|
StarcoderdataPython
|
3327798
|
from .main import Voice
from .phonemes import (PhonemeList, Phoneme, FrenchPhonemes,
BritishEnglishPhonemes, GreekPhonemes, ArabicPhonemes,
SpanishPhonemes, GermanPhonemes, ItalianPhonemes,
PortuguesePhonemes, AmericanEnglishPhonemes)
|
StarcoderdataPython
|
172570
|
#!/usr/bin/env python
from __future__ import generators
"""web.py: makes web apps (http://webpy.org)"""
__version__ = "0.2"
__revision__ = "$Rev: 62 $"
__author__ = "<NAME> <<EMAIL>>"
__license__ = "public domain"
__contributors__ = "see http://webpy.org/changes"
# todo:
# - some sort of accounts system
import utils, db, net, wsgi, http, webapi, request, httpserver, debugerror
import template, form
from utils import *
from db import *
from net import *
from wsgi import *
from http import *
from webapi import *
from request import *
from httpserver import *
from debugerror import *
try:
import cheetah
from cheetah import *
except ImportError:
pass
def main():
import doctest
doctest.testmod(utils)
doctest.testmod(db)
doctest.testmod(net)
doctest.testmod(wsgi)
doctest.testmod(http)
doctest.testmod(webapi)
doctest.testmod(request)
try:
doctest.testmod(cheetah)
except NameError:
pass
template.test()
import sys
urls = ('/web.py', 'source')
class source:
def GET(self):
header('Content-Type', 'text/python')
print open(sys.argv[0]).read()
if listget(sys.argv, 1) != 'test':
run(urls, locals())
if __name__ == "__main__": main()
|
StarcoderdataPython
|
3244570
|
'''
code for preparing and loading dataset for the SET game
'''
### imports
import os
import numpy as np
import pandas as pd
import logging
from pathlib import Path
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# dataset path
dataset_path = Path(os.path.dirname(os.path.abspath(__file__))) / "data" / "128x96"
csv_path = dataset_path.parent / "dataset.csv"
# setup logger
logger = logging.getLogger(__name__)
def get_labels_from_iterator(directory_iterator, y):
'''
return array with class labels from a generator. Helper function to get labels as convenient np array
Args:
directory_iterator (DirectoryIterator instance): The Iterator instance returned by flow_from_directory
        y (nd array): Should be a one-hot matrix of shape (m, 81)
'''
# class_indices contains a dictionary mapping class indices to classnames
# we use this to obtain a classnames vector, where the index corresponds to the
# class index, and the value to the classname
class_names = np.array(list(directory_iterator.class_indices.items()))[:, 0]
# return classnames for y
return class_names[np.argmax(y, axis=1)]
def upsample_dataset(images_path, nr_images, aug_params={}, target_size=(96, 128), batch_size=81, shuffle=True, seed=42, output_path=None):
'''
return generated images and target labels as numpy arrays
Args:
images_path (Pathlib.path): Path that contains the dataset (should contain subfolders for each label)
nr_images (integer): Amount of images to return
aug_params (dict): Parameters for ImageDataGenerator. Use if you want to use augmentation.
target_size (tuple of integers): The dimensions (height, width) to which all images found will be resized. (default: (96, 128)
batch_size (integer): Size of the batches of data. (default 81)
shuffle (boolean): Whether to shuffle the data (default: True)
        seed (integer): Random seed for shuffling and transformations. (default 42)
        output_path (Pathlib.Path): If given, augmented images are also written to this directory. (default None)
    '''
flow_params = {
'target_size': target_size,
'batch_size': batch_size,
'shuffle': shuffle,
'seed': seed
}
if output_path:
flow_params.update({'save_to_dir': str(output_path)})
    # initialize the generator and augment all images in the dataset folder
directory_iterator = ImageDataGenerator(**aug_params).flow_from_directory(images_path, **flow_params)
# nr of batches we need to reach nr_images, using batch_size of 81
nr_batches = np.ceil(nr_images / 81).astype(int)
X = np.array([]) # matrix with resulting shape: (nr_images, height, width, 3)
y = np.array([]) # vector with resulting shape: (nr_images, 81)
# get all batches and concatenate them
for batch in np.arange(nr_batches):
X_batch, y_batch = next(directory_iterator)
if len(X) == 0:
X = X_batch
y = y_batch
else:
X = np.concatenate([X, X_batch])
y = np.concatenate([y, y_batch])
return X[0:nr_images], get_labels_from_iterator(directory_iterator, y[0:nr_images])
def get_card_data(df_meta, card_id):
return df_meta.iloc[int(card_id)]
def get_image_path(df_meta, card_id, train_val="validation"):
return dataset_path / train_val / str(card_id) / df_meta.iloc[int(card_id)].get('filename')
def get_feature_codes(df_meta, predictions):
feature_codes = ['color_code', 'shape_code', 'fill_code', 'number_code']
return np.array(list(map(lambda x: get_card_data(df_meta, x)[feature_codes], predictions))).astype(int)
def get_feature_labels(df_meta, predictions):
return list(map(lambda x: get_card_data(df_meta, x)['label'], predictions))
def load_metadata():
# dataframe with info on each card and variant
df_meta = pd.read_csv(csv_path)
# drop columns we don't need here
df_meta.drop(labels=['variant'], axis='columns', inplace=True)
# get unique card ids
df_meta.drop_duplicates(inplace=True, subset=['card_id'])
# set card_id as index
df_meta.set_index('card_id', drop=True, inplace=True)
# define features and values in desired sortorder
features = {
'color': ['red', 'green', 'purple'],
'shape': ['square', 'squiggle', 'round'],
'fill': ['solid', 'open', 'dotted'],
'number': ['one', 'two', 'three'],
}
# create a label for the card
df_meta['label'] = df_meta[features.keys()].apply(lambda x: ' '.join(x), axis='columns')
# create category codes for features
for feature, feature_values in features.items():
df_meta[feature] = pd.Categorical(df_meta[feature], ordered=True, categories=feature_values)
df_meta[f'{feature}_code'] = df_meta[feature].cat.codes
return df_meta
def load_dataset(target_size=(96, 128), nr_images=810, shuffle=True, output_path=None, preprocessing_func=None):
# augmentation parameters for training data
aug_params = {
'shear_range': 0.4,
'zoom_range': 0.4,
'rotation_range': 45,
'horizontal_flip': True,
'vertical_flip': True,
'brightness_range': (0.5, 1.0),
'fill_mode': 'constant',
}
# get training data
X_train, y_train = upsample_dataset(dataset_path / "train", nr_images=nr_images, aug_params=aug_params, target_size=target_size, batch_size=81, shuffle=shuffle, output_path=output_path)
# get validation data
X_val, y_val = upsample_dataset(dataset_path / "validation", nr_images=81, aug_params={}, target_size=target_size, batch_size=81, shuffle=shuffle, output_path=None)
if preprocessing_func is not None:
X_train = preprocessing_func(X_train)
X_val = preprocessing_func(X_val)
return X_train, y_train, X_val, y_val
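# A minimal usage sketch, assuming the 128x96 image folders and dataset.csv
# referenced above are present on disk:
#
#     df_meta = load_metadata()
#     X_train, y_train, X_val, y_val = load_dataset(nr_images=810)
#     print(X_train.shape)                           # e.g. (810, 96, 128, 3)
#     print(get_feature_labels(df_meta, y_val[:3]))  # human-readable card labels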
|
StarcoderdataPython
|
20157
|
import typing
from typing import Any
import json
import os
from multiprocessing import Process, Queue
from allennlp.data.tokenizers.word_splitter import SpacyWordSplitter
from spacy.tokenizer import Tokenizer
import spacy
from tqdm.auto import tqdm
import time
nlp = spacy.load("en")
class TokenizingWorker(Process):
def __init__(
self,
pbar: Any,
is_json: bool,
queue_in: Queue, # Queue where text comes in
queue_out: Queue, #Queue where tokens go
tokenizer_type: str = "just_spaces",
):
super(TokenizingWorker, self).__init__()
self.queue_in = queue_in
self.queue_out = queue_out
self.is_json = is_json
self.pbar = pbar
if tokenizer_type == "just_spaces":
tokenizer = SpacyWordSplitter()
self.tokenizer = lambda text: list(map(str, tokenizer.split_words(text)))
elif tokenizer_type == "spacy":
tokenizer = Tokenizer(nlp.vocab)
self.tokenizer = lambda text: list(map(str, tokenizer(text)))
def run(self):
for line in iter(self.queue_in.get, None):
if self.is_json:
text = json.loads(line)["text"]
else:
text = line
tokens = self.tokenizer(text)
while self.queue_out.full():
time.sleep(0.01)
            self.queue_out.put(" ".join(tokens), block=False)
self.pbar.update()
def multi_proc_data_loader(data_path: str, tokenizer_type: str = "just_spaces"):
num_processes = max(1, os.cpu_count() - 1)
queue_in = Queue()
queue_out = Queue(maxsize=10000)
    workers = []
    is_json = data_path.endswith(".jsonl") or data_path.endswith(".json")
    pbar = tqdm()
    for _ in range(num_processes):  # cpu_count() - 1 workers, so the main process keeps a core free
        worker = TokenizingWorker(
            pbar=pbar,
            is_json=is_json, queue_in=queue_in, queue_out=queue_out, tokenizer_type=tokenizer_type
)
workers.append(worker)
worker.start()
with (open(data_path, "r")) as f:
for line in f:
queue_in.put(line)
for worker in workers:
#ensure each worker gets a None which tells it to stop
queue_in.put(None)
    alive = any(map(lambda x: x.is_alive(), workers))
    res = []
    while alive:
        while not queue_out.empty():
            tokens = queue_out.get(block=False)
res.append(tokens)
alive = any(map(lambda x: x.is_alive(), workers))
if alive:
time.sleep(0.01)
return res
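# A minimal usage sketch; the corpus path below is illustrative:
#
#     docs = multi_proc_data_loader("corpus.jsonl", tokenizer_type="spacy")
#     print(len(docs), docs[0][:80])   # whitespace-joined tokens per document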
|
StarcoderdataPython
|
24740
|
<filename>python/brainvisa/maker/components_definition.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import sys
# groups:
# anatomist: projects needed for anatomist (aims, soma-io and dependencies)
# opensource
# brainvisa: public brainvisa distro
# bvdev: same as brainvisa but excludes restricted access projects
# (such as brainrat-private)
# standard: most useful projects. Includes internal, non-open projects,
# but not restricted ones (such as brainrat-private)
# cea: CEA (Neurospin/MirCen/SHFJ) distro including internal projects
# cati_platform: standard + CATI projects
# all: all projects except those really not useful
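# For example, once the list below is defined, the components that belong to
# the 'opensource' group can be collected like this (an illustrative sketch):
#     opensource = [name for _, section in components_definition
#                   for name, info in section['components']
#                   if 'opensource' in info.get('groups', [])]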
components_definition = [
('development', {
'components': [
['brainvisa-cmake', {
'groups': ['all', 'anatomist', 'opensource', 'brainvisa',
'bvdev', 'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/brainvisa-cmake.git branch:master','development/brainvisa-cmake/integration'),
'bug_fix': ('git https://github.com/brainvisa/brainvisa-cmake.git branch:master','development/brainvisa-cmake/master'),
'5.0': ('git https://github.com/brainvisa/brainvisa-cmake.git branch:5.0','development/brainvisa-cmake/5.0'),
},
}],
['casa-distro', {
'groups': ['all', 'anatomist', 'opensource', 'brainvisa',
'bvdev', 'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/casa-distro.git branch:master','development/casa-distro/integration'),
'bug_fix': ('git https://github.com/brainvisa/casa-distro.git branch:master','development/casa-distro/master'),
'5.0': ('git https://github.com/brainvisa/casa-distro.git branch:brainvisa-5.0','development/casa-distro/5.0'),
},
'build_model': 'pure_python',
}],
],
}),
('communication', {
'components': [
['web', {
'groups': ['all'],
'branches': {
'trunk': ('git https://bioproj.extra.cea.fr/git/brainvisa-commu/web.git branch:integration','communication/web/trunk'),
'bug_fix': ('git https://bioproj.extra.cea.fr/git/brainvisa-commu/web.git branch:master','communication/web/master'),
'5.0': ('git https://bioproj.extra.cea.fr/git/brainvisa-commu/web.git branch:master','communication/web/5.0'),
},
}],
],
}),
('brainvisa-share', {
'components': [
['brainvisa-share', {
'groups': ['all', 'anatomist', 'opensource', 'brainvisa',
'bvdev', 'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/brainvisa-share.git branch:master','brainvisa-share/integration'),
'bug_fix': ('git https://github.com/brainvisa/brainvisa-share.git branch:master','brainvisa-share/master'),
'5.0': ('git https://github.com/brainvisa/brainvisa-share.git branch:5.0','brainvisa-share/5.0'),
},
}],
],
}),
('soma', {
'description': 'Set of lower-level libraries for neuroimaging processing infrastructure',
'components': [
['soma-base', {
'groups': ['all', 'anatomist', 'opensource', 'brainvisa',
'bvdev', 'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/populse/soma-base.git branch:master','soma/soma-base/integration'),
'bug_fix': ('git https://github.com/populse/soma-base.git branch:master','soma/soma-base/master'),
'5.0': ('git https://github.com/populse/soma-base.git branch:5.0','soma/soma-base/5.0'),
},
}],
['soma-io', {
'groups': ['all', 'anatomist', 'opensource', 'brainvisa',
'bvdev', 'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/soma-io.git branch:master','soma/soma-io/integration'),
'bug_fix': ('git https://github.com/brainvisa/soma-io.git branch:master','soma/soma-io/master'),
'5.0': ('git https://github.com/brainvisa/soma-io.git branch:5.0','soma/soma-io/5.0'),
},
}],
['soma-workflow', {
'groups': ['all', 'opensource', 'brainvisa', 'bvdev',
'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/populse/soma-workflow.git branch:master','soma/soma-workflow/integration'),
'bug_fix': ('git https://github.com/populse/soma-workflow.git default:master','soma/soma-workflow/master'),
'5.0': ('git https://github.com/populse/soma-workflow.git branch:brainvisa-5.0','soma/soma-workflow/5.0'),
},
}],
],
}),
('populse', {
'components': [
['capsul', {
'groups': ['all', 'opensource', 'brainvisa', 'bvdev',
'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/populse/capsul.git branch:master','capsul/integration'),
'bug_fix': ('git https://github.com/populse/capsul.git default:master','capsul/master'),
'5.0': ('git https://github.com/populse/capsul.git branch:brainvisa-5.0','capsul/5.0'),
},
'build_model': 'pure_python',
}],
['populse_db', {
'groups': ['all', 'opensource', 'brainvisa', 'bvdev',
'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/populse/populse_db.git default:master','populse/populse_db/integration'),
'bug_fix': ('git https://github.com/populse/populse_db.git default:master','populse/populse_db/master'),
'5.0': ('git https://github.com/populse/populse_db.git branch:brainvisa-5.0','populse/populse_db/5.0'),
},
'build_model': 'pure_python',
}],
],
}),
('aims', {
'description': '3D/4D neuroimaging data manipulation and processing library and commands. Includes C++ libraries, command lines, and a Python API.',
'components': [
['aims-free', {
'groups': ['all', 'anatomist', 'opensource', 'brainvisa',
'bvdev', 'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/aims-free.git branch:master','aims/aims-free/integration'),
'bug_fix': ('git https://github.com/brainvisa/aims-free.git branch:master','aims/aims-free/master'),
'5.0': ('git https://github.com/brainvisa/aims-free.git branch:5.0','aims/aims-free/5.0'),
},
}],
['aims-gpl', {
'groups': ['all', 'anatomist', 'opensource', 'brainvisa',
'bvdev', 'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/aims-gpl.git branch:master','aims/aims-gpl/integration'),
'bug_fix': ('git https://github.com/brainvisa/aims-gpl.git branch:master','aims/aims-gpl/master'),
'5.0': ('git https://github.com/brainvisa/aims-gpl.git branch:5.0','aims/aims-gpl/5.0'),
},
}],
['aims-til', {
'groups': ['all', 'anatomist', 'opensource', 'brainvisa',
'bvdev', 'standard', 'cea', 'cati_platform'],
'branches': {
'5.0': ('git https://github.com/brainvisa/aims-til.git branch:5.0','aims/aims-til/5.0'),
},
}],
],
}),
('anatomist', {
'description': '3D/4D neuroimaging data viewer. Modular and versatile, Anatomist can display any kind of neuroimaging data (3D/4D images, meshes and textures, fiber tracts, and structured sets of objects such as cortical sulci), in an arbitrary number of views. Allows C++ and Python programming, both for plugins add-ons, as well as complete custom graphical applications design.',
'components': [
['anatomist-free', {
'groups': ['all', 'anatomist', 'opensource', 'brainvisa',
'bvdev', 'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/anatomist-free.git branch:master','anatomist/anatomist-free/integration'),
'bug_fix': ('git https://github.com/brainvisa/anatomist-free.git branch:master','anatomist/anatomist-free/master'),
'5.0': ('git https://github.com/brainvisa/anatomist-free.git branch:5.0','anatomist/anatomist-free/5.0'),
},
}],
['anatomist-gpl', {
'groups': ['all', 'anatomist', 'opensource', 'brainvisa',
'bvdev', 'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/anatomist-gpl.git branch:master','anatomist/anatomist-gpl/integration'),
'bug_fix': ('git https://github.com/brainvisa/anatomist-gpl.git branch:master','anatomist/anatomist-gpl/master'),
'5.0': ('git https://github.com/brainvisa/anatomist-gpl.git branch:5.0','anatomist/anatomist-gpl/5.0'),
},
}],
],
}),
('axon', {
'description': 'Axon organizes processing, pipelining, and data management for neuroimaging. It works both as a graphical user interface or batch and programming interfaces, and allows transparent processing distribution on a computing resource.',
'components': [
['axon', {
'groups': ['all', 'opensource', 'brainvisa', 'bvdev',
'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/axon.git branch:master','axon/integration'),
'bug_fix': ('git https://github.com/brainvisa/axon.git branch:master','axon/master'),
'5.0': ('git https://github.com/brainvisa/axon.git branch:5.0','axon/5.0'),
},
}],
],
}),
('brainvisa-spm', {
'description': 'Python module and Axon toolbox for SPM.',
'components': [
['brainvisa-spm', {
'groups': ['all', 'opensource', 'brainvisa', 'bvdev',
'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/brainvisa-spm.git branch:integration','brainvisa-spm/integration'),
'bug_fix': ('git https://github.com/brainvisa/brainvisa-spm.git branch:master','brainvisa-spm/master'),
'5.0': ('git https://github.com/brainvisa/brainvisa-spm.git branch:5.0','brainvisa-spm/5.0'),
},
}],
],
}),
('datamind', {
'description': 'Statistics, data mining, machine learning [OBSOLETE].',
'components': [
['datamind', {
'groups': ['all', 'opensource', 'brainvisa', 'bvdev',
'standard', 'cea', 'cati_platform'],
'branches': {
'5.0': ('svn https://bioproj.extra.cea.fr/neurosvn/brainvisa/datamind/branches/5.0','datamind/5.0'),
},
}],
],
}),
('highres-cortex', {
'description': 'Process 3D images of the cerebral cortex at a sub-millimetre scale',
'components': [
['highres-cortex', {
'groups': ['all', 'opensource', 'brainvisa', 'bvdev',
'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/neurospin/highres-cortex.git branch:master','highres-cortex/integration'),
'bug_fix': ('git https://github.com/neurospin/highres-cortex.git default:master','highres-cortex/master'),
'5.0': ('git https://github.com/neurospin/highres-cortex.git branch:5.0','highres-cortex/5.0'),
},
}],
],
}),
('morphologist', {
'description': 'Anatomical MRI (T1) analysis toolbox, featuring cortex and sulci segmentation, and sulci analysis tools, by the <a href="http://lnao.fr">LNAO team</a>.',
'components': [
['morphologist-nonfree', {
'groups': ['all', 'brainvisa', 'bvdev', 'standard', 'cea',
'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/morphologist-nonfree.git branch:integration','morphologist/morphologist-nonfree/integration'),
'bug_fix': ('git https://github.com/brainvisa/morphologist-nonfree.git branch:master','morphologist/morphologist-nonfree/master'),
'5.0': ('git https://github.com/brainvisa/morphologist-nonfree.git branch:5.0','morphologist/morphologist-nonfree/5.0'),
},
}],
['morphologist-gpl', {
'groups': ['all', 'opensource', 'brainvisa', 'bvdev',
'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/morphologist-gpl.git branch:integration','morphologist/morphologist-gpl/integration'),
'bug_fix': ('git https://github.com/brainvisa/morphologist-gpl.git branch:master','morphologist/morphologist-gpl/master'),
'5.0': ('git https://github.com/brainvisa/morphologist-gpl.git branch:5.0','morphologist/morphologist-gpl/5.0'),
},
}],
['morphologist-baby', {
'groups': ['all', 'standard', 'cea'],
'branches': {
'trunk': ('git https://bioproj.extra.cea.fr/git/brainvisa-t1mri/morphologist-baby.git branch:integration','morphologist/morphologist-baby/integration'),
'bug_fix': ('git https://bioproj.extra.cea.fr/git/brainvisa-t1mri/morphologist-baby.git branch:master','morphologist/morphologist-baby/master'),
'5.0': ('git https://bioproj.extra.cea.fr/git/brainvisa-t1mri/morphologist-baby.git branch:5.0','morphologist/morphologist-baby/5.0'),
},
}],
['tms', {
'groups': ['all'],
'branches': {
},
}],
['sulci-data', {
'groups': [],
'branches': {
'trunk': ('svn https://bioproj.extra.cea.fr/neurosvn/brainvisa/morphologist/sulci-data/trunk','morphologist/sulci-data/trunk'),
'bug_fix': ('svn https://bioproj.extra.cea.fr/neurosvn/brainvisa/morphologist/sulci-data/trunk','morphologist/sulci-data/bug_fix'),
},
}],
['sulci-nonfree', {
'groups': ['all', 'brainvisa', 'bvdev', 'standard', 'cea',
'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/sulci-nonfree.git branch:integration','morphologist/sulci-nonfree/integration'),
'bug_fix': ('git https://github.com/brainvisa/sulci-nonfree.git branch:master','morphologist/sulci-nonfree/master'),
'5.0': ('git https://github.com/brainvisa/sulci-nonfree.git branch:5.0','morphologist/sulci-nonfree/5.0'),
},
}],
['morphologist-ui', {
'groups': ['all', 'opensource', 'brainvisa', 'bvdev',
'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/morphologist.git branch:master', 'morphologist/morphologist-ui/integration'),
'bug_fix': ('git https://github.com/brainvisa/morphologist.git default:master', 'morphologist/morphologist-ui/master'),
'5.0': ('git https://github.com/brainvisa/morphologist.git branch:5.0', 'morphologist/morphologist-ui/5.0'),
},
}],
['morpho-deepsulci', {
'groups': ['all', 'opensource', 'brainvisa', 'bvdev',
'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/morpho-deepsulci.git branch:master', 'morphologist/morpho-deepsulci/integration'),
'bug_fix': ('git https://github.com/brainvisa/morpho-deepsulci.git default:master', 'morphologist/morpho-deepsulci/master'),
'5.0': ('git https://github.com/brainvisa/morpho-deepsulci.git branch:5.0', 'morphologist/morpho-deepsulci/5.0'),
},
}],
],
}),
('brainrat', {
'description': 'Ex vivo 3D reconstruction and analysis toolbox, from the <a href="http://www-dsv.cea.fr/dsv/instituts/institut-d-imagerie-biomedicale-i2bm/services/mircen-mircen/unite-cnrs-ura2210-lmn/fiches-thematiques/traitement-et-analyse-d-images-biomedicales-multimodales-du-cerveau-normal-ou-de-modeles-precliniques-de-maladies-cerebrales">BioPICSEL CEA team</a>. Homepage: <a href="http://brainvisa.info/doc/brainrat-gpl/brainrat_man/en/html/index.html">http://brainvisa.info/doc/brainrat-gpl/brainrat_man/en/html/index.html</a>',
'components': [
['brainrat-gpl', {
'groups': ['all', 'brainvisa', 'cea'],
'branches': {
'trunk': ('git https://bioproj.extra.cea.fr/git/brainrat-gpl branch:master', 'brainrat/brainrat-gpl/integration'),
'bug_fix': ('git https://bioproj.extra.cea.fr/git/brainrat-gpl branch:master', 'brainrat/brainrat-gpl/master'),
'5.0': ('git https://bioproj.extra.cea.fr/git/brainrat-gpl branch:5.0', 'brainrat/brainrat-gpl/5.0'),
},
}],
['brainrat-private', {
'groups': ['all', 'brainvisa', 'cea'],
'branches': {
'trunk': ('git https://bioproj.extra.cea.fr/git/brainrat-private branch:master', 'brainrat/brainrat-private/integration'),
'bug_fix': ('git https://bioproj.extra.cea.fr/git/brainrat-private branch:master', 'brainrat/brainrat-private/master'),
'5.0': ('git https://bioproj.extra.cea.fr/git/brainrat-private branch:5.0', 'brainrat/brainrat-private/5.0'),
},
}],
['bioprocessing', {
'groups': ['all', 'cea'],
'branches': {
'trunk': ('git https://bioproj.extra.cea.fr/git/bioprocessing branch:master', 'brainrat/bioprocessing/integration'),
'bug_fix': ('git https://bioproj.extra.cea.fr/git/bioprocessing branch:master', 'brainrat/bioprocessing/master'),
'5.0': ('git https://bioproj.extra.cea.fr/git/bioprocessing branch:5.0', 'brainrat/bioprocessing/5.0'),
},
}],
['preclinical-imaging-iam', {
'groups': ['all'],
'branches': {
'trunk': ('git https://bioproj.extra.cea.fr/git/preclinical-imaging-iam branch:master', 'brainrat/preclinical-imaging-iam/integration'),
'bug_fix': ('git https://bioproj.extra.cea.fr/git/preclinical-imaging-iam branch:master', 'brainrat/preclinical-imaging-iam/master'),
},
}],
['primatologist-gpl', {
'groups': ['all', 'brainvisa', 'cea'],
'branches': {
'trunk': ('git https://bioproj.extra.cea.fr/git/primatologist-gpl branch:master', 'brainrat/primatologist-gpl/integration'),
'bug_fix': ('git https://bioproj.extra.cea.fr/git/primatologist-gpl branch:master', 'brainrat/primatologist-gpl/master'),
'5.0': ('git https://bioproj.extra.cea.fr/git/primatologist-gpl branch:5.0', 'brainrat/primatologist-gpl/5.0'),
},
}],
['3dns-private', {
'groups': ['3dns'],
'branches': {
'trunk': ('git https://bioproj.extra.cea.fr/git/3dns-private branch:master', 'brainrat/3dns-private/integration'),
'bug_fix': ('git https://bioproj.extra.cea.fr/git/3dns-private branch:master', 'brainrat/3dns-private/master'),
'5.0': ('git https://bioproj.extra.cea.fr/git/3dns-private branch:5.0', 'brainrat/3dns-private/5.0'),
},
}],
],
}),
('constellation', {
'components': [
['constellation-gpl', {
'groups': ['all', 'cea'],
'branches': {
'trunk': ('git https://github.com/brainvisa/constellation-gpl.git branch:integration','constellation/constellation-gpl/integration'),
'bug_fix': ('git https://github.com/brainvisa/constellation-gpl.git branch:master','constellation/constellation-gpl/master'),
'5.0': ('git https://github.com/brainvisa/constellation-gpl.git branch:5.0','constellation/constellation-gpl/5.0'),
},
}],
['constellation-nonfree', {
'groups': ['all', 'cea'],
'branches': {
'trunk': ('git https://github.com/brainvisa/constellation-nonfree.git branch:integration','constellation/constellation-nonfree/integration'),
'bug_fix': ('git https://github.com/brainvisa/constellation-nonfree.git branch:master','constellation/constellation-nonfree/master'),
'5.0': ('git https://github.com/brainvisa/constellation-nonfree.git branch:5.0','constellation/constellation-nonfree/5.0'),
},
}],
],
}),
('cortical_surface', {
'description': 'Cortex-based surfacic parameterization and analysis toolbox from the <a href="http://www.lsis.org">LSIS team</a>. Homepage: <a href="http://olivier.coulon.perso.esil.univmed.fr/brainvisa.html">http://olivier.coulon.perso.esil.univmed.fr/brainvisa.html</a>.<br/>Also contains the FreeSurfer toolbox for BrainVisa, by the LNAO team.',
'components': [
['cortical_surface-nonfree', {
'groups': ['all', 'brainvisa', 'bvdev', 'standard', 'cea',
'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/cortical_surface-nonfree.git branch:integration','cortical_surface/cortical_surface-nonfree/integration'),
'bug_fix': ('git https://github.com/brainvisa/cortical_surface-nonfree.git branch:master','cortical_surface/cortical_surface-nonfree/master'),
'5.0': ('git https://github.com/brainvisa/cortical_surface-nonfree.git branch:5.0','cortical_surface/cortical_surface-nonfree/5.0'),
},
}],
['cortical_surface-gpl', {
'groups': ['all', 'opensource', 'brainvisa', 'bvdev',
'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/cortical_surface-gpl.git branch:integration','cortical_surface/cortical_surface-gpl/integration'),
'bug_fix': ('git https://github.com/brainvisa/cortical_surface-gpl.git branch:master','cortical_surface/cortical_surface-gpl/master'),
'5.0': ('git https://github.com/brainvisa/cortical_surface-gpl.git branch:5.0','cortical_surface/cortical_surface-gpl/5.0'),
},
}],
['brainvisa_freesurfer', {
'groups': ['all', 'opensource', 'brainvisa', 'bvdev',
'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/brainvisa_freesurfer.git branch:integration','cortical_surface/brainvisa_freesurfer/integration'),
'bug_fix': ('git https://github.com/brainvisa/brainvisa_freesurfer.git branch:master','cortical_surface/brainvisa_freesurfer/master'),
'5.0': ('git https://github.com/brainvisa/brainvisa_freesurfer.git branch:5.0','cortical_surface/brainvisa_freesurfer/5.0'),
},
}],
],
}),
('nuclear_imaging', {
'components': [
['nuclear_imaging-gpl', {
'groups': ['all', 'cati_platform'],
'branches': {
'bug_fix': ('git https://github.com/cati-neuroimaging/nuclear_imaging-gpl.git branch:master','nuclear_imaging/nuclear_imaging-gpl/master'),
'5.0': ('git https://github.com/cati-neuroimaging/nuclear_imaging-gpl.git branch:5.0','nuclear_imaging/nuclear_imaging-gpl/5.0'),
},
}],
['nuclear_imaging-nonfree', {
'groups': ['all', 'cati_platform'],
'branches': {
'bug_fix': ('git https://github.com/cati-neuroimaging/nuclear_imaging-nonfree.git branch:master','nuclear_imaging/nuclear_imaging-nonfree/master'),
'5.0': ('git https://github.com/cati-neuroimaging/nuclear_imaging-nonfree.git branch:5.0','nuclear_imaging/nuclear_imaging-nonfree/5.0'),
},
}],
],
}),
('snapbase', {
'components': [
['snapbase', {
'groups': ['all', 'opensource', 'brainvisa', 'bvdev',
'standard', 'cea', 'cati_platform'],
'branches': {
'5.0': ('svn https://bioproj.extra.cea.fr/neurosvn/brainvisa/snapbase/branches/5.0','snapbase/5.0'),
},
}],
],
}),
('catidb', {
'components': [
['catidb-client', {
'groups': ['cati_platform'],
'branches': {
'bug_fix': ('git https://github.com/cati-neuroimaging/catidb-client.git default:main', 'catidb-client'),
},
}],
],
}),
('sacha', {
'components': [
['sacha-nonfree', {
'groups': ['all', 'catidb3_all', 'cati_platform'],
'branches': {
'bug_fix': ('git https://github.com/cati-neuroimaging/sacha-nonfree.git branch:master', 'sacha-nonfree/master'),
},
}],
['sacha-gpl', {
'groups': ['all', 'catidb3_all', 'cati_platform'],
'branches': {
'bug_fix': ('git https://github.com/cati-neuroimaging/sacha-gpl.git branch:master', 'sacha-gpl/master'),
},
}],
],
}),
('whasa', {
'components': [
['whasa-nonfree', {
'groups': ['all', 'catidb3_all', 'cati_platform'],
'branches': {
'bug_fix': ('git https://github.com/cati-neuroimaging/whasa-nonfree.git branch:master', 'whasa-nonfree/master'),
},
}],
['whasa-gpl', { # Experimental branch to propose a new organization
'groups': ['all', 'catidb3_all', 'cati_platform'],
'branches': {
'bug_fix': ('git https://github.com/cati-neuroimaging/whasa-gpl.git branch:master', 'whasa-gpl/master'),
},
}],
],
}),
('longitudinal_pipelines', {
'components': [
['longitudinal_pipelines', {
'groups': ['all', 'cati_platform'],
'branches': {
'bug_fix': ('git https://github.com/cati-neuroimaging/longitudinal_pipelines.git branch:master',
'longitudinal_pipelines/master'),
'5.0': ('git https://github.com/cati-neuroimaging/longitudinal_pipelines.git branch:5.0',
'longitudinal_pipelines/5.0'),
},
}],
],
}),
('disco', {
'components': [
['disco', {
'groups': ['all', 'cea'],
'branches': {
'trunk': ('git https://bioproj.extra.cea.fr/git/brainvisa-disco branch:master', 'disco/integration'),
'bug_fix': ('git https://bioproj.extra.cea.fr/git/brainvisa-disco branch:master', 'disco/master'),
'5.0': ('git https://bioproj.extra.cea.fr/git/brainvisa-disco branch:5.0', 'disco/5.0'),
},
}],
],
}),
('qualicati', {
'components': [
['qualicati', {
'groups': ['cati_platform'],
'branches': {
'bug_fix': ('git https://github.com/cati-neuroimaging/qualicati.git default:main', 'qualicati'),
},
'build_model': 'pure_python',
}],
],
}),
('fmri', {
'description': 'Functional MRI processing toolboxes.',
'components': [
['rsfmri', {
'groups': ['all', 'cati_platform'],
'branches': {
'bug_fix': ('git https://github.com/cati-neuroimaging/rsfmri.git branch:master','rsfmri/master'),
},
'build_model': 'pure_python',
}],
],
}),
]
customize_components_definition = [os.path.expanduser('~/.brainvisa/components_definition.py')]
if 'BV_MAKER_BUILD' in os.environ:
customize_components_definition.append(os.path.join(os.environ['BV_MAKER_BUILD'], 'components_definition.py'))
for ccd in customize_components_definition:
if os.path.exists(ccd):
with open(ccd) as f:
exec(compile(f.read(), ccd, 'exec'))
# allow branches to be named master or bug_fix indistinctly, and integration or trunk
for cgroup in components_definition:
for comp in cgroup[1]['components']:
branches = comp[1]['branches']
if 'bug_fix' in branches and 'master' not in branches:
branches['master'] = branches['bug_fix']
elif 'master' in branches and 'bug_fix' not in branches:
branches['bug_fix'] = branches['master']
if 'trunk' in branches and 'integration' not in branches:
branches['integration'] = branches['trunk']
elif 'integration' in branches and 'trunk' not in branches:
branches['trunk'] = branches['integration']
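# Hedged helper sketch (not part of the original file): resolves the repository
# directive declared for a given component and branch, relying on the branch-name
# aliases filled in by the loop above.
def find_component_branch(component_name, branch):
    for _project, project_info in components_definition:
        for comp_name, comp_info in project_info['components']:
            if comp_name == component_name:
                return comp_info['branches'].get(branch)
    return None
# e.g. find_component_branch('axon', 'master')
# -> ('git https://github.com/brainvisa/axon.git branch:master', 'axon/master')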
|
StarcoderdataPython
|
3204800
|
import csvreader
import numpy as np
import matplotlib.pyplot as plt
header, player_data = csvreader.csv_reader_with_headers('../data/Player_full_data.csv')
for player in player_data:
plt.style.use('ggplot')
values = [float(player[5]), float(player[10]), float(player[14]), float(player[18])]
feature = ['Body', 'Defense', 'Pass', 'Shot']
N = len(values)
angles = np.linspace(0, 2*np.pi, N, endpoint=False)
values = np.concatenate((values, [values[0]]))
angles = np.concatenate((angles, [angles[0]]))
fig = plt.figure()
ax = fig.add_subplot(111, polar=True)
ax.plot(angles, values, 'o-', linewidth=2)
ax.fill(angles, values, 'r', alpha=0.5)
    ax.set_thetagrids(angles[:-1]*180/np.pi, feature)  # drop the duplicated closing angle so tick and label counts match
ax.set_ylim(0, 100)
name = player[0].split('_')[1]
plt.title(name + ' Score')
ax.grid(True)
plt.savefig('../pics/' + player[0] + '_score.png')
plt.show()
|
StarcoderdataPython
|
1733900
|
<gh_stars>0
"""
The Logging Utility Module
"""
import logging
class BraceMessage:
def __init__(self, fmt, *args, **kwargs):
self.fmt = fmt
self.args = args
self.kwargs = kwargs
def __str__(self):
return self.fmt.format(*self.args, **self.kwargs)
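# Hedged usage sketch (not part of the original module): BraceMessage defers
# str.format() until the record is actually emitted, so formatting work is only
# done when the log level is enabled.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger(__name__)
    log.info(BraceMessage("processed {count} items in {elapsed:.2f}s", count=42, elapsed=0.1234))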
|
StarcoderdataPython
|
1736519
|
import numpy as np
from frameworks.CPLELearning import CPLELearningModel
from frameworks.SelfLearning import SelfLearningModel
from methods.scikitWQDA import WQDA
from examples.plotutils import evaluate_and_plot
# number of data points
N = 60
supervised_data_points = 4
# generate data
meandistance = 1
s = np.random.random()
cov = [[s, 0], [0, s]]
Xs = np.random.multivariate_normal([-s*meandistance, -s*meandistance], cov, (N,))
Xs = np.vstack(( Xs, np.random.multivariate_normal([s*meandistance, s*meandistance], cov, (N,)) ))
ytrue = np.array([0]*N + [1]*N)
ys = np.array([-1]*(2*N))
for i in range(supervised_data_points//2):
    ys[np.random.randint(0, N)] = 0
for i in range(supervised_data_points//2):
    ys[np.random.randint(N, 2*N)] = 1
Xsupervised = Xs[ys!=-1, :]
ysupervised = ys[ys!=-1]
# compare models
lbl = "Purely supervised QDA:"
print(lbl)
model = WQDA()
model.fit(Xsupervised, ysupervised)
evaluate_and_plot(model, Xs, ys, ytrue, lbl, 1)
lbl = "SelfLearning QDA:"
print(lbl)
model = SelfLearningModel(WQDA())
model.fit(Xs, ys)
evaluate_and_plot(model, Xs, ys, ytrue, lbl, 2)
lbl = "CPLE(pessimistic) QDA:"
print(lbl)
model = CPLELearningModel(WQDA(), predict_from_probabilities=True)
model.fit(Xs, ys)
evaluate_and_plot(model, Xs, ys, ytrue, lbl, 3)
lbl = "CPLE(optimistic) QDA:"
print(lbl)
CPLELearningModel.pessimistic = False
model = CPLELearningModel(WQDA(), predict_from_probabilities=True)
model.fit(Xs, ys)
evaluate_and_plot(model, Xs, ys, ytrue, lbl, 4, block=True)
|
StarcoderdataPython
|
1752418
|
<gh_stars>10-100
# vim: set encoding=utf-8
"""Some common combinations"""
import string
from pyparsing import Empty, FollowedBy, LineEnd, Literal, OneOrMore, Optional
from pyparsing import Suppress, SkipTo, Word, ZeroOrMore
from regparser.grammar import atomic
from regparser.grammar.utils import keep_pos, Marker
period_section = Suppress(".") + atomic.section
part_section = atomic.part + period_section
marker_part_section = (
atomic.section_marker.copy().setParseAction(keep_pos).setResultsName(
"marker")
+ part_section)
depth6_p = atomic.em_roman_p | atomic.plaintext_level6_p
depth5_p = (
(atomic.em_digit_p | atomic.plaintext_level5_p)
+ Optional(depth6_p))
depth4_p = atomic.upper_p + Optional(depth5_p)
depth3_p = atomic.roman_p + Optional(depth4_p)
depth2_p = atomic.digit_p + Optional(depth3_p)
depth1_p = atomic.lower_p + ~FollowedBy(atomic.upper_p) + Optional(depth2_p)
any_depth_p = depth1_p | depth2_p | depth3_p | depth4_p | depth5_p | depth6_p
depth3_c = atomic.upper_c + Optional(atomic.em_digit_c)
depth2_c = atomic.roman_c + Optional(depth3_c)
depth1_c = atomic.digit_c + Optional(depth2_c)
any_a = atomic.upper_a | atomic.digit_a
section_comment = atomic.section + depth1_c
section_paragraph = atomic.section + depth1_p
mps_paragraph = marker_part_section + Optional(depth1_p)
ps_paragraph = part_section + Optional(depth1_p)
part_section_paragraph = (
atomic.part + Suppress(".") + atomic.section + depth1_p)
m_section_paragraph = (
atomic.paragraph_marker.copy().setParseAction(
keep_pos).setResultsName("marker")
+ atomic.section
+ depth1_p)
marker_paragraph = (
(atomic.paragraph_marker | atomic.paragraphs_marker).setParseAction(
keep_pos).setResultsName("marker")
+ depth1_p)
def appendix_section(match):
"""Appendices may have parenthetical paragraphs in its section number."""
if match.appendix_digit:
lst = list(match)
pars = lst[lst.index(match.appendix_digit) + 1:]
section = match.appendix_digit
if pars:
section += '(' + ')('.join(el for el in pars) + ')'
return section
else:
return None
appendix_with_section = (
atomic.appendix
+ '-'
+ (atomic.appendix_digit
+ ZeroOrMore(atomic.lower_p | atomic.roman_p | atomic.digit_p
| atomic.upper_p)
).setParseAction(appendix_section).setResultsName("appendix_section"))
# "the" appendix implies there's only one, so it better be appendix A
section_of_appendix_to_this_part = (
atomic.section_marker
+ atomic.upper_roman_a.copy().setResultsName("appendix_section")
+ Literal("of the appendix to this part").setResultsName(
"appendix").setParseAction(lambda: 'A')
)
appendix_par_of_part = (
atomic.paragraph_marker.copy().setParseAction(keep_pos).setResultsName(
"marker")
+ (Word(string.ascii_uppercase) | Word(string.digits))
+ Optional(any_a) + Optional(any_a)
+ Suppress(".")
+ Marker("of") + Marker("part")
+ atomic.upper_roman_a)
appendix_with_part = (
atomic.appendix_marker.copy().setParseAction(keep_pos).setResultsName(
"marker")
+ atomic.appendix
+ Suppress(",") + Marker('part')
+ atomic.upper_roman_a
+ Optional(any_a) + Optional(any_a) + Optional(any_a))
marker_appendix = (
atomic.appendix_marker.copy().setParseAction(keep_pos).setResultsName(
"marker")
+ (appendix_with_section | atomic.appendix))
marker_part = (
atomic.part_marker.copy().setParseAction(keep_pos).setResultsName("marker")
+ atomic.part)
marker_subpart = (
atomic.subpart_marker.copy().setParseAction(keep_pos).setResultsName(
"marker")
+ atomic.subpart)
marker_subpart_title = (
atomic.subpart_marker.copy().setParseAction(keep_pos).setResultsName(
"marker")
+ atomic.subpart
+ ((Suppress(Literal(u"—"))
+ SkipTo(LineEnd()).setResultsName("subpart_title"))
| (Literal("[Reserved]").setResultsName("subpart_title")))
)
marker_comment = (
atomic.comment_marker.copy().setParseAction(keep_pos).setResultsName(
"marker")
+ (section_comment | section_paragraph | ps_paragraph | mps_paragraph)
+ Optional(depth1_c)
)
_inner_non_comment = (
any_depth_p
| (part_section + Optional(depth1_p))
| (atomic.section + depth1_p)
| appendix_with_section | marker_appendix)
_inner_non_comment_tail = OneOrMore(
Optional(Suppress('('))
+ atomic.conj_phrases
+ _inner_non_comment.copy().setParseAction(keep_pos).setResultsName(
"tail", listAllMatches=True)
+ Optional(Suppress(')')))
multiple_non_comments = (
(atomic.paragraphs_marker | atomic.paragraph_marker
| atomic.sections_marker | atomic.section_marker)
+ _inner_non_comment.copy().setParseAction(keep_pos).setResultsName("head")
+ _inner_non_comment_tail)
multiple_section_paragraphs = (
section_paragraph.copy().setParseAction(keep_pos).setResultsName("head")
+ _inner_non_comment_tail)
multiple_period_sections = (
atomic.sections_marker
+ part_section.copy().setParseAction(keep_pos).setResultsName("head")
+ OneOrMore(
atomic.conj_phrases
+ period_section.copy().setParseAction(keep_pos).setResultsName(
"tail", listAllMatches=True)))
multiple_appendix_section = (
appendix_with_section.copy().setParseAction(keep_pos).setResultsName(
"head")
+ OneOrMore(
Optional(Suppress('('))
+ atomic.conj_phrases
+ _inner_non_comment.copy().setParseAction(keep_pos).setResultsName(
"tail", listAllMatches=True)
+ Optional(Suppress(')'))))
# Use "Empty" so we don't rename atomic.appendix
multiple_appendices = (
atomic.appendices_marker
+ (atomic.appendix + Empty()).setParseAction(keep_pos).setResultsName(
"head")
+ OneOrMore(
atomic.conj_phrases
+ (atomic.appendix + Empty()).setParseAction(keep_pos).setResultsName(
"tail", listAllMatches=True)))
multiple_comments = (
(atomic.comments_marker | atomic.comment_marker)
+ (Optional(atomic.section_marker)
+ _inner_non_comment
+ Optional(depth1_c)).setParseAction(keep_pos).setResultsName("head")
+ OneOrMore(
Optional(Suppress('('))
+ atomic.conj_phrases
+ ((_inner_non_comment + Optional(depth1_c))
| depth1_c).setParseAction(keep_pos).setResultsName(
"tail", listAllMatches=True)
+ Optional(Suppress(')'))))
# e.g. 12 CFR 1005.10
internal_cfr_p = (
atomic.title
+ Suppress("CFR")
+ atomic.part
+ Suppress('.')
+ atomic.section
+ Optional(depth1_p))
# e.g. 12 CFR 1005.10, 1006.21, and 1010.10
multiple_cfr_p = (
internal_cfr_p.copy().setParseAction(keep_pos).setResultsName("head")
+ OneOrMore(
atomic.conj_phrases
+ (atomic.part
+ Suppress('.')
+ atomic.section
+ Optional(depth1_p)).setParseAction(keep_pos).setResultsName(
"tail", listAllMatches=True)))
notice_cfr_p = (
Suppress(atomic.title)
+ Suppress("CFR")
+ Optional(Suppress(atomic.part_marker | atomic.parts_marker))
+ OneOrMore(
atomic.part
+ Optional(Suppress(','))
+ Optional(Suppress('and'))
)
)
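# Hedged usage note (not part of the original module). Assuming the atomic.*
# terminals match the plain-text forms their names suggest, these grammars are
# driven through pyparsing's parseString/scanString, e.g.:
#     result = internal_cfr_p.parseString("12 CFR 1005.10(b)")
#     for tokens, start, end in multiple_cfr_p.scanString(text):
#         head, tail = tokens.head, tokens.tail  # named results set above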
|
StarcoderdataPython
|
104380
|
<reponame>daisuke-fujita/monsaca-analytics_20181107
#!/usr/bin/env python
# Copyright (c) 2016 Hewlett Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import logging
from monasca_analytics.component import base
logger = logging.getLogger(__name__)
class BaseIngestor(base.BaseComponent):
"""Base class for all the Ingestor modules"""
def __init__(self, _id, _config):
"""Constructor with ID and configuration
:type _id: str
:param _id: ID assigned to this component
:type _config: dict
:param _config: configuration of this component
"""
self._features = None
super(BaseIngestor, self).__init__(_id, _config)
@abc.abstractmethod
def map_dstream(self, dstream):
"""Transforms the data provided by a dstream to another dstream
Abstract method to be implemented by BaseIngestor children.
The processed dstream should be returned.
:type dstream: pyspark.streaming.DStream
:param dstream: stream of data before being processed
:rtype: pyspark.streaming.DStream
:returns: stream of data after being processed
"""
pass
def set_feature_list(self, features):
"""Set the list of features
:type features: list[str]
:param features: List of features names (when extracted from the data)
"""
self._features = features
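# Hedged sketch (not part of the original module): a trivial concrete ingestor that
# forwards the stream unchanged. The real base.BaseComponent may declare additional
# abstract methods (e.g. configuration validation) that a usable subclass must also
# implement before it can be instantiated.
class PassThroughIngestor(BaseIngestor):
    def map_dstream(self, dstream):
        # identity mapping: hand the DStream to the next pipeline stage untouched
        return dstream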
|
StarcoderdataPython
|
3312575
|
<filename>patron/verify/api.py
# Copyright 2015 OpenStack.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all requests to the verify service."""
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from patron import baserpc
from patron.verify import manager
from patron.verify import rpcapi
from patron.i18n import _LI, _LW
from patron import utils
verify_opts = [
cfg.BoolOpt('use_local',
default=False,
help='Perform patron-verify operations locally'),
cfg.StrOpt('topic',
default='verify',
help='The topic on which verify nodes listen'),
cfg.StrOpt('manager',
default='patron.verify.manager.VerifyManager',
help='Full class name for the Manager for verify'),
cfg.IntOpt('workers',
help='Number of workers for OpenStack Verify service. '
'The default will be the number of CPUs available.')
]
verify_group = cfg.OptGroup(name='verify',
title='Verify Options')
CONF = cfg.CONF
CONF.register_group(verify_group)
CONF.register_opts(verify_opts, verify_group)
LOG = logging.getLogger(__name__)
class LocalAPI(object):
"""A local version of the verify API that does database updates
locally instead of via RPC.
"""
def __init__(self):
# TODO(danms): This needs to be something more generic for
# other/future users of this sort of functionality.
self._manager = utils.ExceptionHelper(manager.VerifyManager())
def verify(self, context, op, target, bypass):
return self._manager.verify(
context, op, target, bypass)
class API(LocalAPI):
"""Verify API that does updates via RPC to the VerifyManager."""
def __init__(self):
self._manager = rpcapi.VerifyAPI()
self.base_rpcapi = baserpc.BaseAPI(topic=CONF.verify.topic)
def verify(self, context, op, target, bypass):
return self._manager.verify(
context, op, target, bypass)
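# Hedged usage note (not part of the original module): callers would typically pick
# the local or RPC implementation based on the [verify]/use_local option, e.g.
#     verify_api = LocalAPI() if CONF.verify.use_local else API()
#     verify_api.verify(context, op, target, bypass)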
|
StarcoderdataPython
|
3343758
|
import numpy as np
from torch.autograd import Variable, Function
import torch
import types
class VanillaGradExplainer(object):
def __init__(self, model):
self.model = model
def _backprop(self, inp, ind):
output = self.model(inp)
if ind is None:
ind = output.data.max(1)[1]
grad_out = output.data.clone()
grad_out.fill_(0.0)
grad_out.scatter_(1, ind.unsqueeze(0).t(), 1.0)
output.backward(grad_out)
return inp.grad.data
def explain(self, inp, ind=None):
return self._backprop(inp, ind)
class GradxInputExplainer(VanillaGradExplainer):
def __init__(self, model):
super(GradxInputExplainer, self).__init__(model)
def explain(self, inp, ind=None):
grad = self._backprop(inp, ind)
return inp.data * grad
class SaliencyExplainer(VanillaGradExplainer):
def __init__(self, model):
super(SaliencyExplainer, self).__init__(model)
def explain(self, inp, ind=None):
grad = self._backprop(inp, ind)
return grad.abs()
class IntegrateGradExplainer(VanillaGradExplainer):
def __init__(self, model, steps=100):
super(IntegrateGradExplainer, self).__init__(model)
self.steps = steps
def explain(self, inp, ind=None):
grad = 0
inp_data = inp.data.clone()
for alpha in np.arange(1 / self.steps, 1.0, 1 / self.steps):
new_inp = Variable(inp_data * alpha, requires_grad=True)
g = self._backprop(new_inp, ind)
grad += g
return grad * inp_data / self.steps
class DeconvExplainer(VanillaGradExplainer):
def __init__(self, model):
super(DeconvExplainer, self).__init__(model)
self._override_backward()
def _override_backward(self):
class _ReLU(Function):
@staticmethod
def forward(ctx, input):
output = torch.clamp(input, min=0)
return output
@staticmethod
def backward(ctx, grad_output):
grad_inp = torch.clamp(grad_output, min=0)
return grad_inp
def new_forward(self, x):
return _ReLU.apply(x)
def replace(m):
if m.__class__.__name__ == 'ReLU':
m.forward = types.MethodType(new_forward, m)
self.model.apply(replace)
class GuidedBackpropExplainer(VanillaGradExplainer):
def __init__(self, model):
super(GuidedBackpropExplainer, self).__init__(model)
self._override_backward()
def _override_backward(self):
class _ReLU(Function):
@staticmethod
def forward(ctx, input):
output = torch.clamp(input, min=0)
ctx.save_for_backward(output)
return output
@staticmethod
def backward(ctx, grad_output):
output, = ctx.saved_tensors
mask1 = (output > 0).float()
mask2 = (grad_output.data > 0).float()
grad_inp = mask1 * mask2 * grad_output.data
grad_output.data.copy_(grad_inp)
return grad_output
def new_forward(self, x):
return _ReLU.apply(x)
def replace(m):
if m.__class__.__name__ == 'ReLU':
m.forward = types.MethodType(new_forward, m)
self.model.apply(replace)
# modified from https://github.com/PAIR-code/saliency/blob/master/saliency/base.py#L80
class SmoothGradExplainer(object):
def __init__(self, base_explainer, stdev_spread=0.15,
nsamples=25, magnitude=True):
self.base_explainer = base_explainer
self.stdev_spread = stdev_spread
self.nsamples = nsamples
self.magnitude = magnitude
def explain(self, inp, ind=None):
stdev = self.stdev_spread * (inp.data.max() - inp.data.min())
total_gradients = 0
origin_inp_data = inp.data.clone()
for i in range(self.nsamples):
noise = torch.randn(inp.size()).cuda() * stdev
inp.data.copy_(noise + origin_inp_data)
grad = self.base_explainer.explain(inp, ind)
if self.magnitude:
total_gradients += grad ** 2
else:
total_gradients += grad
return total_gradients / self.nsamples
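# Hedged usage sketch (not part of the original module): exercises the saliency
# explainer on a toy fully-connected model. SmoothGradExplainer as written expects
# CUDA tensors (it calls torch.randn(...).cuda()), so this CPU demo sticks to
# SaliencyExplainer; on a GPU one could wrap it as
# SmoothGradExplainer(SaliencyExplainer(model)).
if __name__ == "__main__":
    import torch.nn as nn
    toy_model = nn.Sequential(nn.Linear(8, 3))
    inp = Variable(torch.randn(1, 8), requires_grad=True)
    saliency_map = SaliencyExplainer(toy_model).explain(inp)
    print(saliency_map.size())  # same shape as the input: (1, 8)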
|
StarcoderdataPython
|
3270033
|
<filename>src/RIOT/tests/gnrc_rpl_srh/tests/01-run.py
#!/usr/bin/env python3
# Copyright (C) 2018 Freie Universität Berlin
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import os
import random
import re
import sys
import subprocess
from scapy.all import Ether, IPv6, UDP, \
IPv6ExtHdrHopByHop, IPv6ExtHdrDestOpt, \
IPv6ExtHdrFragment, IPv6ExtHdrRouting, \
ICMPv6ParamProblem, ICMPv6TimeExceeded, \
sendp, srp1, AsyncSniffer
from testrunner import run, check_unittests
EXT_HDR_NH = {
IPv6ExtHdrHopByHop: 0,
IPv6ExtHdrRouting: 43,
IPv6ExtHdrFragment: 44,
# IPSec headers currently unsupported by scapy
IPv6ExtHdrDestOpt: 60,
# Mobility header currently unsupported by scapy
}
class Sniffer(object):
def __init__(self, iface, *args, **kwargs):
self.iface = iface
self.sniffer = None
self.stop_filter = None
def start_sniff(self, stop_filter):
assert self.sniffer is None
self.stop_filter = stop_filter
self.sniffer = AsyncSniffer(
iface=self.iface,
stop_filter=stop_filter,
)
self.sniffer.start()
def wait_for_sniff_results(self, timeout=5):
assert self.sniffer is not None
self.sniffer.join(timeout=timeout)
sniffer = self.sniffer
self.sniffer = None
if sniffer.results is None:
return []
return [p for p in sniffer.results
# filter out packets only belonging to stop_filter if
# it existed
if sniffer.kwargs.get("stop_filter") is None or
sniffer.kwargs["stop_filter"](p)]
sniffer = None
def check_and_search_output(cmd, pattern, res_group, *args, **kwargs):
output = subprocess.check_output(cmd, *args, **kwargs).decode("utf-8")
for line in output.splitlines():
m = re.search(pattern, line)
if m is not None:
return m.group(res_group)
return None
def get_bridge(tap):
res = check_and_search_output(
["bridge", "link"],
r"{}.+master\s+(?P<master>[^\s]+)".format(tap),
"master"
)
return tap if res is None else res
def get_host_lladdr(tap):
res = check_and_search_output(
["ip", "addr", "show", "dev", tap, "scope", "link"],
r"inet6\s+(?P<lladdr>[0-9A-Fa-f:]+)/\d+",
"lladdr"
)
if res is None:
raise AssertionError(
"Can't find host link-local address on interface {}".format(tap)
)
else:
return res
def get_host_hwaddr(tap):
res = check_and_search_output(
["ip", "addr", "show", "dev", tap, "scope", "link"],
r"link[^\s]+\s+(?P<hwaddr>[0-9A-Fa-f:]+)",
"hwaddr"
)
if res is None:
raise AssertionError(
"Can't find host hardware address on interface {}".format(tap)
)
else:
return res
def pktbuf_empty(child):
child.sendline("pktbuf")
child.expect(r"packet buffer: first byte: (?P<first_byte>0x[0-9a-fA-F]+), "
r"last byte: 0x[0-9a-fA-F]+ \(size: (?P<size>\d+)\)")
first_byte = child.match.group("first_byte")
size = child.match.group("size")
child.expect(
r"~ unused: {} \(next: (\(nil\)|0), size: {}\) ~".format(
first_byte, size))
def register_protnum(child):
child.sendline("ip reg")
child.expect("Registered to protocol number 59")
def unregister(child):
child.sendline("ip unreg")
child.expect(r"Unregistered from protocol number \d")
def get_first_interface(child):
child.sendline("ifconfig")
child.expect(r"Iface\s+(\d+)\s")
return int(child.match.group(1))
def add_ipv6_address(child, iface, ipv6_addr):
child.sendline("ifconfig {} add {}".format(iface, ipv6_addr))
child.expect(r"success: added [a-f0-9:]+/\d+ to interface \d+")
def del_ipv6_address(child, iface, ipv6_addr):
child.sendline("ifconfig {} del {}".format(iface, ipv6_addr))
child.expect(r"success: removed [a-f0-9:]+ to interface \d+")
def add_neighbor(child, iface, ipv6_addr, hw_addr):
child.sendline("nib neigh add {} {} {}".format(iface, ipv6_addr, hw_addr))
child.sendline("nib neigh")
child.expect(r"{} dev #{} lladdr {}".format(ipv6_addr.lower(), iface,
hw_addr.upper()))
def del_neighbor(child, iface, ipv6_addr):
child.sendline("nib neigh del {} {}".format(iface, ipv6_addr))
def test_wrong_type(child, iface, hw_dst, ll_dst, ll_src):
p = srp1(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
IPv6ExtHdrRouting(type=255, segleft=1, addresses=["abcd::1"]),
iface=iface, timeout=1, verbose=0)
assert(p is not None)
assert(ICMPv6ParamProblem in p)
assert(p[ICMPv6ParamProblem].code == 0) # erroneous header field encountered
assert(p[ICMPv6ParamProblem].ptr == 42) # routing header type field
pktbuf_empty(child)
def test_seg_left_gt_len_addresses(child, iface, hw_dst, ll_dst, ll_src):
# send routing header with no (0) addresses but segleft set to a value
# larger than 0
p = srp1(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
IPv6ExtHdrRouting(type=3, segleft=18, addresses=[]),
iface=iface, timeout=1, verbose=0)
assert(p is not None)
assert(ICMPv6ParamProblem in p)
assert(p[ICMPv6ParamProblem].code == 0) # erroneous header field encountered
assert(p[ICMPv6ParamProblem].ptr == 43) # segleft field
pktbuf_empty(child)
def test_multicast_dst(child, iface, hw_dst, ll_dst, ll_src):
# sniffing for ICMPv6 parameter problem message
sniffer.start_sniff(lambda p: p.haslayer(ICMPv6ParamProblem) or
(p.haslayer(UDP) and (p[IPv6].dst != "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b")))
# send routing header with multicast destination
sendp(Ether(dst=hw_dst) / IPv6(dst="fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b", src=ll_src) /
IPv6ExtHdrRouting(type=3, segleft=1, addresses=["fc00:e968:6179::de52:7100"]) /
UDP(dport=2606), iface=iface, verbose=0)
ps = sniffer.wait_for_sniff_results()
p = [p for p in ps if (ICMPv6ParamProblem in p) or
((UDP in p) and (p[UDP].dport == 2606) and
(p[IPv6].dst != "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b"))]
# packet should be discarded silently:
# see https://tools.ietf.org/html/rfc6554#section-4.2
assert(len(p) == 0)
pktbuf_empty(child)
def test_multicast_addr(child, iface, hw_dst, ll_dst, ll_src):
# sniffing for ICMPv6 parameter problem message
sniffer.start_sniff(lambda p: p.haslayer(ICMPv6ParamProblem) or
(p.haslayer(UDP) and (p[IPv6].dst != ll_dst)))
# Send routing header with multicast address in its destinations
sendp(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
IPv6ExtHdrRouting(type=3, segleft=1, addresses=["fc00:e968:6179::de52:7100"]) /
UDP(dport=2606), iface=iface, verbose=0)
ps = sniffer.wait_for_sniff_results()
p = [p for p in ps if (ICMPv6ParamProblem in p) or
((UDP in p) and (p[UDP].dport == 2606) and
(p[IPv6].dst != ll_dst))]
# packet should be discarded silently:
# see https://tools.ietf.org/html/rfc6554#section-4.2
assert(len(p) == 0)
pktbuf_empty(child)
def test_multiple_addrs_of_mine_uncomp(child, iface, hw_dst, ll_dst, ll_src):
dummy = "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b"
# add dummy IPv6 address
dst_iface = get_first_interface(child)
add_ipv6_address(child, dst_iface, dummy)
p = srp1(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
IPv6ExtHdrRouting(type=3, segleft=3, addresses=[ll_dst, ll_src,
dummy]),
iface=iface, timeout=1, verbose=0)
assert(p is not None)
assert(ICMPv6ParamProblem in p)
assert(p[ICMPv6ParamProblem].code == 0) # erroneous header field encountered
assert(p[ICMPv6ParamProblem].ptr == 40+8+(2 * 16)) # dummy in routing header
pktbuf_empty(child)
del_ipv6_address(child, dst_iface, dummy)
def test_forward_uncomp(child, iface, hw_dst, ll_dst, ll_src):
dummy = "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b"
hl = random.randint(2, 255)
# sniffing for packets to dummy
sniffer.start_sniff(lambda p: p[Ether].src == hw_dst)
# add dummy IPv6 address
dst_iface = get_first_interface(child)
hw_src = get_host_hwaddr(iface)
add_neighbor(child, dst_iface, dummy, hw_src)
sendp(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src, hlim=hl) /
IPv6ExtHdrRouting(type=3, segleft=1, addresses=[dummy]),
iface=iface, verbose=0)
ps = sniffer.wait_for_sniff_results()
p = [p for p in ps if p[Ether].src == hw_dst]
assert(len(p) > 0)
p = p[0]
assert(IPv6 in p)
assert(IPv6ExtHdrRouting in p)
assert(p[IPv6].src == ll_src)
assert(p[IPv6].dst == dummy)
assert(p[IPv6].hlim == (hl - 1))
assert(p[IPv6ExtHdrRouting].type == 3)
assert(p[IPv6ExtHdrRouting].segleft == 0)
pktbuf_empty(child)
del_neighbor(child, dst_iface, dummy)
def test_forward_uncomp_not_first_ext_hdr(child, iface, hw_dst, ll_dst, ll_src):
dummy = "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b"
hl = random.randint(2, 255)
# sniffing for packets to dummy
sniffer.start_sniff(lambda p: p[Ether].src == hw_dst)
# add dummy IPv6 address
dst_iface = get_first_interface(child)
hw_src = get_host_hwaddr(iface)
add_neighbor(child, dst_iface, dummy, hw_src)
sendp(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src, hlim=hl) /
IPv6ExtHdrHopByHop() /
IPv6ExtHdrRouting(type=3, segleft=1, addresses=[dummy]),
iface=iface, verbose=0)
ps = sniffer.wait_for_sniff_results()
p = [p for p in ps if p[Ether].src == hw_dst]
assert(len(p) > 0)
p = p[0]
assert(IPv6 in p)
assert(IPv6ExtHdrRouting in p)
assert(p[IPv6].src == ll_src)
assert(p[IPv6].dst == dummy)
assert(p[IPv6].hlim == (hl - 1))
assert(p[IPv6ExtHdrRouting].type == 3)
assert(p[IPv6ExtHdrRouting].segleft == 0)
pktbuf_empty(child)
del_neighbor(child, dst_iface, dummy)
def test_seq_left_0(child, iface, hw_dst, ll_dst, ll_src):
register_protnum(child)
sendp(Ether(dst=hw_dst) / IPv6(dst=ll_dst, src=ll_src) /
IPv6ExtHdrRouting(type=3, segleft=0), iface=iface, verbose=0)
# we are the target, so the packet should be dumped
# empty snip
child.expect(r"~~ SNIP 0 - size:\s+0 byte, type: NETTYPE_UNDEF \(\d+\)")
ipv6_payload_len = 0
# parsed routing header
child.expect(r"~~ SNIP 1 - size:\s+(\d+) byte, type: NETTYPE_\w+ \(\d+\)")
ipv6_payload_len += int(child.match.group(1))
# NH = 59 (IPV6_NONXT), len = 0x00, routing type = 3, segments left = 0
child.expect(r"00000000 3B 00 03 00 00 00 00 00")
# IPv6 header
child.expect(r"~~ SNIP 2 - size:\s+40 byte, type: NETTYPE_IPV6 \(\d+\)")
child.expect_exact(r"length: {} next header: {}".format(
ipv6_payload_len, EXT_HDR_NH[IPv6ExtHdrRouting]
))
child.expect_exact(r"destination address: {}".format(ll_dst))
pktbuf_empty(child)
unregister(child)
def test_time_exc(child, iface, hw_dst, ll_dst, ll_src):
dummy = "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b"
p = srp1(Ether(dst=hw_dst) / IPv6(dst=ll_dst, hlim=1, src=ll_src) /
IPv6ExtHdrRouting(type=3, segleft=1, addresses=[dummy]),
iface=iface, timeout=1, verbose=0)
assert(p is not None)
assert(ICMPv6TimeExceeded in p)
assert(p[ICMPv6TimeExceeded].code == 0)
pktbuf_empty(child)
def testfunc(child):
global sniffer
tap = get_bridge(os.environ["TAP"])
child.sendline("unittests")
# wait for and check result of unittests
print("." * check_unittests(child), end="", flush=True)
lladdr_src = get_host_lladdr(tap)
child.sendline("ifconfig")
child.expect(r"HWaddr: (?P<hwaddr>[A-Fa-f:0-9]+)\s")
hwaddr_dst = child.match.group("hwaddr").lower()
child.expect(r"(?P<lladdr>fe80::[A-Fa-f:0-9]+)\s")
lladdr_dst = child.match.group("lladdr").lower()
sniffer = Sniffer(tap)
def run(func):
if child.logfile == sys.stdout:
func(child, tap, hwaddr_dst, lladdr_dst, lladdr_src)
else:
try:
func(child, tap, hwaddr_dst, lladdr_dst, lladdr_src)
print(".", end="", flush=True)
except Exception as e:
print("FAILED")
raise e
run(test_wrong_type)
run(test_seg_left_gt_len_addresses)
run(test_multicast_dst)
run(test_multicast_addr)
run(test_multiple_addrs_of_mine_uncomp)
run(test_forward_uncomp)
run(test_forward_uncomp_not_first_ext_hdr)
# compressed tests hard to implement with scapy and also covered in
# unittests
run(test_seq_left_0)
run(test_time_exc)
print("SUCCESS")
if __name__ == "__main__":
if os.geteuid() != 0:
print("\x1b[1;31mThis test requires root privileges.\n"
"It's constructing and sending Ethernet frames.\x1b[0m\n",
file=sys.stderr)
sys.exit(1)
sys.exit(run(testfunc, timeout=1, echo=False))
|
StarcoderdataPython
|
3238095
|
<reponame>kalev/flatpak-status
import functools
import logging
import os
import subprocess
logger = logging.getLogger(__name__)
class GitError(Exception):
pass
class OrderingError(Exception):
pass
class GitRepo:
def __init__(self, repo_dir):
self.repo_dir = repo_dir
def do(self, *args):
full_args = ['git']
full_args += args
try:
subprocess.check_call(full_args, cwd=self.repo_dir)
except subprocess.CalledProcessError as e:
raise GitError(f"{self.repo_dir}: {e}") from e
def capture(self, *args):
full_args = ['git']
full_args += args
try:
return subprocess.check_output(full_args, cwd=self.repo_dir, encoding='UTF-8').strip()
except subprocess.CalledProcessError as e:
raise GitError(f"{self.repo_dir}: {e}") from e
class DistGitRepo(GitRepo):
def __init__(self, pkg, repo_dir, origin, mirror_existing=True):
super().__init__(repo_dir)
self.pkg = pkg
self.origin = origin
self.mirror_existing = mirror_existing
def exists(self):
return os.path.exists(self.repo_dir)
def mirror(self, mirror_always=False):
if not self.exists():
parent_dir = os.path.dirname(self.repo_dir)
if not os.path.isdir(parent_dir):
os.makedirs(parent_dir)
subprocess.check_call(['git', 'clone', '--mirror', self.origin],
cwd=parent_dir)
else:
if self.mirror_existing or mirror_always:
logger.info("Refreshing existing mirror %s", self.pkg)
self.do('remote', 'update')
def _get_branches(self, commit, try_mirroring=False):
return self.capture('branch',
'--contains', commit,
'--format=%(refname:lstrip=2)').split('\n')
def get_branches(self, commit, try_mirroring=False):
need_retry = False
try:
return self._get_branches(commit)
except GitError:
if try_mirroring:
logger.warning(f"Couldn't find {commit} in {self.repo_dir}, refreshing mirror")
need_retry = True
else:
raise
if need_retry:
self.mirror(mirror_always=True)
return self._get_branches(commit)
def rev_parse(self, ref):
return self.capture('rev-parse', ref)
def verify_rev(self, rev):
try:
self.capture('rev-parse', '--quiet', '--verify', rev)
return True
except subprocess.CalledProcessError:
return False
def order(self, commits):
def compare(a, b):
if a == b:
return 0
base = self.capture('merge-base', a, b)
if base == a:
return -1
elif base == b:
return 1
else:
raise OrderingError(f"Commits {a} and {b} are not comparable")
return sorted(commits, key=functools.cmp_to_key(compare))
class DistGit:
def __init__(self, base_url, mirror_dir, mirror_existing=True):
self.base_url = base_url
self.mirror_dir = mirror_dir
self.mirror_existing = mirror_existing
def repo(self, pkg):
return DistGitRepo(pkg,
repo_dir=os.path.join(self.mirror_dir, pkg + '.git'),
origin=self.base_url + '/' + pkg,
mirror_existing=self.mirror_existing)
def mirror_all(self):
for f in sorted(os.listdir(self.mirror_dir)):
for g in sorted(os.listdir(os.path.join(self.mirror_dir, f))):
if g.endswith('.git'):
self.repo(os.path.join(f, g[:-4])).mirror(mirror_always=True)
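# Hedged usage sketch (not part of the original module): mirrors one package and
# lists the branches containing its current HEAD. The URL and paths below are
# illustrative placeholders, and running this needs git plus network access.
if __name__ == "__main__":
    distgit = DistGit(base_url="https://src.fedoraproject.org/rpms",
                      mirror_dir="/tmp/distgit-mirror")
    repo = distgit.repo("flatpak")
    repo.mirror()
    head = repo.rev_parse("HEAD")
    print(repo.get_branches(head, try_mirroring=True))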
|
StarcoderdataPython
|
179609
|
import numpy as np
from layers import (
FullyConnectedLayer, ReLULayer,
ConvolutionalLayer, MaxPoolingLayer, Flattener,
softmax_with_cross_entropy, l2_regularization, softmax
)
class ConvNet:
"""
Implements a very simple conv net
Input -> Conv[3x3] -> Relu -> Maxpool[4x4] ->
Conv[3x3] -> Relu -> MaxPool[4x4] ->
Flatten -> FC -> Softmax
"""
def __init__(self, input_shape, n_output_classes, conv1_channels, conv2_channels):
"""
Initializes the neural network
Arguments:
input_shape, tuple of 3 ints - image_width, image_height, n_channels
Will be equal to (32, 32, 3)
n_output_classes, int - number of classes to predict
conv1_channels, int - number of filters in the 1st conv layer
conv2_channels, int - number of filters in the 2nd conv layer
"""
# TODO Create necessary layers
#raise Exception("Not implemented!")
image_width, image_height, image_channels = input_shape
maxpool1_size = 4
maxpool2_size = 4
flattener_width = int(image_width / (maxpool1_size * maxpool2_size))
        flattener_height = int(image_height / (maxpool1_size * maxpool2_size))
self.layers = [
ConvolutionalLayer(in_channels=image_channels, out_channels=conv1_channels, filter_size=3, padding=1),
ReLULayer(),
MaxPoolingLayer(maxpool1_size, maxpool1_size),
ConvolutionalLayer(in_channels=conv1_channels, out_channels=conv2_channels, filter_size=3, padding=1),
ReLULayer(),
MaxPoolingLayer(maxpool2_size, maxpool2_size),
Flattener(),
FullyConnectedLayer(flattener_width * flattener_height * conv2_channels, n_output_classes)
]
def compute_loss_and_gradients(self, X, y):
"""
Computes total loss and updates parameter gradients on a batch of training examples
Arguments:
:param X, np array (batch_size, height, width, input_features) - input data
:param y, np array of int (batch_size) - classes
"""
assert X.ndim == 4
assert y.ndim == 1
assert X.shape[0] == y.shape[0]
for _, param in self.params().items():
param.reset_grad()
# forward pass
out = X
for layer in self.layers:
out = layer.forward(out)
# backward pass
loss, d_out = softmax_with_cross_entropy(out, y)
for layer in reversed(self.layers):
d_out = layer.backward(d_out)
return loss
def predict(self, X):
# forward pass
out = X
for layer in self.layers:
out = layer.forward(out)
out = softmax(out)
pred = np.argmax(out, axis=1)
return pred # y_hat
def params(self):
result = {}
for index, layer in enumerate(self.layers):
for name, param in layer.params().items():
result['%s_%s' % (index, name)] = param
return result
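# Hedged usage sketch (not part of the original file): runs one forward/backward
# pass on random data. It assumes the layers module imported above is available and
# implements forward/backward as used by ConvNet.
if __name__ == "__main__":
    model = ConvNet(input_shape=(32, 32, 3), n_output_classes=10,
                    conv1_channels=2, conv2_channels=2)
    X = np.random.randn(4, 32, 32, 3)
    y = np.random.randint(0, 10, size=4)
    loss = model.compute_loss_and_gradients(X, y)
    print("loss:", loss, "predictions:", model.predict(X))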
|
StarcoderdataPython
|
1777773
|
<reponame>smmckay/quex-mirror
#! /usr/bin/env python
# PURPOSE:
# Tests the function "get_follow_state_combinations(state_combination)"
# from the module state_machine.construction.paralellize.
#
################################################################################
import sys
sys.path.append("../")
from core import *
import paralellize
from quex.engine.state_machine.TEST_help.some_dfas import *
# (*) UNIT TEST ________________________________________________________________
#
paralellize.state_machines = [ sm0, sm1, sm2 ]
paralellize.state_machine_n = len(paralellize.state_machines)
combination = [sm0.init_state_index,
sm1.init_state_index,
sm2.init_state_index]
print repr(paralellize.get_follow_state_combinations(combination)).replace("L", "")
print
combination = [si0_1, si1_1, si2_2 ]
print repr(paralellize.get_follow_state_combinations(combination)).replace("L", "")
print
combination = [ STATE_TERMINATION, si1_2, si2_2 ]
print repr(paralellize.get_follow_state_combinations(combination)).replace("L", "")
print
|
StarcoderdataPython
|
1740820
|
import re
import unittest
from unittest.mock import patch
from click.testing import CliRunner
from tests.util import read_data, DETERMINISTIC_HEADER, skip_if_exception
try:
import blackd
from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop
from aiohttp import web
except ImportError:
has_blackd_deps = False
else:
has_blackd_deps = True
class BlackDTestCase(AioHTTPTestCase):
@unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed")
def test_blackd_main(self) -> None:
with patch("blackd.web.run_app"):
result = CliRunner().invoke(blackd.main, [])
if result.exception is not None:
raise result.exception
self.assertEqual(result.exit_code, 0)
async def get_application(self) -> web.Application:
return blackd.make_app()
# TODO: remove these decorators once the below is released
# https://github.com/aio-libs/aiohttp/pull/3727
@skip_if_exception("ClientOSError")
@unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed")
@unittest_run_loop
async def test_blackd_request_needs_formatting(self) -> None:
response = await self.client.post("/", data=b"print('hello world')")
self.assertEqual(response.status, 200)
self.assertEqual(response.charset, "utf8")
self.assertEqual(await response.read(), b'print("hello world")\n')
@skip_if_exception("ClientOSError")
@unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed")
@unittest_run_loop
async def test_blackd_request_no_change(self) -> None:
response = await self.client.post("/", data=b'print("hello world")\n')
self.assertEqual(response.status, 204)
self.assertEqual(await response.read(), b"")
@skip_if_exception("ClientOSError")
@unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed")
@unittest_run_loop
async def test_blackd_request_syntax_error(self) -> None:
response = await self.client.post("/", data=b"what even ( is")
self.assertEqual(response.status, 400)
content = await response.text()
self.assertTrue(
content.startswith("Cannot parse"),
msg=f"Expected error to start with 'Cannot parse', got {repr(content)}",
)
@skip_if_exception("ClientOSError")
@unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed")
@unittest_run_loop
async def test_blackd_unsupported_version(self) -> None:
response = await self.client.post(
"/", data=b"what", headers={blackd.PROTOCOL_VERSION_HEADER: "2"}
)
self.assertEqual(response.status, 501)
@skip_if_exception("ClientOSError")
@unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed")
@unittest_run_loop
async def test_blackd_supported_version(self) -> None:
response = await self.client.post(
"/", data=b"what", headers={blackd.PROTOCOL_VERSION_HEADER: "1"}
)
self.assertEqual(response.status, 200)
@skip_if_exception("ClientOSError")
@unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed")
@unittest_run_loop
async def test_blackd_invalid_python_variant(self) -> None:
async def check(header_value: str, expected_status: int = 400) -> None:
response = await self.client.post(
"/", data=b"what", headers={blackd.PYTHON_VARIANT_HEADER: header_value}
)
self.assertEqual(response.status, expected_status)
await check("lol")
await check("ruby3.5")
await check("pyi3.6")
await check("py1.5")
await check("2.8")
await check("py2.8")
await check("3.0")
await check("pypy3.0")
await check("jython3.4")
@skip_if_exception("ClientOSError")
@unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed")
@unittest_run_loop
async def test_blackd_pyi(self) -> None:
source, expected = read_data("stub.pyi")
response = await self.client.post(
"/", data=source, headers={blackd.PYTHON_VARIANT_HEADER: "pyi"}
)
self.assertEqual(response.status, 200)
self.assertEqual(await response.text(), expected)
@skip_if_exception("ClientOSError")
@unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed")
@unittest_run_loop
async def test_blackd_diff(self) -> None:
diff_header = re.compile(
r"(In|Out)\t\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d\.\d\d\d\d\d\d \+\d\d\d\d"
)
source, _ = read_data("blackd_diff.py")
expected, _ = read_data("blackd_diff.diff")
response = await self.client.post(
"/", data=source, headers={blackd.DIFF_HEADER: "true"}
)
self.assertEqual(response.status, 200)
actual = await response.text()
actual = diff_header.sub(DETERMINISTIC_HEADER, actual)
self.assertEqual(actual, expected)
@skip_if_exception("ClientOSError")
@unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed")
@unittest_run_loop
async def test_blackd_python_variant(self) -> None:
code = (
"def f(\n"
" and_has_a_bunch_of,\n"
" very_long_arguments_too,\n"
" and_lots_of_them_as_well_lol,\n"
" **and_very_long_keyword_arguments\n"
"):\n"
" pass\n"
)
async def check(header_value: str, expected_status: int) -> None:
response = await self.client.post(
"/", data=code, headers={blackd.PYTHON_VARIANT_HEADER: header_value}
)
self.assertEqual(
response.status, expected_status, msg=await response.text()
)
await check("3.6", 200)
await check("py3.6", 200)
await check("3.6,3.7", 200)
await check("3.6,py3.7", 200)
await check("py36,py37", 200)
await check("36", 200)
await check("3.6.4", 200)
await check("2", 204)
await check("2.7", 204)
await check("py2.7", 204)
await check("3.4", 204)
await check("py3.4", 204)
await check("py34,py36", 204)
await check("34", 204)
@skip_if_exception("ClientOSError")
@unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed")
@unittest_run_loop
async def test_blackd_line_length(self) -> None:
response = await self.client.post(
"/", data=b'print("hello")\n', headers={blackd.LINE_LENGTH_HEADER: "7"}
)
self.assertEqual(response.status, 200)
@skip_if_exception("ClientOSError")
@unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed")
@unittest_run_loop
async def test_blackd_invalid_line_length(self) -> None:
response = await self.client.post(
"/", data=b'print("hello")\n', headers={blackd.LINE_LENGTH_HEADER: "NaN"}
)
self.assertEqual(response.status, 400)
@skip_if_exception("ClientOSError")
@unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed")
@unittest_run_loop
async def test_blackd_response_black_version_header(self) -> None:
response = await self.client.post("/")
self.assertIsNotNone(response.headers.get(blackd.BLACK_VERSION_HEADER))
|
StarcoderdataPython
|
1711236
|
"""
Written by <NAME> (<EMAIL>) and <NAME>,
based on Harry-Fairhurst arXiv:1012.4939. Produces a txt file with the right ascension,
declination, coherent SNR and GPS time of the maximum for galaxies from the GLADE catalog
within the 90% C.L. region of the Bayestar skymap for a given Binary Neutron Star candidate trigger on GraceDB.
The list is in descending order of cohSNR, so the higher a galaxy is in the list, the more likely
it is to be the host of the BNS event.
"""
import numpy as np
from . import virgotools as vt
from . import em_mbta_v05 as em
delta_t=1./4096 # time resolution
galFile="BAYESgalaxies.txt"
""" makes list of galaxies objects from file with their coordinates """
galaxies=get_galaxies(galFile)
with open("GCNheader.txt") as GCNheader:
data=GCNheader.read()
TriggerTime=None  # GPS time of the candidate trigger on GraceDB -- placeholder, to be filled in
tStart=TriggerTime-0.5 #### for example...
duration=None  # duration in seconds of the analysed segment -- placeholder, to be filled in
""" prepares the lists with GW Events for each ifo for the specified
start-time and duration"""
eventsH=get_gw_events(tStart,duration,"H")
eventsL=get_gw_events(tStart,duration,"L")
eventsV=get_gw_events(tStart,duration,"V")
""" re-organizes the events to make lists of triple detection for the same template"""
triple_coincidences=[[evH,evL,evV] for evH in eventsH for evL in eventsL for evV in eventsV
if (evH.parameters.tmplt_index==evL.parameters.tmplt_index)
& (evH.parameters.tmplt_index==evV.parameters.tmplt_index)]
""" Computes the ranking """
galaxies_ranking=rank_galaxies(galaxies,triple_coincidences,TriggerTime)
""" Prints to file galaxies_ranking.txt """
np.savetxt('galaxies_ranking_{time}.txt'.format(time=TriggerTime),galaxies_ranking,delimiter=',')
############################################################################################
# #
# Functions definitions #
# #
############################################################################################
def rank_galaxies(galaxies,triple_coincidences,TriggerTime):
""" Returns a 2D array with ra, dec, cohSNR, gps_time_max_cohSNR ordered for descending cohSNR"""
ranking=np.array([])
for galaxy in galaxies:
cohSNR=np.array([compute_cohSNR(triple,galaxy,TriggerTime) for triple in triple_coincidences])
        max_cohSNR=cohSNR[np.argmax(cohSNR[:,0])] #extract the maximum value among all
                                                  #the triple coincidences
        galaxy_SNR=np.append(np.array([galaxy.ra,galaxy.dec]),max_cohSNR)
        ranking=galaxy_SNR if ranking.size==0 else np.vstack((ranking,galaxy_SNR))
final_rank=ranking[ranking[:,2].argsort()[::-1]] # sorts the array in descending order of cohSNR
return final_rank
def compute_proj_matrix(triple_ev,galaxy,TriggerTime):
""" Computes the projection matrix M^{mu,nu} (see Harry-Fairhurst).
Input: list of events in triple coincidence (sublist of the triple_coinc list);
interest galaxy and trigger time (is sufficient as reference time for the scope)"""
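    # wp and wc below are the sigma-weighted "plus" and "cross" antenna-pattern vectors over
    # (H, L, V); each 2x2 block of M is the inverse of [[a, c], [c, b]] with a = wp.wp,
    # b = wc.wc and c = wp.wc, following the Harry-Fairhurst construction.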
galaxy.get_antenna_patterns(TriggerTime)
wp=np.array([
np.sqrt(triple_ev[0].parameters.sigma_sq) * galaxy.antenna_patterns_H[0],
np.sqrt(triple_ev[1].parameters.sigma_sq) * galaxy.antenna_patterns_L[0],
np.sqrt(triple_ev[2].parameters.sigma_sq) * galaxy.antenna_patterns_V[0]])
wc=np.array([
np.sqrt(triple_ev[0].parameters.sigma_sq) * galaxy.antenna_patterns_H[1],
np.sqrt(triple_ev[1].parameters.sigma_sq) * galaxy.antenna_patterns_L[1],
np.sqrt(triple_ev[2].parameters.sigma_sq) * galaxy.antenna_patterns_V[1]])
a=np.dot(wp,wp)
b=np.dot(wc,wc)
c=np.dot(wp,wc)
det=a*b-c**2
zeros=np.zeros((2,2))
block=np.array([[b, -c],
[-c, a] ])
M=(1./det)*np.vstack((np.hstack((block,zeros)),np.hstack((zeros,block))))
return M
def make_MFO_HLV_array(triple_ev,galaxy,TriggerTime):
""" Prepares the MFO array that is necessary for cohSNR. Time series are arranged
according to the time delays between detectors for the given galaxy and trigger time """
reference_ifo=None
max_SNR=0
t_ref=0
    ### extracts the MFO time series of the events and identifies the reference ifo
for event in triple_ev:
if event.parameters.rwSNR > max_SNR : # selects the reference ifo according to the value of re-weightedSNR
max_SNR=event.parameters.rwSNR
reference_ifo=event.ifo
t_ref=event.parameters.StartTime
# computes the time of travel between the reference ifo and the 3 ifos
galaxy.get_relative_time_delays(reference_ifo,TriggerTime)
[delay_Href,delay_Lref,delay_Vref]=galaxy.time_delays
max_delay=max(galaxy.time_delays[galaxy.time_delays!=0])
min_delay=min(galaxy.time_delays[galaxy.time_delays!=0])
Hrw_timeseries=np.array([
galaxy.antenna_patterns_H[0]*triple_ev[0].mfo_data[:,0], # Fplus_H h_phase
galaxy.antenna_patterns_H[1]*triple_ev[0].mfo_data[:,0], # Fcross_H h_phase
galaxy.antenna_patterns_H[0]*triple_ev[0].mfo_data[:,1], # Fplus_H h_quadr
galaxy.antenna_patterns_H[1]*triple_ev[0].mfo_data[:,1]]) # Fcross_H h_quadr
Lrw_timeseries=np.array([
galaxy.antenna_patterns_L[0]*triple_ev[1].mfo_data[:,0],
galaxy.antenna_patterns_L[1]*triple_ev[1].mfo_data[:,0],
galaxy.antenna_patterns_L[0]*triple_ev[1].mfo_data[:,1],
galaxy.antenna_patterns_L[1]*triple_ev[1].mfo_data[:,1]])
Vrw_timeseries=np.array([
galaxy.antenna_patterns_V[0]*triple_ev[2].mfo_data[:,0],
galaxy.antenna_patterns_V[1]*triple_ev[2].mfo_data[:,0],
galaxy.antenna_patterns_V[0]*triple_ev[2].mfo_data[:,1],
galaxy.antenna_patterns_V[1]*triple_ev[2].mfo_data[:,1]])
#### appends zeros to time series according to the time delay
#### in order to prepare them to be summed together
shift_max=abs(int(np.around(max_delay/delta_t)))
shift_min=abs(int(np.around(min_delay/delta_t)))
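    # Align the three detectors in time for this sky position by zero-padding each stream;
    # the branches below cover the possible sign combinations of the largest and smallest
    # non-zero delays with respect to the reference detector.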
if max_delay>0 and min_delay>0:
if delay_Href==0:
Hrw_timeseries=np.hstack((Hrw_timeseries,np.zeros((4,shift_max))))
elif delay_Href==max_delay:
Hrw_timeseries=np.hstack((np.zeros((4,shift_max)),Hrw_timeseries))
elif delay_Href==min_delay:
Hrw_timeseries=np.hstack((np.zeros((4,shift_min)),Hrw_timeseries,
np.zeros((4,shift_max-shift_min))))
if delay_Lref==0:
Lrw_timeseries=np.hstack((Lrw_timeseries,np.zeros((4,shift_max))))
elif delay_Lref==max_delay:
Lrw_timeseries=np.hstack((np.zeros((4,shift_max)),Lrw_timeseries))
elif delay_Lref==min_delay:
Lrw_timeseries=np.hstack((np.zeros((4,shift_min)),Lrw_timeseries,
np.zeros((4,shift_max-shift_min))))
if delay_Vref==0:
Vrw_timeseries=np.hstack((Vrw_timeseries,np.zeros((4,shift_max))))
elif delay_Vref==max_delay:
Vrw_timeseries=np.hstack((np.zeros((4,shift_max)),Vrw_timeseries))
elif delay_Vref==min_delay:
Vrw_timeseries=np.hstack((np.zeros((4,shift_min)),Vrw_timeseries,
np.zeros((4,shift_max-shift_min))))
elif max_delay>0 and min_delay<0:
if delay_Href==0:
Hrw_timeseries=np.hstack((np.zeros((4,shift_min)),Hrw_timeseries,np.zeros((4,shift_max))))
elif delay_Href==max_delay:
Hrw_timeseries=np.hstack((np.zeros((4,shift_max+shift_min)),Hrw_timeseries))
elif delay_Href==min_delay:
Hrw_timeseries=np.hstack((Hrw_timeseries,np.zeros((4,shift_max+shift_min))))
if delay_Lref==0:
Lrw_timeseries=np.hstack((np.zeros((4,shift_min)),Lrw_timeseries,np.zeros((4,shift_max))))
elif delay_Lref==max_delay:
Lrw_timeseries=np.hstack((np.zeros((4,shift_max+shift_min)),Lrw_timeseries))
        elif delay_Lref==min_delay:
Lrw_timeseries=np.hstack((Lrw_timeseries,np.zeros((4,shift_max+shift_min))))
if delay_Vref==0:
Vrw_timeseries=np.hstack((np.zeros((4,shift_min)),Vrw_timeseries,np.zeros((4,shift_max))))
elif delay_Vref==max_delay:
Vrw_timeseries=np.hstack((np.zeros((4,shift_max+shift_min)),Vrw_timeseries))
elif delay_Vref==min_delay:
Vrw_timeseries=np.hstack((Vrw_timeseries,np.zeros((4,shift_max+shift_min))))
elif max_delay<0 and min_delay<0:
if delay_Href==0:
Hrw_timeseries=np.hstack((np.zeros((4,shift_min)),Hrw_timeseries))
elif delay_Href==max_delay:
Hrw_timeseries=np.hstack((np.zeros((4,shift_min-shift_max)),Hrw_timeseries,
np.zeros((4,shift_max))))
elif delay_Href==min_delay:
Hrw_timeseries=np.hstack((Hrw_timeseries,np.zeros((4,shift_min))))
if delay_Lref==0:
Lrw_timeseries=np.hstack((np.zeros((4,shift_min)),Lrw_timeseries))
elif delay_Lref==max_delay:
Lrw_timeseries=np.hstack((np.zeros((4,shift_min-shift_max)),Lrw_timeseries,
np.zeros((4,shift_max))))
elif delay_Lref==min_delay:
Lrw_timeseries=np.hstack((Lrw_timeseries,np.zeros((4,shift_min))))
if delay_Vref==0:
Vrw_timeseries=np.hstack((np.zeros((4,shift_min)),Vrw_timeseries))
elif delay_Vref==max_delay:
Vrw_timeseries=np.hstack((np.zeros((4,shift_min-shift_max)),Vrw_timeseries,
np.zeros((4,shift_max))))
elif delay_Vref==min_delay:
Vrw_timeseries=np.hstack((Vrw_timeseries,np.zeros((4,shift_min))))
    else:
        raise ValueError("unexpected combination of time delays: max_delay=%s, min_delay=%s" % (max_delay,min_delay))
    MFO_HLV=Hrw_timeseries+Lrw_timeseries+Vrw_timeseries
    time_origin=t_ref # assumed: gps time of the first sample of MFO_HLV, taken from the reference ifo start time
    return MFO_HLV,time_origin
def compute_cohSNR(triple_ev,galaxy,TriggerTime):
""" computes the coherent snr time series and returns
the value and the (approximate) gps time of its maximum """
M=compute_proj_matrix(triple_ev,galaxy,TriggerTime)
MFO_HLV,time_origin = make_MFO_HLV_array(triple_ev,galaxy,TriggerTime)
cohSNR=np.einsum('it,ij,jt->t',MFO_HLV,M,MFO_HLV) # as Eq.(2.26) in Harry-Fairhurst. t can be viewed as time index
max_cohSNR=max(cohSNR)
time_steps=np.where(cohSNR == max_cohSNR)[0][0]
time_of_max_coh_SNR=time_origin+delta_t*time_steps # time_origin is the gps time associated to the first element of the MFO_HLV array
return max_cohSNR,time_of_max_coh_SNR
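# Added illustrative helper (not called by the pipeline above): a self-contained check, on
# random numbers, that the einsum pattern 'it,ij,jt->t' used in compute_cohSNR really is the
# quadratic form sum_{i,j} x_i(t) M_ij x_j(t) evaluated at every time sample.
def _cohSNR_einsum_sanity_check():
    x=np.random.rand(4,8) # 4 matched-filter outputs, 8 time samples
    M=np.random.rand(4,4)
    fast=np.einsum('it,ij,jt->t',x,M,x)
    slow=np.array([x[:,t].dot(M).dot(x[:,t]) for t in range(x.shape[1])])
    assert np.allclose(fast,slow)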
def get_gw_events(tStart,duration,ifo):
""" Returns a list of GW events, instancies of the GWEvent class, for given input parameters"""
events=[]
ffl=vt.FrameFile("Mbta%c1_RX_Clstr.ffl"%(ifo))
    inputFile=ffl.get_frame(TriggerTime) ### where does the ifo information enter here??
    iFile=PyFd.FrFileINew(inputFile) ### how do we find the input file?
event=PyFd.FrEventReadTF(iFile,"Mbta%c_00-Chi2OK"%(ifo), tStart, duration, 0, 0)
while(event):
ev=em.GWEvent(event,inputFile,ifo)
ev.get_template_params()
if ev.parameters.mass1 <= 3 and ev.parameters.mass2 <= 3 : # selects only BNS candidates
ev.get_mfo()
events.append(ev)
event=event[0].next
else:
event=event[0].next
return events
def get_galaxies(galFile):
""" Input: comma-separated file with ra and dec coordinates of galaxies withing skymap;
Output: list of galaxies, instancies of the Galaxy class defined in em_mbta"""
infile=open(galFile,'r')
coordinates=np.array([[float(coords.split(',')[0]),float(coords.split(',')[1])]
for coords in infile])
infile.close()
galaxies=[]
for ra, dec in coordinates:
gal=em.Galaxy(galFile,ra,dec)
galaxies.append(gal)
return galaxies
|
StarcoderdataPython
|
1673017
|
from django.shortcuts import render, redirect
from hujan_ui import maas
from hujan_ui.maas.utils import MAAS
from .forms import VlanForm, VlanEditForm
from django.utils.translation import ugettext_lazy as _
import sweetify
from hujan_ui.maas.exceptions import MAASError
def index(request):
try:
vlans = maas.get_vlans()
except (MAASError, ConnectionError, TimeoutError) as e:
vlans = None
sweetify.sweetalert(request, 'Warning', icon='error', text=str(e), button='Ok', timer=5000)
context = {
'title': 'Vlan List',
'vlans': vlans
}
return render(request, 'maas/vlans/index.html', context)
def add(request):
form = VlanForm(request.POST or None)
if form.is_valid():
try:
m = MAAS()
data = form.clean()
fabId = data['fabric_id']
resp = m.post(f'fabrics/{fabId}/vlans/', data=data)
if resp.status_code in m.ok:
                sweetify.success(request, _('Vlan Added Successfully'), timer=3000)
return redirect('maas:subnets:index')
sweetify.warning(request, _(resp.text), timer=5000)
except (MAASError, ConnectionError, TimeoutError) as e:
sweetify.sweetalert(request, 'Warning', icon='error', text=str(e), button='Ok', timer=5000)
context = {
'title': _('Add Vlan'),
'form': form
}
return render(request, 'maas/vlans/add.html', context)
def edit(request, vlan_id):
try:
vlan = maas.get_vlans(vlan_id)
form = VlanEditForm(request.POST or None, initial=vlan)
if form.is_valid():
m = MAAS()
data = form.clean()
fabId = data['fabric_id']
vid = data['vid']
resp = m.put(f'fabrics/{fabId}/vlans/{vid}/',data=data)
if resp.status_code in m.ok:
                sweetify.success(request, _('Vlan Updated Successfully'), timer=3000)
return redirect('maas:subnets:index')
sweetify.warning(request, _(resp.text), timer=5000)
except (MAASError, ConnectionError, TimeoutError) as e:
sweetify.sweetalert(request, 'Warning', icon='error', text=str(e), button='Ok', timer=5000)
context = {
'title': 'Edit Vlan',
'form': form
}
return render(request, 'maas/vlans/add.html', context)
def detail(request, vlan_id):
try:
vlan = maas.get_vlans(vlan_id)
if vlan:
context = {
'title': _('Detail Vlan - {}'.format(vlan['fabric'])),
'vlan': vlan
}
return render(request, 'maas/vlans/detail.html', context)
except (MAASError, ConnectionError, TimeoutError) as e:
sweetify.sweetalert(request, 'Warning', icon='error', text=str(e), button='Ok', timer=5000)
return redirect('maas:vlans:index')
def delete(request, vlan_id):
try:
vlan = maas.get_vlans(vlan_id)
fid = vlan['fabric_id']
vid = vlan['vid']
m = MAAS()
resp = m.delete(f'fabrics/{fid}/vlans/{vid}/')
if resp.status_code in m.ok:
            sweetify.success(request, _('Vlan Deleted Successfully'), timer=5000)
return redirect('maas:subnets:index')
return redirect('maas:subnets:index')
except (MAASError, ConnectionError, TimeoutError) as e:
sweetify.sweetalert(request, 'Warning', icon='error', text=str(e), button='Ok', timer=5000)
|
StarcoderdataPython
|
3370460
|
from .ContactUsController import ContactUsController
from .OpenApiController import OpenApiController
|
StarcoderdataPython
|
1794972
|
<filename>aidfin/conf/GlobalSettings.py<gh_stars>0
# -*- coding: UTF-8 -*-
"""
Default settings. Override these with settings in the module pointed to
by the environment variable.
"""
####################
# CORE #
####################
EVENT_LOGS = "/opt/logs/tensor"
TRAIN_DATA = "/opt/logs/tensor/mnist_with_summaries"
MAX_STEPS = 1000000
LOG_DEVICE_PLACEMENT = False
LEARNING_RATE = 0.01
DROPOUT = 0.9
NAME_SCOP = "input"
|
StarcoderdataPython
|
174547
|
import scapy.all as scapy
import netfilterqueue
import re
from threading import Thread
import time
import subprocess
class Injector:
def __init__(self):
self.ack_list=[]
self.injection = ''
self.injector_running = False
def enable_forward_chain(self):
subprocess.call(["iptables", "--flush"])
subprocess.call(["iptables", "-I", "FORWARD", "-j", "NFQUEUE", "--queue-num", "1"])
def set_load(self, packet, load):
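        # Replace the TCP payload and delete the IP length and IP/TCP checksum fields so
        # that scapy recomputes them when the modified packet is serialized.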
packet[scapy.Raw].load = load
del packet[scapy.IP].len
del packet[scapy.IP].chksum
del packet[scapy.TCP].chksum
return packet
def process_packet(self, packet):
if self.injection:
scapy_packet = scapy.IP(packet.get_payload())
if scapy_packet.haslayer(scapy.Raw):
if scapy_packet[scapy.TCP].dport == 80:
# scapy_packet.show()
print("[+] Request")
modified_load = re.sub("Accept-Encoding:.*?\\r\\n", "", scapy_packet[scapy.Raw].load.decode())
new_packet = self.set_load(scapy_packet, modified_load)
packet.set_payload(bytes(new_packet))
elif scapy_packet[scapy.TCP].sport == 80:
print("[+] Response")
# scapy_packet.show()
first = "</body>" in scapy_packet[scapy.Raw].load.decode()
second = ("<script>" + self.injection + "</script>") in scapy_packet[scapy.Raw].load.decode()
if first and not second:
injection = ("<script>" + self.injection + "</script>")
modified_load = scapy_packet[scapy.Raw].load.decode().replace("</body>", injection + "</body>")
print(modified_load)
len_search = re.search(r"(?:Content-Length:\s)(\d*)", modified_load)
if len_search and "text/html" in modified_load:
content_len = len_search.group(1)
new_len = int(content_len) + len(injection)
modified_load = modified_load.replace(content_len, str(new_len))
print("Content Length Modified")
new_packet = self.set_load(scapy_packet, modified_load)
packet.set_payload(bytes(new_packet))
print("modified")
packet.accept()
def set_injection(self, injection):
self.injection = injection
self.injector_running = True
def remove_injection(self):
self.injection = ''
print("Injection removed successfully")
def run(self):
print("Code Queue running...")
time.sleep(2)
try:
queue = netfilterqueue.NetfilterQueue()
queue.bind(1, self.process_packet)
queue.run()
except Exception as e:
raise Exception("Queue error")
def bind(self):
t = Thread(target = self.run)
self.injector_running = True
t.start()
# injector = Injector()
# injector.enable_forward_chain()
# injector.set_injection('alert("abc");')
# injector.bind()
|
StarcoderdataPython
|
104996
|
from helper import *
import json
URL = "https://www.umb.edu/academics/course_catalog/subjects/2018%20Spring"
SEM = "2018 Spring"
# Get the catalog page
text = read_URL(URL)
# Get the list of majors
cut = cut_text(text, "<h3>Undergraduate Subjects</h3>", "</ul>")
urls = get_URLs(cut)
cut = cut_text(text, "<h3>Graduate Subjects</h3>", "</ul>")
urls += get_URLs(cut)
majors = []
for i in urls:
read = read_URL(i)
title = cut_text(read, "page-title", "</h2>")[12:-5]
m_url = cut_text(read, "<p>" + SEM, "</div>")
m_urls = get_URLs(m_url)
courses = []
for j in m_urls:
courses.append(process_course(j))
major = {"major": title, "courses": courses}
majors.append(major)
with open("umb.txt", 'wb') as outfile:
json.dump(majors, outfile)
|
StarcoderdataPython
|
27321
|
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the MIT license.
# See the LICENSE file in the project root for more information.
import pyspark
from pyspark.sql import SparkSession
class TpchBase:
def __init__(self, spark, dir):
self.customer = spark.read.parquet(dir + "customer")
self.lineitem = spark.read.parquet(dir + "lineitem")
self.nation = spark.read.parquet(dir + "nation")
self.region = spark.read.parquet(dir + "region")
self.orders = spark.read.parquet(dir + "orders")
self.part = spark.read.parquet(dir + "part")
self.partsupp = spark.read.parquet(dir + "partsupp")
self.supplier = spark.read.parquet(dir + "supplier")
|
StarcoderdataPython
|
1719125
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Date created : 27 August 2016
@author : <NAME>
@description : Data Transformation
"""
import pandas as pd
import numpy as np
## Source file
df = pd.read_csv('path/to/file.csv')
## List of the original column names from the flat file
step = df['step']
gridx = df['grid_x']
gridy = df['grid_y']
gridz = df['grid_z']
velocity0 = df['velocity(0)']
velocity1 = df['velocity(1)']
velocity2 = df['velocity(2)']
pressure = df['pressure']
"""
Calculator
"""
# (1) Convert the dimensionless step value into step value with unit of seconds
time_step = (step * 4.00E-06)
#(2) Calculate the magnitudes of the velocity vectors
magnitude = np.sqrt(velocity0*velocity0 + velocity1*velocity1 + velocity2*velocity2)
"""
Creating new data frame
"""
# (1) Create a dictionary
# List all of the columns including its values
dictionary = {'step': time_step,
'grid_x':gridx,
'grid_y':gridy,
'grid_z':gridz,
'velocity(0)':velocity0,
'velocity(1)':velocity1,
'velocity(2)':velocity2,
'pressure':pressure,
'magnitudes':magnitude}
# (2) Create the new DataFrame from the dictionary
new_df = pd.DataFrame(dictionary,columns = ['step','grid_x','grid_y','grid_z','velocity(0)',
'velocity(1)','velocity(2)','pressure','magnitudes'])
"""
Transform the new DataFrame into flat file (CSV)
"""
new_df.to_csv(r'path/to/file.csv', header=True, index=None, sep=',', mode='a')
|
StarcoderdataPython
|
1615615
|
# -*- coding: utf-8 -*-
##############################################################################
import os.path
import requests
from bs4 import BeautifulSoup
import sys
import os
os.system("clear")
if sys.version_info[0] != 3:
print('''\t \n\t\tREQUIRED PYTHON 3.x\n\t\tinstall and try: python3
fb.py\n\t ''')
sys.exit()
PASSWORD_FILE = "passwords.txt"
MIN_PASSWORD_LENGTH = 6
POST_URL = 'https://www.facebook.com/login.php'
HEADERS = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',
}
PAYLOAD = {}
COOKIES = {}
def create_form():
form = dict()
cookies = {'fr': '0ZvhC3YwYm63ZZat1..Ba0Ipu.Io.AAA.0.0.Ba0Ipu.AWUPqDLy'}
data = requests.get(POST_URL, headers=HEADERS)
for i in data.cookies:
cookies[i.name] = i.value
data = BeautifulSoup(data.text, 'html.parser').form
if data.input['name'] == 'lsd':
form['lsd'] = data.input['value']
return form, cookies
def is_this_a_password(email, index, password):
global PAYLOAD, COOKIES
if index % 10 == 0:
PAYLOAD, COOKIES = create_form()
PAYLOAD['email'] = email
PAYLOAD['pass'] = password
r = requests.post(POST_URL, data=PAYLOAD, cookies=COOKIES, headers=HEADERS)
if 'Find Friends' in r.text or 'security code' in r.text or 'Two-factor authentication' in r.text or "Log Out" in r.text:
open('temp', 'w').write(str(r.content))
print('\npassword Trovata: ', password)
return True
return False
if __name__ == "__main__":
print('''\n
Welcome to Mr.Robot
...Brutalforce...
.-"""-.
/` `
,-==-. ; ;
/( \`. | |
| \ ,-. \ ( : ꙳ 𓄂𓆃 ꙳ ; _____
\ \`-.> ) | \ B.r-M.R / \_ \ __ _ _ __ ___
\_`. _|_| `._ _.` / /\/ / _` | '_ ` _ |
\o_`-_|/ _|`"'|-. /\/ /_ | (_| | | | | | |
/` `>. __ .-'`-|___|_ ) \____/ \__,_|_| |_| |_|
|\ (^ >' `>-----._/ )
| `._\ / / / | --- -; __ _ _
: `| ( ( | ___ _/ /\/\ _ __ /__\ ___ | |__ ___ | |_
\ `. `\ \_\ ___ _/ / \| '__| / \/// _ \| '_ \ / _ \| __|
`. `-='`t----' `--.______/ / /\/\ \ |_ / _ \ (_) | |_) | (_) | |_
`. ,-''-.) |---| \/ \/_(_) \/ \_/\___/|_.__/ \___/ \__|
`.(,-=-./ \_/
| | V
|-''`-. `.
/ ,-'-.\ `-.
| ( \ `.
\ \ | ,.'
''')
if not os.path.isfile(PASSWORD_FILE):
print("Il file delle password non esiste: ", PASSWORD_FILE)
sys.exit(0)
password_data = open(PASSWORD_FILE, 'r').read().split("\n")
print("File con le password selezionate: ", PASSWORD_FILE)
email = input('Enter Email/Username to target: ').strip()
for index, password in zip(range(password_data.__len__()), password_data):
password = password.<PASSWORD>()
if len(password) < MIN_PASSWORD_LENGTH:
continue
print("Prova la password [", index, "]: ", password)
if is_this_a_password(email, index, password):
break
|
StarcoderdataPython
|
4841661
|
<filename>exs/mundo_3/python/099.py
"""
Challenge 099
Problem: Write a program that has a function maior(), which receives several
parameters with integer values.
Your program has to analyse all the values and say which of them is the largest.
Solution:
"""
from time import sleep
# Function that prints a separator bar
def barra():
print('-' * 50)
# Function that finds the largest value among the ones given
def maior(*args):
barra()
print('Os valores estão em processo de análise...')
sleep(1.5)
    # Walk through the tuple of values
maior = None
for elemento in args:
print(elemento, end=' ')
sleep(0.5)
if maior is None or elemento > maior:
maior = elemento
print(f'--> {len(args)} Valores informados...')
print(f'O maior valor é --> {maior}')
# Calling the function with and without arguments
maior(2, 9, 4, 5, 7, 1)
maior(4, 7, 0)
maior(1, 2)
maior(6)
maior()
|
StarcoderdataPython
|
3304453
|
<filename>recipes/Python/102114_ThreadedContext/recipe-102114.py
"""
ThreadedContext is like a dictionary, but stores its data in a private namespace for every thread.
A thread can't access the data of another thread.
USAGE:
In Thread 1:
d = ThreadedContext()
d[1]=1
In Thread 2:
d[1] #raises KeyError exception
d[1]= 2
In Thread 1:
print d[1] #prints 1
In Thread 2:
print d[1] #prints 2
If a thread is deleted its keys in ThreadedContext are erased.
"""
from weakref import WeakKeyDictionary as _WeakKeyDictionary
from threading import currentThread as _currentThread
class ThreadedContext:
def __init__(self):
self.__thread_dict = _WeakKeyDictionary()
def __getattr__(self,name):
return getattr(self.__currentDict(),name)
def __currentDict(self, _currentThread = _currentThread):
try:
return self.__thread_dict[_currentThread()]
except KeyError:
self.__thread_dict[_currentThread()] = result = {}
return result
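# Minimal usage sketch (added, illustrative): reproduces the USAGE section of the module
# docstring with real threads. Item access (d[1] = ...) works on this old-style Python 2
# class because __getattr__ forwards the special-method lookup to the per-thread dict.
if __name__ == '__main__':
    from threading import Thread
    d = ThreadedContext()
    def _worker(value):
        d[1] = value          # stored in this thread's private namespace
        assert d[1] == value  # writes from other threads are never visible here
    workers = [Thread(target=_worker, args=(i,)) for i in range(5)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()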
|
StarcoderdataPython
|
3391277
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from datetime import datetime
import asyncio
import os
import json
import requests
import xmlrpc.client
import pprint
import pkg_resources
import time
from botbuilder.core import CardFactory, TurnContext, MessageFactory
from botbuilder.core.teams import (
TeamsActivityHandler,
TeamsInfo,
)
from botbuilder.schema import (
Activity,
ActivityTypes,
Attachment,
    CardAction,
    CardImage,
ChannelAccount,
ConversationAccount,
ConversationParameters,
HeroCard,
Mention,
MessageReaction,
)
from botbuilder.schema.teams import (
ChannelInfo,
FileDownloadInfo,
FileConsentCard,
FileConsentCardResponse,
FileInfoCard,
TeamsChannelAccount,
TeamsChannelData, # TODO: https://github.com/microsoft/botbuilder-python/pull/1069
TeamInfo,
MessagingExtensionAction,
MessagingExtensionActionResponse,
TaskModuleContinueResponse,
MessagingExtensionResult,
TaskModuleTaskInfo,
MessagingExtensionAttachment,
MessagingExtensionQuery,
MessagingExtensionResult,
MessagingExtensionResponse,
)
from example_data import ExampleData
from adaptive_card_helper import (
create_adaptive_card_editor,
create_adaptive_card_preview,
)
from botbuilder.schema.teams.additional_properties import ContentType
from botbuilder.schema._connector_client_enums import ActionTypes
from activity_log import ActivityLog
from typing import List
class PythonIntegrationBot(TeamsActivityHandler):
def __init__(self, app_id: str, app_password: str, log: ActivityLog):
self._app_id = app_id
self._app_password = <PASSWORD>
self._log = log
self._activity_ids = []
async def on_message_activity(self, turn_context: TurnContext):
if turn_context.activity.text:
TurnContext.remove_recipient_mention(turn_context.activity)
turn_context.activity.text = turn_context.activity.text.strip().lower()
text = turn_context.activity.text
if text == "command:reset":
await self._reset_bot(turn_context)
elif text == "command:getsdkversions":
await self._return_current_sdk_version(turn_context)
elif text == "proactivent":
await self._send_proactive_non_threaded_message(turn_context)
elif text == "proactive":
await self._send_proactive_threaded_message(turn_context)
elif text == "delete":
await self._delete_activity(turn_context)
elif text == "update":
await self._update_activity(turn_context)
elif text == "1":
await self._send_adaptive_card(turn_context, 1)
elif text == "2":
await self._send_adaptive_card(turn_context, 2)
elif text == "3":
await self._send_adaptive_card(turn_context, 3)
elif text == "hero":
await self._send_hero_card(turn_context)
elif text == "thumbnail":
await self._send_thumbnail_card(turn_context)
elif text == "receipt":
await self._send_receipt_card(turn_context)
elif text == "signin":
await self._send_signin_card(turn_context)
elif text == "carousel":
await self._send_carousel_card(turn_context)
elif text == "list":
await self._send_list_card(turn_context)
elif text == "o365":
await self._send_o365_card(turn_context)
elif text == "file":
filename = "teams-logo.png"
file_path = "files/" + filename
file_size = os.path.getsize(file_path)
await self._send_file_card(turn_context, filename, file_size)
elif text == "show members":
await self._show_members(turn_context)
elif text == "show channels":
await self._show_channels(turn_context)
elif text == "show details":
await self._show_team_details(turn_context)
elif text == "task module":
await self._show_task_module(turn_context)
elif text == "mention":
await self._mention_activity(turn_context)
elif text == "upload file":
await self._show_upload_file(turn_context)
else:
await self._send_message_and_log_activity_id(
turn_context, f"You said: {turn_context.activity.text}"
)
else:
await turn_context.send_activity(
MessageFactory.text("App sent a message with empty text")
)
if turn_context.activity.value:
await self._send_message_and_log_activity_id(
turn_context,
f"but with value {json.dumps(turn_context.activity.value)}",
)
return
async def on_reactions_added(
self, message_reactions: List[MessageReaction], turn_context: TurnContext
):
for reaction in message_reactions:
activity = await self._log.find(turn_context.activity.reply_to_id)
if activity:
await self._send_message_and_log_activity_id(
turn_context,
f"You added '{reaction.type}' regarding '{activity.text}'",
)
else:
await self._send_message_and_log_activity_id(
turn_context,
f"Activity {turn_context.activity.reply_to_id} not found in the log.",
)
async def on_reactions_removed(
self, message_reactions: List[MessageReaction], turn_context: TurnContext
):
for reaction in message_reactions:
activity = await self._log.find(turn_context.activity.reply_to_id)
if activity:
await self._send_message_and_log_activity_id(
turn_context,
f"You removed '{reaction.type}' regarding '{activity.text}'",
)
else:
await self._send_message_and_log_activity_id(
turn_context,
f"Activity {turn_context.activity.reply_to_id} not found in the log.",
)
async def on_teams_members_removed(
self, teams_members_removed: [TeamsChannelAccount], turn_context: TurnContext
):
if not turn_context:
raise Exception("turn_context cannot be null")
# TODO: Update once https://github.com/microsoft/botbuilder-python/pull/1069 is resolved
channel_data = TeamsChannelData().deserialize(
turn_context.activity.channel_data
)
if channel_data:
team_info = channel_data.team
location = (
team_info.name
if team_info
else turn_context.activity.conversation.conversation_type
)
hero_card = HeroCard(
text=" ".join(
[
f"{member.id} removed from {location}"
for member in teams_members_removed
]
)
)
await turn_context.send_activity(
MessageFactory.attachment(CardFactory.hero_card(hero_card))
)
async def on_teams_members_added( # pylint: disable=unused-argument
self,
teams_members_added: [TeamsChannelAccount],
team_info: TeamInfo,
turn_context: TurnContext,
):
if not turn_context:
raise Exception("turn_context cannot be null")
location = (
team_info.name
if team_info
else turn_context.activity.conversation.conversation_type
)
hero_card = HeroCard(
text=" ".join(
[f"{member.id} joined {location}" for member in teams_members_added]
)
)
await turn_context.send_activity(
MessageFactory.attachment(CardFactory.hero_card(hero_card))
)
async def _create_with_preview(
self, turn_context: TurnContext, action: MessagingExtensionAction
):
preview_card = create_adaptive_card_preview(
user_text=action.data["Question"],
is_multi_select=action.data["MultiSelect"],
option1=action.data["Option1"],
option2=action.data["Option2"],
option3=action.data["Option3"],
)
extension_result = MessagingExtensionResult(
type="botMessagePreview",
activity_preview=MessageFactory.attachment(preview_card),
)
return MessagingExtensionActionResponse(compose_extension=extension_result)
async def _send_hero_card(self, turn_context: TurnContext):
card_path = os.path.join(os.getcwd(), "cards\\hero_card.json")
with open(card_path, "rb") as in_file:
card_data = json.load(in_file)
await turn_context.send_activity(
MessageFactory.attachment(
Attachment(
content_type=CardFactory.content_types.hero_card, content=card_data
)
)
)
async def _send_thumbnail_card(self, turn_context: TurnContext):
card_path = os.path.join(os.getcwd(), "cards\\thumbnail_card.json")
with open(card_path, "rb") as in_file:
card_data = json.load(in_file)
await turn_context.send_activity(
MessageFactory.attachment(
Attachment(
content_type=CardFactory.content_types.thumbnail_card,
content=card_data,
)
)
)
async def _send_receipt_card(self, turn_context: TurnContext):
card_path = os.path.join(os.getcwd(), "cards\\receipt_card.json")
with open(card_path, "rb") as in_file:
card_data = json.load(in_file)
await turn_context.send_activity(
MessageFactory.attachment(
Attachment(
content_type=CardFactory.content_types.receipt_card,
content=card_data,
)
)
)
async def _send_signin_card(self, turn_context: TurnContext):
card_path = os.path.join(os.getcwd(), "cards\\signin_card.json")
with open(card_path, "rb") as in_file:
card_data = json.load(in_file)
await turn_context.send_activity(
MessageFactory.attachment(
Attachment(
content_type=CardFactory.content_types.signin_card,
content=card_data,
)
)
)
async def _return_current_sdk_version(self, turn_context: TurnContext):
version = pkg_resources.get_distribution("botbuilder-core").version
await turn_context.send_activity(MessageFactory.text(f"{turn_context.activity.value} The bot version is {version}"))
async def _reset_bot(self, turn_context):
await self._log.delete(self._activity_ids)
self._activity_ids = []
await turn_context.send_activity(MessageFactory.text("I'm reset. Test away!"))
async def _send_carousel_card(self, turn_context: TurnContext):
card_path = os.path.join(os.getcwd(), "cards\\hero_card.json")
with open(card_path, "rb") as in_file:
card_data = json.load(in_file)
attachment = Attachment(
content_type=CardFactory.content_types.hero_card, content=card_data
)
await turn_context.send_activity(
MessageFactory.carousel([attachment, attachment, attachment])
)
async def _show_task_module(self, turn_context: TurnContext):
card_path = os.path.join(os.getcwd(), "cards\\task_module_hero_card.json")
with open(card_path, "rb") as in_file:
card_data = json.load(in_file)
await turn_context.send_activity(MessageFactory.attachment(card_data))
async def _send_list_card(self, turn_context: TurnContext):
card_path = os.path.join(os.getcwd(), "cards\\hero_card.json")
with open(card_path, "rb") as in_file:
card_data = json.load(in_file)
attachment = Attachment(
content_type=CardFactory.content_types.hero_card, content=card_data
)
await turn_context.send_activity(
MessageFactory.list([attachment, attachment, attachment])
)
async def _send_o365_card(self, turn_context: TurnContext):
card_path = os.path.join(os.getcwd(), "cards\\o365_card.json")
with open(card_path, "rb") as in_file:
card_data = json.load(in_file)
await turn_context.send_activity(
MessageFactory.attachment(
Attachment(
content_type="application/vnd.microsoft.teams.card.o365connector",
content=card_data,
)
)
)
async def _send_file_card(self, turn_context: TurnContext):
card_path = os.path.join(os.getcwd(), "cards\\file_card.json")
with open(card_path, "rb") as in_file:
card_data = json.load(in_file)
await turn_context.send_activity(MessageFactory.attachment(card_data))
async def _create_card_command(
self,
turn_context: TurnContext, # pylint: disable=unused-argument
action: MessagingExtensionAction,
) -> MessagingExtensionActionResponse:
title = action.data["title"]
sub_title = action.data["subTitle"]
text = action.data["text"]
card = HeroCard(title=title, subtitle=sub_title, text=text)
attachment = MessagingExtensionAttachment(
content=card,
content_type=CardFactory.content_types.hero_card,
preview=CardFactory.hero_card(card),
)
attachments = [attachment]
extension_result = MessagingExtensionResult(
attachment_layout="list", type="result", attachments=attachments
)
return MessagingExtensionActionResponse(compose_extension=extension_result)
async def _send_adaptive_card(self, turn_context: TurnContext, card_type: int):
card = None
if card_type == 1:
            card_path = os.path.join(os.getcwd(), "cards\\bot_action.json")
with open(card_path, "rb") as in_file:
card_data = json.load(in_file)
card = CardFactory.adaptive_card(card_data)
elif card_type == 2:
            card_path = os.path.join(os.getcwd(), "cards\\task_module.json")
with open(card_path, "rb") as in_file:
card_data = json.load(in_file)
card = CardFactory.adaptive_card(card_data)
elif card_type == 3:
            card_path = os.path.join(os.getcwd(), "cards\\submit_action.json")
with open(card_path, "rb") as in_file:
card_data = json.load(in_file)
card = CardFactory.adaptive_card(card_data)
else:
raise Exception("Invalid card type. Must be 1, 2 or 3.")
reply_activity = MessageFactory.attachment(card)
await turn_context.send_activity(reply_activity)
async def _update_activity(self, turn_context: TurnContext):
for activity_id in self._activity_ids:
new_activity = MessageFactory.text(turn_context.activity.text)
new_activity.id = activity_id
await turn_context.update_activity(new_activity)
return
async def _share_message_command(
self,
turn_context: TurnContext, # pylint: disable=unused-argument
action: MessagingExtensionAction,
) -> MessagingExtensionActionResponse:
# The user has chosen to share a message by choosing the 'Share Message' context menu command.
title = f"{action.message_payload.from_property.user.display_name} orignally sent this message:"
text = action.message_payload.body.content
card = HeroCard(title=title, text=text)
if not action.message_payload.attachments is None:
# This sample does not add the MessagePayload Attachments. This is left as an
# exercise for the user.
card.subtitle = (
f"({len(action.message_payload.attachments)} Attachments not included)"
)
# This Messaging Extension example allows the user to check a box to include an image with the
# shared message. This demonstrates sending custom parameters along with the message payload.
include_image = action.data["includeImage"]
if include_image == "true":
image = CardImage(
url="https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQtB3AwMUeNoq4gUBGe6Ocj8kyh3bXa9ZbV7u1fVKQoyKFHdkqU"
)
card.images = [image]
attachment = MessagingExtensionAttachment(
content=card,
content_type=CardFactory.content_types.hero_card,
preview=CardFactory.hero_card(card),
)
extension_result = MessagingExtensionResult(
attachment_layout="list", type="result", attachments=[attachment]
)
return MessagingExtensionActionResponse(compose_extension=extension_result)
async def on_teams_team_renamed_activity( # pylint: disable=unused-argument
self, team_info: TeamInfo, turn_context: TurnContext
):
if not turn_context:
raise Exception("turn_context cannot be null")
if not team_info:
raise Exception("team_info cannot be null")
hero_card = HeroCard(text=f"{team_info.name} is the Team name")
await turn_context.send_activity(
MessageFactory.attachment(CardFactory.hero_card(hero_card))
)
async def on_teams_channel_deleted( # pylint: disable=unused-argument
self, channel_info: ChannelInfo, team_info: TeamInfo, turn_context: TurnContext
):
if not turn_context:
raise Exception("turn_context cannot be null")
if not channel_info:
raise Exception("channel_info cannot be null")
if not team_info:
raise Exception("team_info cannot be null")
hero_card = HeroCard(text=f"{channel_info.name} is the Channel deleted")
await turn_context.send_activity(
MessageFactory.attachment(CardFactory.hero_card(hero_card))
)
async def on_teams_channel_created( # pylint: disable=unused-argument
self, channel_info: ChannelInfo, team_info: TeamInfo, turn_context: TurnContext
):
if not turn_context:
raise Exception("turn_context cannot be null")
if not channel_info:
raise Exception("channel_info cannot be null")
if not team_info:
raise Exception("team_info cannot be null")
hero_card = HeroCard(text=f"{channel_info.name} is the Channel Created")
await turn_context.send_activity(
MessageFactory.attachment(CardFactory.hero_card(hero_card))
)
async def on_teams_messaging_extension_submit_action( # pylint: disable=unused-argument
self, turn_context: TurnContext, action: MessagingExtensionAction
) -> MessagingExtensionActionResponse:
if action.command_id == "createCard":
return await self._create_card_command(turn_context, action)
if action.command_id == "shareMessage":
return await self._share_message_command(turn_context, action)
if action.command_id == "createWithPreview":
return await self._create_with_preview(turn_context, action)
# preview_card = create_adaptive_card_preview(
# user_text=action.data["Question"],
# is_multi_select=action.data["MultiSelect"],
# option1=action.data["Option1"],
# option2=action.data["Option2"],
# option3=action.data["Option3"],
# )
# extension_result = MessagingExtensionResult(
# type="botMessagePreview",
# activity_preview=MessageFactory.attachment(preview_card),
# )
        raise ValueError(f"Unknown command id: {action.command_id}")
async def on_teams_messaging_extension_fetch_task(
self, turn_context: TurnContext, action: MessagingExtensionAction
) -> MessagingExtensionActionResponse:
card = create_adaptive_card_editor()
task_info = TaskModuleTaskInfo(
card=card, height=450, title="Task Module Fetch Example", width=500
)
continue_response = TaskModuleContinueResponse(value=task_info)
return MessagingExtensionActionResponse(task=continue_response)
async def on_teams_messaging_extension_bot_message_preview_edit( # pylint: disable=unused-argument
self, turn_context: TurnContext, action: MessagingExtensionAction
) -> MessagingExtensionActionResponse:
activity_preview = action.bot_activity_preview[0]
content = activity_preview.attachments[0].content
data = self._get_example_data(content)
card = create_adaptive_card_editor(
data.question,
data.is_multi_select,
data.option1,
data.option2,
data.option3,
)
task_info = TaskModuleTaskInfo(
card=card, height=450, title="Task Module Fetch Example", width=500
)
continue_response = TaskModuleContinueResponse(value=task_info)
return MessagingExtensionActionResponse(task=continue_response)
async def on_teams_messaging_extension_bot_message_preview_send( # pylint: disable=unused-argument
self, turn_context: TurnContext, action: MessagingExtensionAction
) -> MessagingExtensionActionResponse:
activity_preview = action.bot_activity_preview[0]
content = activity_preview.attachments[0].content
data = self._get_example_data(content)
card = create_adaptive_card_preview(
data.question,
data.is_multi_select,
data.option1,
data.option2,
data.option3,
)
message = MessageFactory.attachment(card)
await turn_context.send_activity(message)
async def on_teams_channel_renamed( # pylint: disable=unused-argument
self, channel_info: ChannelInfo, team_info: TeamInfo, turn_context: TurnContext
):
if not turn_context:
raise Exception("turn_context cannot be null")
if not channel_info:
raise Exception("channel_info cannot be null")
hero_card = HeroCard(text=f"{channel_info.name} is the new Channel name")
await turn_context.send_activity(
MessageFactory.attachment(CardFactory.hero_card(hero_card))
)
async def on_teams_file_consent_accept(
self,
turn_context: TurnContext,
file_consent_card_response: FileConsentCardResponse,
):
"""
The user accepted the file upload request. Do the actual upload now.
"""
file_path = "files/" + file_consent_card_response.context["filename"]
file_size = os.path.getsize(file_path)
headers = {
"Content-Length": f'"{file_size}"',
"Content-Range": f"bytes 0-{file_size-1}/{file_size}",
}
response = requests.put(
file_consent_card_response.upload_info.upload_url,
open(file_path, "rb"),
headers=headers,
)
if response.status_code != 201:
print(
f"Failed to upload, status {response.status_code}, file_path={file_path}"
)
await self._file_upload_failed(turn_context, "Unable to upload file.")
else:
await self._file_upload_complete(turn_context, file_consent_card_response)
async def on_teams_file_consent_decline(
self,
turn_context: TurnContext,
file_consent_card_response: FileConsentCardResponse,
):
"""
#The user declined the file upload.
"""
context = file_consent_card_response.context
reply = self._create_reply(
turn_context.activity,
f"Declined. We won't upload file <b>{context['filename']}</b>.",
"xml",
)
await turn_context.send_activity(reply)
async def on_teams_messaging_extension_query(
self, turn_context: TurnContext, query: MessagingExtensionQuery
):
search_query = str(query.parameters[0].value).strip()
if search_query == "":
await turn_context.send_activity(
MessageFactory.text("You cannot enter a blank string for the search")
)
return
search_results = self._get_search_results(search_query)
attachments = []
for obj in search_results:
hero_card = HeroCard(
title=obj["name"], tap=CardAction(type="invoke", value=obj)
)
attachment = MessagingExtensionAttachment(
content_type=CardFactory.content_types.hero_card,
content=HeroCard(title=obj["name"]),
preview=CardFactory.hero_card(hero_card),
)
attachments.append(attachment)
return MessagingExtensionResponse(
compose_extension=MessagingExtensionResult(
type="result", attachment_layout="list", attachments=attachments
)
)
async def on_teams_messaging_extension_select_item(
self, turn_context: TurnContext, query
) -> MessagingExtensionResponse:
hero_card = HeroCard(
title=query["name"],
subtitle=query["summary"],
buttons=[
CardAction(
type="openUrl", value=f"https://pypi.org/project/{query['name']}"
)
],
)
attachment = MessagingExtensionAttachment(
content_type=CardFactory.content_types.hero_card, content=hero_card
)
return MessagingExtensionResponse(
compose_extension=MessagingExtensionResult(
type="result", attachment_layout="list", attachments=[attachment]
)
)
def _get_example_data(self, content: dict) -> ExampleData:
body = content["body"]
question = body[1]["text"]
choice_set = body[3]
multi_select = "isMultiSelect" in choice_set
option1 = choice_set["choices"][0]["value"]
option2 = choice_set["choices"][1]["value"]
option3 = choice_set["choices"][2]["value"]
return ExampleData(question, multi_select, option1, option2, option3)
def _get_search_results(self, query: str):
client = xmlrpc.client.ServerProxy("https://pypi.org/pypi")
search_results = client.search({"name": query})
return search_results[:10] if len(search_results) > 10 else search_results
async def _show_members(self, turn_context):
members = await TeamsInfo.get_members(turn_context)
reply_activity = MessageFactory.text(
f"Total of {len(members)} members are currently in the team"
        )
        await turn_context.send_activity(reply_activity)
messages = [
f"{member.aad_object_id} --> {member.name} --> {member.user_principal_name}"
for member in members
]
await self._send_in_batches(turn_context, messages)
async def _show_channels(self, turn_context: TurnContext):
team_id = TeamsInfo.get_team_id(turn_context)
if team_id:
channels = await TeamsInfo.get_team_channels(turn_context, team_id)
reply_activity = MessageFactory.text(
f"Total of {len(channels)} channels are currently in team"
)
await turn_context.send_activity(reply_activity)
messages = [f"{channel.id} --> {channel.name}" for channel in channels]
await self._send_in_batches(turn_context, messages)
else:
await self._send_message_and_log_activity_id(turn_context, "This only works in the team scope")
async def _send_in_batches(self, turn_context: TurnContext, messages: List[str]):
batch = []
for msg in messages:
batch.append(msg)
if len(batch) == 10:
await self._send_message_and_log_activity_id(
turn_context, "<br>".join(batch)
)
batch.clear()
if len(batch) > 0:
await self._send_message_and_log_activity_id(
turn_context, "<br>".join(batch)
)
async def _show_team_details(self, turn_context: TurnContext):
team_id = TeamsInfo.get_team_id(turn_context)
if team_id:
team_details = await TeamsInfo.get_team_details(turn_context, team_id)
await self._send_message_and_log_activity_id(
turn_context,
f"The team name is {team_details.name}. The team ID is {team_details.id}. The ADD Group Id is {team_details.aad_group_id}.",
)
else:
await self._send_message_and_log_activity_id(turn_context, "This only works in the team scope")
async def _send_message_and_log_activity_id(
self, turn_context: TurnContext, text: str
):
reply_activity = MessageFactory.text(text)
resource_response = await turn_context.send_activity(reply_activity)
await self._log.append(resource_response.id, reply_activity)
self._activity_ids.append(resource_response.id)
async def _show_card(self, turn_context: TurnContext):
card = HeroCard(
title="Welcome Card",
text="Click the buttons to update this card",
buttons=[
CardAction(
type=ActionTypes.message_back,
title="Update Card",
text="UpdateCardAction",
value={"count": 0},
),
CardAction(
type=ActionTypes.message_back,
title="Message all memebers",
text="MessageAllMembers",
),
],
)
await turn_context.send_activity(
MessageFactory.attachment(CardFactory.hero_card(card))
)
async def _show_upload_file(self, turn_context: TurnContext):
message_with_file_download = (
False
if not turn_context.activity.attachments
else turn_context.activity.attachments[0].content_type
== ContentType.FILE_DOWNLOAD_INFO
)
if message_with_file_download:
file = turn_context.activity.attachments[0]
file_download = FileDownloadInfo.deserialize(file.content)
file_path = "files/" + file.name
response = requests.get(file_download.download_url, allow_redirects=True)
open(file_path, "wb").write(response.content)
reply = self._create_reply(
turn_context.activity, f"Complete downloading <b>{file.name}</b>", "xml"
)
await turn_context.send_activity(reply)
else:
filename = "teams-logo.png"
file_path = "files/" + filename
file_size = os.path.getsize(file_path)
await self._send_file_card(turn_context, filename, file_size)
async def _mention_activity(self, turn_context: TurnContext):
mention = Mention(
mentioned=turn_context.activity.from_property,
text=f"<at>{turn_context.activity.from_property.name}</at>",
type="mention",
)
reply_activity = MessageFactory.text(f"Hello {mention.text}")
reply_activity.entities = [Mention().deserialize(mention.serialize())]
await turn_context.send_activity(reply_activity)
async def _update_card_activity(self, turn_context: TurnContext):
data = turn_context.activity.value
data["count"] += 1
card = CardFactory.hero_card(
HeroCard(
title="Welcome Card",
text=f"Updated count - {data['count']}",
buttons=[
CardAction(
type=ActionTypes.message_back,
title="Update Card",
value=data,
text="UpdateCardAction",
),
CardAction(
type=ActionTypes.message_back,
title="Message all members",
text="MessageAllMembers",
),
CardAction(
type=ActionTypes.message_back,
title="Delete card",
text="Delete",
),
],
)
)
updated_activity = MessageFactory.attachment(card)
updated_activity.id = turn_context.activity.reply_to_id
await turn_context.update_activity(updated_activity)
async def _message_all_members(self, turn_context: TurnContext):
team_members = await TeamsInfo.get_members(turn_context)
for member in team_members:
conversation_reference = TurnContext.get_conversation_reference(
turn_context.activity
)
conversation_parameters = ConversationParameters(
is_group=False,
bot=turn_context.activity.recipient,
members=[member],
tenant_id=turn_context.activity.conversation.tenant_id,
)
async def get_ref(tc1):
conversation_reference_inner = TurnContext.get_conversation_reference(
tc1.activity
)
return await tc1.adapter.continue_conversation(
conversation_reference_inner, send_message, self._app_id
)
async def send_message(tc2: TurnContext):
return await tc2.send_activity(
f"Hello {member.name}. I'm a Teams conversation bot."
) # pylint: disable=cell-var-from-loop
await turn_context.adapter.create_conversation(
conversation_reference, get_ref, conversation_parameters
)
await turn_context.send_activity(
MessageFactory.text("All messages have been sent")
)
async def _delete_activity(self, turn_context: TurnContext):
activity = MessageFactory.text("This message will be deleted in 5 seconds")
activity.reply_to_id = turn_context.activity.id
activity_id = await turn_context.send_activity(activity)
        await asyncio.sleep(5)  # non-blocking wait; time.sleep would stall the event loop
await turn_context.delete_activity(activity_id.id)
async def _send_file_card(
self, turn_context: TurnContext, filename: str, file_size: int
):
consent_context = {"filename": filename}
file_card = FileConsentCard(
description="This is the file I want to send you",
size_in_bytes=file_size,
accept_context=consent_context,
decline_context=consent_context,
)
as_attachment = Attachment(
content=file_card.serialize(),
content_type=ContentType.FILE_CONSENT_CARD,
name=filename,
)
reply_activity = self._create_reply(turn_context.activity)
reply_activity.attachments = [as_attachment]
await turn_context.send_activity(reply_activity)
async def _file_upload_complete(
self,
turn_context: TurnContext,
file_consent_card_response: FileConsentCardResponse,
):
"""
The file was uploaded, so display a FileInfoCard so the user can view the
file in Teams.
"""
name = file_consent_card_response.upload_info.name
download_card = FileInfoCard(
unique_id=file_consent_card_response.upload_info.unique_id,
file_type=file_consent_card_response.upload_info.file_type,
)
as_attachment = Attachment(
content=download_card.serialize(),
content_type=ContentType.FILE_INFO_CARD,
name=name,
content_url=file_consent_card_response.upload_info.content_url,
)
reply = self._create_reply(
turn_context.activity,
f"<b>File uploaded.</b> Your file <b>{name}</b> is ready to download",
"xml",
)
reply.attachments = [as_attachment]
await turn_context.send_activity(reply)
async def _file_upload_failed(self, turn_context: TurnContext, error: str):
reply = self._create_reply(
turn_context.activity,
f"<b>File upload failed.</b> Error: <pre>{error}</pre>",
"xml",
)
await turn_context.send_activity(reply)
def _create_reply(self, activity, text=None, text_format=None):
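        # Build a reply addressed back to the sender of `activity`: from/recipient are
        # swapped relative to the incoming activity and reply_to_id points at it.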
return Activity(
type=ActivityTypes.message,
timestamp=datetime.utcnow(),
from_property=ChannelAccount(
id=activity.recipient.id, name=activity.recipient.name
),
recipient=ChannelAccount(
id=activity.from_property.id, name=activity.from_property.name
),
reply_to_id=activity.id,
service_url=activity.service_url,
channel_id=activity.channel_id,
conversation=ConversationAccount(
is_group=activity.conversation.is_group,
id=activity.conversation.id,
name=activity.conversation.name,
),
text=text or "",
text_format=text_format or None,
locale=activity.locale,
)
async def _send_proactive_non_threaded_message(self, turn_context: TurnContext):
conversation_reference = TurnContext.get_conversation_reference(
turn_context.activity
)
conversation_parameters = ConversationParameters(
is_group=False,
bot=turn_context.activity.recipient,
members=[turn_context.activity.from_property],
tenant_id=turn_context.activity.conversation.tenant_id,
)
proactive_message = MessageFactory.text("This is a proactive message")
proactive_message.label = turn_context.activity.id
async def get_ref(tc1):
conversation_reference_inner = TurnContext.get_conversation_reference(
tc1.activity
)
return await tc1.adapter.continue_conversation(
conversation_reference_inner, send_message, self._app_id
)
async def send_message(tc2: TurnContext):
return await tc2.send_activity(proactive_message)
await turn_context.adapter.create_conversation(
conversation_reference, get_ref, conversation_parameters
)
async def _send_proactive_threaded_message(self, turn_context: TurnContext):
activity = MessageFactory.text("I will send two messages to this thread")
result = await turn_context.send_activity(activity)
team_id = TeamsInfo.get_team_id(turn_context)
for i in range(2):
proactive_activity = MessageFactory.text(
f"This is message {i+1}/2 that will be sent."
)
TurnContext.apply_conversation_reference(
proactive_activity, TurnContext.get_conversation_reference(activity)
)
await turn_context.send_activity(proactive_activity)
|
StarcoderdataPython
|
1793890
|
from common.tools.blockstring import BlockString
from common.tools.padders import PKCS7Padder, PKCS7Unpadder
from common.tools.xor import ByteXOR
class BlockCipherMode(object):
DEFAULT_BLOCK_SIZE = 16
@classmethod
def name(cls):
return cls.__name__
def __init__(self, block_size=None):
self.block_size = self.DEFAULT_BLOCK_SIZE if block_size is None\
else block_size
def _pad(self, string):
return PKCS7Padder(string).value(self.block_size)
def _unpad_if_needed(self, index, block):
if self.block_string.is_last_block_index(index):
block = PKCS7Unpadder(block).value()
return block
def _iterate_blocks_with(self, block_string, cipher, callback):
self.cipher = cipher
self.block_string = block_string
result = BlockString(block_size=self.block_size)
return reduce(lambda _result, block: _result + callback(*block),
enumerate(self.block_string), result)
def _block_encryption_callback(self, message, cipher):
raise NotImplementedError
def _block_decryption_callback(self, message, cipher):
raise NotImplementedError
def encrypt_with_cipher(self, plaintext, cipher):
if type(plaintext) != BlockString:
plaintext = BlockString(plaintext, self.block_size)
plaintext = self._pad(plaintext)
return self._iterate_blocks_with(plaintext, cipher,
self._block_encryption_callback)
def decrypt_with_cipher(self, ciphertext, cipher):
if type(ciphertext) != BlockString:
ciphertext = BlockString(ciphertext, self.block_size)
return self._iterate_blocks_with(ciphertext, cipher,
self._block_decryption_callback)
class ECB(BlockCipherMode):
def _block_encryption_callback(self, index, block):
return self.cipher.encrypt_block(block)
def _block_decryption_callback(self, index, block):
plaintext_block = self.cipher.decrypt_block(block)
plaintext_block = self._unpad_if_needed(index, plaintext_block)
return plaintext_block
class CBC(BlockCipherMode):
def __init__(self, iv, block_size=None):
BlockCipherMode.__init__(self, block_size)
self.iv = iv
def _xor(self, string1, string2):
return ByteXOR(string1, string2).value()
def _block_encryption_callback(self, index, block):
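        # CBC chaining: XOR the plaintext block with the previous ciphertext block
        # (the IV for the very first block) before handing it to the block cipher.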
if index == 0:
self.last_ciphertext_block = self.iv
xor_block = self._xor(block, self.last_ciphertext_block)
ciphertext_block = self.cipher.encrypt_block(xor_block)
self.last_ciphertext_block = ciphertext_block
return ciphertext_block
def _block_decryption_callback(self, index, block):
if index == 0:
self.last_ciphertext_block = self.iv
decrypted_block = self.cipher.decrypt_block(block)
plaintext_block = self._xor(decrypted_block,
self.last_ciphertext_block)
plaintext_block = self._unpad_if_needed(index, plaintext_block)
self.last_ciphertext_block = block
return plaintext_block
class CTR(BlockCipherMode):
def __init__(self, counter=None, nonce=None, block_size=None):
from counter import DefaultCounter, NonceBasedCounter
BlockCipherMode.__init__(self, block_size)
if nonce is not None:
counter = NonceBasedCounter(nonce, block_size)
self.counter = counter if counter is not None\
else DefaultCounter(block_size)
def _pad(self, plaintext):
# CTR mode does not need padding.
return plaintext
def _xor(self, key, block):
block_length = len(block)
return ByteXOR(block, key[:block_length]).value()
def _block_callback(self, index, block):
key_argument = self.counter.count(index)
key = self.cipher.encrypt_block(key_argument)
return self._xor(key, block)
def _block_encryption_callback(self, index, block):
return self._block_callback(index, block)
def _block_decryption_callback(self, index, block):
return self._block_callback(index, block)
class RandomAccessCTR(CTR):
def __init__(self, *args, **kwargs):
CTR.__init__(self, *args, **kwargs)
self.keystream = str()
def get_keystream(self):
return self.keystream
def _xor(self, key, block):
self.keystream += key
return CTR._xor(self, key, block)
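# Minimal usage sketch (added, illustrative; assumes the common.tools package imported above
# is available and a Python 2 interpreter, since this module uses the builtin `reduce`).
# The modes only need a cipher object exposing encrypt_block/decrypt_block, so a trivial
# identity "cipher" is enough to exercise the ECB padding/unpadding round trip.
if __name__ == '__main__':
    class _IdentityCipher(object):
        def encrypt_block(self, block):
            return block
        def decrypt_block(self, block):
            return block
    mode = ECB(block_size=16)
    message = 'YELLOW SUBMARINE IS ON ITS WAY'
    ciphertext = mode.encrypt_with_cipher(message, cipher=_IdentityCipher())
    recovered = mode.decrypt_with_cipher(ciphertext, cipher=_IdentityCipher())
    # `recovered` is a BlockString holding the original message again (padding removed)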
|
StarcoderdataPython
|
1612549
|
<reponame>mbaak/Eskapade
from eskapade.data_mimic.links import *
|
StarcoderdataPython
|
192972
|
<reponame>matis11/Human-Computer-Communication
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import matplotlib.pyplot as plt
from matplotlib.legend_handler import HandlerLine2D
class Algorithm:
source_file = ""
label = ""
color = ""
def __init__(self, source_file, label, color):
self.source_file = source_file
self.label = label
self.color = color
OUTPUT_FILENAME = 'myplot.pdf'
Y_AXIS_LABEL = "Odsetek wygranych gier"
X_AXIS_LABEL = "Rozegranych gier"
ALGORITHMS = [Algorithm('2cel.csv', '2-Coev', 'purple'),
Algorithm('2cel-rs.csv', '2-Coev-RS', 'red'),
Algorithm('cel.csv', '1-Coev', 'black'),
Algorithm('cel-rs.csv', '1-Coev-RS', 'green'),
Algorithm('rsel.csv', '1-Evol-RS', 'blue')]
def read_data(filename):
    with open(filename, 'r', newline='') as csvfile:
        x, y = [], []
        raw_data = csv.reader(csvfile, delimiter=',', quotechar='|')
        data = list(raw_data)[1:]
        for row in data:
            # column 1 is the number of games played, the remaining columns are per-run results
            x.append(float(row[1]))
            values = [float(value) for value in row[2:]]
            y.append(sum(values) / len(values))
        return x[1:], y[1:]
def main():
for algorithm in ALGORITHMS:
visualize_algorithm_results(algorithm)
render_plot()
def visualize_algorithm_results(algorithm):
x, y = read_data(algorithm.source_file)
line, = plt.plot(x, y, label=algorithm.label, color=algorithm.color)
plt.legend(handler_map={line: HandlerLine2D(numpoints=4)})
def render_plot():
plt.xlabel(X_AXIS_LABEL)
plt.ylabel(Y_AXIS_LABEL)
plt.legend(bbox_to_anchor=(1.00, 0), loc=4, borderaxespad=0.)
plt.xlim([0, 500000])
plt.savefig(OUTPUT_FILENAME)
plt.close()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1717165
|
import argparse
import json
import math
from functools import lru_cache
from urllib.parse import urlencode
from urllib.request import urlopen
class AirportService:
def __init__(self, longitude, latitude):
self.location = MapCoordinate(longitude, latitude)
def get_nearest_airports_in_radius(self, radius):
offset_coo1 = self.location.get_offset_coordinate_by_distance(radius)
offset_coo2 = self.location.get_offset_coordinate_by_distance(-1 * radius)
airport_data = CloudantService.get_airports_in_coordinate_box(offset_coo2.longitude, offset_coo1.longitude,
offset_coo2.latitude, offset_coo1.latitude)
airports = []
for airport_dict in airport_data['rows']:
airports.append(
Airport(airport_dict['fields']['lon'], airport_dict['fields']['lat'], airport_dict['fields']['name']))
return sorted(airports, key=lambda airport: self.location.distance_from_coordinate(airport.location))
class Airport:
def __init__(self, longitude, latitude, name):
self.location = MapCoordinate(longitude, latitude)
self.name = name
def __str__(self):
return f'{self.name} - {self.location}'
class MapCoordinate:
EARTH_RADIUS = 6378137
_RADIAN = 180 / math.pi
def __init__(self, longitude, latitude):
self.longitude = longitude
self.latitude = latitude
def __str__(self):
return f'(longitude: {self.longitude}, latitude: {self.latitude})'
def get_offset_coordinate_by_distance(self, longitudinal_distance, latitudinal_distance=None):
if latitudinal_distance is None:
latitudinal_distance = longitudinal_distance
return MapCoordinate(self._calculate_longitude_offset_by_distance(longitudinal_distance),
self._calculate_latitude_offset_by_distance(latitudinal_distance))
@lru_cache(maxsize=128)
def distance_from_coordinate(self, other):
"""Haversine formula"""
longitudinal_distance = math.radians(other.longitude - self.longitude)
latitudinal_distance = math.radians(other.latitude - self.latitude)
a = math.sin(latitudinal_distance / 2) ** 2 + math.cos(math.radians(self.latitude)) \
* math.cos(math.radians(other.latitude)) * math.sin(longitudinal_distance / 2) ** 2
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
return self.EARTH_RADIUS * c
def _calculate_longitude_offset_by_distance(self, distance):
return self.longitude + (distance / self.EARTH_RADIUS) * self._RADIAN / math.cos(math.pi * self.latitude / 180)
def _calculate_latitude_offset_by_distance(self, distance):
return self.latitude + (distance / self.EARTH_RADIUS) * self._RADIAN
class CloudantService:
CLOUDANT_REST_API_URL = 'https://mikerhodes.cloudant.com/airportdb/_design/view1/_search/geo'
@staticmethod
def get_airports_in_coordinate_box(lon1, lon2, lat1, lat2):
cloudant_response = urlopen(CloudantService._build_url(lon1, lon2, lat1, lat2))
data = cloudant_response.read()
data_encoding = cloudant_response.info().get_content_charset('utf-8')
return json.loads(data.decode(data_encoding))
@staticmethod
def _build_url(lon1, lon2, lat1, lat2):
if lon1 > lon2:
lon1, lon2 = lon2, lon1
if lat1 > lat2:
lat1, lat2 = lat2, lat1
values = {'q': f'lon:[{lon1} TO {lon2}] AND lat:[{lat1} TO {lat2}]'}
data = urlencode(values)
return f'{CloudantService.CLOUDANT_REST_API_URL}?{data}'
def get_input_from_user(text):
while True:
try:
value = float(input(f'{text}: '))
except ValueError:
            print(f'{text} must be a float value!')
else:
return value
def get_input_data():
print('Please provide the following information (radius in meters):')
longitude = get_input_from_user('Longitude')
latitude = get_input_from_user('Latitude')
radius = get_input_from_user('Radius')
return longitude, latitude, radius
def main():
parser = argparse.ArgumentParser(description='Find the closest airports in a radius of a coordinate.')
parser.add_argument('--longitude', type=float, help='a float value for the longitudinal part of the coordinate')
parser.add_argument('--latitude', type=float, help='a float value for the latitudinal part of the coordinate')
parser.add_argument('--radius', type=float,
help='a float value that represents the radius of the search area in meters')
args = parser.parse_args()
if args.longitude is None or args.latitude is None or args.radius is None:
longitude, latitude, radius = get_input_data()
else:
longitude, latitude, radius = args.longitude, args.latitude, args.radius
print(f'Finding airports sorted by distance in a radius of {radius:,.0f} meters from the coordinate '
f'longitude: {longitude}, latitude: {latitude}')
airport_service = AirportService(longitude, latitude)
airports = airport_service.get_nearest_airports_in_radius(radius)
for airport in airports:
print(f'{airport}: {airport_service.location.distance_from_coordinate(airport.location):,.0f} meter')
if __name__ == "__main__":
main()
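
# Example invocation (hypothetical file name; the script falls back to
# interactive prompts when any argument is omitted):
#   python find_airports.py --longitude 19.04 --latitude 47.50 --radius 50000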
|
StarcoderdataPython
|
68812
|
<gh_stars>0
# !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__project__ = 'leetcode'
__file__ = '__init__.py.py'
__author__ = 'king'
__time__ = '2022/2/15 16:02'
_ooOoo_
o8888888o
88" . "88
(| -_- |)
O\ = /O
____/`---'\____
.' \\| |// `.
/ \\||| : |||// \
/ _||||| -:- |||||- \
| | \\\ - /// | |
| \_| ''\---/'' | |
\ .-\__ `-` ___/-. /
___`. .' /--.--\ `. . __
."" '< `.___\_<|>_/___.' >'"".
| | : `- \`.;`\ _ /`;.`/ - ` : | |
\ \ `-. \_ __\ /__ _/ .-` / /
======`-.____`-.___\_____/___.-`____.-'======
`=---='
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
              May the Buddha bless this code: no bugs, ever
"""
"""
难度:中等
给你两个字符串s1和s2,写一个函数来判断s2是否包含s1的排列。如果是,返回true;否则,返回false。
换句话说,s1的排列之一是s2的子串。
示例 1:
输入:s1 = "ab" s2 = "eidbaooo"
输出:true
解释:s2 包含 s1 的排列之一 ("ba").
示例 2:
输入:s1= "ab" s2 = "eidboaoo"
输出:false
提示:
1 <= s1.length, s2.length <= 10⁴
s1 和 s2 仅包含小写字母
Related Topics 哈希表 双指针 字符串 滑动窗口
"""
import collections
class Solution:
def checkInclusion(self, s1: str, s2: str) -> bool:
window = collections.defaultdict(int)
needs = collections.defaultdict(int)
for c in s1:
needs[c] += 1
left = right = valid = 0
while right < len(s2):
c = s2[right]
right += 1
if c in needs:
window[c] += 1
if needs[c] == window[c]:
valid += 1
while right - left >= len(s1):
                if valid == len(needs):
return True
d = s2[left]
left += 1
if d in needs:
if window[d] == needs[d]:
valid -= 1
window[d] -= 1
return False
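
# `valid` counts the distinct characters of s1 whose required multiplicity is met
# inside the current window, so a permutation of s1 is present exactly when
# valid == len(needs) while the window spans len(s1) characters of s2, as the
# two examples below confirm.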
s1 = "ab"
s2 = "eidbaooo"
s = Solution()
print(s.checkInclusion(s1, s2))
s1 = "ab"
s2 = "eidboaoo"
print(s.checkInclusion(s1, s2))
|
StarcoderdataPython
|
1797208
|
<reponame>DariaMachado/Algoritmos_Python
n: int; i: int; fora: int; dentro: int; x: int
n = int(input("Quantos numeros voce vai digitar? "))
fora = 0
dentro = 0
for i in range(0, n):
x = int(input("Digite um numero: "))
if x < 10 or x > 20:
fora = fora + 1
else:
dentro = dentro + 1
print(f"{dentro} DENTRO")
print(f"{fora} FORA")
|
StarcoderdataPython
|
1616794
|
<gh_stars>0
""""
Program name : Website cloner
author : https://github.com/codeperfectplus
How to use : Check README.md
"""
import os
import sys
import requests
from bs4 import BeautifulSoup
class CloneWebsite:
def __init__(self,website_name):
self.website_name = website_name
def crawl_website(self):
""" This function will crawl website and return content"""
        content = requests.get(self.website_name)
if content.status_code == 200:
return content
def create_folder(self):
        ''' This function will create a folder for the website '''
        folder_name = self.website_name.split("/")[2]
try:
os.makedirs(folder_name)
except Exception as e:
print(e)
return folder_name
def save_website(self):
''' This function will save website to respective folder '''
folder_name = self.create_folder()
content = self.crawl_website()
with open(f"{folder_name}/index.html", "w",encoding='ascii',errors='ignore') as file:
file.write(content.text)
def save_image(self):
folder_name = self.create_folder()
os.chdir(folder_name)
        data = requests.get(self.website_name).text
soup = BeautifulSoup(data, "html.parser")
for img in soup.find_all('img'):
src = img["src"]
print(src)
image_name = src.split("/")[-1]
path = src.split("/")[:-1]
path = "/".join(path)
try:
os.makedirs(path)
except Exception:
print("File Exists")
if "/" == src[:1]:
print(src)
                src = self.website_name + src
img_data = requests.get(src).content
with open(f"{path}/{image_name}", "wb") as file:
file.write(img_data)
print("complete")
if __name__ == '__main__':
website_name = sys.argv[1]
clone = CloneWebsite(website_name)
clone.save_website()
clone.save_image()
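
# Example invocation (hypothetical file name):
#   python website_cloner.py https://example.com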
|
StarcoderdataPython
|
79094
|
# -*- coding: utf-8 -*-
"""This module provides access to the execution REST api of Camunda."""
from __future__ import annotations
import dataclasses
import typing
__all__ = []
@dataclasses.dataclass
class Execution:
"""Data class of execution as returned by the REST api of Camunda."""
id_: str
process_instance_id: str
ended: bool
tenant_id: str
@classmethod
def load(cls, data: typing.Mapping[str, typing.Any]) -> Execution:
return cls(
id_=data['id'],
process_instance_id=data['processInstanceId'],
ended=data['ended'],
tenant_id=data['tenantId']
)
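
# Minimal usage sketch: build an Execution from the kind of mapping the Camunda
# REST api returns; the field values below are made up for illustration.
if __name__ == '__main__':
    sample = {
        'id': 'exec-1',
        'processInstanceId': 'proc-42',
        'ended': False,
        'tenantId': 'tenant-a',
    }
    print(Execution.load(sample))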
|
StarcoderdataPython
|
3229648
|
#Importing all the necessary libraries
from tkinter import *
import random,string
import pyperclip
#initialize Window
root = Tk()
root.geometry("400x400")
root.resizable(0,0)
root.title("Password Generator")
Label(root, text ='Password Generator', font='arial 15 bold').pack(side = BOTTOM)
pass_label = Label(root, text = 'Password Length', font = 'arial 10 bold').pack()
pass_length = IntVar()
length = Spinbox(root, from_=8, to_= 32, textvariable = pass_length, width = 15).pack()
pass_str = StringVar()
#Pass_Genrate Func
def Generator():
    # Start with one character from each class so every class is represented,
    # then top up with random characters to the requested length.
    password = random.choice(string.ascii_uppercase) + random.choice(string.ascii_lowercase) \
        + random.choice(string.digits) + random.choice(string.punctuation)
    for y in range(pass_length.get() - 4):
        password = password + random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits + string.punctuation)
pass_str.set(password)
Button(root, text = "Generate Password", command = Generator).pack(pady = 5)
Entry(root, textvariable = pass_str).pack()
#Copy Func
def copy_pass():
pyperclip.copy(pass_str.get())
Button(root, text = 'Copy Password', command = copy_pass).pack(pady=5)
# Keep the window open and process events.
root.mainloop()
|
StarcoderdataPython
|
72286
|
import csv
# convert to csv
with open('/home/elad_ch/security_prj/security_project/10-million-password-list-top-1000000.txt', 'r') as in_file:
stripped = (line.strip() for line in in_file)
lines = (line.split(",") for line in stripped if line)
with open('10-million-password-list-top-1000000.csv', 'w') as out_file:
writer = csv.writer(out_file)
writer.writerow(('title', 'intro'))
writer.writerows(lines)
|
StarcoderdataPython
|
3317912
|
<gh_stars>1-10
"""Message Processor package."""
|
StarcoderdataPython
|
136613
|
<filename>db/model/APolitician.py
from db.model.ADBItem import ADBItem
from datetime import date
"""
Abstract Politician class that can be extended as needed
"""
class APolitician(ADBItem):
FIRST_NAME = None
MIDDLE_NAME = None
LAST_NAME = None
DATE_OF_BIRTH = None
GENDER = None
PARTY = None
LEADERSHIP_ROLE = None
TWITTER_ACCOUNT = None
FACEBOOK_ACCOUNT = None
YOUTUBE_ACCOUNT = None
def set_common_politician_fields(self, db_object):
self._id = db_object.get('ID')
self.FIRST_NAME = db_object.get('FIRST_NAME')
self.MIDDLE_NAME = db_object.get('MIDDLE_NAME')
self.LAST_NAME = db_object.get('LAST_NAME')
self.DATE_OF_BIRTH = date.fromordinal(db_object.get('DATE_OF_BIRTH'))
self.GENDER = db_object.get('GENDER')
self.PARTY = db_object.get('PARTY')
self.LEADERSHIP_ROLE = db_object.get('LEADERSHIP_ROLE')
self.TWITTER_ACCOUNT = db_object.get('TWITTER_ACCOUNT')
self.FACEBOOK_ACCOUNT = db_object.get('FACEBOOK_ACCOUNT')
self.YOUTUBE_ACCOUNT = db_object.get('YOUTUBE_ACCOUNT')
def get_base_politician_dict(self):
return {
'first_name': self.FIRST_NAME,
'middle_name': self.MIDDLE_NAME,
'last_name': self.LAST_NAME,
'date_of_birth': self.DATE_OF_BIRTH.isoformat(),
'gender': self.GENDER,
'party': self.PARTY,
'leadership_role': self.LEADERSHIP_ROLE,
'twitter_account': self.TWITTER_ACCOUNT,
'facebook_account': self.FACEBOOK_ACCOUNT,
'youtube_account': self.YOUTUBE_ACCOUNT,
'id': self._id
}
|
StarcoderdataPython
|
3376940
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import argparse
import sys
import traceback
import json
import pika
DATA = []
def publish(host, port, exchange, routing_key, data):
connection = pika.BlockingConnection(
pika.ConnectionParameters(host=host, port=port))
main_channel = connection.channel()
for body in data:
main_channel.basic_publish(
exchange=exchange, routing_key=routing_key, body=body)
connection.close()
def callback(ch, method, properties, body):
DATA.append(body)
def consume(host='localhost', port=None, queue=None):
connection = pika.BlockingConnection(
pika.ConnectionParameters(host=host, port=port))
channel = connection.channel()
try:
channel.basic_consume(callback, queue=queue, no_ack=True)
    except Exception as e:
        print(e)
        print(traceback.format_exc())
connection.close()
def persistence(filename, data):
    try:
        data = json.dumps(data)
        with open(filename, 'w') as f:
            f.write(data)
    except Exception as e:
        print(e)
        print(traceback.format_exc())
def parseArgument():
parser = argparse.ArgumentParser()
    parser.add_argument('--host')
    parser.add_argument('--port')
    parser.add_argument('--queue', required=True)
    parser.add_argument('--file', required=True)
    parser.add_argument('--exchange', required=True)
    parser.add_argument('--routingkey', required=True)
args = parser.parse_args()
if args.port:
args.port = int(args.port)
return args
def main():
args = parseArgument()
consume(args.host, args.port, args.queue)
persistence(args.file, DATA)
publish(args.host, args.port, args.exchange, args.routingkey, DATA)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
126023
|
#!/usr/bin/env python
from distutils.core import setup
setup(
name='pysoftether',
version='1.0.1',
description='SoftEther VPN Server Python Management API',
author='vandot',
author_email='<EMAIL>',
url='https://github.com/vandot/pysoftether',
packages=['softether'],
)
|
StarcoderdataPython
|
1662693
|
"""Generate CronWorkflow specifications."""
from dataclasses import dataclass, field
from enum import Enum
from typing import Any, Mapping, Optional
from dagger.dag import DAG
from dagger.runtime.argo.extra_spec_options import with_extra_spec_options
from dagger.runtime.argo.workflow_spec import Workflow, workflow_spec
class CronConcurrencyPolicy(Enum):
"""
Concurrency policies allowed by Argo/Kubernetes.
Docs: https://v1-20.docs.kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/#concurrency-policy
"""
ALLOW = "Allow"
FORBID = "Forbid"
REPLACE = "Replace"
@dataclass(frozen=True)
class Cron:
"""
Scheduling options for the cron job.
Spec: https://github.com/argoproj/argo-workflows/blob/v3.0.4/docs/fields.md#cronworkflowspec
"""
schedule: str
starting_deadline_seconds: int = 0
concurrency_policy: CronConcurrencyPolicy = CronConcurrencyPolicy.ALLOW
timezone: Optional[str] = None
successful_jobs_history_limit: Optional[int] = None
failed_jobs_history_limit: Optional[int] = None
extra_spec_options: Mapping[str, Any] = field(default_factory=dict)
def cron_workflow_spec(
dag: DAG,
workflow: Workflow,
cron: Cron,
) -> Mapping[str, Any]:
"""
Return a minimal representation of a CronWorkflowSpec with the supplied parameters.
Spec: https://github.com/argoproj/argo-workflows/blob/v3.0.4/docs/fields.md#cronworkflowspec
Parameters
----------
dag
The DAG to generate the spec for
workflow
The configuration for this workflow
cron
The configuration for the cron workflow
Raises
------
ValueError
If any of the cron.extra_spec_options collides with a property used by the runtime.
"""
spec = {
"schedule": cron.schedule,
"startingDeadlineSeconds": cron.starting_deadline_seconds,
"concurrencyPolicy": cron.concurrency_policy.value,
"workflowSpec": workflow_spec(dag, workflow),
}
if cron.timezone:
spec["timezone"] = cron.timezone
if cron.successful_jobs_history_limit:
spec["successfulJobsHistoryLimit"] = cron.successful_jobs_history_limit
if cron.failed_jobs_history_limit:
spec["failedJobsHistoryLimit"] = cron.failed_jobs_history_limit
spec = with_extra_spec_options(
original=spec,
extra_options=cron.extra_spec_options,
context="the CronWorkflow spec",
)
return spec
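
# Minimal usage sketch: only the Cron wiring defined in this module is shown;
# `dag` and `workflow` are assumed to have been built elsewhere with dagger's
# usual helpers, so the calls stay commented out.
#
#   nightly = Cron(
#       schedule="0 2 * * *",
#       concurrency_policy=CronConcurrencyPolicy.FORBID,
#       timezone="UTC",
#   )
#   spec = cron_workflow_spec(dag=dag, workflow=workflow, cron=nightly)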
|
StarcoderdataPython
|
1775620
|
### $Id: admin.py,v 1.29 2017/12/18 09:12:52 muntaza Exp $
from django.contrib import admin
from umum.models import Provinsi, Kabupaten, LokasiBidang, SKPD, SUBSKPD, KodeBarang, HakTanah, SatuanBarang, KeadaanBarang, SKPenghapusan, MutasiBerkurang, JenisPemanfaatan, AsalUsul, Tahun, GolonganBarang, Tanah, KontrakTanah, PenghapusanTanah, TanahPenghapusan, PemanfaatanTanah, TanahPemanfaatan, HargaTanah, TahunBerkurangUsulHapusTanah, TanahUsulHapus
#### Tanah
from umum.models import TanahParinginKota, KontrakTanahParinginKota, HargaTanahParinginKota, TanahUsulHapusParinginKota, TahunBerkurangUsulHapusTanahParinginKota
from umum.models import TanahPenghapusanParinginKota, TahunBerkurangTanahParinginKota, PenghapusanTanahParinginKota
from umum.models import SKPDAsalTanahParinginKota, SKPDTujuanTanahParinginKota, FotoTanahParinginKota
from umum.admin import HargaTanahInline, TanahAdmin, KontrakTanahAdmin, HargaTanahAdmin, TahunBerkurangUsulHapusTanahInline, TanahUsulHapusAdmin
from umum.admin import TahunBerkurangTanahInline, PenghapusanTanahInline, TanahPenghapusanAdmin
from umum.admin import SKPDAsalTanahInline, SKPDTujuanTanahInline, FotoTanahInline
from umum.admin import GedungBangunanInline
#### Gedung Bangunan
from gedungbangunan.models import StatusTingkat, StatusBeton, KontrakGedungBangunan, HargaGedungBangunan, GedungBangunan, PenghapusanGedungBangunan, PemanfaatanGedungBangunan, TahunBerkurangGedungBangunan, Ruangan, TahunBerkurangUsulHapusGedung
from gedungbangunan.models import GedungBangunanPemanfaatan, GedungBangunanPenghapusan, GedungBangunanRuangan, GedungBangunanUsulHapus
from gedungbangunan.models import GedungBangunanParinginKota, KontrakGedungBangunanParinginKota, HargaGedungBangunanParinginKota, GedungBangunanRuanganParinginKota, GedungBangunanUsulHapusParinginKota, TahunBerkurangUsulHapusGedungParinginKota
from gedungbangunan.models import GedungBangunanPenghapusanParinginKota, TahunBerkurangGedungBangunanParinginKota, PenghapusanGedungBangunanParinginKota
from gedungbangunan.models import SKPDAsalGedungBangunanParinginKota, SKPDTujuanGedungBangunanParinginKota, FotoGedungBangunanParinginKota
from gedungbangunan.admin import HargaGedungBangunanInline, GedungBangunanAdmin, KontrakGedungBangunanAdmin, HargaGedungBangunanAdmin, RuanganInline, GedungBangunanRuanganAdmin, KDPGedungBangunanAdmin, TahunBerkurangUsulHapusGedungInline, GedungBangunanUsulHapusAdmin
from gedungbangunan.admin import TahunBerkurangGedungBangunanInline, PenghapusanGedungBangunanInline, GedungBangunanPenghapusanAdmin
from gedungbangunan.admin import SKPDAsalGedungBangunanInline, SKPDTujuanGedungBangunanInline, FotoGedungBangunanInline
#### Peralatan Mesin
from peralatanmesin.models import KontrakPeralatanMesin, HargaPeralatanMesin, PeralatanMesin, PenghapusanPeralatanMesin, PemanfaatanPeralatanMesin, TahunBerkurangPeralatanMesin, TahunBerkurangUsulHapusPeralatanMesin
# to hold the inline models
from peralatanmesin.models import PeralatanMesinPemanfaatan, PeralatanMesinPenghapusan, PeralatanMesinUsulHapus
from peralatanmesin.models import PeralatanMesinParinginKota, KontrakPeralatanMesinParinginKota, HargaPeralatanMesinParinginKota, PeralatanMesinUsulHapusParinginKota, TahunBerkurangUsulHapusPeralatanMesinParinginKota
from peralatanmesin.models import PeralatanMesinPenghapusanParinginKota, TahunBerkurangPeralatanMesinParinginKota, PenghapusanPeralatanMesinParinginKota
from peralatanmesin.models import SKPDAsalPeralatanMesinParinginKota, SKPDTujuanPeralatanMesinParinginKota, FotoPeralatanMesinParinginKota
from peralatanmesin.admin import HargaPeralatanMesinInline, PeralatanMesinAdmin, KontrakPeralatanMesinAdmin, HargaPeralatanMesinAdmin, TahunBerkurangUsulHapusPeralatanMesinInline, PeralatanMesinUsulHapusAdmin
from peralatanmesin.admin import TahunBerkurangPeralatanMesinInline, PenghapusanPeralatanMesinInline, PeralatanMesinPenghapusanAdmin
from peralatanmesin.admin import SKPDAsalPeralatanMesinInline, SKPDTujuanPeralatanMesinInline, FotoPeralatanMesinInline
#### Class Tanah
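# Note: every admin class below filters its querysets and foreign keys on the
# hard-coded id 29, which (judging from the class names) appears to be the SKPD
# id for Paringin Kota.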
class TahunBerkurangTanahParinginKotaInline(TahunBerkurangTanahInline):
model = TahunBerkurangTanahParinginKota
class PenghapusanTanahParinginKotaInline(PenghapusanTanahInline):
model = PenghapusanTanahParinginKota
class SKPDAsalTanahParinginKotaInline(SKPDAsalTanahInline):
model = SKPDAsalTanahParinginKota
class SKPDTujuanTanahParinginKotaInline(SKPDTujuanTanahInline):
model = SKPDTujuanTanahParinginKota
class FotoTanahParinginKotaInline(FotoTanahInline):
model = FotoTanahParinginKota
class GedungBangunanParinginKotaInline(GedungBangunanInline):
model = GedungBangunanParinginKota
class HargaTanahParinginKotaInline(HargaTanahInline):
model = HargaTanahParinginKota
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_kontrak":
kwargs["queryset"] = KontrakTanah.objects.filter(id_skpd__exact=29)
return super(HargaTanahParinginKotaInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class TahunBerkurangUsulHapusTanahParinginKotaInline(TahunBerkurangUsulHapusTanahInline):
model = TahunBerkurangUsulHapusTanahParinginKota
class TanahParinginKotaAdmin(TanahAdmin):
inlines = [HargaTanahParinginKotaInline,
SKPDAsalTanahParinginKotaInline,
FotoTanahParinginKotaInline, ]
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_sub_skpd":
kwargs["queryset"] = SUBSKPD.objects.filter(id_skpd__exact=29)
return super(TanahParinginKotaAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=29)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_mutasi_berkurang__exact=5)
class TanahUsulHapusParinginKotaAdmin(TanahUsulHapusAdmin):
inlines = [TahunBerkurangUsulHapusTanahParinginKotaInline,
SKPDAsalTanahParinginKotaInline,
FotoTanahParinginKotaInline, ]
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=29)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_mutasi_berkurang__exact=3)
class KontrakTanahParinginKotaAdmin(KontrakTanahAdmin):
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_skpd":
kwargs["queryset"] = SKPD.objects.filter(id__exact=29)
return super(KontrakTanahParinginKotaAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
return self.model.objects.filter(id_skpd__exact=29)
class HargaTanahParinginKotaAdmin(HargaTanahAdmin):
def get_queryset(self, request):
sub_skpd_qs = SUBSKPD.objects.filter(id_skpd__exact=29)
tanah_qs = Tanah.objects.filter(id_sub_skpd__in=sub_skpd_qs)
return self.model.objects.filter(id_tanah__in=tanah_qs)
class TanahPenghapusanParinginKotaAdmin(TanahPenghapusanAdmin):
inlines = [PenghapusanTanahParinginKotaInline, TahunBerkurangTanahParinginKotaInline,
SKPDAsalTanahParinginKotaInline,
SKPDTujuanTanahParinginKotaInline,
FotoTanahParinginKotaInline, ]
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=29)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_mutasi_berkurang__in=[2,4,6,7,10,])
### Register Tanah ParinginKota
admin.site.register(TanahParinginKota, TanahParinginKotaAdmin)
admin.site.register(TanahUsulHapusParinginKota, TanahUsulHapusParinginKotaAdmin)
admin.site.register(KontrakTanahParinginKota, KontrakTanahParinginKotaAdmin)
admin.site.register(HargaTanahParinginKota, HargaTanahParinginKotaAdmin)
admin.site.register(TanahPenghapusanParinginKota, TanahPenghapusanParinginKotaAdmin)
from gedungbangunan.models import KDPGedungBangunanParinginKota
#### Class Gedung dan Bangunan
class TahunBerkurangGedungBangunanParinginKotaInline(TahunBerkurangGedungBangunanInline):
model = TahunBerkurangGedungBangunanParinginKota
class PenghapusanGedungBangunanParinginKotaInline(PenghapusanGedungBangunanInline):
model = PenghapusanGedungBangunanParinginKota
class SKPDAsalGedungBangunanParinginKotaInline(SKPDAsalGedungBangunanInline):
model = SKPDAsalGedungBangunanParinginKota
class SKPDTujuanGedungBangunanParinginKotaInline(SKPDTujuanGedungBangunanInline):
model = SKPDTujuanGedungBangunanParinginKota
class FotoGedungBangunanParinginKotaInline(FotoGedungBangunanInline):
model = FotoGedungBangunanParinginKota
class HargaGedungBangunanParinginKotaInline(HargaGedungBangunanInline):
model = HargaGedungBangunanParinginKota
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_kontrak_gedung_bangunan":
kwargs["queryset"] = KontrakGedungBangunan.objects.filter(id_skpd__exact=29)
return super(HargaGedungBangunanParinginKotaInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class TahunBerkurangUsulHapusGedungParinginKotaInline(TahunBerkurangUsulHapusGedungInline):
model = TahunBerkurangUsulHapusGedungParinginKota
class GedungBangunanParinginKotaAdmin(GedungBangunanAdmin):
inlines = [HargaGedungBangunanParinginKotaInline,
SKPDAsalGedungBangunanParinginKotaInline,
FotoGedungBangunanParinginKotaInline, ]
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_sub_skpd":
kwargs["queryset"] = SUBSKPD.objects.filter(id_skpd__exact=29)
return super(GedungBangunanParinginKotaAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=29)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_golongan_barang__exact=3).filter(id_mutasi_berkurang__exact=5)
class KDPGedungBangunanParinginKotaAdmin(KDPGedungBangunanAdmin):
inlines = [HargaGedungBangunanParinginKotaInline,
SKPDAsalGedungBangunanParinginKotaInline,
FotoGedungBangunanParinginKotaInline, ]
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_sub_skpd":
kwargs["queryset"] = SUBSKPD.objects.filter(id_skpd__exact=29)
return super(KDPGedungBangunanParinginKotaAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=29)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_golongan_barang__exact=6).filter(id_mutasi_berkurang__exact=5)
class GedungBangunanRuanganParinginKotaAdmin(GedungBangunanRuanganAdmin):
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=29)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_golongan_barang__exact=3).filter(id_mutasi_berkurang__exact=5)
class GedungBangunanUsulHapusParinginKotaAdmin(GedungBangunanUsulHapusAdmin):
inlines = [TahunBerkurangUsulHapusGedungParinginKotaInline,
SKPDAsalGedungBangunanParinginKotaInline,
FotoGedungBangunanParinginKotaInline, ]
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=29)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_golongan_barang__exact=3).filter(id_mutasi_berkurang__exact=3)
class KontrakGedungBangunanParinginKotaAdmin(KontrakGedungBangunanAdmin):
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_skpd":
kwargs["queryset"] = SKPD.objects.filter(id__exact=29)
return super(KontrakGedungBangunanParinginKotaAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
return self.model.objects.filter(id_skpd__exact=29)
class HargaGedungBangunanParinginKotaAdmin(HargaGedungBangunanAdmin):
def get_queryset(self, request):
sub_skpd_qs = SUBSKPD.objects.filter(id_skpd__exact=29)
gedung_bangunan_qs = GedungBangunan.objects.filter(id_sub_skpd__in=sub_skpd_qs)
return self.model.objects.filter(id_gedung_bangunan__in=gedung_bangunan_qs)
class GedungBangunanPenghapusanParinginKotaAdmin(GedungBangunanPenghapusanAdmin):
inlines = [PenghapusanGedungBangunanParinginKotaInline, TahunBerkurangGedungBangunanParinginKotaInline,
SKPDAsalGedungBangunanParinginKotaInline,
SKPDTujuanGedungBangunanParinginKotaInline,
FotoGedungBangunanParinginKotaInline, ]
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=29)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_mutasi_berkurang__in=[2,4,6,7,10,])
###Register GedungBangunan ParinginKota
admin.site.register(GedungBangunanParinginKota, GedungBangunanParinginKotaAdmin)
admin.site.register(KDPGedungBangunanParinginKota, KDPGedungBangunanParinginKotaAdmin)
admin.site.register(GedungBangunanRuanganParinginKota, GedungBangunanRuanganParinginKotaAdmin)
admin.site.register(GedungBangunanUsulHapusParinginKota, GedungBangunanUsulHapusParinginKotaAdmin)
admin.site.register(KontrakGedungBangunanParinginKota, KontrakGedungBangunanParinginKotaAdmin)
admin.site.register(HargaGedungBangunanParinginKota, HargaGedungBangunanParinginKotaAdmin)
admin.site.register(GedungBangunanPenghapusanParinginKota, GedungBangunanPenghapusanParinginKotaAdmin)
#### Class Peralatan Mesin
class TahunBerkurangPeralatanMesinParinginKotaInline(TahunBerkurangPeralatanMesinInline):
model = TahunBerkurangPeralatanMesinParinginKota
class PenghapusanPeralatanMesinParinginKotaInline(PenghapusanPeralatanMesinInline):
model = PenghapusanPeralatanMesinParinginKota
class SKPDAsalPeralatanMesinParinginKotaInline(SKPDAsalPeralatanMesinInline):
model = SKPDAsalPeralatanMesinParinginKota
class SKPDTujuanPeralatanMesinParinginKotaInline(SKPDTujuanPeralatanMesinInline):
model = SKPDTujuanPeralatanMesinParinginKota
class FotoPeralatanMesinParinginKotaInline(FotoPeralatanMesinInline):
model = FotoPeralatanMesinParinginKota
class HargaPeralatanMesinParinginKotaInline(HargaPeralatanMesinInline):
model = HargaPeralatanMesinParinginKota
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_kontrak_peralatan_mesin":
kwargs["queryset"] = KontrakPeralatanMesin.objects.filter(id_skpd__exact=29)
return super(HargaPeralatanMesinParinginKotaInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class TahunBerkurangUsulHapusPeralatanMesinParinginKotaInline(TahunBerkurangUsulHapusPeralatanMesinInline):
model = TahunBerkurangUsulHapusPeralatanMesinParinginKota
class PeralatanMesinParinginKotaAdmin(PeralatanMesinAdmin):
inlines = [HargaPeralatanMesinParinginKotaInline,
SKPDAsalPeralatanMesinParinginKotaInline,
FotoPeralatanMesinParinginKotaInline, ]
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_sub_skpd":
kwargs["queryset"] = SUBSKPD.objects.filter(id_skpd__exact=29)
if db_field.name == "id_ruangan":
kwargs["queryset"] = Ruangan.objects.filter(id_gedung_bangunan__id_sub_skpd__id_skpd__exact=29)
return super(PeralatanMesinParinginKotaAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=29)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_mutasi_berkurang__exact=5)
class PeralatanMesinUsulHapusParinginKotaAdmin(PeralatanMesinUsulHapusAdmin):
inlines = [TahunBerkurangUsulHapusPeralatanMesinParinginKotaInline,
SKPDAsalPeralatanMesinParinginKotaInline,
FotoPeralatanMesinParinginKotaInline, ]
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=29)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_mutasi_berkurang__exact=3)
class KontrakPeralatanMesinParinginKotaAdmin(KontrakPeralatanMesinAdmin):
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_skpd":
kwargs["queryset"] = SKPD.objects.filter(id__exact=29)
return super(KontrakPeralatanMesinParinginKotaAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
return self.model.objects.filter(id_skpd__exact=29)
class HargaPeralatanMesinParinginKotaAdmin(HargaPeralatanMesinAdmin):
def get_queryset(self, request):
sub_skpd_qs = SUBSKPD.objects.filter(id_skpd__exact=29)
peralatan_mesin_qs = PeralatanMesin.objects.filter(id_sub_skpd__in=sub_skpd_qs)
return self.model.objects.filter(id_peralatan_mesin__in=peralatan_mesin_qs)
class PeralatanMesinPenghapusanParinginKotaAdmin(PeralatanMesinPenghapusanAdmin):
inlines = [PenghapusanPeralatanMesinParinginKotaInline, TahunBerkurangPeralatanMesinParinginKotaInline,
SKPDAsalPeralatanMesinParinginKotaInline,
SKPDTujuanPeralatanMesinParinginKotaInline,
FotoPeralatanMesinParinginKotaInline, ]
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=29)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_mutasi_berkurang__in=[2,4,6,7,10,])
###Register PeralatanMesin ParinginKota
admin.site.register(PeralatanMesinParinginKota, PeralatanMesinParinginKotaAdmin)
admin.site.register(PeralatanMesinUsulHapusParinginKota, PeralatanMesinUsulHapusParinginKotaAdmin)
admin.site.register(KontrakPeralatanMesinParinginKota, KontrakPeralatanMesinParinginKotaAdmin)
admin.site.register(HargaPeralatanMesinParinginKota, HargaPeralatanMesinParinginKotaAdmin)
admin.site.register(PeralatanMesinPenghapusanParinginKota, PeralatanMesinPenghapusanParinginKotaAdmin)
#### Jalan, Irigasi, dan Jaringan
from jalanirigasijaringan.models import KontrakJalanIrigasiJaringan, HargaJalanIrigasiJaringan, JalanIrigasiJaringan, PenghapusanJalanIrigasiJaringan, PemanfaatanJalanIrigasiJaringan, TahunBerkurangJalanIrigasiJaringan, TahunBerkurangUsulHapusJalanIrigasiJaringan
from jalanirigasijaringan.models import JalanIrigasiJaringanPemanfaatan, JalanIrigasiJaringanPenghapusan, JalanIrigasiJaringanUsulHapus
from jalanirigasijaringan.models import JalanIrigasiJaringanParinginKota, KontrakJalanIrigasiJaringanParinginKota, HargaJalanIrigasiJaringanParinginKota, KDPJalanIrigasiJaringanParinginKota, JalanIrigasiJaringanUsulHapusParinginKota, TahunBerkurangUsulHapusJalanIrigasiJaringanParinginKota
from jalanirigasijaringan.models import JalanIrigasiJaringanPenghapusanParinginKota, TahunBerkurangJalanIrigasiJaringanParinginKota, PenghapusanJalanIrigasiJaringanParinginKota
from jalanirigasijaringan.models import SKPDAsalJalanIrigasiJaringanParinginKota, SKPDTujuanJalanIrigasiJaringanParinginKota, FotoJalanIrigasiJaringanParinginKota
from jalanirigasijaringan.admin import HargaJalanIrigasiJaringanInline, JalanIrigasiJaringanAdmin, KontrakJalanIrigasiJaringanAdmin, HargaJalanIrigasiJaringanAdmin, KDPJalanIrigasiJaringanAdmin, TahunBerkurangUsulHapusJalanIrigasiJaringanInline, JalanIrigasiJaringanUsulHapusAdmin
from jalanirigasijaringan.admin import TahunBerkurangJalanIrigasiJaringanInline, PenghapusanJalanIrigasiJaringanInline, JalanIrigasiJaringanPenghapusanAdmin
from jalanirigasijaringan.admin import SKPDAsalJalanIrigasiJaringanInline, SKPDTujuanJalanIrigasiJaringanInline, FotoJalanIrigasiJaringanInline
#### Class Jalan, Irigasi dan Jaringan
class TahunBerkurangJalanIrigasiJaringanParinginKotaInline(TahunBerkurangJalanIrigasiJaringanInline):
model = TahunBerkurangJalanIrigasiJaringanParinginKota
class PenghapusanJalanIrigasiJaringanParinginKotaInline(PenghapusanJalanIrigasiJaringanInline):
model = PenghapusanJalanIrigasiJaringanParinginKota
class SKPDAsalJalanIrigasiJaringanParinginKotaInline(SKPDAsalJalanIrigasiJaringanInline):
model = SKPDAsalJalanIrigasiJaringanParinginKota
class SKPDTujuanJalanIrigasiJaringanParinginKotaInline(SKPDTujuanJalanIrigasiJaringanInline):
model = SKPDTujuanJalanIrigasiJaringanParinginKota
class FotoJalanIrigasiJaringanParinginKotaInline(FotoJalanIrigasiJaringanInline):
model = FotoJalanIrigasiJaringanParinginKota
class HargaJalanIrigasiJaringanParinginKotaInline(HargaJalanIrigasiJaringanInline):
model = HargaJalanIrigasiJaringanParinginKota
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_kontrak_jalan_irigasi_jaringan":
kwargs["queryset"] = KontrakJalanIrigasiJaringan.objects.filter(id_skpd__exact=29)
return super(HargaJalanIrigasiJaringanParinginKotaInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class TahunBerkurangUsulHapusJalanIrigasiJaringanParinginKotaInline(TahunBerkurangUsulHapusJalanIrigasiJaringanInline):
model = TahunBerkurangUsulHapusJalanIrigasiJaringanParinginKota
class JalanIrigasiJaringanParinginKotaAdmin(JalanIrigasiJaringanAdmin):
inlines = [HargaJalanIrigasiJaringanParinginKotaInline,
SKPDAsalJalanIrigasiJaringanParinginKotaInline,
FotoJalanIrigasiJaringanParinginKotaInline, ]
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_sub_skpd":
kwargs["queryset"] = SUBSKPD.objects.filter(id_skpd__exact=29)
return super(JalanIrigasiJaringanParinginKotaAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=29)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_golongan_barang__exact=4).filter(id_mutasi_berkurang__exact=5)
class JalanIrigasiJaringanUsulHapusParinginKotaAdmin(JalanIrigasiJaringanUsulHapusAdmin):
inlines = [TahunBerkurangUsulHapusJalanIrigasiJaringanParinginKotaInline,
SKPDAsalJalanIrigasiJaringanParinginKotaInline,
FotoJalanIrigasiJaringanParinginKotaInline, ]
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=29)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_golongan_barang__exact=4).filter(id_mutasi_berkurang__exact=3)
class KDPJalanIrigasiJaringanParinginKotaAdmin(KDPJalanIrigasiJaringanAdmin):
inlines = [HargaJalanIrigasiJaringanParinginKotaInline,
SKPDAsalJalanIrigasiJaringanParinginKotaInline,
FotoJalanIrigasiJaringanParinginKotaInline, ]
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_sub_skpd":
kwargs["queryset"] = SUBSKPD.objects.filter(id_skpd__exact=29)
return super(KDPJalanIrigasiJaringanParinginKotaAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=29)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_golongan_barang__exact=6).filter(id_mutasi_berkurang__exact=5)
class KontrakJalanIrigasiJaringanParinginKotaAdmin(KontrakJalanIrigasiJaringanAdmin):
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_skpd":
kwargs["queryset"] = SKPD.objects.filter(id__exact=29)
return super(KontrakJalanIrigasiJaringanParinginKotaAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
return self.model.objects.filter(id_skpd__exact=29)
class HargaJalanIrigasiJaringanParinginKotaAdmin(HargaJalanIrigasiJaringanAdmin):
def get_queryset(self, request):
sub_skpd_qs = SUBSKPD.objects.filter(id_skpd__exact=29)
jalan_irigasi_jaringan_qs = JalanIrigasiJaringan.objects.filter(id_sub_skpd__in=sub_skpd_qs)
return self.model.objects.filter(id_jalan_irigasi_jaringan__in=jalan_irigasi_jaringan_qs)
class JalanIrigasiJaringanPenghapusanParinginKotaAdmin(JalanIrigasiJaringanPenghapusanAdmin):
inlines = [PenghapusanJalanIrigasiJaringanParinginKotaInline, TahunBerkurangJalanIrigasiJaringanParinginKotaInline,
SKPDAsalJalanIrigasiJaringanParinginKotaInline,
SKPDTujuanJalanIrigasiJaringanParinginKotaInline,
FotoJalanIrigasiJaringanParinginKotaInline, ]
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=29)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_mutasi_berkurang__in=[2,4,6,7,10,])
###Register JalanIrigasiJaringan ParinginKota
admin.site.register(JalanIrigasiJaringanParinginKota, JalanIrigasiJaringanParinginKotaAdmin)
admin.site.register(JalanIrigasiJaringanUsulHapusParinginKota, JalanIrigasiJaringanUsulHapusParinginKotaAdmin)
admin.site.register(KDPJalanIrigasiJaringanParinginKota, KDPJalanIrigasiJaringanParinginKotaAdmin)
admin.site.register(KontrakJalanIrigasiJaringanParinginKota, KontrakJalanIrigasiJaringanParinginKotaAdmin)
admin.site.register(HargaJalanIrigasiJaringanParinginKota, HargaJalanIrigasiJaringanParinginKotaAdmin)
admin.site.register(JalanIrigasiJaringanPenghapusanParinginKota, JalanIrigasiJaringanPenghapusanParinginKotaAdmin)
#### Aset Tetap Lainnya
from atl.models import KontrakATL, HargaATL, ATL, PenghapusanATL, PemanfaatanATL, TahunBerkurangATL, TahunBerkurangUsulHapusATL
from atl.models import ATLPemanfaatan, ATLPenghapusan, ATLUsulHapus
from atl.models import ATLParinginKota, KontrakATLParinginKota, HargaATLParinginKota, ATLUsulHapusParinginKota, TahunBerkurangUsulHapusATLParinginKota
from atl.models import ATLPenghapusanParinginKota, TahunBerkurangATLParinginKota, PenghapusanATLParinginKota
from atl.models import SKPDAsalATLParinginKota, SKPDTujuanATLParinginKota, FotoATLParinginKota
from atl.admin import HargaATLInline, ATLAdmin, KontrakATLAdmin, HargaATLAdmin, TahunBerkurangUsulHapusATLInline, ATLUsulHapusAdmin
from atl.admin import TahunBerkurangATLInline, PenghapusanATLInline, ATLPenghapusanAdmin
from atl.admin import SKPDAsalATLInline, SKPDTujuanATLInline, FotoATLInline
#### Class Aset Tetap Lainnya
class TahunBerkurangATLParinginKotaInline(TahunBerkurangATLInline):
model = TahunBerkurangATLParinginKota
class PenghapusanATLParinginKotaInline(PenghapusanATLInline):
model = PenghapusanATLParinginKota
class SKPDAsalATLParinginKotaInline(SKPDAsalATLInline):
model = SKPDAsalATLParinginKota
class SKPDTujuanATLParinginKotaInline(SKPDTujuanATLInline):
model = SKPDTujuanATLParinginKota
class FotoATLParinginKotaInline(FotoATLInline):
model = FotoATLParinginKota
class HargaATLParinginKotaInline(HargaATLInline):
model = HargaATLParinginKota
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_kontrak_atl":
kwargs["queryset"] = KontrakATL.objects.filter(id_skpd__exact=29)
return super(HargaATLParinginKotaInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class TahunBerkurangUsulHapusATLParinginKotaInline(TahunBerkurangUsulHapusATLInline):
model = TahunBerkurangUsulHapusATLParinginKota
class ATLParinginKotaAdmin(ATLAdmin):
inlines = [HargaATLParinginKotaInline,
SKPDAsalATLParinginKotaInline,
FotoATLParinginKotaInline, ]
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_sub_skpd":
kwargs["queryset"] = SUBSKPD.objects.filter(id_skpd__exact=29)
if db_field.name == "id_ruangan":
kwargs["queryset"] = Ruangan.objects.filter(id_gedung_bangunan__id_sub_skpd__id_skpd__exact=29)
return super(ATLParinginKotaAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=29)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_mutasi_berkurang__exact=5)
class ATLUsulHapusParinginKotaAdmin(ATLUsulHapusAdmin):
inlines = [TahunBerkurangUsulHapusATLParinginKotaInline,
SKPDAsalATLParinginKotaInline,
FotoATLParinginKotaInline, ]
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=29)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_golongan_barang__exact=5).filter(id_mutasi_berkurang__exact=3)
class KontrakATLParinginKotaAdmin(KontrakATLAdmin):
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_skpd":
kwargs["queryset"] = SKPD.objects.filter(id__exact=29)
return super(KontrakATLParinginKotaAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
return self.model.objects.filter(id_skpd__exact=29)
class HargaATLParinginKotaAdmin(HargaATLAdmin):
def get_queryset(self, request):
sub_skpd_qs = SUBSKPD.objects.filter(id_skpd__exact=29)
atl_qs = ATL.objects.filter(id_sub_skpd__in=sub_skpd_qs)
return self.model.objects.filter(id_atl__in=atl_qs)
class ATLPenghapusanParinginKotaAdmin(ATLPenghapusanAdmin):
inlines = [PenghapusanATLParinginKotaInline, TahunBerkurangATLParinginKotaInline,
SKPDAsalATLParinginKotaInline,
SKPDTujuanATLParinginKotaInline,
FotoATLParinginKotaInline, ]
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=29)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_mutasi_berkurang__in=[2,4,6,7,10,])
###Register ATL ParinginKota
admin.site.register(ATLParinginKota, ATLParinginKotaAdmin)
admin.site.register(ATLUsulHapusParinginKota, ATLUsulHapusParinginKotaAdmin)
admin.site.register(KontrakATLParinginKota, KontrakATLParinginKotaAdmin)
admin.site.register(HargaATLParinginKota, HargaATLParinginKotaAdmin)
admin.site.register(ATLPenghapusanParinginKota, ATLPenghapusanParinginKotaAdmin)
|
StarcoderdataPython
|
3330720
|
from PyQt5.QtWidgets import QDialog
from trpgcreator.ui.dialogs.create_resource import Ui_CreateResourceDialog
class CreateResourceDialog(QDialog):
def __init__(self):
super().__init__()
self.ui = Ui_CreateResourceDialog()
self.ui.setupUi(self)
self.ui.buttonBox.accepted.connect(lambda: self.done(1))
self.ui.buttonBox.rejected.connect(lambda: self.done(0))
@classmethod
def get_new_id(cls, resource_type):
dialog = CreateResourceDialog()
dialog.setWindowTitle('Create new ' + resource_type)
dialog.ui.label.setText('Enter ID for ' + resource_type)
if dialog.exec_():
return str(dialog.ui.lineEditId.text())
else:
return None
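
# Example usage sketch (hypothetical resource type; assumes a QApplication is
# already running):
#   new_id = CreateResourceDialog.get_new_id('map')  # returns None when cancelled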
|
StarcoderdataPython
|
142759
|
<reponame>dubizzle/django_influxdb_metrics
"""URLs to run the tests."""
try:
from django.conf.urls import include, url
except ImportError: # Pre-Django 1.4 version
from django.conf.urls.defaults import include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
]
|
StarcoderdataPython
|
1627700
|
<reponame>leonardovida/remindo-etl-airflow<gh_stars>0
from sqlalchemy import Date, Float, String, Integer, Column, DateTime, BIGINT
from sqlalchemy import ForeignKey
from sqlalchemy.orm import relationship
from src.warehouse.base import Base
from src.warehouse.models.moment import Moment
from src.warehouse.models.recipe import Recipe
class Reliability(Base):
"""Database class for reliabilities table"""
__tablename__ = 'reliabilities'
__table_args__ = {'schema': 'staging_schema'}
# TODO: sequence on Oracles
id = Column(Integer, primary_key=True, nullable=False)
alpha = Column(Float)
sem = Column(Float)
notes = Column(String(50))
missing_count = Column(Integer)
answer_count = Column(Integer)
stdev = Column(Float)
average = Column(Float)
max = Column(Float)
recipe = relationship(
"Recipe",
back_populates='reliabilities',
cascade="all, delete")
recipe_id = Column(Integer, ForeignKey(Recipe.id, ondelete="CASCADE"))
moment = relationship(
"Moment",
back_populates='reliability')
moment_id = Column(Integer, ForeignKey(Moment.id, ondelete="CASCADE"))
extract_date = Column(DateTime, nullable=False)
job_run_id = Column(Integer)
def __repr__(self):
return "<Moment(id='%s', recipe_id='%s', \
moment_id='%s', extract_date='%s', job_run_id='%s')>" % (
self.id,
self.recipe_id,
self.moment_id,
self.extract_date,
self.job_run_id
)
|
StarcoderdataPython
|
1713922
|
#!/usr/bin/env python3
import pytest
import isce3.ext.isce3.geometry as m
from isce3.ext.isce3.core import DataInterpMethod
from isce3.geometry import DEMInterpolator
from isce3.io import Raster
import iscetest
import os
import collections as cl
import numpy.testing as npt
from osgeo import gdal
def dem_info_from_gdal(file_raster: str) -> cl.namedtuple:
"""Get shape, min, max, mean of dem"""
dset = gdal.Open(file_raster, gdal.GA_ReadOnly)
band = dset.GetRasterBand(1)
dem = band.ReadAsArray()
return cl.namedtuple('dem_info', 'shape min max mean')(
dem.shape, dem.min(), dem.max(), dem.mean())
def test_constructor_ref_height():
href = 10.
dem = DEMInterpolator()
dem.ref_height = href
assert dem.ref_height == href
dem = DEMInterpolator(href)
assert dem.ref_height == href
assert dem.interpolate_xy(0, 0) == href
assert dem.interpolate_lonlat(0, 0) == href
npt.assert_equal(dem.have_raster, False)
def test_constructor_raster_obj():
    # filename of the DEM raster
filename_dem = 'dem_himalayas_E81p5_N28p3_short.tiff'
file_raster = os.path.join(iscetest.data, filename_dem)
# get some DEM info via gdal to be used as a reference for V&V
dem_info = dem_info_from_gdal(file_raster)
# build DEM object
raster_obj = Raster(file_raster)
dem_obj = DEMInterpolator(raster_obj)
# validate existence and details of DEM data
    npt.assert_equal(dem_obj.have_raster, True, err_msg='No DEM raster data')
    npt.assert_equal(dem_obj.data.shape, dem_info.shape,
                     err_msg='Wrong shape of DEM raster data')
npt.assert_allclose(dem_obj.data.min(), dem_info.min,
err_msg='Wrong min DEM height')
npt.assert_allclose(dem_obj.data.max(), dem_info.max,
err_msg='Wrong max DEM height')
npt.assert_allclose(dem_obj.data.mean(), dem_info.mean,
err_msg='Wrong mean DEM height')
def test_methods():
# pybind11::enum_ is not iterable
for name in "SINC BILINEAR BICUBIC NEAREST BIQUINTIC".split():
# enum constructor
method = getattr(DataInterpMethod, name)
dem = DEMInterpolator(method=method)
assert dem.interp_method == method
# string constructor
dem = DEMInterpolator(method=name)
assert dem.interp_method == method
dem = DEMInterpolator(method="bicubic")
assert dem.interp_method == DataInterpMethod.BICUBIC
dem = DEMInterpolator(method="biCUBic")
assert dem.interp_method == DataInterpMethod.BICUBIC
with pytest.raises(ValueError):
dem = DEMInterpolator(method="TigerKing")
|
StarcoderdataPython
|
151857
|
<reponame>calibear20/NHentai-API
import functools
from expiringdict import ExpiringDict
from typing import Callable
class Cache:
_CACHE = None
def __init__(self, cache_key_position: int, cache_key_name: str, max_age_seconds: int=3600, max_size: int=100):
self._CACHE = ExpiringDict(max_len=max_size, max_age_seconds=max_age_seconds, items=None)
self.cache_key_position = cache_key_position
self.cache_key_name = cache_key_name
    def cache(self, function: Callable):
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
key = kwargs.get(self.cache_key_name) if kwargs.get(self.cache_key_name) is not None else args[self.cache_key_position]
if self._CACHE.get(key) is not None:
print(f'INFO::retrieving cached object with key {key}')
return self._CACHE.get(key)
else:
new_execution = function(*args, **kwargs)
self._CACHE[key] = new_execution
return new_execution
return wrapper
def async_cache(self, function: Callable):
@functools.wraps(function)
async def wrapper(*args, **kwargs):
key = kwargs.get(self.cache_key_name) if kwargs.get(self.cache_key_name) is not None else args[self.cache_key_position]
if self._CACHE.get(key) is not None:
print(f'INFO::retrieving cached object with key {key}')
return self._CACHE.get(key)
else:
new_execution = await function(*args, **kwargs)
self._CACHE[key] = new_execution
return new_execution
return wrapper
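
# Minimal usage sketch (hypothetical function): results are cached by the first
# positional argument, which callers may also supply as the keyword `item_id`.
if __name__ == '__main__':
    item_cache = Cache(cache_key_position=0, cache_key_name='item_id',
                       max_age_seconds=600, max_size=50)

    @item_cache.cache
    def fetch_item(item_id):
        print(f'INFO::fetching item {item_id}')  # stand-in for an expensive call
        return {'id': item_id}

    fetch_item(1)  # computed and stored
    fetch_item(1)  # served from the expiring cache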
|
StarcoderdataPython
|
3212819
|
<gh_stars>0
import pathlib
from ted_sws import config
from ted_sws.mapping_suite_processor.adapters.allegro_triple_store import AllegroGraphTripleStore
def repository_exists(triple_store: AllegroGraphTripleStore, repository_name) -> bool:
"""
Method to check if the repository is in the triple store
:param triple_store:
:param repository_name:
:return:
"""
    return repository_name in triple_store.list_repositories()
def load_mapping_suite_output_into_triple_store(package_folder_path, allegro_host=config.ALLEGRO_HOST,
allegro_user=config.AGRAPH_SUPER_USER,
allegro_password=config.<PASSWORD>,
allegro_catalog_name: str = None):
"""
Method to create a repository in the triple store and load all ttl files from the output folder of a mapping suite
package. Name of the repository will be auto-generated from the folder name.
:param package_folder_path:
:param allegro_host:
:param allegro_user:
:param allegro_password:
:param allegro_catalog_name:
:return:
"""
package_folder_path = pathlib.Path(package_folder_path)
metadata_file = package_folder_path / "metadata.json"
assert metadata_file.exists()
package_name = package_folder_path.stem
ttl_files_paths = [str(path) for path in package_folder_path.glob("output/**/*.ttl")]
triple_store = AllegroGraphTripleStore(host=allegro_host, password=<PASSWORD>,
user=allegro_user, catalog_name=allegro_catalog_name)
if repository_exists(triple_store=triple_store, repository_name=package_name):
triple_store.delete_repository(repository_name=package_name)
triple_store.create_repository(repository_name=package_name)
for ttl_file_path in ttl_files_paths:
triple_store.add_file_to_repository(file_path=ttl_file_path, repository_name=package_name)
|
StarcoderdataPython
|
3363516
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("GeometryTest")
process.load("Configuration.StandardSequences.MagneticField_38T_cff")
#from xml text files #process.load("Geometry.CMSCommonData.cmsIdealGeometryXML_cfi")
#from db xml source: #process.load("GeometryReaders.XMLIdealGeometryESSource.cmsGeometryDB_cff")
#from db sql file: #process.load("Geometry.CaloEventSetup.xmlsqlitefile")
#from db frontier: #process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff')
#xml source: #process.load("Geometry.CaloEventSetup.CaloGeometry_cff")
#reco source from db: #
process.load("Geometry.CaloEventSetup.CaloGeometryDBReader_cfi")
#reco from db sql : #process.load("Geometry.CaloEventSetup.calodbsqlitefile")
#reco from db frontier: #
process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff')
process.load("Geometry.CaloEventSetup.CaloTopology_cfi")
process.load("Geometry.CaloEventSetup.EcalTrigTowerConstituents_cfi")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(4) )
process.source = cms.Source("EmptySource")
process.etta = cms.EDAnalyzer("DumpEcalTrigTowerMapping")
process.ctgw = cms.EDAnalyzer("TestEcalGetWindow")
process.cga = cms.EDAnalyzer("CaloGeometryAnalyzer",
fullEcalDump = cms.untracked.bool(True)
)
process.mfa = cms.EDAnalyzer("testMagneticField")
process.Timing = cms.Service("Timing")
process.SimpleMemoryCheck = cms.Service("SimpleMemoryCheck")
process.TFileService = cms.Service("TFileService",
fileName = cms.string('calogeom.root')
)
process.p1 = cms.Path(process.etta*process.ctgw*process.cga*process.mfa)
|
StarcoderdataPython
|
1718843
|
from random import random
row = ["city", "moving_from", "bhk", "min_budget", "max_budget", "customer_type", "workers", "no_of_cars",
       "house_type", "travelling_time", "furnishing", "lease_type", "seen_other_options", "show_old_construction",
       "status", "is_urgent", "state"]
print (", ".join(row))
for i in range(1, 100000):
row = []
rand = random()
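    # A single draw drives every field of the row, so all columns of a generated
    # record are perfectly correlated rather than independently random.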
# city
if rand < 0.96517:
row.append(1)
elif rand < 0.99022:
row.append(2)
else:
row.append(3)
# moving from
if rand < 0.55897:
row.append(1)
elif rand < 0.78995:
row.append(2)
else:
row.append(3)
# bhk
if rand < 0.53399:
row.append(3) # 2BHK
elif rand < 0.75348:
row.append(4) # 3BHK
elif rand < 0.92974:
row.append(2) # 1BHK
elif rand < 0.96899:
row.append(1) # 1RK
elif rand < 0.99786:
row.append(5) # 4BHK
else:
row.append(6) # 5BHK
# min budget --------------- ensure < max budget
if rand < 0.34479:
row.append(15000)
minBudget = 15000
elif rand < 0.59242:
row.append(20000)
minBudget = 20000
elif rand < 0.69272:
row.append(25000)
minBudget = 25000
elif rand < 0.75833:
row.append(30000)
minBudget = 30000
elif rand < 0.8008:
row.append(10000)
minBudget = 10000
elif rand < 0.8367:
row.append(18000)
minBudget = 18000
elif rand < 0.8836:
row.append(35000)
minBudget = 35000
elif rand < 0.89995:
row.append(16000)
minBudget = 16000
elif rand < 0.91614:
row.append(40000)
minBudget = 40000
elif rand < 0.93111:
row.append(22000)
minBudget = 22000
elif rand < 0.93798:
row.append(17000)
minBudget = 17000
else:
row.append(20000)
minBudget = 20000
# max budget
if rand < 0.2348:
if minBudget > 15000:
row.append(minBudget + 5000)
else:
row.append(15000)
elif rand < 0.42056:
if minBudget > 20000:
row.append(minBudget + 5000)
else:
row.append(20000)
elif rand < 0.56141:
if minBudget > 25000:
row.append(minBudget + 5000)
else:
row.append(25000)
elif rand < 0.65842:
if minBudget > 30000:
row.append(minBudget + 5000)
else:
row.append(30000)
elif rand < 0.71204:
if minBudget > 18000:
row.append(minBudget + 5000)
else:
row.append(18000)
elif rand < 0.75329:
if minBudget > 35000:
row.append(minBudget + 5000)
else:
row.append(35000)
elif rand < 0.78598:
if minBudget > 16000:
row.append(minBudget + 5000)
else:
row.append(16000)
elif rand < 0.81714:
if minBudget > 22000:
row.append(minBudget + 5000)
else:
row.append(22000)
elif rand < 0.84342:
if minBudget > 40000:
row.append(minBudget + 5000)
else:
row.append(40000)
elif rand < 0.86695:
if minBudget > 17000:
row.append(minBudget + 5000)
else:
row.append(17000)
elif rand < 0.88559:
if minBudget > 50000:
row.append(minBudget + 5000)
else:
row.append(50000)
elif rand < 0.89903:
if minBudget > 23000:
row.append(minBudget + 5000)
else:
row.append(23000)
elif rand < 0.91171:
if minBudget > 45000:
row.append(minBudget + 5000)
else:
row.append(45000)
elif rand < 0.9221:
if minBudget > 28000:
row.append(minBudget + 5000)
else:
row.append(28000)
elif rand < 0.93142:
if minBudget > 24000:
row.append(minBudget + 5000)
else:
row.append(24000)
else:
if minBudget > 23000:
row.append(minBudget + 5000)
else:
row.append(23000)
# customer type
if rand < 0.30263:
row.append(1)
elif rand < 0.54598:
row.append(5)
elif rand < 0.71356:
row.append(6)
elif rand < 0.85517:
row.append(2)
elif rand < 0.9175:
row.append(7)
elif rand < 0.9644:
row.append(4)
else:
row.append(3)
# workers
if rand < 0.37504:
row.append(2)
elif rand < 0.64879:
row.append(3)
elif rand < 0.81026:
row.append(4)
elif rand < 0.9395:
row.append(1)
elif rand < 0.98029:
row.append(5)
elif rand < 0.99559:
row.append(6)
elif rand < 0.99742:
row.append(7)
elif rand < 0.99864:
row.append(8)
elif rand < 0.9994:
row.append(10)
else:
row.append(9)
# no of cars
if rand < 0.56493:
row.append(1)
elif rand < 0.89979:
row.append(0)
elif rand < 0.99328:
row.append(2)
elif rand < 0.99832:
row.append(3)
else:
row.append(4)
# house type
if rand < 0.48457:
row.append(3)
elif rand < 0.7982:
row.append(1)
elif rand < 0.90239:
row.append(4)
elif rand < 0.98274:
row.append(2)
else:
row.append(5)
# travelling time
if rand < 0.36603:
row.append(15)
elif rand < 0.60022:
row.append(30)
elif rand < 0.76857:
row.append(20)
elif rand < 0.82586:
row.append(10)
elif rand < 0.8642:
row.append(25)
elif rand < 0.8891:
row.append(0)
elif rand < 0.91033:
row.append(45)
elif rand < 0.92973:
row.append(40)
elif rand < 0.93951:
row.append(60)
elif rand < 0.94501:
row.append(5)
elif rand < 0.95051:
row.append(35)
else:
row.append(20)
# furnishing
if rand < 0.58692:
row.append(1)
else:
row.append(2)
# lease type
if rand < 0.97006:
row.append(1)
else:
row.append(2)
# seen other options
if rand < 0.73938:
row.append(2)
else:
row.append(1)
# show old construction
if rand < 0.7212:
row.append(1)
else:
row.append(2)
# status
if rand < 0.44332:
row.append(2)
elif rand < 0.72716:
row.append(3)
elif rand < 0.97556:
row.append(0)
else:
row.append(4)
# is urgent
if rand < 0.83379:
row.append(2)
else:
row.append(1)
#result
if rand < 0.03161:
row.append(1)
else:
row.append(2)
print(str(row).strip('[]'))
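# Added sketch (not part of the original script): the rows printed above could be
# written straight to a CSV file with the standard csv module. The helper below is
# hypothetical, is never called here, and the file name "leads.csv" is an assumption.
def write_rows_to_csv(rows, path="leads.csv"):
    import csv
    with open(path, "w", newline="") as fh:
        writer = csv.writer(fh)
        for r in rows:
            writer.writerow(r)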
|
StarcoderdataPython
|
3342197
|
# -*- coding: utf-8 -*-
import re
import struct
from .common import *
from .CuSMVersion import CuSMVersion
# Pattern that matches an instruction string
p_InsPattern = re.compile(r'(@!?U?P\d|@!PT)?\s*\{?\s*(\w+.*)\s*')
# Pattern that matches scoreboard sets, such as {1}, {4,2}
# Seems only appear after opcode DEPBAR
p_SBSet = re.compile(r'\{\s*(\d\s*,\s*)*\d\s*\}')
# NOTE: about constants translate dict
# 1) +/-QNAN is not recognized by python float(), use +/-NAN
# +/-INF seems OK,
# QNAN for FSEL may not work properly, needs special treatment
# 2) .reuse will be treated separately for control codes, hence ignored here.
# 3) RZ may also appear in FADD/FMUL/FFMA.RZ ...
# 4) UPT is not found, may be just PT?
p_ConstTrDict = {r'(?<!\.)\bRZ\b' : 'R255', r'\bURZ\b' : 'UR63',
r'\bPT\b' : 'P7', r'\bUPT\b' : 'UP7', r'\bQNAN\b' : 'NAN', r'\.reuse\b':''}
# Pattern for striping modifiers from an operand
# .*? for non-greedy match, needed for [R0.X4].A
p_ModifierPattern = re.compile(r'^([~\-\|!]*)(.*?)((\.\w+)*)$')
# Match Label+Index (including translated RZ/URZ/PT)
# SBSet is the score board set for DEPBAR, translated before parsing
p_IndexedPattern = re.compile(r'\b(R|UR|P|UP|B|SB|SBSET)(\d+)\b')
# Immediate floating point numbers, (NOTE: add 0f0000000 to skip conversion)
# Some instruction in
p_FIType = re.compile(r'^(((-?\d+)(\.\d*)?((e|E)[-+]?\d+)?)(\.NEG)?|([+-]?INF)|([+-]NAN)|(0[fF][0-9a-f]+))$')
# Pattern for constant memory, some instructions have a mysterious space between two square brackets...
p_ConstMemType = re.compile(r'c\[(0x\w+)\] *\[([+-?\w\.]+)\]')
# Pattern for matching white spaces
p_WhiteSpace = re.compile(r'\s+')
# modifiers (1 char) that may appear before operands
c_OpPreModifierChar = {'!':'NOT', '-':'NEG', '|':'ABS', '~':'BITNOT'}
# Jump functions that may use the instruction address
# TODO: Some instructions already have a neg sign before the address; will it still work?
c_AddrFuncs = set(['BRA', 'BRX', 'BRXU', 'CALL', 'JMP',
'JMX', 'JMXU', 'RET', 'BSSY', 'BSYNC',
'SSY', 'CAL', 'PBK'])
# Functions that have position dependent modifiers, such as F2F.F16.F32 != F2F.F32.F16
c_PosDepFuncs = set(['I2I', 'F2F', 'IDP', 'HMMA', 'IMMA', 'XMAD', 'VADD'])
c_PosDepModis = set(['S8', 'S16', 'S32', 'S64', 'U8', 'U16', 'U32', 'U64', 'F16', 'F32', 'F64']) # TODO:
# I2F/F2I/F2F has different OpCode for 32/64,
# but 32bit modifier may not be displayed
# FRND may not need this
c_FloatCvtOpcodes = set(['I2F', 'I2I', 'F2I', 'F2F', 'FRND'])
class CuInsParser():
''' CuInsParser will parse the instruction string to inskey, values, and modifiers.
Which could be then assembled by CuInsAssembler.
    Since the parser will consume a considerable amount of memory, the "parse" should be
called with limited instances, which will update the members accordingly.
We don't make the "parse" a static function, since we frequently need to check some
internal variables of parsing results, especially during debugging.
'''
# predicate value is the first element in value vector
PRED_VAL_IDX = 0
#
OPERAND_VAL_IDX = 1
def __init__(self, arch='sm_75'):
self.m_InsAddr = 0 # ins address, needed by branch type of ins
self.m_InsString = '' # original asm string
self.m_CTrString = '' # constants translated asm string
self.m_InsCode = None # instruction code
self.m_InsKey = '' # key for current type of ins, eg: FFMA_R_R_R_R
self.m_InsOp = '' # function name, such as FFMA, MOV, ...
self.m_InsOpFull = '' # function name with modifiers
self.m_InsPredVal = 0 # predicate value (0b****)
self.m_InsPredStr = '' # predicate string
self.m_InsModifier = [] # modifier dict
self.m_InsVals = [] # array of operand values (not include labels)
self.m_SMVersion = CuSMVersion(arch)
def dumpInfo(self):
print('#### CuInsParser @ 0x%016x ####' % id(self))
print('InsString: ' + self.m_InsString)
print('CTrString: ' + self.m_CTrString)
print('InsAddr: 0x%x' % self.m_InsAddr)
print('InsPred: %s (%s)' % (self.m_InsPredStr, bin(self.m_InsPredVal)) )
print('InsCode: 0x%032x' % self.m_InsCode)
print('InsKey: ' + self.m_InsKey)
print('InsVals: ' + intList2Str(self.m_InsVals))
print('InsModifier: ' + str(self.m_InsModifier))
print('\n')
def parse(self, s, addr=0, code=None):
''' Parse input string as instruction.'''
self.m_InsString = s.strip()
self.m_CTrString = self.__preprocess(self.m_InsString)
r = p_InsPattern.match(self.m_CTrString)
if r is None:
return None
#raise ValueError("Unknown instruction: " + s)
self.m_InsAddr = addr
self.m_InsCode = code
self.m_InsPredStr = r.groups()[0]
# Currently pred is treated as known format operand
# The value will be directly computed.
self.m_InsPredVal = self.__parsePred(self.m_InsPredStr)
ins_main = r.groups()[1]
# TODO: use more robust tokenizer
tokens = re.split(',', ins_main) # Splitting operands
# usually ',' will be sufficient to split the operands
# ( space does not work for c[0x0] [0x0].F32 )
# And user may add extra spaces.
#
# Exception: "RET.REL.NODEC R10 0x0 ;"
# we will split it again, treat it as another separate operand
ts = tokens[0].split(' ')
ts.extend(tokens[1:])
tokens = [t.strip() for t in ts]
op_tokens = tokens[0].split('.') # Op and Op modifiers
self.m_InsKey = op_tokens[0]
self.m_InsOp = op_tokens[0]
self.m_InsOpFull = tokens[0]
self.m_InsVals = [self.m_InsPredVal]
self.m_InsModifier = ['0_' + m for m in op_tokens] # TODO: May be we can treat pos dep modifiers here?
for iop, op in enumerate(tokens[1:]):
            if len(op)==0: # skip empty operand tokens (e.g. from stray separators)
continue
optype, opval, opmodi = self.__parseOperand(op)
self.m_InsKey += '_' + optype
self.m_InsVals.extend(opval)
self.m_InsModifier.extend([('%d_'%(iop+1))+m for m in opmodi])
self.__specialTreatment() #
return self.m_InsKey, self.m_InsVals, self.m_InsModifier
def __preprocess(self, s):
''' Translate pre-defined constants (RZ/URZ/PT/...) to known or indexed values.
Translate scoreboard sets {4,2} to SBSet
'''
s = s.strip(' ;')
for cm in p_ConstTrDict:
s = re.sub(cm, p_ConstTrDict[cm], s)
res = p_SBSet.search(s)
if res is not None:
SB_valstr = self.__transScoreboardSet(res.group())
s = p_SBSet.sub(SB_valstr, s)
s = s.strip(' {}')
return s
def __parseOperand(self, operand):
'''Parse operand to (type, val, modi).
Every operand should return with:
type:str, val:list, modi:list'''
#print('Parsing operand: ' + operand)
# all spaces inside the operand part of instruction are insignificant
# subn returns (result, num_of_replacements), thus the trailing [0]
operand = p_WhiteSpace.subn('', operand)[0]
# Every operand may have one or more modifiers
op, modi = self.stripModifier(operand)
if p_IndexedPattern.match(op) is not None:
optype, opval, tmodi = self.__parseIndexedToken(op)
opmodi = modi
opmodi.extend(tmodi)
elif op[0] == '[': # address
optype, opval, opmodi = self.__parseAddress(op)
# elif op[0] == '{': # BarSet such as {3,4}, only for DEPBAR (deprecated? could set in control codes)
# # DEPBAR may wait a certain number of counts for one scoreboard,
# optype, opval, opmodi = self.__parseBarSet(op)
# NOTE: the scoreboard set is translated to indexed type in preprocess, thus no treatment here.
elif op.startswith('c['):
optype, opval, opmodi = self.__parseConstMemory(op)
opmodi.extend(modi)
elif op.startswith('0x'):
optype = 'II'
opval, opmodi = self.__parseIntImme(operand)
elif p_FIType.match(operand) is not None:
optype = 'FI'
opval, opmodi = self.__parseFloatImme(operand)
else: # label type, keep as is
optype = operand
opval = [1]
opmodi = []
return optype, opval, opmodi
def __parseIndexedToken(self, s):
'''Parse index token such as R0, UR1, P2, UP3, B4, SB5, ...
        (RZ, URZ, PT should be translated in advance)'''
tmain, modi = self.stripModifier(s)
r = p_IndexedPattern.search(tmain)
t = r.groups()[0]
v = [int(r.groups()[1])]
return t, v, modi
def __parsePred(self, s):
''' Parse predicates (@!?U?P[\dT]) to values.
'''
if s is None or len(s)==0:
return 7
t, v, modi = self.__parseIndexedToken(s.lstrip('@'))
if 'NOT' in modi:
return v[0] + 8
else:
return v[0]
def __parseFloatImme(self, s):
''' Parse float point immediates to binary, according to the instruction precision.
precision is the opcode precision, currently D/F/H for double/single(float)/half.
NOTE: currently, +/-QNAN will be always translated to a UNIQUE binary,
but sometimes nan could represent a set of values.
But since it's not showed in the assembly string, there's no way to recover this value.
'''
p = self.m_InsOp[0]
if p in set(['H', 'F', 'D']):
prec = p
elif self.m_InsOp=="MUFU": # It's rather wield that MUFU will have an imme input, any side effect?
if '64' in self.m_InsOpFull:
prec = 'D'
else:
prec = 'F'
else:
self.dumpInfo()
raise ValueError('Unknown float precision (%s)!' % self.m_InsOp)
if self.m_InsOp.endswith('32I'):
nbits = 32
else:
nbits = -1
v, modi = self.m_SMVersion.convertFloatImme(s, prec, nbits)
return [v], modi
def __parseIntImme(self, s):
        ''' Parse integer immediates.
            Positive int immediates are always kept as is,
            but negative ints may depend on the type.
Currently we try to let the coefficient determined by the code, not predetermined.
TODO(Done):
Some ALU instructions such as IADD3 in sm5x/6x, the sign bit will be moved to the modifier.
If the sign bit is explicitly show (such as -0x1), it can be handled by 'NegIntImme'.
But if it's implicitly defined (such as 0xfffff, 20bit used, but int imme has only 19bit),
            we need to handle it separately.
'''
i = int(s, 16)
if i>=0:
return self.m_SMVersion.splitIntImmeModifier(self, i)
else:
return [i], ['NegIntImme']
def __parseConstMemory(self, s):
opmain, opmodi = self.stripModifier(s)
r = p_ConstMemType.match(opmain)
if r is None:
raise ValueError("Invalid constant memory operand: %s" %s)
opval = [int(r.groups()[0], 16)]
atype, aval, amodi = self.__parseAddress(r.groups()[1])
optype = 'c' + atype
opval.extend(aval)
opmodi.extend(amodi)
return optype, opval, opmodi
def __transScoreboardSet(self, s):
''' Translate scoreboard set such as {3,4} to int values.
This is done during preprocessing, since the comma(',') will be used to split the operands.
'''
ss = s.strip('{}').split(',')
v = 0
        for bs in ss: # set one bit per scoreboard index
v += 1<<(int(bs))
return 'SBSET%d'%v
def __parseAddress(self, s):
''' Parse operand type Address [R0.X8+UR4+-0x8]
Zero immediate will be appended if not present.
It's harmless if there is no such field, since the value will always be 0.
TODO(Done): what for [R0.U32+UR4.U64] ?? Could in another order?
May need extra tag in modifiers?
'''
ss = s.strip('[]').split('+')
optype = 'A'
opval = []
opmodi = []
for ts in ss:
if '0x' in ts:
optype += 'I'
i_opval, i_opmodi = self.__parseIntImme(ts)
opval.extend(i_opval)
opmodi.extend(i_opmodi)
else:
ttype, tval, tmodi = self.__parseIndexedToken(ts)
optype += ttype
opval.extend(tval)
# The modifier is prefixed by type
# Thus [R0.U32+UR4.U64] => ['R.U32', 'UR.U64']
# (If any) [R0.U64+UR4.U32] => ['R.U64', 'UR.U32']
opmodi.extend([ (ttype+'.'+m) for m in tmodi])
# Pad with zero immediate if not present
# Harmless even if it does not support immediates
if not optype.endswith('I'):
optype += 'I'
opval.append(0)
return optype, opval, opmodi
def __specialTreatment(self):
''' Special treatments after parsing.
Handle exceptions that cannot processed with current approach.
TODO: Use dict mapping to subroutines, rather than if/else
How??? F2F may need several special treatments...
'''
        if self.m_InsOp == 'PLOP3': # immLut for PLOP3 is encoded with separating 5+3 bits
# e.g.: 0x2a = 0b00101010 => 00101 xxxxx 010
# LOP3 seems fine
v = self.m_InsVals[-2]
self.m_InsVals[-2] = (v&7) + ((v&0xf8)<<5)
elif self.m_InsOp in c_FloatCvtOpcodes:
if '64' in self.m_InsOpFull:
self.m_InsModifier.append('0_CVT64')
elif self.m_InsOp in c_AddrFuncs: # Functions that use address of current instruction
# CHECK: what if the address is not the last operand?
if self.m_InsKey.endswith('_II'):
if 'ABS' not in self.m_InsOpFull: # CHECK: Other absolute address?
addr = self.m_InsVals[-1] - self.m_InsAddr - self.m_SMVersion.getInstructionLength()
if addr<0:
self.m_InsModifier.append('0_NegAddrOffset')
# The value length of same key should be kept the same
self.m_InsVals[-1] = addr
if self.m_InsOp in c_PosDepFuncs:
# the modifier of I2I/F2F is position dependent
# eg: F2F.F32.F64 vs F2F.F64.F32
# TODO: find all instructions with position dependent modifiers
counter = 0
for i,m in enumerate(self.m_InsModifier):
if m.startswith('0_') and m[2:] in c_PosDepModis:
self.m_InsModifier[i] += '@%d'%counter
counter += 1
def stripModifier(self, s):
'''Split the token to three parts
preModifier([~-|!]), opmain, postModifier(.FTZ, .X16, ...) '''
r = p_ModifierPattern.match(s) # split token to three parts
if r is None:
raise ValueError("Unknown token %s" % s)
else:
pre = r.groups()[0]
post = r.groups()[2]
opmain = r.groups()[1]
opmodi = []
for c in pre:
opmodi.append(c_OpPreModifierChar[c])
for c in post.split('.'):
if len(c)==0:
continue
opmodi.append(c)
return opmain, opmodi
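# Added usage sketch (not part of the original module). It shows how a caller might
# drive CuInsParser.parse on one disassembled SASS line; the instruction string,
# address and arch are illustrative assumptions. Because this module uses relative
# imports, the function below is only a sketch and is not invoked here.
def _example_parse():
    cip = CuInsParser(arch='sm_75')
    # parse returns (inskey, values, modifiers), e.g. ('FADD_R_R_R', [...], [...])
    ins_key, ins_vals, ins_modi = cip.parse('@!P0 FADD R4, R2, R3 ;', addr=0x80, code=0)
    return ins_key, ins_vals, ins_modi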
|
StarcoderdataPython
|
3386960
|
from sys import exit
from django.conf import settings
from PIL import Image as PilImage
from PIL import UnidentifiedImageError
from forum.cdn.models import Image
def run():
print('already done')
exit(1)
all_images = list()
to_delete = set()
for idx, image in enumerate(Image.objects.all()): # type: int, Image
if idx % 1000 == 0:
print(idx)
abs_path = \
settings.CDN['PATH_SIZES']['downloaded'].joinpath(image.cdn_path)
try:
with PilImage.open(fp=abs_path) as pil_image:
image.width, image.height = pil_image.size
all_images.append(image)
except UnidentifiedImageError:
to_delete.add(image.pk)
Image.objects.bulk_update(
objs=all_images, fields=['width', 'height'], batch_size=500)
print('to delete:', to_delete)
Image.objects.filter(pk__in=to_delete).delete()
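# Added sketch (not part of the original script): a hedged variant of the loop above
# that streams rows with QuerySet.iterator() and flushes bulk_update in chunks, to
# keep memory flat on very large tables. Names reuse the imports above; the chunk
# size of 500 is an assumption. The function is not called here.
def run_streaming():
    buffer, broken = [], set()
    for image in Image.objects.iterator():
        abs_path = \
            settings.CDN['PATH_SIZES']['downloaded'].joinpath(image.cdn_path)
        try:
            with PilImage.open(fp=abs_path) as pil_image:
                image.width, image.height = pil_image.size
            buffer.append(image)
        except UnidentifiedImageError:
            broken.add(image.pk)
        if len(buffer) >= 500:
            Image.objects.bulk_update(objs=buffer, fields=['width', 'height'])
            buffer.clear()
    if buffer:
        Image.objects.bulk_update(objs=buffer, fields=['width', 'height'])
    Image.objects.filter(pk__in=broken).delete()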
|
StarcoderdataPython
|
174611
|
<reponame>chart21/fdrtd<gh_stars>0
"""
contains the entry points of the API
"""
from flask import current_app
from fdrtd.server.exceptions import handle_exception
def get_bus():
"""get the singleton bus of the server application"""
with current_app.app_context():
return current_app.bus
def list_representations():
"""list available server-side objects"""
try:
response = get_bus().list_representations()
return {'type': 'list', 'list': response}, 200 # OK
except Exception as exception:
return handle_exception(exception)
def create_representation(body):
"""create a representation"""
try:
response = get_bus().create_representation(body)
return {'type': 'uuid', 'uuid': response}, 200 # OK
except Exception as exception:
return handle_exception(exception)
def upload_representation(body):
"""upload an object, and return its representation"""
try:
response = get_bus().upload_representation(body)
return {'type': 'uuid', 'uuid': response}, 200 # OK
except Exception as exception:
return handle_exception(exception)
def call_representation(representation_uuid, body):
"""call a server-side object"""
try:
response = get_bus().call_representation(representation_uuid, body)
if response is None:
return {'type': 'none'}, 200 # OK
return {'type': 'uuid', 'uuid': response}, 200 # OK
except Exception as exception:
return handle_exception(exception)
def download_representation(representation_uuid):
"""download a serialized version of a server-side object"""
try:
response = get_bus().download_representation(representation_uuid)
return {'type': 'object', 'object': response}, 200 # OK
except Exception as exception:
return handle_exception(exception)
def release_representation(representation_uuid):
"""release a representation"""
try:
get_bus().release_representation(representation_uuid)
return {'type': 'none'}, 200 # OK
except Exception as exception:
return handle_exception(exception)
def create_attribute(representation_uuid, attribute_name):
"""create a representation of an attribute of a representation"""
try:
uuid = get_bus().create_attribute(representation_uuid, attribute_name, public=True)
return {'type': 'uuid', 'uuid': uuid}, 200 # OK
except Exception as exception:
return handle_exception(exception)
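# Added design note (not part of the original module): every handler above repeats
# the same try/except envelope. A hedged sketch of a decorator that could factor it
# out is shown below as an alternative; it is not part of the project's API and is
# not used by the functions above.
import functools
def _wrap_bus_call(payload_key):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                response = func(*args, **kwargs)
                if response is None:
                    return {'type': 'none'}, 200  # OK
                return {'type': payload_key, payload_key: response}, 200  # OK
            except Exception as exception:
                return handle_exception(exception)
        return wrapper
    return decorator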
|
StarcoderdataPython
|
1742863
|
<reponame>Jianguo188/LeetCode-Py
class Heapq:
    # Heap adjustment: sift the node down to maintain a max-heap
def heapAdjust(self, nums: [int], index: int, end: int):
left = index * 2 + 1
right = left + 1
while left <= end:
            # the current node is a non-leaf node
max_index = index
if nums[left] > nums[max_index]:
max_index = left
if right <= end and nums[right] > nums[max_index]:
max_index = right
if index == max_index:
                # if no swap is needed, the adjustment is already finished
break
nums[index], nums[max_index] = nums[max_index], nums[index]
            # continue adjusting the subtree
index = max_index
left = index * 2 + 1
right = left + 1
    # Build the array into a binary (max-)heap
def heapify(self, nums: [int]):
size = len(nums)
        # (size - 2) // 2 is the last non-leaf node; leaf nodes need no adjustment
for i in range((size - 2) // 2, -1, -1):
            # call the heap-adjustment function
self.heapAdjust(nums, i, size - 1)
    # Push (enqueue) operation
def heappush(self, nums: list, value):
nums.append(value)
size = len(nums)
i = size - 1
        # find the insertion position
while (i - 1) // 2 >= 0:
cur_root = (i - 1) // 2
            # value is smaller than the current root, so insert at the current position
if nums[cur_root] > value:
break
            # keep searching upwards
nums[i] = nums[cur_root]
i = cur_root
        # insertion position found (or root reached); insert the value
nums[i] = value
    # Pop (dequeue) operation
def heappop(self, nums: list) -> int:
size = len(nums)
nums[0], nums[-1] = nums[-1], nums[0]
        # take the maximum (heap top), then re-adjust the heap
top = nums.pop()
if size > 0:
self.heapAdjust(nums, 0, size - 2)
return top
    # Heap sort in ascending order
def heapSort(self, nums: [int]):
self.heapify(nums)
size = len(nums)
for i in range(size):
nums[0], nums[size - i - 1] = nums[size - i - 1], nums[0]
self.heapAdjust(nums, 0, size - i - 2)
return nums
nums = [49, 38, 65, 97, 76, 13, 27, 49]
heap = Heapq()
# 1. Build the heap and run heap sort
heap.heapSort(nums)
heap.heapify(nums)
# 2. Test heappop()
rst = heap.heappop(nums)
print(rst)
rst = heap.heappop(nums)
print(rst)
rst = heap.heappop(nums)
print(rst)
rst = heap.heappop(nums)
print(rst)
rst = heap.heappop(nums)
print(rst)
rst = heap.heappop(nums)
print(rst)
# 3. Test heappush()
nums = [49, 38, 65, 97, 76, 13, 27, 49]
heapList = []
for num in nums:
heap.heappush(heapList, num)
print(heapList)
# 4. Heap sort
rst = heap.heapSort(heapList)
print(heapList)
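# Added comparison (not part of the original file): the standard library heapq module
# is a min-heap, so the max-heap behaviour above can be emulated by pushing negated
# values. A short hedged sketch, not called here:
import heapq as _stdlib_heapq
def _stdlib_max_heap_demo(values):
    h = [-v for v in values]
    _stdlib_heapq.heapify(h)
    return -_stdlib_heapq.heappop(h)  # largest element, e.g. 97 for the nums above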
|
StarcoderdataPython
|
1676595
|
<gh_stars>1000+
# Copyright 2018 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
def _load_source(name, path):
try:
from importlib.machinery import SourceFileLoader
return SourceFileLoader(name, path).load_module()
except ImportError:
# importlib.machinery doesn't exists in Python 2 so we will use imp (deprecated in Python 3)
import imp
return imp.load_source(name, path)
idf_path = os.environ['IDF_PATH']
# protocomm component related python files generated from .proto files
constants_pb2 = _load_source('constants_pb2', idf_path + '/components/protocomm/python/constants_pb2.py')
sec0_pb2 = _load_source('sec0_pb2', idf_path + '/components/protocomm/python/sec0_pb2.py')
sec1_pb2 = _load_source('sec1_pb2', idf_path + '/components/protocomm/python/sec1_pb2.py')
session_pb2 = _load_source('session_pb2', idf_path + '/components/protocomm/python/session_pb2.py')
# wifi_provisioning component related python files generated from .proto files
wifi_constants_pb2 = _load_source('wifi_constants_pb2', idf_path + '/components/wifi_provisioning/python/wifi_constants_pb2.py')
wifi_config_pb2 = _load_source('wifi_config_pb2', idf_path + '/components/wifi_provisioning/python/wifi_config_pb2.py')
wifi_scan_pb2 = _load_source('wifi_scan_pb2', idf_path + '/components/wifi_provisioning/python/wifi_scan_pb2.py')
# custom_provisioning component related python files generated from .proto files
custom_config_pb2 = _load_source('custom_config_pb2', idf_path +
'/examples/provisioning/legacy/custom_config/components/custom_provisioning/python/custom_config_pb2.py')
|
StarcoderdataPython
|
4818515
|
<reponame>HashFlag/tracer
import datetime
from django.utils.deprecation import MiddlewareMixin
from django.conf import settings
from app01 import models
from django.shortcuts import redirect
class tracer:
def __init__(self):
self.user_obj = None
self.price_policy = None
self.project = None
class LoginAuth(MiddlewareMixin):
def process_request(self, request):
current_path = request.path
user_id = request.session.get("user_id")
tracer_obj = tracer()
request.tracer_obj = tracer_obj
user_obj = models.UserInfo.objects.filter(pk=user_id).first()
request.tracer_obj.user_obj = user_obj
print("current_path:", type(current_path))
        # whitelist check: if the path is in the whitelist, let the request through
if current_path in settings.WHITE_LIST:
return
        # otherwise redirect to the login page
elif not user_id:
return redirect('app01:login')
        # attach the current user's purchased price policy to the request object for later use
tra_obj = models.Transaction.objects.filter(
user=request.tracer_obj.user_obj
).order_by('-id').first()
        # check whether the subscription has expired
        # first check whether it is the free plan, judged by the end time
end_datetime = tra_obj.end_datetime
current_time = datetime.datetime.now()
        # if expired
if end_datetime and end_datetime < current_time:
            # find the corresponding policy from the purchase record and store it on the request object
price_policy = models.PricePolicy.objects.filter(catogory=1, title='免费').first()
request.tracer_obj.price_policy = price_policy
else:
            # not expired: attach the current policy here
request.tracer_obj.price_policy = tra_obj.price_policy
def process_view(self, request, view, args, kwargs):
if not request.path.startswith('/app01/manage/'):
return
pro_id = kwargs.get('pro_id')
        # projects I created
project_obj = models.Project.objects.filter(
id=pro_id, creator=request.tracer_obj.user_obj)
if project_obj:
request.tracer_obj.project = project_obj.first()
return
        # projects I participate in
project_obj = models.ProjectUser.objects.filter(
project_id=pro_id, user=request.tracer_obj.user_obj
)
if project_obj:
request.tracer_obj.project = project_obj.first().project
return
return redirect('app01:project_list')
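# Added configuration sketch (not part of the original module): LoginAuth relies on
# settings.WHITE_LIST and on being listed in MIDDLEWARE after SessionMiddleware.
# The dotted module path and URL values below are assumptions based on the code above.
#
# MIDDLEWARE = [
#     ...
#     'django.contrib.sessions.middleware.SessionMiddleware',
#     'middlewares.auth.LoginAuth',        # hypothetical dotted path to this class
# ]
# WHITE_LIST = ['/app01/login/', '/app01/register/']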
|
StarcoderdataPython
|
1620662
|
<reponame>Web-Dev-Collaborative/DS-ALGO-OFFICIAL<gh_stars>10-100
def even_occuring_element(arr):
"""Returns the even occuring element within a list of integers"""
dict = {}
for num in arr:
if num in dict:
dict[num] += 1
else:
dict[num] = 1
for num in dict:
if not dict[num] & 1: # bitwise check for parity.
return num
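# Added usage example (not part of the original snippet): 2 is the only value below
# that occurs an even number of times.
print(even_occuring_element([1, 2, 2, 3, 3, 3]))  # expected output: 2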
|
StarcoderdataPython
|
1744373
|
<gh_stars>0
from __future__ import print_function, unicode_literals
from PyInquirer import prompt, print_json
from tracra_export import writeMails
from mailbox import MailboxAnalyzeObject
from oauth_utils import openOauthWebsite, generateOauthString
import pandas as pd
import os, sys
from multiprocessing import freeze_support
class StudyApp:
def __init__(self):
self.mailbox = MailboxAnalyzeObject()
self.email = None
self.imap = None
        self.password = None  # filled in later by the IMAP/OAuth login flow
self.email_alias_set = set()
def fetchMailsAndWriteToDisk(self):
self.mailbox.fetchMails(self.password, self.imap)
self.mailbox.analyzeMails()
writeMails(self.mailbox.analyzed_mails, "output", False, meta_infos=self.mailbox.additional_meta_infos)
def login_form(self):
questions = [
{
"type": 'input',
"name": "first_name",
"message": "Vorname"
},
{
"type": 'input',
"name": "last_name",
"message": "Nachname"
},
{
"type": 'input',
"name": "email",
"message": "Email"
}
]
print("Hinweis: Vor- und Nachname werden benötigt um herauszufinden, ob eine E-Mail eine persönliche Ansprache enthält.")
print("Die E-Mail-Adresse wird neben dem Login auch genutzt um herauszufinden, \
ob diese beim Öffnen von E-Mails oder beim Klicken auf Links von einem externen Akteur \
mitgelesen werden könnte. Zu diesem Zweck werden auch mögliche Aliase abgefragt.")
answers = prompt(questions)
self.mailbox.email = answers['email'].strip().lower()
self.mailbox.forename = answers['first_name']
self.mailbox.lastname = answers['last_name']
self.email_alias_set.add(self.mailbox.email)
self.email_alias_form()
if self.mailbox.email.endswith("gmail.com"):
self.imap = "imap.gmail.com"
self.google_form()
else:
self.imap_form()
def google_form(self):
confirmquestion = [
{"type": "input",
"message": "Google erfordert eine explizite Erlaubnis um auf Emails zuzugreifen. Du wirst gleich auf google.com weitergeleitet um dem Zugriff zuzustimmen. Drücke dafür Enter.",
"name": "continue"
}
]
prompt(confirmquestion)
openOauthWebsite()
tokenquestion = [
{"type": "input",
"message": "Access Token",
"name": "token"
}
]
answers = prompt(tokenquestion)
access_token = answers["token"]
auth_code = generateOauthString(access_token, self.mailbox.email)
        self.password = auth_code  # the generated OAuth string stands in for the password
self.fetchMailsAndWriteToDisk()
self.exit_form()
def imap_form(self):
infered_login, infered_imap = self.mailbox.inferIMAPServer(self.mailbox.email)
questions = [
{
"type": 'input',
"name": "imap",
"message": "IMAP Server",
"default": infered_imap
},
{
"type": 'input',
"name": "login",
"message": "Login",
"default": infered_login
},
{
"type": 'password',
"name": "password",
"message": "<PASSWORD>wort"
}
]
answers = prompt(questions)
self.imap = answers['imap']
self.password = answers['password']
self.mailbox.login = answers['login']
self.fetchMailsAndWriteToDisk()
self.exit_form()
def exit_form(self):
questions = [
{
"type": 'input',
"name": "exit_propmpt",
"message": "Fertig. Drücke Enter zum schließen."
}
]
answers = prompt(questions)
sys.exit("Programm beendet.")
def email_alias_form(self):
ask_again = True
while ask_again:
questions = [
{
"type": 'input',
"message": "Solltest du E-Mail-Aliasse nutzen, kannst du diese hier angeben. \
Jeweils einen Alias eintragen, dann Enter drücken. \
Falls nicht, lasse das Feld leer und drücke einfach Enter um Fortzufahren.",
"name": "alias"
}
]
answers = prompt(questions)
alias = answers['alias'].strip().lower()
if alias != "":
self.email_alias_set.add(alias)
else:
ask_again = False
self.mailbox.email_aliases = list(self.email_alias_set)
if __name__ == "__main__":
freeze_support()
APP = StudyApp()
APP.login_form()
|
StarcoderdataPython
|
3358095
|
import os
import sys
import time
import shutil
#sys.path.append('C:/prismx/')
import prismx as px
libs = px.list_libraries()
clustn = 26
f = open("validationscore"+str(clustn)+".txt", 'r')
libraries = [x.split("\t")[0] for x in f.readlines()]
newlibs = list(set(libs).difference(set(libraries)))
for i in range(0, len(newlibs)):
try:
print(newlibs[i])
gmt_file = px.load_library(newlibs[i])
print("loaded")
g1, g2, g3 = px.read_gmt(gmt_file)
# set output configuration
outname = newlibs[i]
correlationFolder = "correlation_"+str(clustn)+"_folder"
predictionFolder = "prediction_"+str(clustn)
outfolder = "prismxresult_"+str(clustn)
if len(g1) < 14000:
# calculate PrismX predictions with pretrained model
px.predict_gmt("gobp_model_"+str(clustn)+".pkl", gmt_file, correlationFolder, predictionFolder, outfolder, outname, step_size=200, intersect=True, verbose=True)
# benchmark the prediction quality
geneAUC, setAUC = px.benchmarkGMTfast(gmt_file, correlationFolder, predictionFolder, outfolder+"/"+outname+".f", intersect=True, verbose=True)
gv = geneAUC.iloc[:,1].mean()
sv = setAUC.iloc[:,1].mean()
gl_gv = geneAUC.iloc[:,0].mean()
gl_sv = setAUC.iloc[:,0].mean()
f = open('validationscore'+str(clustn)+'.txt', 'a')
f.write(outname+"\t"+str(gl_gv)+"\t"+str(gv)+"\t"+str(gl_sv)+"\t"+str(sv)+"\n")
f.close()
else:
f = open('validationscore'+str(clustn)+'.txt', 'a')
f.write(outname+"\t"+str(0)+"\t"+str(0)+"\t"+str(0)+"\t"+str(0)+"\n")
f.close()
except:
print("Failed: "+print(newlibs[i]))
shutil.rmtree(predictionFolder, ignore_errors=True)
|
StarcoderdataPython
|
3378599
|
from .contact import ContactForm
from .auth import UserForm, PermissionForm, RoutePermissionForm, LoginForm, ChangePassForm
__all__ = ['ContactForm', 'UserForm', 'PermissionForm', 'RoutePermissionForm', 'LoginForm', 'ChangePassForm']
|
StarcoderdataPython
|
1672118
|
<filename>devices/cisco/cisco_ios.py
from devices.cisco import BaseCisco
class CiscoIOS(BaseCisco):
"""
Class to represent Cisco IOS device
"""
def __init__(self, **kwargs):
super(CiscoIOS, self).__init__(**kwargs)
@property
def device_type(self):
"""
Returns device type
:param self:
:return tuple:
"""
return 'cisco_ios',
def _get_version_regex(self):
"""
Returns the regular expression string required for the version property to determine the Cisco IOS version from
a show version output
:return: Regular expression string
"""
return r"Cisco IOS Software, .* Version ?(.*)"
|
StarcoderdataPython
|
4801226
|
<reponame>JesseBausell/Hydrolight_MFile_reader_py
# Hydrolight_MFile_reader
# <NAME>
# October 31, 2020
#
# This python script reformats a series of Hydrolight-generated m-files (radiative
# transfer outputs) into hdf5 files. This enables easier access to data for investigators,
# who can work with structured variables inside the hdf5 files rather than unweildy ascii
# files, which are difficult to utilize on a large scale. See GitHub readme for more details.
### 1. Import python libraries into the workspace
import h5py
import numpy as np
from tkinter import filedialog as fd
import os
### 2. Define functions used for the script.
def createFolder(directory):
""" createFolder searches for a dirctory specified by the user. If there is none, it creates one"""
try:
if not os.path.exists(directory): # If the folder doesn't exist
os.makedirs(directory) # Create a folder
except OSError: # If there is an error other than a non-existant folder
print ('Error: Creating directory. ' + directory) # report error and shut down createFolder
def hdf5_fileWRITER(filE_NAME,HE53_dict):
"""Takes the python dictionary that was generated by ascii_MFILE_compiler and writes them
into a hdf5 (.h5) file. Data within hdf5 file is formatted the same as the aforementioned
python dictionary. User should note however that "bb/b ratio" will be changed to "bb fraction"
in all headers.
Inputs:
filE_NAME - name of future hdf5 file that will contain dictionary data
HE53_dict - dictionary formatted by ascii_MFILE_compiler
Outputs:
filE_NAME.h5 - hdf5 file containing python dictionary data"""
filE_NAME = filE_NAME[:-4]
with h5py.File(filE_NAME + '.h5','w') as hf: # Create an open hdf5 file for writing.
for k in HE53_dict:
# for-loop disects the m-file dictionary and writes data and dictionary elements
# into a hdf5 file.
k1 = k.replace('/','-') # replace the forward slash with a hyphen
hf.create_group(k1) # Create a key/element in hdf5 file based on nested python dictionary
for l in HE53_dict[k]:
# Within the python dictionary, take all elements (keys and data) and incorporate them
# into hdf5 file
hf[k1][l] = HE53_dict[k][l] # Create new nested element with data in hdf5 file
def ASCII_to_hdf5(fileNAME_mfile):
"""ASCII_to_hdf5 takes an ascii file produced by Hydrolight (m-file) and puts the data into
a python dictionary.
Input:
fileNAME_mfile - name of the ascii file
Output:
Hydro_OUTPUT - python dictionary containing data from ascii file"""
with open(fileNAME_mfile) as FID_mFILE: # Open a Hydrolight m-file that is prescribed by the user
        Hydro_OUTPUT = {} # Create an empty dictionary to store all of the data extracted from the m-file
# Before any data is collected and stored, process the first four lines of the m-file
for n in range(4):
# for-loop discards the first four lines of mfile text because they are worthless~
tLINE = FID_mFILE.readline() # Grab a line from the mfile, but don't save it
#print(n,tLINE)
if n == 1:
# if the script is examining the second header line of the entire m-file (sometimes called ascii file)
tLINE = tLINE.split() # Assign the second header line to a variable and split it into a list
wv_NUM = int(tLINE[0]) # Take the first list element (number of wavelengths). Set it equal to wv_NUM
keY = 0 # Set Key equal to 0. This variable will determine when to break the subsequent while loop
        # every time the subsequent while loop doesn't complete itself from start to finish,
while 1:
# while loop will cycle through the entire m-file until it reaches the end. It will allow
# all data to be examined, filtered, and stored in the Hydro_OUTPUT dictionary
#######################################################################################
if keY > 1:
# if script is unable to run twice
break # break the while loop!
### The code below places ascii data into a dictionary.
#######################################################################################
try: # attempt to run the following code for each while-loop iteration
### 1. For each section of the ascii file, prepare the first three header lines
temP_DICT = {} # Create an empty dictionary with each new while loop iteration
temP_DICT['linE'] = FID_mFILE.readline()[:-1].split('"') # Grab one line of the m-file
temP_DICT['linE2'] = FID_mFILE.readline()[:-1].split('"') # take the second line of the m-file and (again) split it by "
temP_DICT['linE3'] = FID_mFILE.readline()[:-1].split('"') # take the third line of the m-file and (again) split it by "
#print(temP_DICT['linE3'])
for t in temP_DICT:
# for-loop cycles through the temporary dictionary (temP_DICT), which contains
# ascii data headers, and eliminates empty list elements.
for i in np.flip(np.arange(len(temP_DICT[t])),0):
# nested for-loop removes empty elements from each dictionary key (list)
if not temP_DICT[t][i].strip():
# if the list element is empty...
temP_DICT[t].pop(i) # excise the element from the list completely
if temP_DICT['linE'] == []:
# If the first line of the ascii header is empty
temP_DICT['linE'] = temP_DICT['linE2'] # make the first ascii header the second
temP_DICT['linE2'] = [] # make the second ascii header the first
################################################################################################################################
### 2. Now that the first three header lines have been fixed, try and determine the
### dimensions of the data below the three-line header
try:
# If the last element of the line1 list contains number of rows and columns, create variables
roW,coL = np.asarray(temP_DICT['linE'][-1].split(),dtype=int) # take the last list element (matrix dimensions) and split it
temP_DICT['linE'].pop(-1) # remove last element from the line 1 list
except:
# If there are no row and column values listed in line 1 list
coL = np.nan # set column number equal to nan
Hydro_OUTPUT[temP_DICT['linE'][0]] = {} # Create a dictionary within a dictionary
Hydro_OUTPUT[temP_DICT['linE'][0]]['Meta'] = temP_DICT['linE'][-1] # Include a metadata description of each nested dictionary
################################################################################################################################
### 3. m-file sections have several different formats. Some are matrices, others are headered columns
            ### It is therefore important to distinguish between each type of m-file section and proceed accordingly
if coL == len(temP_DICT['linE3']):
# If the number of column headers, as indicated in the first line of the header, is the same as the
# number of column headers listed in the third line of the header. These AOPs are typically modeled
# according to wavelength, but NOT according to depth.
for r in range(roW):
# for-loop sorts through data row-by-row and sorts them into the appropriate dictionary lists.
# the for-loop will run for as many iterations as there are subsequent rows of data.
linE4 = FID_mFILE.readline()[:-1] # Grab a new line of data and remove end-of-line character
if "in air" in linE4:
# If the words, "in air" appear in the row of data...
INDr = linE4.rfind('"') # Find index the end of the "in air" statement
linE4 = '-1 ' + linE4[INDr+1:] # replace "in air" with "-1" within the string
linE4 = np.asarray(linE4.split(),dtype=float) # linE4 string and make it into a numpy array
for c,k3 in enumerate(temP_DICT['linE3']):
# nested for-loop distributes linE4 into appropriate dictionary elements via indexing
try:
# if nested Hydro_OUTPUT dictionary key (and element) already exist
Hydro_OUTPUT[temP_DICT['linE'][0]][k3] = np.append(Hydro_OUTPUT[temP_DICT['linE'][0]][k3],linE4[c]) #append
except:
# if nested Hydro_OUTPUT dictionary key (and element) do not yet exist
Hydro_OUTPUT[temP_DICT['linE'][0]][k3] = np.array(linE4[c]) #create a new one
else:
# If the number of columns headers, as indicated in the first line of the header, is NOT the same as
# the number of column headers listed in the third line of the header. These AOPs are typically structured
# as a 2D matrix, with one matrix dimension representing depth bins and the other dimension representing
# wavelengths
### Set up the appropriate dictionary keys/elements using the ascii header
temP_DICT['linE3'].pop(0) # remove the first element of the third header line (now a list)
try:
# Attempt to convert the rest of the third header line (now a list) into a numpy array
Hydro_OUTPUT[temP_DICT['linE'][0]]['depth'] = np.asarray(temP_DICT['linE3'][0].split(),dtype=float)
except:
# If the list to numpy array conversion (see above) was unsuccessful, it means that the first list element is a string
temP_DICT['linE3'][0] = -1 # replace the first list element with "-1"
deptH = [temP_DICT['linE3'][0]] + temP_DICT['linE3'][1].split() # Re-create a list with the third header
Hydro_OUTPUT[temP_DICT['linE'][0]]['depth'] = np.asarray(deptH,dtype=float) # Convert list of depths into numpy array
### Set up the row and column numbers, as well as a nan-matrix in which to place data
coL = len(Hydro_OUTPUT[temP_DICT['linE'][0]]['depth']) + 1 # calculate the number of columns based on depth bins
roW = wv_NUM # re-assign the number of rows based on the number of wavelengths in the m-file
TEMP_MATRIX = np.ones([roW,coL])*np.nan # Create a nan matrix in which to place AOP and wavelenth data
### Fill TEMP_MATRIX with data from m-file
for r in range(roW):
# for-loop goes through the m-file row by row and fills the nan-matrix (TEMP_MATRIX)
linE4 = np.asarray(FID_mFILE.readline()[:-1].split(),dtype=float) # grab line from m-file. Convert to numpy array
TEMP_MATRIX[r,:] = linE4 # Fill row of TEMP_MATRIX
Hydro_OUTPUT[temP_DICT['linE'][0]]['data'] = TEMP_MATRIX[:,1:] # Assign all columns (except for the first) of TEMP_MATRIX as "data"
Hydro_OUTPUT[temP_DICT['linE'][0]]['wvl'] = TEMP_MATRIX[:,0] # Assign first column of TEMP_MATRIX as "wvl"
keY = 0 # upon successful completion of "try" script, reset keY to zero
except:
keY += 1 # if "try" script fails to run for ANY REASON, increase keY by one
pass # skip to the next ascii header section
return(Hydro_OUTPUT) # returns full ascii file in a dictionary
### 3. Create a script that converts Hydrolight m-files one at a time
### 3a. Create new folder in which to place newly-created HDF5 files
template_dir = fd.askdirectory() # Select directory containing m-files
if '/' in template_dir:
# If files are on a mac
dasH = '/' # Folder separator for directory pathway
else:
# If files are on a pc
dasH = '\\' # Folder separator for directory pathway
dasH_IND = template_dir.rfind(dasH) # Find the last nested directory
repository_dir = template_dir[:dasH_IND]+dasH+'HDF5'+dasH # Create a new pathway for HDF5 files
createFolder(repository_dir) # Create a new folder adjacent to m-file folder
matLISt = os.listdir(template_dir) # list all files in m-file directory
### 3b. Covert m-files into HDF5
for i,mFILE in enumerate(matLISt):
    # This for-loop cycles through m-files in user-selected folder. Data in each m-file (ascii)
# is re-formatted into a hdf5 file (.h5) which is placed into a folder named "hdf5"
# adjacent to the user-selected m-file folder.
    try: # If mFILE is a Hydrolight m-file
HE53_dict = ASCII_to_hdf5(template_dir+dasH+mFILE) # Puts m-file data into dictionary
hdf5_fileWRITER(repository_dir+mFILE,HE53_dict) # Converts dictionary into hdf5 file
except: # If mFILE is NOT a Hydrolight m-file
pass # Ignore it!
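# Added read-back example (not part of the original script): a hedged sketch showing
# how one of the generated .h5 files could be inspected with h5py. The function is
# not called here; the group/key layout follows hdf5_fileWRITER above.
def inspect_hdf5(h5_path):
    with h5py.File(h5_path, 'r') as hf:
        for group_name in hf:
            print(group_name, list(hf[group_name].keys()))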
|
StarcoderdataPython
|
29631
|
import os
import numpy as np
import pytest
from pennylane import qchem
from openfermion.hamiltonians import MolecularData
ref_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_ref_files")
table_1 = np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.68238953],
[0.0, 1.0, 1.0, 0.0, 0.68238953],
[1.0, 0.0, 0.0, 1.0, 0.68238953],
[1.0, 1.0, 1.0, 1.0, 0.68238953],
[0.0, 0.0, 2.0, 2.0, 0.17900058],
[0.0, 1.0, 3.0, 2.0, 0.17900058],
[1.0, 0.0, 2.0, 3.0, 0.17900058],
[1.0, 1.0, 3.0, 3.0, 0.17900058],
[0.0, 2.0, 0.0, 2.0, 0.17900058],
[0.0, 3.0, 1.0, 2.0, 0.17900058],
[1.0, 2.0, 0.0, 3.0, 0.17900058],
[1.0, 3.0, 1.0, 3.0, 0.17900058],
[0.0, 2.0, 2.0, 0.0, 0.67073278],
[0.0, 3.0, 3.0, 0.0, 0.67073278],
[1.0, 2.0, 2.0, 1.0, 0.67073278],
[1.0, 3.0, 3.0, 1.0, 0.67073278],
[2.0, 0.0, 0.0, 2.0, 0.67073278],
[2.0, 1.0, 1.0, 2.0, 0.67073278],
[3.0, 0.0, 0.0, 3.0, 0.67073278],
[3.0, 1.0, 1.0, 3.0, 0.67073278],
[2.0, 0.0, 2.0, 0.0, 0.17900058],
[2.0, 1.0, 3.0, 0.0, 0.17900058],
[3.0, 0.0, 2.0, 1.0, 0.17900058],
[3.0, 1.0, 3.0, 1.0, 0.17900058],
[2.0, 2.0, 0.0, 0.0, 0.17900058],
[2.0, 3.0, 1.0, 0.0, 0.17900058],
[3.0, 2.0, 0.0, 1.0, 0.17900058],
[3.0, 3.0, 1.0, 1.0, 0.17900058],
[2.0, 2.0, 2.0, 2.0, 0.70510563],
[2.0, 3.0, 3.0, 2.0, 0.70510563],
[3.0, 2.0, 2.0, 3.0, 0.70510563],
[3.0, 3.0, 3.0, 3.0, 0.70510563],
]
)
table_2 = np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.70510563],
[0.0, 1.0, 1.0, 0.0, 0.70510563],
[1.0, 0.0, 0.0, 1.0, 0.70510563],
[1.0, 1.0, 1.0, 1.0, 0.70510563],
]
)
table_3 = np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.48731097],
[0.0, 1.0, 1.0, 0.0, 0.48731097],
[1.0, 0.0, 0.0, 1.0, 0.48731097],
[1.0, 1.0, 1.0, 1.0, 0.48731097],
[0.0, 0.0, 0.0, 2.0, -0.04857958],
[0.0, 1.0, 1.0, 2.0, -0.04857958],
[1.0, 0.0, 0.0, 3.0, -0.04857958],
[1.0, 1.0, 1.0, 3.0, -0.04857958],
[0.0, 0.0, 2.0, 0.0, -0.04857958],
[0.0, 1.0, 3.0, 0.0, -0.04857958],
[1.0, 0.0, 2.0, 1.0, -0.04857958],
[1.0, 1.0, 3.0, 1.0, -0.04857958],
[0.0, 0.0, 2.0, 2.0, 0.01306398],
[0.0, 1.0, 3.0, 2.0, 0.01306398],
[1.0, 0.0, 2.0, 3.0, 0.01306398],
[1.0, 1.0, 3.0, 3.0, 0.01306398],
[0.0, 2.0, 0.0, 0.0, -0.04857958],
[0.0, 3.0, 1.0, 0.0, -0.04857958],
[1.0, 2.0, 0.0, 1.0, -0.04857958],
[1.0, 3.0, 1.0, 1.0, -0.04857958],
[0.0, 2.0, 0.0, 2.0, 0.01306398],
[0.0, 3.0, 1.0, 2.0, 0.01306398],
[1.0, 2.0, 0.0, 3.0, 0.01306398],
[1.0, 3.0, 1.0, 3.0, 0.01306398],
[0.0, 2.0, 2.0, 0.0, 0.22361004],
[0.0, 3.0, 3.0, 0.0, 0.22361004],
[1.0, 2.0, 2.0, 1.0, 0.22361004],
[1.0, 3.0, 3.0, 1.0, 0.22361004],
[0.0, 2.0, 2.0, 2.0, 0.00748417],
[0.0, 3.0, 3.0, 2.0, 0.00748417],
[1.0, 2.0, 2.0, 3.0, 0.00748417],
[1.0, 3.0, 3.0, 3.0, 0.00748417],
[2.0, 0.0, 0.0, 0.0, -0.04857958],
[2.0, 1.0, 1.0, 0.0, -0.04857958],
[3.0, 0.0, 0.0, 1.0, -0.04857958],
[3.0, 1.0, 1.0, 1.0, -0.04857958],
[2.0, 0.0, 0.0, 2.0, 0.22361004],
[2.0, 1.0, 1.0, 2.0, 0.22361004],
[3.0, 0.0, 0.0, 3.0, 0.22361004],
[3.0, 1.0, 1.0, 3.0, 0.22361004],
[2.0, 0.0, 2.0, 0.0, 0.01306398],
[2.0, 1.0, 3.0, 0.0, 0.01306398],
[3.0, 0.0, 2.0, 1.0, 0.01306398],
[3.0, 1.0, 3.0, 1.0, 0.01306398],
[2.0, 0.0, 2.0, 2.0, 0.00748417],
[2.0, 1.0, 3.0, 2.0, 0.00748417],
[3.0, 0.0, 2.0, 3.0, 0.00748417],
[3.0, 1.0, 3.0, 3.0, 0.00748417],
[2.0, 2.0, 0.0, 0.0, 0.01306398],
[2.0, 3.0, 1.0, 0.0, 0.01306398],
[3.0, 2.0, 0.0, 1.0, 0.01306398],
[3.0, 3.0, 1.0, 1.0, 0.01306398],
[2.0, 2.0, 0.0, 2.0, 0.00748417],
[2.0, 3.0, 1.0, 2.0, 0.00748417],
[3.0, 2.0, 0.0, 3.0, 0.00748417],
[3.0, 3.0, 1.0, 3.0, 0.00748417],
[2.0, 2.0, 2.0, 0.0, 0.00748417],
[2.0, 3.0, 3.0, 0.0, 0.00748417],
[3.0, 2.0, 2.0, 1.0, 0.00748417],
[3.0, 3.0, 3.0, 1.0, 0.00748417],
[2.0, 2.0, 2.0, 2.0, 0.33788228],
[2.0, 3.0, 3.0, 2.0, 0.33788228],
[3.0, 2.0, 2.0, 3.0, 0.33788228],
[3.0, 3.0, 3.0, 3.0, 0.33788228],
]
)
@pytest.mark.parametrize(
("name", "core", "active", "table_exp", "v_core_exp"),
[
("h2_pyscf", None, None, table_1, 0),
("h2_pyscf", [0], None, table_2, 0.6823895331520422),
("h2_pyscf", None, [0, 1], table_1, 0),
("h2_pyscf", [0], [1], table_2, 0.6823895331520422),
("lih", [0], [1, 2], table_3, 1.6585666870874103),
],
)
def test_table_two_particle(name, core, active, table_exp, v_core_exp, tol):
r"""Test the table of two-particle matrix elements and the contribution of core orbitals
as implemented in the `two_particle` function of the `obs` module"""
hf_data = MolecularData(filename=os.path.join(ref_dir, name))
table, v_core = qchem.two_particle(hf_data.two_body_integrals, core=core, active=active)
assert np.allclose(table, table_exp, **tol)
assert np.allclose(v_core, v_core_exp, **tol)
v_me_1D = np.array([1, 2, 3, 4])
v_me_4D = np.full((2, 2, 2, 2), 0.5)
@pytest.mark.parametrize(
("v_me", "core", "active", "msg_match"),
[
(v_me_1D, [0], None, "'matrix_elements' must be a 4D array"),
(v_me_4D, [-1, 0, 1, 2], None, "Indices of core orbitals must be between 0 and"),
(v_me_4D, [0, 1, 2, 3], None, "Indices of core orbitals must be between 0 and"),
(v_me_4D, None, [-1, 0], "Indices of active orbitals must be between 0 and"),
(v_me_4D, None, [2, 6], "Indices of active orbitals must be between 0 and"),
],
)
def test_exceptions_two_particle(v_me, core, active, msg_match):
"""Test that the function `'two_particle'` throws an exception
if the dimension of the matrix elements array is not a 4D array or
if the indices of core and/or active orbitals are out of range."""
with pytest.raises(ValueError, match=msg_match):
qchem.two_particle(v_me, core=core, active=active)
|
StarcoderdataPython
|
1636191
|
from math import sqrt
# Omega(sqrt(n)) = Omega(n^(1/2)) = Omega((2^c)^(1/2))
def is_prime(n):
is_prime = (n + 1) * [True]
for candidate in range(2, int(sqrt(n)) + 1):
if is_prime[candidate]:
for witness in range(candidate * candidate, n + 1, candidate):
is_prime[witness] = False
return is_prime[n]
for candidate in range(2, 100):
if is_prime(candidate):
        print(candidate)
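# Added note (not part of the original snippet): the helper above rebuilds the sieve
# for every candidate. A hedged sketch that sieves once up to a bound and answers all
# queries from the same table:
def primes_up_to(n):
    sieve = (n + 1) * [True]
    sieve[0:2] = [False, False]
    for candidate in range(2, int(sqrt(n)) + 1):
        if sieve[candidate]:
            for witness in range(candidate * candidate, n + 1, candidate):
                sieve[witness] = False
    return [i for i in range(2, n + 1) if sieve[i]]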
|
StarcoderdataPython
|
4823616
|
<gh_stars>0
from Bio import SeqIO
import numpy as np
import os
#print("something")
class sequenceReaderDecoder():
def __init__(self, filePath, fileDestiny):
self.filePath = filePath
self.fileDestiny = fileDestiny
self.aminoacids = {"G" : 0 ,"P" : 1,"A" : 2,"V" : 3,"L" : 4,"I" : 5,"M" : 6,"C" : 7,"F" : 8,"Y" : 9,"W" : 10,"H" : 11,"K" : 12,"R" : 13,"Q" : 14,"N" : 15,"E" : 16,"D" : 17,"S" : 18,"T" : 19}
def setFilePath(self, filepath):
self.filePath = filepath
def setFileDestiny(self, filepath):
        self.fileDestiny = filepath
def ReadBioSeqAndTransformToTrainable(self, windowSize):
record = SeqIO.read(self.filePath, "embl")
print(dir(record))
#print(record.features)
cleavageloc = []
for i in range(2,len(record.features)):
#print (i.qualifiers)
a = record.features[i].location.start
print(a)
cleavageloc.append(a)
##print(i.location.extract(record.seq))
a = np.array(record.seq)
a2 = [self.aminoacids[i] for i in a]
#print(a2)
startPosition = 0
endPosition = windowSize
endArray = []
for i in range(0,len(a2)-(windowSize)):
if(i-(windowSize//2) in cleavageloc):
#print(True)
subArray = [1, a2[startPosition:endPosition]]
startPosition = startPosition+1
endPosition = endPosition+1
endArray.append(subArray)
else:
subArray = [0,a2[startPosition:endPosition]]
startPosition = startPosition+1
endPosition = endPosition+1
endArray.append(subArray)
#print(endArray)
ansStr= ''
f = open(self.fileDestiny, 'w+')
for i in endArray:
ansStr = str(i[0])+' '
for j in i[1]:
ansStr += str(j)+' '
ansStr += '\n'
f.write(ansStr)
f.close()
def InverseTransform(self, filePath, windowSize):
self.aminoacids = { 0 : "G" , 1 : "P", 2 : "A", 3 : "V", 4 : "L", 5 : "I" ,6 : "M",7 : "C",8 : "F",9 : "Y",10 : "W",11 : "H",12 : "K",13 : "R",14 : "Q",15 : "N",16 : "E",17 : "D",18 : "S",19 : "T"}
f = open(filePath)
test = []
contentInArchive = f.readlines()
for i in contentInArchive:
aux = i [ : len(i) - 2]
ans1 = aux.split(' ')
a2 = [int(i) for i in ans1]
a2 = a2[1 :]
#print(a2)
test.append(a2)
fullArray = []
for i in test[0]:
fullArray.append(i)
for i in range(1, len(test)):
a = test[i][8]
fullArray.append(a)
a2 = [self.aminoacids[i] for i in fullArray]
record = SeqIO.read(self.filePath, "embl")
aOriginal = np.array(record.seq)
#print(len(a2))
#print(len(aOriginal))
#print(aOriginal)
for i in range(0, len(a2)):
if(a2[i] != aOriginal[i]):
print(a2[i])
print(' ')
print(aOriginal[i])
print('es igual')
def ReadBioSeqAndTransformToTrainableWithoutSaving(self, windowSize):
record = SeqIO.read(self.filePath, "embl")
#print(dir(record))
#print(record.features)
cleavageloc = []
for i in range(2,len(record.features)):
#print (i.qualifiers)
a = record.features[i].location.start
#print(a)
cleavageloc.append(a)
##print(i.location.extract(record.seq))
if(len(cleavageloc) > 9):
cleavageloc = cleavageloc[ :9]
a = np.array(record.seq)
a2 = [self.aminoacids[i] for i in a]
startPosition = 0
endPosition = windowSize
endArray = []
cont = 1
for i in range(0,len(a2)-windowSize):
if(i-5 in cleavageloc):
#print(True)
subArray = [cont, a2[startPosition:endPosition]]
startPosition = startPosition+1
endPosition = endPosition+1
endArray.append(subArray)
cont = cont + 1
else:
subArray = [0,a2[startPosition:endPosition]]
startPosition = startPosition+1
endPosition = endPosition+1
endArray.append(subArray)
#print(endArray)
ansArray = []
ansStr= ''
for i in endArray:
ansStr = str(i[0])+' '
for j in i[1]:
ansStr += str(j)+' '
ansStr += '\n'
ansArray.append(ansStr)
return ansArray
def ReadBioSeqAndTransformToEvaluable(self, windowsSize):
record = SeqIO.read(self.filePath, "embl")
a = np.array(record.seq)
a2 = [self.aminoacids[i] for i in a]
startPosition = 0
endPosition = windowsSize
endArray = []
for i in range(0,len(a2)-windowsSize):
subArray = a2[startPosition:endPosition]
startPosition = startPosition+1
endPosition = endPosition+1
endArray.append(subArray)
print(len(endArray))
f = open(self.fileDestiny, 'w+')
for i in endArray:
#print(i)
ansStr = ''
for j in range(0,len(i)):
ansStr += str(i[j])+' '
ansStr += '\n'
#print(ansStr)
f.write(ansStr)
f.close()
class TestTrainSetter():
def __init__(self, directoryEMBLfiles, directoryTestTrain):
self.directoryEMBLfiles = directoryEMBLfiles
self.directoryTestTrain = directoryTestTrain
def readFilesAndTransformLocally(self):
reader = sequenceReaderDecoder('','')
allFiles = []
for filename in os.listdir(self.directoryEMBLfiles):
if filename.endswith(".embl"):
reader.setFilePath(self.directoryEMBLfiles +'/'+ filename)
allFiles.append(reader.ReadBioSeqAndTransformToTrainableWithoutSaving(11))
return allFiles
def convertToUsable(self):
fullArray = self.readFilesAndTransformLocally()
test = []
for j in fullArray:
for i in j:
aux = i [ : len(i) - 2]
ans1 = aux.split(' ')
a2 = [int(i) for i in ans1]
#print(a2)
test.append(a2)
return test
def splitPositiveFromNegative(self):
complete = self.convertToUsable()
pos = []
neg = []
for i in complete :
if (i[0] == 0):
neg.append(i)
else:
pos.append(i)
return(pos,neg)
def splitAndSafe(self):
pos,neg = self.splitPositiveFromNegative()
one = []
two = []
three = []
four = []
five = []
six = []
seven = []
eight = []
nine = []
for i in pos :
if (i[0] == 1):
one.append(i)
if (i[0] == 2):
two.append(i)
if (i[0] == 3):
three.append(i)
if (i[0] == 4):
four.append(i)
if (i[0] == 5):
five.append(i)
if (i[0] == 6):
six.append(i)
if (i[0] == 7):
seven.append(i)
if (i[0] == 8):
eight.append(i)
if (i[0] == 9):
nine.append(i)
f1 = open(self.directoryTestTrain+'/testPost1', 'w+')
for i in one:
ansStr = ''
for j in i :
ansStr += str(j) + ' '
ansStr += '\n'
f1.write(ansStr)
f1.close()
f2 = open(self.directoryTestTrain+'/testPost2', 'w+')
for i in two:
ansStr = str(1) + ' '
for j in range(1,len(i)) :
ansStr += str(i[j]) + ' '
ansStr += '\n'
f2.write(ansStr)
f2.close()
f3 = open(self.directoryTestTrain+'/testPost3', 'w+')
for i in three:
ansStr = str(1) + ' '
for j in range(1,len(i)) :
ansStr += str(i[j]) + ' '
ansStr += '\n'
f3.write(ansStr)
f3.close()
f4 = open(self.directoryTestTrain+'/testPost4', 'w+')
for i in four:
ansStr = str(1) + ' '
for j in range(1,len(i)) :
ansStr += str(i[j]) + ' '
ansStr += '\n'
f4.write(ansStr)
f4.close()
f5 = open(self.directoryTestTrain+'/testPost5', 'w+')
for i in five:
ansStr = str(1) + ' '
for j in range(1,len(i)) :
ansStr += str(i[j]) + ' '
ansStr += '\n'
f5.write(ansStr)
f5.close()
f6 = open(self.directoryTestTrain+'/testPost6', 'w+')
for i in six:
ansStr = str(1) + ' '
for j in range(1,len(i)) :
ansStr += str(i[j]) + ' '
ansStr += '\n'
f6.write(ansStr)
f6.close()
f7 = open(self.directoryTestTrain+'/testPost7', 'w+')
for i in seven:
ansStr = str(1) + ' '
for j in range(1,len(i)) :
ansStr += str(i[j]) + ' '
ansStr += '\n'
f7.write(ansStr)
f7.close()
f8 = open(self.directoryTestTrain+'/testPost8', 'w+')
for i in eight:
ansStr = str(1) + ' '
for j in range(1,len(i)) :
ansStr += str(i[j]) + ' '
ansStr += '\n'
f8.write(ansStr)
f8.close()
f9 = open(self.directoryTestTrain+'/testPost9', 'w+')
for i in nine:
ansStr = str(1) + ' '
for j in range(1,len(i)) :
ansStr += str(i[j]) + ' '
ansStr += '\n'
f9.write(ansStr)
f9.close()
ninth = len(neg)//9
for i in range(0,9):
f = open(self.directoryTestTrain + '/trestneg'+str(i+1), 'w+')
partition = neg [i*ninth : (i*ninth)+ninth]
for i in partition:
ansStr = ''
for j in i :
ansStr += str(j) + ' '
ansStr += '\n'
f.write(ansStr)
f.close()
#ReadBioSeqAndTransformToTrainableWithoutSaving
#newobj = sequenceReaderDecoder('ab079887.embl','ab079887.SAMPLE')
#newobj.ReadBioSeqAndTransformToTrainable(9)
#newobj.ReadBioSeqAndTransformToEvaluable(11)
#newobj = TestTrainSetter('./embl', './trainables')
#newobj.splitAndSafe()
#newobj.InverseTransform('ab079887.SAMPLE', 9)
|
StarcoderdataPython
|
1773907
|
<filename>tests/PySys/tedge/tedge_agent_user_sudo_access/run.py
import time
from pysys.basetest import BaseTest
import subprocess
from threading import Timer
"""
Validate tedge-agent user has a limited sudo right
Given tedge_apt_plugin and tedge_agent are installed
When we run plugin located in plugin directory as tedge-agent
Then a plugin is executed
When we run plugin located out of plugin directory as tedge-agent
Then a plugin is not executed
"""
class TedgeAgentUserSudoAccess(BaseTest):
def setup(self):
self.sudo = "/usr/bin/sudo"
self.log.info("Copy apt plugin 'deb'")
self.startProcess(
command=self.sudo,
arguments=["cp", "/etc/tedge/sm-plugins/apt", "/etc/tedge/sm-plugins/deb"],
stdouterr="copy_apt_plugin",
)
self.addCleanupFunction(self.mycleanup)
def execute(self):
proc1 = self.startProcess(
command=self.sudo,
arguments=["-u", "tedge-agent", self.sudo, "/etc/tedge/sm-plugins/apt"],
stdouterr="apt",
expectedExitStatus="==1",
)
self.assertThat("value" + proc1.expectedExitStatus, value=proc1.exitStatus)
proc2 = self.startProcess(
command=self.sudo,
arguments=["-u", "tedge-agent", self.sudo, "/etc/tedge/sm-plugins/deb"],
stdouterr="deb",
expectedExitStatus="==1",
)
self.assertThat("value" + proc2.expectedExitStatus, value=proc2.exitStatus)
# To Do
# vulnerability check
# sudo -u tedge-agent sudo /etc/tedge/sm-plugins/../../../bin/ls
# Must be asked a password of tedge-agent
def mycleanup(self):
self.log.info("Remove the copied apt 'deb' plugin")
self.startProcess(
command=self.sudo,
arguments=["rm", "/etc/tedge/sm-plugins/deb"],
stdouterr="remove_copied_apt_plugin",
)
|
StarcoderdataPython
|
3314920
|
from django.db import models
# Create your models here.
class Car(models.Model):
brand = models.CharField(max_length=20)
users = models.ManyToManyField('auth.User', blank=True, db_constraint=False)
|
StarcoderdataPython
|
3276535
|
""" Handles the logic of simple reusable dialogs
"""
import logging
import numpy as np
from PyQt5 import QtWidgets
from meggie.utilities.dialogs.simpleDialogUi import Ui_SimpleDialog
from meggie.utilities.widgets.batchingWidgetMain import BatchingWidget
from meggie.utilities.validators import validate_name
from meggie.utilities.validators import assert_arrays_same
from meggie.utilities.messaging import exc_messagebox
from meggie.utilities.messaging import messagebox
class SimpleDialog(QtWidgets.QDialog):
""" Contains logic for simple reusable dialog.
"""
def __init__(self, experiment, parent, default_name, handler,
batching=True, title='Simple dialog'):
QtWidgets.QDialog.__init__(self, parent)
self.ui = Ui_SimpleDialog()
self.ui.setupUi(self)
self.parent = parent
self.experiment = experiment
self.handler = handler
self.setWindowTitle('Meggie - ' + title)
if batching:
self.batching_widget = BatchingWidget(
experiment_getter=self._experiment_getter,
parent=self,
container=self.ui.groupBoxBatching,
geometry=self.ui.batchingWidgetPlaceholder.geometry())
self.ui.gridLayoutBatching.addWidget(self.batching_widget, 0, 0, 1, 1)
else:
self.ui.groupBoxBatching.hide()
self.ui.pushButtonBatch.hide()
self.ui.lineEditName.setText(default_name)
def _experiment_getter(self):
return self.experiment
def accept(self):
subject = self.experiment.active_subject
try:
evoked_name = validate_name(self.ui.lineEditName.text())
except Exception as exc:
exc_messagebox(self, exc)
return
try:
params = {'name': evoked_name}
self.handler(subject, params)
self.experiment.save_experiment_settings()
except Exception as exc:
exc_messagebox(self, exc)
return
self.parent.initialize_ui()
self.close()
def acceptBatch(self):
experiment = self.experiment
try:
evoked_name = validate_name(self.ui.lineEditName.text())
except Exception as exc:
exc_messagebox(self, exc)
return
selected_subject_names = self.batching_widget.selected_subjects
params = {'name': evoked_name}
for name, subject in self.experiment.subjects.items():
if name in selected_subject_names:
try:
self.handler(subject, params)
subject.release_memory()
except Exception as exc:
self.batching_widget.failed_subjects.append(
(subject, str(exc)))
logging.getLogger('ui_logger').exception('')
self.batching_widget.cleanup()
try:
self.experiment.save_experiment_settings()
except Exception as exc:
exc_messagebox(self, exc)
return
self.parent.initialize_ui()
self.close()
|
StarcoderdataPython
|
3396221
|
from setuptools import setup, find_packages
def setup_package():
setup(
name="motion-marmot",
description="The marmot serves as a motion detector which can target out all possible motions.",
url="https://github.com/daohuei/motion-marmot",
author="daohuei",
author_email="<EMAIL>",
maintainer="daohuei",
maintainer_email="<EMAIL>",
packages=find_packages(exclude=["data", "model"]),
license="MIT",
# Note: many of these required packages are included in base python
# but are listed here because different linux distros use custom
# python installations. And users can remove packages at any point
install_requires=[
"typer",
"numpy",
"pandas",
"opencv-python",
"sklearn",
"visdom",
],
)
if __name__ == "__main__":
setup_package()
|
StarcoderdataPython
|
1671539
|
from flask import Blueprint, request, render_template
import json, random, dbconfig_MapleStory
WispsWonderBerry = Blueprint("WispsWonderBerry", __name__, url_prefix="/MapleStory/WispsWonderBerry")
db_Class = dbconfig_MapleStory.DataBase()
WispsWonderBerry_Item_Name_List = []
WispsWonderBerry_Item_Probability_List = []
WispsWonderBerry_GetData_SQL = "SELECT * FROM WispsWonderBerry"
WispsWonderBerry_GetData_ROW = db_Class.executeAll(WispsWonderBerry_GetData_SQL)
for RoyalStyle_GetData_Item in WispsWonderBerry_GetData_ROW:
WispsWonderBerry_Item_Name_List.append(RoyalStyle_GetData_Item["Name"])
WispsWonderBerry_Item_Probability_List.append(int(float(RoyalStyle_GetData_Item["Probability"]) * 100))
db_Class.close()
@WispsWonderBerry.route("/WispsWonderBerry-Simulator")
def function_WispsWonderBerry():
    Random = random.randrange(1, sum(WispsWonderBerry_Item_Probability_List) + 1)
WispsWonderBerry_Item_Name = ""
WispsWonderBerry_Item_Probability = 0
before_Probability = 1
after_Probability = WispsWonderBerry_Item_Probability_List[0]
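    # Walk the cumulative weights: item i is picked when Random falls inside
    # [before_Probability, after_Probability], the slice covering item i's
    # share of the total weight.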
for i in range(len(WispsWonderBerry_Item_Probability_List)):
if Random >= before_Probability and Random <= after_Probability:
WispsWonderBerry_Item_Name = WispsWonderBerry_Item_Name_List[i]
WispsWonderBerry_Item_Probability = WispsWonderBerry_Item_Probability_List[i] / 100
break
before_Probability += WispsWonderBerry_Item_Probability_List[i]
after_Probability += WispsWonderBerry_Item_Probability_List[i + 1]
return json.dumps({"Result": "Success", "Item_Name": WispsWonderBerry_Item_Name, "Item_Probability": WispsWonderBerry_Item_Probability}, ensure_ascii=False)
@WispsWonderBerry.route("/WispsWonderBerry-Probability")
def function_WispsWonderBerry_Probability():
Result_List = {}
Result_List["Result"] = "Success"
for i in range(len(WispsWonderBerry_Item_Name_List)):
Temp_List = {}
Temp_List["Item_Name"] = WispsWonderBerry_Item_Name_List[i]
Temp_List["Item_Probability"] = WispsWonderBerry_Item_Probability_List[i] / 100
Result_List[i] = Temp_List
return json.dumps(Result_List, ensure_ascii=False)
|
StarcoderdataPython
|
3356634
|
<reponame>cognifloyd/stackstorm-device42<gh_stars>1-10
from lib.base_action import BaseAction
class Update_Device(BaseAction):
def run(self, identifier, identifier_type, changes):
# designate which device to update, based on any id_type:id pair
payload = {identifier_type: identifier}
# include the KVP changes meant to change the device
payload.update(changes)
url = 'device/'
response = self.putAPI(url, payload=payload)
return response
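# Hypothetical usage sketch (identifier values and changes are made up, and the
# action is normally invoked through StackStorm rather than directly):
#   Update_Device(config).run(identifier="1234", identifier_type="device_id",
#                             changes={"notes": "re-imaged"})
# sends {"device_id": "1234", "notes": "re-imaged"} as a PUT to the 'device/' endpoint.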
|
StarcoderdataPython
|
3234367
|
<filename>Chapter11_OpenAI_Gym/taxi/Taxi-v3.py
# There are 4 locations (labeled by different letters) and your job is to pick up the passenger at one location
# and drop him off in another. You receive +20 points for a successful dropoff,
# and lose 1 point for every timestep it takes.
# There is also a 10 point penalty for illegal pick-up and drop-off actions.
import gym
import numpy as np
env = gym.make('Taxi-v3')
def q_learning(episode):
alpha = .8
Q = np.zeros([env.observation_space.n, env.action_space.n])
for i in range(episode):
done = False
G, reward = 0,0
state = env.reset()
while not done:
env.render()
action = np.argmax(Q[state])
state2, reward, done, info = env.step(action)
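            # Q-learning update with learning rate alpha; the discount factor
            # gamma is implicitly 1 here (future rewards are not discounted).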
Q[state, action] += alpha*(reward + np.max(Q[state2]) - Q[state, action])
G += reward
state = state2
print("episode {}, total reward = {}".format(i, G))
def random_policy(episode, step):
for i_episode in range(episode):
env.reset()
for t in range(step):
env.render()
action = env.action_space.sample()
state, reward, done, info = env.step(action)
if done:
print("Episode finished after {} timesteps".format(t+1))
break
print("Starting next episode")
if __name__ == '__main__':
q_learning(1000)
|
StarcoderdataPython
|
35230
|
from demo.components.server import server
from chips.api.api import *
def application(chip):
eth = Component("application.c")
eth(
chip,
inputs = {
"eth_in" : chip.inputs["input_eth_rx"],
"am_in" : chip.inputs["input_radio_am"],
"fm_in" : chip.inputs["input_radio_fm"],
"rs232_rx":chip.inputs["input_rs232_rx"],
},
outputs = {
"eth_out" : chip.outputs["output_eth_tx"],
"audio_out" : chip.outputs["output_audio"],
"frequency_out" : chip.outputs["output_radio_frequency"],
"samples_out" : chip.outputs["output_radio_average_samples"],
"rs232_tx":chip.outputs["output_rs232_tx"],
},
)
|
StarcoderdataPython
|
4824669
|
<gh_stars>1-10
import json
from typing import List
from unittest import mock
import boto3
import pandas as pd
import pytest
from moto import mock_s3
from ruamel.yaml import YAML
import great_expectations.exceptions.exceptions as ge_exceptions
from great_expectations import DataContext
from great_expectations.core.batch import (
BatchDefinition,
BatchRequest,
BatchRequestBase,
IDDict,
)
from great_expectations.data_context.util import instantiate_class_from_config
from great_expectations.datasource.data_connector import ConfiguredAssetS3DataConnector
yaml = YAML()
@mock_s3
def test_basic_instantiation():
region_name: str = "us-east-1"
bucket: str = "test_bucket"
conn = boto3.resource("s3", region_name=region_name)
conn.create_bucket(Bucket=bucket)
client = boto3.client("s3", region_name=region_name)
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
keys: List[str] = [
"alpha-1.csv",
"alpha-2.csv",
"alpha-3.csv",
]
for key in keys:
client.put_object(
Bucket=bucket, Body=test_df.to_csv(index=False).encode("utf-8"), Key=key
)
my_data_connector = ConfiguredAssetS3DataConnector(
name="my_data_connector",
datasource_name="FAKE_DATASOURCE_NAME",
default_regex={
"pattern": "alpha-(.*)\\.csv",
"group_names": ["index"],
},
bucket=bucket,
prefix="",
assets={"alpha": {}},
)
assert my_data_connector.self_check() == {
"class_name": "ConfiguredAssetS3DataConnector",
"data_asset_count": 1,
"example_data_asset_names": [
"alpha",
],
"data_assets": {
"alpha": {
"example_data_references": [
"alpha-1.csv",
"alpha-2.csv",
"alpha-3.csv",
],
"batch_definition_count": 3,
},
},
"example_unmatched_data_references": [],
"unmatched_data_reference_count": 0,
# FIXME: (Sam) example_data_reference removed temporarily in PR #2590:
# "example_data_reference": {},
}
# noinspection PyProtectedMember
my_data_connector._refresh_data_references_cache()
assert my_data_connector.get_data_reference_list_count() == 3
assert my_data_connector.get_unmatched_data_references() == []
# Illegal execution environment name
with pytest.raises(ValueError):
print(
my_data_connector.get_batch_definition_list_from_batch_request(
BatchRequest(
datasource_name="something",
data_connector_name="my_data_connector",
data_asset_name="something",
)
)
)
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock_s3
def test_instantiation_from_a_config(mock_emit, empty_data_context_stats_enabled):
context: DataContext = empty_data_context_stats_enabled
region_name: str = "us-east-1"
bucket: str = "test_bucket"
conn = boto3.resource("s3", region_name=region_name)
conn.create_bucket(Bucket=bucket)
client = boto3.client("s3", region_name=region_name)
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
keys: List[str] = [
"alpha-1.csv",
"alpha-2.csv",
"alpha-3.csv",
]
for key in keys:
client.put_object(
Bucket=bucket, Body=test_df.to_csv(index=False).encode("utf-8"), Key=key
)
report_object = context.test_yaml_config(
f"""
module_name: great_expectations.datasource.data_connector
class_name: ConfiguredAssetS3DataConnector
datasource_name: FAKE_DATASOURCE
name: TEST_DATA_CONNECTOR
default_regex:
pattern: alpha-(.*)\\.csv
group_names:
- index
bucket: {bucket}
prefix: ""
assets:
alpha:
""",
return_mode="report_object",
)
assert report_object == {
"class_name": "ConfiguredAssetS3DataConnector",
"data_asset_count": 1,
"example_data_asset_names": [
"alpha",
],
"data_assets": {
"alpha": {
"example_data_references": [
"alpha-1.csv",
"alpha-2.csv",
"alpha-3.csv",
],
"batch_definition_count": 3,
},
},
"example_unmatched_data_references": [],
"unmatched_data_reference_count": 0,
# FIXME: (Sam) example_data_reference removed temporarily in PR #2590:
# "example_data_reference": {},
}
assert mock_emit.call_count == 1
anonymized_name = mock_emit.call_args_list[0][0][0]["event_payload"][
"anonymized_name"
]
expected_call_args_list = [
mock.call(
{
"event": "data_context.test_yaml_config",
"event_payload": {
"anonymized_name": anonymized_name,
"parent_class": "ConfiguredAssetS3DataConnector",
},
"success": True,
}
),
]
assert mock_emit.call_args_list == expected_call_args_list
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock_s3
def test_instantiation_from_a_config_regex_does_not_match_paths(
mock_emit, empty_data_context_stats_enabled
):
context: DataContext = empty_data_context_stats_enabled
region_name: str = "us-east-1"
bucket: str = "test_bucket"
conn = boto3.resource("s3", region_name=region_name)
conn.create_bucket(Bucket=bucket)
client = boto3.client("s3", region_name=region_name)
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
keys: List[str] = [
"alpha-1.csv",
"alpha-2.csv",
"alpha-3.csv",
]
for key in keys:
client.put_object(
Bucket=bucket, Body=test_df.to_csv(index=False).encode("utf-8"), Key=key
)
report_object = context.test_yaml_config(
f"""
module_name: great_expectations.datasource.data_connector
class_name: ConfiguredAssetS3DataConnector
datasource_name: FAKE_DATASOURCE
name: TEST_DATA_CONNECTOR
bucket: {bucket}
prefix: ""
default_regex:
pattern: beta-(.*)\\.csv
group_names:
- index
assets:
alpha:
""",
return_mode="report_object",
)
assert report_object == {
"class_name": "ConfiguredAssetS3DataConnector",
"data_asset_count": 1,
"example_data_asset_names": [
"alpha",
],
"data_assets": {
"alpha": {"example_data_references": [], "batch_definition_count": 0},
},
"example_unmatched_data_references": [
"alpha-1.csv",
"alpha-2.csv",
"alpha-3.csv",
],
"unmatched_data_reference_count": 3,
# FIXME: (Sam) example_data_reference removed temporarily in PR #2590:
# "example_data_reference": {},
}
assert mock_emit.call_count == 1
anonymized_name = mock_emit.call_args_list[0][0][0]["event_payload"][
"anonymized_name"
]
expected_call_args_list = [
mock.call(
{
"event": "data_context.test_yaml_config",
"event_payload": {
"anonymized_name": anonymized_name,
"parent_class": "ConfiguredAssetS3DataConnector",
},
"success": True,
}
),
]
assert mock_emit.call_args_list == expected_call_args_list
@mock_s3
def test_return_all_batch_definitions_unsorted():
region_name: str = "us-east-1"
bucket: str = "test_bucket"
conn = boto3.resource("s3", region_name=region_name)
conn.create_bucket(Bucket=bucket)
client = boto3.client("s3", region_name=region_name)
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
keys: List[str] = [
"alex_20200809_1000.csv",
"eugene_20200809_1500.csv",
"james_20200811_1009.csv",
"abe_20200809_1040.csv",
"will_20200809_1002.csv",
"james_20200713_1567.csv",
"eugene_20201129_1900.csv",
"will_20200810_1001.csv",
"james_20200810_1003.csv",
"alex_20200819_1300.csv",
]
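    # e.g. "alex_20200809_1000.csv" is parsed by the regex below into
    # name="alex", timestamp="20200809", price="1000".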
for key in keys:
client.put_object(
Bucket=bucket, Body=test_df.to_csv(index=False).encode("utf-8"), Key=key
)
my_data_connector_yaml = yaml.load(
f"""
class_name: ConfiguredAssetS3DataConnector
datasource_name: test_environment
#execution_engine:
# class_name: PandasExecutionEngine
bucket: {bucket}
prefix: ""
assets:
TestFiles:
default_regex:
pattern: (.+)_(.+)_(.+)\\.csv
group_names:
- name
- timestamp
- price
""",
)
my_data_connector: ConfiguredAssetS3DataConnector = instantiate_class_from_config(
config=my_data_connector_yaml,
runtime_environment={
"name": "general_s3_data_connector",
"datasource_name": "test_environment",
},
config_defaults={"module_name": "great_expectations.datasource.data_connector"},
)
with pytest.raises(TypeError):
my_data_connector.get_batch_definition_list_from_batch_request()
# with unnamed data_asset_name
with pytest.raises(TypeError):
my_data_connector.get_batch_definition_list_from_batch_request(
BatchRequest(
datasource_name="test_environment",
data_connector_name="general_s3_data_connector",
data_asset_name=None,
)
)
# with unnamed data_asset_name
unsorted_batch_definition_list = (
my_data_connector._get_batch_definition_list_from_batch_request(
BatchRequestBase(
datasource_name="test_environment",
data_connector_name="general_s3_data_connector",
data_asset_name=None,
)
)
)
expected = [
BatchDefinition(
datasource_name="test_environment",
data_connector_name="general_s3_data_connector",
data_asset_name="TestFiles",
batch_identifiers=IDDict(
{"name": "abe", "timestamp": "20200809", "price": "1040"}
),
),
BatchDefinition(
datasource_name="test_environment",
data_connector_name="general_s3_data_connector",
data_asset_name="TestFiles",
batch_identifiers=IDDict(
{"name": "alex", "timestamp": "20200809", "price": "1000"}
),
),
BatchDefinition(
datasource_name="test_environment",
data_connector_name="general_s3_data_connector",
data_asset_name="TestFiles",
batch_identifiers=IDDict(
{"name": "alex", "timestamp": "20200819", "price": "1300"}
),
),
BatchDefinition(
datasource_name="test_environment",
data_connector_name="general_s3_data_connector",
data_asset_name="TestFiles",
batch_identifiers=IDDict(
{"name": "eugene", "timestamp": "20200809", "price": "1500"}
),
),
BatchDefinition(
datasource_name="test_environment",
data_connector_name="general_s3_data_connector",
data_asset_name="TestFiles",
batch_identifiers=IDDict(
{"name": "eugene", "timestamp": "20201129", "price": "1900"}
),
),
BatchDefinition(
datasource_name="test_environment",
data_connector_name="general_s3_data_connector",
data_asset_name="TestFiles",
batch_identifiers=IDDict(
{"name": "james", "timestamp": "20200713", "price": "1567"}
),
),
BatchDefinition(
datasource_name="test_environment",
data_connector_name="general_s3_data_connector",
data_asset_name="TestFiles",
batch_identifiers=IDDict(
{"name": "james", "timestamp": "20200810", "price": "1003"}
),
),
BatchDefinition(
datasource_name="test_environment",
data_connector_name="general_s3_data_connector",
data_asset_name="TestFiles",
batch_identifiers=IDDict(
{"name": "james", "timestamp": "20200811", "price": "1009"}
),
),
BatchDefinition(
datasource_name="test_environment",
data_connector_name="general_s3_data_connector",
data_asset_name="TestFiles",
batch_identifiers=IDDict(
{"name": "will", "timestamp": "20200809", "price": "1002"}
),
),
BatchDefinition(
datasource_name="test_environment",
data_connector_name="general_s3_data_connector",
data_asset_name="TestFiles",
batch_identifiers=IDDict(
{"name": "will", "timestamp": "20200810", "price": "1001"}
),
),
]
assert expected == unsorted_batch_definition_list
# with named data_asset_name
unsorted_batch_definition_list = (
my_data_connector.get_batch_definition_list_from_batch_request(
BatchRequest(
datasource_name="test_environment",
data_connector_name="general_s3_data_connector",
data_asset_name="TestFiles",
)
)
)
assert expected == unsorted_batch_definition_list
@mock_s3
def test_return_all_batch_definitions_sorted():
region_name: str = "us-east-1"
bucket: str = "test_bucket"
conn = boto3.resource("s3", region_name=region_name)
conn.create_bucket(Bucket=bucket)
client = boto3.client("s3", region_name=region_name)
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
keys: List[str] = [
"alex_20200809_1000.csv",
"eugene_20200809_1500.csv",
"james_20200811_1009.csv",
"abe_20200809_1040.csv",
"will_20200809_1002.csv",
"james_20200713_1567.csv",
"eugene_20201129_1900.csv",
"will_20200810_1001.csv",
"james_20200810_1003.csv",
"alex_20200819_1300.csv",
]
for key in keys:
client.put_object(
Bucket=bucket, Body=test_df.to_csv(index=False).encode("utf-8"), Key=key
)
my_data_connector_yaml = yaml.load(
f"""
class_name: ConfiguredAssetS3DataConnector
datasource_name: test_environment
#execution_engine:
# class_name: PandasExecutionEngine
bucket: {bucket}
prefix: ""
assets:
TestFiles:
default_regex:
pattern: (.+)_(.+)_(.+)\\.csv
group_names:
- name
- timestamp
- price
sorters:
- orderby: asc
class_name: LexicographicSorter
name: name
- datetime_format: "%Y%m%d"
orderby: desc
class_name: DateTimeSorter
name: timestamp
- orderby: desc
class_name: NumericSorter
name: price
""",
)
my_data_connector: ConfiguredAssetS3DataConnector = instantiate_class_from_config(
config=my_data_connector_yaml,
runtime_environment={
"name": "general_s3_data_connector",
"datasource_name": "test_environment",
},
config_defaults={"module_name": "great_expectations.datasource.data_connector"},
)
self_check_report = my_data_connector.self_check()
assert self_check_report["class_name"] == "ConfiguredAssetS3DataConnector"
assert self_check_report["data_asset_count"] == 1
assert self_check_report["data_assets"]["TestFiles"]["batch_definition_count"] == 10
assert self_check_report["unmatched_data_reference_count"] == 0
sorted_batch_definition_list = (
my_data_connector.get_batch_definition_list_from_batch_request(
BatchRequest(
datasource_name="test_environment",
data_connector_name="general_s3_data_connector",
data_asset_name="TestFiles",
)
)
)
expected = [
BatchDefinition(
datasource_name="test_environment",
data_connector_name="general_s3_data_connector",
data_asset_name="TestFiles",
batch_identifiers=IDDict(
{"name": "abe", "timestamp": "20200809", "price": "1040"}
),
),
BatchDefinition(
datasource_name="test_environment",
data_connector_name="general_s3_data_connector",
data_asset_name="TestFiles",
batch_identifiers=IDDict(
{"name": "alex", "timestamp": "20200819", "price": "1300"}
),
),
BatchDefinition(
datasource_name="test_environment",
data_connector_name="general_s3_data_connector",
data_asset_name="TestFiles",
batch_identifiers=IDDict(
{"name": "alex", "timestamp": "20200809", "price": "1000"}
),
),
BatchDefinition(
datasource_name="test_environment",
data_connector_name="general_s3_data_connector",
data_asset_name="TestFiles",
batch_identifiers=IDDict(
{"name": "eugene", "timestamp": "20201129", "price": "1900"}
),
),
BatchDefinition(
datasource_name="test_environment",
data_connector_name="general_s3_data_connector",
data_asset_name="TestFiles",
batch_identifiers=IDDict(
{"name": "eugene", "timestamp": "20200809", "price": "1500"}
),
),
BatchDefinition(
datasource_name="test_environment",
data_connector_name="general_s3_data_connector",
data_asset_name="TestFiles",
batch_identifiers=IDDict(
{"name": "james", "timestamp": "20200811", "price": "1009"}
),
),
BatchDefinition(
datasource_name="test_environment",
data_connector_name="general_s3_data_connector",
data_asset_name="TestFiles",
batch_identifiers=IDDict(
{"name": "james", "timestamp": "20200810", "price": "1003"}
),
),
BatchDefinition(
datasource_name="test_environment",
data_connector_name="general_s3_data_connector",
data_asset_name="TestFiles",
batch_identifiers=IDDict(
{"name": "james", "timestamp": "20200713", "price": "1567"}
),
),
BatchDefinition(
datasource_name="test_environment",
data_connector_name="general_s3_data_connector",
data_asset_name="TestFiles",
batch_identifiers=IDDict(
{"name": "will", "timestamp": "20200810", "price": "1001"}
),
),
BatchDefinition(
datasource_name="test_environment",
data_connector_name="general_s3_data_connector",
data_asset_name="TestFiles",
batch_identifiers=IDDict(
{"name": "will", "timestamp": "20200809", "price": "1002"}
),
),
]
# TEST 1: Sorting works
assert expected == sorted_batch_definition_list
my_batch_request: BatchRequest = BatchRequest(
datasource_name="test_environment",
data_connector_name="general_s3_data_connector",
data_asset_name="TestFiles",
data_connector_query=IDDict(
**{
"batch_filter_parameters": {
"name": "james",
"timestamp": "20200713",
"price": "1567",
}
}
),
)
my_batch_definition_list: List[BatchDefinition]
my_batch_definition: BatchDefinition
# TEST 2: Should only return the specified partition
my_batch_definition_list = (
my_data_connector.get_batch_definition_list_from_batch_request(
batch_request=my_batch_request
)
)
assert len(my_batch_definition_list) == 1
my_batch_definition = my_batch_definition_list[0]
expected_batch_definition: BatchDefinition = BatchDefinition(
datasource_name="test_environment",
data_connector_name="general_s3_data_connector",
data_asset_name="TestFiles",
batch_identifiers=IDDict(
**{
"name": "james",
"timestamp": "20200713",
"price": "1567",
}
),
)
assert my_batch_definition == expected_batch_definition
# TEST 3: Without data_connector_query, should return all 10
my_batch_request: BatchRequest = BatchRequest(
datasource_name="test_environment",
data_connector_name="general_s3_data_connector",
data_asset_name="TestFiles",
data_connector_query=None,
)
# should return 10
my_batch_definition_list = (
my_data_connector.get_batch_definition_list_from_batch_request(
batch_request=my_batch_request
)
)
assert len(my_batch_definition_list) == 10
@mock_s3
def test_alpha():
region_name: str = "us-east-1"
bucket: str = "test_bucket"
conn = boto3.resource("s3", region_name=region_name)
conn.create_bucket(Bucket=bucket)
client = boto3.client("s3", region_name=region_name)
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
keys: List[str] = [
"test_dir_alpha/A.csv",
"test_dir_alpha/B.csv",
"test_dir_alpha/C.csv",
"test_dir_alpha/D.csv",
]
for key in keys:
client.put_object(
Bucket=bucket, Body=test_df.to_csv(index=False).encode("utf-8"), Key=key
)
my_data_connector_yaml = yaml.load(
f"""
module_name: great_expectations.datasource.data_connector
class_name: ConfiguredAssetS3DataConnector
bucket: {bucket}
prefix: test_dir_alpha
assets:
A:
default_regex:
pattern: .*(.+)\\.csv
group_names:
- part_1
""",
)
my_data_connector: ConfiguredAssetS3DataConnector = instantiate_class_from_config(
config=my_data_connector_yaml,
runtime_environment={
"name": "general_s3_data_connector",
"datasource_name": "BASE",
},
config_defaults={"module_name": "great_expectations.datasource.data_connector"},
)
self_check_report = my_data_connector.self_check()
print(json.dumps(self_check_report, indent=2))
assert self_check_report["class_name"] == "ConfiguredAssetS3DataConnector"
assert self_check_report["data_asset_count"] == 1
assert set(list(self_check_report["data_assets"].keys())) == {"A"}
assert self_check_report["unmatched_data_reference_count"] == 0
my_batch_definition_list: List[BatchDefinition]
my_batch_definition: BatchDefinition
# Try to fetch a batch from a nonexistent asset
my_batch_request: BatchRequest = BatchRequest(
datasource_name="BASE",
data_connector_name="general_s3_data_connector",
data_asset_name="B",
data_connector_query=None,
)
my_batch_definition_list = (
my_data_connector.get_batch_definition_list_from_batch_request(
batch_request=my_batch_request
)
)
assert len(my_batch_definition_list) == 0
my_batch_request: BatchRequest = BatchRequest(
datasource_name="BASE",
data_connector_name="general_s3_data_connector",
data_asset_name="A",
data_connector_query=IDDict(**{"batch_filter_parameters": {"part_1": "B"}}),
)
my_batch_definition_list = (
my_data_connector.get_batch_definition_list_from_batch_request(
batch_request=my_batch_request
)
)
assert len(my_batch_definition_list) == 1
@mock_s3
def test_foxtrot():
region_name: str = "us-east-1"
bucket: str = "test_bucket"
conn = boto3.resource("s3", region_name=region_name)
conn.create_bucket(Bucket=bucket)
client = boto3.client("s3", region_name=region_name)
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
keys: List[str] = [
"test_dir_foxtrot/A/A-1.csv",
"test_dir_foxtrot/A/A-2.csv",
"test_dir_foxtrot/A/A-3.csv",
"test_dir_foxtrot/B/B-1.txt",
"test_dir_foxtrot/B/B-2.txt",
"test_dir_foxtrot/B/B-3.txt",
"test_dir_foxtrot/C/C-2017.csv",
"test_dir_foxtrot/C/C-2018.csv",
"test_dir_foxtrot/C/C-2019.csv",
"test_dir_foxtrot/D/D-aaa.csv",
"test_dir_foxtrot/D/D-bbb.csv",
"test_dir_foxtrot/D/D-ccc.csv",
"test_dir_foxtrot/D/D-ddd.csv",
"test_dir_foxtrot/D/D-eee.csv",
]
for key in keys:
client.put_object(
Bucket=bucket, Body=test_df.to_csv(index=False).encode("utf-8"), Key=key
)
my_data_connector_yaml = yaml.load(
f"""
module_name: great_expectations.datasource.data_connector
class_name: ConfiguredAssetS3DataConnector
bucket: {bucket}
prefix: test_dir_foxtrot
assets:
A:
prefix: test_dir_foxtrot/A/
B:
prefix: test_dir_foxtrot/B/
pattern: (.+)-(.+)\\.txt
group_names:
- part_1
- part_2
C:
prefix: test_dir_foxtrot/C/
D:
prefix: test_dir_foxtrot/D/
default_regex:
pattern: (.+)-(.+)\\.csv
group_names:
- part_1
- part_2
""",
)
my_data_connector: ConfiguredAssetS3DataConnector = instantiate_class_from_config(
config=my_data_connector_yaml,
runtime_environment={
"name": "general_s3_data_connector",
"datasource_name": "BASE",
},
config_defaults={"module_name": "great_expectations.datasource.data_connector"},
)
self_check_report = my_data_connector.self_check()
assert self_check_report == {
"class_name": "ConfiguredAssetS3DataConnector",
"data_asset_count": 4,
"example_data_asset_names": ["A", "B", "C"],
"data_assets": {
"A": {
"batch_definition_count": 3,
"example_data_references": [
"test_dir_foxtrot/A/A-1.csv",
"test_dir_foxtrot/A/A-2.csv",
"test_dir_foxtrot/A/A-3.csv",
],
},
"B": {
"batch_definition_count": 3,
"example_data_references": [
"test_dir_foxtrot/B/B-1.txt",
"test_dir_foxtrot/B/B-2.txt",
"test_dir_foxtrot/B/B-3.txt",
],
},
"C": {
"batch_definition_count": 3,
"example_data_references": [
"test_dir_foxtrot/C/C-2017.csv",
"test_dir_foxtrot/C/C-2018.csv",
"test_dir_foxtrot/C/C-2019.csv",
],
},
},
"unmatched_data_reference_count": 0,
"example_unmatched_data_references": [],
# FIXME: (Sam) example_data_reference removed temporarily in PR #2590:
# "example_data_reference": {},
}
my_batch_definition_list: List[BatchDefinition]
my_batch_definition: BatchDefinition
my_batch_request = BatchRequest(
datasource_name="BASE",
data_connector_name="general_s3_data_connector",
data_asset_name="A",
data_connector_query=None,
)
my_batch_definition_list = (
my_data_connector.get_batch_definition_list_from_batch_request(
batch_request=my_batch_request
)
)
assert len(my_batch_definition_list) == 3
@mock_s3
def test_return_all_batch_definitions_sorted_sorter_named_that_does_not_match_group():
region_name: str = "us-east-1"
bucket: str = "test_bucket"
conn = boto3.resource("s3", region_name=region_name)
conn.create_bucket(Bucket=bucket)
client = boto3.client("s3", region_name=region_name)
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
keys: List[str] = [
"alex_20200809_1000.csv",
"eugene_20200809_1500.csv",
"james_20200811_1009.csv",
"abe_20200809_1040.csv",
"will_20200809_1002.csv",
"james_20200713_1567.csv",
"eugene_20201129_1900.csv",
"will_20200810_1001.csv",
"james_20200810_1003.csv",
"alex_20200819_1300.csv",
]
for key in keys:
client.put_object(
Bucket=bucket, Body=test_df.to_csv(index=False).encode("utf-8"), Key=key
)
my_data_connector_yaml = yaml.load(
f"""
class_name: ConfiguredAssetS3DataConnector
datasource_name: test_environment
#execution_engine:
# class_name: PandasExecutionEngine
bucket: bucket
assets:
TestFiles:
pattern: (.+)_(.+)_(.+)\\.csv
group_names:
- name
- timestamp
- price
default_regex:
pattern: (.+)_.+_.+\\.csv
group_names:
- name
sorters:
- orderby: asc
class_name: LexicographicSorter
name: name
- datetime_format: "%Y%m%d"
orderby: desc
class_name: DateTimeSorter
name: timestamp
- orderby: desc
class_name: NumericSorter
name: for_me_Me_Me
""",
)
with pytest.raises(ge_exceptions.DataConnectorError):
# noinspection PyUnusedLocal
my_data_connector: ConfiguredAssetS3DataConnector = (
instantiate_class_from_config(
config=my_data_connector_yaml,
runtime_environment={
"name": "general_s3_data_connector",
"datasource_name": "test_environment",
},
config_defaults={
"module_name": "great_expectations.datasource.data_connector"
},
)
)
@mock_s3
def test_return_all_batch_definitions_too_many_sorters():
region_name: str = "us-east-1"
bucket: str = "test_bucket"
conn = boto3.resource("s3", region_name=region_name)
conn.create_bucket(Bucket=bucket)
client = boto3.client("s3", region_name=region_name)
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
keys: List[str] = [
"alex_20200809_1000.csv",
"eugene_20200809_1500.csv",
"james_20200811_1009.csv",
"abe_20200809_1040.csv",
"will_20200809_1002.csv",
"james_20200713_1567.csv",
"eugene_20201129_1900.csv",
"will_20200810_1001.csv",
"james_20200810_1003.csv",
"alex_20200819_1300.csv",
]
for key in keys:
client.put_object(
Bucket=bucket, Body=test_df.to_csv(index=False).encode("utf-8"), Key=key
)
my_data_connector_yaml = yaml.load(
f"""
class_name: ConfiguredAssetS3DataConnector
datasource_name: test_environment
#execution_engine:
# class_name: PandasExecutionEngine
bucket: {bucket}
prefix: ""
assets:
TestFiles:
default_regex:
pattern: (.+)_.+_.+\\.csv
group_names:
- name
sorters:
- orderby: asc
class_name: LexicographicSorter
name: name
- datetime_format: "%Y%m%d"
orderby: desc
class_name: DateTimeSorter
name: timestamp
- orderby: desc
class_name: NumericSorter
name: price
""",
)
with pytest.raises(ge_exceptions.DataConnectorError):
# noinspection PyUnusedLocal
my_data_connector: ConfiguredAssetS3DataConnector = (
instantiate_class_from_config(
config=my_data_connector_yaml,
runtime_environment={
"name": "general_s3_data_connector",
"datasource_name": "test_environment",
},
config_defaults={
"module_name": "great_expectations.datasource.data_connector"
},
)
)
@mock_s3
def test_example_with_explicit_data_asset_names():
region_name: str = "us-east-1"
bucket: str = "test_bucket"
conn = boto3.resource("s3", region_name=region_name)
conn.create_bucket(Bucket=bucket)
client = boto3.client("s3", region_name=region_name)
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
keys: List[str] = [
"my_base_directory/alpha/files/go/here/alpha-202001.csv",
"my_base_directory/alpha/files/go/here/alpha-202002.csv",
"my_base_directory/alpha/files/go/here/alpha-202003.csv",
"my_base_directory/beta_here/beta-202001.txt",
"my_base_directory/beta_here/beta-202002.txt",
"my_base_directory/beta_here/beta-202003.txt",
"my_base_directory/beta_here/beta-202004.txt",
"my_base_directory/gamma-202001.csv",
"my_base_directory/gamma-202002.csv",
"my_base_directory/gamma-202003.csv",
"my_base_directory/gamma-202004.csv",
"my_base_directory/gamma-202005.csv",
]
for key in keys:
client.put_object(
Bucket=bucket, Body=test_df.to_csv(index=False).encode("utf-8"), Key=key
)
yaml_string = f"""
class_name: ConfiguredAssetS3DataConnector
datasource_name: FAKE_DATASOURCE_NAME
bucket: {bucket}
prefix: my_base_directory/
default_regex:
pattern: ^(.+)-(\\d{{4}})(\\d{{2}})\\.(csv|txt)$
group_names:
- data_asset_name
- year_dir
- month_dir
assets:
alpha:
prefix: my_base_directory/alpha/files/go/here/
pattern: ^(.+)-(\\d{{4}})(\\d{{2}})\\.csv$
beta:
prefix: my_base_directory/beta_here/
pattern: ^(.+)-(\\d{{4}})(\\d{{2}})\\.txt$
gamma:
pattern: ^(.+)-(\\d{{4}})(\\d{{2}})\\.csv$
"""
config = yaml.load(yaml_string)
my_data_connector = instantiate_class_from_config(
config,
config_defaults={"module_name": "great_expectations.datasource.data_connector"},
runtime_environment={"name": "my_data_connector"},
)
# noinspection PyProtectedMember
my_data_connector._refresh_data_references_cache()
assert len(my_data_connector.get_unmatched_data_references()) == 0
assert (
len(
my_data_connector.get_batch_definition_list_from_batch_request(
batch_request=BatchRequest(
datasource_name="FAKE_DATASOURCE_NAME",
data_connector_name="my_data_connector",
data_asset_name="alpha",
)
)
)
== 3
)
assert (
len(
my_data_connector.get_batch_definition_list_from_batch_request(
batch_request=BatchRequest(
datasource_name="FAKE_DATASOURCE_NAME",
data_connector_name="my_data_connector",
data_asset_name="beta",
)
)
)
== 4
)
assert (
len(
my_data_connector.get_batch_definition_list_from_batch_request(
batch_request=BatchRequest(
datasource_name="FAKE_DATASOURCE_NAME",
data_connector_name="my_data_connector",
data_asset_name="gamma",
)
)
)
== 5
)
|
StarcoderdataPython
|
187083
|
"""
Get Cannabis Data for Connecticut
Copyright (c) 2021 Cannlytics
Author: <NAME>
Contact: <<EMAIL>>
Created: 9/16/2021
Updated: 9/18/2021
License: MIT License <https://github.com/cannlytics/cannlytics-ai/blob/main/LICENSE>
Data Sources:
Connecticut Medical Marijuana Brand Registry: https://data.ct.gov/Health-and-Human-Services/Medical-Marijuana-Brand-Registry/egd5-wb6r/data
Connecticut Socrata Open Data API: https://dev.socrata.com/foundry/data.ct.gov/egd5-wb6r
"""
# Standard imports
import os
# External imports
import sys
sys.path.append('../../../../')
from cannlytics.firebase import initialize_firebase, update_document
import pandas as pd
from sodapy import Socrata
CANNABINOIDS = {
'tetrahydrocannabinol_thc': 'thc',
'tetrahydrocannabinol_acid_thca': 'thca',
'cannabidiols_cbd': 'cbd',
'cannabidiol_acid_cbda': 'cbda',
'cbg': 'cbg',
'cbg_a': 'cbga',
'cannabavarin_cbdv': 'cbdv',
'cannabichromene_cbc': 'cbc',
'cannbinol_cbn': 'cbn',
'tetrahydrocannabivarin_thcv': 'thcv',
}
TERPENES = {
'a_pinene': 'a_pinene',
'b_myrcene': 'b_myrcene',
'b_caryophyllene': 'b_caryophyllene',
'b_pinene': 'b_pinene',
'limonene': 'limonene',
'ocimene': 'ocimene',
'linalool_lin': 'linalool',
'humulene_hum': 'humulene',
'b_eudesmol': 'b_eudesmol',
'fenchone': 'fenchone',
'camphor': 'camphor',
'a_bisabolol': 'a_bisabolol',
'a_phellandrene': 'a_phellandrene',
'a_terpinene': 'a_terpinene',
'b_terpinene': 'b_terpinene',
'pulegol': 'pulegol',
'borneol': 'borneol',
'isopulegol': 'isopulegol',
'carene': 'carene',
'camphene': 'camphene',
'caryophyllene_oxide': 'caryophyllene_oxide',
'cedrol': 'cedrol',
'eucalyptol': 'eucalyptol',
'geraniol': 'geraniol',
'guaiol': 'guaiol',
'geranyl_acetate': 'geranyl_acetate',
'isoborneol': 'isoborneol',
'menthol': 'menthol',
'l_fenchone': 'l_fenchone',
'nerol': 'nerol',
'sabinene': 'sabinene',
'terpineol': 'terpineol',
'terpinolene': 'terpinolene',
'trans_b_farnesene': 'trans_b_farnesene',
'valencene': 'valencene',
'a_cedrene': 'a_cedrene',
'a_farnesene': 'a_farnesene',
'b_farnesene': 'b_farnesene',
'cis_nerolidol': 'cis_nerolidol',
'fenchol': 'fenchol',
'trans_nerolidol': 'trans_nerolidol',
}
STANDARD_COLUMNS = {
'analyte': 'analyte',
'concentration': 'concentration',
'brand_name': 'sample_name',
'dosage_form': 'sample_type',
'producer': 'organization',
'registration_number': 'sample_id',
'approval_date': 'tested_at',
}
def get_column_dict_value(df, column, key):
"""Return a column's dictionary values as a column. Handles missing values.
Args:
df (DataFrame): A DataFrame that contains a column with dictionary values.
column (str): The column name that contains the dictionary.
key (str): The key of the dictionary to return.
Returns:
(Series): A series of dictionary values from the column.
"""
df[column] = df[column].fillna({i: {} for i in df.index})
subdata = pd.json_normalize(df[column])
return subdata[key]
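# Illustrative example (made-up data, not from the registry):
#   df = pd.DataFrame({'lab_analysis': [{'url': 'https://example.test/coa'}, None]})
#   get_column_dict_value(df, 'lab_analysis', 'url')
#   # -> Series(['https://example.test/coa', NaN])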
def get_data_ct():
"""Get public cannabis data for Connecticut.
Returns
stats (dict): A dictionary of state statistics.
"""
#-------------------------------------------------------------------
# Get the data.
#-------------------------------------------------------------------
# Get the cannabinoid data.
app_token = os.environ.get('APP_TOKEN', None)
client = Socrata('data.ct.gov', app_token)
response = client.get('egd5-wb6r', limit=15000)
data = pd.DataFrame.from_records(response)
# Convert values to floats, coding suspected non-detects as 0.
for analyte in list(TERPENES.keys()) + list(CANNABINOIDS.keys()):
data[analyte] = data[analyte].str.replace('<0.10', '0.0', regex=False)
data[analyte] = data[analyte].str.replace('<0.1', '0.0', regex=False)
data[analyte] = data[analyte].str.replace('<0.29', '0.0', regex=False)
data[analyte] = data[analyte].str.replace('%', '', regex=False)
data[analyte] = data[analyte].str.replace('-', '0.0', regex=False)
data[analyte] = pd.to_numeric(data[analyte], errors='coerce').fillna(0.0)
# Calculate total terpenes and total cannabinoids.
data['total_terpenes'] = data[list(TERPENES.keys())].sum(axis=1)
data['total_cannabinoids'] = data[list(CANNABINOIDS.keys())].sum(axis=1)
# Clean organization name.
data['organization'] = data['producer'].str.title().str.replace('Llc', 'LLC')
# Rename certain columns, including analytes, for standardization.
columns = {**STANDARD_COLUMNS, **CANNABINOIDS, **TERPENES}
data = data.rename(columns, axis=1)
# Get the CoA URL from the lab_analysis column.
data['coa_url'] = get_column_dict_value(data, 'lab_analysis', 'url')
# Get the sample image from the product_image column.
data['image_url'] = get_column_dict_value(data, 'product_image', 'url')
# Get the label image from the product_image column.
data['label_url'] = get_column_dict_value(data, 'label_image', 'url')
# Remove duplicate data.
data.drop(['lab_analysis', 'product_image', 'label_image'], axis=1, inplace=True)
#-------------------------------------------------------------------
# Calculate terpene prevalence.
#-------------------------------------------------------------------
# Calculate the prevalence (percent of samples that contains) of each terpene.
# Also, calculate the average for each terpene when the terpene is present.
prevalence = {}
analytes = pd.DataFrame(columns=STANDARD_COLUMNS.values())
for analyte in list(TERPENES.values()):
analyte_present_data = data.loc[data[analyte] > 0].copy(deep=True)
prevalence[analyte] = len(analyte_present_data) / len(data)
analyte_present_data['analyte'] = analyte
analyte_present_data['concentration'] = analyte_present_data[analyte]
subset = analyte_present_data[list(STANDARD_COLUMNS.values())]
# subset = subset.rename(STANDARD_COLUMNS, axis=1)
analytes = analytes.append(subset)
# Create a DataFrame with statistics for each analyte.
prevalence_stats = pd.DataFrame(
prevalence.items(),
columns=['analyte', 'prevalence'],
index=prevalence.keys()
)
    # Sort the data by the most prevalent terpene.
prevalence_stats = prevalence_stats.sort_values('prevalence', ascending=False)
prevalence_stats = prevalence_stats.to_dict(orient='records')
# Add analyte name.
analytes = []
for item in prevalence_stats:
item['name'] = item['analyte'] \
.replace('_', ' ') \
.replace(' lin', '') \
.replace(' hum', '') \
.title() \
.replace('B ', 'β-') \
.replace('A ', 'α-') \
.replace('Trans ', 'trans-') \
.replace('Cis ', 'cis-')
analytes.append(item)
# Define statistics.
stats = {'analytes': analytes}
#-------------------------------------------------------------------
# Upload the data and the statistics.
#-------------------------------------------------------------------
# # Initialize Firebase.
# initialize_firebase()
# # Upload the statistics.
# update_document('public/stats/ct/terpene_prevalence', stats)
# # Upload the data.
# for _, values in data.iterrows():
# doc_id = values['sample_id']
# doc_data = values.to_dict()
# ref = f'public/data/state_data/ct/lab_results/{doc_id}'
# update_document(ref, doc_data)
# Return the statistics.
# return stats
|
StarcoderdataPython
|
3203038
|
<gh_stars>100-1000
# Manul - network file
# -------------------------------------
# <NAME> <<EMAIL>> <<EMAIL>>
#
# Copyright 2019 Salesforce.com, inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import socket
import sys
from manul_utils import *
from printing import *
import time
import pickle
import socket
'''
Protocol format:
size nthreads|files_list [content]
'''
MAX_RESPONSE_SIZE = 1500
REQUEST_THREADS_COUNT = "nthreads"
FILES_LIST_SEQUENCE = "files_list"
REQUEST_BITMAP = "bitmap_request"
SEND_BITMAP = "bitmap_send"
REMOTE_BITMAPS_SYNC_FREQUENCY = 15
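# Example of the wire format described above: each message is sent as
# "<payload length> <payload>", so the threads-count request is built as
#   "%d %s" % (len(REQUEST_THREADS_COUNT), REQUEST_THREADS_COUNT)  # -> "8 nthreads"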
# TODO: a lot of code duplicates, remove it + make it as class?
def get_slaves_ips(file_path):
'''
:param file_path: path to file with list of string in IP:PORT\n format
:return: list of (ip:port) in necessary form
'''
if not os.path.isfile(file_path):
ERROR("Unable to find file specified %s" % file_path)
res = list()
content = open(file_path).readlines()
for line in content:
line = line.replace("\n", "")
if line == "":
continue
if line is None or ":" not in line:
ERROR("Slave's IP:PORT address is in invalid format %s" % line)
line = line.split(":")
ip = line[0]
port = line[1]
res.append((ip, int(port)))
return res
def get_remote_threads_count(ips): # TODO: test it on 3 and more slaves
res = list()
total_threads_count = 0
for ip, port in ips:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
INFO(0, None, None, "Requesting available parallel fuzzers count from %s:%d" % (ip, port))
sock.connect((ip, port))
message = "%d %s" % (len(REQUEST_THREADS_COUNT), REQUEST_THREADS_COUNT)
INFO(1, None, None, "Sending %s" % message)
sock.sendall(message)
data = sock.recv(MAX_RESPONSE_SIZE) # TODO: wait time ?
except socket.error as exc:
sock.close()
ERROR("Request failed, socket return error %s" % exc)
INFO(1, None, None, "Receiving %s" % data)
if data is None or len(data) == 0:
ERROR("Violation of protocol. Slave returned empty string")
try:
tokens = data.split(" ") # 0 - size, 1 - nthreads 2 - threads_count
token = tokens[1]
if token != "nthreads":
sock.close()
ERROR("Violation of protocol. Received wrong data from slave. Terminating")
threads_count = int(tokens[2])
except:
ERROR("Violation of protocol. Unable to parse %s" % data)
INFO(0, None, None, "Slave has %d available fuzzers" % threads_count)
total_threads_count += threads_count
res.append((ip, port, int(threads_count)))
sock.close()
INFO(0, None, None, "Total available fuzzers %d" % total_threads_count)
return res, total_threads_count
def send_files_list(ip, port, files):
INFO(0, None, None, "Sending list of %d files to %s:%d" % (len(files), ip, port))
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ip, int(port)))
except socket.error as exc:
sock.close()
ERROR("Failed to connect with slave, socket returned error %s" % exc)
message = "%s %s" % (FILES_LIST_SEQUENCE, str(files))
message = str(len(message)) + " " + message
INFO(1, None, None, "Sending %s" % message)
try:
sock.sendall(message)
except socket.error as exc:
sock.close()
ERROR("Failed to send files list to slave, socket returned error %s" % exc)
sock.close()
def get_files_list_from_master(ip_port, nthreads):
ip = ip_port.split(":")[0]
port = int(ip_port.split(":")[1])
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
INFO(0, None, None, "Waiting for incoming job from master")
try:
server_address = (ip, port)
sock.bind(server_address)
sock.listen(1)
connection, client_address = sock.accept()
except socket.error as exc:
sock.close()
ERROR("Failed to connect with master, socket returned error %s" % exc)
INFO(0, None, None, "Incoming connection from %s:%d" % (client_address[0], client_address[1]))
# Step 1. Master is asking for threads count
try:
data = connection.recv(MAX_RESPONSE_SIZE)
except socket.error as exc:
sock.close()
ERROR("Failed to read request, socket returned error %d: %s" % exc)
INFO(1, None, None, "Received from master: %s" % data)
if data is None or len(data) == 0:
ERROR("Protocol violation. Nothing received from master")
data = data.split(" ")
if int(data[0]) > MAX_RESPONSE_SIZE:
sock.close()
ERROR("Protocol violation. Invalid size. %s")
elif data[1] != REQUEST_THREADS_COUNT:
sock.close()
ERROR("Protocol violation. Invalid token.")
# Step 2. Answering master with threads count
message = "%s %d" % (REQUEST_THREADS_COUNT, nthreads)
message = str(len(message)) + " " + message
INFO(1, None, None, "Answering master with %s" % message)
try:
connection.sendall(message)
    except socket.error as exc:
sock.close()
ERROR("Failed to send threads count to master. Socket return error %s" % exc)
# Step 3. Master is answering with files list
INFO(0, None, None, "Waiting for files list")
try:
connection, client_address = sock.accept()
    except socket.error as exc:
        sock.close()
        ERROR("Failed to connect with master, socket returned error %s" % exc)
files_list = ""
bytes_to_read = MAX_RESPONSE_SIZE
bytes_read = 0
while bytes_read < bytes_to_read:
try:
data = connection.recv(bytes_to_read)
        except socket.error as exc:
connection.close()
sock.close()
ERROR("Failed to read master's response. Socket return error %s" % exc)
if data is None or len(data) == 0:
ERROR("Nothing received from master")
content = data
INFO(1, None, None, "Received files list from master: %s" % content)
if bytes_read == 0: # at first iteration reading size of data that master wants to actually send
try:
content = content.split(" ")
size_needed = int(content[0])
bytes_to_read = size_needed
content = content[2:]
content = "".join(content)
except:
connection.close()
sock.close()
ERROR("Protocol violation. Failed to parse list of files received %s" % data)
files_list += content
bytes_read += len(data)
# Step 4. Parsing str as list to actual list. Master sends it as list of lists split by thread. I don't want to
# parse list of lists, so I just split them again later.
if len(files_list) == 0:
ERROR("File lists is empty, nothing to fuzz")
files_list = files_list.replace("[", "")
files_list = files_list.replace("]", "")
files_list = files_list.replace("\'", "")
files_list = files_list.split(",")
INFO(1, None, None, "Files list from master: %s" % files_list)
connection.close()
sock.close()
return files_list
def sync_bitmap_net(virgin_bits, remote_virgin_bits):
INFO(0, None, None, "Synchronizing bitmaps")
new_cov = False
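    # 0xFF marks a byte that local fuzzers have never touched; adopt any byte
    # the remote instance has already hit (!= 0xFF) that is still virgin here.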
for i in range(0, SHM_SIZE):
byte = int(remote_virgin_bits[i]) # TODO: make sure it works
if byte != 0xFF and virgin_bits[i] == 0xFF:
virgin_bits[i] = byte
new_cov = 1
if new_cov:
INFO(1, None, None, "New coverage found")
else:
INFO(1, None, None, "Nothing new found")
def socket_recv(socket_instance, error_on_empty):
try:
rec_data = socket_instance.recv(1500)
except socket.error as exc:
socket_instance.close()
ERROR("Failed to recv data from socket. Socket return error %s" % exc)
if error_on_empty and (rec_data is None or len(rec_data) == 0):
socket_instance.close()
ERROR("Failed to recv data from socket. Data received is 0 length")
return rec_data
def recv_data(socket_instance):
data = ""
# read size of data we are going to recv
rec_data = socket_recv(socket_instance, True)
try:
rec_data = rec_data.split(" ")
length = int(rec_data[0])
data = rec_data[1]
except:
socket_instance.close()
ERROR("Protocol violation, length or token is not specified")
bytes_read = len(data)
while bytes_read < length:
data += socket_recv(socket_instance, False)
bytes_read = len(data)
INFO(1, None, None, "Raw bytes received %d" % len(data))
return pickle.loads(data)
def send_data(msg, sock, connection):
data_str = pickle.dumps(msg)
package = str(len(data_str)) + " " + data_str
try:
connection.send(package)
except socket.error as exc:
connection.close()
sock.close()
ERROR("Failed to send, socket returned error %d: %s" % exc)
return 0
return 1
def receive_bitmap_slave(ip_port, virgin_bits):
ip = ip_port.split(":")[0]
port = int(ip_port.split(":")[1])
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (ip, port)
sock.bind(server_address)
sock.listen(1)
while True:
INFO(0, None, None, "Waiting for incoming job from master on %s:%d" % (ip, port))
try:
connection, client_address = sock.accept()
except socket.error as exc:
sock.close()
ERROR("Failed to connect with master, socket returned error %s" % exc)
INFO(0, None, None, "Incoming connection from %s:%d" % (client_address[0], client_address[1]))
# Step 1. Master is sending control request
data = connection.recv(1500)
data = pickle.loads(data)
try:
data = data.split(" ")[1]
except:
connection.close()
            sock.close()
ERROR("Protocol violation invalid token received %s" % data)
if data.startswith(REQUEST_BITMAP):
INFO(1, None, None, "Sending bitmap")
bitmap_to_send = list("\x00" * SHM_SIZE)
for i, y in enumerate(virgin_bits):
bitmap_to_send[i] = y
send_data(bitmap_to_send, sock, connection)
elif data.startswith(SEND_BITMAP):
INFO(1, None, None, "Receiving new bitmap")
data = recv_data(connection)
INFO(1, None, None, "Synchronizing bitmaps")
sync_bitmap_net(virgin_bits, data)
else:
connection.close()
sock.close()
ERROR("Protocol violation. Invalid request from master %" % data)
connection.close()
sock.close() # TODO: do we really need to open and close socket each time ?
def sync_remote_bitmaps(virgin_bits, ips):
'''
:param virgin_bits: coverage bitmap automatically updated by local fuzzers
:param is_master: master or slave instance ?
:return:
'''
while True:
time.sleep(REMOTE_BITMAPS_SYNC_FREQUENCY)
INFO(1, None, None, "Syncronizing bitmaps")
# Step 1. Pulling bitmap from slaves
for ip, port in ips: # TODO: what if our target is running in simple mode ?
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
INFO(1, None, None, "Requesting bitmap from %s:%d" % (ip, port))
sock.connect((ip, port))
time.sleep(1)
message = "%d %s" % (len(REQUEST_BITMAP), REQUEST_BITMAP)
INFO(1, None, None, "Sending %s" % message)
data_str = pickle.dumps(message)
res = sock.sendall(data_str)
data = recv_data(sock)
except socket.error as exc:
sock.close()
WARNING(None, "Request to %s failed, socket return error %s" % (ip, exc))
continue
INFO(1, None, None, "Received %d bitmap" % len(data))
if data is None or len(data) == 0:
sock.close()
ERROR("Violation of protocol. Slave %s returned empty string" % ip)
#for symbol in data:
if len(data) != SHM_SIZE:
sock.close()
ERROR("Bitmap is less than %d bytes" % SHM_SIZE)
# TODO: use has_new_bits or sync_bitmap
sync_bitmap_net(virgin_bits, data)
sock.close()
# Step 2. Sending updated bitmap to slaves
for ip, port in ips: # TODO: what if our target is running in simple mode ?
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
INFO(1, None, None, "Sending bitmap to %s:%d" % (ip, port))
sock.connect((ip, port))
message = "%d %s" % (len(SEND_BITMAP), SEND_BITMAP)
INFO(1, None, None, "Sending %s" % message)
data_str = pickle.dumps(message)
sock.sendall(data_str)
bitmap_to_send = list("\x00" * SHM_SIZE)
for i, y in enumerate(virgin_bits):
bitmap_to_send[i] = y
raw_data = pickle.dumps(bitmap_to_send)
msg = str(len(raw_data)) + " " + raw_data
INFO(1, None, None, "Sending actual bitmap %d" % len(msg))
sock.sendall(msg)
except socket.error as exc:
WARNING(None, "Request to %s failed, socket return error %s" % (ip, exc))
sock.close()
class Network(object):
def __init__(self, target_ip, target_port, target_protocol):
self.target_ip = target_ip
self.target_port = target_port
self.target_protocol = target_protocol
self.s = None
# open socket
if self.target_protocol == "tcp":
self.protocol_l4 = socket.SOCK_STREAM
else:
self.protocol_l4 = socket.SOCK_DGRAM
def send_test_case(self, data):
self.s = socket.socket(socket.AF_INET, self.protocol_l4)
INFO(1, None, None, "Connecting to %s on port %d" % (self.target_ip, self.target_port))
try:
self.s.connect((self.target_ip, self.target_port))
except:
ERROR("Failed to connect to the host specified %s %d" % (self.target_ip, self.target_port))
INFO(1, None, None, "Sending %d bytes, content %s" % (len(data), data))
res = self.s.sendall(data)
if res:
WARNING(None, "Failed to send data to the server")
else:
INFO(1, None, None, "Done")
# receiving data from the server if any
#while 1:
# data = self.s.recv(4096)
# if not data: break
# INFO(1, None, None, "Received %d bytes from the server in response", len(data))
self.s.close()
|
StarcoderdataPython
|
1703688
|
<gh_stars>1-10
import os
import subprocess
from collections.abc import Iterable
from io import StringIO
from urllib.parse import urlparse
import pandas as pd
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager
from pdfminer.pdfpage import PDFPage
def ensure_list(x):
"""
    Ensure the input argument is converted to a `list`.
    Parameters
    ----------
    x : object
        The input value.
    Returns
    -------
    res : list
        The input converted to a list.
    Notes
    -------
    Avoids meaningless iteration over the characters of a single string.
Example
-------
>>> ensure_list('000001')
['000001']
>>> ensure_list(('000001','000002'))
['000001', '000002']
"""
if isinstance(x, str):
return [x]
elif pd.core.dtypes.common.is_number(x):
return [x]
elif isinstance(x, Iterable):
return [v for v in x]
else:
        raise TypeError('Argument "x" must be either a str or an iterable object.')
def get_exchange_from_code(stock_code):
"""股票市场分类"""
if stock_code[:3] == '688':
return '科创板'
f = stock_code[0]
if f == '2':
return '深市B'
elif f == '3':
return '创业板'
elif f == '6':
return '沪市A'
elif f == '9':
return '沪市B'
elif stock_code[:3] == '002':
return '中小板'
return '深主板A'
def to_plural(word):
"""转换为单词的复数"""
word = word.lower()
if word.endswith('y'):
return word[:-1] + 'ies'
elif word[-1] in 'sx' or word[-2:] in ['sh', 'ch']:
return word + 'es'
elif word.endswith('an'):
return word[:-2] + 'en'
else:
return word + 's'
def filter_a(codes):
"""过滤A股代码"""
codes = ensure_list(codes)
return [x for x in codes if x[0] in ('0', '3', '6')]
def is_connectivity(server):
"""判断网络是否连接"""
fnull = open(os.devnull, 'w')
result = subprocess.call('ping ' + server + ' -c 2',
shell=True, stdout=fnull, stderr=fnull)
if result:
res = False
else:
res = True
fnull.close()
return res
def get_pdf_text(fname, pages=None):
"""读取pdf文件内容"""
if not pages:
pagenums = set()
else:
pagenums = set(pages)
output = StringIO()
manager = PDFResourceManager()
converter = TextConverter(manager, output, laparams=LAParams())
interpreter = PDFPageInterpreter(manager, converter)
infile = open(fname, 'rb')
for page in PDFPage.get_pages(infile, pagenums):
interpreter.process_page(page)
infile.close()
converter.close()
text = output.getvalue()
    output.close()
return text
def get_server_name(url):
"""获取主机网络地址
Arguments:
url {string} -- 网址
Returns:
string -- 返回主机地址
"""
return urlparse(url)[1]
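# Hedged, self-contained demo of the pure helpers above; it runs only when this
# module is executed directly, so importing the module is unaffected. The stock
# codes used here are arbitrary examples.
if __name__ == '__main__':
    print(ensure_list('000001'))                      # ['000001']
    print(get_exchange_from_code('688001'))           # 科创板
    print(get_exchange_from_code('300750'))           # 创业板
    print(to_plural('company'))                       # companies
    print(filter_a(['000001', '900901', '300750']))   # drops B-share code '900901'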
|
StarcoderdataPython
|
46262
|
<filename>twisted/plugins/tftp_plugin.py
'''
@author: shylent
'''
from tftp.backend import FilesystemSynchronousBackend
from tftp.protocol import TFTP
from twisted.application import internet
from twisted.application.service import IServiceMaker
from twisted.plugin import IPlugin
from twisted.python import usage
from twisted.python.filepath import FilePath
from zope.interface import implementer
def to_path(str_path):
return FilePath(str_path)
class TFTPOptions(usage.Options):
optFlags = [
['enable-reading', 'r', 'Lets the clients read from this server.'],
['enable-writing', 'w', 'Lets the clients write to this server.'],
['verbose', 'v', 'Make this server noisy.']
]
optParameters = [
['port', 'p', 1069, 'Port number to listen on.', int],
['root-directory', 'd', None, 'Root directory for this server.', to_path]
]
def postOptions(self):
if self['root-directory'] is None:
raise usage.UsageError("You must provide a root directory for the server")
@implementer(IServiceMaker, IPlugin)
class TFTPServiceCreator(object):
tapname = "tftp"
description = "A TFTP Server"
options = TFTPOptions
def makeService(self, options):
backend = FilesystemSynchronousBackend(options["root-directory"],
can_read=options['enable-reading'],
can_write=options['enable-writing'])
return internet.UDPServer(options['port'], TFTP(backend))
serviceMaker = TFTPServiceCreator()
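# Typical invocation sketch, assuming this plugin is discoverable on twisted's
# plugin path. The flag and option names come from TFTPOptions above; the root
# directory path is only an example:
#
#     twistd -n tftp --root-directory=/srv/tftp --enable-reading --port=1069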
|
StarcoderdataPython
|
3351151
|
import json
from datetime import datetime
from typing import List
from flask_restplus import fields
from controllers.common.models.CommonModels import EntityModel, CommonModels
from controllers.integration.models.DataIntegrationModels import DataIntegrationModels
from infrastructor.IocManager import IocManager
from models.dao.common.Log import Log
from models.dao.operation import DataOperation, DataOperationJob
from models.dao.operation.DataOperationContact import DataOperationContact
class DataOperationIntegrationModel(EntityModel):
def __init__(self,
Order: int = None,
Limit: int = None,
ProcessCount: int = None,
Integration=None,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.Order: int = Order
self.Limit: int = Limit
self.ProcessCount: int = ProcessCount
self.Integration = Integration
class DataOperationContactModel(EntityModel):
def __init__(self,
Email: str = None,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.Email: str = Email
class DataOperationModel(EntityModel):
def __init__(self,
Id: int = None,
Name: str = None,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.Id: int = Id
self.Name: str = Name
class DataIntegrationLogModel():
def __init__(self,
Id: int = None,
Type: str = None,
Content: str = None,
LogDatetime: datetime = None,
JobId: int = None,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.Id = Id
self.Type = Type
self.Content = Content
self.LogDatetime = LogDatetime
self.JobId = JobId
class DataOperationModels:
ns = IocManager.api.namespace('DataOperation', description='Data Operation endpoints',
path='/api/DataOperation')
operation_contact = IocManager.api.model('Data Operation Contact', {
'Email': fields.String(description='Operation contact email', required=False),
})
operation_integration = IocManager.api.model('Data Operation Integration', {
        'Limit': fields.Integer(description='Integration limit', required=False, example=10000),
        'ProcessCount': fields.Integer(description='Integration process count', required=True, example=1),
'Integration': fields.Nested(DataIntegrationModels.create_data_integration_model,
description='Integration information', required=True)
})
create_data_operation_model = IocManager.api.model('CreateDataOperation', {
'Name': fields.String(description='Data Operation Name', required=True),
'Contacts': fields.List(fields.Nested(operation_contact), description='Contact Email list',
required=False),
'Integrations': fields.List(fields.Nested(operation_integration), description='Integration code list',
required=True),
})
update_data_operation_model = IocManager.api.model('UpdateDataOperation', {
'Name': fields.String(description='Data Operation Name', required=True),
'Integrations': fields.List(fields.Nested(operation_integration), description='Integration code list',
required=True),
})
delete_data_operation_model = IocManager.api.model('DeleteDataOperationModel', {
        'Id': fields.Integer(description='Data Operation Id', required=True),
})
@staticmethod
def get_data_operation_contact_model(data_operation_contact: DataOperationContact) -> DataOperationContactModel:
entity_model = DataOperationContactModel(
Email=data_operation_contact.Email,
)
result_model = json.loads(json.dumps(entity_model.__dict__, default=CommonModels.date_converter))
return result_model
@staticmethod
def get_data_operation_contact_models(data_operation_contacts: List[DataOperationContact]) -> List[
DataOperationContactModel]:
entities = []
for data_operation_contact in data_operation_contacts:
entity = DataOperationModels.get_data_operation_contact_model(data_operation_contact)
entities.append(entity)
return entities
@staticmethod
def get_data_operation_result_model(data_operation: DataOperation) -> DataOperationModel:
entity_model = DataOperationModel(
Id=data_operation.Id,
Name=data_operation.Name,
)
result_model = json.loads(json.dumps(entity_model.__dict__, default=CommonModels.date_converter))
integrations = []
for data_operation_integration in data_operation.Integrations:
entity_model = DataOperationIntegrationModel(
Id=data_operation_integration.Id,
Order=data_operation_integration.Order,
Limit=data_operation_integration.Limit,
ProcessCount=data_operation_integration.ProcessCount,
)
data_operation_integration_result_model = json.loads(
json.dumps(entity_model.__dict__, default=CommonModels.date_converter))
integration = DataIntegrationModels.get_data_integration_model(data_operation_integration.DataIntegration)
data_operation_integration_result_model['Integration'] = integration
integrations.append(data_operation_integration_result_model)
contacts = DataOperationModels.get_data_operation_contact_models(data_operation.Contacts)
result_model['Contacts'] = contacts
result_model['Integrations'] = integrations
return result_model
@staticmethod
def get_data_operation_result_models(data_operations: List[DataOperation]) -> List[DataOperationModel]:
entities = []
for data_operation in data_operations:
entity = DataOperationModels.get_data_operation_result_model(data_operation)
entities.append(entity)
return entities
@staticmethod
def get_pdi_logs_model(logs: List[Log]) -> List[
DataIntegrationLogModel]:
entities = []
for log in logs:
result = DataIntegrationLogModel(
Id=log.Id,
JobId=log.JobId,
Type='Info' if log.TypeId == 2 else 'Error',
Content=log.Content,
LogDatetime=log.LogDatetime,
)
entity = json.loads(json.dumps(result.__dict__, default=CommonModels.date_converter))
entities.append(entity)
return entities
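# Hedged example of a request body accepted by create_data_operation_model above;
# the nested integration object depends on DataIntegrationModels.create_data_integration_model
# and is therefore elided here:
#
#     {
#         "Name": "NightlyLoad",
#         "Contacts": [{"Email": "ops@example.com"}],
#         "Integrations": [{"Limit": 10000, "ProcessCount": 1, "Integration": {...}}]
#     }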
|
StarcoderdataPython
|
3267912
|
list_of_books = []
def add_book():
book_name = str(input("Enter book name to add: "))
list_of_books.append(book_name)
print("Book is successfully added: "+ book_name)
def del_book():
book_name = str(input("Enter book name to delete: "))
    is_book_exist = book_name in list_of_books
if is_book_exist:
list_of_books.remove(book_name)
print("Book is successfully deleted: "+ book_name)
else:
print("There's no such book: "+ book_name)
def count_book():
print("Book count: " + str(len(list_of_books)))
print(list_of_books)
def sort_book():
print("Book is sorted: "+ str(sorted(list_of_books)))
print("This is a library.You can add the book name, delete it or count book amount.")
while True:
print("What action would you like to do?")
print("Enter 'a' to add the book")
print("Enter 'd' to delete the book")
print("Enter 'c' to count total book amount")
print("Enter 's' to sort the book")
print("Enter 'q' to quit the program")
action = str(input("Make your choice: "))
if action == 'a':
add_book()
elif action == 'd':
del_book()
elif action =='c':
count_book()
elif action == 's':
sort_book()
elif action =='q':
break
else:
print("You entered wrong value: " + action)
print("")
print("The program is successfully finished!")
|
StarcoderdataPython
|
4824638
|
<gh_stars>10-100
import requests
from mockserver_friendly import request, response, form
from test import MOCK_SERVER_URL, MockServerClientTestCase
class TestFormRequests(MockServerClientTestCase):
def test_form_request(self):
self.client.stub(
request(body=form({
"a": "b",
"c[0]": "d"
})),
response()
)
result = requests.post(MOCK_SERVER_URL, data={"a": "b"})
self.assertEqual(result.status_code, 404)
result = requests.post(MOCK_SERVER_URL, data={"a": "b", "c[0]": "d"})
self.assertEqual(result.status_code, 200)
|
StarcoderdataPython
|
39550
|
<reponame>tej17584/proyecto3DisenoLenguajes
import pickle
class parserAlejandro():
def __init__(self) -> None:
self.tokensScaneados = "" # los tokens leidos
self.tokensScaneadosV2 = []
self.tokensMapeados = ""
self.lastToken = ""
self.lookAheadToken = ""
self.leerTokensAndMap()
self.Parser()
def leerTokensAndMap(self):
infile = open("arrayTokensLeidos", 'rb')
self.tokensScaneados = pickle.load(infile)
infile.close()
infile = open("diccionarioTokensMapeados", 'rb')
self.tokensMapeados = pickle.load(infile)
infile.close()
for llave, valor in self.tokensMapeados.items():
for x in self.tokensScaneados:
valoresToken = x.getAllValues()
if(llave == valoresToken[0]):
x.setNumeracion(valor)
elif(valoresToken[0] == "ERROR" and (valoresToken[1] == llave)):
x.setNumeracion(valor)
for x in range(len(self.tokensScaneados)):
if(self.tokensScaneados[x].getNumeracion() != ""):
self.tokensScaneadosV2.append(self.tokensScaneados[x])
def Expect(self, tokenId):
if(self.lookAheadToken.getNumeracion() == tokenId):
#print("llamare un nuevo token con tokenID: ", tokenId)
self.GetNewToken()
else:
self.printERROROnScreen(tokenId)
def GetNewToken(self):
self.lastToken = self.lookAheadToken
if(len(self.tokensScaneadosV2) > 0):
self.lookAheadToken = self.tokensScaneadosV2.pop(0)
else:
self.lookAheadToken = self.lookAheadToken
def getNumber(self):
if(self.lookAheadToken.getValor() != "+" and self.lookAheadToken.getValor() != "-" and self.lookAheadToken.getValor() != "*" and self.lookAheadToken.getValor() != "/" and self.lookAheadToken.getValor() != ";"):
return int(self.lastToken.getValor())
else:
return self.lastToken.getValor()
def getVar(self):
return self.lookAheadToken.getValor()
def Expr(self):
self.StatSeq()
def StatSeq(self):
while self.lookAheadToken.getNumeracion() == 5 or self.lookAheadToken.getNumeracion() == 2 or self.lookAheadToken.getNumeracion() == 8:
self.Stat()
self.Expect(3)
def Stat(self):
value = 0
value = self.Expression(value)
print("El Resultado de la operacion es: ", value)
def Expression(self, result):
result1, result2 = 0, 0
result1 = self.Term(result1)
while self.lookAheadToken.getNumeracion() == 4 or self.lookAheadToken.getNumeracion() == 5:
if(self.lookAheadToken.getNumeracion() == 4):
self.Expect(4)
result2 = self.Term(result2)
result1 = int(result1)
result2 = int(result2)
result1 += result2
elif(self.lookAheadToken.getNumeracion() == 5):
self.Expect(5)
result2 = self.Term(result2)
result1 -= result2
result = result1
return result
def Term(self, result):
result1, result2 = 1, 1
result1 = self.Factor(result1)
while self.lookAheadToken.getNumeracion() == 6 or self.lookAheadToken.getNumeracion() == 7:
if(self.lookAheadToken.getNumeracion() == 6):
self.Expect(6)
result2 = self.Factor(result2)
result1 = int(result1)
result2 = int(result2)
result1 *= result2
elif(self.lookAheadToken.getNumeracion() == 7):
self.Expect(7)
result2 = self.Factor(result2)
result1 = int(result1)
result2 = int(result2)
result1 /= result2
result = result1
return result
def Factor(self, result):
sign = 1
if(self.lookAheadToken.getNumeracion() == 5):
self.Expect(5)
sign = -1
if(self.lookAheadToken.getNumeracion() == 2):
result = self.Number(result)
elif(self.lookAheadToken.getNumeracion() == 8):
self.Expect(8)
result = self.Expression(result)
self.Expect(9)
result *= sign
return result
def Number(self, result):
self.Expect(2)
result = self.getNumber()
return result
def Parser(self):
self.GetNewToken()
self.Expr()
def printERROROnScreen(self, tokenId):
for x in self.tokensScaneadosV2:
if(x.getNumeracion() == tokenId):
if(x.getTipoToken() == "ERROR"):
errorPrint = x.getValor()
print(f'{errorPrint} expected')
elif(x.getTipoToken() != "ERROR"):
errorPrint = x.getTipoToken()
print(f'{errorPrint} expected')
obj = parserAlejandro()
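# Hedged note on what this parser accepts, inferred from the Expect/numeration calls
# above: arithmetic expressions over integer tokens with +, -, *, / and parentheses,
# each statement closed by the token mapped to id 3 (presumably ';'), e.g.
#
#     2 + 3 * (4 - 1) ;
#
# The token stream itself is produced by a separate scanner and loaded from the
# pickled files "arrayTokensLeidos" and "diccionarioTokensMapeados".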
|
StarcoderdataPython
|