id | text | dataset_id |
---|---|---|
3378208
|
from meteor_reasoner.graphutil.temporal_dependency_graph import *
from meteor_reasoner.graphutil.topological_sort import *
class Graph:
def __init__(self, program):
self.program = program
self.head2rule = defaultdict(list)
self.initialize()
def initialize(self):
self.build_head_map_rule()
self.split_program()
def split(self, rules, target_predicate):
CF = CycleFinder(program=rules)
CF.SCC()
scc2id = defaultdict(int)
id2scc = defaultdict(set)
predicateid2sccid = dict()
cnt = 0
for scc in CF.SCC_components:
scc2id[tuple(scc)] = cnt
id2scc[cnt] = tuple(scc)
for predicate in scc:
predicateid2sccid[predicate] = cnt
cnt += 1
new_graph = defaultdict(list)
graph = CF.graph
for key, value in graph.items():
key_id = predicateid2sccid[key]
neighbors = []
for neighbor in value:
neighbor_id = predicateid2sccid[neighbor]
if neighbor_id == key_id:
continue
else:
neighbors.append(neighbor_id)
if len(neighbors) > 0:
new_graph[key_id] = new_graph[key_id] + neighbors
TS = TopologicalSorting(new_graph, len(scc2id))
sorted_scc = TS.topologicalSort()
target_id = CF.predicate2id[target_predicate]
non_rescursive_scc = set()
automata_scc = set()
target_predicate_scc_id_index = -1
for i, scc_id in enumerate(sorted_scc):
if target_id in id2scc[scc_id]:
target_predicate_scc_id_index = i
break
if target_predicate_scc_id_index == -1:
raise ValueError("No such predicate")
first_cyclic_scc_id_index = -1
for i, scc_id in enumerate(sorted_scc[:target_predicate_scc_id_index]):
if len(id2scc[scc_id]) > 1:
first_cyclic_scc_id_index = i
if first_cyclic_scc_id_index == -1:
# no cycle before target_predicate_scc_id_index
for scc_id in sorted_scc[:target_predicate_scc_id_index]:
non_rescursive_scc = non_rescursive_scc.union(id2scc[scc_id])
automata_scc = id2scc[sorted_scc[target_predicate_scc_id_index]]
else:
for scc_id in sorted_scc[:first_cyclic_scc_id_index]:
non_rescursive_scc = non_rescursive_scc.union(id2scc[scc_id])
for scc_id in sorted_scc[first_cyclic_scc_id_index:target_predicate_scc_id_index]:
automata_scc = automata_scc.union(id2scc[scc_id])
non_recursive_rules = set()
automata_rules = set()
for predicate_id in non_rescursive_scc:
for rule in self.head2rule[CF.id2predicate[predicate_id]]:
non_recursive_rules.add(rule)
for predicate_id in automata_scc:
for rule in self.head2rule[CF.id2predicate[predicate_id]]:
flag = True
for literal in rule.body:
if isinstance(literal, BinaryLiteral):
if literal.left_atom.get_predicate() not in ["Top", "Bottom"] and CF.predicate2id[literal.left_atom.get_predicate()] not in automata_scc:
flag = False
if literal.right_atom.get_predicate() not in ["Top", "Bottom"] and CF.predicate2id[literal.right_atom.get_predicate()] not in automata_scc:
flag = False
else:
if literal.get_predicate() not in automata_scc:
flag = False
if flag:
automata_rules.add(rule)
else:
non_recursive_rules.add(rule)
return non_recursive_rules, automata_rules
def build_head_map_rule(self):
for rule in self.program:
head = rule.head
self.head2rule[head.get_predicate()].append(rule)
def split_program(self):
for rule in self.program:
head_predicate = rule.head.get_predicate()
self.cluster.add(head_predicate)
for rule in self.program:
head_predicate = rule.head.get_predicate()
for literal in rule.body:
if (isinstance(literal, Literal) or isinstance(literal, Atom)) and literal.get_predicate() in self.cluster.predicate2id:
self.cluster.addedge(head_predicate, literal.get_predicate())
elif isinstance(literal, BinaryLiteral):
if literal.left_atom.get_predicate() in self.cluster.predicate2id:
if literal.left_atom.get_predicate() == head_predicate:
self.cluster.addedge(head_predicate, literal.left_atom.get_predicate() + "_")
self.cluster.addedge(literal.left_atom.get_predicate() + "_", head_predicate)
else:
self.cluster.addedge(literal.left_atom.get_predicate(), head_predicate)
if literal.right_atom.get_predicate() in self.cluster.predicate2id:
if literal.right_atom.get_predicate() == head_predicate:
self.cluster.addedge(head_predicate, literal.right_atom.get_predicate() + "_")
self.cluster.addedge(literal.right_atom.get_predicate() + "_", head_predicate)
else:
self.cluster.addedge(literal.right_atom.get_predicate(), head_predicate)
def split_rules(self, target_predicate):
predicates = self.cluster.get_connected_predicates(target_predicate)
rules = set()
for predicate in predicates:
rules = rules.union(set(self.head2rule[predicate]))
return self.split(rules, target_predicate)
|
StarcoderdataPython
|
165352
|
EMBED_SIZE = 200
NUM_LAYERS = 2
LR = 0.0001
MAX_GRAD_NORM = 5.0
PAD_ID = 0
UNK_ID = 1
START_ID = 2
EOS_ID = 3
CONV_SIZE = 3
# sanity
# BUCKETS = [(55, 50)]
# BATCH_SIZE = 10
# NUM_EPOCHS = 50
# NUM_SAMPLES = 498
# HIDDEN_SIZE = 400
# test
BUCKETS = [(30, 30), (55, 50)]
BATCH_SIZE = 20
NUM_EPOCHS = 3
NUM_SAMPLES = 498
HIDDEN_SIZE = 400
# experiment 1
# BUCKETS = [(16, 28), (31, 28), (51, 28)]
# BATCH_SIZE = 400
# NUM_EPOCHS = 5
# NUM_SAMPLES = 40960
# HIDDEN_SIZE = 400
# experiment 2
# BUCKETS = [(102, 28)]
# BATCH_SIZE = 300
# NUM_EPOCHS = 5
# NUM_SAMPLES = 40960
# HIDDEN_SIZE = 250
|
StarcoderdataPython
|
1738899
|
import unittest
from myhdl_lib import *
from t_hsd_custom import t_hsd_custom
class Test_hsd_custom(t_hsd_custom):
'''|
| The main class for unit-testing. Add your tests here.
|________'''
def __init__(self):
# call base class constructor
t_hsd_custom.__init__(self)
# Automatically executed BEFORE every TestCase
def setUp(self):
t_hsd_custom.setUp(self)
# Automatically executed AFTER every TestCase
def tearDown(self):
t_hsd_custom.tearDown(self)
# generate stimuli and reference data:
def use_data_set_1(self):
fields_in = { 'cmd': 1, 'port': 2000 }
self.stim_rx_port_flds.append( fields_in )
self.ref_tx_port_flds.append( fields_in )
fields_in = { 'cmd': 0, 'port': 3000 }
self.stim_rx_port_flds.append( fields_in )
self.ref_tx_port_flds.append( fields_in )
self.run_it()
# ----------------------------------------------------------------------------
# @unittest.skip("")
def test_001(self):
""" >>>>>> TEST_001: Dump data to files """
self.models = {"top":self.BEH}
# Set fdump to True in order to generate test vector files for the global interfaces
self.tb_config = {"simulation_time":200, "cosimulation":False, "trace":False, "fdump":True}
self.use_data_set_1()
# ----------------------------------------------------------------------------
# @unittest.skip("")
def test_002(self):
""" >>>>>> TEST_002: The same stimuli as in test_001 """
self.models = {"top":self.RTL}
self.tb_config = {"simulation_time":150, "cosimulation":False, "trace":True, "fdump":False}
self.use_data_set_1()
# ----------------------------------------------------------------------------
# @unittest.skip("")
def test_003(self):
""" >>>>>> TEST_003: The same stimuli as in test_001 """
self.models = {"top":self.RTL}
self.tb_config = {"simulation_time":150, "cosimulation":True, "trace":True, "fdump":False}
self.use_data_set_1()
# ----------------------------------------------------------------------------
    @unittest.skip("")
def test_004(self):
""" >>>>>> TEST_004: The same stimuli as in test_001 but all stimuli and expected results read from files """
self.models = {"top":self.RTL}
self.tb_config = {"simulation_time":150, "cosimulation":False, "trace":True, "fdump":False}
self.use_data_from_files()
# ----------------------------------------------------------------------------
    # @unittest.skip("")
def test_005(self):
""" >>>>>> TEST_005: The same stimuli as in test_001 but all stimuli and expected results read from files """
self.models = {"top":self.RTL}
self.tb_config = {"simulation_time":"auto", "cosimulation":True, "trace":False, "fdump":False}
self.use_data_from_files()
|
StarcoderdataPython
|
3333309
|
<reponame>aglines/gympopulation
import unittest
from secrets import *
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import re
import sqlite3
import time
from datetime import datetime
db = sqlite3.connect(dbLocation)
cursor = db.cursor()
def logEvent(currEventStatus):
stringifiedCurrTime = str(datetime.now())
cursor.execute('''INSERT INTO eventlog (timestamp, event)
VALUES (?,?)''', (stringifiedCurrTime, currEventStatus))
db.commit()
# get webdriver/chrome to start
try:
driver = webdriver.Chrome(driverLocation)
except:
logEvent('webdriver error')
# get to the URL
try:
mainUrl = 'https://www.puregym.com/login/'
driver.get(mainUrl)
except:
errorLog = 'Error while getting mainURL'
logEvent(errorLog)
# log in
try:
loginField = driver.find_element_by_xpath('//*[@id="email"]')
passwordField = driver.find_element_by_xpath('//*[@id="pin"]')
submitButton = driver.find_element_by_xpath('//*[@id="login-submit"]')
loginField.send_keys(username)
    passwordField.send_keys(password)  # variable name assumed; credentials come from the secrets star-import
submitButton.click()
except:
errorLog = 'Error during login'
logEvent(errorLog)
driver.implicitly_wait(5)
# get past survey if it exists
try:
skipButton = driver.find_element_by_xpath('//*[@id="canSkip"]')
skipButton.click()
notActuallyAnError = 'Survey found. '
# logEvent(notActuallyAnError)
except:
errorLog = 'survey not encountered, nothing to log'
# logEvent(errorLog)
# scrape data
try:
currPop = driver.find_element_by_xpath('//*[@id="main-content"]/div[2]/div/div/div[1]/div/div/div/div[1]/div/p[1]/span')
except:
errorLog = 'Error during data scrape'
logEvent(errorLog)
pattern = re.compile(r'\d+')
patternFewer = re.compile('Fewer than 20 people')
patternMore = re.compile('More than .')
matchedNumber = pattern.match(currPop.text)
matchedFewer = patternFewer.match(currPop.text)
matchedMore = patternMore.match(currPop.text)
if (matchedNumber):
currPop = matchedNumber.group()
elif (matchedFewer):
currPop = matchedFewer.group()
elif (matchedMore) :
currPop = matchedMore.group()
try:
currTime = time.time()
stringifiedCurrTime = str(datetime.now())
cursor.execute('''INSERT INTO times (timestamp, pop, readabletimestamp)
VALUES (?,?,?)''', (currTime, currPop, stringifiedCurrTime))
db.commit()
logEvent('All OK. Current pop: ' + currPop)
except:
errorLog = 'error during db insert statement'
logEvent(errorLog)
#shut it down
db.close()
driver.quit()
|
StarcoderdataPython
|
4801549
|
<filename>Scripts/simulation/reputation/reputation_tuning.py
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\reputation\reputation_tuning.py
# Compiled at: 2018-07-17 19:54:43
# Size of source mod 2**32: 901 bytes
from sims4.tuning.tunable import TunablePackSafeReference
from sims4.tuning.tunable_base import ExportModes
import services, sims4.resources
class ReputationTunables:
REPUTATION_RANKED_STATISTIC = TunablePackSafeReference(description='\n The ranked statistic that is to be used for tracking reputation progress.\n \n This should not need to be tuned at all. If you think you need to tune\n this please speak with a GPE before doing so.\n ',
manager=(services.get_instance_manager(sims4.resources.Types.STATISTIC)),
class_restrictions=('RankedStatistic', ),
export_modes=(
ExportModes.ClientBinary,))
|
StarcoderdataPython
|
1635848
|
<filename>src/reparsec/core/sequence.py<gh_stars>1-10
from typing import Callable, Optional, Sequence, Sized, TypeVar
from .parser import ParseFn
from .repair import make_insert, make_pending_skip, make_skip
from .result import Error, Ok, Recovered, Result
from .types import Ctx, RecoveryMode
T = TypeVar("T")
def eof() -> ParseFn[Sized, None]:
def eof(
stream: Sized, pos: int, ctx: Ctx[Sized],
rm: RecoveryMode) -> Result[None, Sized]:
if pos == len(stream):
return Ok(None, pos, ctx)
loc = ctx.get_loc(stream, pos)
if rm:
sl = len(stream)
return Recovered(
None,
make_pending_skip(
None, sl, ctx, loc, sl - pos, ["end of file"]
),
loc, ["end of file"]
)
return Error(loc, ["end of file"])
return eof
def satisfy(test: Callable[[T], bool]) -> ParseFn[Sequence[T], T]:
def satisfy(
stream: Sequence[T], pos: int, ctx: Ctx[Sequence[T]],
rm: RecoveryMode) -> Result[T, Sequence[T]]:
if pos < len(stream):
t = stream[pos]
if test(t):
return Ok(t, pos + 1, ctx, (), True)
loc = ctx.get_loc(stream, pos)
if rm:
cur = pos + 1
while cur < len(stream):
t = stream[cur]
if test(t):
return Recovered(
make_skip(
cur, t, cur + 1, ctx.update_loc(stream, cur + 1),
loc, cur - pos
),
None, loc
)
cur += 1
return Error(loc)
return satisfy
def sym(s: T, label: Optional[str] = None) -> ParseFn[Sequence[T], T]:
if label is None:
label_ = repr(s)
else:
label_ = label
expected = [label_]
def sym(
stream: Sequence[T], pos: int, ctx: Ctx[Sequence[T]],
rm: RecoveryMode) -> Result[T, Sequence[T]]:
if pos < len(stream):
t = stream[pos]
if t == s:
return Ok(t, pos + 1, ctx, (), True)
loc = ctx.get_loc(stream, pos)
if rm:
pending = (
make_insert(s, pos, ctx, loc, label_, expected) if rm[0]
else None
)
cur = pos + 1
while cur < len(stream):
t = stream[cur]
if t == s:
sel = make_skip(
cur, t, cur + 1, ctx.update_loc(stream, cur + 1),
loc, cur - pos, expected
)
return Recovered(sel, pending, loc, expected)
cur += 1
return Recovered(None, pending, loc, expected)
return Error(loc, expected)
return sym
|
StarcoderdataPython
|
3399454
|
<reponame>elfgzp/leetCode<gh_stars>1-10
#
# @lc app=leetcode.cn id=485 lang=python3
#
# [485] Max Consecutive Ones
#
# https://leetcode-cn.com/problems/max-consecutive-ones/description/
#
# algorithms
# Easy (51.75%)
# Total Accepted: 8.7K
# Total Submissions: 16.7K
# Testcase Example: '[1,0,1,1,0,1]'
#
# Given a binary array, find the maximum number of consecutive 1s in it.
#
# Example 1:
#
#
# Input: [1,1,0,1,1,1]
# Output: 3
# Explanation: The first two digits and the last three digits are consecutive 1s, so the maximum number of consecutive 1s is 3.
#
#
# Note:
#
#
# The input array contains only 0 and 1.
# The length of the input array is a positive integer and does not exceed 10,000.
#
#
#
from typing import List
class Solution:
def findMaxConsecutiveOnes(self, nums: List[int]) -> int:
res = 0
i = 0
for n in nums:
if n:
i += 1
res = max(i, res)
else:
i = 0
return res
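# A quick self-check against the example from the problem statement above
# (added for illustration; not part of the original submission).
if __name__ == "__main__":
    assert Solution().findMaxConsecutiveOnes([1, 1, 0, 1, 1, 1]) == 3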
|
StarcoderdataPython
|
187028
|
<reponame>rundhall/ESP-LEGO-SPIKE-Simulator
from spike import ForceSensor, Motor
# Initialize the Force Sensor, a motor, and a variable
force_sensor = ForceSensor('B')
motor = Motor('C')
count = 0
# You can press the Force Sensor 5 times
motor.set_default_speed(25)
while count < 5:
force_sensor.wait_until_pressed()
motor.start()
force_sensor.wait_until_released()
motor.stop()
count = count + 1
# This condition will always be true, so it will loop forever
while True:
# Measure the force in newtons or as a percentage
percentage = force_sensor.get_force_percentage()
# Use the measured force to start the motor
motor.start(percentage)
|
StarcoderdataPython
|
26682
|
<reponame>zzztimbo/dagster
import sys
from dagster_graphql.schema.pipelines import DauphinPipeline, DauphinPipelineSnapshot
from graphql.execution.base import ResolveInfo
from dagster import check
from dagster.core.definitions.pipeline import ExecutionSelector
from dagster.core.errors import DagsterInvalidDefinitionError
from dagster.utils.error import serializable_error_info_from_exc_info
from .utils import UserFacingGraphQLError, capture_dauphin_error
@capture_dauphin_error
def get_pipeline_snapshot_or_error(graphene_info, subset_id):
check.str_param(subset_id, 'subset_id')
selector = ExecutionSelector(subset_id)
pipeline_def = get_pipeline_def_from_selector(graphene_info, selector)
return DauphinPipelineSnapshot(pipeline_def.get_pipeline_index())
@capture_dauphin_error
def get_pipeline_or_error(graphene_info, selector):
'''Returns a DauphinPipelineOrError.'''
return DauphinPipeline(get_pipeline_def_from_selector(graphene_info, selector))
def get_pipeline_or_raise(graphene_info, selector):
'''Returns a DauphinPipeline or raises a UserFacingGraphQLError if one cannot be retrieved
from the selector, e.g., the pipeline is not present in the loaded repository.'''
return DauphinPipeline(get_pipeline_def_from_selector(graphene_info, selector))
def get_pipeline_reference_or_raise(graphene_info, selector):
'''Returns a DauphinPipelineReference or raises a UserFacingGraphQLError if a pipeline
reference cannot be retrieved from the selector, e.g, a UserFacingGraphQLError that wraps an
InvalidSubsetError.'''
return get_dauphin_pipeline_reference_from_selector(graphene_info, selector)
@capture_dauphin_error
def get_pipelines_or_error(graphene_info):
check.inst_param(graphene_info, 'graphene_info', ResolveInfo)
return _get_pipelines(graphene_info)
def get_pipelines_or_raise(graphene_info):
check.inst_param(graphene_info, 'graphene_info', ResolveInfo)
return _get_pipelines(graphene_info)
def _get_pipelines(graphene_info):
check.inst_param(graphene_info, 'graphene_info', ResolveInfo)
repository = graphene_info.context.get_repository()
pipeline_instances = []
for pipeline_def in repository.get_all_pipelines():
pipeline_instances.append(graphene_info.schema.type_named('Pipeline')(pipeline_def))
return graphene_info.schema.type_named('PipelineConnection')(
nodes=sorted(pipeline_instances, key=lambda pipeline: pipeline.name)
)
def get_pipeline_def_from_selector(graphene_info, selector):
check.inst_param(graphene_info, 'graphene_info', ResolveInfo)
check.inst_param(selector, 'selector', ExecutionSelector)
repository = graphene_info.context.get_repository()
if not repository.has_pipeline(selector.name):
raise UserFacingGraphQLError(
graphene_info.schema.type_named('PipelineNotFoundError')(pipeline_name=selector.name)
)
orig_pipeline = graphene_info.context.get_pipeline(selector.name)
if not selector.solid_subset:
return orig_pipeline
else:
for solid_name in selector.solid_subset:
if not orig_pipeline.has_solid_named(solid_name):
raise UserFacingGraphQLError(
graphene_info.schema.type_named('InvalidSubsetError')(
message='Solid "{solid_name}" does not exist in "{pipeline_name}"'.format(
solid_name=solid_name, pipeline_name=selector.name
),
pipeline=graphene_info.schema.type_named('Pipeline')(orig_pipeline),
)
)
try:
return orig_pipeline.build_sub_pipeline(selector.solid_subset)
except DagsterInvalidDefinitionError:
raise UserFacingGraphQLError(
graphene_info.schema.type_named('InvalidSubsetError')(
message=serializable_error_info_from_exc_info(sys.exc_info()).message,
pipeline=graphene_info.schema.type_named('Pipeline')(orig_pipeline),
)
)
def get_dauphin_pipeline_reference_from_selector(graphene_info, selector):
from ..schema.errors import DauphinPipelineNotFoundError, DauphinInvalidSubsetError
check.inst_param(graphene_info, 'graphene_info', ResolveInfo)
check.inst_param(selector, 'selector', ExecutionSelector)
try:
return graphene_info.schema.type_named('Pipeline')(
get_pipeline_def_from_selector(graphene_info, selector)
)
except UserFacingGraphQLError as exc:
if (
isinstance(exc.dauphin_error, DauphinPipelineNotFoundError)
or
# At this time DauphinPipeline represents a potentially subsetted
# pipeline so if the solids used to subset no longer exist
# we can't return the correct instance so we fallback to
# UnknownPipeline
isinstance(exc.dauphin_error, DauphinInvalidSubsetError)
):
return graphene_info.schema.type_named('UnknownPipeline')(selector.name)
raise
|
StarcoderdataPython
|
1704284
|
import os
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["OPENBLAS_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["VECLIB_MAXIMUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
import copy
import logging
import pandas as pd
import multiprocessing as mp
from ..orbit import TestOrbit
from ..utils import Timeout
from ..utils import _initWorker
from ..utils import _checkParallel
logger = logging.getLogger(__name__)
__all__ = [
"Backend"
]
TIMEOUT = 30
def propagation_worker(orbits, t1, backend):
with Timeout(seconds=TIMEOUT):
try:
propagated = backend._propagateOrbits(orbits, t1)
except TimeoutError:
logger.critical("Propagation timed out on orbit IDs (showing first 5): {}".format(orbits.ids[:5]))
propagated = pd.DataFrame()
return propagated
def ephemeris_worker(orbits, observers, backend):
with Timeout(seconds=TIMEOUT):
try:
ephemeris = backend._generateEphemeris(orbits, observers)
except TimeoutError:
logger.critical("Ephemeris generation timed out on orbit IDs (showing first 5): {}".format(orbits.ids[:5]))
ephemeris = pd.DataFrame()
return ephemeris
def orbitDetermination_worker(observations, backend):
with Timeout(seconds=TIMEOUT):
try:
orbits = backend._orbitDetermination(observations)
except TimeoutError:
logger.critical("Orbit determination timed out on observations (showing first 5): {}".format(observations["obs_id"].values[:5]))
orbits = pd.DataFrame()
return orbits
def projectEphemeris_worker(ephemeris, test_orbit_ephemeris):
assert len(ephemeris["mjd_utc"].unique()) == 1
assert len(test_orbit_ephemeris["mjd_utc"].unique()) == 1
assert ephemeris["mjd_utc"].unique()[0] == test_orbit_ephemeris["mjd_utc"].unique()[0]
observation_time = ephemeris["mjd_utc"].unique()[0]
# Create test orbit with state of orbit at visit time
test_orbit = TestOrbit(
test_orbit_ephemeris[["obj_x", "obj_y", "obj_z", "obj_vx", "obj_vy", "obj_vz"]].values[0],
observation_time
)
# Prepare rotation matrices
test_orbit.prepare()
# Apply rotation matrices and transform observations into the orbit's
# frame of motion.
test_orbit.applyToEphemeris(ephemeris)
return ephemeris
class Backend:
def __init__(self, name="Backend", **kwargs):
self.__dict__.update(kwargs)
self.name = name
self.is_setup = False
return
def setup(self):
return
def _propagateOrbits(self, orbits, t1):
"""
Propagate orbits from t0 to t1.
THIS FUNCTION SHOULD BE DEFINED BY THE USER.
"""
err = (
"This backend does not have orbit propagation implemented."
)
raise NotImplementedError(err)
def propagateOrbits(
self,
orbits,
t1,
chunk_size=100,
num_jobs=1,
parallel_backend="mp"
):
"""
Propagate each orbit in orbits to each time in t1.
Parameters
----------
orbits : `~thor.orbits.orbits.Orbits`
Orbits to propagate.
t1 : `~astropy.time.core.Time`
Times to which to propagate each orbit.
chunk_size : int, optional
Number of orbits to send to each job.
num_jobs : int, optional
Number of jobs to launch.
parallel_backend : str, optional
Which parallelization backend to use {'ray', 'mp'}. Defaults to using Python's multiprocessing
module ('mp').
Returns
-------
propagated : `~pandas.DataFrame`
Propagated orbits with at least the following columns:
orbit_id : Input orbit ID.
mjd_tdb : Time at which state is defined in MJD TDB.
x, y, z, vx, vy, vz : Orbit as cartesian state vector with units
of au and au per day.
"""
parallel, num_workers = _checkParallel(num_jobs, parallel_backend)
if parallel:
orbits_split = orbits.split(chunk_size)
t1_duplicated = [copy.deepcopy(t1) for i in range(len(orbits_split))]
backend_duplicated = [copy.deepcopy(self) for i in range(len(orbits_split))]
if parallel_backend == "ray":
import ray
if not ray.is_initialized():
ray.init(address="auto")
propagation_worker_ray = ray.remote(propagation_worker)
propagation_worker_ray.options(
num_returns=1,
num_cpus=1
)
p = []
for o, t, b in zip(orbits_split, t1_duplicated, backend_duplicated):
p.append(propagation_worker_ray.remote(o, t, b))
propagated_dfs = ray.get(p)
else: # parallel_backend == "mp"
p = mp.Pool(
processes=num_workers,
initializer=_initWorker,
)
propagated_dfs = p.starmap(
propagation_worker,
zip(
orbits_split,
t1_duplicated,
backend_duplicated,
)
)
p.close()
propagated = pd.concat(propagated_dfs)
propagated.reset_index(
drop=True,
inplace=True
)
else:
propagated = self._propagateOrbits(
orbits,
t1
)
return propagated
def _generateEphemeris(self, orbits, observers):
"""
Generate ephemerides for the given orbits as observed by
the observers.
THIS FUNCTION SHOULD BE DEFINED BY THE USER.
"""
err = (
"This backend does not have ephemeris generation implemented."
)
raise NotImplementedError(err)
def generateEphemeris(
self,
orbits,
observers,
test_orbit=None,
chunk_size=100,
num_jobs=1,
parallel_backend="mp"
):
"""
Generate ephemerides for each orbit in orbits as observed by each observer
in observers.
Parameters
----------
orbits : `~thor.orbits.orbits.Orbits`
Orbits for which to generate ephemerides.
observers : dict or `~pandas.DataFrame`
A dictionary with observatory codes as keys and observation_times (`~astropy.time.core.Time`) as values.
test_orbit : `~thor.orbits.orbits.Orbits`
Test orbit to use to generate projected coordinates.
chunk_size : int, optional
Number of orbits to send to each job.
num_jobs : int, optional
Number of jobs to launch.
parallel_backend : str, optional
Which parallelization backend to use {'ray', 'mp'}. Defaults to using Python's multiprocessing
module ('mp').
Returns
-------
ephemeris : `~pandas.DataFrame`
Ephemerides with at least the following columns:
orbit_id : Input orbit ID
observatory_code : Observatory's MPC code.
mjd_utc : Observation time in MJD UTC.
RA : Right Ascension in decimal degrees.
Dec : Declination in decimal degrees.
"""
parallel, num_workers = _checkParallel(num_jobs, parallel_backend)
if parallel:
orbits_split = orbits.split(chunk_size)
observers_duplicated = [copy.deepcopy(observers) for i in range(len(orbits_split))]
backend_duplicated = [copy.deepcopy(self) for i in range(len(orbits_split))]
if parallel_backend == "ray":
import ray
if not ray.is_initialized():
ray.init(address="auto")
ephemeris_worker_ray = ray.remote(ephemeris_worker)
ephemeris_worker_ray.options(
num_returns=1,
num_cpus=1
)
p = []
for o, t, b in zip(orbits_split, observers_duplicated, backend_duplicated):
p.append(ephemeris_worker_ray.remote(o, t, b))
ephemeris_dfs = ray.get(p)
else: # parallel_backend == "mp"
p = mp.Pool(
processes=num_workers,
initializer=_initWorker,
)
ephemeris_dfs = p.starmap(
ephemeris_worker,
zip(
orbits_split,
observers_duplicated,
backend_duplicated,
)
)
p.close()
ephemeris = pd.concat(ephemeris_dfs)
ephemeris.reset_index(
drop=True,
inplace=True
)
else:
ephemeris = self._generateEphemeris(
orbits,
observers
)
if test_orbit is not None:
test_orbit_ephemeris = self._generateEphemeris(
test_orbit,
observers
)
ephemeris_grouped = ephemeris.groupby(by=["observatory_code", "mjd_utc"])
ephemeris_split = [ephemeris_grouped.get_group(g).copy() for g in ephemeris_grouped.groups]
test_orbit_ephemeris_grouped = test_orbit_ephemeris.groupby(by=["observatory_code", "mjd_utc"])
test_orbit_ephemeris_split = [test_orbit_ephemeris_grouped.get_group(g) for g in test_orbit_ephemeris_grouped.groups]
if num_jobs > 1:
if parallel_backend == "ray":
projectEphemeris_worker_ray = ray.remote(projectEphemeris_worker)
projectEphemeris_worker_ray = projectEphemeris_worker_ray.options(
num_returns=1,
num_cpus=1
)
p = []
for e, te in zip(ephemeris_split, test_orbit_ephemeris_split):
p.append(projectEphemeris_worker_ray.remote(e, te))
ephemeris_dfs = ray.get(p)
else: # parallel_backend == "mp"
p = mp.Pool(
processes=num_workers,
initializer=_initWorker,
)
ephemeris_dfs = p.starmap(
projectEphemeris_worker,
zip(
ephemeris_split,
test_orbit_ephemeris_split
)
)
p.close()
else:
ephemeris_dfs = []
for e, te in zip(ephemeris_split, test_orbit_ephemeris_split):
ephemeris_df = projectEphemeris_worker(e, te)
ephemeris_dfs.append(ephemeris_df)
ephemeris = pd.concat(ephemeris_dfs)
ephemeris.reset_index(
drop=True,
inplace=True
)
ephemeris.sort_values(
by=["orbit_id", "observatory_code", "mjd_utc"],
inplace=True,
ignore_index=True
)
return ephemeris
def _orbitDetermination(self):
err = (
"This backend does not have orbit determination implemented."
)
raise NotImplementedError(err)
def orbitDetermination(
self,
observations,
chunk_size=10,
num_jobs=1,
parallel_backend="mp"
):
"""
Run orbit determination on the input observations. These observations
must at least contain the following columns:
obj_id : Object ID
mjd_utc : Observation time in MJD UTC.
RA_deg : Topocentric Right Ascension in decimal degrees.
Dec_deg : Topocentric Declination in decimal degrees.
sigma_RA_deg : 1-sigma uncertainty in RA.
sigma_Dec_deg : 1-sigma uncertainty in Dec.
observatory_code : MPC observatory code.
Parameters
----------
num_jobs : int, optional
Number of jobs to launch.
parallel_backend : str, optional
Which parallelization backend to use {'ray', 'mp'}. Defaults to using Python's multiprocessing
module ('mp').
"""
unique_objs = observations["obj_id"].unique()
observations_split = [observations[observations["obj_id"].isin(unique_objs[i:i+chunk_size])].copy() for i in range(0, len(unique_objs), chunk_size)]
backend_duplicated = [copy.deepcopy(self) for i in range(len(observations_split))]
parallel, num_workers = _checkParallel(num_jobs, parallel_backend)
if parallel_backend == "ray":
import ray
if not ray.is_initialized():
ray.init(address="auto")
orbitDetermination_worker_ray = ray.remote(orbitDetermination_worker)
orbitDetermination_worker_ray = orbitDetermination_worker_ray.options(
num_returns=1,
num_cpus=1
)
od = []
for o, b in zip(observations_split, backend_duplicated):
od.append(orbitDetermination_worker_ray.remote(o, b))
od_orbits_dfs = ray.get(od)
else: # parallel_backend == "mp"
p = mp.Pool(
processes=num_workers,
initializer=_initWorker,
)
od_orbits_dfs = p.starmap(
orbitDetermination_worker,
zip(
observations_split,
backend_duplicated,
)
)
p.close()
od_orbits = pd.concat(od_orbits_dfs, ignore_index=True)
return od_orbits
def _getObserverState(self, observers, origin="heliocenter"):
err = (
"This backend does not have observer state calculations implemented."
)
raise NotImplementedError(err)
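# Illustrative subclass sketch (added for clarity; "MyBackend" and its trivial return
# value are assumptions, not part of this module): a concrete backend only needs to
# override the underscore-prefixed hooks, e.g.
#
# class MyBackend(Backend):
#     def _propagateOrbits(self, orbits, t1):
#         # must return a pandas DataFrame with orbit_id, mjd_tdb, x, y, z, vx, vy, vz
#         return pd.DataFrame()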
|
StarcoderdataPython
|
3222173
|
from denodoclient.dataframes.denododataframeclient import DenodoDataFrameClient
DenodoDataFrameClient = DenodoDataFrameClient
|
StarcoderdataPython
|
3235788
|
import numpy as np
import cmath
from math import sqrt
def pau_x():
p_x=np.array([[0,1],[1,0]])
return p_x
def pau_y():
p_y=np.array([[0,-(cmath.sqrt(-1))],[(cmath.sqrt(-1)),0]])
return p_y
def pau_z():
p_z=np.array([[1,0],[0,-1]])
return p_z
def hada():
h=(1/sqrt(2))*(np.array([[1,1],[1,-1]]))
return h
def cnot():
cn=np.array([[1,0,0,0],[0,1,0,0],[0,0,0,1],[0,0,1,0]])
return cn
def tofo():
tof=np.array([[1,0,0,0,0,0,0,0],[0,1,0,0,0,0,0,0],[0,0,1,0,0,0,0,0],[0,0,0,1,0,0,0,0],[0,0,0,0,1,0,0,0],[0,0,0,0,0,1,0,0],[0,0,0,0,0,0,0,1],[0,0,0,0,0,0,1,0]])
return tof
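# Minimal usage sketch (added for illustration, using only the helpers defined above):
# apply the Hadamard gate to |0> and check that CNOT maps |10> to |11>.
if __name__ == "__main__":
    ket0 = np.array([1, 0])
    print(hada() @ ket0)            # equal superposition: [0.7071..., 0.7071...]
    ket10 = np.array([0, 0, 1, 0])  # |10> in the computational basis
    print(cnot() @ ket10)           # target qubit flipped: |11> = [0, 0, 0, 1]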
|
StarcoderdataPython
|
1610252
|
# This file defines how PyOxidizer application building and packaging is
# performed. See PyOxidizer's documentation at
# https://pyoxidizer.readthedocs.io/en/stable/ for details of this
# configuration file format.
def make_exe():
# Obtain the default PythonDistribution for our build target. We link
# this distribution into our produced executable and extract the Python
# standard library from it.
dist = default_python_distribution()
# This function creates a `PythonPackagingPolicy` instance, which
# influences how executables are built and how resources are added to
# the executable. You can customize the default behavior by assigning
# to attributes and calling functions.
policy = dist.make_python_packaging_policy()
# Enable support for non-classified "file" resources to be added to
# resource collections.
# policy.allow_files = True
# Control support for loading Python extensions and other shared libraries
# from memory. This is only supported on Windows and is ignored on other
# platforms.
policy.allow_in_memory_shared_library_loading = True
# Control whether to generate Python bytecode at various optimization
# levels. The default optimization level used by Python is 0.
# policy.bytecode_optimize_level_zero = True
# policy.bytecode_optimize_level_one = True
policy.bytecode_optimize_level_two = True
# Package all available Python extensions in the distribution.
policy.extension_module_filter = "all"
# Package the minimum set of Python extensions in the distribution needed
# to run a Python interpreter. Various functionality from the Python
# standard library won't work with this setting! But it can be used to
# reduce the size of generated executables by omitting unused extensions.
# policy.extension_module_filter = "no-copyleft"
# Package Python extensions in the distribution not having additional
# library dependencies. This will exclude working support for SSL,
# compression formats, and other functionality.
# policy.extension_module_filter = "no-libraries"
# Controls whether `File` instances are emitted by the file scanner.
# policy.file_scanner_emit_files = False
# Controls the `add_include` attribute of "classified" resources
# (`PythonModuleSource`, `PythonPackageResource`, etc).
# policy.include_classified_resources = True
# Toggle whether Python module source code for modules in the Python
# distribution's standard library are included.
# policy.include_distribution_sources = False
# Toggle whether Python package resource files for the Python standard
# library are included.
# policy.include_distribution_resources = False
# Controls the `add_include` attribute of `File` resources.
# policy.include_file_resources = False
# Controls the `add_include` attribute of `PythonModuleSource` not in
# the standard library.
policy.include_non_distribution_sources = False
# Toggle whether files associated with tests are included.
policy.include_test = False
# Use in-memory location for adding resources by default.
policy.resources_location = "in-memory"
# Attempt to add resources relative to the built binary when
# `resources_location` fails.
policy.resources_location_fallback = "filesystem-relative:prefix"
# The configuration of the embedded Python interpreter can be modified
# by setting attributes on the instance. Some of these are
# documented below.
python_config = dist.make_python_interpreter_config()
# Evaluate a string as Python code when the interpreter starts.
python_config.run_command = "from gefyra.__main__ import main; main()"
# Produce a PythonExecutable from a Python distribution, embedded
# resources, and other options. The returned object represents the
# standalone executable that will be built.
exe = dist.to_python_executable(
name="gefyra",
# If no argument passed, the default `PythonPackagingPolicy` for the
# distribution is used.
packaging_policy=policy,
# If no argument passed, the default `PythonInterpreterConfig` is used.
config=python_config,
)
# Invoke `pip install` using a requirements file and add the collected resources
# to our binary.
exe.add_python_resources(exe.pip_install(["docker==5.0.3", "kubernetes==19.15.0"]))
# Read Python files from a local directory and add them to our embedded
# context, taking just the resources belonging to the `foo` and `bar`
# Python packages.
exe.add_python_resources(exe.read_package_root(
path=".",
packages=["gefyra"],
))
return exe
def make_embedded_resources(exe):
return exe.to_embedded_resources()
def make_install(exe):
# Create an object that represents our installed application file layout.
files = FileManifest()
# Add the generated executable to our install layout in the root directory.
files.add_python_resource(".", exe)
return files
def make_msi(exe):
# See the full docs for more. But this will convert your Python executable
# into a `WiXMSIBuilder` Starlark type, which will be converted to a Windows
# .msi installer when it is built.
return exe.to_wix_msi_builder(
# Simple identifier of your app.
"myapp",
# The name of your application.
"My Application",
# The version of your application.
"1.0",
# The author/manufacturer of your application.
"<NAME>"
)
# Dynamically enable automatic code signing.
def register_code_signers():
# You will need to run with `pyoxidizer build --var ENABLE_CODE_SIGNING 1` for
# this if block to be evaluated.
if not VARS.get("ENABLE_CODE_SIGNING"):
return
# Use a code signing certificate in a .pfx/.p12 file, prompting the
# user for its path and password to open.
# pfx_path = prompt_input("path to code signing certificate file")
# pfx_password = prompt_password(
# "password for code signing certificate file",
# confirm = True
# )
# signer = code_signer_from_pfx_file(pfx_path, pfx_password)
# Use a code signing certificate in the Windows certificate store, specified
# by its SHA-1 thumbprint. (This allows you to use YubiKeys and other
# hardware tokens if they speak to the Windows certificate APIs.)
# sha1_thumbprint = prompt_input(
# "SHA-1 thumbprint of code signing certificate in Windows store"
# )
# signer = code_signer_from_windows_store_sha1_thumbprint(sha1_thumbprint)
# Choose a code signing certificate automatically from the Windows
# certificate store.
# signer = code_signer_from_windows_store_auto()
# Activate your signer so it gets called automatically.
# signer.activate()
# Call our function to set up automatic code signers.
register_code_signers()
# Tell PyOxidizer about the build targets defined above.
register_target("exe", make_exe)
register_target("resources", make_embedded_resources, depends=["exe"], default_build_script=True)
register_target("install", make_install, depends=["exe"], default=True)
register_target("msi_installer", make_msi, depends=["exe"])
# Resolve whatever targets the invoker of this configuration file is requesting
# be resolved.
resolve_targets()
|
StarcoderdataPython
|
87950
|
<reponame>Sirruthf/stuff<gh_stars>0
from django.shortcuts import render, redirect
from django.http import HttpRequest, HttpResponse
from django.contrib.auth import authenticate, login, logout, update_session_auth_hash
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth.models import User
from main import yandex_oauth
from authentication import forms as f
from authentication.models import CustomUser
# Create your views here.
def confirm(request):
return render(request, template_name='confirm.html')
def reset_password(request):
return render(request, template_name='reset_password.html')
def log_out(request):
logout(request)
return redirect('/')
def log_in(request):
if request.method == 'POST':
print(request.POST)
user = authenticate(username=request.POST['username'], password=request.POST['password'])
if user is not None:
login(request, user)
return render(request, template_name='index.html', context={'request': request})
return render(request, template_name='login.html')
def register(request):
if request.method == 'POST':
formUser = f.FormReg(request.POST)
if formUser.is_valid():
formUser.save()
NewCustomUser = CustomUser(user=formUser.instance)
NewCustomUser.save()
return render(request, template_name='reg.html', context={'form': formUser})
elif 'code' in request.GET:
auth_code = request.GET.get('code')
auth_token = yandex_oauth.get_oauth_json(auth_code)['access_token']
user_json = yandex_oauth.get_account_info(auth_token)
return render(request, template_name='reg.html', context={'login':user_json['login'], 'email':user_json['default_email']})
return render(request, template_name='reg.html', context={'registered':False})
@login_required(login_url="/login/")
def change_pass(request):
if request.method == 'POST':
form = PasswordChangeForm(request.user, request.POST)
print(request.POST)
if form.is_valid():
update_session_auth_hash(request, form.save())
return render(request, template_name='change_password.html', context={'request': request, 'form': form})
return render(request, template_name='change_password.html')
def userprofile(request, id):
if request.method == 'POST':
form = f.FormChangeProf(request.POST, instance=request.user)
if form.is_valid():
form.save()
return render(request, template_name='profile.html', context={'request': request, 'form': form})
return render(request, template_name='profile.html', context={'request': request,
'profile': CustomUser.objects.get(id=id)})
def yandex_auth(request):
url = yandex_oauth.get_verification_url()
return redirect(url)
|
StarcoderdataPython
|
3391571
|
<reponame>conao3/coder
a, op, b = input().split()
a, b = int(a), int(b)
if op == '+':
print(a + b)
elif op == '-':
print(a - b)
|
StarcoderdataPython
|
72008
|
#!/usr/bin/python3
from hub import HubBot
import traceback
import itertools
import sys
import os
import select
import threading
import subprocess
import tempfile
import logging
import calendar
import abc
import smtplib
import sleekxmpp.exceptions
from datetime import datetime, timedelta
import email.message
import email.mime.text
import email.mime.multipart
from wsgiref.handlers import format_date_time
logger = logging.getLogger(__name__)
def smtp_insecure(host, port):
return smtplib.SMTP(host, port)
def smtp_ssl(host, port):
return smtplib.SMTP_SSL(host, port)
def smtp_starttls(host, port):
smtp = smtplib.SMTP(host, port)
smtp.starttls()
return smtp
class MailSendConfig(metaclass=abc.ABCMeta):
@classmethod
def _mime_to_bytes(cls, mime):
from io import StringIO
from email.generator import Generator
fp = StringIO()
g = Generator(fp, mangle_from_=False)
g.flatten(mime)
return fp.getvalue()
@abc.abstractmethod
def send_mime_mail(self, mime_mail):
pass
class MailSMTPConfig(MailSendConfig):
SEC_NONE = smtp_insecure
SEC_SSL = smtp_ssl
SEC_STARTTLS = smtp_starttls
def __init__(self, host, port, user, passwd, security=SEC_STARTTLS):
self._host = host
self._port = port
self._user = user
        self._passwd = passwd
self._security = security
def send_mime_mail(self, mime_mail, tolist):
smtp = self._security(self._host, self._port)
if self._user is not None:
smtp.login(self._user, self._passwd)
mailbytes = self._mime_to_bytes(mime_mail)
smtp.sendmail(
mime_mail["From"],
tolist,
mailbytes)
smtp.quit()
class MailConfig:
def __init__(self,
mfrom,
sendconfig,
subject="[buildbot] {severity}: {project} -- {target}"):
super().__init__()
self._mfrom = mfrom
self._sendconfig = sendconfig
self._subject = subject
def send_mail(self, lines, severity, project, target, tolist):
mail = email.mime.multipart.MIMEMultipart()
mail["To"] = ", ".join(tolist)
mail["From"] = self._mfrom
mail["Date"] = format_date_time(
calendar.timegm(datetime.utcnow().utctimetuple()))
mail["Subject"] = self._subject.format(
severity=severity,
project=project,
target=target)
text = """
Hello,
This is buildbot. This is a status notification for the job
{project} / {target}
The job has the status: {severity}.
Please see the attached output log for details and take appropriate
action.""".format(
severity=severity,
project=project,
target=target)
mime_text = email.mime.text.MIMEText(
text.encode("utf-8"), _charset="utf-8")
mail.attach(mime_text)
mime_log = email.mime.text.MIMEText(
"\n".join(lines).encode("utf-8"), _charset="utf-8")
mime_log.add_header(
"Content-Disposition",
"attachment",
filename="job.log")
mail.attach(mime_log)
self._sendconfig.send_mime_mail(mail, tolist)
class Popen(subprocess.Popen):
DEVNULLR = open("/dev/null", "r")
@classmethod
def checked(cls, call, *args, **kwargs):
proc = cls(call, *args, **kwargs)
result = proc.communicate()
retval = proc.wait()
if retval != 0:
raise subprocess.CalledProcessError(retval, " ".join(call))
return result
def __init__(self, call, *args, sink_line_call=None, update_env={}, **kwargs):
if sink_line_call is not None:
kwargs["stdout"] = subprocess.PIPE
kwargs["stderr"] = subprocess.PIPE
if "stdin" not in kwargs:
kwargs["stdin"] = self.DEVNULLR
if update_env:
env = os.environ
env.update(update_env)
kwargs["env"] = env
super().__init__(call, *args, **kwargs)
self.sink_line_call = sink_line_call
if sink_line_call is not None:
sink_line_call("$ {cmd}".format(cmd=" ".join(call)).encode())
def _submit_buffer(self, buf, force=False):
if b"\n" not in buf:
if force:
self.sink_line_call(buf)
return b""
else:
return buf
split = buf.split(b"\n")
for line in split[:-1]:
# discard everything before the last carriage return
line = line.split(b"\r")[-1]
self.sink_line_call(line)
return split[-1]
def communicate(self):
if self.sink_line_call is not None:
rlist = set([self.stdout, self.stderr])
buffers = {
self.stdout: b"",
self.stderr: b""
}
while True:
rs, _, _ = select.select(rlist, [], [])
for fd in rs:
fno = fd.fileno()
read = fd.readline()
if len(read) == 0:
rlist.remove(fd)
buf = buffers[fd]
if len(buf):
self._submit_buffer(buf, True)
del buffers[fd]
continue
buffers[fd] += read
buffers[fd] = self._submit_buffer(buffers[fd])
if len(rlist) == 0:
break
            for buf in buffers.values():
self._submit_buffer(buf, True)
return None, None
else:
return super().communicate()
class WorkingDirectory:
def __init__(self, path):
self.path = path
def __enter__(self):
self.old_pwd = os.getcwd()
os.chdir(self.path)
return self.path
def __exit__(self, exc_type, exc_value, traceback):
os.chdir(self.old_pwd)
del self.old_pwd
return False
class Target:
def __init__(self, name, branch):
super().__init__()
self.name = name
self.branch = branch
def __str__(self):
return self.name
class Respawn(Target):
initial_cwd = os.getcwd()
class Forward:
def __init__(self, to_jid, msg="respawn", mtype="chat", **kwargs):
super().__init__(**kwargs)
self.to_jid = to_jid
self.msg = msg
self.mtype = mtype
def do_forward(self, xmpp):
xmpp.send_message(
mto=self.to_jid,
mbody=self.msg,
mtype=self.mtype
)
def __init__(self, name, xmpp,
branch="master",
forwards=[],
**kwargs):
super().__init__(name, branch, **kwargs)
self.xmpp = xmpp
self.forwards = forwards
self.cwd = os.getcwd()
@classmethod
def exec_respawn(cls, xmpp):
xmpp.disconnect(reconnect=False, wait=True)
try:
os.chdir(cls.initial_cwd)
os.execv(sys.argv[0], sys.argv)
except:
print("during execv")
traceback.print_exc()
raise
def build(self, log_func):
xmpp = self.xmpp
for forward in self.forwards:
log_func("Sending respawn command to {0}".format(forward.to_jid).encode())
forward.do_forward(xmpp)
log_func("Respawning self".encode())
self.exec_respawn(xmpp)
def __str__(self):
return "respawn {}".format(self.name)
class Execute(Target):
def __init__(self, name, *commands,
working_directory=None,
branch="master",
update_env={},
**kwargs):
super().__init__(name, branch, **kwargs)
self.working_directory = working_directory
self.commands = commands
self.update_env = update_env
def _do_build(self, log_func):
def checked(*args, **kwargs):
return Popen.checked(*args, sink_line_call=log_func, **kwargs)
for command in self.commands:
checked(command, update_env=self.update_env)
def build(self, log_func):
wd = self.working_directory or os.getcwd()
with WorkingDirectory(wd):
self._do_build(log_func)
class Pull(Execute):
class Mode:
def __init__(self, remote_location, log_func):
self.remote_location = remote_location
self.log_func = log_func
def checked(self, *args, **kwargs):
return Popen.checked(*args, sink_line_call=self.log_func, **kwargs)
class Rebase(Mode):
def run(self):
log_func, checked = self.log_func, self.checked
output = subprocess.check_output(["git", "stash"])
stashed = b"No local changes to save\n" != output
try:
call = ["git", "pull", "--rebase"]
if self.remote_location:
call.extend(self.remote_location)
checked(call)
except subprocess.CalledProcessError:
# pull failed, this is quite bad
log_func("pull failed, trying to restore previous state.".encode())
if stashed:
log_func("NOTE: There is a stash which needs to be un-stashed!".encode())
raise
if stashed:
checked(["git", "stash", "pop"])
class Merge(Mode):
def run(self):
log_func, checked = self.log_func, self.checked
try:
call = ["git", "pull"]
if self.remote_location:
call.extend(self.remote_location)
checked(call)
except subprocess.CalledProcessError:
# pull failed
log_func("pull failed, repository remains unchanged")
raise
def __init__(self, name, repository_location, branch,
after_pull_commands=[],
remote_location=None,
mode=Merge):
super().__init__(name, *after_pull_commands,
working_directory=repository_location)
self.remote_location = remote_location
self.branch = branch
self.mode = mode
def _do_build(self, log_func):
self.mode(self.remote_location, log_func).run()
super()._do_build(log_func)
output = subprocess.check_output(["git", "log", "--oneline", "HEAD^..HEAD"]).decode().strip()
log_func("{0} is now at {1}".format(self.name, output).encode())
def __str__(self):
return "pull {0}".format(self.name)
class Build(Execute):
def __init__(self, name, *args,
submodules=[],
commands=["make"],
working_copy=None,
**kwargs):
super().__init__(name, *commands, **kwargs)
self.submodules = submodules
self.working_copy = working_copy
def build_environment(self, log_func):
return self.project.build_environment(
log_func,
self.branch,
self.submodules,
working_copy=self.working_copy
)
def _do_build(self, env):
def checked(*args, **kwargs):
return Popen.checked(*args, sink_line_call=env.log_func, **kwargs)
for command in self.commands:
checked(command)
def build(self, log_func):
with self.build_environment(log_func) as env:
self._do_build(env)
def __str__(self):
return "build {0}".format(self.name)
class BuildAndMove(Build):
def __init__(self, *args, move_to=None, move_from=None, **kwargs):
super().__init__(*args, **kwargs)
if not move_to:
raise ValueError("Required parameter move_to missing or empty.")
self.move_to = move_to
self.move_from = move_from
def _do_build(self, env):
def checked(*args, **kwargs):
return Popen.checked(*args, sink_line_call=env.log_func, **kwargs)
super()._do_build(env)
if self.move_from is not None:
move_from = self.move_from.format(
builddir=env.tmp_dir
)
else:
move_from = env.tmp_dir
checked(["rm", "-rf", self.move_to])
checked(["mv", move_from, self.move_to])
class BuildEnvironment:
def __init__(self, tmp_dir, repo_url, branch, submodules, log_func):
self.tmp_dir_context = None
self.tmp_dir = tmp_dir
self.repo_url = repo_url
self.branch = branch
self.submodules = submodules
self.log_func = log_func
def __enter__(self):
def checked(*args, **kwargs):
return Popen.checked(*args, sink_line_call=self.log_func, **kwargs)
if self.tmp_dir is None:
self.tmp_dir_context = tempfile.TemporaryDirectory()
self.tmp_dir = self.tmp_dir_context.name
try:
if not os.path.isdir(self.tmp_dir):
os.makedirs(self.tmp_dir)
os.chdir(self.tmp_dir)
if os.path.isdir(os.path.join(self.tmp_dir, ".git")):
checked(["git", "fetch", "origin"])
else:
checked(["git", "clone", self.repo_url, self.tmp_dir])
checked(["git", "checkout", self.branch])
checked(["git", "pull"])
for submodule in self.submodules:
checked(["git", "submodule", "init", submodule])
checked(["git", "submodule", "update", submodule])
except:
if self.tmp_dir_context is not None:
self.tmp_dir_context.cleanup()
self.tmp_dir_context = None
raise
return self
def __exit__(self, exc_type, exc_value, traceback):
if self.tmp_dir_context is not None:
self.tmp_dir_context.cleanup()
return False
class Project:
@classmethod
def declare(cls, name, *args, **kwargs):
return (name, cls(name, *args, **kwargs))
def __init__(self, name, *builds,
repository_url=None,
pubsub_name=None,
working_copy=None,
mail_on_error=None,
**kwargs):
super().__init__(**kwargs)
self.name = name
self.repository_url = repository_url
self.pubsub_name = pubsub_name
self.working_copy = working_copy
self.builds = builds
self.mail_on_error = mail_on_error
for build in self.builds:
build.project = self
if pubsub_name is not None:
triggers = {}
for build in self.builds:
build_list = triggers.setdefault((self.pubsub_name, build.branch), [])
build_list.append(build)
self.triggers = triggers
else:
self.triggers = {}
def build_environment(self, log_func, branch, submodules,
working_copy=None):
return BuildEnvironment(
working_copy or self.working_copy,
self.repository_url,
branch,
submodules,
log_func
)
def __str__(self):
return self.name
class IOHandler:
class IOCapture:
def __init__(self, handler):
self._handler = handler
self._lines = []
def _handle_line(self, line):
self._lines.append(line)
def __enter__(self):
self._handler.add_line_hook(self._handle_line)
return self
def __exit__(self, exc_type, exc_value, traceback):
self._handler.remove_line_hook(self._handle_line)
@property
def lines(self):
return self._lines
def __init__(self):
self._line_hooks = []
def add_line_hook(self, line_hook):
self._line_hooks.append(line_hook)
def remove_line_hook(self, line_hook):
self._line_hooks.remove(line_hook)
def capture(self):
return self.IOCapture(self)
def write_line(self, line):
for hook in self._line_hooks:
hook(line)
class BuildBot(HubBot):
GIT_NODE = "git@"+HubBot.FEED
IDLE_MESSAGE = "constructor waiting for instructions"
config_credentials = {}
nickname = "foo"
def __init__(self, config_path):
self._config_path = config_path
self.initialized = False
error = self.reloadConfig()
if error:
traceback.print_exception(*error)
sys.exit(1)
self.initialized = True
credentials = self.config_credentials
super().__init__(
credentials["localpart"],
credentials["resource"],
credentials["password"]
)
del credentials["password"]
nickname = credentials["nickname"]
self.notification_to = credentials["notify"]
self.switch, self.nick = self.addSwitch(credentials["channel"], nickname, self.build_switch)
self.bots_switch, _ = self.addSwitch("bots", nickname)
self.add_event_handler("pubsub_publish", self.pubsubPublish)
self.output_handler = IOHandler()
def _muc_output(self, line):
self.send_message(
mto=self.switch,
mbody=line,
mtype="groupchat"
)
def _setup_pubsub(self):
try:
iq = self.pubsub.get_subscriptions(self.FEED, self.GIT_NODE)
if len(iq["pubsub"]["subscriptions"]) == 0:
self.pubsub.subscribe(self.FEED, self.GIT_NODE, bare=True)
except sleekxmpp.exceptions.IqError:
# error'd
self.send_message(
mto=self.bots_switch,
mbody="failed to setup pubsub link",
mtype="groupchat"
)
# this is the return value for the scheduler (i.e. run
# again)
return True
return False
def sessionStart(self, event):
super().sessionStart(event)
if self._setup_pubsub():
self.scheduler.add(
"link-pubsub",
60.0,
self._setup_pubsub,
repeat=True)
self.send_message(mto=self.switch,
mbody="",
msubject=self.IDLE_MESSAGE,
mtype="groupchat"
)
def reloadConfig(self):
namespace = {}
with open(self._config_path, "r") as f:
conf = f.read()
global_namespace = dict(globals())
global_namespace["xmpp"] = self
try:
exec(conf, global_namespace, namespace)
except Exception:
return sys.exc_info()
new_credentials = namespace.get("credentials", {})
if "localpart" not in new_credentials or "password" not in new_credentials:
raise ValueError("Both localpart and password must be present in credentials.")
if "nickname" not in new_credentials:
new_credentials["nickname"] = new_credentials["localpart"]
if "resource" not in new_credentials:
new_credentials["resource"] = "core"
# don't respawn on new password -- it'll get updated on next connect
# anyways
cmp_creds_new = dict(new_credentials)
del cmp_creds_new["password"]
cmp_creds_old = dict(self.config_credentials)
if cmp_creds_new != cmp_creds_old and self.initialized:
logger.info("Respawning due to major config change")
Respawn.exec_respawn(self)
self.config_credentials = new_credentials
self.authorized = set(namespace.get("authorized", []))
self.blacklist = set()
self.projects = dict(namespace.get("projects", []))
# repobranch-map contains the following structure
#
# {(repo, branch) => {project => [builds]}}
self.repobranch_map = {}
for project in self.projects.values():
for repobranch, builds in project.triggers.items():
projectmap = self.repobranch_map.setdefault(
repobranch, {})
projectmap[project] = list(builds)
return None
def build_switch(self, msg):
pass
def broadcast_error(self, msg, build, err):
hint = "Project “{0}”, target “{1!s}” is broken, traceback logged to {2}".format(
build.project.name,
build,
self.switch
)
self.send_message(
mto=self.bots_switch,
mbody="{1}: {0}".format(hint, self.notification_to),
mtype="groupchat"
)
self.send_message(
mto=self.switch,
mbody=self.format_exception(err),
mtype="groupchat"
)
print(hint)
def mail_error(self, severity, project, build, err, output_lines):
if project.mail_on_error is None:
print("project doesn't have configured mail foo")
return
print("sending mail")
mailconf, tolist = project.mail_on_error
mailconf.send_mail(
output_lines,
severity,
project.name,
build.name,
tolist)
def rebuild_repo(self, msg, repo, branch):
repobranch = (repo, branch)
try:
projects = self.repobranch_map[repobranch]
except KeyError:
raise
for project, builds in projects.items():
self.rebuild_project_subset(msg, project, builds)
return True
def rebuild_project_subset(self, msg, project, builds):
try:
for build in builds:
with self.output_handler.capture() as capture:
self.rebuild(build)
except subprocess.CalledProcessError as err:
self.broadcast_error(msg, build, err)
self.mail_error("failure", project, build, err, capture.lines)
return False
except Exception as err:
self.broadcast_error(msg, build, err)
self.mail_error("error", project, build, err, capture.lines)
return False
finally:
self.send_message(
mto=self.switch,
mbody="",
msubject=self.IDLE_MESSAGE,
mtype="groupchat"
)
def pubsubPublish(self, msg):
item = msg["pubsub_event"]["items"]["item"].xml[0]
repo = item.findtext("{http://hub.sotecware.net/xmpp/git-post-update}repository")
if repo is None:
print("Malformed git-post-update.")
ref = item.findtext("{http://hub.sotecware.net/xmpp/git-post-update}ref")
if ref is None:
print("Malformed git-post-update.")
try:
self.rebuild_repo(msg, repo, ref.split("/")[2])
except KeyError:
pass
    def format_exception(self, exc_info):
        if isinstance(exc_info, BaseException):  # broadcast_error passes a bare exception
            exc_info = (type(exc_info), exc_info, exc_info.__traceback__)
        return "\n".join(traceback.format_exception(*exc_info))
def reply_exception(self, msg, exc_info):
self.reply(msg, self.format_exception(exc_info))
def authorizedSource(self, msg):
origin = str(msg["from"].bare)
        if origin not in self.authorized:
            if origin not in self.blacklist:
self.reply(msg, "You're not authorized.")
self.blacklist.add(origin)
return False
return True
def messageMUC(self, msg):
if msg["mucnick"] == self.nick:
return
contents = msg["body"].strip()
if contents == "ping":
self.reply(msg, "pong")
return
def message(self, msg):
if msg["type"] == "groupchat":
return
if not self.authorizedSource(msg):
return
contents = msg["body"]
args = contents.split(" ", 1)
cmd = args[0]
args = args[1] if len(args) > 1 else ""
handler = self.COMMANDS.get(cmd, None)
if handler is not None:
try:
local = {"__func": handler, "__self": self, "__msg": msg}
self.reply(msg, repr(eval("__func(__self, __msg, {0})".format(args), globals(), local)))
except Exception:
self.reply_exception(msg, sys.exc_info())
else:
self.reply(msg, "Unknown command: {0}".format(cmd))
def rebuild(self, build):
def log_func_binary(buf):
if not isinstance(buf, str):
buf = buf.decode(errors="replace")
msg = buf.strip()
if msg:
self.output_handler.write_line(msg)
project = build.project
topic = "Running: {project!s} – {build!s}".format(
project=project,
build=build
)
self.send_message(mto=self.switch, mbody="", msubject=topic, mtype="groupchat")
self.output_handler.write_line(topic)
self.output_handler.add_line_hook(self._muc_output)
try:
build.build(log_func_binary)
self.output_handler.write_line("done.")
finally:
self.output_handler.remove_line_hook(self._muc_output)
def cmdRebuild(self, msg, projectName):
project = self.projects.get(projectName, None)
if not project:
return "Unknown project: {0}".format(projectName)
self.rebuild(project)
return True
def cmdReload(self, msg):
result = self.reloadConfig()
if result:
self.reply_exception(msg, result)
else:
return True
def cmdRebuildRepo(self, msg, repository, branch):
try:
self.rebuild_repo(msg, repository, branch)
except KeyError:
self.reply(msg, "Repository-branch combination not tracked: {}".format((repository, branch)))
def cmdEcho(self, msg, *args):
return " ".join((str(arg) for arg in args))
COMMANDS = {
"rebuild": cmdRebuild,
"rebuild-repo": cmdRebuildRepo,
"reload": cmdReload,
"echo": cmdEcho
}
if __name__ == "__main__":
try:
import setproctitle
setproctitle.setproctitle("constructor")
except ImportError:
pass
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"-c", "--config-file",
default="buildbot_config.py",
help="Path to the config file to use.",
dest="config_file"
)
args = parser.parse_args()
del parser
buildbot = BuildBot(args.config_file)
buildbot.run()
|
StarcoderdataPython
|
3294041
|
def has_automation(filename):
"""Decorator that adds the automation_file attribute to a test function.
When present, this filename will be used as the --automation file when creating the speculos fixture.
"""
def decorator(func):
func.automation_file = filename
return func
return decorator
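# A minimal usage sketch of has_automation(); the test name and the automation
# file path below are hypothetical examples, not part of this repository:
#
#     @has_automation("automations/accept_all.json")
#     def test_sign_transaction(cmd):
#         ...
#
# The speculos fixture can then pick up test_sign_transaction.automation_file.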
|
StarcoderdataPython
|
67548
|
from lithopscloud.modules.config_builder import ConfigBuilder, update_decorator, spinner
from typing import Any, Dict
from lithopscloud.modules.utils import find_obj, find_default
class ImageConfig(ConfigBuilder):
def __init__(self, base_config: Dict[str, Any]) -> None:
super().__init__(base_config)
@update_decorator
def run(self) -> Dict[str, Any]:
@spinner
def get_image_objects():
return self.ibm_vpc_client.list_images().get_result()['images']
image_objects = get_image_objects()
default = find_default(self.defaults, image_objects, id='image_id') or 'ibm-ubuntu-20-04-2-minimal-amd64-1'
image_obj = find_obj(image_objects, 'Please choose \033[92mUbuntu\033[0m 20.04 VM image, currently only Ubuntu supported', default=default)
return image_obj['id'], image_obj['minimum_provisioned_size']
|
StarcoderdataPython
|
82514
|
import json
import pytest
from stix2 import TAXIICollectionSource
from test_data.mitre_test_data import ATTACK_PATTERN, COURSE_OF_ACTION, INTRUSION_SET, MALWARE, TOOL, ID_TO_NAME, \
RELATION, STIX_TOOL, STIX_MALWARE, STIX_ATTACK_PATTERN
class MockCollection:
def __init__(self, id_, title):
self.id = id_
self.title = title
def mock_create_relations(original):
def mock(item_json, id_to_name):
return original(item_json, ID_TO_NAME)
return mock
@pytest.mark.parametrize('indicator, expected_result', [
([ATTACK_PATTERN.get('response')], ATTACK_PATTERN.get('indicator')),
([COURSE_OF_ACTION.get('response')], COURSE_OF_ACTION.get('indicator')),
([INTRUSION_SET.get('response')], INTRUSION_SET.get('indicator')),
([MALWARE.get('response')], MALWARE.get('indicator')),
([TOOL.get('response')], TOOL.get('indicator')),
])
def test_fetch_indicators(mocker, indicator, expected_result):
"""
Given
- fetch incidents command
- command args
- command raw response
When
- mock the Client's get_stix_objects.
Then
- run the fetch incidents command using the Client
    Validate that all the indicators are extracted successfully
"""
import FeedMitreAttackv2 as fm
from FeedMitreAttackv2 import Client, create_relationship
client = Client(url="https://test.org", proxies=False, verify=False, tags=[], tlp_color=None)
default_id = 1
nondefault_id = 2
client.collections = [MockCollection(default_id, 'default'), MockCollection(nondefault_id, 'not_default')]
mocker.patch.object(client, 'initialise')
mocker.patch.object(TAXIICollectionSource, "__init__", return_value=None)
mocker.patch.object(TAXIICollectionSource, 'query', return_value=indicator)
mocker.patch.object(json, 'loads', return_value=indicator[0])
mocker.patch.object(fm, 'create_relationship', wraps=mock_create_relations(create_relationship))
indicators = client.build_iterator(create_relationships=True, limit=6)
assert indicators == expected_result
@pytest.mark.parametrize('field_name, field_value, expected_result', [
('created', '2017-05-31T21:31:43.540Z', '2017-05-31T21:31:43.540Z'),
('created', '2019-04-25T20:53:07.719Z\n2019-04-25T20:53:07.814Z', '2019-04-25T20:53:07.719Z'),
('modified', '2017-05-31T21:31:43.540Z', '2017-05-31T21:31:43.540Z'),
('modified', '2020-03-16T15:38:37.650Z\n2020-01-17T16:45:24.252Z', '2020-03-16T15:38:37.650Z'),
])
def test_handle_multiple_dates_in_one_field(field_name, field_value, expected_result):
"""
Given
- created / modified indicator field
When
- this field contains two dates
Then
- run the handle_multiple_dates_in_one_field
    Validate the field contains one specific date.
"""
from FeedMitreAttackv2 import handle_multiple_dates_in_one_field
assert handle_multiple_dates_in_one_field(field_name, field_value) == expected_result
@pytest.mark.parametrize('indicator, expected_result', [
({"x_mitre_deprecated": True}, True),
({"revoked": True}, True),
({}, False)
])
def test_is_indicator_deprecated_or_revoked(indicator, expected_result):
"""
Given
- indicator in STIX format.
When
    - we check whether the indicator is deprecated or revoked
    Then
    - run the is_indicator_deprecated_or_revoked
    Validate that the deprecated/revoked status is detected correctly.
"""
from FeedMitreAttackv2 import is_indicator_deprecated_or_revoked
assert is_indicator_deprecated_or_revoked(indicator) == expected_result
@pytest.mark.parametrize('indicator_type, indicator_json, expected_result', [
('Attack Pattern', ATTACK_PATTERN.get('response'), ATTACK_PATTERN.get('map_result')),
('Course of Action', COURSE_OF_ACTION.get('response'), COURSE_OF_ACTION.get('map_result')),
('Intrusion Set', INTRUSION_SET.get('response'), INTRUSION_SET.get('map_result')),
('Malware', MALWARE.get('response'), MALWARE.get('map_result')),
('Tool', TOOL.get('response'), TOOL.get('map_result')),
('STIX Tool', STIX_TOOL.get('response'), STIX_TOOL.get('map_result')),
('STIX Malware', STIX_MALWARE.get('response'), STIX_MALWARE.get('map_result')),
('STIX Attack Pattern', STIX_ATTACK_PATTERN.get('response'), STIX_ATTACK_PATTERN.get('map_result'))
])
def test_map_fields_by_type(indicator_type, indicator_json, expected_result):
from FeedMitreAttackv2 import map_fields_by_type
assert map_fields_by_type(indicator_type, indicator_json) == expected_result
def test_create_relationship():
"""
Given
- relationship obj in STIX format.
When
- we extract this relationship to Demisto format
Then
- run the create_relationship
    Validate the relationship is extracted successfully.
"""
from FeedMitreAttackv2 import create_relationship
relation = create_relationship(RELATION.get('response'), ID_TO_NAME)
relation._entity_a = 'entity a'
relation._entity_a_type = 'STIX Malware'
relation._entity_b = 'entity b'
relation._entity_b_type = 'STIX Attack Pattern'
relation._name = 'uses'
relation._relation_type = 'IndicatorToIndicator'
relation._reverse_name = 'used-by'
def test_get_item_type():
from FeedMitreAttackv2 import get_item_type
assert get_item_type('malware', True) == 'Malware'
assert get_item_type('malware', False) == 'STIX Malware'
assert get_item_type('intrusion-set', True) == 'Intrusion Set'
assert get_item_type('intrusion-set', False) == 'Intrusion Set'
def test_create_relationship_list():
from FeedMitreAttackv2 import create_relationship_list
assert create_relationship_list([RELATION.get('response')], ID_TO_NAME) == RELATION.get('indicator')
|
StarcoderdataPython
|
3344227
|
import numpy as np
import functools
import traittypes
import traitlets
from itertools import tee
import pythreejs
from plyfile import PlyData, PlyElement
from .traits_support import check_shape, check_dtype
cached_property = getattr(functools, "cached_property", property)
# From itertools cookbook
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return zip(a, b)
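# Quick illustration of pairwise() (example values, not part of the module):
# list(pairwise([1, 2, 3, 4])) == [(1, 2), (2, 3), (3, 4)]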
def _ensure_triangulated(faces):
for face in faces:
if len(face[0]) == 3:
yield face
continue
# We are going to make the assumption that the face is convex
# We choose the first vertex as our fan source
indices, *rest = face
base = indices[0]
for pair in pairwise(indices[1:]):
yield [np.array((base,) + pair)] + rest
class Model(traitlets.HasTraits):
origin = traittypes.Array(None, allow_none=True).valid(
check_shape(3), check_dtype("f4")
)
vertices = traittypes.Array(None, allow_none=True).valid(
check_shape(None, 3), check_dtype("f4")
)
indices = traittypes.Array(None, allow_none=True).valid(
check_shape(None, 3), check_dtype("i4")
)
attributes = traittypes.Array(None, allow_none=True)
triangles = traittypes.Array(None, allow_none=True).valid(
check_shape(None, 3, 3), check_dtype("f4")
)
@classmethod
def from_ply(cls, filename):
# This is probably not the absolute best way to do this.
plydata = PlyData.read(filename)
vertices = plydata["vertex"][:]
faces = plydata["face"][:]
triangles = []
xyz_faces = []
for face in _ensure_triangulated(faces):
indices = face[0]
vert = vertices[indices]
triangles.append(np.array([vert["x"], vert["y"], vert["z"]]))
xyz_faces.append(indices)
xyz_vert = np.stack([vertices[ax] for ax in "xyz"], axis=-1)
xyz_faces = np.stack(xyz_faces)
colors = None
if "diffuse_red" in vertices.dtype.names:
colors = np.stack(
[vertices["diffuse_{}".format(c)] for c in ("red", "green", "blue")],
axis=-1,
)
triangles = np.array(triangles).swapaxes(1, 2)
obj = cls(
vertices=xyz_vert,
indices=xyz_faces.astype('i4'),
attributes=colors,
triangles=triangles,
)
return obj
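    # Usage sketch for from_ply(); "mesh.ply" is a hypothetical file name:
    #     model = Model.from_ply("mesh.ply")
    #     geom = model.geometry  # pythreejs.BufferGeometry for rendering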
@property
def geometry(self):
attributes = dict(
position=pythreejs.BufferAttribute(self.vertices, normalized=False),
index=pythreejs.BufferAttribute(
self.indices.ravel(order="C").astype("u4"), normalized=False
),
)
if self.attributes is not None:
attributes["color"] = pythreejs.BufferAttribute(self.attributes)
# Face colors requires
# https://speakerdeck.com/yomotsu/low-level-apis-using-three-dot-js?slide=22
# and
# https://github.com/mrdoob/three.js/blob/master/src/renderers/shaders/ShaderLib.js
geometry = pythreejs.BufferGeometry(attributes=attributes)
geometry.exec_three_obj_method("computeFaceNormals")
return geometry
@cached_property
def normals(self):
r"""Array of the normal vectors for the triangles in this model."""
v10 = self.triangles[:, 1, :] - self.triangles[:, 0, :]
v20 = self.triangles[:, 2, :] - self.triangles[:, 0, :]
return np.cross(v10, v20)
@cached_property
def areas(self):
r"""Array of areas for the triangles in this model."""
return 0.5 * np.linalg.norm(self.normals, axis=1)
def translate(self, delta):
self.vertices = self.vertices + delta
def rotate(self, q, origin="barycentric"):
"""
This expects a quaternion as input.
"""
pass
|
StarcoderdataPython
|
4825620
|
<filename>Other Trials/sentiment_input.py<gh_stars>0
from nltk.classify import NaiveBayesClassifier
#from nltk.corpus import posectivity
from nltk.sentiment import SentimentAnalyzer
from nltk.sentiment.util import *
import os
from textblob import TextBlob
##
##neg_docs = []
##with open("C:/Users/ShravanJagadish/Desktop/Search/Final Project/Output/2011-2013/negative_Bakeries.txt",'r') as file:
## for lines in file:
## blob = TextBlob(lines)
## for sentences in blob.sentences:
## inp = ''
## #print sentences
## content = sentences.split(" ")
## for words in content:
## if len(words) > 3 and words != 'null':
## inp = inp + words + ","
## if len(inp) > 1:
## neg_docs.append(("[" + inp +"],'neg'"))
##
##pos_docs = []
##with open("C:/Users/ShravanJagadish/Desktop/Search/Final Project/Output/2011-2013/positive_Bakeries.txt",'r') as file:
## for lines in file:
## blob = TextBlob(lines)
## for sentences in blob.sentences:
## inp = ''
## #print sentences
## content = sentences.split(" ")
## for words in content:
## if len(words) > 3 and words != 'null':
## inp = inp + words + ","
## if len(inp) > 1:
## pos_docs.append(("[" + inp +"],'pos'"))
##
##
pos_docs = []
pos_docs.append((['smart', 'and', 'alert', ',', 'thirteen', 'conversations', 'about', 'one','thing', 'is', 'a', 'small', 'gem', '.'], 'pos'))
pos_docs.append((['smart', 'and', 'alert', ',', 'thirteen', 'conversations', 'about', 'one','thing', 'is', 'a', 'small', 'gem', '.'], 'pos'))
pos_docs.append((['smart', 'and', 'alert', ',', 'thirteen', 'conversations', 'about', 'one','thing', 'is', 'a', 'small', 'gem', '.'], 'pos'))
pos_docs.append((['smart', 'and', 'alert', ',', 'thirteen', 'conversations', 'about', 'one','thing', 'is', 'a', 'small', 'gem', '.'], 'pos'))
pos_docs.append((['smart', 'and', 'alert', ',', 'thirteen', 'conversations', 'about', 'one','thing', 'is', 'a', 'small', 'gem', '.'], 'pos'))
pos_docs.append((['smart', 'and', 'alert', ',', 'thirteen', 'conversations', 'about', 'one','thing', 'is', 'a', 'small', 'gem', '.'], 'pos'))
pos_docs.append((['smart', 'and', 'alert', ',', 'thirteen', 'conversations', 'about', 'one','thing', 'is', 'a', 'small', 'gem', '.'], 'pos'))
pos_docs.append((['smart', 'and', 'alert', ',', 'thirteen', 'conversations', 'about', 'one','thing', 'is', 'a', 'small', 'gem', '.'], 'pos'))
pos_docs.append((['smart', 'and', 'alert', ',', 'thirteen', 'conversations', 'about', 'one','thing', 'is', 'a', 'small', 'gem', '.'], 'pos'))
pos_docs.append((['smart', 'and', 'alert', ',', 'thirteen', 'conversations', 'about', 'one','thing', 'is', 'a', 'small', 'gem', '.'], 'pos'))
neg_docs = []
neg_docs.append((['smart', 'and', 'alert', ',', 'thirteen', 'conversations', 'about', 'one','thing', 'is', 'a', 'small', 'gem', '.'], 'neg'))
neg_docs.append((['smart', 'and', 'alert', ',', 'thirteen', 'conversations', 'about', 'one','thing', 'is', 'a', 'small', 'gem', '.'], 'neg'))
neg_docs.append((['smart', 'and', 'alert', ',', 'thirteen', 'conversations', 'about', 'one','thing', 'is', 'a', 'small', 'gem', '.'], 'neg'))
neg_docs.append((['smart', 'and', 'alert', ',', 'thirteen', 'conversations', 'about', 'one','thing', 'is', 'a', 'small', 'gem', '.'], 'neg'))
neg_docs.append((['smart', 'and', 'alert', ',', 'thirteen', 'conversations', 'about', 'one','thing', 'is', 'a', 'small', 'gem', '.'], 'neg'))
neg_docs.append((['smart', 'and', 'alert', ',', 'thirteen', 'conversations', 'about', 'one','thing', 'is', 'a', 'small', 'gem', '.'], 'neg'))
neg_docs.append((['smart', 'and', 'alert', ',', 'thirteen', 'conversations', 'about', 'one','thing', 'is', 'a', 'small', 'gem', '.'], 'neg'))
neg_docs.append((['smart', 'and', 'alert', ',', 'thirteen', 'conversations', 'about', 'one','thing', 'is', 'a', 'small', 'gem', '.'], 'neg'))
neg_docs.append((['smart', 'and', 'alert', ',', 'thirteen', 'conversations', 'about', 'one','thing', 'is', 'a', 'small', 'gem', '.'], 'neg'))
neg_docs.append((['smart', 'and', 'alert', ',', 'thirteen', 'conversations', 'about', 'one','thing', 'is', 'a', 'small', 'gem', '.'], 'neg'))
print(len(pos_docs))
print(len(neg_docs))
print(neg_docs[0])
train_pos_docs = pos_docs[:80]
test_pos_docs = pos_docs[80:100]
train_neg_docs = neg_docs[:80]
test_neg_docs = neg_docs[80:100]
training_docs = train_pos_docs+train_neg_docs
testing_docs = test_pos_docs+test_neg_docs
sentim_analyzer = SentimentAnalyzer()
all_words_neg = sentim_analyzer.all_words([mark_negation(doc) for doc in training_docs])
unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4)
print(len(unigram_feats))
sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)
training_set = sentim_analyzer.apply_features(training_docs)
test_set = sentim_analyzer.apply_features(testing_docs)
trainer = NaiveBayesClassifier.train
classifier = sentim_analyzer.train(trainer, training_set)
for key,value in sorted(sentim_analyzer.evaluate(test_set).items()):
    print('{0}: {1}'.format(key, value))
|
StarcoderdataPython
|
1701425
|
<reponame>kadamkaustubh/Countdown<filename>WordGame.py
import random
def sorted_word_list(file_name):
with open(file_name, 'r') as fileopen:
words = [line.strip() for line in fileopen]
sorted_list = sorted(words, key=len)
rev_list = reversed(sorted_list)
with open('words/SortedWords', 'w') as sort_write:
for word in rev_list:
if len(word) < 10:
sort_write.write(word)
sort_write.write('\n')
def anagram_check(word, check_word):
for letter in word:
if letter in check_word:
check_word = check_word.replace(letter, '', 1)
else:
return False
return True
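# Illustration of anagram_check() with assumed example inputs:
# anagram_check("tea", "eat") -> True  (every letter of "tea" is available)
# anagram_check("teal", "eat") -> False (the "l" is not available in "eat")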
def scramble_generator():
alphabet = list('qwertyuiopasdfghjklzxcvbnm')
vowels = list('aeiou')
consonants = []
for letter in alphabet:
if letter not in vowels:
consonants.append(letter)
scramble = random.sample(vowels, 3) + random.sample(consonants, 4) + random.sample(alphabet, 2)
return scramble
def word_solve(letters):
top_score = 0
solution = []
with open('words/sortedWords', 'r') as dict_file:
for dict_item in dict_file:
dict_item = dict_item.strip()
if anagram_check(dict_item, letters):
if len(dict_item) < top_score:
break
print(dict_item)
solution.append(dict_item)
top_score = len(dict_item)
dict_file.close()
return solution
board = scramble_generator()
print(board)
let = ''.join(board)
answer = word_solve(let)
|
StarcoderdataPython
|
12762
|
<filename>src/config.py<gh_stars>10-100
import yaml
import os
def parse_config(args):
"""
prepare configs
"""
file_dir = os.path.dirname(os.path.realpath('__file__'))
messytable_dir = os.path.realpath(os.path.join(file_dir, '..'))
config_pathname = os.path.join(messytable_dir,'models',args.config_dir,'train.yaml')
    config = yaml.load(open(config_pathname, 'r'), Loader=yaml.FullLoader)
config['messytable_dir'] = messytable_dir
config['config_dir'] = os.path.join(messytable_dir,'models',args.config_dir)
config['data_dir'] = os.path.join(messytable_dir, 'data') if 'data_dir' not in config else config['data_dir'] # NOTE: either indicate data_dir or put the data in messytable/data
config['img_dir'] = os.path.join(config['data_dir'],'images')
config['train_label_pathname'] = os.path.join(config['data_dir'],'labels',config['train_json'])
config['num_workers'] = config['num_workers'] if 'num_workers' in config else 16
config['milestones'] = config['milestones'] if 'milestones' in config else [60, 80]
config['split_samples_in_func'] = config['split_samples_in_func'] if 'split_samples_in_func' in config else True
config['loss_func'] = config['loss_func'] if 'loss_func' in config else 'ERROR_LOSS_FUNC'
config['triplet_margin'] = config['triplet_margin'] if 'triplet_margin' in config else 0.3
config['data_augmentation'] = config['data_augmentation'] if 'data_augmentation' in config else False
config['cropped_img_size'] = (config['cropped_height'],config['cropped_width'])
config['original_img_size'] = (config['img_height'],config['img_width'])
config['scene_ratio'] = config['scene_ratio'] if 'scene_ratio' in config else 1.0
config['cam_selected_num'] = config['cam_selected_num'] if 'cam_selected_num' in config else 8
config['triplet_sampling_ratio'] = config['triplet_sampling_ratio'] if 'triplet_sampling_ratio' in config else [0.5,0.3,0.2]
config['image_pairs_per_batch'] = config['image_pairs_per_batch'] if 'image_pairs_per_batch' in config else 24
config['triplet_batch_size'] = config['triplet_batch_size'] if 'triplet_batch_size' in config else config['batch_size']
config['learning_rate'] = float(config['learning_rate'])
config['zoomout_crop_num'] = 'single_crop' if len(config['zoomout_ratio']) == 1 else 'multi_crops'
# make cam_pairs
test_cam_pairs = []
for i in range(1,9):
for j in range(i+1,10):
test_cam_pairs.append((str(i),str(j)))
reversed_cam_pairs = []
for cam_pair in test_cam_pairs:
reversed_cam_pairs.append((cam_pair[1],cam_pair[0]))
config['test_cam_pairs'] = test_cam_pairs
config['train_cam_pairs'] = test_cam_pairs + reversed_cam_pairs
config['cam_list'] = [str(i) for i in range(1,10)]
return config
|
StarcoderdataPython
|
1636299
|
<filename>gradschool/fs/utility.py
import os
import pathlib
def as_uri(path):
"""
Converts the supplied path to file URI
:param path: Path to be converted
:return: Path as a fille URI
"""
p = pathlib.Path(path)
return p.as_uri()
def as_pathlib(path):
"""
Converts the supplied path to an pathlib.Path object
:param path: The path to convert
:return: The path converted to pathlib.Path
"""
return pathlib.Path(path)
def isfile(path):
"""
Determines if the supplied path is a file
:param path: Path to check
:return: True if it is a file, False otherwise
"""
return os.path.isfile(path)
def isdir(path):
"""
Determines if the supplied path is a directory
:param path: Path to check
:return: True if it is a directory, False otherwise
"""
return os.path.isdir(path)
def is_absolute(path):
"""
Checks if the supplied path is an absolute path
:param path: Path to check
:return: True if absolute path, false, otherwise
"""
p = pathlib.Path(path)
return p.is_absolute()
def join_with_cwd(path):
"""
Joins the supplied path the current working directory
:param path: Path to be joined with the current working directory
:return: Path joined with the current working directory
"""
return os.path.join(os.getcwd(), path)
def basename(path):
"""
Get the base name of the supplied path
:param path: Path to get the base name from
:return: The base name of the path
"""
return os.path.basename(path)
def dirname(path):
"""
Get the directory name of the supplied path
:param path: A path
:return: The directory name of the path
"""
return os.path.dirname(path)
def rename(pfrom, pto):
"""
Renames a file or directory (pfrom) to a new name (pto)
:param pfrom: Path to file or directory to be renamed
:param pto: New name or path with new name
"""
os.rename(pfrom, pto)
def samefile(path1, path2):
"""
Determines if two paths point to the same file or directory
:param path1: First path
:param path2: Second path
:return: True if same, false otherwise
"""
return os.path.samefile(path1, path2)
def path_exists(path):
"""
Tests if a path exists
:param path: Path to test existence of
:return: True if path exists or false if it does not
"""
return os.path.exists(path)
def relpath(path, start=os.curdir):
"""
Get a relative filepath to path from either current working directory or optional starting point
:param path: The path to get a relative file path from
:param start: Optional starting point defaults to os.curdir
:return: The relative filepath
"""
return os.path.relpath(path, start=start)
def resolve(path):
"""
Resolves the supplied path
:param path: Path to be resolved
:return: The resolved path
"""
p = pathlib.Path(path)
return str(p.resolve())
|
StarcoderdataPython
|
3283360
|
# Copyright (C) 2021 Intel Corporation. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
import os
import ctypes
from collections import namedtuple
from pcieparser.header import header
from pcieparser.caps import capabilities
from pcieparser.extcaps import extended_capabilities
class PCIConfigSpace(namedtuple("PCIConfigSpace", ["header", "caps", "extcaps"])):
def __repr__(self):
acc = str(self.header)
for cap in self.caps:
acc += "\n"
acc += str(cap)
for extcap in self.extcaps:
acc += "\n"
acc += str(extcap)
return acc
def has_cap(self, cap_name):
for cap in self.caps:
if cap_name == cap.name:
return True
for cap in self.extcaps:
if cap_name == cap.name:
return True
return False
def parse_config_space(path):
try:
data = open(os.path.join(path, "config"), mode='rb').read()
hdr = header(data)
caps = capabilities(data, hdr.capability_pointer)
config_space = PCIConfigSpace(hdr, caps, [])
# While PCI Express specification requires that a PCIe endpoint must have an extended capability header at
# offset 100h of its configuration space, we do see real PCIe endpoints not meeting this requirement
# occasionally. Thus, check the length of the configuration space as well before trying to parse its extended
# capability list.
if config_space.has_cap("PCI Express") and len(data) >= 260:
extcaps = extended_capabilities(data)
config_space = PCIConfigSpace(hdr, caps, extcaps)
return config_space
except FileNotFoundError:
return None
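# Usage sketch, assuming a sysfs-style device directory containing a binary
# "config" file; the path below is a hypothetical example:
#     cs = parse_config_space("/sys/bus/pci/devices/0000:00:1f.6")
#     if cs is not None and cs.has_cap("PCI Express"):
#         print(cs)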
|
StarcoderdataPython
|
1665353
|
"""
Based on this example -> https://github.com/open-power/pdbg/blob/master/.build.sh
TEMPDIR=`mktemp -d ${HOME}/pdbgobjXXXXXX`
RUN_TMP="docker run --rm=true --user=${USER} -w ${TEMPDIR} -v ${HOME}:${HOME} -t ${CONTAINER}"
${RUN_TMP} ${SRCDIR}/configure --host=arm-linux-gnueabi
${RUN_TMP} make
rm -rf ${TEMPDIR}
"""
|
StarcoderdataPython
|
1761069
|
<filename>part-data/test-sqlite.py
import sqlite3
if __name__ == "__main__":
data = [
(1, 2, 3),
(2, 3, 4),
]
s = sqlite3.connect('database.db')
    # create a cursor for the database so SQL statements can be executed
db = s.cursor()
db.execute('create table wulj (name, number, rate)')
print(db)
s.commit()
    db.executemany('insert into wulj values (?,?,?)', data)
s.commit()
for row in db.execute('select * from wulj'):
print(row)
number = 10
    # user-supplied parameter for an interactive query; "?" is the placeholder
    for row in db.execute('select * from wulj where number > ?', (number,)):
print(row)
|
StarcoderdataPython
|
15664
|
import pandas as pd
import re
import os
from tqdm import tqdm
## Cleaning train raw dataset
train = open('./data/raw/train.crash').readlines()
train_ids = []
train_texts = []
train_labels = []
for id, line in tqdm(enumerate(train)):
line = line.strip()
if line.startswith("train_"):
train_ids.append(id)
elif line == "0" or line == "1":
train_labels.append(id)
train_label_values = []
for id, lb in tqdm(zip(train_ids, train_labels)):
    line_id = train[id].strip()
    label = train[lb].strip()
    text = ' '.join(train[id + 1: lb])
    text = re.sub(r'\s+', ' ', text).strip()[1: -1].strip()
    train_texts.append(text)
    train_label_values.append(label)
train_df = pd.DataFrame({
    'id': train_ids,
    'text': train_texts,
    'label': train_label_values
})
if not os.path.exists('./data'):
os.makedirs('./data')
train_df.to_csv('./data/train.csv', encoding='utf-8', index=False)
## Clean test raw dataset
test = open("./data/raw/test.crash").readlines()
test_ids = []
test_texts = []
for id, line in tqdm(enumerate(test)):
line = line.strip()
if line.startswith("test_"):
test_ids.append(id)
for i, id in tqdm(enumerate(test_ids)):
if i >= len(test_ids) - 1:
end = len(test)
else:
end = test_ids[i + 1]
line_id = test[id].strip()
    text = re.sub(r'\s+', ' ', ' '.join(test[id + 1: end])).strip()[1:-1].strip()
test_texts.append(text)
test_df = pd.DataFrame({
'id': test_ids,
'text': test_texts
})
submission = pd.read_csv('./data/raw/sample_submission.csv', encoding='utf-8')
result = pd.concat([test_df, submission], axis=1, sort=False)
result.to_csv('./data/test.csv', encoding='utf-8', index=False)
|
StarcoderdataPython
|
132932
|
<reponame>bozhikovstanislav/Python-Fundamentals
string_to_reverse = input()
for x in string_to_reverse[::-1]:
    print(x, end='')
|
StarcoderdataPython
|
26336
|
# -*- coding: utf-8 -*-
"""Example 1: Load and plot airfoil coordinates
"""
import os
import matplotlib.pyplot as plt
from mypack.utils.io import read_selig
from mypack.utils.plotting import plot_airfoil
def example_1():
"""Run example 1"""
# script inputs
mod_path = os.path.dirname(os.path.abspath(__file__)) # current module
air_path = os.path.join(mod_path, '..',
'tests', 'test_utils', 'files', 'demo_selig.dat')
# load coordinates from a a selig-style airfoil file
air_df = read_selig(air_path)
# plot the airfoil
plot_airfoil(air_df)
# save the png for the documentation
fig = plt.gcf()
save_name = os.path.basename(__file__).replace('.py', '.png') # file name
save_path = os.path.join(mod_path, save_name)
fig.savefig(save_path)
if __name__ == '__main__': # call function when run as script
example_1()
|
StarcoderdataPython
|
29374
|
from flask_wtf import FlaskForm
from wtforms import PasswordField, SubmitField, StringField
from wtforms.validators import DataRequired, Length
class InstagramLoginForm(FlaskForm):
username = StringField('Instagram Username', validators=[DataRequired(),
Length(min=6, max=20)])
password = PasswordField('<PASSWORD>', validators=[DataRequired()])
submit = SubmitField('Save')
|
StarcoderdataPython
|
1622475
|
# https://www.hackerrank.com/challenges/30-testing/problem
def minimum_index(seq):
if len(seq) == 0:
raise ValueError("Cannot get the minimum value index from an empty sequence")
min_idx = 0
for i in range(1, len(seq)):
if seq[i] < seq[min_idx]:
min_idx = i
return min_idx
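# Illustration of minimum_index() with an assumed example input:
# minimum_index([15, 54, 13, 76, 1]) -> 4, because seq[4] == 1 is the minimum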
class TestDataEmptyArray(object):
@staticmethod
def get_array():
return []
class TestDataUniqueValues(object):
@staticmethod
def get_array():
return [15, 54, 13, 76, 1, 4, 5, 3, 2, 9, 27, 87, 91, 10, 11, 7, 12, 99]
@staticmethod
def get_expected_result():
return 4
class TestDataExactlyTwoDifferentMinimums(object):
@staticmethod
def get_array():
return [1, 2, 3, 0, 4, 0, 5]
@staticmethod
def get_expected_result():
return 3
def TestWithEmptyArray():
try:
seq = TestDataEmptyArray.get_array()
result = minimum_index(seq)
except ValueError as e:
pass
else:
assert False
def TestWithUniqueValues():
seq = TestDataUniqueValues.get_array()
assert len(seq) >= 2
assert len(list(set(seq))) == len(seq)
expected_result = TestDataUniqueValues.get_expected_result()
result = minimum_index(seq)
assert result == expected_result
def TestWithExactlyTwoDifferentMinimums():
seq = TestDataExactlyTwoDifferentMinimums.get_array()
assert len(seq) >= 2
tmp = sorted(seq)
assert tmp[0] == tmp[1] and (len(tmp) == 2 or tmp[1] < tmp[2])
expected_result = TestDataExactlyTwoDifferentMinimums.get_expected_result()
result = minimum_index(seq)
assert result == expected_result
if __name__ == '__main__':
TestWithEmptyArray()
TestWithUniqueValues()
    TestWithExactlyTwoDifferentMinimums()
print("OK")
|
StarcoderdataPython
|
3243125
|
<reponame>patryk-tech/Friendo_Bot
import os
TOKEN = os.environ.get("FRIENDO_TOKEN")
MEME_USERNAME = os.environ.get("MEME_USERNAME")
MEME_PASSWORD = os.environ.get("MEME_PASSWORD")
COMMAND_PREFIX = "."
VERSION = "1.2.8"
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
BASE_GITHUB_REPO = "https://github.com/fisher60/Friendo_Bot"
|
StarcoderdataPython
|
1722197
|
<filename>app/categories/controller.py
from flask import abort
from sqlalchemy import asc
from .models import Category
from app.recipes.models import Recipe
from app.user.models import User
# Returns all categories
def categoryList():
return Category.getCategories()
# Returns 1 category
def currentCategory(category_id):
return Category.getCurrentCategory(category_id)
# Returns user info that created the category
def categoryCreator(category):
return User.getUserInfo(category.user_id)
# Returns Recipe list for this category
def recipeList(category_id):
return Recipe.getRecipeList(category_id)
def createNewCategory(name, id):
Category.create(
name=name,
user_id=id
)
return True
def updateCategory(category, name):
category.name = name
category.update()
return True
def deleteCategory(category, category_id):
try:
recipes = recipeList(category_id)
recipes.delete()
except:
abort(500)
category.delete()
return True
|
StarcoderdataPython
|
3316809
|
<reponame>christabor/plantstuff<gh_stars>1-10
"""OCR conversion for generating data from images."""
from pprint import pprint as ppr
try:
import Image
except ImportError:
from PIL import Image
import pytesseract
from plantstuff.scraper_utils.decorators import cached
# E.g. `which tesseract`
pytesseract.pytesseract.tesseract_cmd = '/usr/local/bin/tesseract'
def get_image_data(img):
"""Get image data and clean-up."""
fpath = 'ocr_images/{}'.format(img)
ref_name = fpath.replace('/', '__').replace('.', '___') + '.txt'
@cached(ref_name, directory='ocr_images')
def get():
img = Image.open(fpath)
res = pytesseract.image_to_string(img)
ppr(res)
return res
return get()
get_image_data('threatened_taxa.jpg')
|
StarcoderdataPython
|
142327
|
# -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
import os,glob
# from dotenv import find_dotenv, load_dotenv
import process_rna as process_rna
import process_hichip as process_hichip
import process_atac as process_atac
import process_bedtools as process_bedtools
import process_crms as process_crms
##TODO: make workflow for single tissue
###TODO: snakemake this workflow
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True))
@click.argument('interim_filepath', type=click.Path())
@click.argument('output_filepath', type=click.Path())
@click.argument('external_filepath', type=click.Path())
def main(input_filepath, interim_filepath, output_filepath, external_filepath):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger = logging.getLogger(__name__)
logger.info('Data Preprocessing')
logger.info('processing interim RNA')
#### process_rna.run(os.path.join(input_filepath,'rna'), os.path.join(interim_filepath,'rna'))
logger.info('processing interim ATAC')
# process_atac.merge_samples(os.path.join(input_filepath,'atac'), os.path.join(interim_filepath,'merged/atac'),
# split_delim = '.narrowPeak.bed', extension=".bed", type_prefix='', verbose=True)
logger.info('processing interim Footprinting')
### footprinting or motif scan
# process_atac.preprocess_footprinting(os.path.join(input_filepath,'footprinting'), os.path.join(interim_filepath,'merged/footprinting'))
# ran from cmd instead: (base) DERM-C02ZQ972MD6R:raw mguo123$ for file in `ls motif_scan/*bed`; do echo $file;f=${file#*/}; tissue=${f%%_*}; bedtools sort -i $file > merged_motif_scan/${tissue}_merged.bed; echo $tissue;done
# process_atac.preprocess_footprinting(os.path.join(input_filepath,'motif_scan'), os.path.join(interim_filepath,'merged/motif_scan'))
logger.info('processing interim HICHIP')
#### process_hichip.make_bedpe(os.path.join(input_filepath,'hichip'), os.path.join(interim_filepath,'hichip'))
#### process_hichip.make_csvs(os.path.join(input_filepath,'hichip'), os.path.join(interim_filepath,'merged'))
#### preprocessed with 1B_mg_preprocess_hichip.ipynb
# process_bedtools.sort_bed_dir(os.path.join(interim_filepath,'merged/anchors_bed'),os.path.join(interim_filepath,'merged/anchors_bed_sort'))
logger.info('annotating promoters')
promoter_file = os.path.join(external_filepath,'promoter_hg19_2000_500_sort.bed')
# process_bedtools.annotate_batch(promoter_file, os.path.join(interim_filepath,'merged/atac'),
# os.path.join(interim_filepath,'annon/promoter_atac'),
# extensions=['_hg19_2000_500_sort.bed','_merged.bed'],f=1E-6, wo=False)
### footprinting or motif scan
# process_bedtools.annotate_batch(promoter_file, os.path.join(interim_filepath,'merged/footprinting'),
# os.path.join(interim_filepath,'annon/promoter_footprinting'),
# extensions=['_hg19_2000_500_sort.bed','_merged.bed'],f=1E-6, wo=False)
process_bedtools.annotate_batch(promoter_file, os.path.join(input_filepath,'merged_motif_scan'),
os.path.join(interim_filepath,'annon/promoter_motif_scan'),
extensions=['_hg19_2000_500_sort.bed','_merged.bed'],f=1E-6, wo=False)
logger.info('annotating promoters with anchors that are close by')
# region_search_file = os.path.join(external_filepath,'promoter_hg19_5000_5000_sort.bed')
# process_bedtools.annotate_batch(region_search_file, os.path.join(interim_filepath,'merged/anchors_bed_sort'),
# os.path.join(interim_filepath,'annon/promoter_anchors'),
# extensions=['_hg19_5000_5000_sort.bed','_sort.bed'],f=1E-9, wo=True)
logger.info('annotating anchors')
# process_bedtools.annotate_batch(os.path.join(interim_filepath,'merged/anchors_bed_sort'),
# os.path.join(interim_filepath,'merged/atac'),
# os.path.join(interim_filepath,'annon/anchor_atac'),
# extensions=['_sort.bed','_merged.bed'],f=2.5E-6)
### footprinting or motif scan
# process_bedtools.annotate_batch(os.path.join(interim_filepath,'merged/anchors_bed_sort'),
# os.path.join(interim_filepath,'merged/footprinting'),
# os.path.join(interim_filepath,'annon/anchor_footprinting'),
# extensions=['_sort.bed','_merged.bed'],f=2.5E-6)
process_bedtools.annotate_batch(os.path.join(input_filepath,'merged_motif_scan'),
os.path.join(interim_filepath,'merged/motif_scan'),
os.path.join(interim_filepath,'annon/anchor_motif_scan'),
extensions=['_sort.bed','_merged.bed'],f=2.5E-6)
logger.info('creating crms')
rna_file = os.path.join(interim_filepath, 'rna', 'tissue_tpm_sym.csv')
tf_annon_file = os.path.join(external_filepath, 'HOCOMOCOv11_annotation.csv')
annon_file_path = os.path.join(interim_filepath,'annon')
loop_file_path = os.path.join(interim_filepath, 'merged/loops')
output_dir = os.path.join(output_filepath,'tissue_crms')
### footprinting or motif scan
# process_crms.create_crm_batch(rna_file, tf_annon_file, annon_file_path, loop_file_path,
# output_dir,#tissues_sel = ['Airway', 'Pancreas', 'Uterine'],
# type='all',THRES=0,verbose=True)
process_crms.create_crm_batch(rna_file, tf_annon_file, annon_file_path, loop_file_path,
output_dir,#tissues_sel = ['Airway', 'Pancreas', 'Uterine'],
choose_footprint=False, type='all',THRES=0,verbose=True)
# process_crms.create_crm_batch(rna_file, tf_annon_file, annon_file_path, loop_file_path,
# output_dir,type='all',THRES=1)
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
# not used in this stub but often useful for finding various files
project_dir = Path(__file__).resolve().parents[2]
print(project_dir)
# # find .env automagically by walking up directories until it's found, then
# # load up the .env entries as environment variables
# load_dotenv(find_dotenv())
main()
|
StarcoderdataPython
|
1601632
|
<reponame>devesh-todarwal/web-dev
import os
import requests
import xml.etree.ElementTree as ET
from bs4 import BeautifulSoup as soup
from difflib import SequenceMatcher
import numpy as np
def search_from(lst,name,max_results=10):
n = min(max_results,len(lst))
sim = [get_similarity(name,l) for l in lst]
lst = np.array(lst)
return list(reversed(lst[np.argsort(sim)][-n:]))
def generate_star_rating(rating):
rating = round(rating * 2) / 2
star_html = ''
count = 0
while rating > 0:
if rating < 1:
star_html += '<span class="fa fa-star-half-o checked"></span>\n'
else:
star_html += '<span class="fa fa-star checked"></span>\n'
rating -= 1
count += 1
while count < 5:
count += 1
star_html += '<span class="fa fa-star-o checked"></span>\n'
return star_html
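# Illustration of generate_star_rating() with an assumed example input: a
# rating of 3.5 yields three "fa-star" spans, one "fa-star-half-o" span and
# one "fa-star-o" span (3 full, 1 half, 1 empty).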
def get_similarity(s1, s2):
"""
Measure of how similar str s1 and s2 are.
Scale from 0 to 1
"""
t0 = sorted(list(set(s1.split(' ')).intersection(set(s2.split(' ')))))
t1 = sorted(list(set(t0 + s1.split(' '))))
t2 = sorted(list(set(t0 + s2.split(' '))))
r01 = SequenceMatcher(None, t0, t1).ratio()
r02 = SequenceMatcher(None, t0, t2).ratio()
r12 = SequenceMatcher(None, t1, t2).ratio()
return max(r01, r02, r12)
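# Illustration of get_similarity() with assumed example inputs: identical
# strings score 1.0, e.g. get_similarity("harry potter", "harry potter") == 1.0,
# while titles with no words in common score 0.0.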
def get_key():
with open('key') as f:
key = f.read()
return key
def get_image_url(isbn):
key = get_key()
res = requests.get(f'https://www.goodreads.com/book/isbn/{isbn}.xml?key={key}')
root = soup(res.text,'xml')
default_img = 'https://s.gr-assets.com/assets/nophoto/book/111x148-bcc042a9c91a29c1d680899eff700a03.png'
try:
small_url = root.book.small_image_url.get_text().strip()
url = root.book.image_url.get_text().strip()
if url == '':
if small_url == '':
return default_img
return small_url
except:
url = default_img
return url
def get_book_basic_backup(isbn):
key = get_key()
res = requests.get(f'https://www.goodreads.com/book/isbn/{isbn}?key={key}')
root = soup(res.text,'xml')
try:
try:
title = root.book.title.get_text()
except:
title = ''
try:
year = root.book.publication_year.get_text()
except:
year = ''
try:
author = root.book.authors.author.find('name').get_text()
except:
author = ''
except:
title = ''
author = ''
year = ''
return title, author, year
def get_book_info(isbn):
key = get_key()
res = requests.get(f'https://www.goodreads.com/book/isbn/{isbn}?key={key}')
root = soup(res.text,'xml')
try:
desc = root.book.description.get_text()
try:
root1 = soup(desc,'lxml')
short_desc = root1.blockquote.get_text()
except:
short_desc = ''
except:
short_desc = ''
desc = ''
return short_desc, desc
def get_rating_info(isbn):
key = get_key()
res1 = requests.get(f'https://www.goodreads.com/book/review_counts.json?isbns={isbn}&key={key}')
res2 = requests.get(f'https://www.goodreads.com/book/isbn/{isbn}?key={key}')
root = soup(res2.text,'xml')
try:
avg_rating = float(res1.json()["books"][0]["average_rating"])
except:
avg_rating = 0
try:
rating_dist = root.book.rating_dist.get_text().split('|')
rating_dist = [int(r.split(':')[1]) for r in rating_dist]
except:
rating_dist = [0, 0, 0, 0, 0, 0]
return avg_rating, rating_dist
def search_books(title):
pass
|
StarcoderdataPython
|
3220742
|
<reponame>Sagarikanaik96/Test2<gh_stars>0
# -*- coding: utf-8 -*-
# Copyright (c) 2020, veena and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Quotation(Document):
pass
def auto_create_supplier_quotation(doc,method):
supplier=frappe.db.get_value('Supplier',{'is_internal_supplier':1,'represents_company':doc.company},'supplier_name')
company=frappe.db.get_value('Customer',{'is_internal_Customer':1,'customer_name':doc.customer_name},'represents_company')
contact_person=frappe.db.get_value('Dynamic Link',{'parenttype':'Contact','link_doctype':'Supplier',"link_name":supplier},'parent')
qu_name=frappe.db.get_list('Document Specific Naming Series',filters={'parent':company,'parenttype':'Company'},fields={'*'})
warehouse=frappe.db.get_value('Company',{'company_name':company},'default_warehouse')
quotation_name="null"
for tup in qu_name:
if tup.reference_document=="Supplier Quotation":
quotation_name=tup.series
if quotation_name!="null":
if company:
if supplier:
tax_template=frappe.db.get_value('Purchase Taxes and Charges Template',{'company':doc.customer_name},'name')
tax_list=frappe.db.get_list("Purchase Taxes and Charges",filters={'parent':tax_template,'parenttype':'Purchase Taxes and Charges Template'},fields={'*'})
sq_doc=frappe.get_doc(dict(doctype = 'Supplier Quotation',
supplier=supplier,
naming_series=quotation_name,
company=company,
valid_till=doc.valid_till,
supplier_address=frappe.db.get_value("Dynamic Link",{"parenttype":"Address","link_doctype":"Supplier","link_name":supplier},"parent"),
contact_person=contact_person,
contact_email=frappe.db.get_value('Contact Email', {'parenttype':'Contact','parent':contact_person},'email_id'),
conversion_rate=1,
quotation_no=doc.name,
tc_name=doc.tc_name,
taxes_and_charges=tax_template,
terms=doc.terms,
total=doc.total,
total_taxes_and_charges=doc.total_taxes_and_charges,
grand_total=doc.grand_total,
base_grand_total=doc.base_grand_total,
rounded_total=doc.rounded_total,
base_rounded_total=doc.base_rounded_total,
quotation_type=doc.quotation_type,
opening_date=doc.opening_date,
rfq_no=frappe.db.get_value('Opportunity',doc.opportunity,'reference_no')
)).insert(ignore_mandatory=True)
for val in doc.items:
sq_doc.append('items', {
'item_code':val.item_code,
'qty':val.qty,
'uom':val.uom,
'stock_uom':val.stock_uom,
'rate':val.rate,
'amount':val.amount,
'base_rate':val.base_rate,
'base_amount':val.base_amount,
'description':val.description,
'conversion_factor':val.conversion_factor,
'warehouse':warehouse
})
for tax in tax_list:
sq_doc.append('taxes',{
'account_head':tax.account_head,
'charge_type':tax.charge_type,
'add_deduct_tax':'Add',
'category':'Total',
'description':tax.description,
'rate':frappe.db.get_value("Sales Taxes and Charges",{'parent':doc.name,'parenttype':'Quotation'},'rate'),
'tax_amount':frappe.db.get_value("Sales Taxes and Charges",{'parent':doc.name,'parenttype':'Quotation'},'tax_amount'),
'total':frappe.db.get_value("Sales Taxes and Charges",{'parent':doc.name,'parenttype':'Quotation'},'total'),
'tax_amount_after_discount_amount':frappe.db.get_value("Sales Taxes and Charges",{'parent':doc.name,'parenttype':'Quotation'},'tax_amount_after_discount_amount'),
'base_tax_amount':frappe.db.get_value("Sales Taxes and Charges",{'parent':doc.name,'parenttype':'Quotation'},'base_tax_amount'),
'base_total':frappe.db.get_value("Sales Taxes and Charges",{'parent':doc.name,'parenttype':'Quotation'},'base_total')
})
sq_doc.add_comment('Comment',' System created '+sq_doc.name)
sq_doc.save()
doc.add_comment('Comment',' Supplier Quotation: '+sq_doc.name)
else:
frappe.msgprint("Unable to create Supplier Quotation as customer: "+doc.customer_name +" is not associated with any company. Register the Company and submit the document: "+doc.name+ ". As Customer is not associated with any company, don't let Vendor submit the Quotation document.")
raise frappe.ValidationError("Unable to create Supplier Quotation as customer: "+doc.customer_name +" is not associated with any company. Register the Company and submit the document: "+doc.name+ ". As Customer is not associated with any company, don't let Vendor submit the Quotation document.")
else:
frappe.throw("Unable to save the Supplier Quotation as the naming series are unavailable . Please provide the naming series at the Company: "+company+" to save the document");
|
StarcoderdataPython
|
3237808
|
<reponame>bopopescu/pythonlib<gh_stars>0
#!/usr/bin/env python
# cardinal_pythonlib/sqlalchemy/merge_db.py
"""
===============================================================================
Original code copyright (C) 2009-2020 <NAME> (<EMAIL>).
This file is part of cardinal_pythonlib.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===============================================================================
**Function "merge_db" to merge two databases via SQLAlchemy.**
*Notes:*
Note in passing: there is no common base class for SQLAlchemy ORM instances
(it's not :class:`DeclarativeMeta`). For example, in CamCOPS:
.. code-block:: none
> Phq9.__bases__
(<class 'camcops_server.cc_modules.cc_task.TaskHasPatientMixin'>,
<class 'camcops_server.cc_modules.cc_task.Task'>,
<class 'sqlalchemy.ext.declarative.api.Base'>)
... and that last :class:`Base` isn't a permanent class, just a newly named
thing; see :func:`sqlalchemy.ext.declarative.api.declarative_base`.
Again, with the CamCOPS classes:
.. code-block:: none
> issubclass(Phq9, Base)
True
> issubclass(Base, DeclarativeMeta)
False
> Base.__bases__
(<class 'object'>,)
So the best type hints we have are:
.. code-block:: none
class: Type
instance: object
"""
import sys
from typing import Any, Callable, Dict, List, Tuple, Type
import unittest
from sqlalchemy.engine import create_engine
from sqlalchemy.engine.base import Engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import lazyload, load_only
from sqlalchemy.orm import relationship
# noinspection PyProtectedMember
from sqlalchemy.orm.session import make_transient, Session, sessionmaker
from sqlalchemy.schema import sort_tables
from sqlalchemy.sql.schema import Column, ForeignKey, MetaData, Table
from sqlalchemy.sql.sqltypes import Integer, Text
from cardinal_pythonlib.dicts import map_keys_to_values
from cardinal_pythonlib.logs import (
get_brace_style_log_with_null_handler,
main_only_quicksetup_rootlogger,
)
from cardinal_pythonlib.sqlalchemy.dump import dump_database_as_insert_sql
from cardinal_pythonlib.sqlalchemy.orm_inspect import (
rewrite_relationships,
colname_to_attrname_dict,
copy_sqla_object,
get_orm_classes_by_table_name_from_base,
get_pk_attrnames,
)
from cardinal_pythonlib.sqlalchemy.schema import (
get_column_names,
get_table_names,
)
from cardinal_pythonlib.sqlalchemy.session import (
get_engine_from_session,
get_safe_url_from_engine,
get_safe_url_from_session,
SQLITE_MEMORY_URL,
)
from cardinal_pythonlib.sqlalchemy.table_identity import TableIdentity
log = get_brace_style_log_with_null_handler(__name__)
# =============================================================================
# TableDependency; get_all_dependencies
# =============================================================================
class TableDependency(object):
"""
Stores a table dependency for use in functions such as
:func:`sqlalchemy.schema.sort_tables`, which requires a tuple of two
:class:`Table` objects, in the order ``(parent, child)``, where ``child``
depends on ``parent`` (e.g. a field like ``child.parent_id`` refers to
``parent.id``).
"""
def __init__(self,
parent_table_id: TableIdentity = None,
child_table_id: TableIdentity = None,
parent_table: Table = None,
child_table: Table = None,
parent_tablename: str = None,
child_tablename: str = None,
metadata: MetaData = None) -> None:
"""
The parent and child tables can be specified by name, :class:`Table`
object, or our :class:`TableIdentity` descriptor class.
"""
overspecified = "Don't specify table with both TableIdentity and " \
"Table/tablename"
if parent_table_id:
self._parent = parent_table_id
assert parent_table is None and not parent_tablename, overspecified
else:
self._parent = TableIdentity(table=parent_table,
tablename=parent_tablename,
metadata=metadata)
if child_table_id:
self._child = child_table_id
assert child_table is None and not child_tablename, overspecified
else:
self._child = TableIdentity(table=child_table,
tablename=child_tablename,
metadata=metadata)
def __str__(self) -> str:
return f"{self.child_tablename} -> {self.parent_tablename}"
def __repr__(self) -> str:
return (
f"TableDependency({self.child_tablename!r} "
f"depends on {self.parent_tablename!r})"
)
def set_metadata(self, metadata: MetaData) -> None:
"""
Sets the metadata for the parent and child tables.
"""
self._parent.set_metadata(metadata)
self._child.set_metadata(metadata)
def set_metadata_if_none(self, metadata: MetaData) -> None:
"""
Sets the metadata for the parent and child tables, unless they were
set already.
"""
self._parent.set_metadata_if_none(metadata)
self._child.set_metadata_if_none(metadata)
@property
def parent_table(self) -> Table:
"""
Returns the parent table as a :class:`Table`.
"""
return self._parent.table
@property
def child_table(self) -> Table:
"""
Returns the child table as a :class:`Table`.
"""
return self._child.table
@property
def parent_tablename(self) -> str:
"""
Returns the parent table's string name.
"""
return self._parent.tablename
@property
def child_tablename(self) -> str:
"""
Returns the child table's string name.
"""
return self._child.tablename
def sqla_tuple(self) -> Tuple[Table, Table]:
"""
Returns the tuple ``(parent_table, child_table)``, both as
:class:`Table` objects.
"""
return self.parent_table, self.child_table
def get_all_dependencies(metadata: MetaData,
extra_dependencies: List[TableDependency] = None,
sort: bool = True) \
-> List[TableDependency]:
"""
Describes how the tables found in the metadata depend on each other.
(If table B contains a foreign key to table A, for example, then B depends
on A.)
Args:
metadata: the metadata to inspect
extra_dependencies: additional table dependencies to specify manually
sort: sort into alphabetical order of (parent, child) table names?
Returns:
a list of :class:`TableDependency` objects
See :func:`sort_tables_and_constraints` for method.
"""
extra_dependencies = extra_dependencies or [] # type: List[TableDependency] # noqa
for td in extra_dependencies:
td.set_metadata_if_none(metadata)
dependencies = set([td.sqla_tuple() for td in extra_dependencies])
tables = list(metadata.tables.values()) # type: List[Table]
for table in tables:
for fkc in table.foreign_key_constraints:
if fkc.use_alter is True:
# http://docs.sqlalchemy.org/en/latest/core/constraints.html#sqlalchemy.schema.ForeignKeyConstraint.params.use_alter # noqa
continue
dependent_on = fkc.referred_table
if dependent_on is not table:
dependencies.add((dependent_on, table))
if hasattr(table, "_extra_dependencies"):
# noinspection PyProtectedMember
dependencies.update(
(parent, table) for parent in table._extra_dependencies
)
dependencies = [
TableDependency(parent_table=parent, child_table=child)
for parent, child in dependencies
]
if sort:
dependencies.sort(key=lambda td_: (td_.parent_tablename,
td_.child_tablename))
return dependencies
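# Usage sketch for get_all_dependencies(); "Base" is a hypothetical declarative
# base whose MetaData describes the schema of interest:
#     for dep in get_all_dependencies(Base.metadata):
#         print(dep)  # prints e.g. "child_table -> parent_table"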
# =============================================================================
# TableDependencyClassification; classify_tables_by_dependency_type
# =============================================================================
class TableDependencyClassification(object):
"""
Class to describe/classify a table in terms of its dependencies.
"""
def __init__(self,
table: Table,
children: List[Table] = None,
parents: List[Table] = None) -> None:
"""
Args:
table: the table in question
children: its children (things that depend on it)
parents: its parents (things that it depends on)
"""
self.table = table
self.children = children or [] # type: List[Table]
self.parents = parents or [] # type: List[Table]
self.circular = False
self.circular_chain = [] # type: List[Table]
@property
def is_child(self) -> bool:
"""
Is this table a child?
"""
return bool(self.parents)
@property
def is_parent(self) -> bool:
"""
Is this table a parent?
"""
return bool(self.children)
@property
def standalone(self) -> bool:
"""
Is this table standalone (neither a child nor a parent)?
"""
return not self.is_child and not self.is_parent
@property
def tablename(self) -> str:
"""
Returns the table's name.
"""
return self.table.name
@property
def parent_names(self) -> List[str]:
"""
Returns the names of this table's parents.
"""
return [t.name for t in self.parents]
@property
def child_names(self) -> List[str]:
"""
Returns the names of this table's children.
"""
return [t.name for t in self.children]
def set_circular(self, circular: bool, chain: List[Table] = None) -> None:
"""
Mark this table as circular (or not).
Args:
circular: is it circular?
chain: if it's circular, this should be the list of tables
participating in the circular chain
"""
self.circular = circular
self.circular_chain = chain or [] # type: List[Table]
@property
def circular_description(self) -> str:
"""
Description of the circular chain.
"""
return " -> ".join(t.name for t in self.circular_chain)
@property
def description(self) -> str:
"""
Short description.
"""
if self.is_parent and self.is_child:
desc = "parent+child"
elif self.is_parent:
desc = "parent"
elif self.is_child:
desc = "child"
else:
desc = "standalone"
if self.circular:
desc += f"+CIRCULAR({self.circular_description})"
return desc
def __str__(self) -> str:
return f"{self.tablename}:{self.description}"
def __repr__(self) -> str:
return (
f"TableDependencyClassification("
f"{self.tablename!r}:{self.description})"
)
def classify_tables_by_dependency_type(
metadata: MetaData,
extra_dependencies: List[TableDependency] = None,
sort: bool = True) \
-> List[TableDependencyClassification]:
"""
Inspects a metadata object (optionally adding other specified dependencies)
and returns a list of objects describing their dependencies.
Args:
metadata: the :class:`MetaData` to inspect
extra_dependencies: additional dependencies
sort: sort the results by table name?
Returns:
list of :class:`TableDependencyClassification` objects, one for each
table
"""
tables = list(metadata.tables.values()) # type: List[Table]
all_deps = get_all_dependencies(metadata, extra_dependencies)
tdcmap = {} # type: Dict[Table, TableDependencyClassification]
for table in tables:
parents = [td.parent_table for td in all_deps
if td.child_table == table]
children = [td.child_table for td in all_deps
if td.parent_table == table]
tdcmap[table] = TableDependencyClassification(
table, parents=parents, children=children
)
# Check for circularity
def parents_contain(start: Table,
probe: Table) -> Tuple[bool, List[Table]]:
tdc_ = tdcmap[start]
if probe in tdc_.parents:
return True, [start, probe]
for parent in tdc_.parents:
contains_, chain_ = parents_contain(start=parent, probe=probe)
if contains_:
return True, [start] + chain_
return False, []
def children_contain(start: Table,
probe: Table) -> Tuple[bool, List[Table]]:
tdc_ = tdcmap[start]
if probe in tdc_.children:
return True, [start, probe]
for child in tdc_.children:
contains_, chain_ = children_contain(start=child, probe=probe)
if contains_:
return True, [start] + chain_
return False, []
for table in tables:
tdc = tdcmap[table]
contains, chain = parents_contain(start=table, probe=table)
if contains:
tdc.set_circular(contains, chain)
else:
contains, chain = children_contain(start=table, probe=table)
if contains:
tdc.set_circular(contains, chain)
else:
tdc.set_circular(False)
classifications = list(tdcmap.values())
if sort:
classifications.sort(key=lambda c: c.tablename)
return classifications
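# Illustrative sketch (not part of the original module): how the classifier
# above might be used on a tiny two-table schema. Assumes the SQLAlchemy names
# used elsewhere in this module (MetaData, Table, Column, Integer, ForeignKey)
# are in scope.
def _example_classify_tiny_schema() -> None:
    metadata = MetaData()
    Table("parent", metadata,
          Column("id", Integer, primary_key=True))
    Table("child", metadata,
          Column("id", Integer, primary_key=True),
          Column("parent_id", Integer, ForeignKey("parent.id")))
    for tdc in classify_tables_by_dependency_type(metadata,
                                                  extra_dependencies=[]):
        print(tdc)  # typically "child:child" and "parent:parent"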
# =============================================================================
# TranslationContext (for merge_db)
# =============================================================================
class TranslationContext(object):
"""
Information-passing object for user callbacks from :func:`merge_db`.
Args:
oldobj:
The old SQLAlchemy ORM object from the source session.
newobj:
The framework's go at building a new SQLAlchemy ORM object, which
will be inserted into the destination session.
The sequence is:
1. ``newobj`` is created
2. a :class:`TranslationContext` is created, referring to
``newobj``
3. The ``translate_fn`` parameter to :func:`merge_db` will be
called with the :class:`TranslationContext` as its parameter
               - the user-supplied :func:`translate_fn` function can, at this
point, modify the ``newobj`` attribute
- if the user function sets the ``newobj`` attribute to
``None``, this object will be skipped
4. If the :class:`TranslationContext`'s ``newobj`` member is not
``None``, the new object is inserted into the destination
session.
objmap:
A dictionary mapping old to new objects, for objects in tables
other than standalone tables.
table:
SQLAlchemy ``Table`` object from the metadata. (Not necessarily
bound to any session, but will reflect the structure of the
destination, not necessarily the source, since the merge operation
assumes that the metadata describes the destination.)
tablename:
Table name that corresponds to ``table``.
src_session:
The SQLAlchemy :class:`Session` object for the source.
dst_session:
The SQLAlchemy :class:`Session` object for the destination.
src_engine:
The SQLAlchemy :class:`Engine` object for the source.
dst_engine:
The SQLAlchemy :class:`Engine` object for the destination.
missing_src_columns:
Names of columns known to be present in the destination but absent
from the source.
info:
Extra dictionary for additional user-specified information.
It is possible that ``oldobj`` and ``newobj`` are the SAME OBJECT.
"""
def __init__(self,
oldobj: object,
newobj: object,
objmap: Dict[object, object],
table: Table,
tablename: str,
src_session: Session,
dst_session: Session,
src_engine: Engine,
dst_engine: Engine,
src_table_names: List[str],
missing_src_columns: List[str] = None,
info: Dict[str, Any] = None) -> None:
self.oldobj = oldobj
self.newobj = newobj
self.objmap = objmap
self.table = table
self.tablename = tablename
self.src_session = src_session
self.dst_session = dst_session
self.src_engine = src_engine
self.dst_engine = dst_engine
self.src_table_names = src_table_names
self.missing_src_columns = missing_src_columns or [] # type: List[str]
self.info = info or {} # type: Dict[str, Any]
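# Illustrative sketch (not part of the original module): a translate_fn of the
# kind accepted by merge_db() below. The table name "secret_table" and the
# attribute "comment" are hypothetical placeholders.
def _example_translate_fn(trcon: TranslationContext) -> None:
    if trcon.tablename == "secret_table":
        trcon.newobj = None  # setting newobj to None skips this object entirely
    elif trcon.newobj is not None and hasattr(trcon.newobj, "comment"):
        trcon.newobj.comment = None  # blank a (hypothetical) column on the way through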
# =============================================================================
# merge_db
# =============================================================================
def merge_db(base_class: Type,
src_engine: Engine,
dst_session: Session,
allow_missing_src_tables: bool = True,
allow_missing_src_columns: bool = True,
translate_fn: Callable[[TranslationContext], None] = None,
skip_tables: List[TableIdentity] = None,
only_tables: List[TableIdentity] = None,
tables_to_keep_pks_for: List[TableIdentity] = None,
extra_table_dependencies: List[TableDependency] = None,
dummy_run: bool = False,
info_only: bool = False,
report_every: int = 1000,
flush_per_table: bool = True,
flush_per_record: bool = False,
commit_with_flush: bool = False,
commit_at_end: bool = True,
prevent_eager_load: bool = True,
trcon_info: Dict[str, Any] = None) -> None:
"""
Copies an entire database as far as it is described by ``metadata`` and
``base_class``, from SQLAlchemy ORM session ``src_session`` to
``dst_session``, and in the process:
- creates new primary keys at the destination, or raises an error if it
doesn't know how (typically something like: ``Field 'name' doesn't have a
default value``)
- maintains relationships, or raises an error if it doesn't know how
Basic method:
- Examines the metadata for the SQLAlchemy ORM base class you provide.
- Assumes that the tables exist (in the destination).
- For each table/ORM class found in the metadata:
- Queries (via the ORM) from the source.
- For each ORM instance retrieved:
- Writes information to the destination SQLAlchemy session.
- If that ORM object has relationships, process them too.
If a table is missing in the source, then that's OK if and only if
``allow_missing_src_tables`` is set. (Similarly with columns and
``allow_missing_src_columns``; we ask the ORM to perform a partial load,
of a subset of attributes only.)
Args:
base_class:
your ORM base class, e.g. from ``Base = declarative_base()``
src_engine:
            SQLAlchemy :class:`Engine` for the source database
dst_session:
SQLAlchemy :class:`Session` for the destination database
allow_missing_src_tables:
proceed if tables are missing from the source (allowing you to
import from older, incomplete databases)
allow_missing_src_columns:
proceed if columns are missing from the source (allowing you to
import from older, incomplete databases)
translate_fn:
optional function called with each instance, so you can modify
instances in the pipeline. Signature:
.. code-block:: python
def my_translate_fn(trcon: TranslationContext) -> None:
# We can modify trcon.newobj, or replace it (including
# setting trcon.newobj = None to omit this object).
pass
skip_tables:
tables to skip (specified as a list of :class:`TableIdentity`)
only_tables:
tables to restrict the processor to (specified as a list of
:class:`TableIdentity`)
tables_to_keep_pks_for:
tables for which PKs are guaranteed to be safe to insert into the
destination database, without modification (specified as a list of
:class:`TableIdentity`)
extra_table_dependencies:
optional list of :class:`TableDependency` objects (q.v.)
dummy_run:
don't alter the destination database
info_only:
show info, then stop
report_every:
provide a progress report every *n* records
flush_per_table:
flush the session after every table (reasonable)
flush_per_record:
flush the session after every instance (AVOID this if tables may
refer to themselves)
commit_with_flush:
``COMMIT`` with each flush?
commit_at_end:
``COMMIT`` when finished?
prevent_eager_load:
disable any eager loading (use lazy loading instead)
trcon_info:
additional dictionary passed to ``TranslationContext.info``
(see :class:`.TranslationContext`)
"""
log.info("merge_db(): starting")
if dummy_run:
log.warning("Dummy run only; destination will not be changed")
# Check parameters before we modify them
if only_tables is not None and not only_tables:
log.warning("... only_tables == []; nothing to do")
return
# Finalize parameters
skip_tables = skip_tables or [] # type: List[TableIdentity]
only_tables = only_tables or [] # type: List[TableIdentity]
tables_to_keep_pks_for = tables_to_keep_pks_for or [] # type: List[TableIdentity] # noqa
extra_table_dependencies = extra_table_dependencies or [] # type: List[TableDependency] # noqa
trcon_info = trcon_info or {} # type: Dict[str, Any]
# We need both Core and ORM for the source.
# noinspection PyUnresolvedReferences
metadata = base_class.metadata # type: MetaData
src_session = sessionmaker(bind=src_engine)() # type: Session
dst_engine = get_engine_from_session(dst_session)
tablename_to_ormclass = get_orm_classes_by_table_name_from_base(base_class)
# Tell all TableIdentity objects about their metadata
for tilist in [skip_tables, only_tables, tables_to_keep_pks_for]:
for ti in tilist:
ti.set_metadata_if_none(metadata)
for td in extra_table_dependencies:
td.set_metadata_if_none(metadata)
# Get all lists of tables as their names
skip_table_names = [ti.tablename for ti in skip_tables]
only_table_names = [ti.tablename for ti in only_tables]
tables_to_keep_pks_for = [ti.tablename for ti in tables_to_keep_pks_for] # type: List[str] # noqa
# ... now all are of type List[str]
# Safety check: this is an imperfect check for source == destination, but
# it is fairly easy to pass in the wrong URL, so let's try our best:
_src_url = get_safe_url_from_engine(src_engine)
_dst_url = get_safe_url_from_session(dst_session)
assert _src_url != _dst_url or _src_url == SQLITE_MEMORY_URL, (
"Source and destination databases are the same!"
)
# Check the right tables are present.
src_tables = sorted(get_table_names(src_engine))
dst_tables = sorted(list(tablename_to_ormclass.keys()))
log.debug("Source tables: {!r}", src_tables)
log.debug("Destination tables: {!r}", dst_tables)
if not allow_missing_src_tables:
missing_tables = sorted(
d for d in dst_tables
if d not in src_tables and d not in skip_table_names
)
if missing_tables:
raise RuntimeError("The following tables are missing from the "
"source database: " + repr(missing_tables))
table_num = 0
overall_record_num = 0
tables = list(metadata.tables.values()) # type: List[Table]
# Very helpfully, MetaData.sorted_tables produces tables in order of
# relationship dependency ("each table is preceded by all tables which
# it references");
# http://docs.sqlalchemy.org/en/latest/core/metadata.html
# HOWEVER, it only works if you specify ForeignKey relationships
# explicitly.
# We can also add in user-specified dependencies, and therefore can do the
# sorting in one step with sqlalchemy.schema.sort_tables:
ordered_tables = sort_tables(
tables,
extra_dependencies=[td.sqla_tuple() for td in extra_table_dependencies]
)
# Note that the ordering is NOT NECESSARILY CONSISTENT, though (in that
# the order of stuff it doesn't care about varies across runs).
all_dependencies = get_all_dependencies(metadata, extra_table_dependencies)
dep_classifications = classify_tables_by_dependency_type(
metadata, extra_table_dependencies)
circular = [tdc for tdc in dep_classifications if tdc.circular]
assert not circular, f"Circular dependencies! {circular!r}"
log.debug("All table dependencies: {}",
"; ".join(str(td) for td in all_dependencies))
log.debug("Table dependency classifications: {}",
"; ".join(str(c) for c in dep_classifications))
log.info("Processing tables in the order: {!r}",
[table.name for table in ordered_tables])
objmap = {}
def flush() -> None:
if not dummy_run:
log.debug("Flushing session")
dst_session.flush()
if commit_with_flush:
log.debug("Committing...")
dst_session.commit()
def translate(oldobj_: object, newobj_: object) -> object:
if translate_fn is None:
return newobj_
tc = TranslationContext(oldobj=oldobj_,
newobj=newobj_,
objmap=objmap,
table=table,
tablename=tablename,
src_session=src_session,
dst_session=dst_session,
src_engine=src_engine,
dst_engine=dst_engine,
missing_src_columns=missing_columns,
src_table_names=src_tables,
info=trcon_info)
translate_fn(tc)
if tc.newobj is None:
log.debug("Instance skipped by user-supplied translate_fn")
return tc.newobj
# -------------------------------------------------------------------------
# Now, per table/ORM class...
# -------------------------------------------------------------------------
for table in ordered_tables:
tablename = table.name
if tablename in skip_table_names:
log.info("... skipping table {!r} (as per skip_tables)", tablename)
continue
if only_table_names and tablename not in only_table_names:
log.info("... ignoring table {!r} (as per only_tables)", tablename)
continue
if allow_missing_src_tables and tablename not in src_tables:
log.info("... ignoring table {!r} (not in source database)",
tablename)
continue
table_num += 1
table_record_num = 0
src_columns = sorted(get_column_names(src_engine, tablename))
dst_columns = sorted([column.name for column in table.columns])
missing_columns = sorted(list(set(dst_columns) - set(src_columns)))
if not allow_missing_src_columns and missing_columns:
raise RuntimeError(
f"The following columns are missing from source table "
f"{tablename!r}: {missing_columns!r}")
orm_class = tablename_to_ormclass[tablename]
pk_attrs = get_pk_attrnames(orm_class)
c2a = colname_to_attrname_dict(orm_class)
missing_attrs = map_keys_to_values(missing_columns, c2a)
tdc = [tdc for tdc in dep_classifications if tdc.table == table][0]
log.info("Processing table {!r} via ORM class {!r}",
tablename, orm_class)
log.debug("PK attributes: {!r}", pk_attrs)
log.debug("Table: {!r}", table)
log.debug("Dependencies: parents = {!r}; children = {!r}",
tdc.parent_names, tdc.child_names)
if info_only:
log.debug("info_only; skipping table contents")
continue
def wipe_primary_key(inst: object) -> None:
for attrname in pk_attrs:
setattr(inst, attrname, None)
query = src_session.query(orm_class)
if allow_missing_src_columns and missing_columns:
src_attrs = map_keys_to_values(src_columns, c2a)
log.info("Table {} is missing columns {} in the source",
tablename, missing_columns)
log.debug("... using only columns {} via attributes {}",
src_columns, src_attrs)
query = query.options(load_only(*src_attrs))
# PROBLEM: it will not ignore the PK.
if prevent_eager_load:
query = query.options(lazyload("*"))
wipe_pk = tablename not in tables_to_keep_pks_for
# How best to deal with relationships?
#
# This doesn't work:
# - process tables in order of dependencies, eager-loading
# relationships with
# for relationship in insp.mapper.relationships: # type: RelationshipProperty # noqa
# related_col = getattr(orm_class, relationship.key)
# query = query.options(joinedload(related_col))
# - expunge from old session / make_transient / wipe_primary_key/ add
# to new session
# ... get errors like
# sqlalchemy.exc.InvalidRequestError: Object '<Parent at
# 0x7f99492440b8>' is already attached to session '7' (this is
# '6')
#
# ... at the point of dst_session.add(instance)
# ... when adding the object on the child side of the relationship
# ... I suspect that we move the Parent from session S to session D,
# but when we eager-load the Parent from the Child, that makes
# another in session S, so when we add the Child to session D, its
# parent is in session S, which is wrong.
#
# We must, therefore, take a more interventional approach, in which we
# maintain a copy of the old object, make a copy using
# copy_sqla_object, and re-assign relationships accordingly.
for instance in query.all():
# log.debug("Source instance: {!r}", instance)
table_record_num += 1
overall_record_num += 1
if table_record_num % report_every == 0:
log.info("... progress{}: on table {} ({}); record {} this "
"table; overall record {}",
" (DUMMY RUN)" if dummy_run else "",
table_num, tablename,
table_record_num, overall_record_num)
if tdc.standalone:
# Our table has neither parents nor children. We can therefore
# simply move the instance from one session to the other,
# blanking primary keys.
# https://stackoverflow.com/questions/14636192/sqlalchemy-modification-of-detached-object # noqa
src_session.expunge(instance)
make_transient(instance)
if wipe_pk:
wipe_primary_key(instance)
instance = translate(instance, instance)
if not instance:
continue # translate_fn elected to skip it
if not dummy_run:
dst_session.add(instance)
# new PK will be created when session is flushed
else:
# Our table has either parents or children. We therefore make
# a copy and place the COPY in the destination session. If
# this object may be a parent, we maintain a log (in objmap)
# of the old-to-new mapping. If this object is a child, we
# re-assign its relationships based on the old-to-new mapping
# (since we will have processed the parent table first, having
# carefully ordered them in advance).
oldobj = instance # rename for clarity
newobj = copy_sqla_object(
oldobj, omit_pk=wipe_pk, omit_fk=True,
omit_attrs=missing_attrs, debug=False
)
rewrite_relationships(oldobj, newobj, objmap, debug=False,
skip_table_names=skip_table_names)
newobj = translate(oldobj, newobj)
if not newobj:
continue # translate_fn elected to skip it
if not dummy_run:
dst_session.add(newobj)
# new PK will be created when session is flushed
if tdc.is_parent:
objmap[oldobj] = newobj # for its children's benefit
if flush_per_record:
flush()
if flush_per_table:
flush()
flush()
if commit_at_end:
log.debug("Committing...")
dst_session.commit()
log.info("merge_db(): finished")
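# Illustrative sketch (not part of the original module): a minimal merge_db()
# call. The database URLs are placeholders; base_class is your declarative
# Base describing the destination schema.
def _example_merge(base_class: Type,
                   src_url: str = "sqlite:///source.db",
                   dst_url: str = "sqlite:///destination.db") -> None:
    src_engine = create_engine(src_url)
    dst_session = sessionmaker(bind=create_engine(dst_url))()
    merge_db(
        base_class=base_class,
        src_engine=src_engine,
        dst_session=dst_session,
        allow_missing_src_tables=True,
        allow_missing_src_columns=True,
        commit_at_end=True,
    )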
# =============================================================================
# Unit tests
# =============================================================================
class MergeTestMixin(object):
"""
Mixin to create source/destination databases as in-memory SQLite databases
for unit testing purposes.
"""
def __init__(self, *args, echo: bool = False, **kwargs) -> None:
self.src_engine = create_engine(SQLITE_MEMORY_URL, echo=echo) # type: Engine # noqa
self.dst_engine = create_engine(SQLITE_MEMORY_URL, echo=echo) # type: Engine # noqa
self.src_session = sessionmaker(bind=self.src_engine)() # type: Session # noqa
self.dst_session = sessionmaker(bind=self.dst_engine)() # type: Session # noqa
# log.critical("SRC SESSION: {}", self.src_session)
# log.critical("DST SESSION: {}", self.dst_session)
self.Base = declarative_base()
# noinspection PyArgumentList
super().__init__(*args, **kwargs)
def dump_source(self) -> None:
log.warning("Dumping source")
dump_database_as_insert_sql(
engine=self.src_engine,
fileobj=sys.stdout,
include_ddl=True,
multirow=True
)
def dump_destination(self) -> None:
log.warning("Dumping destination")
dump_database_as_insert_sql(
engine=self.dst_engine,
fileobj=sys.stdout,
include_ddl=True,
multirow=True
)
def do_merge(self, dummy_run: bool = False) -> None:
merge_db(
base_class=self.Base,
src_engine=self.src_engine,
dst_session=self.dst_session,
allow_missing_src_tables=False,
allow_missing_src_columns=True,
translate_fn=None,
skip_tables=None,
only_tables=None,
extra_table_dependencies=None,
dummy_run=dummy_run,
report_every=1000
)
class MergeTestPlain(MergeTestMixin, unittest.TestCase):
"""
Unit tests for a simple merge operation.
*Notes re unit testing:*
- tests are found by virtue of the fact that their names start with
"test"; see
https://docs.python.org/3.6/library/unittest.html#basic-example
- A separate instance of the class is created for each test, and in each
case is called with:
.. code-block:: python
setUp()
testSOMETHING()
tearDown()
... see https://docs.python.org/3.6/library/unittest.html#test-cases
    - If you use mixins, they go BEFORE :class:`unittest.TestCase` in the
      base-class list (as in ``MergeTestPlain(MergeTestMixin, unittest.TestCase)``); see
https://stackoverflow.com/questions/1323455/python-unit-test-with-base-and-sub-class
""" # noqa
def setUp(self) -> None:
# log.info('In setUp()')
class Parent(self.Base):
__tablename__ = "parent"
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(Text)
class Child(self.Base):
__tablename__ = "child"
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(Text)
parent_id = Column(Integer, ForeignKey("parent.id"))
parent = relationship(Parent)
self.Base.metadata.create_all(self.src_engine)
self.Base.metadata.create_all(self.dst_engine)
p1 = Parent(name="Parent 1")
p2 = Parent(name="Parent 2")
c1 = Child(name="Child 1")
c2 = Child(name="Child 2")
c1.parent = p1
c2.parent = p2
self.src_session.add_all([p1, p2, c1, c2])
self.src_session.commit()
def tearDown(self) -> None:
pass
# log.info('In tearDown()')
def test_source(self) -> None:
self.dump_source()
def test_dummy(self) -> None:
log.info("Testing merge_db() in dummy run mode")
self.do_merge(dummy_run=True)
self.dst_session.commit()
self.dump_destination()
def test_merge_to_empty(self) -> None:
log.info("Testing merge_db() to empty database")
self.do_merge(dummy_run=False)
self.dst_session.commit()
self.dump_destination()
# @unittest.skip
def test_merge_to_existing(self) -> None:
log.info("Testing merge_db() to pre-populated database")
self.do_merge(dummy_run=False)
self.dst_session.commit()
self.do_merge(dummy_run=False)
self.dst_session.commit()
self.dump_destination()
class MergeTestCircular(MergeTestMixin, unittest.TestCase):
"""
Unit tests including a circular dependency, which will fail.
"""
@unittest.expectedFailure
def test_setup_circular(self):
class Parent(self.Base):
__tablename__ = "parent"
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(Text)
child_id = Column(Integer, ForeignKey("child.id"))
child = relationship("Child", foreign_keys=[child_id])
class Child(self.Base):
__tablename__ = "child"
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(Text)
parent_id = Column(Integer, ForeignKey("parent.id"))
parent = relationship(Parent, foreign_keys=[parent_id])
self.Base.metadata.create_all(self.src_engine)
self.Base.metadata.create_all(self.dst_engine)
p1 = Parent(name="Parent 1")
p2 = Parent(name="Parent 2")
c1 = Child(name="Child 1")
c2 = Child(name="Child 2")
c1.parent = p1
c2.parent = p2
p1.child = c1
p2.child = c2
self.src_session.add_all([p1, p2, c1, c2])
self.src_session.commit() # will raise sqlalchemy.exc.CircularDependencyError # noqa
@unittest.expectedFailure
def test_circular(self) -> None:
self.test_setup_circular() # fails here
log.info("Testing merge_db() with circular relationship")
self.do_merge(dummy_run=False) # would fail here, but fails earlier!
self.dst_session.commit()
self.dump_destination()
# =============================================================================
# main
# =============================================================================
# run with "python merge_db.py -v" to be verbose
if __name__ == "__main__":
main_only_quicksetup_rootlogger()
unittest.main()
# --- StarcoderdataPython 1785096: kdar/rust-python-static-example, src/main.py ---
import sysconfig
print(sysconfig.get_config_var('LDVERSION')
or sysconfig.get_config_var('py_version_short'))
# --- StarcoderdataPython 1778233 ---
# -*- coding: utf-8 -*-
#
# Dell EMC OpenManage Ansible Modules
# Version 4.0.0
# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from io import StringIO
from ssl import SSLError
import pytest
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible_collections.dellemc.openmanage.plugins.modules import ome_active_directory
from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
AD_URI = "AccountService/ExternalAccountProvider/ADAccountProvider"
TEST_CONNECTION = "AccountService/ExternalAccountProvider/Actions/ExternalAccountProvider.TestADConnection"
NO_CHANGES_MSG = "No changes found to be applied."
CHANGES_FOUND = "Changes found to be applied."
MAX_AD_MSG = "Unable to add the account provider because the maximum number of configurations allowed for an" \
" Active Directory service is {0}."
CREATE_SUCCESS = "Successfully added the Active Directory service."
MODIFY_SUCCESS = "Successfully modified the Active Directory service."
DELETE_SUCCESS = "Successfully deleted the Active Directory service."
DOM_SERVER_MSG = "Specify the domain server. Domain server is required to create an Active Directory service."
GRP_DOM_MSG = "Specify the group domain. Group domain is required to create an Active Directory service."
CERT_INVALID = "The provided certificate file path is invalid or not readable."
DOMAIN_ALLOWED_COUNT = "Maximum entries allowed for {0} lookup type is {1}."
TEST_CONNECTION_SUCCESS = "Test Connection is successful. "
TEST_CONNECTION_FAIL = "Test Connection has failed. "
ERR_READ_FAIL = "Unable to retrieve the error details."
INVALID_ID = "The provided Active Directory ID is invalid."
TIMEOUT_RANGE = "The {0} value is not in the range of {1} to {2}."
MAX_AD = 2
MIN_TIMEOUT = 15
MAX_TIMEOUT = 300
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_active_directory.'
MODULE_UTIL_PATH = 'ansible_collections.dellemc.openmanage.plugins.module_utils.ome.'
@pytest.fixture
def ome_connection_mock_for_ad(mocker, ome_response_mock):
connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME')
ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value
ome_connection_mock_obj.invoke_request.return_value = ome_response_mock
return ome_connection_mock_obj
class TestOmeAD(FakeAnsibleModule):
module = ome_active_directory
@pytest.mark.parametrize("params", [
{"module_args": {"name": "domdev"}, "json_data": {"value": [{'Name': 'domdev', 'Id': 12}]},
"ad": {'Name': 'domdev', 'Id': 12}, "ad_cnt": 1},
{"module_args": {"id": 12}, "json_data": {"value": [{'Name': 'domdev', 'Id': 12}]},
"ad": {'Name': 'domdev', 'Id': 12}, "ad_cnt": 1},
{"module_args": {"id": 11}, "json_data": {"value": [
{'Name': 'domdev', 'Id': 12}, {'Name': 'domdev', 'Id': 13}]}, "ad": {}, "ad_cnt": 2}])
def test_get_ad(self, params, ome_connection_mock_for_ad, ome_response_mock):
ome_response_mock.success = params.get("success", True)
f_module = self.get_module_mock(params=params['module_args'])
ome_response_mock.json_data = params["json_data"]
ad, ad_cnt = self.module.get_ad(f_module, ome_connection_mock_for_ad)
assert ad == params['ad']
assert ad_cnt == params['ad_cnt']
@pytest.mark.parametrize("params", [{
"module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["172.16.17.32"],
"group_domain": "domain.com", "name": "domdev"}, "msg": CREATE_SUCCESS}, {
"module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["172.16.17.32"],
"group_domain": "domain.com", "name": "domdev"}, "msg": CHANGES_FOUND, "check_mode": True}, {
"module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["172.16.17.32"],
"group_domain": "domain.com", "name": "domdev", "test_connection": True,
"domain_username": "user", "domain_password": "<PASSWORD>"},
"msg": "{0}{1}".format(TEST_CONNECTION_SUCCESS, CREATE_SUCCESS)}
])
def test_ome_active_directory_create_success(self, params, ome_connection_mock_for_ad, ome_response_mock,
ome_default_args, mocker):
ome_response_mock.success = params.get("success", True)
ome_response_mock.json_data = {"Name": "AD1"}
mocker.patch(MODULE_PATH + 'get_ad', return_value=params.get("get_ad", (None, 1)))
ome_default_args.update(params['module_args'])
result = self._run_module(ome_default_args, check_mode=params.get('check_mode', False))
assert result['msg'] == params['msg']
@pytest.mark.parametrize("params", [{
"module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["172.16.17.32"],
"group_domain": "domain.com", "name": "domdev"},
"get_ad": ({"Name": "ad_test", "Id": 21789, "ServerType": "MANUAL", "ServerName": ["192.168.20.181"],
"DnsServer": [], "GroupDomain": "dellemcdomain.com", "NetworkTimeOut": 120, "SearchTimeOut": 120,
"ServerPort": 3269, "CertificateValidation": False}, 1),
"msg": MODIFY_SUCCESS}, {
"module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["172.16.17.32"],
"group_domain": "domain.com", "name": "domdev", "test_connection": True,
"domain_username": "user", "domain_password": "<PASSWORD>"}, "get_ad":
({"Name": "ad_test", "Id": 21789, "ServerType": "MANUAL", "ServerName": ["192.168.20.181"], "DnsServer": [],
"GroupDomain": "dellemcdomain.com", "NetworkTimeOut": 120, "SearchTimeOut": 120, "ServerPort": 3269,
"CertificateValidation": False}, 1),
"msg": "{0}{1}".format(TEST_CONNECTION_SUCCESS, MODIFY_SUCCESS)},
{"module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["172.16.17.32"],
"group_domain": "dellemcdomain.com", "name": "domdev"},
"get_ad": ({"Name": "domdev", "Id": 21789, "ServerType": "MANUAL", "ServerName": ["172.16.17.32"],
"DnsServer": [], "GroupDomain": "dellemcdomain.com", "NetworkTimeOut": 120, "SearchTimeOut": 120,
"ServerPort": 3269, "CertificateValidation": False}, 1),
"msg": NO_CHANGES_MSG}, {
"module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["172.16.17.32"],
"group_domain": "dellemcdomain.com", "name": "domdev"},
"get_ad": ({"Name": "domdev", "Id": 21789, "ServerType": "MANUAL", "ServerName": ["192.168.20.181"],
"DnsServer": [], "GroupDomain": "dellemcdomain.com", "NetworkTimeOut": 120,
"SearchTimeOut": 120, "ServerPort": 3269, "CertificateValidation": False}, 1),
"msg": CHANGES_FOUND, "check_mode": True}
])
def test_ome_active_directory_modify_success(self, params, ome_connection_mock_for_ad, ome_response_mock,
ome_default_args, mocker):
ome_response_mock.success = params.get("success", True)
ome_response_mock.json_data = {"Name": "AD1"}
ome_connection_mock_for_ad.strip_substr_dict.return_value = params.get("get_ad", (None, 1))[0]
mocker.patch(MODULE_PATH + 'get_ad', return_value=params.get("get_ad", (None, 1)))
ome_default_args.update(params['module_args'])
result = self._run_module(ome_default_args, check_mode=params.get('check_mode', False))
assert result['msg'] == params['msg']
@pytest.mark.parametrize("params", [{
"module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["172.16.17.32"],
"group_domain": "domain.com", "name": "domdev", "state": "absent"},
"get_ad": ({"Name": "domdev", "Id": 21789, "ServerType": "MANUAL", "ServerName": ["192.168.20.181"],
"DnsServer": [], "GroupDomain": "dellemcdomain.com", "NetworkTimeOut": 120, "SearchTimeOut": 120,
"ServerPort": 3269, "CertificateValidation": False}, 1),
"msg": DELETE_SUCCESS},
{"module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["172.16.17.32"],
"group_domain": "dellemcdomain.com", "name": "domdev1", "state": "absent"},
"msg": NO_CHANGES_MSG}, {
"module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["172.16.17.32"],
"group_domain": "dellemcdomain.com", "name": "domdev", "state": "absent"},
"get_ad": ({"Name": "domdev", "Id": 21789, "ServerType": "MANUAL", "ServerName": ["192.168.20.181"],
"DnsServer": [], "GroupDomain": "dellemcdomain.com", "NetworkTimeOut": 120,
"SearchTimeOut": 120, "ServerPort": 3269, "CertificateValidation": False}, 1),
"msg": CHANGES_FOUND, "check_mode": True}
])
def test_ome_active_directory_delete_success(self, params, ome_connection_mock_for_ad, ome_response_mock,
ome_default_args, mocker):
ome_response_mock.success = params.get("success", True)
ome_response_mock.json_data = {"Name": "AD1"}
ome_connection_mock_for_ad.strip_substr_dict.return_value = params.get("get_ad", (None, 1))[0]
mocker.patch(MODULE_PATH + 'get_ad', return_value=params.get("get_ad", (None, 1)))
ome_default_args.update(params['module_args'])
result = self._run_module(ome_default_args, check_mode=params.get('check_mode', False))
assert result['msg'] == params['msg']
@pytest.mark.parametrize("params", [
{"module_args": {"domain_controller_lookup": "MANUAL", "group_domain": "domain.com", "name": "domdev"},
"msg": DOM_SERVER_MSG}, {"module_args": {"domain_controller_lookup": "MANUAL",
"domain_server": ["172.16.17.32", "172.16.17.32", "172.16.58.3",
"172.16.58.3"], "group_domain": "domain.com",
"name": "domdev"}, "msg": DOMAIN_ALLOWED_COUNT.format("MANUAL", 3)},
{"module_args": {"domain_server": ["dom1.com1", "dom2.com"], "group_domain": "domain.com", "name": "domdev"},
"msg": DOMAIN_ALLOWED_COUNT.format("DNS", 1)},
{"module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["172.16.17.32"], "name": "domdev"},
"msg": GRP_DOM_MSG}, {"module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["172.16.17.32"],
"group_domain": "domain.com", "name": "domdev", "network_timeout": 1},
"msg": TIMEOUT_RANGE.format("NetworkTimeOut", MIN_TIMEOUT, MAX_TIMEOUT)}, {
"module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["172.16.17.32"],
"group_domain": "domain.com", "name": "domdev", "search_timeout": 301},
"msg": TIMEOUT_RANGE.format("SearchTimeOut", MIN_TIMEOUT, MAX_TIMEOUT)}, {
"module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["172.16.17.32"],
"group_domain": "domain.com", "name": "domdev"}, "ad_cnt": 2,
"msg": MAX_AD_MSG.format(MAX_AD)}, {
"module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["172.16.17.32"],
"group_domain": "domain.com", "name": "domdev", "validate_certificate": True,
"certificate_file": "nonexistingcert.crt"}, "msg": CERT_INVALID}, {
"module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["172.16.17.32"],
"group_domain": "domain.com", "id": 1234, "validate_certificate": True,
"certificate_file": "nonexistingcert.crt"}, "msg": INVALID_ID}
])
def test_ome_active_directory_create_fails(self, params, ome_connection_mock_for_ad, ome_response_mock,
ome_default_args, mocker):
ome_response_mock.success = params.get("success", True)
ome_response_mock.json_data = {"Name": "AD1"}
mocker.patch(MODULE_PATH + 'get_ad', return_value=(None, params.get("ad_cnt", 1)))
ome_default_args.update(params['module_args'])
result = self._run_module_with_fail_json(ome_default_args)
assert result['msg'] == params['msg']
@pytest.mark.parametrize("params", [{
"module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["172.16.17.32"],
"group_domain": "testconnectionfail.com", "name": "domdev", "test_connection": True,
"domain_username": "user", "domain_password": "<PASSWORD>"},
"msg": "{0}{1}".format(TEST_CONNECTION_FAIL, "Unable to connect to the LDAP or AD server."), "is_http": True,
"error_info": {
"error": {"@Message.ExtendedInfo": [{"Message": "Unable to connect to the LDAP or AD server."}], }}}, {
"module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["172.16.17.32"],
"group_domain": "testconnectionfail.com", "name": "domdev", "test_connection": True,
"domain_username": "user", "domain_password": "<PASSWORD>"},
"msg": "{0}{1}".format(TEST_CONNECTION_FAIL, ERR_READ_FAIL), "is_http": True, "error_info": {
"error1": {"@Message.ExtendedInfo": [{"Message": "Unable to connect to the LDAP or AD server."}], }}}, {
"module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["172.16.17.32"],
"group_domain": "testconnectionfail.com", "name": "domdev", "test_connection": True,
"domain_username": "user", "domain_password": "<PASSWORD>"},
"msg": "{0}{1}".format(TEST_CONNECTION_FAIL, "Exception occurrence success."),
"error_info": "Exception occurrence success."}, ])
    def test_ome_active_directory_create_test_connection_fail(self, params, ome_default_args, mocker):
mocker.patch(MODULE_PATH + 'get_ad', return_value=(None, params.get("ad_cnt", 1)))
rest_obj_class_mock = mocker.patch(MODULE_PATH + 'RestOME')
ome_connection_mock_obj = rest_obj_class_mock.return_value.__enter__.return_value
if params.get("is_http"):
json_str = to_text(json.dumps(params['error_info']))
ome_connection_mock_obj.invoke_request.side_effect = HTTPError('http://testdellemcomead.<EMAIL>', 404,
'http error message',
{"accept-type": "application/json"},
StringIO(json_str))
else:
ome_connection_mock_obj.invoke_request.side_effect = Exception(params['error_info'])
ome_default_args.update(params['module_args'])
result = self._run_module_with_fail_json(ome_default_args)
assert result['msg'] == params['msg']
@pytest.mark.parametrize("exc_type",
[IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError])
def test_ome_active_directory_main_exception_failure_case(self, exc_type, mocker, ome_default_args,
ome_connection_mock_for_ad, ome_response_mock):
ome_default_args.update({"state": "absent", "name": "t1"})
ome_response_mock.status_code = 400
ome_response_mock.success = False
json_str = to_text(json.dumps({"info": "error_details"}))
if exc_type == URLError:
mocker.patch(MODULE_PATH + 'get_ad', side_effect=exc_type("url open error"))
result = self._run_module(ome_default_args)
assert result["unreachable"] is True
elif exc_type not in [HTTPError, SSLValidationError]:
mocker.patch(MODULE_PATH + 'get_ad', side_effect=exc_type("exception message"))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'get_ad', side_effect=exc_type('http://testhost.com', 400, 'http error message',
{"accept-type": "application/json"},
StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
assert 'msg' in result
# --- StarcoderdataPython 1678133: CrankySupertoon01/Toontown-2, dev/tools/leveleditor/direct/showbase/ShowBaseGlobal.py ---
"""instantiate global ShowBase object"""
__all__ = []
from ShowBase import *
# Create the showbase instance
# This should be created by the game specific "start" file
#ShowBase()
# Instead of creating a show base, assert that one has already been created
assert base
# Set direct notify categories now that we have config
directNotify.setDconfigLevels()
def inspect(anObject):
from direct.tkpanels import Inspector
return Inspector.inspect(anObject)
import __builtin__
__builtin__.inspect = inspect
# this also appears in AIBaseGlobal
if (not __debug__) and __dev__:
notify = directNotify.newCategory('ShowBaseGlobal')
notify.error("You must set 'want-dev' to false in non-debug mode.")
# --- StarcoderdataPython 1742954 (gh_stars 1-10) ---
import os
import time
import numpy as np
import tensorflow as tf
from .li import LatentVariable
from .gaminet import GAMINet
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.cluster import KMeans
from copy import deepcopy
import networkx as nx
import matplotlib.pyplot as plt
from .utils import global_visualize_density
from itertools import product
from scipy.cluster.hierarchy import dendrogram, linkage,fcluster
class GAMMLI:
"""
    Generalized Additive Model with Manifest and Latent Interactions
    :param dict meta_info: meta information describing each feature (type and source).
    :param dict model_info: model basic information.
    :param array subnet_arch: subnetwork architecture.
    :param array interact_arch: interaction subnetwork architecture.
    :param func activation_func: activation function.
    :param float lr_bp: learning rate.
    :param float loss_threshold_main: main-effect loss threshold.
    :param float loss_threshold_inter: interaction-effect loss threshold.
    :param int main_grid_size: number of sampling points for main-effect training.
    :param int interact_grid_size: number of sampling points for interaction-effect training.
    :param int batch_size: size of each batch.
    :param int main_effect_epochs: main-effect training stage epochs.
    :param int tuning_epochs: tuning stage epochs.
    :param int interaction_epochs: interaction-effect training stage epochs.
    :param int interact_num: maximum number of interaction pairs.
    :param str interaction_restrict: interaction restriction settings.
    :param int early_stop_thres: epoch at which early stopping starts.
    :param float convergence_threshold: convergence threshold for latent-effect training.
    :param int mf_training_iters: latent-effect training stage epochs.
    :param int max_rank: maximum rank of the latent variables.
    :param bool change_mode: whether to change the initial value for latent-effect training.
    :param int u_group_num: number of user groups.
    :param int i_group_num: number of item groups.
    :param float scale_ratio: group range shrinkage ratio.
    :param float combine_range: group combination range.
    :param bool auto_tune: whether to auto-tune the hyperparameters.
    :param int random_state: random seed.
    :param str wc: build the model for 'warm start' or 'cold start'.
"""
def __init__(self,
meta_info=None,
model_info=None,
subnet_arch=[10, 6],
interact_arch=[20, 10],
activation_func=tf.tanh,
lr_bp=0.001,
loss_threshold_main=0.01,
loss_threshold_inter=0.01,
main_grid_size=41,
interact_grid_size=41,
batch_size=1000,
main_effect_epochs=10000,
tuning_epochs=500,
interaction_epochs=20,
interact_num=20,
interaction_restrict=None,
verbose=False,
early_stop_thres=100,
shrinkage_value=2,
convergence_threshold=0.001,
mf_training_iters=20,
max_rank=None,
n_power_iterations=1,
n_oversamples=10,
init_fill_method="zero",
min_value=None,
max_value=None,
change_mode = False,
normalizer=None,
multi_type_num=0,
u_group_num=0,
i_group_num=0,
val_u_group = 0,
val_i_group = 0,
reg_clarity=0.001,
scale_ratio=1,
auto_tune=False,
random_state = 0,
combine_range=0.99,
wc = None,
lambda_ = 0,
si_approach = 'rsvd'):
super(GAMMLI, self).__init__()
self.meta_info = meta_info
self.model_info = model_info
self.subnet_arch = subnet_arch
self.interact_arch = interact_arch
self.activation_func = activation_func
self.lr_bp = lr_bp
self.loss_threshold_main = loss_threshold_main
self.loss_threshold_inter = loss_threshold_inter
self.main_grid_size = main_grid_size
self.interact_grid_size = interact_grid_size
self.batch_size = batch_size
self.tuning_epochs = tuning_epochs
self.main_effect_epochs = main_effect_epochs
self.interaction_epochs = interaction_epochs
self.interact_num = interact_num
self.interaction_restrict = interaction_restrict
self.verbose = verbose
self.early_stop_thres = early_stop_thres
self.fill_method = init_fill_method
self.min_value = min_value
self.max_value = max_value
self.normalizer = normalizer
self.shrinkage_value = shrinkage_value
self.convergence_threshold = convergence_threshold
self.mf_max_iters = mf_training_iters
self.max_rank = max_rank
self.change_mode =change_mode
self.n_power_iterations = n_power_iterations
self.n_oversamples = n_oversamples
self.reg_clarity = reg_clarity
self.multi_type_num = multi_type_num
self.u_group_num = u_group_num
self.i_group_num = i_group_num
self.val_u_group = val_u_group
self.val_i_group = val_i_group
self.scale_ratio = scale_ratio
self.auto_tune = auto_tune
self.random_state = random_state
self.combine_range = combine_range
self.wc = wc
self.lambda_ = lambda_
self.si_approach = si_approach
tf.random.set_seed(self.random_state)
simu_dir = "./results/gaminet/"
#path = 'data/simulation/sim_0.9.csv'
if not os.path.exists(simu_dir):
os.makedirs(simu_dir)
self.task_type = self.model_info['task_type']
self.feat_dict = self.model_info['feat_dict']
self.ui_shape = self.model_info['ui_shape']
if self.task_type == "Regression":
#self.loss_fn = tf.keras.losses.MeanSquaredError()
self.loss_fn = tf.keras.losses.MeanAbsoluteError()
elif self.task_type == "Classification":
self.loss_fn = tf.keras.losses.BinaryCrossentropy()
#gam first mf second
def fit(self,tr_x, val_x, tr_y, val_y, tr_Xi, val_Xi, tr_idx, val_idx):
"""
Build a GAMMLI model from the dataset (tr_x, val_x, tr_y, val_y, tr_Xi, val_Xi, tr_idx, val_idx).
        :param array tr_x: explicit effect features in the training set.
        :param array val_x: explicit effect features in the validation set.
        :param array tr_y: target variable in the training set.
        :param array val_y: target variable in the validation set.
        :param array tr_Xi: implicit effect features in the training set.
        :param array val_Xi: implicit effect features in the validation set.
        :param array tr_idx: training set index.
        :param array val_idx: validation set index.
:return: fitted GAMMLI model
"""
"""
#initial cluster training
self.user_feature_list = []
self.item_feature_list = []
for indice, (feature_name, feature_info) in enumerate(self.meta_info.items()):
if feature_info["source"] == "user":
self.user_feature_list.append(indice)
elif feature_info["source"] == "item":
self.item_feature_list.append(indice)
user_feature = np.concatenate([tr_x[:,self.user_feature_list],tr_Xi[:,0].reshape(-1,1)],1)
item_feature = np.concatenate([tr_x[:,self.item_feature_list],tr_Xi[:,1].reshape(-1,1)],1)
user_feature = np.unique(user_feature,axis=0)
item_feature = np.unique(item_feature,axis=0)
user_feature = user_feature[np.argsort(user_feature[:,-1])]
item_feature = item_feature[np.argsort(item_feature[:,-1])]
self.user_id = user_feature[:,-1]
self.item_id = item_feature[:,-1]
user_feature = user_feature[:,:-1]
item_feature = item_feature[:,:-1]
val_user_feature = np.concatenate([val_x[:,self.user_feature_list],val_Xi[:,0].reshape(-1,1)],1)
val_item_feature = np.concatenate([val_x[:,self.item_feature_list],val_Xi[:,1].reshape(-1,1)],1)
val_user_feature = np.unique(val_user_feature,axis=0)
val_item_feature = np.unique(val_item_feature,axis=0)
val_user_feature = val_user_feature[np.argsort(val_user_feature[:,-1])]
val_item_feature = val_item_feature[np.argsort(val_item_feature[:,-1])]
val_user_feature = val_user_feature[:,:-1]
val_item_feature = val_item_feature[:,:-1]
if self.u_group_num != 0:
self.u_group, self.u_group_model = self.main_effect_cluster(user_feature,self.u_group_num)
self.val_u_group = self.u_group_model.predict(val_user_feature)
else:
self.u_group=0
if self.i_group_num != 0:
self.i_group, self.i_group_model = self.main_effect_cluster(item_feature,self.i_group_num)
self.val_i_group = self.i_group_model.predict(val_item_feature)
else:
self.i_group = 0
"""
error1=[]
val_error1=[]
error2=[]
val_error2 =[]
val_error = []
#gam fit
self.gami_model = GAMINet(meta_info=self.meta_info,interact_num=self.interact_num,interact_arch=self.interact_arch,
subnet_arch=self.subnet_arch, task_type=self.task_type,
activation_func=tf.tanh, batch_size=self.batch_size, lr_bp=self.lr_bp,
main_effect_epochs=self.main_effect_epochs,tuning_epochs=self.tuning_epochs,
interaction_epochs=self.interaction_epochs, reg_clarity=self.reg_clarity,interaction_restrict=self.interaction_restrict,
verbose=self.verbose, early_stop_thres=self.early_stop_thres,random_state=self.random_state)
model = self.gami_model
st_time = time.time()
model.fit(tr_x, val_x, tr_y, val_y, tr_idx, val_idx)
fi_time = time.time()
print('time cost:',fi_time-st_time)
pred_train = model.predict(tr_x)
pred_val = model.predict(val_x)
error1.append(self.loss_fn(tr_y.ravel(),pred_train.ravel()).numpy())
val_error1.append(self.loss_fn(val_y.ravel(),pred_val.ravel()).numpy())
if self.task_type == 'Classification':
pred_train_initial = model.predict_initial(tr_x).numpy()
pred_train_initial[pred_train_initial>np.log(9999)] = np.log(9999)
pred_train_initial[pred_train_initial<np.log(1/9999)] = np.log(1/9999)
pred_val_initial = model.predict_initial(val_x).numpy()
pred_val_initial[pred_val_initial>np.log(9999)] = np.log(9999)
pred_val_initial[pred_val_initial<np.log(1/9999)] = np.log(1/9999)
print('After the gam stage, training error is %0.5f , validation error is %0.5f' %(error1[-1],val_error1[-1]))
if self.task_type == 'Regression':
residual = (tr_y.ravel() - pred_train.ravel()).reshape(-1,1)
residual_val = (val_y.ravel() - pred_val.ravel()).reshape(-1,1)
elif self.task_type == 'Classification':
tr_y_temp = deepcopy(tr_y)
val_y_temp = deepcopy(val_y)
tr_y_temp[tr_y_temp==0]=-1
val_y_temp[val_y_temp==0]=-1
residual = (2*tr_y_temp.ravel()/(1 + np.exp(2*tr_y_temp.ravel()*pred_train_initial.ravel()))).reshape(-1,1)
residual_val = (2*val_y_temp.ravel()/(1 + np.exp(2*val_y_temp.ravel()*pred_val_initial.ravel()))).reshape(-1,1)
#mf fit
if self.mf_max_iters !=0:
if self.task_type == 'Classification':
pred_train = pred_train_initial
pred_val = pred_val_initial
self.lv_model = LatentVariable(meta_info=self.meta_info,verbose = self.verbose,task_type=self.task_type,max_rank=self.max_rank,max_iters=self.mf_max_iters,
change_mode=self.change_mode,auto_tune=self.auto_tune,
convergence_threshold=self.convergence_threshold,n_oversamples=self.n_oversamples
,u_group_num=self.u_group_num, i_group_num=self.i_group_num
,scale_ratio=self.scale_ratio,pred_tr=pred_train,shrinkage_value=self.shrinkage_value,
tr_y=tr_y,pred_val=pred_val,val_y=val_y, tr_Xi=tr_Xi,val_Xi=val_Xi,random_state=self.random_state
,combine_range=self.combine_range, wc = self.wc, lambda_= self.lambda_, si_approach = self.si_approach)
model1 = self.lv_model
st_time = time.time()
model1.fit(tr_x,val_x,tr_Xi,val_Xi,residual,residual_val,self.ui_shape)
fi_time = time.time()
print('time cost:',fi_time-st_time)
pred = model1.predict(tr_Xi)
predval = model1.predict(val_Xi)
if self.task_type == 'Classification':
error2.append(self.loss_fn(tr_y.ravel(),tf.sigmoid(pred.ravel()+pred_train.ravel()).numpy()).numpy())
val_error2.append(self.loss_fn(val_y.ravel(),tf.sigmoid(predval.ravel()+pred_val.ravel()).numpy()).numpy())
else:
error2.append(self.loss_fn(tr_y.ravel(),pred.ravel()+pred_train.ravel()).numpy())
val_error2.append(self.loss_fn(val_y.ravel(),predval.ravel()+pred_val.ravel()).numpy())
self.mf_tr_err = error2[-1]
self.mf_val_err = val_error2[-1]
print('After the matrix factor stage, training error is %0.5f, validation error is %0.5f' %(error2[-1],val_error2[-1]))
val_error_bi = [val_error1[-1],val_error2[-1]]
val_error = val_error + val_error_bi
self.final_gam_model = self.gami_model
self.final_mf_model = self.lv_model
if self.si_approach == 'rsvd':
self.s = np.diag(self.final_mf_model.s)
self.u = self.final_mf_model.u
self.v = self.final_mf_model.v.T
elif self.si_approach == 'als':
self.s = self.final_mf_model.s
self.u = self.final_mf_model.u
self.v = self.final_mf_model.v
self.cur_rank = self.final_mf_model.cur_rank
self.match_i = self.final_mf_model.match_i
self.match_u = self.final_mf_model.match_u
self.u_group_model = self.final_mf_model.u_group_model
self.i_group_model = self.final_mf_model.i_group_model
self.user_feature_list = self.final_mf_model.user_feature_list
self.item_feature_list = self.final_mf_model.item_feature_list
self.feature_list_ = self.gami_model.feature_list_
def predict(self,xx,Xi):
"""
        Predict with the fitted GAMMLI model.
        :param array xx: explicit features of the samples to predict.
        :param array Xi: implicit features of the samples to predict.
        :return: prediction result
"""
if self.mf_max_iters == 0 or self.final_mf_model==None:
pred = self.final_gam_model.predict(xx)
return pred
else:
pred1 = self.final_gam_model.predict(xx)
if self.task_type == 'Classification':
pred1 = self.final_gam_model.predict_initial(xx).numpy()
pred2 = []
for i in range(Xi.shape[0]):
if Xi[i,0] == 'cold':
g = self.u_group_model.predict(xx[i,self.user_feature_list].reshape(1,-1))[0]
u = self.match_u[g]
#u=np.zeros(u.shape)
else:
u = self.u[int(Xi[i,0])]
if Xi[i,1] == 'cold':
g = self.i_group_model.predict(xx[i,self.item_feature_list].reshape(1,-1))[0]
v = self.match_i[g]
#v =np.zeros(v.shape)
else:
v =self.v[int(Xi[i,1])]
pred_mf = np.dot(u, np.multiply(self.s, v))
pred2.append(pred_mf)
pred2 = np.array(pred2)
pred = pred1.ravel()+ pred2.ravel()
if self.task_type == 'Classification':
#print(pred.shape)
#pred = tf.nn.softmax(pred).numpy()
#print(pred.shape)
pred = tf.sigmoid(pred).numpy()
return pred
def linear_global_explain(self):
self.final_gam_model.global_explain(folder="./results", name="demo", cols_per_row=3, main_density=3, save_png=False, save_eps=False)
def local_explain(self,class_,ex_idx,xx,Xi,y,simu_dir = 'result'):
mf_output = self.final_mf_model.predict(Xi[ex_idx].reshape(1,-1))
data_dict_local = self.final_gam_model.local_explain(class_ ,mf_output,xx[[ex_idx],:], y[[ex_idx],:],save_dict=False)
return data_dict_local
def cold_start_analysis(self,xx,u_i,confi):
if u_i == 'user':
g = self.u_group_model.predict(xx[:,self.user_feature_list].reshape(1,-1))[0]
group_pre_u = self.final_mf_model.pre_u
g = self.new_group(g,group_pre_u)
mean_g = self.match_u[g]
std_g = self.var_u[g]**0.5
if u_i == 'item':
g = self.i_group_model.predict(xx[:,self.item_feature_list].reshape(1,-1))[0]
group_pre_i = self.final_mf_model.pre_i
g = self.new_group(g,group_pre_i)
mean_g = self.match_i[g]
std_g = self.var_i[g]**0.5
upper = mean_g + confi * std_g
lower = mean_g - confi * std_g
print('The new '+u_i+' belong to group '+str(g)+'\n mean is '+str(mean_g)+'\n and std is '+ str(std_g)+
'\n the confidence interval is ['+str(lower)+','+str(upper)+']')
return mean_g, std_g, upper, lower
def dash_board(self,data_dict, importance, simu_dir, save_eps=True):
"""
        Show a dashboard for the global explanation, containing the explanation of main effects, manifest interactions and latent interactions.
        :param dict data_dict: explanation data for plotting.
        :param array importance: importance of each effect.
        :param str simu_dir: storage path.
        :param bool save_eps: whether to save the dashboard as an EPS file.
        :return: None; the dashboard is displayed.
"""
im_list = importance.tolist()
for i,j in data_dict.items():
importance_ = im_list.pop(0)
if data_dict[i]['importance'] !=0:
data_dict[i]['importance'] = importance_
importance = np.sum(im_list) *100
global_visualize_density(data_dict, save_png=False,save_eps=save_eps, folder=simu_dir, name='s1_global')
self.latent_graph(importance,save_eps=save_eps)
def get_all_rank(self,Xi):
"""
        Get the importance of each effect.
:param array Xi: implicit effect feature in training set.
:return: array of importance.
"""
sorted_index, componment_scales_gam = self.final_gam_model.get_all_active_rank()
componment_scales_gam = componment_scales_gam.reshape(-1,1)
delta = np.array(self.final_mf_model.rank_norm(Xi)).reshape(-1,1)
componment_coefs= np.vstack([componment_scales_gam, delta])
componment_scales = (np.abs(componment_coefs) / np.sum(np.abs(componment_coefs))).reshape([-1])
return componment_scales
def latent_graph(self,importance,save_eps=False):
s = self.s
user=np.array(list(self.match_u.values()))
item=np.array(list(self.match_i.values()))
user_m = np.multiply(user,s)
item_m = np.multiply(item,s)
Z1 = linkage(user_m, 'ward')
Z2 = linkage(item_m, 'ward')
left_x,left_y=0.1,0.1
width,height=1,0.5
left_yh=left_y+0.1+0.1
left_yhh=left_yh+0.1+0.1
left_xh=left_x+0.1
left_xhh=left_xh+0.2
heatmap_area=[left_xhh,left_y,width,height+0.15]
#user_dendro=[left_x,left_y,0.1,height]
#item_dendro = [left_xhh,0.25+height,1,0.1]
user_heat = [left_xh,left_y,0.1,height+0.15]
item_heat = [left_xhh, 0.3+height,1,0.1]
cbar = [left_xhh+1.05,left_y,0.1,height+0.15]
plt.figure(figsize=(8,8))
plt.suptitle('Latent Interactions (%.2f%%)' %importance,fontsize=18, x=0.9)
area_1=plt.axes(heatmap_area)
#area_2=plt.axes(user_dendro)
#area_3=plt.axes(item_dendro)
area_4 =plt.axes(user_heat)
area_5 =plt.axes(item_heat)
area_6 = plt.axes(cbar)
#h1 =dendrogram(Z1,ax=area_2,orientation='left',no_labels=True)
#h2 = dendrogram(Z2,ax=area_3,no_labels=True)
#user_s = user_m[h1['leaves']][::-1]
#item_s = item_m[h2['leaves']]
f_user = [np.linalg.norm(i) for i in user_m.tolist()]
f_item = [np.linalg.norm(j) for j in item_m.tolist()]
inter = []
for i,j in product(user_m, item_m):
inter.append(np.dot(i,np.divide(j,s)))
inter = np.array(inter).reshape(user_m.shape[0],-1)
ax1 = area_1.imshow(inter,aspect='auto')
area_1.yaxis.set_ticks_position('right')
area_1.set_yticks(range(user_m.shape[0]))
area_1.set_xticks(range(item_m.shape[0]))
area_1.set_yticklabels([str(i) for i in range(user_m.shape[0])])
area_1.set_xticklabels([str(i) for i in range(item_m.shape[0])])
ax4 = area_4.imshow(np.array(f_user).reshape(-1,1),aspect='auto')
#area_2.set_xticks([])
area_4.set_xticks([])
area_4.set_yticks([])
ax5 = area_5.imshow(np.array(f_item).reshape(1,-1),aspect='auto')
#area_3.set_yticks([])
area_5.set_xticks([])
area_5.set_yticks([])
#area_2.spines['top'].set_visible(False)
#area_2.spines['right'].set_visible(False)
#area_2.spines['bottom'].set_visible(False)
#area_2.spines['left'].set_visible(False)
#area_3.spines['top'].set_visible(False)
#area_3.spines['right'].set_visible(False)
#area_3.spines['bottom'].set_visible(False)
#area_3.spines['left'].set_visible(False)
area_6.spines['top'].set_visible(False)
area_6.spines['right'].set_visible(False)
area_6.spines['bottom'].set_visible(False)
area_6.spines['left'].set_visible(False)
area_6.set_xticks([])
area_6.set_yticks([])
plt.colorbar(ax1,ax=area_6)
if save_eps:
plt.savefig("latent.eps", bbox_inches="tight", dpi=100)
def radar_plot(self, ui_type='user', group_index=[0,1],save_eps=False):
if ui_type == 'user':
labels = np.array(self.feature_list_)[self.user_feature_list].tolist()
k = len(self.user_feature_list)
plot_data = self.u_group_model.cluster_centers_[group_index]
angles = np.linspace(0, 2*np.pi, k, endpoint=False)
plot_data = np.concatenate((plot_data, plot_data[:,[0]]), axis=1)
angles = np.concatenate((angles, [angles[0]]))
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, polar=True)
for i in range(len(plot_data)):
ax.plot(angles, plot_data[i], 'o-', label = 'Group'+str(group_index[i]), linewidth=2)
ax.set_rgrids(np.arange(0, 1.6, 0.2), np.arange(0, 1))
ax.set_thetagrids(angles * 180/np.pi, labels)
plt.legend(loc = 4)
elif ui_type == 'item':
labels = np.array(self.feature_list_)[self.item_feature_list].tolist()
k = len(self.item_feature_list)
plot_data = self.i_group_model.cluster_centers_[group_index]
angles = np.linspace(0, 2*np.pi, k, endpoint=False)
plot_data = np.concatenate((plot_data, plot_data[:,[0]]), axis=1)
angles = np.concatenate((angles, [angles[0]]))
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, polar=True)
for i in range(len(plot_data)):
ax.plot(angles, plot_data[i], 'o-', label = 'Group'+str(group_index[i]), linewidth=2)
ax.set_rgrids(np.arange(0, 1.6, 0.2), np.arange(0, 1))
ax.set_thetagrids(angles * 180/np.pi, labels)
plt.legend(loc = 4)
if save_eps:
plt.savefig("latent.eps", bbox_inches="tight", dpi=100)
|
StarcoderdataPython
|
1609812
|
"""
MyToolBox is a collection of reusable tools.
Author: <EMAIL>
Copyright (C) CERN 2013-2021
"""
import sys
AUTHOR = "<NAME> <<EMAIL>>"
COPYRIGHT = "Copyright (C) CERN 2013-2021"
VERSION = "0.1.0"
DATE = "01 Mar 2013"
__author__ = AUTHOR
__version__ = VERSION
__date__ = DATE
PY2 = sys.hexversion < 0x03000000
PY3 = not PY2
|
StarcoderdataPython
|
4814759
|
# gh_stars: 1-10
class Chef:
def make_chicken(self):
print("The chef makes chicken")
def make_salad(self):
print("The chef makes salad")
def make_special_dish(self):
print("The chef makes make special dish")
class ChineseChef():
def make_chicken(self):
print("The chef makes orange chicken")
def make_salad(self):
print("The chef makes orange salad")
def make_special_dish(self):
print("The chef makes make bbq special dish")
def make_Fried_rice(self):
print("The chef makes Fried_rice")
|
StarcoderdataPython
|
1680165
|
# -*- coding: utf-8 -*-
"""
celery.worker.state
~~~~~~~~~~~~~~~~~~~
Internal worker state (global)
This includes the currently active and reserved tasks,
statistics, and revoked tasks.
:copyright: (c) 2009 - 2012 by <NAME>.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import os
import platform
import shelve
from collections import defaultdict
from .. import __version__
from ..datastructures import LimitedSet
from ..utils import cached_property
#: Worker software/platform information.
SOFTWARE_INFO = {"sw_ident": "celeryd",
"sw_ver": __version__,
"sw_sys": platform.system()}
#: maximum number of revokes to keep in memory.
REVOKES_MAX = 10000
#: how many seconds a revoke will be active before
#: being expired when the max limit has been exceeded.
REVOKE_EXPIRES = 3600
#: set of all reserved :class:`~celery.worker.job.Request`'s.
reserved_requests = set()
#: set of currently active :class:`~celery.worker.job.Request`'s.
active_requests = set()
#: count of tasks executed by the worker, sorted by type.
total_count = defaultdict(lambda: 0)
#: the list of currently revoked tasks. Persistent if statedb set.
revoked = LimitedSet(maxlen=REVOKES_MAX, expires=REVOKE_EXPIRES)
#: Updates global state when a task has been reserved.
task_reserved = reserved_requests.add
def task_accepted(request):
"""Updates global state when a task has been accepted."""
active_requests.add(request)
total_count[request.task_name] += 1
def task_ready(request):
"""Updates global state when a task is ready."""
active_requests.discard(request)
reserved_requests.discard(request)
if os.environ.get("CELERY_BENCH"): # pragma: no cover
from time import time
all_count = 0
bench_start = None
bench_every = int(os.environ.get("CELERY_BENCH_EVERY", 1000))
__reserved = task_reserved
__ready = task_ready
def task_reserved(request): # noqa
global bench_start
if bench_start is None:
bench_start = time()
return __reserved(request)
def task_ready(request): # noqa
global all_count, bench_start
all_count += 1
if not all_count % bench_every:
print("* Time spent processing %s tasks (since first "
"task received): ~%.4fs\n" % (
bench_every, time() - bench_start))
bench_start = None
return __ready(request)
class Persistent(object):
storage = shelve
_is_open = False
def __init__(self, filename):
self.filename = filename
self._load()
def save(self):
self.sync(self.db)
self.db.sync()
self.close()
def merge(self, d):
revoked.update(d.get("revoked") or {})
return d
def sync(self, d):
prev = d.get("revoked") or {}
prev.update(revoked.as_dict())
d["revoked"] = prev
return d
def open(self):
return self.storage.open(self.filename, writeback=True)
def close(self):
if self._is_open:
self.db.close()
self._is_open = False
def _load(self):
self.merge(self.db)
@cached_property
def db(self):
self._is_open = True
return self.open()
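# Usage sketch (illustrative only; the worker normally drives this through its
# state-db option, and the file name below is hypothetical):
# p = Persistent("worker.state")  # loads any previously persisted revoked ids
# ...                             # worker runs; the `revoked` set accumulates
# p.save()                        # merge, sync to disk, and close the shelve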
|
StarcoderdataPython
|
3245452
|
import os
from aqt import *
def getQIcon(name):
"Convenience method for getting a QIcon from this add-on's icon directory."
here = os.path.dirname(os.path.realpath(__file__))
iPath = os.path.join(here, "icons", name)
return QIcon(iPath)
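# Usage sketch (hypothetical icon name; assumes an icons/ directory ships next
# to this add-on module, as getQIcon() expects):
# action = QAction("My add-on", mw)
# action.setIcon(getQIcon("my_icon.png"))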
|
StarcoderdataPython
|
3233636
|
#Twin protocol definitions
#<NAME>
#27-Sep-2021
#//state of Twin, {"state"={}}
TWINSTATE = "state"
##delta of Twin change, {"delta"={}}
TWINDELTA = "delta"
###
###Message structure - MSG
###
#//TOPIC, {"topic"="....", "data"="..."}
TWINTOPIC = "topic"
#//DATA, {"topic"="....", "data"="..."}
TWINDATA = "data"
####
### Default TOPICS
###
#ERROR - when something goes wrong on the PICO
# {"topic"="error", "data"="..."}
TOPICERROR = "error"
#GET - request to GET current status from PICO
# {"topic"="get", "data"=""}
TOPICGET = "get"
#PING - request PONG response
# {"topic"="ping", "data"=""}
TOPICPING = "ping"
#PONG - response to PING
# {"topic"="pong", "data"=""}
TOPICPONG = "pong"
#trn - transaction state member, used to sync twins
TWINTRN = "trn"
|
StarcoderdataPython
|
3337326
|
"""The fitting module contains the code for fitting the experimental data."""
from __future__ import annotations
from pathlib import Path
from rich.progress import track
from chemex.configuration.methods import Methods
from chemex.configuration.methods import Statistics
from chemex.containers.experiments import Experiments
from chemex.containers.experiments import generate_exp_for_statistics
from chemex.messages import print_fitmethod
from chemex.messages import print_group_name
from chemex.messages import print_minimizing
from chemex.messages import print_no_data
from chemex.messages import print_running_statistics
from chemex.messages import print_step_name
from chemex.optimize.gridding import run_grid
from chemex.optimize.grouping import create_groups
from chemex.optimize.helper import calculate_statistics
from chemex.optimize.helper import execute_post_fit
from chemex.optimize.helper import execute_post_fit_groups
from chemex.optimize.helper import print_header
from chemex.optimize.helper import print_values_stat
from chemex.optimize.minimizer import minimize
from chemex.parameters import database
def _run_statistics(
experiments: Experiments,
path: Path,
fitmethod: str | None = None,
statistics: Statistics | None = None,
):
if statistics is None:
return
methods = {
"mc": {"message": "Monte Carlo", "filename": "monte_carlo.out"},
"bs": {"message": "bootstrap", "filename": "bootstrap.out"},
"bsn": {"message": "nucleus-based bootstrap", "filename": "bootstrap_ns.out"},
}
params_lf = database.build_lmfit_params(experiments.param_ids)
ids_vary = [param.name for param in params_lf.values() if param.vary]
for statistic_name, iter_nb in statistics.dict().items():
if iter_nb is None:
continue
method = methods[statistic_name]
print_running_statistics(method["message"])
with open(path / method["filename"], "w") as fileout:
fileout.write(print_header(ids_vary))
for _ in track(range(iter_nb), total=iter_nb, description=" "):
exp_stat = generate_exp_for_statistics(experiments, statistic_name)
params_lf = database.build_lmfit_params(exp_stat.param_ids)
params_fit = minimize(exp_stat, params_lf, fitmethod, verbose=False)
chisqr = calculate_statistics(exp_stat, params_fit).get("chisqr", 1e32)
fileout.write(print_values_stat(params_fit, ids_vary, chisqr))
def _fit_groups(
experiments: Experiments,
path: Path,
plot: str,
fitmethod: str,
statistics: Statistics | None,
) -> None:
groups = create_groups(experiments)
plot_flg = (plot == "normal" and len(groups) == 1) or plot == "all"
print_minimizing()
for group in groups:
group_lmfit_params = database.build_lmfit_params(group.experiments.param_ids)
group_path = path / group.path
if message := group.message:
print_group_name(message)
best_lmfit_params = minimize(
group.experiments, group_lmfit_params, fitmethod, verbose=True
)
database.update_from_parameters(best_lmfit_params)
execute_post_fit(group.experiments, group_path, plot_flg)
# Run Monte Carlo and/or bootstrap analysis
_run_statistics(
group.experiments,
group_path,
fitmethod,
statistics,
)
if len(groups) > 1:
execute_post_fit_groups(experiments, path, plot)
def run_methods(
experiments: Experiments, methods: Methods, path: Path, plot_level: str
) -> None:
for index, (section, method) in enumerate(methods.items(), start=1):
if section:
print_step_name(section, index, len(methods))
# Select a subset of profiles based on "INCLUDE" and "EXCLUDE"
experiments.select(method.selection)
if not experiments:
print_no_data()
continue
print_fitmethod(method.fitmethod)
# Update the parameter "vary" and "expr" status
database.set_parameter_status(method)
if method.grid and method.statistics:
print(
'Warning: "GRID" and "STATISTICS" options are mutually '
'exclusive. Only the "GRID" calculation will be run.'
)
method.statistics = None
path_sect = path / section if len(methods) > 1 else path
if method.grid:
run_grid(experiments, method.grid, path_sect, plot_level, method.fitmethod)
else:
_fit_groups(
experiments, path_sect, plot_level, method.fitmethod, method.statistics
)
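# Hedged usage sketch (not part of this module; the real command-line entry
# point builds `experiments` and `methods` from configuration files before
# delegating here). The output path below is a placeholder.
# run_methods(experiments, methods, path=Path("Output"), plot_level="normal")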
|
StarcoderdataPython
|
1660498
|
from django.db import models
# Create your models here.
class Task(models.Model):
title = models.CharField(max_length = 200)
complete = models.BooleanField(default = False)
created = models.DateTimeField(auto_now_add = True)
def __str__(self):
return self.title
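# Usage sketch (illustrative; assumes a Django project with this app installed
# and migrations applied):
# task = Task.objects.create(title="Write report")
# task.complete = True
# task.save()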
|
StarcoderdataPython
|
51569
|
# filename: src/conformal_methods/utils.py
import numpy as np
import pandas as pd
from src.config import SRC
from numba import jit
from scipy.stats import norm
from scipy.stats import skewnorm
from sklearn.preprocessing import StandardScaler
def init_scoring_object(method, quantile=0.9):
def scoring_object(estimator, X, y):
if (method == "mean-based") | (method == "weighted-mean-based"):
y_pred = estimator.predict(X)
loss = np.mean((y - y_pred)**2)
return -loss.item()
if method == "quantile-based":
y_pred = estimator.predict(X)
if (quantile > 0) and (quantile < 1):
residual = y - y_pred
return -np.sum(residual * (quantile - (residual<0)))
else:
return np.nan
return scoring_object
def CQR_conformity_score(lower_quant_hat, upper_quant_hat, y_conf):
first_arg = lower_quant_hat.flatten() - y_conf.flatten()
second_arg = y_conf.flatten() - upper_quant_hat.flatten()
conf_args = np.column_stack((first_arg, second_arg))
return np.max(conf_args, axis=1)
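# Tiny numeric check (illustrative, not from the source): for one calibration
# point with predicted quantile interval [1.0, 3.0] and observed y = 3.5, the
# CQR conformity score is max(1.0 - 3.5, 3.5 - 3.0) = 0.5.
# CQR_conformity_score(np.array([1.0]), np.array([3.0]), np.array([3.5]))
# -> array([0.5])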
def extract_intervals(conf_set_list):
# preallocate interval boundary matrix
intervals = np.zeros((len(conf_set_list), 2))
for i in range(len(conf_set_list)):
intervals[i, 0] = np.min(conf_set_list[i])
intervals[i, 1] = np.max(conf_set_list[i])
return intervals
def flatten(l):
new_l = []
for tup in l:
sublist = []
for i, subelement in enumerate(tup):
if isinstance(subelement, tuple):
for j in subelement:
sublist.append(j)
else:
sublist.append(subelement)
new_l.append(tuple(sublist))
return new_l
def cond_variance(X_mat, error_type, linear_part=None):
if error_type == "simple_linear":
cond_variance = (X_mat.flatten()) ** 2
elif error_type == "varying_squared_linear_part":
cond_variance = 1 + linear_part ** 2
# print(np.histogram(cond_variance))
elif error_type == "varying_third_moment_mu":
t_dist_part = 3.0 / (3 - 2)
cond_variance = (
t_dist_part
* (1 + 2 * np.abs(linear_part) ** 3 / np.mean(np.abs(linear_part) ** 3))
** 2
)
else:
raise ValueError("Please specify regular error_type.")
return cond_variance
def x_scale(X_mat, error_type, linear_part=None):
if error_type == "simple_linear":
scale = X_mat.flatten()
elif error_type == "varying_squared_linear_part":
scale = linear_part
elif error_type == "varying_third_moment_mu":
scale = linear_part
else:
raise ValueError("Please specify regular error_type.")
return scale
def construc_cond_metric_df(cond_variance, result_pred_bands, y_predict):
interval_lengths = result_pred_bands[:, 1] - result_pred_bands[:, 0]
covered = (y_predict.flatten() >= result_pred_bands[:, 0]) & (
y_predict.flatten() <= result_pred_bands[:, 1]
)
# df = pd.DataFrame(np.stack((cond_variance, interval_lengths, covered), axis=1))
df = np.stack((cond_variance, interval_lengths, covered), axis=1)
return df
def construc_cond_metric_df_simulation(x_scale, result_pred_bands, y_predict):
interval_lengths = result_pred_bands[:, 1] - result_pred_bands[:, 0]
covered = (y_predict.flatten() >= result_pred_bands[:, 0]) & (
y_predict.flatten() <= result_pred_bands[:, 1]
)
df = np.stack((x_scale, interval_lengths, covered), axis=1)
return df
@jit(nopython=True)
def conditional_cdf_hat(y_grid, y_vec, q_hat_conf_mat, q_hat_pred_mat):
# preallocate matrix for the predicted cdf values
f_hat_y_mat = np.zeros((q_hat_pred_mat.shape[0], len(y_grid.flatten())))
###
q_hat_conf_less_y_mat = q_hat_conf_mat <= y_vec.reshape(-1, 1)
f_hat_conf = (1.0 / q_hat_conf_less_y_mat.shape[1]) * np.sum(
q_hat_conf_less_y_mat, axis=1
)
###
for i, y in enumerate(y_grid):
q_hat_pred_less_y = q_hat_pred_mat <= y
f_hat_y = (1.0 / q_hat_pred_less_y.shape[1]) * np.sum(q_hat_pred_less_y, axis=1)
f_hat_y_mat[:, i] = f_hat_y
return f_hat_conf, f_hat_y_mat
@jit(nopython=True)
def p_y_func(alpha, y_grid, f_hat_conf, f_hat_y_mat):
f_hat_conf_abs_dev = np.abs(f_hat_conf.flatten() - 0.5)
f_hat_y_mat_abs_dev = np.abs(f_hat_y_mat - 0.5)
conf_set_list = []
# fix the X_n+1 prediction point:
for i in range(f_hat_y_mat.shape[0]):
conf_set = []
# fix the y grid value:
for j, y in enumerate(y_grid):
val = (
1
/ (len(f_hat_conf_abs_dev) + 1)
* np.sum(f_hat_y_mat_abs_dev[i, j] <= f_hat_conf_abs_dev)
)
if val > alpha:
conf_set.append(y)
conf_set_list.append(conf_set)
return conf_set_list
def calc_normal_params(mu_1, mu_0, X, heteroscedastic):
means = mu_1 - mu_0
if heteroscedastic:
variances = X[:,0]**2 + np.ones(len(means))
else:
variances = np.ones(len(means)) * 2
return means, variances
def get_oracle_interval(lower, upper):
def oracle_interval(mean, var):
std = np.sqrt(var)
norm_obj = norm(loc=mean,scale=std)
quantiles = norm_obj.ppf([lower, upper])
return quantiles
return oracle_interval
def get_oracle_intervals(means, variances):
oracle_interval_fun = get_oracle_interval(0.05, 0.95)
result = list(map(oracle_interval_fun, means, variances))
return result
def share_signif_fun(oracle_intervals, ite_pred_intervals):
which_oracle_ints_signif = np.logical_not((oracle_intervals[:,0] <= 0) & (oracle_intervals[:,1] >= 0))
which_predicted_ints_signif = np.logical_not((ite_pred_intervals[:,0] <= 0) & (ite_pred_intervals[:,1] >= 0))
oracle_signif_signs = np.sign(np.mean(oracle_intervals, axis=1))
predicted_signif_signs = np.sign(np.mean(ite_pred_intervals, axis=1))
same_sign = (oracle_signif_signs == predicted_signif_signs)
correctly_signif_given_oracle_signif = which_oracle_ints_signif & which_predicted_ints_signif & same_sign
if np.sum(which_oracle_ints_signif) == 0:
return -1.0
else:
return np.sum(correctly_signif_given_oracle_signif) / np.sum(which_oracle_ints_signif)
def share_signif_oracles(oracle_intervals, ite_vals):
which_oracle_ints_signif = np.logical_not((oracle_intervals[:,0] <= 0) & (oracle_intervals[:,1] >= 0))
which_ites_not_zero = (ite_vals != 0)
signif_oracles_given_ite_not_zero = which_oracle_ints_signif & which_ites_not_zero
return np.sum(signif_oracles_given_ite_not_zero) / len(oracle_intervals)
def share_signif_intervals_given_ite_not_zero(ite_pred_intervals, ite_vals):
which_predicted_ints_signif = np.logical_not((ite_pred_intervals[:,0] <= 0) & (ite_pred_intervals[:,1] >= 0))
which_ites_not_zero = (ite_vals != 0)
signif_intervals_given_ite_not_zero = which_predicted_ints_signif & which_ites_not_zero
return np.sum(signif_intervals_given_ite_not_zero) / len(ite_pred_intervals)
def generate_treatment_effects_helper(X, treatment_case):
n, p = X.shape
if treatment_case == "binary":
condition = 1 * (X[:,0] > 0.0)
treat = np.where(condition == 0, -1.0, condition)
tau_x = treat
elif treatment_case == "gaussian":
beta_treat = np.ones(p)
half_point = round(p/2)
beta_treat[:half_point] = 1.0
beta_treat[half_point:] = 0.0
# division by true standard deviation of the sum to yield variance 1
tau_x = (X @ beta_treat) / np.sqrt(half_point)
else:
raise ValueError("Please specify a valid main effect type.")
return tau_x
def dgp_ate_zero(n, p, effect_size, main_effect_case="const", treatment_case="binary"):
X = generate_X_fixed_positions(n = n, p=p, X_dist="normal", cor="none", standardize=False, rho=0.5)
tau_x = generate_treatment_effects_helper(X=X, treatment_case=treatment_case)
if main_effect_case == "const":
mu_1 = np.ones(n) + effect_size * tau_x
mu_0 = np.ones(n)
elif main_effect_case == "linear":
beta = np.ones(p)
beta[::2] = 0.0
mu_1 = X @ beta + effect_size * tau_x
mu_0 = X @ beta
elif main_effect_case == "non-linear":
beta = np.ones(p)
beta[::2] = 0.0
linear_part = X @ beta
base_fun = 2 * np.log(1 + np.exp(linear_part))
mu_1 = base_fun + effect_size * tau_x
mu_0 = base_fun
else:
raise ValueError("Please specify a valid main effect type.")
# noise:
eps_1 = np.random.normal(0, 1, n)
eps_0 = np.random.normal(0, 1, n)
# draw treatment assignment variable:
W = np.random.binomial(n=1, p=0.5, size=(n,))
# calculate other quantities of interest:
ite = mu_1 - mu_0 + eps_1 - eps_0
# observed y_obs depends on W:
y_obs = W * (mu_1 + eps_1) + (1 - W) * (mu_0 + eps_0)
return ite, mu_1, mu_0, eps_1, eps_0, y_obs, X, W
def generate_X_fixed_positions(
n,
p,
X_dist="normal",
cor="none",
standardize=False,
rho=0.15,
k=5,
alpha=5,
uniform_lower=0,
uniform_upper=1,
):
# Generate X matrix
if X_dist == "normal":
X = np.random.normal(0, 1, n * p).reshape((n, p))
if X_dist == "binom":
X = np.random.binomial(n=1, p=0.5, size=(n, p))
if X_dist == "uniform":
X = np.random.uniform(uniform_lower, uniform_upper, n * p).reshape((n, p))
if X_dist == "skewed_normal":
X = skewnorm.rvs(alpha, size=n * p).reshape((n, p))
if X_dist == "mixture":
X = np.zeros(n * p).reshape((n, p))
x1 = np.random.normal(0, 1, n * p).reshape((n, p))
x2 = np.random.binomial(n=1, p=0.5, size=(n, p))
x3 = skewnorm.rvs(5, size=n * p).reshape((n, p))
u = np.random.uniform(0, 1, p)
i1 = u <= 1 / 3
i2 = (1 / 3 < u) & (u <= 2 / 3)
i3 = u > 2 / 3
X[:, i1] = x1[:, i1]
X[:, i2] = x2[:, i2]
X[:, i3] = x3[:, i3]
# setting the decisive 5 covariates to a fixed distribution for later purposes
X[:, 0] = np.random.normal(0, 1, n)
X[:, 4] = np.random.binomial(n=1, p=0.5, size=n)
X[:, 6] = skewnorm.rvs(5, size=n)
X[:, 8] = skewnorm.rvs(5, size=n)
X[:, 9] = np.random.binomial(n=1, p=0.5, size=n)
# Pairwise correlation
if cor == "pair":
b = (-2 * np.sqrt(1 - rho) + 2 * np.sqrt((1 - rho) + p * rho)) / (2 * p)
a = b + np.sqrt(1 - rho)
# calculate symmetric square root of p x p matrix whose diagonals are 1 and off diagonals are rho:
sig_half = np.full(shape=(p, p), fill_value=b)
np.fill_diagonal(sig_half, a)
X = X @ sig_half
# Auto-correlation
if cor == "auto":
for j in range(p):
mat = X[:, max(0, j - k) : j + 1]
wts = np.random.uniform(0, 1, mat.shape[1]).flatten()
wts = wts / np.sum(wts)
tmp = mat * wts
X[:, j] = np.array(np.mean(tmp, axis=1))
# Standardize, if necessary
if standardize:
scaler = StandardScaler().fit(X)
X = scaler.transform(X)
return X
def generate_y_fixed_positions(
X_mat,
eps_dist="normal",
error_type="const",
functional_form="linear",
sigma=1,
force_beta_positive=True,
non_zero_beta_count=None,
magnitude_nonzero_coeffs=1,
signal_noise_ratio=None,
alpha=5,
df=4,
):
n, p = X_mat.shape
if non_zero_beta_count is None:
non_zero_beta_count = int(np.ceil(p / 10))
if non_zero_beta_count is not None:
if non_zero_beta_count > p:
raise ValueError(
"Number of non-zero coefficients cannot exceed the number of covariates in X."
)
else:
non_zero_beta_count = int(non_zero_beta_count)
# calculate the linear part of the conditional expectation function, or the error multiplicator:
# Sample s variables uniformly at random, define true coefficients
if eps_dist == "t":
non_zero_coeffs = np.array([0, 4, 6, 8, 9])
beta = np.zeros(p)
beta[non_zero_coeffs] = np.random.choice(
np.array([-1, 1]) * magnitude_nonzero_coeffs,
size=non_zero_beta_count,
replace=True,
)
if force_beta_positive:
beta = np.abs(beta)
linear_part = X_mat @ beta
else:
non_zero_coeffs = np.arange(non_zero_beta_count)
beta = np.zeros(p)
beta[non_zero_coeffs] = np.random.choice(
np.array([-1, 1]) * magnitude_nonzero_coeffs,
size=non_zero_beta_count,
replace=True,
)
if force_beta_positive:
beta = np.abs(beta)
linear_part = X_mat @ beta
# main effect:
if functional_form == "linear":
mu = linear_part
elif functional_form == "sine":
mu = 2 * np.sin(np.pi * linear_part) + np.pi * linear_part
elif functional_form == "stochastic_poisson":
if p > 1:
raise ValueError("This dgp can only be initialized with p = 1.")
else:
x = X_mat.flatten()
ax = 0 * x
for i in range(len(x)):
ax[i] = np.random.poisson(np.sin(x[i]) ** 2 + 0.1) + 0.03 * x[
i
] * np.random.randn(1)
ax[i] += 25 * (np.random.uniform(0, 1, 1) < 0.01) * np.random.randn(1)
y = ax.astype(np.float32)
return y
else:
raise ValueError("Please specify regular functional form.")
# error:
if eps_dist == "normal":
eps = np.random.normal(0, 1, n)
elif eps_dist == "uniform":
eps = np.random.uniform(0, 1, n)
elif eps_dist == "t":
eps = np.random.standard_t(df=df, size=n)
elif eps_dist == "skewed_normal":
eps = skewnorm.rvs(alpha, size=n)
else:
raise ValueError("Please specify regular error distribution.")
if error_type == "const":
sx = np.ones(n)
sigma_vec = sigma * sx
elif error_type == "simple_linear":
sx = linear_part
sigma_vec = sigma * sx
elif error_type == "varying_third_moment_mu":
sx = 1 + 2 * np.abs(linear_part) ** 3 / 38.73
sigma_vec = sigma * sx
elif error_type == "varying_squared_linear_part":
sx = np.sqrt(1 + (linear_part) ** 2)
sigma_vec = sigma * sx
else:
raise ValueError("Please specify regular error type.")
assert eps.shape == (n,)
assert sigma_vec.shape == (n,)
assert mu.shape == (n,)
if signal_noise_ratio is not None:
mu = (
mu
* np.sqrt(signal_noise_ratio)
* np.sqrt(np.mean(sigma_vec ** 2))
/ np.std(mu)
)
assert mu.shape == (n,)
y = mu + eps * sigma_vec
if functional_form != "stochastic_poisson":
return y, eps, sigma_vec, mu, beta
def get_conditional_variances(process_type):
if (
process_type == 3
): # chernozhukov example distributional conformal prediction (2021)
x_grid = np.linspace(0, 1, 2000)
return x_grid, np.array(x_grid) ** 2
if process_type == 4: # romano table
src = str(SRC)
df = pd.read_csv(src + "/simulations/helper_tables/romano_table_cond_variances.csv")
return np.array(df["X"]), np.array(df["cond_var"])
if process_type == 2:
x_grid = np.linspace(0, 5, 2000)
cond_var = 1 + x_grid ** 2
return x_grid, cond_var
if process_type == 1:
x_grid = np.linspace(-5, 5, 2000)
cond_var = 2 * (1 + (2 * np.abs(x_grid) ** 3) / 38.73) ** 2
return x_grid, cond_var
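# Demo sketch (added for illustration; not part of the original module):
# simulate a small design matrix and response with the generators defined above.
if __name__ == "__main__":
    np.random.seed(0)
    X_demo = generate_X_fixed_positions(n=200, p=10, X_dist="normal", cor="none")
    y_demo, eps_demo, sigma_demo, mu_demo, beta_demo = generate_y_fixed_positions(
        X_demo, eps_dist="normal", error_type="const", functional_form="linear"
    )
    print(X_demo.shape, y_demo.shape)  # expected: (200, 10) (200,)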
|
StarcoderdataPython
|
3263602
|
class Calculator:
"""
A calculator that supports Polish notation.
"""
def __init__(self):
pass
|
StarcoderdataPython
|
1766283
|
from src.main.managers.items.item_manager import ItemManager
class HardCodedAliceItemManager(ItemManager):
def __init__(self, items):
ItemManager.__init__(self, items)
self.first_run = True
def get_spook_rate_and_power(self):
power = 0
if self._HAT_BAG:
hat = self._HAT_BAG[0]
hat_rate, hat_power = hat.get_spook_rate_and_power()
power += hat_power
if self.first_run:
rate = 100
self.first_run = False
else:
rate = 0
return rate, power
|
StarcoderdataPython
|
8420
|
"""Define the aiolookin package."""
from .device import async_get_device # noqa
|
StarcoderdataPython
|
1653895
|
from foreman import get_relpath, rule
from garage import scripts
from templates import common
common.define_git_repo(
repo='https://github.com/capnproto/capnproto.git',
treeish='v0.6.1',
)
common.define_distro_packages([
'autoconf',
'automake',
'g++',
'libtool',
'pkg-config',
])
@rule
@rule.depend('install_packages')
@rule.depend('git_clone')
def build(parameters):
"""Build capnproto from source."""
drydock_src = parameters['//base:drydock'] / get_relpath()
if (drydock_src / 'c++/.libs/libcapnp.so').exists():
return
with scripts.directory(drydock_src / 'c++'):
scripts.execute(['autoreconf', '-i'])
scripts.execute(['./configure'])
# Don't run `make check` at the moment.
scripts.execute(['make'])
with scripts.using_sudo():
scripts.execute(['make', 'install'])
scripts.execute(['ldconfig'])
@rule
@rule.depend('build')
@rule.reverse_depend('//base:tapeout')
def tapeout(parameters):
"""Copy build artifacts."""
# Nothing here as //base:tapeout will tapeout /usr/local/lib for us.
pass
|
StarcoderdataPython
|
121412
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/11/20 13:34
# @Author : Tao.Xu
# @Email : <EMAIL>
"""
Some own/observed great lib/ideas,common useful python libs.
"""
import sys
from tlib import version
if sys.version_info < (2, 6):
raise ImportError('tlib needs to be run on python 2.6 and above.')
|
StarcoderdataPython
|
3393849
|
# repo: jorisfa/gcpdiag
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Test code in pubsub.py."""
from unittest import mock
from gcpdiag import models
from gcpdiag.queries import apis_stub, pubsub
DUMMY_PROJECT_NAME = 'gcpdiag-pubsub1-aaaa'
DUMMY_TOPIC_NAME = 'projects/gcpdiag-pubsub1-aaaa/topics/gcpdiag-pubsub1topic-aaaa'
DUMMY_SUB_NAME = 'projects/gcpdiag-pubsub1-aaaa/subscriptions/gcpdiag-pubsub1subscription-aaaa'
DUMMY_PERM = 'domain:google.com'
@mock.patch('gcpdiag.queries.apis.get_api', new=apis_stub.get_api_stub)
class TestPubsub:
"""Test Pubsub"""
def test_get_topics(self):
context = models.Context(project_id=DUMMY_PROJECT_NAME)
topics = pubsub.get_topics(context=context)
assert DUMMY_TOPIC_NAME in topics
def test_get_subscription(self):
context = models.Context(project_id=DUMMY_PROJECT_NAME)
subscription = pubsub.get_subscription(context=context)
assert DUMMY_SUB_NAME in subscription
def test_get_topic_iam_policy(self):
policy = pubsub.get_topic_iam_policy(DUMMY_TOPIC_NAME)
assert DUMMY_PERM in policy.get_members()
def test_get_subscription_iam_policy(self):
policy = pubsub.get_subscription_iam_policy(DUMMY_SUB_NAME)
assert DUMMY_PERM in policy.get_members()
|
StarcoderdataPython
|
1763215
|
"""Configurations and utilities for model building and training."""
import json
import yaml
import torch
import wandb
import argparse
import numpy as np
from pathlib import Path
from pydantic import BaseSettings as _BaseSettings
from typing import TypeVar, Type, Union, Optional, Dict, Any
PathLike = Union[str, Path]
_T = TypeVar("_T")
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument(
"-c", "--config", help="YAML config file", type=str, required=True
)
args = parser.parse_args()
return args
class BaseSettings(_BaseSettings):
def dump_yaml(self, cfg_path: PathLike):
with open(cfg_path, mode="w") as fp:
yaml.dump(json.loads(self.json()), fp, indent=4, sort_keys=False)
@classmethod
def from_yaml(cls: Type[_T], filename: PathLike) -> _T:
with open(filename) as fp:
raw_data = yaml.safe_load(fp)
return cls(**raw_data)
class WandbConfig(BaseSettings):
# Project name for wandb logging
wandb_project_name: Optional[str] = None
# Team name for wandb logging
wandb_entity_name: Optional[str] = None
# Model tag for wandb labeling
model_tag: Optional[str] = None
def init(
self,
cfg: BaseSettings,
model: torch.nn.Module,
wandb_path: PathLike,
) -> Optional[wandb.config]:
"""Initialize wandb with model and config.
Parameters
----------
cfg : BaseSettings
Model configuration with hyperparameters and training settings.
model : torch.nn.Module
Model to train, passed to :obj:`wandb.watch(model)` for logging.
wandb_path : PathLike
Path to write :obj:`wandb/` directory containing training logs.
Returns
-------
Optional[wandb.config]
wandb config object or None if :obj:`wandb_project_name` is None.
"""
if self.wandb_project_name is not None:
wandb.init(
project=self.wandb_project_name,
entity=self.wandb_entity_name,
name=self.model_tag,
id=self.model_tag,
dir=str(wandb_path),
config=cfg.dict(),
resume=False,
)
wandb.watch(model)
return wandb.config
class OptimizerConfig(BaseSettings):
"""pydantic schema for PyTorch optimizer which allows for arbitrary
optimizer hyperparameters."""
class Config:
extra = "allow"
# Name of optimizer
name: str = "Adam"
# Arbitrary optimizer hyperparameters
hparams: Dict[str, Any] = {}
class SchedulerConfig(BaseSettings):
"""pydantic schema for PyTorch scheduler which allows for arbitrary
scheduler hyperparameters."""
class Config:
extra = "allow"
# Name of scheduler
name: str = "ReduceLROnPlateau"
# Arbitrary scheduler hyperparameters
hparams: Dict[str, Any] = {}
def get_torch_optimizer(
name: str, hparams: Dict[str, Any], parameters
) -> torch.optim.Optimizer:
"""Construct a PyTorch optimizer specified by :obj:`name` and :obj:`hparams`."""
from torch import optim
if name == "Adadelta":
optimizer = optim.Adadelta
elif name == "Adagrad":
optimizer = optim.Adagrad
elif name == "Adam":
optimizer = optim.Adam
elif name == "AdamW":
optimizer = optim.AdamW
elif name == "SparseAdam":
optimizer = optim.SparseAdam
elif name == "Adamax":
optimizer = optim.Adamax
elif name == "ASGD":
optimizer = optim.ASGD
elif name == "LBFGS":
optimizer = optim.LBFGS
elif name == "RMSprop":
optimizer = optim.RMSprop
elif name == "Rprop":
optimizer = optim.Rprop
elif name == "SGD":
optimizer = optim.SGD
else:
raise ValueError(f"Invalid optimizer name: {name}")
try:
return optimizer(parameters, **hparams)
except TypeError:
raise Exception(
f"Invalid parameter in hparams: {hparams}"
f" for optimizer {name}.\nSee PyTorch docs."
)
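# Usage sketch (added for illustration; `torch` is already imported at the top
# of this module, and the config values below are made up):
# _model = torch.nn.Linear(4, 2)
# _opt_cfg = OptimizerConfig(name="Adam", hparams={"lr": 1e-3})
# _optimizer = get_torch_optimizer(_opt_cfg.name, _opt_cfg.hparams, _model.parameters())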
def get_torch_scheduler(
name: Optional[str], hparams: Dict[str, Any], optimizer: torch.optim.Optimizer
) -> Optional[torch.optim.lr_scheduler._LRScheduler]:
"""Construct a PyTorch lr_scheduler specified by :obj:`name` and :obj:`hparams`.
Parameters
----------
name : Optional[str]
Name of PyTorch lr_scheduler class to use. If :obj:`name` is :obj:`None`,
simply return None.
hparams : Dict[str, Any]
Hyperparameters to pass to the lr_scheduler.
optimizer : torch.optim.Optimizer
The initialized optimizer.
Returns
-------
Optional[torch.optim.lr_scheduler._LRScheduler]
The initialized PyTorch scheduler, or None if :obj:`name` is :obj:`None`.
"""
if name is None:
return None
from torch.optim import lr_scheduler
if name == "ReduceLROnPlateau":
scheduler = lr_scheduler.ReduceLROnPlateau
elif name == "LambdaLR":
raise ValueError("LambdaLR not supported")
elif name == "MultiplicativeLR":
raise ValueError("MultiplicativeLR not supported")
elif name == "StepLR":
scheduler = lr_scheduler.StepLR
elif name == "MultiStepLR":
scheduler = lr_scheduler.MultiStepLR
elif name == "ExponentialLR":
scheduler = lr_scheduler.ExponentialLR
elif name == "CosineAnnealingLR":
scheduler = lr_scheduler.CosineAnnealingLR
elif name == "ReduceLROnPlateau":
scheduler = lr_scheduler.ReduceLROnPlateau
elif name == "CyclicLR":
scheduler = lr_scheduler.CyclicLR
elif name == "OneCycleLR":
scheduler = lr_scheduler.OneCycleLR
elif name == "CosineAnnealingWarmRestarts":
scheduler = lr_scheduler.CosineAnnealingWarmRestarts
else:
raise ValueError(f"Invalid scheduler name: {name}")
try:
return scheduler(optimizer, **hparams)
except TypeError:
raise Exception(
f"Invalid parameter in hparams: {hparams}"
f" for scheduler {name}.\nSee PyTorch docs."
)
def log_checkpoint(
checkpoint_file: PathLike,
epoch: int,
model: torch.nn.Module,
optimizers: Dict[str, torch.optim.Optimizer],
scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,
):
"""Write a torch .pt file containing the epoch, model, optimizer,
and scheduler.
Parameters
----------
checkpoint_file: PathLike
Path to save checkpoint file.
epoch : int
The current training epoch.
model : torch.nn.Module
The model whose parameters are saved.
optimizers : Dict[str, torch.optim.Optimizer]
The optimizers whose parameters are saved.
scheduler : Optional[torch.optim.lr_scheduler._LRScheduler]
Optional scheduler whose parameters are saved.
"""
checkpoint = {
"epoch": epoch, # To resume training, (see resume_checkpoint)
"model_state_dict": model.state_dict(),
}
for name, optimizer in optimizers.items():
checkpoint[name + "_state_dict"] = optimizer.state_dict()
if scheduler is not None:
checkpoint["scheduler_state_dict"] = scheduler.state_dict()
torch.save(checkpoint, checkpoint_file)
def resume_checkpoint(
checkpoint_file: PathLike,
model: torch.nn.Module,
optimizers: Dict[str, torch.optim.Optimizer],
scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,
) -> int:
"""Modifies :obj:`model`, :obj:`optimizer`, and :obj:`scheduler` with
values stored in torch .pt file :obj:`checkpoint_file` to resume from
a previous training checkpoint.
Parameters
----------
checkpoint_file : PathLike
Path to checkpoint file to resume from.
model : torch.nn.Module
Module to update the parameters of.
optimizers : Dict[str, torch.optim.Optimizer]
Optimizers to update.
scheduler : Optional[torch.optim.lr_scheduler._LRScheduler]
Optional scheduler to update.
Returns
-------
int :
The epoch the checkpoint is saved plus one i.e. the current
training epoch to start from.
"""
checkpoint = torch.load(checkpoint_file, map_location="cpu")
start_epoch = checkpoint["epoch"] + 1
model.load_state_dict(checkpoint["model_state_dict"])
for name, optimizer in optimizers.items():
optimizer.load_state_dict(checkpoint[name + "_state_dict"])
if scheduler is not None:
scheduler_state_dict = checkpoint.get("scheduler_state_dict")
if scheduler_state_dict is not None:
scheduler.load_state_dict(scheduler_state_dict)
return start_epoch
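# Round-trip sketch (hypothetical file name and objects, for illustration only):
# log_checkpoint("checkpoint.pt", epoch, model, {"optimizer": optimizer}, scheduler)
# ...  # later, possibly in a new process
# start_epoch = resume_checkpoint("checkpoint.pt", model, {"optimizer": optimizer}, scheduler)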
def plot_scatter(
data: np.ndarray,
color_dict: Dict[str, np.ndarray] = {},
color: Optional[str] = None,
):
import pandas as pd
import plotly.express as px
df_dict = color_dict.copy()
dim = data.shape[1]
assert dim in [2, 3]
for i, name in zip(range(dim), ["x", "y", "z"]):
df_dict[name] = data[:, i]
df = pd.DataFrame(df_dict)
scatter_kwargs = dict(
x="x",
y="y",
color=color,
width=1000,
height=1000,
size_max=7,
hover_data=list(df_dict.keys()),
)
if dim == 2:
fig = px.scatter(df, **scatter_kwargs)
else: # dim == 3
fig = px.scatter_3d(df, z="z", **scatter_kwargs)
return fig
def log_latent_visualization(
data: np.ndarray,
colors: Dict[str, np.ndarray],
output_path: PathLike,
epoch: int = 0,
n_samples: Optional[int] = None,
method: str = "TSNE",
) -> Dict[str, str]:
"""Make scatter plots of the latent space using the specified
method of dimensionality reduction.
Parameters
----------
data : np.ndarray
The latent embeddings to visualize of shape (N, D) where
N is the number of examples and D is the number of dimensions.
colors : Dict[str, np.ndarray]
Each item in the dictionary will generate a different plot labeled
with the key name. Each inner array should be of size N.
output_path : PathLike
The output directory path to save plots to.
epoch : int, default=0
The current epoch of training to label plots with.
n_samples : Optional[int], default=None
Number of samples to plot, will take a random sample of the
:obj:`data` if :obj:`n_samples < N`. Otherwise, if :obj:`n_samples`
is None, use all the data.
method : str, default="TSNE"
Method of dimensionality reduction used to plot. Currently supports:
"PCA", "TSNE", "LLE", or "raw" for plotting the raw embeddings (or
up to the first 3 dimensions if D > 3). If "TSNE" is specified, then
the GPU accelerated RAPIDS.ai implementation will be tried first and
if it is unavailable then the sklearn version will be used instead.
Returns
-------
Dict[str, str]
A dictionary mapping each key in color to a raw HTML string containing
the scatter plot data. These can be saved directly for visualization
and logged to wandb during training.
Raises
------
ValueError
If dimensionality reduction :obj:`method` is not supported.
"""
from plotly.io import to_html
# Make temp variables to not mutate input data
if n_samples is not None:
inds = np.random.choice(len(data), n_samples)
_data = data[inds]
_colors = {name: color[inds] for name, color in colors.items()}
else:
_data = data
_colors = colors
if method == "PCA":
from sklearn.decomposition import PCA
model = PCA(n_components=3)
data_proj = model.fit_transform(_data)
elif method == "TSNE":
try:
# Attempt to use rapidsai
from cuml.manifold import TSNE
# rapidsai only supports 2 dimensions
model = TSNE(n_components=2, method="barnes_hut")
except ImportError:
from sklearn.manifold import TSNE
model = TSNE(n_components=3, n_jobs=1)
data_proj = model.fit_transform(_data)
elif method == "LLE":
from sklearn import manifold
data_proj, _ = manifold.locally_linear_embedding(
_data, n_neighbors=12, n_components=3
)
elif method == "raw":
if _data.shape[1] <= 3:
# If _data only has 2 or 3 dimensions, use it directly.
data_proj = _data
else:
# Use the first 3 dimensions of the raw data.
data_proj = _data[:, :3]
else:
raise ValueError(f"Invalid dimensionality reduction method {method}")
html_strings = {}
for color in _colors:
fig = plot_scatter(data_proj, _colors, color)
html_string = to_html(fig)
html_strings[color] = html_string
fname = Path(output_path) / f"latent_space-{method}-{color}-epoch-{epoch}.html"
with open(fname, "w") as f:
f.write(html_string)
return html_strings
|
StarcoderdataPython
|
4829599
|
# Copyright 2020 The DDSP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Library of functions for differentiable digital signal processing (DDSP)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections.abc
from typing import Any, Dict, Text, TypeVar
import gin
import numpy as np
from scipy import fftpack
import tensorflow.compat.v2 as tf
Number = TypeVar('Number', int, float, np.ndarray, tf.Tensor)
# Utility Functions ------------------------------------------------------------
def tf_float32(x):
"""Ensure array/tensor is a float32 tf.Tensor."""
if isinstance(x, tf.Tensor):
return tf.cast(x, dtype=tf.float32) # This is a no-op if x is float32.
else:
return tf.convert_to_tensor(x, tf.float32)
def make_iterable(x):
"""Ensure that x is an iterable."""
return x if isinstance(x, collections.abc.Iterable) else [x]
def nested_lookup(nested_key: Text,
nested_dict: Dict[Text, Any],
delimiter: Text = '/') -> tf.Tensor:
"""Returns the value of a nested dict according to a parsed input string.
Args:
nested_key: String of the form "key/key/key...".
nested_dict: Nested dictionary.
delimiter: String that splits the nested keys.
Returns:
value: Value of the key from the nested dictionary.
"""
# Parse the input string.
keys = nested_key.split(delimiter)
# Return the nested value.
value = nested_dict
for key in keys:
value = value[key]
return value
def midi_to_hz(notes: Number) -> Number:
"""TF-compatible midi_to_hz function."""
notes = tf_float32(notes)
return 440.0 * (2.0**((notes - 69.0) / 12.0))
def hz_to_midi(frequencies: Number) -> Number:
"""TF-compatible hz_to_midi function."""
frequencies = tf_float32(frequencies)
log2 = lambda x: tf.math.log(x) / tf.math.log(2.0)
notes = 12.0 * (log2(frequencies) - log2(440.0)) + 69.0
# Map 0 Hz to MIDI 0 (Replace -inf with 0.)
cond = tf.equal(notes, -np.inf)
notes = tf.where(cond, 0.0, notes)
return notes
def unit_to_midi(unit: Number,
midi_min: Number = 20.0,
midi_max: Number = 90.0,
clip: bool = False) -> Number:
"""Map the unit interval [0, 1] to MIDI notes."""
unit = tf.clip_by_value(unit, 0.0, 1.0) if clip else unit
return midi_min + (midi_max - midi_min) * unit
def midi_to_unit(midi: Number,
midi_min: Number = 20.0,
midi_max: Number = 90.0,
clip: bool = False) -> Number:
"""Map MIDI notes to the unit interval [0, 1]."""
unit = (midi - midi_min) / (midi_max - midi_min)
return tf.clip_by_value(unit, 0.0, 1.0) if clip else unit
def unit_to_hz(unit: Number,
hz_min: Number,
hz_max: Number,
clip: bool = False) -> Number:
"""Map unit interval [0, 1] to [hz_min, hz_max], scaling logarithmically."""
midi = unit_to_midi(unit,
midi_min=hz_to_midi(hz_min),
midi_max=hz_to_midi(hz_max),
clip=clip)
return midi_to_hz(midi)
def hz_to_unit(hz: Number,
hz_min: Number,
hz_max: Number,
clip: bool = False) -> Number:
"""Map [hz_min, hz_max] to unit interval [0, 1], scaling logarithmically."""
midi = hz_to_midi(hz)
return midi_to_unit(midi,
midi_min=hz_to_midi(hz_min),
midi_max=hz_to_midi(hz_max),
clip=clip)
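# Quick numeric checks (illustrative, not from the source): A4 is MIDI note 69
# at 440 Hz, and the unit <-> Hz mapping is logarithmic, so the middle of
# [0, 1] lands on the geometric mean of the frequency range.
# midi_to_hz(69.0)                            -> ~440.0
# hz_to_midi(440.0)                           -> ~69.0
# unit_to_hz(0.5, hz_min=20.0, hz_max=2000.0) -> ~200.0 (= sqrt(20 * 2000))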
def resample(inputs: tf.Tensor,
n_timesteps: int,
method: Text = 'linear',
add_endpoint: bool = True) -> tf.Tensor:
"""Interpolates a tensor from n_frames to n_timesteps.
Args:
inputs: Framewise 1-D, 2-D, 3-D, or 4-D Tensor. Shape [n_frames],
[batch_size, n_frames], [batch_size, n_frames, channels], or
[batch_size, n_frames, n_freq, channels].
n_timesteps: Time resolution of the output signal.
method: Type of resampling, must be in ['linear', 'cubic', 'window']. Linear
and cubic are standard bilinear and bicubic interpolation. Window uses
overlapping windows (only for upsampling) which is smoother for amplitude
envelopes.
add_endpoint: Hold the last timestep for an additional step as the endpoint.
Then, n_timesteps is divided evenly into n_frames segments. If false, use
the last timestep as the endpoint, producing (n_frames - 1) segments with
each having a length of n_timesteps / (n_frames - 1).
Returns:
Interpolated 1-D, 2-D, 3-D, or 4-D Tensor. Shape [n_timesteps],
[batch_size, n_timesteps], [batch_size, n_timesteps, channels], or
[batch_size, n_timesteps, n_freqs, channels].
Raises:
ValueError: If method is 'window' and input is 4-D.
ValueError: If method is not one of 'linear', 'cubic', or 'window'.
"""
inputs = tf_float32(inputs)
is_1d = len(inputs.shape) == 1
is_2d = len(inputs.shape) == 2
is_4d = len(inputs.shape) == 4
# Ensure inputs are at least 3d.
if is_1d:
inputs = inputs[tf.newaxis, :, tf.newaxis]
elif is_2d:
inputs = inputs[:, :, tf.newaxis]
def _image_resize(method):
"""Closure around tf.image.resize."""
# Image resize needs 4-D input. Add/remove extra axis if not 4-D.
outputs = inputs[:, :, tf.newaxis, :] if not is_4d else inputs
outputs = tf.compat.v1.image.resize(outputs,
[n_timesteps, outputs.shape[2]],
method=method,
align_corners=not add_endpoint)
return outputs[:, :, 0, :] if not is_4d else outputs
# Perform resampling.
if method == 'linear':
outputs = _image_resize(tf.compat.v1.image.ResizeMethod.BILINEAR)
elif method == 'cubic':
outputs = _image_resize(tf.compat.v1.image.ResizeMethod.BICUBIC)
elif method == 'window':
outputs = upsample_with_windows(inputs, n_timesteps, add_endpoint)
else:
raise ValueError('Method ({}) is invalid. Must be one of {}.'.format(
method, "['linear', 'cubic', 'window']"))
# Return outputs to the same dimensionality of the inputs.
if is_1d:
outputs = outputs[0, :, 0]
elif is_2d:
outputs = outputs[:, :, 0]
return outputs
def upsample_with_windows(inputs: tf.Tensor,
n_timesteps: int,
add_endpoint: bool = True) -> tf.Tensor:
"""Upsample a series of frames using using overlapping hann windows.
Good for amplitude envelopes.
Args:
inputs: Framewise 3-D tensor. Shape [batch_size, n_frames, n_channels].
n_timesteps: The time resolution of the output signal.
add_endpoint: Hold the last timestep for an additional step as the endpoint.
Then, n_timesteps is divided evenly into n_frames segments. If false, use
the last timestep as the endpoint, producing (n_frames - 1) segments with
each having a length of n_timesteps / (n_frames - 1).
Returns:
Upsampled 3-D tensor. Shape [batch_size, n_timesteps, n_channels].
Raises:
ValueError: If input does not have 3 dimensions.
ValueError: If attempting to use function for downsampling.
ValueError: If n_timesteps is not divisible by n_frames (if add_endpoint is
true) or n_frames - 1 (if add_endpoint is false).
"""
inputs = tf_float32(inputs)
if len(inputs.shape) != 3:
raise ValueError('Upsample_with_windows() only supports 3 dimensions, '
'not {}.'.format(inputs.shape))
# Mimic behavior of tf.image.resize.
# For forward (not endpointed), hold value for last interval.
if add_endpoint:
inputs = tf.concat([inputs, inputs[:, -1:, :]], axis=1)
n_frames = int(inputs.shape[1])
n_intervals = (n_frames - 1)
if n_frames >= n_timesteps:
raise ValueError('Upsample with windows cannot be used for downsampling. '
'More input frames ({}) than output timesteps ({})'.format(
n_frames, n_timesteps))
if n_timesteps % n_intervals != 0.0:
minus_one = '' if add_endpoint else ' - 1'
raise ValueError(
'For upsampling, the target number of timesteps must be divisible '
'by the number of input frames{}. (timesteps:{}, frames:{}, '
'add_endpoint={}).'.format(minus_one, n_timesteps, n_frames,
add_endpoint))
# Constant overlap-add, half overlapping windows.
hop_size = n_timesteps // n_intervals
window_length = 2 * hop_size
window = tf.signal.hann_window(window_length) # [window]
# Transpose for overlap_and_add.
x = tf.transpose(inputs, perm=[0, 2, 1]) # [batch_size, n_channels, n_frames]
# Broadcast multiply.
# Add dimension for windows [batch_size, n_channels, n_frames, window].
x = x[:, :, :, tf.newaxis]
window = window[tf.newaxis, tf.newaxis, tf.newaxis, :]
x_windowed = (x * window)
x = tf.signal.overlap_and_add(x_windowed, hop_size)
# Transpose back.
x = tf.transpose(x, perm=[0, 2, 1]) # [batch_size, n_timesteps, n_channels]
# Trim the rise and fall of the first and last window.
return x[:, hop_size:-hop_size, :]
def log_scale(x, min_x, max_x):
"""Scales a -1 to 1 value logarithmically between min and max."""
x = tf_float32(x)
x = (x + 1.0) / 2.0 # Scale [-1, 1] to [0, 1]
return tf.exp((1.0 - x) * tf.math.log(min_x) + x * tf.math.log(max_x))
@gin.register
def exp_sigmoid(x, exponent=10.0, max_value=2.0, threshold=1e-7):
"""Exponentiated Sigmoid pointwise nonlinearity.
Bounds input to [threshold, max_value] with slope given by exponent.
Args:
x: Input tensor.
exponent: In nonlinear regime (away from x=0), the output varies by this
factor for every change of x by 1.0.
max_value: Limiting value at x=inf.
threshold: Limiting value at x=-inf. Stabilizes training when outputs are
pushed to 0.
Returns:
A tensor with pointwise nonlinearity applied.
"""
x = tf_float32(x)
return max_value * tf.nn.sigmoid(x)**tf.math.log(exponent) + threshold
@gin.register
def sym_exp_sigmoid(x, width=8.0):
"""Symmetrical version of exp_sigmoid centered at (0, 1e-7)."""
x = tf_float32(x)
return exp_sigmoid(width * (tf.abs(x)/2.0 - 1.0))
# Additive Synthesizer ---------------------------------------------------------
def remove_above_nyquist(frequency_envelopes: tf.Tensor,
amplitude_envelopes: tf.Tensor,
sample_rate: int = 16000) -> tf.Tensor:
"""Set amplitudes for oscillators above nyquist to 0.
Args:
frequency_envelopes: Sample-wise oscillator frequencies (Hz). Shape
[batch_size, n_samples, n_sinusoids].
amplitude_envelopes: Sample-wise oscillator amplitude. Shape [batch_size,
n_samples, n_sinusoids].
sample_rate: Sample rate in samples per second.
Returns:
amplitude_envelopes: Sample-wise filtered oscillator amplitude.
Shape [batch_size, n_samples, n_sinusoids].
"""
frequency_envelopes = tf_float32(frequency_envelopes)
amplitude_envelopes = tf_float32(amplitude_envelopes)
amplitude_envelopes = tf.where(
tf.greater_equal(frequency_envelopes, sample_rate / 2.0),
tf.zeros_like(amplitude_envelopes), amplitude_envelopes)
return amplitude_envelopes
def oscillator_bank(frequency_envelopes: tf.Tensor,
amplitude_envelopes: tf.Tensor,
sample_rate: int = 16000,
sum_sinusoids: bool = True) -> tf.Tensor:
"""Generates audio from sample-wise frequencies for a bank of oscillators.
Args:
frequency_envelopes: Sample-wise oscillator frequencies (Hz). Shape
[batch_size, n_samples, n_sinusoids].
amplitude_envelopes: Sample-wise oscillator amplitude. Shape [batch_size,
n_samples, n_sinusoids].
sample_rate: Sample rate in samples per second.
sum_sinusoids: Add up audio from all the sinusoids.
Returns:
wav: Sample-wise audio. Shape [batch_size, n_samples, n_sinusoids] if
sum_sinusoids=False, else shape is [batch_size, n_samples].
"""
frequency_envelopes = tf_float32(frequency_envelopes)
amplitude_envelopes = tf_float32(amplitude_envelopes)
# Don't exceed Nyquist.
amplitude_envelopes = remove_above_nyquist(frequency_envelopes,
amplitude_envelopes,
sample_rate)
# Change Hz to radians per sample.
omegas = frequency_envelopes * (2.0 * np.pi) # rad / sec
omegas = omegas / float(sample_rate) # rad / sample
# Accumulate phase and synthesize.
phases = tf.cumsum(omegas, axis=1)
wavs = tf.sin(phases)
audio = amplitude_envelopes * wavs # [mb, n_samples, n_sinusoids]
if sum_sinusoids:
audio = tf.reduce_sum(audio, axis=-1) # [mb, n_samples]
return audio
def get_harmonic_frequencies(frequencies: tf.Tensor,
n_harmonics: int) -> tf.Tensor:
"""Create integer multiples of the fundamental frequency.
Args:
frequencies: Fundamental frequencies (Hz). Shape [batch_size, :, 1].
n_harmonics: Number of harmonics.
Returns:
harmonic_frequencies: Oscillator frequencies (Hz).
Shape [batch_size, :, n_harmonics].
"""
frequencies = tf_float32(frequencies)
f_ratios = tf.linspace(1.0, float(n_harmonics), int(n_harmonics))
f_ratios = f_ratios[tf.newaxis, tf.newaxis, :]
harmonic_frequencies = frequencies * f_ratios
return harmonic_frequencies
def harmonic_synthesis(frequencies: tf.Tensor,
amplitudes: tf.Tensor,
harmonic_shifts: tf.Tensor = None,
harmonic_distribution: tf.Tensor = None,
n_samples: int = 64000,
sample_rate: int = 16000,
amp_resample_method: Text = 'window') -> tf.Tensor:
"""Generate audio from frame-wise monophonic harmonic oscillator bank.
Args:
frequencies: Frame-wise fundamental frequency in Hz. Shape [batch_size,
n_frames, 1].
amplitudes: Frame-wise oscillator peak amplitude. Shape [batch_size,
n_frames, 1].
harmonic_shifts: Harmonic frequency variations (Hz), zero-centered. Total
frequency of a harmonic is equal to (frequencies * harmonic_number * (1 +
harmonic_shifts)). Shape [batch_size, n_frames, n_harmonics].
harmonic_distribution: Harmonic amplitude variations, ranged zero to one.
Total amplitude of a harmonic is equal to (amplitudes *
harmonic_distribution). Shape [batch_size, n_frames, n_harmonics].
n_samples: Total length of output audio. Interpolates and crops to this.
sample_rate: Sample rate.
amp_resample_method: Mode with which to resample amplitude envelopes.
Returns:
audio: Output audio. Shape [batch_size, n_samples, 1]
"""
frequencies = tf_float32(frequencies)
amplitudes = tf_float32(amplitudes)
if harmonic_distribution is not None:
harmonic_distribution = tf_float32(harmonic_distribution)
n_harmonics = int(harmonic_distribution.shape[-1])
elif harmonic_shifts is not None:
harmonic_shifts = tf_float32(harmonic_shifts)
n_harmonics = int(harmonic_shifts.shape[-1])
else:
n_harmonics = 1
# Create harmonic frequencies [batch_size, n_frames, n_harmonics].
harmonic_frequencies = get_harmonic_frequencies(frequencies, n_harmonics)
if harmonic_shifts is not None:
harmonic_frequencies *= (1.0 + harmonic_shifts)
# Create harmonic amplitudes [batch_size, n_frames, n_harmonics].
if harmonic_distribution is not None:
harmonic_amplitudes = amplitudes * harmonic_distribution
else:
harmonic_amplitudes = amplitudes
# Create sample-wise envelopes.
frequency_envelopes = resample(harmonic_frequencies, n_samples) # cycles/sec
amplitude_envelopes = resample(harmonic_amplitudes, n_samples,
method=amp_resample_method)
# Synthesize from harmonics [batch_size, n_samples].
audio = oscillator_bank(frequency_envelopes,
amplitude_envelopes,
sample_rate=sample_rate)
return audio
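# Usage sketch (shapes follow the docstrings above; the values are illustrative
# and not from the original source):
# f0 = 440.0 * tf.ones([1, 250, 1])          # frame-wise fundamental in Hz
# amps = 0.5 * tf.ones([1, 250, 1])          # frame-wise peak amplitude
# harm_dist = tf.ones([1, 250, 10]) / 10.0   # ten equal-amplitude harmonics
# audio = harmonic_synthesis(f0, amps, harmonic_distribution=harm_dist,
#                            n_samples=64000, sample_rate=16000)
# `audio` is a batch of one 64000-sample waveform (4 seconds at 16 kHz).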
# Wavetable Synthesizer --------------------------------------------------------
def linear_lookup(phase: tf.Tensor,
wavetables: tf.Tensor) -> tf.Tensor:
"""Lookup from wavetables with linear interpolation.
Args:
phase: The instantaneous phase of the base oscillator, ranging from 0 to
1.0. This gives the position to lookup in the wavetable.
Shape [batch_size, n_samples, 1].
wavetables: Wavetables to be read from on lookup. Shape [batch_size,
n_samples, n_wavetable] or [batch_size, n_wavetable].
Returns:
The resulting audio from linearly interpolated lookup of the wavetables at
each point in time. Shape [batch_size, n_samples].
"""
phase, wavetables = tf_float32(phase), tf_float32(wavetables)
# Add a time dimension if not present.
if len(wavetables.shape) == 2:
wavetables = wavetables[:, tf.newaxis, :]
# Add a wavetable dimension if not present.
if len(phase.shape) == 2:
phase = phase[:, :, tf.newaxis]
# Add first sample to end of wavetable for smooth linear interpolation
# between the last point in the wavetable and the first point.
wavetables = tf.concat([wavetables, wavetables[..., 0:1]], axis=-1)
n_wavetable = int(wavetables.shape[-1])
# Get a phase value for each point on the wavetable.
phase_wavetables = tf.linspace(0.0, 1.0, n_wavetable)
# Get pair-wise distances from the oscillator phase to each wavetable point.
# Axes are [batch, time, n_wavetable].
phase_distance = tf.abs((phase - phase_wavetables[tf.newaxis, tf.newaxis, :]))
# Put distance in units of wavetable samples.
phase_distance *= n_wavetable - 1
# Weighting for interpolation.
# Distance is > 1.0 (and thus weights are 0.0) for all but nearest neighbors.
weights = tf.nn.relu(1.0 - phase_distance)
weighted_wavetables = weights * wavetables
# Interpolated audio from summing the weighted wavetable at each timestep.
return tf.reduce_sum(weighted_wavetables, axis=-1)
def wavetable_synthesis(frequencies: tf.Tensor,
amplitudes: tf.Tensor,
wavetables: tf.Tensor,
n_samples: int = 64000,
sample_rate: int = 16000):
"""Monophonic wavetable synthesizer.
Args:
frequencies: Frame-wise frequency in Hertz of the fundamental oscillator.
Shape [batch_size, n_frames, 1].
amplitudes: Frame-wise amplitude envelope to apply to the oscillator. Shape
[batch_size, n_frames, 1].
wavetables: Frame-wise wavetables from which to lookup. Shape
[batch_size, n_wavetable] or [batch_size, n_frames, n_wavetable].
n_samples: Total length of output audio. Interpolates and crops to this.
sample_rate: Number of samples per second.
Returns:
audio: Audio at the frequency and amplitude of the inputs, with harmonics
given by the wavetable. Shape [batch_size, n_samples].
"""
wavetables = tf_float32(wavetables)
# Create sample-wise envelopes.
amplitude_envelope = resample(amplitudes, n_samples, method='window')[:, :, 0]
frequency_envelope = resample(frequencies, n_samples) # cycles / sec
# Create intermediate wavetables.
wavetable_shape = wavetables.shape.as_list()
if len(wavetable_shape) == 3 and wavetable_shape[1] > 1:
wavetables = resample(wavetables, n_samples)
# Accumulate phase (in cycles which range from 0.0 to 1.0).
phase_velocity = frequency_envelope / float(sample_rate) # cycles / sample
# Note: Cumsum accumulates _very_ small errors at float32 precision.
# On the order of milli-Hertz.
phase = tf.cumsum(phase_velocity, axis=1, exclusive=True) % 1.0
# Synthesize with linear lookup.
audio = linear_lookup(phase, wavetables)
# Modulate with amplitude envelope.
audio *= amplitude_envelope
return audio
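# Illustrative usage sketch (not part of the original source): a 440 Hz tone
# rendered from a single sawtooth wavetable that is shared across all frames.
def _example_wavetable_synthesis(n_samples: int = 16000,
                                 sample_rate: int = 16000) -> tf.Tensor:
  """Returns audio of shape [1, n_samples]."""
  frequencies = 440.0 * tf.ones([1, 100, 1])  # [batch, n_frames, 1]
  amplitudes = 0.5 * tf.ones([1, 100, 1])
  sawtooth = tf.linspace(-1.0, 1.0, 2048)[tf.newaxis, :]  # [batch, n_wavetable]
  return wavetable_synthesis(frequencies, amplitudes, sawtooth,
                             n_samples=n_samples, sample_rate=sample_rate)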
def variable_length_delay(phase: tf.Tensor,
audio: tf.Tensor,
max_length: int = 512) -> tf.Tensor:
"""Delay audio by a time-vaying amount using linear interpolation.
Useful for modulation effects such as vibrato, chorus, and flanging.
Args:
    phase: The normalized instantaneous length of the delay, ranging from 0 to
1.0. This corresponds to a delay of 0 to max_length samples. Shape
[batch_size, n_samples, 1].
audio: Audio signal to be delayed. Shape [batch_size, n_samples].
    max_length: Maximum delay in samples.
Returns:
The delayed audio signal. Shape [batch_size, n_samples].
"""
phase, audio = tf_float32(phase), tf_float32(audio)
# Make causal by zero-padding audio up front.
audio = tf.pad(audio, [(0, 0), (max_length - 1, 0)])
# Cut audio up into frames of max_length.
frames = tf.signal.frame(audio,
frame_length=max_length,
frame_step=1,
pad_end=False)
# Reverse frames so that [0, 1] phase corresponds to [0, max_length] delay.
frames = frames[..., ::-1]
# Read audio from the past frames.
return linear_lookup(phase, frames)
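# Illustrative usage sketch (not part of the original source): a simple vibrato
# made by driving the delay length with a slow sinusoidal LFO.
def _example_vibrato(audio: tf.Tensor,
                     sample_rate: int = 16000,
                     rate_hz: float = 5.0,
                     max_length: int = 512) -> tf.Tensor:
  """Applies sinusoidal delay modulation to audio of shape [batch, n_samples]."""
  n_samples = int(audio.shape[-1])
  t = tf.range(n_samples, dtype=tf.float32) / float(sample_rate)
  lfo = 0.5 + 0.5 * tf.sin(2.0 * np.pi * rate_hz * t)  # values in [0, 1]
  phase = lfo[tf.newaxis, :, tf.newaxis]  # broadcasts over the batch dimension
  return variable_length_delay(phase, audio, max_length=max_length)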
# Time-varying convolution -----------------------------------------------------
def get_fft_size(frame_size: int, ir_size: int, power_of_2: bool = True) -> int:
"""Calculate final size for efficient FFT.
Args:
frame_size: Size of the audio frame.
ir_size: Size of the convolving impulse response.
power_of_2: Constrain to be a power of 2. If False, allow other 5-smooth
numbers. TPU requires power of 2, while GPU is more flexible.
Returns:
fft_size: Size for efficient FFT.
"""
convolved_frame_size = ir_size + frame_size - 1
if power_of_2:
# Next power of 2.
fft_size = int(2**np.ceil(np.log2(convolved_frame_size)))
else:
fft_size = int(fftpack.helper.next_fast_len(convolved_frame_size))
return fft_size
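# For example (illustrative): a 1000-sample frame convolved with a 255-tap IR
# yields 1254 samples, so get_fft_size(1000, 255) returns 2048 (next power of
# 2), while get_fft_size(1000, 255, power_of_2=False) returns the next 5-smooth
# size, 1280.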
def crop_and_compensate_delay(audio: tf.Tensor, audio_size: int, ir_size: int,
padding: Text,
delay_compensation: int) -> tf.Tensor:
"""Crop audio output from convolution to compensate for group delay.
Args:
audio: Audio after convolution. Tensor of shape [batch, time_steps].
audio_size: Initial size of the audio before convolution.
ir_size: Size of the convolving impulse response.
    padding: Either 'valid' or 'same'. For 'same' the final output is the
same size as the input audio (audio_timesteps). For 'valid' the audio is
extended to include the tail of the impulse response (audio_timesteps +
ir_timesteps - 1).
delay_compensation: Samples to crop from start of output audio to compensate
for group delay of the impulse response. If delay_compensation < 0 it
defaults to automatically calculating a constant group delay of the
windowed linear phase filter from frequency_impulse_response().
Returns:
Tensor of cropped and shifted audio.
Raises:
ValueError: If padding is not either 'valid' or 'same'.
"""
# Crop the output.
if padding == 'valid':
crop_size = ir_size + audio_size - 1
elif padding == 'same':
crop_size = audio_size
else:
raise ValueError('Padding must be \'valid\' or \'same\', instead '
'of {}.'.format(padding))
# Compensate for the group delay of the filter by trimming the front.
# For an impulse response produced by frequency_impulse_response(),
# the group delay is constant because the filter is linear phase.
total_size = int(audio.shape[-1])
crop = total_size - crop_size
start = ((ir_size - 1) // 2 -
1 if delay_compensation < 0 else delay_compensation)
end = crop - start
return audio[:, start:-end]
def fft_convolve(audio: tf.Tensor,
impulse_response: tf.Tensor,
padding: Text = 'same',
delay_compensation: int = -1) -> tf.Tensor:
"""Filter audio with frames of time-varying impulse responses.
Time-varying filter. Given audio [batch, n_samples], and a series of impulse
responses [batch, n_frames, n_impulse_response], splits the audio into frames,
applies filters, and then overlap-and-adds audio back together.
Applies non-windowed non-overlapping STFT/ISTFT to efficiently compute
convolution for large impulse response sizes.
Args:
audio: Input audio. Tensor of shape [batch, audio_timesteps].
impulse_response: Finite impulse response to convolve. Can either be a 2-D
Tensor of shape [batch, ir_size], or a 3-D Tensor of shape [batch,
ir_frames, ir_size]. A 2-D tensor will apply a single linear
time-invariant filter to the audio. A 3-D Tensor will apply a linear
time-varying filter. Automatically chops the audio into equally shaped
blocks to match ir_frames.
    padding: Either 'valid' or 'same'. For 'same' the final output is the
same size as the input audio (audio_timesteps). For 'valid' the audio is
extended to include the tail of the impulse response (audio_timesteps +
ir_timesteps - 1).
delay_compensation: Samples to crop from start of output audio to compensate
for group delay of the impulse response. If delay_compensation is less
than 0 it defaults to automatically calculating a constant group delay of
the windowed linear phase filter from frequency_impulse_response().
Returns:
audio_out: Convolved audio. Tensor of shape
[batch, audio_timesteps + ir_timesteps - 1] ('valid' padding) or shape
[batch, audio_timesteps] ('same' padding).
Raises:
ValueError: If audio and impulse response have different batch size.
ValueError: If audio cannot be split into evenly spaced frames. (i.e. the
number of impulse response frames is on the order of the audio size and
not a multiple of the audio size.)
"""
audio, impulse_response = tf_float32(audio), tf_float32(impulse_response)
# Add a frame dimension to impulse response if it doesn't have one.
ir_shape = impulse_response.shape.as_list()
if len(ir_shape) == 2:
impulse_response = impulse_response[:, tf.newaxis, :]
ir_shape = impulse_response.shape.as_list()
# Get shapes of audio and impulse response.
batch_size_ir, n_ir_frames, ir_size = ir_shape
batch_size, audio_size = audio.shape.as_list()
# Validate that batch sizes match.
if batch_size != batch_size_ir:
raise ValueError('Batch size of audio ({}) and impulse response ({}) must '
'be the same.'.format(batch_size, batch_size_ir))
# Cut audio into frames.
frame_size = int(np.ceil(audio_size / n_ir_frames))
hop_size = frame_size
audio_frames = tf.signal.frame(audio, frame_size, hop_size, pad_end=True)
# Check that number of frames match.
n_audio_frames = int(audio_frames.shape[1])
if n_audio_frames != n_ir_frames:
raise ValueError(
'Number of Audio frames ({}) and impulse response frames ({}) do not '
'match. For small hop size = ceil(audio_size / n_ir_frames), '
'number of impulse response frames must be a multiple of the audio '
'size.'.format(n_audio_frames, n_ir_frames))
# Pad and FFT the audio and impulse responses.
fft_size = get_fft_size(frame_size, ir_size, power_of_2=True)
audio_fft = tf.signal.rfft(audio_frames, [fft_size])
ir_fft = tf.signal.rfft(impulse_response, [fft_size])
# Multiply the FFTs (same as convolution in time).
audio_ir_fft = tf.multiply(audio_fft, ir_fft)
# Take the IFFT to resynthesize audio.
audio_frames_out = tf.signal.irfft(audio_ir_fft)
audio_out = tf.signal.overlap_and_add(audio_frames_out, hop_size)
# Crop and shift the output audio.
return crop_and_compensate_delay(audio_out, audio_size, ir_size, padding,
delay_compensation)
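# Illustrative usage sketch (not part of the original source): apply a fixed
# 3-tap smoothing filter as a single time-invariant impulse response.
def _example_fft_convolve(audio: tf.Tensor) -> tf.Tensor:
  """Filters audio of shape [batch, n_samples] with one FIR for all frames."""
  batch_size = int(audio.shape[0])
  ir = tf.constant([[0.25, 0.5, 0.25]], dtype=tf.float32)  # [1, ir_size]
  ir = tf.tile(ir, [batch_size, 1])  # [batch, ir_size]
  return fft_convolve(audio, ir, padding='same')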
# Filter Design ----------------------------------------------------------------
def apply_window_to_impulse_response(impulse_response: tf.Tensor,
window_size: int = 0,
causal: bool = False) -> tf.Tensor:
"""Apply a window to an impulse response and put in causal form.
Args:
impulse_response: A series of impulse responses frames to window, of shape
[batch, n_frames, ir_size].
window_size: Size of the window to apply in the time domain. If window_size
is less than 1, it defaults to the impulse_response size.
    causal: Impulse response input is in causal form (peak in the middle).
Returns:
impulse_response: Windowed impulse response in causal form, with last
dimension cropped to window_size if window_size is greater than 0 and less
than ir_size.
"""
impulse_response = tf_float32(impulse_response)
# If IR is in causal form, put it in zero-phase form.
if causal:
impulse_response = tf.signal.fftshift(impulse_response, axes=-1)
# Get a window for better time/frequency resolution than rectangular.
# Window defaults to IR size, cannot be bigger.
ir_size = int(impulse_response.shape[-1])
if (window_size <= 0) or (window_size > ir_size):
window_size = ir_size
window = tf.signal.hann_window(window_size)
# Zero pad the window and put in in zero-phase form.
padding = ir_size - window_size
if padding > 0:
half_idx = (window_size + 1) // 2
window = tf.concat([window[half_idx:],
tf.zeros([padding]),
window[:half_idx]], axis=0)
else:
window = tf.signal.fftshift(window, axes=-1)
# Apply the window, to get new IR (both in zero-phase form).
window = tf.broadcast_to(window, impulse_response.shape)
impulse_response = window * tf.math.real(impulse_response)
# Put IR in causal form and trim zero padding.
if padding > 0:
first_half_start = (ir_size - (half_idx - 1)) + 1
second_half_end = half_idx + 1
impulse_response = tf.concat([impulse_response[..., first_half_start:],
impulse_response[..., :second_half_end]],
axis=-1)
else:
impulse_response = tf.signal.fftshift(impulse_response, axes=-1)
return impulse_response
def frequency_impulse_response(magnitudes: tf.Tensor,
window_size: int = 0) -> tf.Tensor:
"""Get windowed impulse responses using the frequency sampling method.
Follows the approach in:
https://ccrma.stanford.edu/~jos/sasp/Windowing_Desired_Impulse_Response.html
Args:
magnitudes: Frequency transfer curve. Float32 Tensor of shape [batch,
n_frames, n_frequencies] or [batch, n_frequencies]. The frequencies of the
      last dimension are ordered as [0, f_nyquist / (n_frames - 1), ...,
f_nyquist], where f_nyquist is (sample_rate / 2). Automatically splits the
audio into equally sized frames to match frames in magnitudes.
window_size: Size of the window to apply in the time domain. If window_size
is less than 1, it defaults to the impulse_response size.
Returns:
impulse_response: Time-domain FIR filter of shape
[batch, frames, window_size] or [batch, window_size].
Raises:
ValueError: If window size is larger than fft size.
"""
# Get the IR (zero-phase form).
magnitudes = tf.complex(magnitudes, tf.zeros_like(magnitudes))
impulse_response = tf.signal.irfft(magnitudes)
# Window and put in causal form.
impulse_response = apply_window_to_impulse_response(impulse_response,
window_size)
return impulse_response
def sinc(x, threshold=1e-20):
"""Normalized zero phase version (peak at zero)."""
x = tf_float32(x)
x = tf.where(tf.abs(x) < threshold, threshold * tf.ones_like(x), x)
x = np.pi * x
return tf.sin(x) / x
def sinc_impulse_response(cutoff_frequency, window_size=512, sample_rate=None):
"""Get a sinc impulse response for a set of low-pass cutoff frequencies.
Args:
cutoff_frequency: Frequency cutoff for low-pass sinc filter. If the
sample_rate is given, cutoff_frequency is in Hertz. If sample_rate is
None, cutoff_frequency is normalized ratio (frequency/nyquist) in the
range [0, 1.0]. Shape [batch_size, n_time, 1].
window_size: Size of the Hamming window to apply to the impulse.
sample_rate: Optionally provide the sample rate.
Returns:
impulse_response: A series of impulse responses. Shape
[batch_size, n_time, (window_size // 2) * 2 + 1].
"""
# Convert frequency to samples/sample_rate [0, Nyquist] -> [0, 1].
if sample_rate is not None:
cutoff_frequency *= 2.0 / float(sample_rate)
# Create impulse response axis.
half_size = window_size // 2
full_size = half_size * 2 + 1
idx = tf.range(-half_size, half_size + 1.0, dtype=tf.float32)
idx = idx[tf.newaxis, tf.newaxis, :]
# Compute impulse response.
impulse_response = sinc(cutoff_frequency * idx)
# Window the impulse response.
window = tf.signal.hamming_window(full_size)
window = tf.broadcast_to(window, impulse_response.shape)
impulse_response = window * tf.math.real(impulse_response)
# Normalize for unity gain.
impulse_response /= tf.reduce_sum(impulse_response, axis=-1, keepdims=True)
return impulse_response
def frequency_filter(audio: tf.Tensor,
magnitudes: tf.Tensor,
window_size: int = 0,
padding: Text = 'same') -> tf.Tensor:
"""Filter audio with a finite impulse response filter.
Args:
audio: Input audio. Tensor of shape [batch, audio_timesteps].
magnitudes: Frequency transfer curve. Float32 Tensor of shape [batch,
n_frames, n_frequencies] or [batch, n_frequencies]. The frequencies of the
      last dimension are ordered as [0, f_nyquist / (n_frames - 1), ...,
f_nyquist], where f_nyquist is (sample_rate / 2). Automatically splits the
audio into equally sized frames to match frames in magnitudes.
window_size: Size of the window to apply in the time domain. If window_size
is less than 1, it is set as the default (n_frequencies).
    padding: Either 'valid' or 'same'. For 'same' the final output is the
same size as the input audio (audio_timesteps). For 'valid' the audio is
extended to include the tail of the impulse response (audio_timesteps +
window_size - 1).
Returns:
Filtered audio. Tensor of shape
[batch, audio_timesteps + window_size - 1] ('valid' padding) or shape
[batch, audio_timesteps] ('same' padding).
"""
impulse_response = frequency_impulse_response(magnitudes,
window_size=window_size)
return fft_convolve(audio, impulse_response, padding=padding)
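# Illustrative usage sketch (not part of the original source): a crude
# time-invariant low-pass made by zeroing the upper half of the frequency
# transfer curve and applying it to uniform noise.
def _example_frequency_filter(n_samples: int = 16000,
                              n_frequencies: int = 65) -> tf.Tensor:
  """Returns low-passed noise of shape [1, n_samples]."""
  audio = tf.random.uniform([1, n_samples], minval=-1.0, maxval=1.0)
  magnitudes = tf.concat([tf.ones([1, n_frequencies // 2 + 1]),
                          tf.zeros([1, n_frequencies // 2])], axis=-1)
  return frequency_filter(audio, magnitudes)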
def sinc_filter(audio: tf.Tensor,
cutoff_frequency: tf.Tensor,
window_size: int = 512,
sample_rate: int = None,
padding: Text = 'same') -> tf.Tensor:
"""Filter audio with sinc low-pass filter.
Args:
audio: Input audio. Tensor of shape [batch, audio_timesteps].
cutoff_frequency: Frequency cutoff for low-pass sinc filter. If the
sample_rate is given, cutoff_frequency is in Hertz. If sample_rate is
None, cutoff_frequency is normalized ratio (frequency/nyquist) in the
range [0, 1.0]. Shape [batch_size, n_time, 1].
window_size: Size of the Hamming window to apply to the impulse.
sample_rate: Optionally provide the sample rate.
    padding: Either 'valid' or 'same'. For 'same' the final output is the
same size as the input audio (audio_timesteps). For 'valid' the audio is
extended to include the tail of the impulse response (audio_timesteps +
window_size - 1).
Returns:
Filtered audio. Tensor of shape
[batch, audio_timesteps + window_size - 1] ('valid' padding) or shape
[batch, audio_timesteps] ('same' padding).
"""
impulse_response = sinc_impulse_response(cutoff_frequency,
window_size=window_size,
sample_rate=sample_rate)
return fft_convolve(audio, impulse_response, padding=padding)
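# Illustrative usage sketch (not part of the original source): a low-pass sweep
# whose cutoff falls from 4 kHz to 400 Hz across the clip. n_frames is chosen
# to divide n_samples evenly so fft_convolve can frame the audio exactly.
def _example_sinc_sweep(n_samples: int = 16000,
                        sample_rate: int = 16000) -> tf.Tensor:
  """Returns swept low-pass filtered noise of shape [1, n_samples]."""
  audio = tf.random.uniform([1, n_samples], minval=-1.0, maxval=1.0)
  n_frames = 100
  cutoff = tf.linspace(4000.0, 400.0, n_frames)[tf.newaxis, :, tf.newaxis]
  return sinc_filter(audio, cutoff, window_size=256, sample_rate=sample_rate)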
|
StarcoderdataPython
|
97423
|
"""
Name : __init__.py
boxes module
This import path is important to allow importing correctly as package
"""
import os, sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '.')))
|
StarcoderdataPython
|
3268310
|
from django.core.management.base import BaseCommand
from django.utils.six.moves import input
from instagram.client import InstagramAPI
from mezzanine.conf import settings
def get_auth_tokens(stdout):
stdout.write('Please enter the following Instagram client details\n\n')
print('lol' + settings.INSTAGRAM_CLIENT_ID)
if settings.INSTAGRAM_CLIENT_ID == '':
settings.INSTAGRAM_CLIENT_ID = input('Client ID: ').strip()
if settings.INSTAGRAM_CLIENT_SECRET == '':
settings.INSTAGRAM_CLIENT_SECRET = input('Client Secret: ').strip()
if settings.INSTAGRAM_REDIRECT_URI == '':
settings.INSTAGRAM_REDIRECT_URI = input('Redirect URI: ').strip()
scope = ['basic', 'public_content', 'likes']
api = InstagramAPI(client_id=settings.INSTAGRAM_CLIENT_ID, client_secret=settings.INSTAGRAM_CLIENT_SECRET, redirect_uri=settings.INSTAGRAM_REDIRECT_URI)
redirect_uri = api.get_authorize_login_url(scope=scope)
stdout.write('\nVisit this page and authorize access in your browser:\n\n%s\n\n' % redirect_uri)
code = input('Paste in code in query string after redirect: ').strip()
access_token = api.exchange_code_for_access_token(code)
stdout.write('Access token:\n\n%s\n\n' % (access_token,))
class Command(BaseCommand):
help = 'Generate access token needed for Instagram'
def handle(self, **options):
get_auth_tokens(self.stdout)
|
StarcoderdataPython
|
168209
|
from .default import _C as cfg
from .default import update_config
|
StarcoderdataPython
|
30323
|
<reponame>shootsoft/practice
class Solution:
# @param {integer} k
# @param {integer} n
# @return {integer[][]}
def combinationSum3(self, k, n):
nums = range(1, 10)
self.results = []
self.combination(nums, n, k, 0, [])
return self.results
def combination(self, nums, target, k, start, result):
if k <= 0 :
return
elif k == 1:
for i in nums:
if i == target:
self.results.append([i])
elif k == 2:
end = len(nums) - 1
while start < end:
s = nums[start] + nums[end]
if s == target:
result.append(nums[start])
result.append(nums[end])
self.results.append(result[:])
result.pop()
result.pop()
start += 1
elif s < target:
start += 1
else:
#s > target
end -= 1
else:
for i in range(start, len(nums)-1):
t = target - nums[i]
if t >= nums[i+1]:
result.append(nums[i])
self.combination(nums, t, k -1, i + 1, result )
result.pop()
else:
break
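# Illustrative usage (added, not in the original snippet):
#   Solution().combinationSum3(3, 9)  # -> [[1, 2, 6], [1, 3, 5], [2, 3, 4]]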
|
StarcoderdataPython
|
1793020
|
<reponame>arccode/factory<gh_stars>1-10
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A factory test to check if the components can be probed successfully or not.
Description
-----------
Uses probe module to probe the components, and verifies the component count of
each category. The default rule is the count should be equal to 1. If the
required count is not 1, we can set the expected count either in ``device_data``
or in the argument `overridden_rules` in the test list.
If the component counts of some of the categories are not always the same
across SKUs, we can record the SKU-specific rules in
``py/config/model_sku.json`` and let the test ``model_sku`` be run before
this test.
The format of the config file::
{
<Component category> : {
<Component name> : {
"eval" : <Function expression>,
"expect" : <Rule expression>
}
}
}
Please refer to ``py/probe/probe_cmdline.py`` for more details.
Test Procedure
--------------
This is an automatic test that doesn't need any user interaction.
1. Run the probe module to probe the components listed in `config_file`.
2. Mark the test result to passed only if for each component category,
number of successfully probed components fits the category's rule.
3. If `show_ui` is ``False``, just end the test. Otherwise continue below
steps.
4. If `show_ui` is ``True``, show the result and wait for OP to press the space
key to continue. Otherwise show the result only if the test is failed.
When this test is verifying if the number of probed components of each category
fits the requirement, the following conditions will be executed:
1. If `overridden_rules` specifies a rule to verify the number of the
probed components of that category, use that rule.
2. If `overridden_rules` doesn't specify a rule for that category and
``device_data.component.has_<category_name>`` exists, take
``int(device_data.component.has_<category_name>)`` as the expected number
of probed components.
3. If none of above conditions fit the case, the test will expect only one
component of that category to be probed.
Dependency
----------
- Probe framework (``cros.factory.probe``).
Examples
--------
To do probe test on DUT, add a test item in the test list::
{
"pytest_name": "probe",
"args": {
"config_file": "probe.json",
"overridden_rules": [
["camera", "==", 2]
]
}
}
And list what components to probe in `probe.json` (Note that the comments
(``// ...``) below are not allowed in a real config file)::
{
"audio": {
"foo_audio": { // Probe by checking if the content of /tmp/foo is "FOO".
"eval": {"file": "/tmp/foo"},
"expect": "FOO"
},
"bar_audio": {
"eval": {"file": "/tmp/bar"},
"expect": "BAR"
}
},
"storage": {
"foo_storage": { // Probe by running the command "storage_probe" and
// checking if the stdout of the command is "FOO".
"eval": {"shell": "storage_probe"},
"expect": "FOO"
}
},
"camera": {
"camera_0": {
"eval": "shell:grep -o -w CAM1 /sys/class/video4linux/video0/name",
"expect": "CAM2"
},
"camera_1": {
"eval": "shell:grep -o -w CAM2 /sys/class/video4linux/video1/name",
"expect": "CAM2"
}
}
}
The `overridden_rules` argument above means that there should be two camera
components. So in the above example, the test would pass only if the probe
module successfully probed `camera_0`, `camera_1`, `foo_storage`, and one of
`foo_audio` or `bar_audio`.
The following example shows how to use ``device_data`` to specify the required
number of probed cameras. The test list should contain::
{
"pytest_name": "model_sku",
"args": {
"config_name": "my_model_sku"
}
},
{
"pytest": "probe"
"args": {
"config_file": "probe.json"
}
}
And ``my_model_sku.json`` should contain::
{
"product_sku": {
"Example": {
"34": {
"component.has_camera": False
},
"35": {
"component.has_camera": 2
}
}
}
}
In this example, we expect the probe module to find no camera component on a
product_name `Example` SKU 34 device, and to find two camera components on a
product_name `Example` SKU 35 device.
"""
import collections
import json
import operator
import os
from cros.factory.device import device_utils
from cros.factory.test import device_data
from cros.factory.test import session
from cros.factory.test.i18n import _
from cros.factory.test import test_case
from cros.factory.test import test_ui
from cros.factory.test import ui_templates
from cros.factory.test.utils import deploy_utils
from cros.factory.utils.arg_utils import Arg
# The config files should be placed in the py/test/pytests/probe/ folder.
LOCAL_CONFIG_DIR = os.path.dirname(os.path.abspath(__file__))
OPERATOR_MAP = {
'==': operator.eq,
'!=': operator.ne,
'<': operator.lt,
'<=': operator.le,
'>': operator.gt,
'>=': operator.ge,
'in': lambda a, b: a in b}
def EvaluateRule(a, op_str, b):
return OPERATOR_MAP[op_str](a, b)
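# For example (illustrative): EvaluateRule(2, '==', 2) is True, and
# EvaluateRule('camera_0', 'in', ['camera_0', 'camera_1']) is True.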
class ProbeTest(test_case.TestCase):
ARGS = [
Arg('config_file', str,
'Path to probe config file. This is interpreted as a path '
'relative to `test/pytests/probe` folder.'),
Arg('component_list', list,
'A list of components to be verified',
default=None),
Arg('overridden_rules', list,
'List of [category, cmp_function, value].',
default=[]),
Arg('show_ui', bool,
'Always show the result and prompt if set to True. Always not show '
'the result and prompt if set to False. Otherwise, only show the '
'result and prompt when the test fails.',
default=None),
]
def setUp(self):
self._dut = device_utils.CreateDUTInterface()
self.factory_tools = deploy_utils.CreateFactoryTools(self._dut)
self.config_file_path = os.path.join(
LOCAL_CONFIG_DIR, self.args.config_file)
def runTest(self):
# Check the config file exists.
if not os.path.exists(self.config_file_path):
self.fail('Config file %s does not exist.' % self.config_file_path)
# Execute Probe.
cmd = ['probe', '-v', 'probe', '--config-file', self.config_file_path]
if self.args.component_list is not None:
cmd += ['--comps'] + self.args.component_list
session.console.info('Call the command: %s', ' '.join(cmd))
probed_results = json.loads(self.factory_tools.CheckOutput(cmd))
# Generate the rules of each category.
rule_map = {}
for category in probed_results:
expected_count = device_data.GetDeviceData(
device_data.JoinKeys(device_data.KEY_COMPONENT, 'has_' + category))
rule_map[category] = (
'==', int(expected_count) if expected_count is not None else 1)
for category, op_str, value in self.args.overridden_rules:
rule_map[category] = (op_str, value)
table_html = ui_templates.Table(rows=len(probed_results) + 1, cols=4)
title = ['Category', 'Probed Components', 'Rule', 'Status']
for idx, content in enumerate(title):
table_html.SetContent(0, idx, '<b>%s</b>' % content)
# Check every category meets the rule.
all_passed = True
for row_idx, category in enumerate(probed_results, 1):
count = len(probed_results[category])
op_str, value = rule_map[category]
status = OPERATOR_MAP[op_str](count, value)
all_passed &= status
# Set the table.
counter = collections.defaultdict(int)
for result in probed_results[category]:
counter[result['name']] += 1
comp_summary = '<br>'.join('%d %s found.' % (num_comp, comp_name)
for comp_name, num_comp in counter.items())
summary_str = comp_summary or 'No component found.'
rule_str = 'count (%s) %s %s' % (count, op_str, value)
status_str = 'passed' if status else 'failed'
session.console.info('Category "%s" %s %s, %s.',
category, summary_str, rule_str, status_str)
table_html.SetContent(row_idx, 0, category)
table_html.SetContent(row_idx, 1, summary_str)
table_html.SetContent(row_idx, 2, rule_str)
table_html.SetContent(
row_idx, 3, '<div class=test-status-{0}>{0}</div>'.format(status_str))
if self.args.show_ui is True or (self.args.show_ui is None and
not all_passed):
self.ui.SetState([
table_html.GenerateHTML(), '<span class="prompt">',
_('Press SPACE to continue'), '</span>'
])
self.ui.WaitKeysOnce(test_ui.SPACE_KEY)
if not all_passed:
self.fail()
|
StarcoderdataPython
|
1799355
|
from django.conf.urls import url
from .views import *
app_name = 'forum'
urlpatterns = [
url(r'^$', index, name='index'),
url(r'^(?P<page_number>[0-9]+)$', index, name='index'),
url(r'^topic/(?P<pk>[0-9]+)/(?P<page_number>[0-9]+)', topic, name='topic'),
url(r'^login/', log_in, name='log_in'),
url(r'^register/', register, name='register'),
url(r'^logout/', log_out, name='log_out'),
]
|
StarcoderdataPython
|
1798371
|
#!/usr/bin/env python
#############################################################################
##
# This file is part of Taurus
##
# http://taurus-scada.org
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Taurus is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Taurus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Taurus. If not, see <http://www.gnu.org/licenses/>.
##
#############################################################################
from __future__ import absolute_import
__all__ = ["TaurusPlot"]
from future.utils import string_types
import copy
from taurus.external.qt import QtGui, Qt
from taurus.core.util.containers import LoopList
from taurus.core.util.log import Logger
from taurus.qt.qtcore.configuration import BaseConfigurableClass
from pyqtgraph import PlotWidget
from .curvespropertiestool import CurvesPropertiesTool
from .taurusmodelchoosertool import TaurusXYModelChooserTool
from .legendtool import PlotLegendTool
from .datainspectortool import DataInspectorTool
from .y2axis import Y2ViewBox
from .curveproperties import CURVE_COLORS
class TaurusPlot(PlotWidget, BaseConfigurableClass):
"""
TaurusPlot is a general widget for plotting 1D data sets. It is an extended
taurus-aware version of :class:`pyqtgraph.PlotWidget`.
    Apart from all the features already available in a regular PlotWidget,
TaurusPlot incorporates the following tools/features:
- Secondary Y axis (right axis)
- A plot configuration dialog, and save/restore configuration
facilities
- A menu option for adding/removing models
- A menu option for showing/hiding the legend
- Automatic color change of curves for newly added models
"""
def __init__(self, parent=None, **kwargs):
if Qt.QT_VERSION < 0x050000:
# Workaround for issue when using super with pyqt<5
BaseConfigurableClass.__init__(self)
PlotWidget.__init__(self, parent=parent, **kwargs)
else:
super(TaurusPlot, self).__init__(parent=None, **kwargs)
# Compose with a Logger
self._logger = Logger(name=self.__class__.__name__)
self.debug = self._logger.debug
self.info = self._logger.info
self.warning = self._logger.warning
self.error = self._logger.error
# set up cyclic color generator
self._curveColors = LoopList(CURVE_COLORS)
self._curveColors.setCurrentIndex(-1)
# add save & retrieve configuration actions
menu = self.getPlotItem().getViewBox().menu
saveConfigAction = QtGui.QAction("Save configuration", menu)
saveConfigAction.triggered.connect(self._onSaveConfigAction)
menu.addAction(saveConfigAction)
loadConfigAction = QtGui.QAction("Retrieve saved configuration", menu)
loadConfigAction.triggered.connect(self._onRetrieveConfigAction)
menu.addAction(loadConfigAction)
self.registerConfigProperty(self._getState, self.restoreState, "state")
# add legend tool
legend_tool = PlotLegendTool(self)
legend_tool.attachToPlotItem(self.getPlotItem())
# add model chooser
self._model_chooser_tool = TaurusXYModelChooserTool(self)
self._model_chooser_tool.attachToPlotItem(
self.getPlotItem(), self, self._curveColors
)
# add Y2 axis
self._y2 = Y2ViewBox()
self._y2.attachToPlotItem(self.getPlotItem())
# add plot configuration dialog
self._cprop_tool = CurvesPropertiesTool(self)
self._cprop_tool.attachToPlotItem(self.getPlotItem(), y2=self._y2)
# add a data inspector
inspector_tool = DataInspectorTool(self)
inspector_tool.attachToPlotItem(self.getPlotItem())
# enable Autorange
self.getPlotItem().getViewBox().enableAutoRange(True)
self._y2.enableAutoRange(True)
# Register config properties
self.registerConfigDelegate(self._model_chooser_tool, "XYmodelchooser")
self.registerConfigDelegate(self._y2, "Y2Axis")
self.registerConfigDelegate(self._cprop_tool, "CurvePropertiesTool")
self.registerConfigDelegate(legend_tool, "legend")
self.registerConfigDelegate(inspector_tool, "inspector")
# --------------------------------------------------------------------
# workaround for bug in pyqtgraph v<=0.10.0, already fixed in
# https://github.com/pyqtgraph/pyqtgraph/commit/52754d4859
# TODO: remove this once pyqtgraph v>0.10 is released
def __getattr__(self, item):
try:
return PlotWidget.__getattr__(self, item)
except NameError:
raise AttributeError(
"{} has no attribute {}".format(self.__class__.__name__, item)
)
# --------------------------------------------------------------------
def __getitem__(self, idx):
"""
Provides a list-like interface: items can be accessed using slice
notation
"""
return self.getPlotItem().listDataItems()[idx]
def __len__(self):
return len(self.getPlotItem().listDataItems())
def setModel(self, names):
"""Reimplemented to delegate to the model chooser"""
# support passing a string in names
if isinstance(names, string_types):
names = [names]
self._model_chooser_tool.updateModels(names)
def addModels(self, names):
"""Reimplemented to delegate to the model chooser"""
# support passing a string in names
if isinstance(names, string_types):
names = [names]
self._model_chooser_tool.addModels(names)
def _getState(self):
"""Same as PlotWidget.saveState but removing viewRange conf to force
a refresh with targetRange when loading
"""
state = copy.deepcopy(self.saveState())
# remove viewRange conf
del state["view"]["viewRange"]
return state
def setXAxisMode(self, x_axis_mode):
"""Required generic TaurusPlot API """
from taurus_pyqtgraph import DateAxisItem
if x_axis_mode == "t":
axis = DateAxisItem(orientation="bottom")
axis.attachToPlotItem(self.getPlotItem())
elif x_axis_mode == "n":
axis = self.getPlotItem().axes["bottom"]["item"]
if isinstance(axis, DateAxisItem):
axis.detachFromPlotItem()
else:
raise ValueError("Unsupported x axis mode {}".format(x_axis_mode))
def _onSaveConfigAction(self):
"""wrapper to avoid issues with overloaded signals"""
return self.saveConfigFile()
def _onRetrieveConfigAction(self):
"""wrapper to avoid issues with overloaded signals"""
return self.loadConfigFile()
def plot_main(
models=(),
config_file=None,
x_axis_mode="n",
demo=False,
window_name="TaurusPlot (pg)",
):
"""Launch a TaurusPlot"""
import sys
from taurus.qt.qtgui.application import TaurusApplication
app = TaurusApplication(cmd_line_parser=None, app_name="taurusplot(pg)")
w = TaurusPlot()
# w.loadConfigFile('tmp/TaurusPlot.pck')
w.setWindowTitle(window_name)
if demo:
models = list(models)
models.extend(["eval:rand(100)", "eval:0.5*sqrt(arange(100))"])
w.setXAxisMode(x_axis_mode.lower())
if config_file is not None:
w.loadConfigFile(config_file)
if models:
w.setModel(models)
w.show()
ret = app.exec_()
# import pprint
# pprint.pprint(w.createConfig())
sys.exit(ret)
if __name__ == "__main__":
plot_main()
|
StarcoderdataPython
|
1698377
|
<reponame>stepanandr/taf
# Copyright (c) 2011 - 2017, Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""``dev_vlabcross.py```
`ONS Vlab virtual cross specific functionality`
"""
from os.path import join as os_path_join
import sys
import time
import socket
from subprocess import Popen
from . import loggers
from . import environment
from . import dev_basecross
from .custom_exceptions import CrossException
from .xmlrpc_proxy import TimeoutServerProxy as xmlrpcProxy
class VlabEnv(dev_basecross.GenericXConnectMixin):
"""Vlab from device viewpoint.
"""
class_logger = loggers.ClassLogger()
DEFAULT_TIMEOUT = 1
def __init__(self, config, opts):
"""Initialize VlabEnv class.
Args:
config(dict): Configuration information.
opts(OptionParser): py.test config.option object which contains all py.test cli options.
Raises:
CrossException: error in vlab path
"""
self.id = config['id']
self.type = config['instance_type']
self.ipaddr = config['ip_host']
self.port = config['ip_port'] if "ip_port" in config else "8050"
self.ifaces = config['ports']
self.opts = opts
# Do xconnect on create?
self.autoconnect = config['autoconnect'] if "autoconnect" in config else True
self.related_conf = {}
if "related_conf" in list(config.keys()):
self.related_conf = config['related_conf']
self.tgmap = []
if "tgmap" in list(config.keys()):
self.tgmap = config['tgmap']
if "portmap" in list(config.keys()):
self.portmap = config['portmap']
if "bridged_ifaces" in list(config.keys()):
self.bridged_ifaces = config['bridged_ifaces']
self.ports_count = len(self.ifaces) - len(self.bridged_ifaces)
else:
self.ports_count = len(self.ifaces)
self.bind_iface = config['ip_iface'] if "ip_iface" in config else None
self.build_path = environment.get_absolute_build_path(opts.build_path)
if not self.build_path:
raise CrossException("Could not find vlab binaries path - %s." % (opts.build_path, ))
self.class_logger.info("Vlab binaries path: %s." % (self.build_path, ))
self.xmlproxy = xmlrpcProxy("http://%s:%s/RPC2" % (self.ipaddr, self.port), timeout=45)
self.popen = None
self.popen_logfile = "vlab%s.output.log" % (self.id, )
# Set On/Off(True/False) status according to get_only option.
self.status = self.opts.get_only
def probe_port(self):
"""Establishing a connection to a remote host.
Returns:
bool: True if connection is established
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(3)
try:
sock.connect((self.ipaddr, int(self.port)))
sock.close()
return True
except Exception:
return False
def probe(self):
"""Check if Vlab instance is run.
Returns:
dict: Vlab status
"""
result = {'isup': False, 'type': "unknown", 'prop': {}}
if self.probe_port():
result['isup'] = True
try:
instance_prop = self.xmlproxy.vlab.sysinfo()
result['type'] = "vlab"
result['prop'] = instance_prop
self.class_logger.info("Found a running vlab instance on %s:%s." % (self.ipaddr, self.port, ))
self.class_logger.info("Revision: %s" % result['prop']['revision'])
except Exception:
pass
return result
def waiton(self, timeout=30):
"""Waiting until Vlab port is up.
Args:
timeout(int): Waiting timeout
Raises:
CrossException: error on vlab start
Returns:
dict: Vlab status
"""
status = None
message = "Waiting until vlab on %s port #%s is up." % (self.ipaddr, self.port, )
self.class_logger.info(message)
stop_flag = False
end_time = time.time() + timeout
while not stop_flag:
if loggers.LOG_STREAM:
sys.stdout.write(".")
sys.stdout.flush()
if time.time() < end_time:
status = self.probe()
if status["isup"] and status["type"] == "vlab":
stop_flag = True
self.class_logger.info(("VLAB started on host %(host)s port %(port)s: " +
"uptime - %(uptime)s, workdir - %(workdir)s, hostname - %(hostname)s," +
"xmlRpcPort - %(xmlRpcPort)s, port - %(xport)s, revision - %(revision)s") %
{'host': self.ipaddr, 'port': self.port, 'uptime': status['prop']['uptime'],
'workdir': status['prop']['workdir'],
'hostname': status['prop']['hostname'], 'xmlRpcPort': status['prop']['xmlRpcPort'],
'xport': status['prop']['port'], 'revision': status['prop']['revision']})
else:
if status["isup"] and status["type"] != "vlab":
message = (("Port %s on host %s is busy. " +
"Check if vlab already started or other application use the same port.") %
(self.port, self.ipaddr))
else:
message = "Timeout exceeded."
self.class_logger.warning(message)
raise CrossException(message)
if not stop_flag:
time.sleep(self.DEFAULT_TIMEOUT)
return status
def waitoff(self, timeout=30):
"""Waiting until Vlab port is down.
Args:
timeout(int): Waiting timeout
Raises:
CrossException: error on vlab stop
Returns:
dict: Vlab status
"""
status = None
message = "Waiting until vlab on %s port #%s is down." % (self.ipaddr, self.port, )
self.class_logger.info(message)
stop_flag = False
end_time = time.time() + timeout
while not stop_flag:
if loggers.LOG_STREAM:
sys.stdout.write(".")
sys.stdout.flush()
if time.time() < end_time:
status = self.probe_port()
if not status:
stop_flag = True
else:
if status:
message = "Timeout exceeded. The port %s on host %s is still open" % (self.port, self.ipaddr)
self.class_logger.warning(message)
raise CrossException(message)
if not stop_flag:
time.sleep(self.DEFAULT_TIMEOUT)
message = "Waiting until vlab process with %d pid stop" % (self.popen.pid, )
self.class_logger.info(message)
while True:
if loggers.LOG_STREAM:
sys.stdout.write(".")
sys.stdout.flush()
if time.time() < end_time:
if self.popen.poll() is not None:
self.class_logger.info("Exit code of the vlab process with PID %s = %s" %
(self.popen.pid, self.popen.poll()))
break
else:
message = "Timeout exceeded. Vlab process with PID %d still exists." % (self.popen.pid, )
self.class_logger.warning(message)
raise CrossException(message)
time.sleep(self.DEFAULT_TIMEOUT)
def start(self):
"""Starts vlab based on provided host and port info with specified number of interfaces.
Raises:
CrossException: not local environment, vlab is stopped
Exception: error on vlab start
"""
def check_rc():
"""Checking Vlab process.
"""
rc = process.poll()
if rc is not None:
raise CrossException("Vlab process is terminated with signal %s." % (rc, ))
process = None
bin_path = os_path_join(self.build_path, "bin", "vlab")
# TODO: Add possibility to run vlab instance on remote hosts, any port using paramiko.
if (self.ipaddr != "localhost") and (self.port != "8050"):
message = "Only local environment is supported at the moment."
self.class_logger.error(message)
raise CrossException(message)
try:
self.class_logger.info("Starting Vlab on %s:%s" % (self.ipaddr, self.port))
command_lst = [bin_path, "-v", "%s" % (self.ports_count, )]
if hasattr(self, "bridged_ifaces"):
for b_iface in self.bridged_ifaces:
command_lst.append("-P")
command_lst.append(b_iface)
self.class_logger.debug("Start command: %s" % (" ".join(command_lst), ))
log_wrap_out, log_wrap_err = loggers.pipe_loggers("vlab%s" % (self.id, ), self.popen_logfile)
process = Popen(command_lst, stdout=log_wrap_out, stderr=log_wrap_err,
cwd=self.build_path, env={"LD_LIBRARY_PATH": os_path_join(self.build_path, "lib")})
check_rc()
except Exception as err:
self.class_logger.error("Error executing vlab Popen process.")
self.class_logger.error(str(err))
raise
# let's wait until device is up and running:
self.waiton()
self.popen = process
check_rc()
self.status = True
def stop(self):
"""Stops vlab based on provided host and port info.
Raises:
CrossException: error on vlab stop
"""
if not self.popen:
message = "No Popen object exists for Vlab. Exiting stop() method without processing."
self.class_logger.error(message)
raise CrossException(message)
# Send xmlrpc shutdown query
result = self.xmlproxy.system.shutdown("")
        if result != 0:
message = "Error stopping vlab instance. XMLRPC query response = %s" % (result, )
self.class_logger.error(message)
raise CrossException(message)
else:
# let's wait until device is fully stopped:
self.waitoff()
self.status = False
def restart(self):
"""Restarting Vlab instance.
"""
try:
self.stop()
except Exception as err:
self.class_logger.warning("Fail to stop vlab instance with error: %s" % err)
finally:
self.start()
def check(self):
"""Checking Vlab instance status.
"""
if self.status:
self.waiton()
else:
self.class_logger.info("Skip check method for vlab id:%s because it has Off status." % (self.id, ))
class VlabCross(VlabEnv):
"""Vlab from xconnect viewpoint.
"""
class_logger = loggers.ClassLogger()
def _get_ports_from_config(self, connection=None):
"""Get ports from configuration.
Args:
connection(list): Connection info in format [sw1, port1, sw2, port2]
Raises:
CrossException: unsupported connection type
ValueError: error in configuration file
Returns:
list: Ports from configuration
"""
def get_port(conn):
"""Get port ID.
"""
# If device linked to another via bridged interface
if hasattr(self, 'portmap'):
for elem in self.portmap:
if conn == elem[:2]:
return [0, elem[2] - 1]
# If device id in connection == vlab id or id of related TG
if conn[0] == self.id or conn[0] in self.tgmap:
# Return vlab self id and vlab port id from list of ifaces
return [0, conn[1] - 1]
# other devices Id.
else:
for rkey in list(self.related_conf.keys()):
rconf = self.related_conf[rkey]
if rconf['id'] == conn[0] and rconf['entry_type'] == "switch":
# Return switch ID and port ID from config
# [devId: conf_port_Id] --> [devId: port_Id_for_vlab]
# E.G.
# conn = [1, 3]
# rconf = {'id': 1, ports: ["5", "6", "7"]}
# conf_port_id = 3, real_dev_port_id = "7", port_Id_for_vlab = 6 = (7 - 1)
# Vlab port list index start from 0, but switch port index from 1, so do -1
# Switch ID = PortNo - 8080. E.g. 8082 - 8080 = 2
return [int(rconf['ip_port']) - 8080, int(rconf['ports'][conn[1] - 1]) - 1]
elif rconf['id'] == conn[0] and rconf['entry_type'] == "hub":
return [rconf['hub_id'], int(rconf['ports'][conn[1] - 1]) - 1]
elif rconf['id'] == conn[0] and not rconf['entry_type'] in ["switch", "hub"]:
message = "Only connections to switch, hub or Vlab itself are supported. But found entry type = %s" % rconf['entry_type']
self.class_logger.error(message)
raise CrossException(message)
# Part 1
vconn1 = get_port(connection[:2])
# Part 2
vconn2 = get_port(connection[2:])
try:
vconn_full = vconn1 + vconn2
return vconn_full
except Exception:
raise ValueError("Cannot make requested connection. Check config. Got following args: %s, %s" % (vconn1, vconn2))
def xconnect(self, connection):
"""Create single connection.
Args:
connection(list): Connection info in format [sw1, port1, sw2, port2]
"""
vconn = self._get_ports_from_config(connection)
self.class_logger.debug("Connect VLAB ports: %s" % vconn)
return self.xmlproxy.vlab.cross.connect(vconn[0], vconn[1], vconn[2], vconn[3])
def xdisconnect(self, connection):
"""Destroy single connection.
Args:
connection(list): Connection info in format [sw1, port1, sw2, port2]
"""
vconn = self._get_ports_from_config(connection)
self.class_logger.debug("Disconnect VLAB ports: %s" % vconn)
return self.xmlproxy.vlab.cross.disconnect(vconn[0], vconn[1], vconn[2], vconn[3])
def cross_connect(self, conn_list):
"""Make connections between switches.
Args:
conn_list(list[list]): Set of connections in format: [[sw1, port1, sw2, port2], ... ]
Raises:
CrossException: devices from conn_list are not in related configurations,
error on connection creation
Returns:
bool: True if success or raise an error if connections were not created.
Examples::
cross_connect([[0, 1, 1, 1], [0, 2, 1, 2]])
"""
if self.related_conf and conn_list:
list_id = []
for conn in conn_list:
list_id.append(conn[0])
list_id.append(conn[2])
if set(self.related_conf.keys()) != set(list_id):
message = ("Set of cross connected devices %s is not appropriate related config %s."
% (list(set(list_id)), list(set(self.related_conf.keys()))))
self.class_logger.error(message)
raise CrossException(message)
for conn in conn_list:
# make connections
self.class_logger.info("Make connection %(sw1)s,%(port1)s, and %(sw2)s,%(port2)s." %
{'sw1': conn[0], 'port1': conn[1], 'sw2': conn[2], 'port2': conn[3]})
if self.xconnect(conn) == 0:
message = "Cannot create connection: %s" % conn
self.class_logger.error(message)
raise CrossException(message)
return True
def cross_disconnect(self, disconn_list):
"""Destroy connections between switches.
Args:
disconn_list(list[list]): Set of connections in format: [[sw1, port1, sw2, port2], ... ]
Raises:
CrossException: error on connection destroying
Returns:
bool: True if success or False if connections were not destroyed.
Examples::
cross_disconnect([[0, 1, 1, 1], [0, 2, 1, 2]])
"""
# Destroy connections using Virtual Lab
for conn in disconn_list:
self.class_logger.info("Destroy connection %(sw1)s,%(port1)s, and %(sw2)s,%(port2)s." %
{'sw1': conn[0], 'port1': conn[1], 'sw2': conn[2], 'port2': conn[3]})
if self.xdisconnect(conn) == 0:
message = "Cannot destroy connection: %s" % conn
self.class_logger.error(message)
raise CrossException(message)
return True
def cross_clear(self):
"""Clear all connections between switches
Raises:
CrossException: error on connections clearing
Returns:
bool: True if success or False if all connections were not cleared.
Examples::
cross_clear(env)
"""
self.class_logger.info("Clear all connections.")
if self.xmlproxy.vlab.cross.clear() == 0:
message = "Cannot clear all connections"
self.class_logger.error(message)
raise CrossException(message)
return True
ENTRY_TYPE = "cross"
INSTANCES = {"vlab": VlabCross}
NAME = "cross"
|
StarcoderdataPython
|
74996
|
<reponame>sgondala/Automix<filename>yahoo_with_mixtext/hyperopt_eval_single.py<gh_stars>1-10
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
import numpy as np
from FastAutoAugment.read_data import *
from FastAutoAugment.classification_models.MixText import *
import pickle
import wandb
import argparse
from tqdm import tqdm
from hyperopt import fmin, tpe, hp, Trials
parser = argparse.ArgumentParser(description='PyTorch MixText')
parser.add_argument('--batch-size', default=64, type=int, metavar='N',
help='train batchsize')
parser.add_argument('--checkpoint-path', type=str, default='checkpoints/train_yahoo_on_mixtext_10_per_class_no_augmentations/model_best.pth', help='Saved model checkpoint')
parser.add_argument('--sub-policies-per-policy', type=int, default=3)
parser.add_argument('--number-of-policies-to-evaluate', type=int, default=50)
parser.add_argument('--alpha', type=float, default=2)
parser.add_argument('--mix-layers', nargs='+',
default=[7,9,12], type=int, help='define mix layer set')
args = parser.parse_args()
# Seeds
np.random.seed(42)
torch.manual_seed(42)
torch.cuda.manual_seed_all(42)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
def own_loss(logits, target, num_labels):
assert logits.shape == target.shape
loss = -torch.sum(F.log_softmax(logits, dim=1)*target, axis=1)
assert loss.shape[0] == target.shape[0]
return loss.mean()
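# Quick sanity-check sketch (illustrative, not in the original script): with
# zero logits and a uniform soft target over 2 classes, the loss equals log(2):
#   own_loss(torch.zeros(4, 2), torch.full((4, 2), 0.5), num_labels=2)  # ~0.693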
def optimization_function(input_arguments):
arg1, arg2, arg3 = input_arguments
wandb.init(project="auto_augment", reinit=True)
wandb_name = f'hyperopt_single_inter_lada_layers_{arg1}_{arg2}_{arg3}'
model_name = 'bert-base-uncased'
dataset_identifier = 'val_200'
val = pickle.load(open('data/paper_yahoo_split/yahoo_val_200_per_class.pkl', 'rb'))
# knn = arg1
# mu = arg2
knn = 7
mu = 0.23
val_dataset = create_dataset(val['X'], val['y'], model_name, 256, mix='Inter_LADA', num_classes=10,knn_lada=knn, mu_lada=mu, dataset_identifier = dataset_identifier)
wandb.run.name = wandb_name
wandb.run.save()
val_dataloader = DataLoader(val_dataset, batch_size=args.batch_size, num_workers=4)
base_model = torch.load(args.checkpoint_path).cuda()
base_model.eval()
with torch.no_grad():
loss_total = 0
total_sample = 0
for batch in tqdm(val_dataloader, desc='Validation loop'):
encoded_1, encoded_2, label_1, label_2 = batch
assert encoded_1.shape == encoded_2.shape
# mix_layer = np.random.choice(args.mix_layers)
mix_layer = np.random.choice([arg1, arg2, arg3])
l = np.random.beta(args.alpha, args.alpha)
l = max(l, 1-l)
logits = base_model(encoded_1.cuda(), encoded_2.cuda(), l, mix_layer)
combined_labels = label_1 * l + label_2 * (1-l)
loss = own_loss(logits, combined_labels.cuda(), num_labels=10)
loss_total += loss.item() * encoded_1.shape[0]
total_sample += encoded_1.shape[0]
loss_total = loss_total/total_sample
wandb.log({'Test loss' : loss_total})
print('Test loss ', loss_total)
return loss_total
if __name__ == "__main__":
trials = Trials()
space = []
# space.append(hp.choice(f'arg1', list(range(1, 10))))
# space.append(hp.uniform(f'arg2', 0, 1))
space.append(hp.choice(f'arg1', list(range(1,12))))
space.append(hp.choice(f'arg2', list(range(1,12))))
space.append(hp.choice(f'arg3', list(range(1,12))))
best = fmin(fn=optimization_function,
space=space,
algo=tpe.suggest,
max_evals=args.number_of_policies_to_evaluate,
trials=trials)
pickle.dump(
trials,
open(f'data/saved_logs/hyperopt_single_inter_lada_layers_changes_{args.number_of_policies_to_evaluate}.pkl', 'wb'))
|
StarcoderdataPython
|
96770
|
<filename>brainreg/backend/niftyreg/utils.py
import imio
import numpy as np
def save_nii(stack, atlas_pixel_sizes, dest_path):
"""
    Save the image stack to dest_path as a nifti image.
The scale (zooms of the output nifti image) is copied from the atlas
brain.
:param str dest_path: Where to save the image on the filesystem
"""
transformation_matrix = get_transf_matrix_from_res(atlas_pixel_sizes)
imio.to_nii(
stack,
dest_path,
scale=(
atlas_pixel_sizes[0] / 1000,
atlas_pixel_sizes[1] / 1000,
atlas_pixel_sizes[2] / 1000,
),
affine_transform=transformation_matrix,
)
def get_transf_matrix_from_res(pix_sizes):
"""Create transformation matrix in mm
from a dictionary of pixel sizes in um
:param pix_sizes:
:return:
"""
transformation_matrix = np.eye(4)
for i in [0, 1, 2]:
transformation_matrix[i, i] = pix_sizes[i] / 1000
return transformation_matrix
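# For example (illustrative): isotropic 25 um pixels give a 4x4 affine with
# 0.025 mm spacing on the diagonal:
#   get_transf_matrix_from_res({0: 25, 1: 25, 2: 25})
#   # == np.diag([0.025, 0.025, 0.025, 1.0])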
|
StarcoderdataPython
|
182611
|
import torch
import torch.nn as nn
import torch.nn.functional as F  # needed by the attention-map lambda in MobileNetV2.forward
import math
# wildcard import for legacy reasons
if __name__ == '__main__':
import sys
sys.path.append("..")
from models.blocks import *
from models.wide_resnet import compression, group_lowrank
# only used in the first convolution, which we do not substitute by convention
def conv_bn(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
# only used for final fully connected layers
def conv_1x1_bn(inp, oup, ConvClass):
return nn.Sequential(
ConvClass(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio, ConvClass):
super(InvertedResidual, self).__init__()
self.stride = stride
self.Conv = ConvClass
assert stride in [1, 2]
hidden_dim = round(inp * expand_ratio)
self.use_res_connect = self.stride == 1 and inp == oup
if expand_ratio == 1:
self.conv = nn.Sequential(
# dw
nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
# pw-linear
self.Conv(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
else:
self.conv = nn.Sequential(
# pw
self.Conv(inp, hidden_dim, 1, 1, 0, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
# dw
nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
# pw-linear
self.Conv(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
class MobileNetV2(nn.Module):
def __init__(self, ConvClass, block=None, n_class=1000,
input_size=224, width_mult=1.):
super(MobileNetV2, self).__init__()
self.kwargs = dict(ConvClass=ConvClass, block=block, n_class=n_class,
input_size=input_size, width_mult=width_mult)
block = InvertedResidual
self.Conv = ConvClass
input_channel = 32
last_channel = 1280
interverted_residual_setting = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
# building first layer
assert input_size % 32 == 0
input_channel = int(input_channel * width_mult)
self.last_channel = int(last_channel * width_mult) if width_mult > 1.0 else last_channel
self.features = [conv_bn(3, input_channel, 2)]
# building inverted residual blocks
for t, c, n, s in interverted_residual_setting:
output_channel = int(c * width_mult)
for i in range(n):
if i == 0:
self.features.append(block(input_channel, output_channel, s, expand_ratio=t, ConvClass=self.Conv))
else:
self.features.append(block(input_channel, output_channel, 1, expand_ratio=t, ConvClass=self.Conv))
input_channel = output_channel
# building last several layers
self.features.append(conv_1x1_bn(input_channel, self.last_channel, self.Conv))
# make it nn.Sequential
self.features = nn.Sequential(*self.features)
# building classifier
self.classifier_conv = self.Conv(self.last_channel, n_class, 1, 1, 0, bias=True)
#self.classifier = \
#nn.Dropout(0.2), remove dropout for training according to github
# nn.(self.last_channel, n_class),
#)
self._initialize_weights()
def classifier(self, x):
n, c = x.size()
x = self.classifier_conv(x.view(n,c,1,1))
n, c, _, _ = x.size()
return x.view(n,c)
def forward(self, x):
#y_orig = self.features(x)
attention_maps = []
attention = lambda x: F.normalize(x.pow(2).mean(1).view(x.size(0), -1))
y = x
for block in self.features:
y = block(y)
if isinstance(block, InvertedResidual):
if block.stride > 1:
attention_maps.append(attention(y))
#error = torch.abs(y-y_orig).max()
#assert error < 1e-2, f"Error {error} above 0.01"
x = y
x = x.mean(3).mean(2)
x = self.classifier(x)
return x, attention_maps
def compression_ratio(self):
return compression(self.__class__, self.kwargs)
def grouped_parameters(self, weight_decay):
return group_lowrank(self.named_parameters(), weight_decay,
self.compression_ratio())
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
if hasattr(m, 'weight'):
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
n = m.weight.size(1)
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
def save_reference():
net = MobileNetV2()
net.eval()
x = torch.randn(1,3,224,224).float()
y = net(x)
print(y.size())
torch.save(x, "reference_input_mobilenet.torch")
torch.save(y, "reference_output_mobilenet.torch")
torch.save(net.state_dict(), "reference_state_mobilenet.torch")
def match_keys(net, state):
nstate = net.state_dict()
old_keys = [k for k in state]
for i, k in enumerate(nstate):
p = state[old_keys[i]]
if i == (len(old_keys)-2):
n,m = p.size()
nstate[k] = p.view(n,m,1,1)
else:
nstate[k] = p
return nstate
def test():
import os
net = MobileNetV2(Conv)
if os.path.exists("reference_state_mobilenet.torch"):
state = torch.load("reference_state_mobilenet.torch")
state = match_keys(net, state)
net.load_state_dict(state)
net.eval()
x = torch.load("reference_input_mobilenet.torch")
else:
x = torch.randn(1,3,224,224).float()
y, _ = net(Variable(x))
print(y.size())
# check if these match the test weights
if os.path.exists("reference_output_mobilenet.torch"):
ref_output = torch.load("reference_output_mobilenet.torch")
error = torch.abs(ref_output - y).max()
print(f"Error: {error}, Max logit: {y.max()}/{ref_output.max()}, Min logit: {y.min()}/{ref_output.min()}")
state = {
'net': net.state_dict(),
'epoch': 150,
'args': None,
'width': None,
'depth': None,
'conv': 'Conv',
'blocktype': None,
'module': None,
'train_losses': None,
'train_errors': None,
'val_losses': None,
'val_errors': [28.2],
}
torch.save(state, "mobilenetv2.tonylins.t7")
def test_compression():
net = MobileNetV2(Conv)
#net = MobileNetV2(conv_function('Hashed_0.1'))
nparams = lambda x: sum([p.numel() for p in x.parameters()])
for block in net.features:
print(nparams(block))
for x in block:
print(x)
print(nparams(x))
#CompressedConv = conv_function("Hashed_0.1")
for conv in ['Shuffle_%i'%i for i in [4,8,16,32]]+['Hashed_0.01']:
print(conv)
CompressedConv = conv_function(conv)
net = MobileNetV2(CompressedConv)
print(" ", net.compression_ratio())
if __name__ == '__main__':
test()
#test_compression()
|
StarcoderdataPython
|
1648939
|
from learner import Learner
from imgur import Imgur
from meme import Memer
import logging
doom_img_key = "__doomimg__"
doom_quote_key = "__doomquote__"
class Doom():
def memify(self, image, content):
memer = Memer()
parts = [x.strip() for x in content.encode('utf-8').split(",")]
top = parts[0] if len(parts) > 0 else None
bottom = parts[1] if len(parts) > 1 else None
return memer.get_meme(image, top, bottom)
def doom_meme(self, content):
learner = Learner()
image = learner.get(doom_img_key)
quote = content if content else learner.get(doom_quote_key)
return self.memify(image, quote)
def doom_pic(self, image):
learner = Learner()
learner.learn(doom_img_key, image)
return "got a pick of DOOM"
def doom_quote(self, quote):
learner = Learner()
learner.learn(doom_quote_key, quote)
return "got some verse from DOOM"
def doom(self, details):
if not details or len(details) == 0:
return self.doom_meme(None)
elif details[0] == "quote":
return self.doom_quote(" ".join(details[1:]))
elif details[0] == "pic" or details[0] == "img":
return self.doom_pic(details[1])
else:
return self.doom_meme(" ".join(details))
|
StarcoderdataPython
|
1615563
|
<reponame>mmaysami/azure-functions-python<gh_stars>0
import logging
import json
import time
import azure.functions as func
from . import toolsA_F1 as tools
def main(req: func.HttpRequest) -> func.HttpResponse:
start = time.time()
logging.info('Python HTTP trigger function processed a request.')
try:
# Decode and return request body as JSON
req_body = req.get_json()
except ValueError:
numA, numB = None, None
pass
else:
numA = req_body.get('A')
numB = req_body.get('B')
if numA and numB:
# Call Common Functions
sum1 = tools.sum1(numA, numB)
sub1 = tools.sub1(numA, numB)
pow1 = tools.pow1(numA, numB)
div1 = tools.div1(numA, numB)
dt1 = time.time()-start
return func.HttpResponse(
json.dumps({
'method': req.method,
'url': req.url,
'headers': dict(req.headers),
'params': dict(req.params),
'get_body': req.get_body().decode(),
'timer': dt1,
                'return': 'Function App received %s and %s' % (numA, numB),
'Sum': sum1,
'Sub': sub1,
'Pow': pow1,
'Div': div1
})
)
else:
dt1 = time.time()-start
return func.HttpResponse(
json.dumps({
'method': req.method,
'url': req.url,
'headers': dict(req.headers),
'params': dict(req.params),
'get_body': req.get_body().decode(),
'timer': dt1,
'return': 'Please pass numbers A,B to Function App in the request body'
})
, status_code=400
)
|
StarcoderdataPython
|
3324095
|
<gh_stars>0
from .base_token_test import TestToken
from ..entity_objects.authentication_object import AuthenticationObject
class TestAuthentication(TestToken):
"""
Implements the authentication test routines
"""
_entity_object_class = AuthenticationObject
""" An object for a testing entity """
del TestToken
|
StarcoderdataPython
|
1712567
|
<reponame>dextar1/image-classifier<filename>test.py
from PIL import Image, ImageFilter
def imageprepare(argv):
"""
This function returns the pixel values.
    The input is a png file location.
"""
im = Image.open(argv).convert('L')
width = float(im.size[0])
height = float(im.size[1])
newImage = Image.new('L', (28, 28), (255)) # creates white canvas of 28x28 pixels
if width > height: # check which dimension is bigger
# Width is bigger. Width becomes 20 pixels.
nheight = int(round((20.0 / width * height), 0)) # resize height according to ratio width
if (nheight == 0): # rare case but minimum is 1 pixel
nheight = 1
# resize and sharpen
img = im.resize((20, nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)
        wtop = int(round(((28 - nheight) / 2), 0))  # calculate vertical position
newImage.paste(img, (4, wtop)) # paste resized image on white canvas
else:
        # Height is bigger. Height becomes 20 pixels.
nwidth = int(round((20.0 / height * width), 0)) # resize width according to ratio height
if (nwidth == 0): # rare case but minimum is 1 pixel
nwidth = 1
# resize and sharpen
img = im.resize((nwidth, 20), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)
        wleft = int(round(((28 - nwidth) / 2), 0))  # calculate horizontal position
newImage.paste(img, (wleft, 4)) # paste resized image on white canvas
    # newImage.save("sample.png")
tv = list(newImage.getdata()) # get pixel values
# normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.
tva = [(255 - x) * 1.0 / 255.0 for x in tv]
print(tva)
return tva
x=imageprepare('./image.png')#file path here
print(len(x))# mnist IMAGES are 28x28=784 pixels
|
StarcoderdataPython
|
3253249
|
<filename>bin/smartstreamingcommand.py<gh_stars>0
#!/usr/bin/env python
from splunklib.searchcommands import StreamingCommand
import sys
import select
import os
import gzip
import re
import csv
import math
import time
import logging
try:
from collections import OrderedDict # must be python 2.7
except ImportError:
from ..ordereddict import OrderedDict
from splunklib.six.moves import zip as izip
from splunklib.searchcommands.internals import (CsvDialect, MetadataDecoder)
class ChunkedInput(object):
def __init__(self, infile, limit):
self._file = infile
self._limit = limit
def __getattr__(self, name):
return getattr(self._file, name)
def __iter__(self):
while True:
if self._limit <= 0:
return
line = self._file.readline()
yield line
self._limit -= len(line)
class SmartStreamingCommand(StreamingCommand):
""" A smarter version of the Splunk SDK's StreamingCommand.
Like the parent class, this class applies a transformation to
search results as they travel through the streams pipeline.
This class adds functionality that more intelligently reads events
from the Splunk server, reducing the memory consumption of this
custom command when it is running. Additionally, this class adds
support to continually monitor and drain any continuing
information sent by the parent Splunk process. Finally, this
class adds functionality that will incrementally flush the
produced events, also reducing the memory footprint of this
command.
Finally, this class includes more careful handshaking between the
custom command process and the parent Splunk daemon to avoid the
"buffer full" Splunk daemon bug. This includes always observing a
"read one chunk, send one chunk" policy and ensuring that outbound
chunks are never flushed at a rate faster than one event per
"throttleMs" milliseconds. The default for "throttleMs" is
    '0.08', meaning that a standard batch of 50,000 events will not be
    flushed faster than once every four seconds.
This class has been tested against the following configuration
dimensions:
- Single install Splunk server vs. SHC and indexer cluster (3x6)
    - On the search head (e.g. after `localop`) vs. on indexers in parallel
- With and without previews enabled
- Against both generating and eventing base searches
This class otherwise supports the same functionality and interface
as the parent, StreamingCommand, class.
"""
def __init__(self):
StreamingCommand.__init__(self)
self._throttleMs = 0.08
self._last_flush = None
self._last_count = 0
@property
def throttleMs(self):
return self._throttleMs
@throttleMs.setter
def throttleMs(self, value):
self._throttleMs = value
def stream(self, records):
""" Generator function that processes and yields event records to the
Splunk stream pipeline.
You must override this method.
"""
raise NotImplementedError('SmartStreamingCommand.stream(self, records)')
# Override base class method to replace the record generator with
# our own generator that understands how to stop after a chunk
# without requiring the ifile to be closed...
def _execute(self, ifile, process):
self.logger.setLevel(logging.INFO)
# Bump base class' understanding of maxresultrows by one so
# that we can control flushing here...
maxresultrows = getattr(self._metadata.searchinfo, 'maxresultrows', 50000)
setattr(self._metadata.searchinfo, 'maxresultrows', maxresultrows+1)
self._flush_count = math.floor(2*maxresultrows/3)
self._record_writer.write_records(self._metered_flush(self.stream(self._our_records(ifile))))
self.finish()
# Start reading a chunk by reading the header and returning the
# metadata and body lengths. The remainder of the chunk is not
# read off of the provided file input.
def _start_chunk(self, ifile):
# noinspection PyBroadException
try:
header = ifile.readline()
except Exception as error:
            raise RuntimeError('Failed to read transport header: {}'.format(error))
if header == "":
ifile.close()
return None, None
if not header:
return None, None
match = SmartStreamingCommand._header.match(header)
if match is None:
raise RuntimeError('Failed to parse transport header: "{}"'.format(header))
metadata_length, body_length = match.groups()
metadata_length = int(metadata_length)
body_length = int(body_length)
return metadata_length, body_length
_header = re.compile(r'chunked\s+1.0\s*,\s*(\d+)\s*,\s*(\d+)\s*\n')
# Read and return the metadata from the provided file input.
def _read_metadata(self, ifile, metadata_length):
try:
metadata = ifile.read(metadata_length)
except Exception as error:
raise RuntimeError('Failed to read metadata of length {}: {}'.format(metadata_length, error))
decoder = MetadataDecoder()
self.logger.info("Metadata: {}".format(metadata))
try:
metadata = decoder.decode(metadata)
except Exception as error:
raise RuntimeError('Failed to parse metadata of length {}: {}'.format(metadata_length, error))
return metadata
# Capture input events (of size bytes) from the provided file
# input into a local, gzip'd file in the dispatch directory.
def _capture_input(self, ifile, bytes):
dispatch_dir = self._metadata.searchinfo.dispatch_dir
if not os.path.exists(dispatch_dir):
            os.mkdir(dispatch_dir, 0o775)
file = 'input_snap_{}.gz'.format(os.getpid())
path = os.path.join(dispatch_dir, file)
self.logger.debug('Capture input ({} bytes) in {}...'.format(bytes,file))
count = 0
with gzip.open(path, 'wb') as copy:
for line in ChunkedInput(ifile, bytes):
copy.write(line)
count += 1
copy.flush()
copy.close()
self._icopy_path = path
self._icopy = gzip.open(path, 'rb')
self._ifile = ifile
self.logger.info('Input captured ({})'.format(count))
# Drain exactly one input chunk.
def _drain_input_one_chunk(self, ifile):
m_len, b_len = self._start_chunk(ifile)
if m_len is not None and b_len is not None:
try:
ifile.read(m_len+b_len)
except Exception as error:
raise RuntimeError('Failed to clear chunk of lengths {} {}: {}'.format(m_len, b_len, error))
    # Loop, checking the provided input file and, if bytes are
    # present, reading a chunk, until no bytes remain.
def _drain_input(self, ifile):
# Loop reading chunks out of the input until it is dry...
chunks = 0
check_input = not ifile.closed
while check_input:
check_input = False
check_rd, check_wr, check_ex = select.select([ifile], [], [], 0)
if check_rd == [ifile]:
# Input available; drain it...
self._drain_input_one_chunk(ifile)
# Check again...
check_input = not ifile.closed
chunks += 1
if chunks > 0:
self.logger.info('Cleared {} input chunk(s)'.format(chunks))
# Flush, but only at a certain rate (sleeps if called too often).
def _gated_flush(self, count):
if self._last_flush is None:
self._last_flush = time.time()
max = count if count > self._last_count else self._last_count
intervalSec = self.throttleMs * max / 1000.0
timeSec = time.time()
# Check if we have flushed recently; iff so, stall briefly...
if self._last_flush+intervalSec > timeSec:
sleepSec = self._last_flush+intervalSec - timeSec
self.logger.info('Sleep before flushing, {}s'.format(sleepSec))
time.sleep(sleepSec)
self.logger.info('Flushing events ({})...'.format(count))
self.flush()
self._last_flush = time.time()
self._last_count = count
self.logger.debug('Flushed')
# Generator function that captures input, then reads the captured
# copy, yielding events in OrderedDict form.
def _one_chunk_of_records(self, ifile):
self._finished = True
metadata_length, body_length = self._start_chunk(ifile)
if metadata_length is None:
self.logger.info("No chunk; exiting...")
return
self.logger.info('Start data chunk...({},{})'.format(metadata_length, body_length))
metadata = self._read_metadata(ifile, metadata_length)
action = getattr(metadata, 'action', None)
if action != 'execute':
raise RuntimeError('Expected execute action, not {}'.format(action))
finished = getattr(metadata, 'finished', False)
self._record_writer.is_flushed = False
        if body_length == 0:
return
copy_input = True
if copy_input:
self._capture_input(ifile, body_length)
reader = csv.reader(self._icopy, dialect=CsvDialect)
else:
reader = csv.reader(ChunkedInput(ifile, body_length), dialect=CsvDialect)
try:
fieldnames = next(reader)
except StopIteration:
raise RuntimeError('CSV header malformed')
self.logger.debug('Read records...')
mv_fieldnames = dict([(name, name[len('__mv_'):]) for name in fieldnames if name.startswith('__mv_')])
if len(mv_fieldnames) == 0:
for values in reader:
yield OrderedDict(izip(fieldnames, values))
else:
for values in reader:
record = OrderedDict()
for fieldname, value in izip(fieldnames, values):
if fieldname.startswith('__mv_'):
if len(value) > 0:
record[mv_fieldnames[fieldname]] = self._decode_list(value)
elif fieldname not in record:
record[fieldname] = value
yield record
        if self._icopy is not None:
self._icopy.close()
os.remove(self._icopy_path)
if finished:
return
self._finished = False
# Generator function that reads one chunk at a time processing
# results, occasionally flushing, until the input is closed or the
# parent reports that we are finished. Replacement for _records()
# from base class.
def _our_records(self, ifile):
self._finished = False
self._tot_count = 0
self._cur_count = 0
while not self._finished:
self.logger.debug('Read one chunk...')
for record in self._one_chunk_of_records(ifile):
yield record
self._tot_count += self._cur_count
self._gated_flush(self._cur_count)
self.logger.info('Done one chunk ({}/{} returned).'.format(self._cur_count, self._tot_count))
self._cur_count = 0
self.logger.info('Done with all records ({} returned)'.format(self._tot_count))
self.logger.debug('Read remaining chunks...sleep {}s first'.format(1))
time.sleep(1)
self._drain_input(ifile)
def _metered_flush(self, events):
for event in events:
self._cur_count += 1
yield event
if self._cur_count % self._flush_count == 0:
self._tot_count += self._cur_count
if self._cur_count > 0:
self._gated_flush(self._cur_count)
self._drain_input_one_chunk(self._ifile)
self.logger.info('Read one input chunk')
self._cur_count = 0
|
StarcoderdataPython
|
3291800
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2019-06-14 10:22:21
# @Author : <NAME>
# @Version : 1.0
import pprint
def functionTest():
pass
class ClassTest(object):
"""docstring for ClassTest"""
def __init__(self,):
super(ClassTest, self).__init__()
def selfMethodTest(self):
pass
@classmethod
def classMethodTest(cls):
pass
@staticmethod
def staticMethodTest():
pass
if __name__ == '__main__':
print(functionTest)
print('*' * 100)
print(ClassTest.selfMethodTest)
print(ClassTest.classMethodTest)
print(ClassTest.staticMethodTest)
print('*' * 100)
inst = ClassTest()
print(inst.selfMethodTest)
print(inst.classMethodTest)
print(inst.staticMethodTest)
print('*' * 100)
pprint.pprint(dir(functionTest))
|
StarcoderdataPython
|
3311483
|
# coding: utf-8
import datetime
import unittest
from mock import Mock, patch
import pyslack
class ClientTest(unittest.TestCase):
token = "my token"
@patch('requests.post')
def test_post_message(self, r_post):
"""A message can be posted to a channel"""
client = pyslack.SlackClient(self.token)
reply = {"ok": True}
r_post.return_value.json = Mock(return_value = reply)
result = client.chat_post_message('#channel', 'message')
self.assertEqual(reply, result)
@patch('requests.post')
def test_error_response(self, r_post):
"""Server error messages are handled gracefully"""
client = pyslack.SlackClient(self.token)
reply = {"ok": False, "error": "There was an error"}
r_post.return_value.json.return_value = reply
with self.assertRaises(pyslack.SlackError) as context:
client.chat_post_message('#channel', 'message')
self.assertEqual(context.exception.message, reply["error"])
@patch('requests.post')
def test_rate_limit(self, r_post):
"""HTTP 429 Too Many Requests response is handled gracefully"""
client = pyslack.SlackClient(self.token)
reply = {"ok": False, "error": "Too many requests"}
r_post.return_value = Mock(status_code=429, headers={'retry-after': 10})
r_post.return_value.json.return_value = reply
with self.assertRaises(pyslack.SlackError) as context:
client.chat_post_message('#channel', 'message')
self.assertEqual(r_post.call_count, 1)
self.assertGreater(client.blocked_until,
datetime.datetime.utcnow() + datetime.timedelta(seconds=8))
# A second send attempt should also throw, but without creating a request
with self.assertRaises(pyslack.SlackError) as context:
client.chat_post_message('#channel', 'message')
self.assertEqual(r_post.call_count, 1)
# After the time has expired, it should be business as usual
client.blocked_until = datetime.datetime.utcnow() - \
datetime.timedelta(seconds=5)
r_post.return_value = Mock(status_code=200)
r_post.return_value.json.return_value = {"ok": True}
client.chat_post_message('#channel', 'message')
self.assertEqual(r_post.call_count, 2)
|
StarcoderdataPython
|
1695412
|
<filename>src/cloudservice/azext_cloudservice/manual/custom.py
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
# pylint: disable=import-outside-toplevel
from azure.cli.core.util import sdk_no_wait
def cloud_service_create(cmd,
client,
resource_group_name,
cloud_service_name,
location=None,
tags=None,
package_url=None,
configuration=None,
configuration_url=None,
start_cloud_service=None,
upgrade_mode=None,
extensions=None,
load_balancer_configurations=None,
id_=None,
secrets=None,
roles=None,
no_wait=False):
parameters = {}
parameters['location'] = location
parameters['tags'] = tags
parameters['properties'] = {}
parameters['properties']['package_url'] = package_url
parameters['properties']['configuration'] = configuration
parameters['properties']['configuration_url'] = configuration_url
parameters['properties']['start_cloud_service'] = start_cloud_service
parameters['properties']['upgrade_mode'] = upgrade_mode
if extensions:
parameters['properties']['extension_profile'] = {}
parameters['properties']['extension_profile']['extensions'] = extensions
else:
parameters['properties']['extension_profile'] = None
parameters['properties']['network_profile'] = {}
parameters['properties']['network_profile']['load_balancer_configurations'] = _parse_lbs(
load_balancer_configurations, cmd, resource_group_name)
if id_:
parameters['properties']['network_profile']['swappable_cloud_service'] = {}
parameters['properties']['network_profile']['swappable_cloud_service']['id'] = id_
else:
parameters['properties']['network_profile']['swappable_cloud_service'] = None
parameters['properties']['os_profile'] = {}
parameters['properties']['os_profile']['secrets'] = _parse_secrets(
secrets, cmd, resource_group_name)
parameters['properties']['role_profile'] = {}
parameters['properties']['role_profile']['roles'] = _parse_roles(roles)
return sdk_no_wait(no_wait,
client.begin_create_or_update,
resource_group_name=resource_group_name,
cloud_service_name=cloud_service_name,
parameters=parameters)
def _parse_roles(roles):
"""
Example, ContosoFrontend:Standard_D1_v2:1:Standard
:param roles:
:return:
"""
if not roles:
return None
roles_json = []
for role in roles:
terms = role.split(':')
roles_json.append({
'sku': {
'name': terms[1],
'capacity': terms[2],
'tier': terms[3]
},
'name': terms[0]
})
return roles_json
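# Worked example for the format documented in the docstring above (values are the
# docstring's own sample, output derived from the code):
# _parse_roles(['ContosoFrontend:Standard_D1_v2:1:Standard']) returns
# [{'sku': {'name': 'Standard_D1_v2', 'capacity': '1', 'tier': 'Standard'},
#   'name': 'ContosoFrontend'}]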
def _parse_lbs(lbs, cmd, resource_group_name):
from msrestazure.tools import resource_id, is_valid_resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
if not lbs:
return None
lbs_json = []
for lb in lbs:
terms = lb.split(':')
ip = terms[2]
subnet = terms[3]
private_ip = terms[4]
if ip and not is_valid_resource_id(ip):
ip = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Network', type='publicIPAddresses', name=ip)
lbs_json.append({
'properties': {
'frontendIPConfigurations': [
{
'properties': {
'publicIPAddress': {'id': ip} if ip else None,
'subnet': {'id': subnet} if subnet else None,
'privateIPAddress': private_ip if private_ip else None
},
'name': terms[1]
}
]
},
'name': terms[0]
})
return lbs_json
def _parse_secrets(secrets, cmd, resource_group_name):
from msrestazure.tools import resource_id, is_valid_resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
if not secrets:
return None
secrets_json = []
for secret in secrets:
terms = secret.split(':')
vault = terms[0]
certs = terms[1:]
if vault and not is_valid_resource_id(vault):
vault = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.KeyVault', type='vaults', name=vault)
secrets_json.append({
'sourceVault': {
'id': vault
},
'vaultCertificates': [
{'certificateUrl': cert} for cert in certs
]
})
return secrets_json
|
StarcoderdataPython
|
78221
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
import math
def A(m ,n):
ret = math.factorial(n) / math.factorial(n-m)
return ret
def C(m, n):
ret = A(m, n) / math.factorial(m)
return ret
def cell(i, n, count):
    ret = ((-1) ** i) * C(i, count) * ((1 - float(i) / count) ** n)
    print(ret)
return ret
def getAllCards(n, count):
cells = 0;
for i in range(1, count):
cells += cell(i, n, count)
return 1 + cells
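# The loop above appears to implement inclusion-exclusion: with C(i, count) equal to
# binomial(count, i), getAllCards(n, count) = sum over i >= 0 of
# (-1)**i * C(count, i) * (1 - i/count)**n, i.e. the probability that n uniform draws
# cover all `count` distinct cards (the i == count term vanishes, so it is omitted).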
if __name__ == '__main__':
print "***************\n"+ str(getAllCards(108, 108))
|
StarcoderdataPython
|
1604996
|
#!/usr/bin/env python
from __future__ import print_function
from LifeCycleTests.LifeCycleTools.PayloadHandler import PayloadHandler
from LifeCycleTests.LifeCycleTools.OptParser import get_command_line_options
import random, os, sys
def change_cksums(block_dict, file_dict):
file_dict['check_sum'] = str(random.randint(1000, 9999))
file_dict['adler32'] = str(random.randint(1000, 9999))
def change_file_size(block_dict, file_dict):
block = block_dict['block']
old_file_size = file_dict['file_size']
block['block_size'] -= old_file_size
new_file_size = int(random.gauss(1000000000, 90000000))
block['block_size'] += new_file_size
file_dict['file_size'] = new_file_size
def skip_file(block_dict, file_dict):
logical_file_name = file_dict['logical_file_name']
file_size = file_dict['file_size']
block = block_dict['block']
block['block_size'] -= file_size
block['file_count'] -= 1
files_to_delete = []
file_conf_to_delete = []
for count, this_file in enumerate(block_dict['files']):
if this_file['logical_file_name'] == logical_file_name:
files_to_delete.append(count)
#del block_dict['files'][count]
for count, file_conf in enumerate(block_dict['file_conf_list']):
if file_conf['lfn'] == logical_file_name:
file_conf_to_delete.append(count)
#del block_dict['file_conf_list'][count]
return files_to_delete, file_conf_to_delete
failure_func = {"DBSSkipFileFail" : skip_file,
"DBSChangeCksumFail" : change_cksums,
"DBSChangeSizeFail" : change_file_size}
options = get_command_line_options(__name__, sys.argv)
payload_handler = PayloadHandler()
payload_handler.load_payload(options.input)
block_dump = payload_handler.payload['workflow']['DBS']
for block in block_dump:
files_to_delete = []
file_conf_to_delete = []
for this_file in block['files']:
        ###get the last part of the logical_file_name, which is the actual filename
filename = this_file['logical_file_name'].split('/')[-1]
###remove .root from filename
filename = filename.replace('.root', '')
###decode failures from filename
failures = filename.split('_')[1:]
for failure in failures:
if failure.startswith('DBS'):
try:
### call function to modify the block contents
ret_val = failure_func[failure](block, this_file)
if ret_val:
files_to_delete.extend(ret_val[0])
file_conf_to_delete.extend(ret_val[1])
except Exception as ex:
print("%s does not support the failure %s" % (os.path.basename(__file__), failure))
raise ex
for del_file in reversed(files_to_delete):
del block['files'][del_file]
for del_file_conf in reversed(file_conf_to_delete):
del block['file_conf_list'][del_file_conf]
p = payload_handler.clone_payload()
p['workflow']['DBS'] = block_dump
payload_handler.append_payload(p)
payload_handler.save_payload(options.output)
|
StarcoderdataPython
|
3308276
|
<filename>Model/login_screen.py
# The model implements the observer pattern. This means that the class must
# support adding, removing, and alerting observers. In this case, the model is
# completely independent of controllers and views. It is important that all
# registered observers implement a specific method that will be called by the
# model when they are notified (in this case, it is the `model_is_changed`
# method). To support this, observers must derive from an abstract base class
# and override the `model_is_changed` method.
import multitasking
multitasking.set_max_threads(10)
class LoginScreenModel:
"""Implements the logic of the user login screen."""
def __init__(self, base):
self.base = base
# Data:
# {
# 'login': 'User Login',
# 'password': "<PASSWORD>",
# }
self.user_data = {}
self._data_validation_status = None
self._observers = []
@property
def data_validation_status(self):
return self._data_validation_status
@data_validation_status.setter
def data_validation_status(self, value):
self._data_validation_status = value
# We notify the View -
# :class:`~View.LoginScreen.login_screen.LoginScreenView` about the
# changes that have occurred in the data model.
self.notify_observers()
@multitasking.task
def chek_data(self):
"""
        Gets data from the database and compares it with the data entered
        by the user.
This method is completely asynchronous. It does not return any value.
"""
data = self.base.get_data_from_base_users()
data_validation_status = False
for key in data:
if data[key] == self.user_data:
data_validation_status = True
break
self.data_validation_status = data_validation_status
def set_user_data(self, key, value):
"""Sets a dictionary of data that the user enters."""
self.user_data[key] = value
def notify_observers(self):
"""
The method that will be called on the observer when the model changes.
"""
for observer in self._observers:
observer.model_is_changed()
def reset_data_validation_status(self):
self.data_validation_status = None
def add_observer(self, observer):
self._observers.append(observer)
def remove_observer(self, observer):
self._observers.remove(observer)
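# Minimal observer sketch (hypothetical class, not part of the original module): as the
# note at the top of this file explains, anything passed to `add_observer` only needs a
# `model_is_changed` method, which `notify_observers` calls on every change.
class _ExampleLoginObserver:
    """Illustrative observer; a real view would refresh its widgets here."""
    def model_is_changed(self):
        print("LoginScreenModel reported a change")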
|
StarcoderdataPython
|
3384837
|
from django.conf.urls import url
from article.views.oj import ArticleAPI
urlpatterns = [
# 文章
url(r"^article/?$", ArticleAPI.as_view(), name="articel_view_api"),
]
|
StarcoderdataPython
|
3384713
|
import pymongo
import mysql.connector
'''
This module performs the subscriber information processing
1. Gets the existing subscriber list from MongoDB
2. Gets the live subscriber list from the MySQL DB
3. Compares the two lists and returns ONLY anything that has changed
4. Updates any changes to the live subscriber list collection
5. Inserts changes in to the tetra_subscriber_changes collection for change history
'''
def getExistingSubscribers(DB: pymongo.MongoClient) -> list:
'''
    Gets the existing subscriber list for comparison
'''
subscribers = DB['tetra_subscribers'].find()
subscribers = list(subscribers)
return(subscribers)
def getCurrentSubscribers(CURSOR: mysql.connector):
'''
Retrieves the current subscriber list.
'''
# Execute MySQL query
CURSOR.execute("SELECT \
SSI, \
Description, \
SsiKind, \
ProfileId, \
Timestamp \
FROM `subscriber`;"
)
subscribers = CURSOR.fetchall()
new_subs = []
for subscriber in subscribers:
if subscriber[2] == 1:
s_type = 'Subscriber'
elif subscriber[2] == 2:
s_type = 'Group'
elif subscriber[2] == 5:
s_type = 'Application'
        elif subscriber[2] == 8:
            s_type = 'Terminal'
        else:
            # Assumed fallback so s_type is always defined for unexpected SsiKind values
            s_type = 'Unknown'
new_sub = {
'ssi' : subscriber[0],
'description' : subscriber[1],
'type' : s_type,
'profile' : subscriber[3],
'timestamp' : subscriber[4]
}
new_subs.append(new_sub)
return(new_subs)
def getDifference(
previous: list,
latest: list
) -> list:
'''
Compares two lists of dictionaries and returns a list of dictionaries that have changed
    Absolute wizardry
'''
changed = []
# Iterate over the latest subscriber list
for sub in latest:
# Check if there is an issi match between the new and old subscriber list
match = next((item for item in previous if item["ssi"] == sub['ssi']), None)
# If there is a match...
if match:
# Create a set of the values
previous_match = set(match.items())
latest_match = set(sub.items())
# Remove any values that are the same
difference = latest_match - previous_match
# Convert back to a dictionary
difference = dict(difference)
# If there is a difference
# (No difference does nothing)
if difference:
try:
# Try if there is a comment (blank most of the time)
sub['comment'] = difference['comment']
except KeyError:
pass
# Append the subscriber to the final list
changed.append(sub)
else:
# No match = new subscriber
# Append to the list
changed.append(sub)
return(changed)
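# Illustrative comparison (hypothetical records): with
# previous = [{'ssi': 100, 'description': 'Old'}] and
# latest = [{'ssi': 100, 'description': 'New'}, {'ssi': 200, 'description': 'Added'}],
# getDifference(previous, latest) returns both latest entries: ssi 100 because its
# description changed, and ssi 200 because it has no match in the previous list.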
def updateDB(
DB: pymongo.MongoClient,
data: list
) -> None:
'''
Updates MongoDB
'''
print(len(data))
print(data[0])
for sub in data:
        # Technically this should only update differences or new subscribers, if any
DB['tetra_subscribers'].find_one_and_update(
{
'ssi' : sub['ssi']
},
{
'$set' : sub
},
upsert=True
)
# Log the subscriber changes
DB['tetra_subscriber_changes'].insert_many(data)
def tetraSubscribers(
CURSOR: mysql,
DB: pymongo.MongoClient
) -> None:
'''
Compares the previous and latest subscriber lists and logs any changes
'''
# Get the existing subscriber list
previous = getExistingSubscribers(DB)
# Get the new subscriber list
latest = getCurrentSubscribers(CURSOR)
# Compare and return the difference
difference = getDifference(previous, latest)
    # Only update the MongoDB collection if there are any differences
if len(difference) > 0:
updateDB(DB, difference)
print(f"Updated {len(difference)} changed subscribers")
def msLocation(
CURSOR: mysql,
DB: pymongo.MongoClient
) -> None:
'''
Get current node a subscriber is associated to
'''
# Execute MySQL query
CURSOR.execute("SELECT \
Ssi, \
NodeDescr \
FROM `mslocation`"
)
myresult = CURSOR.fetchall()
# Iterate over the subscribers
for subscriber in myresult:
# Update the subscriber collection with the currently connected node
node = subscriber[1]
DB['tetra_subscribers'].find_one_and_update(
{
'ssi' : subscriber[0],
},
{
'$set': {
'node' : node
}
},
upsert=True
)
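# Minimal usage sketch (connection details below are assumptions, not part of the
# original module): wires the functions above together the way the module docstring
# describes.
if __name__ == '__main__':
    MONGO_DB = pymongo.MongoClient('mongodb://localhost:27017/')['tetra']  # assumed database name
    MYSQL_CONN = mysql.connector.connect(host='localhost', user='reader',
                                         password='secret', database='tetradb')  # assumed credentials
    CURSOR = MYSQL_CONN.cursor()
    tetraSubscribers(CURSOR, MONGO_DB)  # steps 1-5 from the module docstring
    msLocation(CURSOR, MONGO_DB)        # record the node each subscriber is currently on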
|
StarcoderdataPython
|
4803181
|
from dataclasses import dataclass
from piate.api.resources.collections import Collections
from piate.api.resources.domains import Domains
from piate.api.resources.entries import Entries
from piate.api.resources.institutions import Institutions
from piate.api.resources.inventories import Inventories
from piate.api.session import Session
@dataclass(init=False)
class Client:
inventories: Inventories
collections: Collections
institutions: Institutions
def __init__(self, session: Session):
self._session = session
self.inventories = Inventories(self._session)
self.collections = Collections(self._session)
self.domains = Domains(self._session)
self.institutions = Institutions(self._session)
self.entries = Entries(self._session)
|
StarcoderdataPython
|
3249346
|
<reponame>nthacker/learnAnalytics-DeepLearning-Azure<gh_stars>10-100
# Hyperparams LSTM
EPOCHS=3
BATCHSIZE=64
EMBEDSIZE=125
NUMHIDDEN=100
DROPOUT=0.2
LR=0.001
BETA_1=0.9
BETA_2=0.999
EPS=1e-08
MAXLEN=150
MAXFEATURES=20000
GPU=True
|
StarcoderdataPython
|
61424
|
<reponame>jol79/LiveChat
from django.urls import path
from . import views
urlpatterns = [
path('', views.chat, name='chat'),
path('login', views.login, name='login'),
path('users', views.users, name='users_list'),
path('users/edit/<id>', views.edit_user, name='edit_user')
]
|
StarcoderdataPython
|
36655
|
<filename>authors/apps/profiles/migrations/0022_auto_20190123_1211.py
# Generated by Django 2.1.4 on 2019-01-23 12:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0021_auto_20190122_1723'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='bio',
field=models.TextField(blank=True, max_length=200),
),
]
|
StarcoderdataPython
|
15963
|
<filename>handlers/_my.py
import model
sticker_storage = model.get_storage()
def my(_, update):
"""Prints stickers added by user"""
message = update.message
user_id = update.message.from_user.id
stickers = sticker_storage.get_for_owner(user_id, max_count=20, tagged=True)
text = '\n\n'.join(
'Tags: {tags}\n'
'Times used: {sticker.times_used}\n'
'/{sticker.id}'
.format(
sticker=sticker,
tags=', '.join(sticker.tags)
)
for sticker in stickers
)
message.reply_text(text, parse_mode='HTML')
|
StarcoderdataPython
|
191929
|
<reponame>visualsnoop/visualsnoop-client-python
__version__ = '0.2'
DEFAULT_ENDPOINT='http://visualsnoop.com/api/v1'
|
StarcoderdataPython
|
3303026
|
<gh_stars>1000+
from ..base.twilltestcase import common, ShedTwillTestCase
column_repository_name = 'column_maker_0080'
column_repository_description = "Add column"
column_repository_long_description = "Compute an expression on every row"
convert_repository_name = 'convert_chars_0080'
convert_repository_description = "Convert delimiters"
convert_repository_long_description = "Convert delimiters to tab"
category_name = 'Test 0080 Advanced Circular Dependencies'
category_description = 'Test circular dependency features'
class TestRepositoryCircularDependencies(ShedTwillTestCase):
'''Verify that the code correctly handles circular dependencies.'''
def test_0000_initiate_users(self):
"""Create necessary user accounts."""
self.login(email=common.test_user_1_email, username=common.test_user_1_name)
test_user_1 = self.test_db_util.get_user(common.test_user_1_email)
assert test_user_1 is not None, f'Problem retrieving user with email {common.test_user_1_email} from the database'
self.test_db_util.get_private_role(test_user_1)
self.login(email=common.admin_email, username=common.admin_username)
admin_user = self.test_db_util.get_user(common.admin_email)
assert admin_user is not None, f'Problem retrieving user with email {common.admin_email} from the database'
self.test_db_util.get_private_role(admin_user)
def test_0005_create_column_repository(self):
"""Create and populate the column_maker repository."""
category = self.create_category(name=category_name, description=category_description)
self.login(email=common.test_user_1_email, username=common.test_user_1_name)
repository = self.get_or_create_repository(name=column_repository_name,
description=column_repository_description,
long_description=column_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id(category.id),
strings_displayed=[])
self.upload_file(repository,
filename='column_maker/column_maker.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded column_maker tarball.',
strings_displayed=[],
strings_not_displayed=[])
def test_0005_create_convert_repository(self):
"""Create and populate the convert_chars repository."""
self.login(email=common.admin_email, username=common.admin_username)
category = self.create_category(name=category_name, description=category_description)
self.login(email=common.test_user_1_email, username=common.test_user_1_name)
repository = self.get_or_create_repository(name=convert_repository_name,
description=convert_repository_description,
long_description=convert_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id(category.id),
strings_displayed=[])
self.upload_file(repository,
filename='convert_chars/convert_chars.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded convert_chars tarball.',
strings_displayed=[],
strings_not_displayed=[])
def test_0020_create_repository_dependencies(self):
'''Upload a repository_dependencies.xml file that specifies the current revision of convert_chars_0080 to the column_maker_0080 repository.'''
convert_repository = self.test_db_util.get_repository_by_name_and_owner(convert_repository_name, common.test_user_1_name)
column_repository = self.test_db_util.get_repository_by_name_and_owner(column_repository_name, common.test_user_1_name)
repository_dependencies_path = self.generate_temp_path('test_0080', additional_paths=['convert'])
repository_tuple = (self.url, convert_repository.name, convert_repository.user.username, self.get_repository_tip(convert_repository))
self.create_repository_dependency(repository=column_repository, repository_tuples=[repository_tuple], filepath=repository_dependencies_path)
def test_0025_create_dependency_on_filtering(self):
'''Upload a repository_dependencies.xml file that specifies the current revision of filtering to the freebayes_0040 repository.'''
convert_repository = self.test_db_util.get_repository_by_name_and_owner(convert_repository_name, common.test_user_1_name)
column_repository = self.test_db_util.get_repository_by_name_and_owner(column_repository_name, common.test_user_1_name)
repository_dependencies_path = self.generate_temp_path('test_0080', additional_paths=['convert'])
repository_tuple = (self.url, column_repository.name, column_repository.user.username, self.get_repository_tip(column_repository))
self.create_repository_dependency(repository=convert_repository, repository_tuples=[repository_tuple], filepath=repository_dependencies_path)
def test_0030_verify_repository_dependencies(self):
'''Verify that each repository can depend on the other without causing an infinite loop.'''
convert_repository = self.test_db_util.get_repository_by_name_and_owner(convert_repository_name, common.test_user_1_name)
column_repository = self.test_db_util.get_repository_by_name_and_owner(column_repository_name, common.test_user_1_name)
self.check_repository_dependency(convert_repository, column_repository, self.get_repository_tip(column_repository))
self.check_repository_dependency(column_repository, convert_repository, self.get_repository_tip(convert_repository))
def test_0035_verify_repository_metadata(self):
'''Verify that resetting the metadata does not change it.'''
column_repository = self.test_db_util.get_repository_by_name_and_owner(column_repository_name, common.test_user_1_name)
convert_repository = self.test_db_util.get_repository_by_name_and_owner(convert_repository_name, common.test_user_1_name)
for repository in [column_repository, convert_repository]:
self.verify_unchanged_repository_metadata(repository)
|
StarcoderdataPython
|
3325997
|
<filename>rasa/nlu/featurizers/bert_featurizer.py<gh_stars>1-10
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import logging
import typing
from typing import Any
from typing import List
from typing import Text
from rasa_nlu.config import RasaNLUModelConfig
from rasa_nlu.featurizers import Featurizer
from rasa_nlu.tokenizers import Token
from rasa_nlu.training_data import Message
from rasa_nlu.training_data import TrainingData
logger = logging.getLogger(__name__)
if typing.TYPE_CHECKING:
from builtins import str
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
input_ids,
input_mask):
self.input_ids = input_ids
self.input_mask = input_mask
class BertFeaturizer(Featurizer):
name = "intent_featurizer_bert"
provides = ["text_features"]
requires = ["tokens"]
# @classmethod
# def required_packages(cls):
# # type: () -> List[Text]
# return ["numpy"]
def train(self, training_data, config, **kwargs):
# type: (TrainingData, RasaNLUModelConfig, **Any) -> None
bert_tokenizer = self._bert_tokenizer(**kwargs)
max_seq_length = kwargs.get('max_seq_length')
for example in training_data.intent_examples:
features = self.features_for_tokens(example.get("tokens"),
bert_tokenizer,
max_seq_length)
example.set("text_features",
self._combine_with_existing_text_features(
example, features))
def process(self, message, **kwargs):
# type: (Message, **Any) -> None
bert_tokenizer = self._bert_tokenizer(**kwargs)
max_seq_length = kwargs.get('max_seq_length')
features = self.features_for_tokens(message.get("tokens"),
bert_tokenizer,
max_seq_length)
message.set("text_features",
self._combine_with_existing_text_features(message,
features))
def _bert_tokenizer(self, **kwargs):
bert_tokenizer = kwargs.get("bert_tokenizer")
if not bert_tokenizer:
raise Exception("Failed to train 'intent_featurizer_bert'. "
"Missing a proper BERT feature extractor. "
"Make sure this component is preceded by "
"the 'tokenizer_bert' component in the pipeline "
"configuration.")
return bert_tokenizer
def features_for_tokens(self, tokens, tokenizer, max_seq_length):
# type: (List[Token]) -> InputFeatures
tokens_a = tokens
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
tokens.append("[CLS]")
for token in tokens_a:
tokens.append(token)
tokens.append("[SEP]")
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask)
return feature
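    # Illustrative padding/masking (hypothetical values): with tokens_a = ["hello", "world"]
    # and max_seq_length = 6, the wrapped sequence is ["[CLS]", "hello", "world", "[SEP]"],
    # so input_ids holds 4 real ids followed by 2 zero pads and
    # input_mask == [1, 1, 1, 1, 0, 0].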
|
StarcoderdataPython
|
1797435
|
<reponame>quocodile/auto_correct
import csv
def calc_edit_dist(word1, word2):
'''
First, create a 2D array to enable dynamic programming.
    Then, use dynamic programming to calculate
edit distance between two words.
'''
#this method needs fixing
comparison_matrix = create_comparision_matrix(word1, word2)
num_rows = len(comparison_matrix)
num_cols = len(comparison_matrix[0])
cost_to_replace = 1
cost_to_insert = 1
for row in range(1, num_rows):
for col in range(1, num_cols):
if row == col:
if word1[row] == word2[col]:
comparison_matrix[row][col] = comparison_matrix[row-1][col-1]
else:
comparison_matrix[row][col] = comparison_matrix[row-1][col-1] + cost_to_replace
else:
comparison_matrix[row][col] = comparison_matrix[row-1][col-1] + cost_to_insert
return comparison_matrix[num_rows-1][num_cols-1]
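# Reference sketch (not the author's method): a standard Wagner-Fischer Levenshtein
# distance, added because the comment above notes calc_edit_dist still needs fixing.
# The helper name is hypothetical and nothing in this module calls it.
def levenshtein_distance_reference(word1, word2):
    '''Classic edit distance with insert/delete/substitute all costing 1.'''
    rows, cols = len(word1) + 1, len(word2) + 1
    # matrix[i][j] = edit distance between word1[:i] and word2[:j]
    matrix = [[0] * cols for _ in range(rows)]
    for i in range(rows):
        matrix[i][0] = i
    for j in range(cols):
        matrix[0][j] = j
    for i in range(1, rows):
        for j in range(1, cols):
            substitution = 0 if word1[i - 1] == word2[j - 1] else 1
            matrix[i][j] = min(matrix[i - 1][j] + 1,            # deletion
                               matrix[i][j - 1] + 1,            # insertion
                               matrix[i - 1][j - 1] + substitution)
    return matrix[rows - 1][cols - 1]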
def create_comparision_matrix(word1, word2):
'''
    Create a 2D array with all entries containing
all 0s except for the first row and first column
'''
word1_length = len(word1)
word2_length = len(word2)
comparison_matrix = []
for i in range(word1_length):
comparison_matrix.append([])
for j in range(word2_length):
comparison_matrix[i].append(0)
if word1[0] != word2[0]:
comparison_matrix[0][0] = 2
for r in range(1, word1_length):
try:
if word1[r] == word2[r]:
comparison_matrix[r][0] = comparison_matrix[r-1][0]
else:
comparison_matrix[r][0] = comparison_matrix[r-1][0] + 2
except:
comparison_matrix[r][0] = comparison_matrix[r-1][0] + 1
for c in range(1, word2_length):
comparison_matrix[0][c] = comparison_matrix[0][c-1] + 1
return comparison_matrix
def load_dictionary_as_list():
dictionary_as_list = list(open('corncob_lowercase.txt', 'r'))
for i in range(len(dictionary_as_list)):
dictionary_as_list[i] = dictionary_as_list[i].strip()
return dictionary_as_list
def suggest_word(input_text, dictionary):
'''
With the text the user has provided,
suggest a word to type.
'''
closest_word = '______________________________________________________________________________________'
for word in dictionary:
if len(input_text) >= len(word):
continue
else:
if input_text == word[0:len(input_text)]:
if len(word) < len(closest_word):
closest_word = word
if closest_word == '______________________________________________________________________________________':
closest_word = ''
return closest_word
def autocorrect_word(input_text, dictionary):
    '''
    With the text the user has provided, if
    the word is not in the dictionary, provide
an alternative word that autocorrects the
given text.
'''
possible_words = ['', '', '']
least_edit_distances = [9999, 9999, 9999]
if input_text in dictionary:
return input_text
for word in dictionary:
edit_distance = calc_edit_dist(word, input_text)
for i in range(len(least_edit_distances)):
if edit_distance < least_edit_distances[i]:
least_edit_distances[i] = edit_distance
possible_words[i] = word
break
print(f"These were the possible words: {possible_words}")
closest_word = find_most_frequent_word(possible_words)
return closest_word
def find_most_frequent_word(possible_words):
most_frequent_word = possible_words[0]
highest_frequency = 0
word_frequencies = convert_frequency_csv_to_array()
for row in word_frequencies:
for possible_word in possible_words:
word = row[1]
if word == possible_word:
word_frequency = int(row[2])
if word_frequency > highest_frequency:
highest_frequency = word_frequency
most_frequent_word = word
return most_frequent_word
def convert_frequency_csv_to_array():
with open('word_frequency.csv') as word_frequencies_csv:
csv_reader = list(csv.reader(word_frequencies_csv))
csv_reader = csv_reader[1:]
return csv_reader
def main():
while True:
input_text = input('Enter a word: ')
dictionary = load_dictionary_as_list()
if len(input_text) == 0:
continue
elif len(input_text) < 2:
suggested_word = suggest_word(input_text, dictionary)
else:
closest_word = autocorrect_word(input_text, dictionary)
suggested_word = suggest_word(input_text, dictionary)
print(f"Did you mean this word? {closest_word}")
print(f"Were you about to type: {suggested_word}")
main()
|
StarcoderdataPython
|
1692916
|
from django.conf import settings
from django.urls import URLResolver, URLPattern
from django.urls.base import resolve, reverse_lazy
__author__ = 'Ashraful'
URL_NAMES = []
def get_view_by_url(url_name=None):
"""
**view generator**
:param url_name: get url_name as string
:return: view function (Though it is class or something)
"""
if not url_name:
return None
url = reverse_lazy(url_name)
resolver_match = resolve(url)
return resolver_match.func
def get_all_url_names(urlpatterns):
"""
:param urlpatterns: django url formatted patterns
:return: global var URL_NAMES
"""
for pattern in urlpatterns:
        # Check whether it is a resolver or a pattern; if it is a pattern we are at the end level, otherwise go one level deeper
if isinstance(pattern, URLResolver):
            # Check whether the url is a custom url or a django default one; the
            # distinction is made by namespace, so you are not allowed to put a
            # namespace on your own urls
if pattern.namespace is not None:
continue
# Recursive call for going deeper to find out regex patterns
get_all_url_names(pattern.url_patterns) # call this function recursively
elif isinstance(pattern, URLPattern):
url_name = pattern.name # get the view name
if url_name:
URL_NAMES.append(url_name)
return URL_NAMES
def get_urls(*args, **kwargs):
"""
:param args: expecting any tuple (still not implemented)
:param kwargs: expecting any dictionary like object (still not implemented)
:return: a function call as a list
"""
global URL_NAMES
URL_NAMES = []
root_urlconf = __import__(settings.ROOT_URLCONF) # import root_urlconf module
all_urlpatterns = root_urlconf.urls.urlpatterns
get_all_url_names(all_urlpatterns)
return URL_NAMES
|
StarcoderdataPython
|
3265539
|
from typing import Type, TypeVar
from ssz.hashable_container import HashableContainer
from .block_headers import SignedBeaconBlockHeader, default_signed_beacon_block_header
TProposerSlashing = TypeVar("TProposerSlashing", bound="ProposerSlashing")
class ProposerSlashing(HashableContainer):
fields = [
# First block header
("signed_header_1", SignedBeaconBlockHeader),
# Second block header
("signed_header_2", SignedBeaconBlockHeader),
]
@classmethod
def create(
cls: Type[TProposerSlashing],
signed_header_1: SignedBeaconBlockHeader = default_signed_beacon_block_header,
signed_header_2: SignedBeaconBlockHeader = default_signed_beacon_block_header,
) -> TProposerSlashing:
return super().create(
signed_header_1=signed_header_1, signed_header_2=signed_header_2
)
def __str__(self) -> str:
return (
f" signed_header_1=({self.signed_header_1}),"
f" signed_header_2=({self.signed_header_2})"
)
|
StarcoderdataPython
|
1760207
|
<filename>example/sample/models.py<gh_stars>1-10
from django.db import models
from lazydrf.models import LDRF
class Record(models.Model, metaclass=LDRF):
"""
Defines a key/value record model.
"""
#: Defines the key of the record.
key = models.CharField(max_length=16, unique=True, blank=False, null=False)
#: Defines the value of the record.
value = models.CharField(max_length=64, blank=False, null=False, db_index=True)
class Meta:
"""
Defines Django model metadata.
"""
app_label = "sample"
class APIFields:
"""
Defines fields related API metadata.
"""
editable = ["key", "value"]
ordering = ["key"]
searching = ["key", "^value"]
class APIFiltering:
"""
Defines filtering related API metadata.
"""
key = ["exact", "icontains", "startswith"]
value = ["exact", "icontains", "startswith"]
class APIViewset:
pass
|
StarcoderdataPython
|
4916
|
'''
Created on Mar 6, 2014
@author: tharanga
'''
import unittest
from time import sleep
import EventService as es
from EventService import WebSocketServer as ws
from EventService import EventManager as em
import socket
from base64 import b64encode
import struct
import MySQLdb
import json
import EventService
import flaskr
import tempfile
def encodeMessage( message):
message = b64encode(message)
b1 =0x80 | 0x1 & 0x0f
b2 = 0
header=""
payload_len = len(message)
if payload_len < 126 :
header = struct.pack('>BB', b1, payload_len)
message= header +message
elif (payload_len < ((2 ** 16) - 1)):
b2 |= 126
header += chr(b1)
header += chr(b2)
l = struct.pack(">H", payload_len)
header += l
message = header +message
else:
b2 |= 127
header += chr(b1)
header += chr(b2)
l = struct.pack(">Q", payload_len)
header += l
message = header +message
return message
class TestWebSockets(unittest.TestCase):
def setUp(self):
self.wsServer = ws('',12345,'127.0.0.1')
self.wsServer.setRunning(True);
sleep(1)
self.testsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.testsocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Create a socket object
host = 'localhost' # Get local machine name
port = 12345
self.testsocket.connect((host, port))
def tearDown(self):
self.wsServer.closeConnection();
self.testsocket.close()
sleep(1)
def test_webSocketServerOBject(self):
self.assertEqual(self.wsServer.SERVER, '', "Server set to the desired value")
self.assertEqual(self.wsServer.PORT, 12345, "Server port is set correctly")
self.assertEqual(self.wsServer.LOCALHOST, "127.0.0.1", "Localhost set to 127.0.0.1")
def test_invalid_Request(self):
message= "Test Message"
self.testsocket.send(message)
data = repr(self.testsocket.recv(1024))
#print 'Response to invalid message<TestMessage> %s'%(data)
self.assertEqual(data, '\'CONNECTION_REJECTED\'', "Invalid Message rejected")
def test_valid_WS_Request(self):
message = "GET /mychat HTTP/1.1\nHost: server.example.com\nUpgrade: websocket\nConnection: Upgrade\nSec-WebSocket-Key: <KEY>nSec-WebSocket-Protocol: chat\nSec-WebSocket-Version: 13\nOrigin: localhost\n\n"
# message = "Test message"
self.testsocket.sendall(message)
wsresponse = repr(self.testsocket.recv(1024))
#print 'Response to valid ws request %s'%wsresponse
self.assertNotEqual(wsresponse, '\'CONNECTION_REJECTED\'', "Connection is not rejected")
self.assertIsNotNone(wsresponse, "Connection Response is not Empty")
self.testsocket.sendall(("Test Message"))
data = repr(self.testsocket.recv(1024))
        #print('Response to unencoded request %s' % data)
        self.assertEqual(data, "\'Un expected opcode\'", "Invalid message rejected")
    def test_invalid_Message(self):
message = "GET /mychat HTTP/1.1\nHost: server.example.com\nUpgrade: websocket\nConnection: Upgrade\nSec-WebSocket-Key: <KEY>==\nSec-WebSocket-Protocol: chat\nSec-WebSocket-Version: 13\nOrigin: localhost\n\n"
self.testsocket.sendall(message)
wsresponse = repr(self.testsocket.recv(1024))
sleep(1)
self.testsocket.sendall("Test Message")
data = repr(self.testsocket.recv(1024))
        self.assertEqual(data, "\'Un expected opcode\'", "Invalid message rejected")
def test_malformed_Message(self):
message = "GET /mychat HTTP/1.1\nHost: server.example.com\nUpgrade: websocket\nConnection: Upgrade\nSec-WebSocket-Key: <KEY>==\nSec-WebSocket-Protocol: chat\nSec-WebSocket-Version: 13\nOrigin: localhost\n\n"
self.testsocket.sendall(message)
wsresponse = repr(self.testsocket.recv(1024))
# print wsresponse
self.testsocket.send(encodeMessage("Test Message"))#This line seems to get stuck at times. Solution is to use sendAll, use \n at the end
data = repr(self.testsocket.recv(1024))
self.assertEqual(data, "\'MISFORMATED MESSAGE\'", "Messages with out a type is rejected")
def test_wellformed_Message_for_Text(self):
message = "GET /mychat HTTP/1.1\nHost: server.example.com\nUpgrade: websocket\nConnection: Upgrade\nSec-WebSocket-Key: <KEY>nSec-WebSocket-Protocol: chat\nSec-WebSocket-Version: 13\nOrigin: localhost\n\n"
self.testsocket.sendall(message)
wsresponse = repr(self.testsocket.recv(1024))
# print wsresponse
self.testsocket.send(encodeMessage("1<---->Test Message"))#This line seems to get stuck at times. Solution is to use sendAll, use \n at the end
data = repr(self.testsocket.recv(1024))
        print(data)
self.assertEqual(data, "\'Text received\'", "Text Messages is identified and accepted")
def test_wellformed_Message_for_Json(self):
message = "GET /mychat HTTP/1.1\nHost: server.example.com\nUpgrade: websocket\nConnection: Upgrade\nSec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\nSec-WebSocket-Protocol: chat\nSec-WebSocket-Version: 13\nOrigin: localhost\n\n"
self.testsocket.sendall(message)
wsresponse = repr(self.testsocket.recv(1024))
self.testsocket.send(encodeMessage("2<---->Test Message"))#This line seems to get stuck at times. Solution is to use sendAll, use \n at the end
data = repr(self.testsocket.recv(1024))
# print data
self.assertEqual(data, "\'json is received\'", "json Messages is identified and accepted")
##TO RUN THE FOLLOWING UNIT TESTS YOU ARE EXPECTED TO HAVE THE DATABASE
##CREATED. A DATABASE SCRIPT IS PROVIDED TO CREATE THE NECESSARY DATABASES AND TABLES.
##ASSOCIATED DATA IS NOT PROVIDED.
class TestDatabase(unittest.TestCase):
def setUp(self):
self.connection = es.dbConnect()
def tearDown(self):
self.connection.close()
def test_data_insert_data_Read(self):
self.assertIsInstance(self.connection, MySQLdb.connection, "Database connection accurately set")
jsondata ={"type":"image", "time":"2014.3.4_14.40.30", "ext":"png", "deviceType":"Mobile", "deviceOS":"Badda", "browsertype":"Firefox", "position":{"lon":25.4583105, "lat":65.0600797, "alt":-1000, "acc":48.38800048828125}, "device":{"ax":0, "ay":0, "az":0, "gx":0, "gy":0, "gz":0, "ra":210.5637, "rb":47.5657, "rg":6.9698, "orientation":"potrait"}, "vwidth":480, "vheight":800}
alt = str(jsondata["position"]["alt"]);
if alt=="None":
alt = '0'
heading = '0'
speed = '0'
width = jsondata["vwidth"]
height =jsondata["vheight"]
        if width > height:
            screenorientation = 1.00  # landscape
        else:
            screenorientation = 0.00  # portrait
filename = jsondata["type"]+"_"+jsondata["time"]+"."+jsondata["ext"]
sqlstring1 = "INSERT INTO Imagedata values (\'"+filename+"\',GeomFromText ('POINT("+ str(jsondata["position"]["lat"])+" "+str(jsondata["position"]["lon"])+")'),"+str(jsondata["position"]["alt"])+","+str(jsondata["position"]["acc"])
sqlstring2 =","+str(jsondata["device"]["gx"])+","+str(jsondata["device"]["gy"])+","+str(jsondata["device"]["gz"])
sqlstring3 = ","+str(jsondata["device"]["ra"])+","+str(jsondata["device"]["rb"])+","+str(jsondata["device"]["rg"])+","+str(screenorientation)+",\'"+jsondata["device"]["orientation"]+"\',now(),\'"+str(jsondata["deviceOS"])+"\',\'"+str(jsondata["browsertype"])+"\',\'"+str(jsondata["deviceType"])+"\');"
sqlstring = sqlstring1 + sqlstring2+ sqlstring3
#print(sqlstring)
es.dbInsert(sqlstring)
        sqlreadstring = 'select imagename, Browser, devicetype, X(location) as latitude, Y(location) as longitude from Imagedata where time=\'2014.3.4_14.40.31\''
        result = es.dbRead(sqlreadstring)
self.assertIsNotNone(result, "Inserted data is retrieved and it is not null")
for row in result:
self.assertEqual(row[0], "image_2014.3.4_14.40.30.png", "Image name is correctly set and saved")
            self.assertEqual(row[3], 65.0600797, "Latitude is saved")
            self.assertEqual(row[4], 25.4583105, "Longitude is saved")
HOST = '127.0.0.1' # The remote host
PORT = 17322
class RestServerTestCase(unittest.TestCase):
def setUp(self):
self.db_fd, flaskr.app.config['DATABASE'] = tempfile.mkstemp()
EventService.app.config['TESTING'] = True
self.app = EventService.app.test_client()
flaskr.init_db()
#self.socketServer = self.app.WebSocketServer('',wsport,'127.0.0.1')
def test_rootpath(self):
rv = self.app.get('/')
assert 'This is a REST Service for 2D3DCapture Server.' in rv.data
def test_post_image(self):
rv = self.app.post('/postImage')
assert 'READY' in rv.data
    def test_closing_websocket(self):
        rv = self.app.post('/closewebsocketserver')
        assert 'CLOSED' in rv.data or 'ALREADY_CLOSSED' in rv.data
def test_start_websocket(self):
rv =self.app.get('/startwebsocketserver')
# print rv.data
assert 'READY' in rv.data
def test_post_binary_image(self):
rv =self.app.post('/postBinaryImage')
        assert 'READY' in rv.data or '415 Unsupported Media Type' in rv.data
def test_get_All_Image_Data(self):
rv =self.app.get('/getAllImageData')
jsonmsg = json.loads(rv.data)
self.assertIsNotNone(jsonmsg['imageList'] , "getImageData returns a non None list")
def test_get_location_Image_Data(self):
rv =self.app.get('/getLocationImageData?lat=65.0600797&lon=25.4583105')
jsonmsg = json.loads(rv.data)
self.assertIsNotNone(jsonmsg['imageList'] , "getLocationImageData returns a non None list.This is a feature test for location based image data")
def test_closest_Image_retrieval(self):
jsondata1 ={"type":"image", "time":"2014.3.4_14.40.31", "ext":"png", "deviceType":"Mobile", "deviceOS":"Badda", "browsertype":"Firefox", "position":{"lon":25.4583105, "lat":65.0600797, "alt":-1000, "acc":48.38800048828125}, "device":{"ax":0, "ay":0, "az":0, "gx":0, "gy":0, "gz":0, "ra":210.5637, "rb":47.5657, "rg":6.9698, "orientation":"potrait"}, "vwidth":480, "vheight":800}
jsondata2 ={"type":"image", "time":"2014.3.4_14.40.32", "ext":"png", "deviceType":"Mobile", "deviceOS":"Badda", "browsertype":"Firefox", "position":{"lon":25.4582115, "lat":65.0600797, "alt":-1000, "acc":48.38800048828125}, "device":{"ax":0, "ay":0, "az":0, "gx":0, "gy":0, "gz":0, "ra":210.5637, "rb":47.5657, "rg":6.9698, "orientation":"potrait"}, "vwidth":480, "vheight":800}
jsondata3 ={"type":"image", "time":"2014.3.4_14.40.33", "ext":"png", "deviceType":"Mobile", "deviceOS":"Badda", "browsertype":"Firefox", "position":{"lon":25.4584104, "lat":65.0600797, "alt":-1000, "acc":48.38800048828125}, "device":{"ax":0, "ay":0, "az":0, "gx":0, "gy":0, "gz":0, "ra":210.5637, "rb":47.5657, "rg":6.9698, "orientation":"potrait"}, "vwidth":480, "vheight":800}
jsondata4 ={"type":"image", "time":"2014.3.4_14.40.34", "ext":"png", "deviceType":"Mobile", "deviceOS":"Badda", "browsertype":"Firefox", "position":{"lon":25.4586115, "lat":65.0600797, "alt":-1000, "acc":48.38800048828125}, "device":{"ax":0, "ay":0, "az":0, "gx":0, "gy":0, "gz":0, "ra":210.5637, "rb":47.5657, "rg":6.9698, "orientation":"potrait"}, "vwidth":480, "vheight":800}
jsondata5 ={"type":"image", "time":"2014.3.4_14.40.35", "ext":"png", "deviceType":"Mobile", "deviceOS":"Badda", "browsertype":"Firefox", "position":{"lon":25.4587125, "lat":65.0600797, "alt":-1000, "acc":48.38800048828125}, "device":{"ax":0, "ay":0, "az":0, "gx":0, "gy":0, "gz":0, "ra":210.5637, "rb":47.5657, "rg":6.9698, "orientation":"potrait"}, "vwidth":480, "vheight":800}
jsondata6 ={"type":"image", "time":"2014.3.4_14.40.36", "ext":"png", "deviceType":"Mobile", "deviceOS":"Badda", "browsertype":"Firefox", "position":{"lon":25.4588125, "lat":65.0600797, "alt":-1000, "acc":48.38800048828125}, "device":{"ax":0, "ay":0, "az":0, "gx":0, "gy":0, "gz":0, "ra":210.5637, "rb":47.5657, "rg":6.9698, "orientation":"potrait"}, "vwidth":480, "vheight":800}
es.saveData(jsondata1)
es.saveData(jsondata2)
es.saveData(jsondata3)
es.saveData(jsondata4)
es.saveData(jsondata5)
es.saveData(jsondata6)
radius = 0.0001
photoList = es.getClosestImages( 65.0601787, 25.4583107, radius )
        self.assertEqual(len(photoList), 4, "Four images should be within the radius in the first query")
for row in photoList:
            assert 'image_2014.3.4_14.40.32.png' in row[0] or 'image_2014.3.4_14.40.31.png' in row[0]
photoList2 = es.getClosestImages( 65.0601787, 25.4587107, radius )
        self.assertEqual(len(photoList2), 2, "Two images should be within the radius in the second query")
for row in photoList2:
            assert 'image_2014.3.4_14.40.34.png' in row[0] or 'image_2014.3.4_14.40.35.png' in row[0]
def suite():
    testsuite = unittest.TestSuite()
    testsuite.addTest(TestWebSockets('test_webSocketServerOBject'))
    testsuite.addTest(TestWebSockets('test_valid_WS_Request'))
    testsuite.addTest(TestWebSockets('test_invalid_Message'))
    testsuite.addTest(TestWebSockets('test_invalid_Request'))
    testsuite.addTest(TestWebSockets('test_malformed_Message'))
    testsuite.addTest(TestWebSockets('test_wellformed_Message_for_Text'))
    testsuite.addTest(TestWebSockets('test_wellformed_Message_for_Json'))
    testsuite.addTest(TestDatabase('test_data_insert_data_Read'))
    testsuite.addTest(RestServerTestCase('test_rootpath'))
    testsuite.addTest(RestServerTestCase('test_post_image'))
    testsuite.addTest(RestServerTestCase('test_start_websocket'))
    testsuite.addTest(RestServerTestCase('test_closing_websocket'))
    testsuite.addTest(RestServerTestCase('test_post_binary_image'))
    testsuite.addTest(RestServerTestCase('test_get_All_Image_Data'))
    testsuite.addTest(RestServerTestCase('test_closest_Image_retrieval'))
    return testsuite
suite = suite()
runner = unittest.TextTestRunner(verbosity=3)
runner.run(suite)
# if __name__ == "__main__":
# #import sys;sys.argv = ['', 'Test.testName']
# unittest.main()
|
StarcoderdataPython
|
3206985
|
import pytest
from .context import gatherers # noqa
from gatherers import rdns
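# The parametrized case below feeds raw rDNS JSON lines through process_lines and expects
# the ip_filter / number_filter passes to drop IP-like and numeric-prefixed values,
# keeping only plain hostnames.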
@pytest.mark.parametrize("data,expected", [
(
[
'{"value": "18f.gov"}',
'{"value": "123.112.18f.gov"}',
'{"value": "172.16.17.32"}',
'{"value": "u-123.112.23.23"}',
'{"value": "123.112.fed.us"}',
'{"value": "something.fed.us"}',
'{"value": "18f.gsa.gov"}',
'{"timestamp":"1510189589","name":"172.16.17.32","value":"www.bart.gov","type":"ptr"}',
'{"timestamp":"1510189590","name":"192.168.3.11","value":"z-166-2-164-127.ip.fs.fed.us","type":"ptr"}',
'{"timestamp":"1510189590","name":"192.168.3.11","value":"z-199-131-187-116.ip.fs.fed.us","type":"ptr"}',
'{"timestamp":"1510189590","name":"192.168.3.11","value":"192.168.3.11.4k.usda.gov","type":"ptr"}',
'{"timestamp":"1510189591","name":"172.16.17.32","value":"wildcard.jpl.nasa.gov","type":"ptr"}',
'{"timestamp":"1510189591","name":"192.168.127.12","value":"152-132-2-60.tic.va.gov","type":"ptr"}',
'{"timestamp":"1510189591","name":"192.168.127.12","value":"z-166-3-217-20.ip.fs.fed.us","type":"ptr"}',
'{"timestamp":"1510189591","name":"172.16.58.3","value":"167-253-203-215-gov.emcbc.doe.gov","type":"ptr"}',
'{"timestamp":"1510189591","name":"192.168.127.12","value":"172.16.17.321.4k.usda.gov","type":"ptr"}',
'{"timestamp":"1510189592","name":"172.16.17.32","value":"140-215-230-154.usbr.gov","type":"ptr"}',
'{"timestamp":"1510189593","name":"192.168.3.11","value":"z-166-6-157-98.ip.fs.fed.us","type":"ptr"}',
'{"timestamp":"1510189595","name":"192.168.127.12","value":"130.20.175.6.pnnl.gov","type":"ptr"}',
'{"timestamp":"1510189595","name":"192.168.3.11","value":"192.168.3.11.4k.usda.gov","type":"ptr"}',
'{"timestamp":"1510189595","name":"192.168.127.12","value":"192.168.127.12.4k.usda.gov","type":"ptr"}',
'{"timestamp":"1510189596","name":"192.168.127.12","value":"199.145.148.196.4k.usda.gov","type":"ptr"}',
'{"timestamp":"1510189597","name":"192.168.3.11","value":"host.159-142-211-155.gsa.gov","type":"ptr"}',
'{"timestamp":"1510189597","name":"172.16.58.3","value":"u-159-189-28-97.xr.usgs.gov","type":"ptr"}',
'{"timestamp":"1510189598","name":"172.16.17.32","value":"host.jsc.nasa.gov","type":"ptr"}',
'{"timestamp":"1510189599","name":"172.16.17.32","value":"unassigned.epa.gov","type":"ptr"}',
'{"timestamp":"1510189600","name":"192.168.127.12","value":"u-130-118-135-187.xr.usgs.gov","type":"ptr"}',
'{"timestamp":"1510189600","name":"172.16.58.3","value":"140-214-229-183.usbr.gov","type":"ptr"}',
'{"timestamp":"1510189600","name":"172.16.31.10","value":"199.148.94.97.4k.usda.gov","type":"ptr"}',
'{"timestamp":"1510189601","name":"172.16.58.3","value":"z-170-144-139-133.ip.fs.fed.us","type":"ptr"}',
],
[
"18f.gov",
"something.fed.us",
"18f.gsa.gov",
"www.bart.gov",
"wildcard.jpl.nasa.gov",
# "host.159-142-211-155.gsa.gov", TODO: currently gets stripped, but should it?
"host.jsc.nasa.gov",
"unassigned.epa.gov",
]
),
])
def test_query_for(data, expected):
result = rdns.process_lines(data, rdns.ip_filter, rdns.number_filter)
assert list(result) == expected
|
StarcoderdataPython
|
3213654
|
<filename>tests/test_templater.py
# coding: utf-8
from unittest import TestCase
import templater
import sys
class Silence:
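    # Minimal stdout stand-in: records everything written to it so tests can inspect
    # printed output afterwards (calling the instance returns the collected log).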
def __init__(self):
self.__log = []
return
def __call__(self):
return self.__log
def write(self, x):
self.__log.append(x)
return
class TestFakeMustaches(TestCase):
def test_fake_mustaches(self):
FM = templater.FakeMustaches
self.assertEqual(FM(' {{test}} {{rar}} ').safe_substitute(test='rar', rar='test'),
' rar test ')
self.assertEqual(FM(' {{test}} {{rar}} ').substitute(test='rar', rar='test'),
' rar test ')
self.assertEqual(FM(' {test} {{rar}} ').substitute(test='rar', rar='test'),
' {test} test ')
excepted = False
try:
FM(' {{test}} {{rar}} ').substitute(test='rar')
except KeyError:
excepted = True
self.assertTrue(excepted)
excepted = False
try:
FM(' {{test}} {{rar}} ').safe_substitute(test='rar')
except KeyError:
excepted = True
self.assertFalse(excepted)
class TestTemplater(TestCase):
def run_templater_main(self, file_prefix):
silence = Silence()
sys.stdout = silence
filename = file_prefix + '.txt'
excepted = False
try:
template_string = templater.graceful_read(filename)
except SystemExit:
excepted = True
self.assertFalse(excepted)
exited = False
try:
templater.main(filename, template_string, 10, 'test')
except SystemExit:
exited = True
# self.assertTrue(exited)
sys.stdout = sys.__stdout__
excepted = False
try:
output = templater.graceful_read(file_prefix + '-test-10.txt')
except SystemExit:
excepted = True
self.assertFalse(excepted)
self.assertTrue(len(output) > 0)
# return the result and the print log
return (output, silence())
def test_file(self):
silence = Silence()
sys.stdout = silence
excepted = False
try:
templater.graceful_read('/var/null')
except SystemExit:
excepted = True
self.assertTrue(excepted)
sys.stdout = sys.__stdout__
def test_template1(self):
import re
output, log = self.run_templater_main('tests/test_templater_template-1')
self.assertFalse(re.match(r'success', '\n'.join(log), re.IGNORECASE) is None)
# print(output, type(output))
self.assertTrue(output.find('{{') == -1)
self.assertTrue(re.search(r'\{\{.*\}\}', output) is None)
self.assertFalse(re.search(r'^Code: test\r?\n?$', output, re.MULTILINE) is None)
self.assertFalse(re.search(r'^Number: 10\r?\n?$', output, re.MULTILINE) is None)
def test_template2(self):
import re
output, log = self.run_templater_main('tests/test_templater_template-2')
self.assertFalse(re.match(r'success', '\n'.join(log), re.IGNORECASE) is None)
self.assertTrue(output.find('{{') == -1)
self.assertTrue(re.search(r'\{\{.*\}\}', output) is None)
self.assertFalse(re.search(r'^Code: test\r?\n?$', output, re.MULTILINE) is None)
self.assertFalse(re.search(r'^Total number: 10\r?\n?$', output, re.MULTILINE) is None)
for i in range(1,11):
self.assertFalse(re.search(r'^Code in item: test\r?\n?$', output, re.MULTILINE) is None)
self.assertFalse(re.search(r'^Total number in item: 10\r?\n?$', output, re.MULTILINE) is None)
            self.assertFalse(re.search(r'^Item number: ' + str(i) + r'\r?\n?$', output, re.MULTILINE) is None)
            self.assertFalse(re.search(r'^Field 1: \$\{field_' + str(i) + r'_1\}\r?\n?$', output, re.MULTILINE) is None)
            self.assertFalse(re.search(r'^Field 2: \$\{field_' + str(i) + r'_2\}\r?\n?$', output, re.MULTILINE) is None)
# @expectedFailure
# def test_template_nonsense(self):
# import re
# output, log = self.run_templater_main('tests/test_templater_template-1')
#
# self.assertFalse(re.search(r'^Extra: \r?\n?$', output, re.MULTILINE) is None)
# self.assertTrue(output.find('nonsense') == -1)
#
# output, log = self.run_templater_main('tests/test_templater_template-2')
#
# self.assertFalse(re.search(r'^Extra: \r?\n?$', output, re.MULTILINE) is None)
# self.assertTrue(output.find('nonsense') == -1)
|
StarcoderdataPython
|
3230696
|
<reponame>SimonSchubotz/Electronic-Laboratory-Notebook<gh_stars>0
import json
import glob, os
import dash
import plotly.io as pio
import datetime
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.express as px
from django_plotly_dash import DjangoDash
from django.apps import apps
import plotly.graph_objects as go
from Lab_Misc.General import *
from dbfread import DBF
import pandas as pd
from django.http import HttpResponse
from django.utils import timezone
import numpy as np
import datetime
from Lab_Misc import General
from Exp_Main.models import SFG, ExpBase, OCA
from Analysis.models import OszAnalysis, OszBaseParam, OszAnalysisJoin
from Lab_Dash.models import OszAnalysis as OszAnalysis_Dash
from Lab_Dash.models import OszAnalysisEntry
from Exp_Sub.models import LSP
from plotly.subplots import make_subplots
from Lab_Misc import Load_Data
def conv(x):
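    # Replace decimal commas with dots and encode to bytes, presumably so that
    # comma-formatted numeric fields can be parsed downstream.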
return x.replace(',', '.').encode()
def Gen_dash(dash_name):
class Gen_fig():
select_y1 = ['']
select_y2 = ['']
def load_data(self, target_id):
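            # Look up the OszAnalysisJoin entry behind the dash target id, remember the ids
            # of its linked analyses, and report whether anything was found.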
try:
entry = OszAnalysisJoin.objects.get(id = target_id)
self.entry = entry
self.Saved_ExpBases = entry.OszAnalysis.all().values_list('id', flat=True)
return True, 'Data found!'
except:
return False, 'No data found!'
def get_subs(self):
Sub_Exps = self.entry.Sub_Exp.all()
return_str = ''
for Sub_Exp in Sub_Exps:
Device = Sub_Exp.Device
model = apps.get_model('Exp_Sub', str(Device.Abbrev))
Exp_in_Model = model.objects.get(id = Sub_Exp.id)
if Device.Abbrev == 'MFL':
Gas = Exp_in_Model.Gas.first()
if Gas.Name == 'H2O':
MFL_H2O_data = self.get_sub_csv(Exp_in_Model)
#MFL_H2O_data['Flow (Std.)'] = MFL_H2O_data['Flow (Std.)'][MFL_H2O_data['Flow (Std.)']>600]/10#correct for wrong format
MFL_H2O_data['Date_Time'] = pd.to_datetime(MFL_H2O_data['Date_Time'], format='%d.%m.%Y %H:%M:%S', errors="coerce")
MFL_H2O_data['time'] = MFL_H2O_data['Date_Time'].dt.tz_localize(timezone.get_current_timezone())
self.data.update(MFL_H2O_data = MFL_H2O_data)
return_str += ', massflow of water stream'
if Gas.Name == 'N2':
MFL_N2_data = self.get_sub_csv(Exp_in_Model)
MFL_N2_data['Date_Time'] = pd.to_datetime(MFL_N2_data['Date_Time'], format='%d.%m.%Y %H:%M:%S', errors="coerce")
#MFL_N2_data['Flow (Std.)'] = MFL_N2_data['Flow (Std.)'][MFL_N2_data['Flow (Std.)']>600]/10#correct for wrong format
MFL_N2_data['time'] = MFL_N2_data['Date_Time'].dt.tz_localize(timezone.get_current_timezone())
self.data.update(MFL_N2_data = MFL_N2_data)
return_str += ', massflow of nitrogen stream'
if Device.Abbrev == 'HME':
if Exp_in_Model.Environments == '1':
Humidity_data = self.get_sub_dbf(Exp_in_Model)
Humidity_data['UHRZEIT'] = pd.to_datetime(Humidity_data['DATUM'] + Humidity_data['UHRZEIT'], format='%d.%m.%Y %H:%M:%S', errors="coerce")
Humidity_data['time'] = Humidity_data['UHRZEIT'].dt.tz_localize(timezone.get_current_timezone())
self.data.update(HME_cell = Humidity_data)
return_str += ', humidity measurements of the cell'
self.has_sub = True
if Exp_in_Model.Environments == '2':
Humidity_data = self.get_sub_dbf(Exp_in_Model)
Humidity_data['UHRZEIT'] = pd.to_datetime(Humidity_data['DATUM'] + Humidity_data['UHRZEIT'], format='%d.%m.%Y %H:%M:%S', errors="coerce")
Humidity_data['time'] = Humidity_data['UHRZEIT'].dt.tz_localize(timezone.get_current_timezone())
self.data.update(HME_data_room = Humidity_data)
return_str += ', humidity measurements of the room'
self.has_sub = True
return return_str
def get_sub_dbf(self, model):
file = os.path.join( rel_path, model.Link)
table = DBF(file, load=True)
df = pd.DataFrame(iter(table))
return df
def get_sub_csv(self, model):
file = os.path.join( rel_path, model.Link)
#file_name = file[get_LastIndex(file, '\\')+1:get_LastIndex(file, '.')]
df = pd.read_csv(file, sep=';', error_bad_lines=False, decimal = ',', parse_dates=[['Date', 'Time']])#skips bad lines
return df
def slice_data(self, data):
DashTab = self.entry.Dash
if isinstance(DashTab.Y_high, float):
slice_signal = data['Y_axis']<DashTab.Y_high
data = data[slice_signal]
if isinstance(DashTab.Y_low, float):
slice_signal = data['Y_axis']>DashTab.Y_low
data = data[slice_signal]
if isinstance(DashTab.X_high, float):
slice_wavenumber = data['X_axis']<DashTab.X_high
data = data[slice_wavenumber]
if isinstance(DashTab.X_low, float):
slice_wavenumber = data['X_axis']>DashTab.X_low
data = data[slice_wavenumber]
return data
def sel_plot(self):
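            # Build one scatter trace (with error bars) per analysis selected in the second
            # dropdown, covering the chosen drop, fit and derived parameters.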
fig = go.Figure()
i = 0
for y2 in self.select_y2:
entry = OszAnalysis.objects.get(id = y2)
Drop_parameters, Osz_fit_parameters, Derived_parameters = Load_Data.Load_OszAnalysis_in_df(y2)
Positions = ['Right', 'Left', 'Average']
Param_columns = ['Drop_Nr', 'Max_CL', 'Max_CA', 'Min_CA', 'Min_AdvCA']
Fit_columns = ['Drop_Nr', 'x_pos', 'y_pos', 'Step_width', 'Step_hight']
Derived_col = ['Drop_Nr', 'Hit_prec', 'Fit_score']
plot_columns = ['x_lable', 'y_value', 'y_error', 'Drop_Nr', 'Parameter', 'Position']
plt_df = pd.DataFrame(columns=plot_columns)
for selparam in self.Field_Selection:
if General.save_index(Param_columns, selparam) != -1:
for position in Positions:
if General.save_index(self.LoR, position) != -1:
plt_dfi = pd.DataFrame(columns=plot_columns)
xi = ['Drop_' + str(int(dropNr)) + '_' + str(selparam) + '_' + str(position) for dropNr in Drop_parameters['General']['Drop_Nr']]
if position == 'Average':
yi = (Drop_parameters['Right'][selparam] + Drop_parameters['Left'][selparam])/2
else:
yi = Drop_parameters[position][selparam]
yei = np.zeros(len(xi))
plt_dfi['x_lable'] = xi
plt_dfi['y_value'] = yi
plt_dfi['y_error'] = yei
plt_dfi['Drop_Nr'] = [dropNr for dropNr in Drop_parameters['General']['Drop_Nr']]
plt_dfi['Parameter'] = selparam
plt_dfi['Position'] = position
plt_df = plt_df.append(plt_dfi)
for selparam in self.Field_Selection:
if General.save_index(Derived_col, selparam) != -1:
for position in Positions:
if General.save_index(self.LoR, position) != -1:
plt_dfi = pd.DataFrame(columns=plot_columns)
xi = ['Drop_' + str(int(dropNr)) + '_' + str(selparam) + '_' + str(position) for dropNr in Derived_parameters['General']['Drop_Nr']]
if position == 'Average':
yi = (Derived_parameters['Right'][selparam] + Derived_parameters['Left'][selparam])/2
else:
yi = Derived_parameters[position][selparam]
yei = np.zeros(len(xi))
plt_dfi['x_lable'] = xi
plt_dfi['y_value'] = yi
plt_dfi['y_error'] = yei
plt_dfi['Drop_Nr'] = [dropNr for dropNr in Derived_parameters['General']['Drop_Nr']]
plt_dfi['Parameter'] = selparam
plt_dfi['Position'] = position
plt_df = plt_df.append(plt_dfi)
if General.save_index(Fit_columns, selparam) != -1:
for position in Positions:
if General.save_index(self.LoR, position) != -1:
if position == 'Average':
slice_good_fit = (Derived_parameters['Left']['Fit_score'] == 1) & (Derived_parameters['Right']['Fit_score'] == 1)
else:
slice_good_fit = np.array(Derived_parameters[position]['Fit_score'].astype(int)) == 1
plt_dfi = pd.DataFrame(columns=plot_columns)
xi = ['Drop_' + str(int(dropNr)) + '_' + str(selparam) + '_' + str(position) for dropNr in Osz_fit_parameters['General']['General']['Drop_Nr']]
if position == 'Average':
yi = (Osz_fit_parameters['Right']['Value'][selparam] + Osz_fit_parameters['Left']['Value'][selparam])/2
else:
yi = Osz_fit_parameters[position]['Value'][selparam]
if position == 'Average':
yei = (Osz_fit_parameters['Right']['Error'][selparam] + Osz_fit_parameters['Left']['Error'][selparam])/2
else:
yei = Osz_fit_parameters[position]['Error'][selparam]
yei = np.where(yei > 50, 50, yei).tolist() #limit max error to 50
plt_dfi['x_lable'] = np.asarray(xi)[slice_good_fit]
plt_dfi['y_value'] = np.asarray(yi)[slice_good_fit]
plt_dfi['y_error'] = np.asarray(yei)[slice_good_fit]
plt_dfi['Drop_Nr'] = np.asarray([dropNr for dropNr in Osz_fit_parameters['General']['General']['Drop_Nr']])[slice_good_fit]
plt_dfi['Parameter'] = selparam
plt_dfi['Position'] = position
plt_df = plt_df.append(plt_dfi)
plt_df = plt_df.sort_values(by=['Parameter', 'Position', 'Drop_Nr'])
fig.add_trace(go.Scattergl(x=plt_df['x_lable'], y=plt_df['y_value'],
error_y = dict(
type='data', # value of error bar given in data coordinates
array=plt_df['y_error'],
visible=True,
thickness=1.5,),
mode='markers',
name=entry.Name),
)
i+=1
return fig
value = 'temp'
global fig
global Save_clicked
Save_clicked = 0
global Title_clicked
Title_clicked = 0
app = DjangoDash(name=dash_name, id='target_id')
cwd = os.getcwd()
rel_path = get_BasePath()
GenFig = Gen_fig()
fig = {
'data': [{
'y': [1]
}],
'layout': {
'height': 800
}
}
axis_options = [
{'label': 'Dummy', 'value': 'Dummy'},
]
app.layout = html.Div(children=[
html.Div([dcc.Dropdown(id='my-dropdown1',
options=[{'label': 'Select plot', 'value': 'sel_plot'},
],
value='sel_plot',
className='col-md-12',
),
]),
html.Div(id='title', children = [
dcc.Textarea(
id='textarea_tile',
value='Title',
style={'width': '50%', 'height': 20},
),
html.Button('Submit', id='textarea_tile_btn'),
]),
html.Div(id='Plot_sel_dropdown', children = [
dcc.Dropdown(
options=axis_options,
id='MS_drop_y1',
value=['MTL', 'SF'],
style={'width': '33%', 'display': 'table-cell'},
),
dcc.Dropdown(
options=axis_options,
id='MS_drop_y2',
value=[],
multi=True,
style={'width': '33%', 'display': 'table-cell'},
),
dcc.Dropdown(
options=axis_options,
id='MS_Field_Selection',
value=[],
multi=True,
style={'width': '20%', 'display': 'table-cell'},
),
dcc.Dropdown(
options=axis_options,
id='MS_LoR',
value=[],
multi=True,
style={'width': '7%', 'display': 'table-cell'},
),
html.Button('Plot', id='Btn_Plot'),
], style={'width': '100%', 'display': 'flex', 'flex-direction': 'row'},),
dcc.Input(id='target_id', type='hidden', value='1'),
html.Div(id='placeholder', style={'display': 'none'}),
dcc.Graph(
id='example-graph',
figure=fig,
),
dcc.Graph(
id='Sel_plot_graph',
figure=fig,
),
html.Button('Load data', id='Load_Data'),
dcc.Loading(
id="loading",
children=[html.Div([html.Div(id="loading-output")])],
type="default",
),
html.Button('Save image', id='Btn_save_image'),
html.Div(id='Save_output'),
])
def save_fig():
global fig
fig.write_image("fig1.png", width=800, height=400, scale=10)
@app.callback(
Output(component_id='Save_output', component_property='children'),
[Input('Btn_save_image', 'n_clicks'),
Input('MS_drop_y2', 'value'),
Input('textarea_tile', 'value'),],
)
def save_figure(n_clicks, MS_drop_y2, textarea_tile, *args,**kwargs):
global Save_clicked
if n_clicks > Save_clicked:
Save_clicked = n_clicks
OszAnalysis_dash_item = OszAnalysis_Dash(Name = textarea_tile)
OszAnalysis_dash_item.save()
OszAnalysis_list = OszAnalysis.objects.filter(pk__in=MS_drop_y2)
OszAnalysis_item = OszAnalysisJoin(Name = textarea_tile, Dash = OszAnalysis_dash_item)
OszAnalysis_item.save()
OszAnalysis_item.OszAnalysis.add(*OszAnalysis_list)
OszAnalysis_item.save()
for OszAnalysis_id in MS_drop_y2:
entry = OszAnalysis.objects.get(id = OszAnalysis_id)
OszAnalysis_entry_item = OszAnalysisEntry(Name = entry.Name, OszAnalysisID = int(OszAnalysis_id))
OszAnalysis_entry_item.save()
OszAnalysis_dash_item.Entry.add(OszAnalysis_entry_item)
OszAnalysis_dash_item.save()
return 'Image Saved!'
@app.callback(
Output(component_id='Sel_plot_graph', component_property='figure'),
[Input('Btn_Plot', 'n_clicks')],
)
def update_figure_Sel_plot(n_clicks, *args,**kwargs):
fig = GenFig.sel_plot()
return fig
@app.callback(
Output(component_id='placeholder', component_property='style'),
[Input('MS_drop_y1', 'value'),
Input('MS_drop_y2', 'value'),
Input('MS_Field_Selection', 'value'),
Input('MS_LoR', 'value'),]
)
def update_sel_list(select_y1, select_y2, Field_Selection, LoR, *args,**kwargs):
style={'display': 'none'}
GenFig.select_y1 = select_y1
GenFig.select_y2 = select_y2
GenFig.Field_Selection = Field_Selection
GenFig.LoR = LoR
return style
@app.callback(
[Output(component_id='MS_drop_y1', component_property='style'),
Output(component_id='MS_drop_y2', component_property='style'),
Output(component_id='Btn_Plot', component_property='style'),
Output(component_id='Sel_plot_graph', component_property='style'),
Output(component_id='example-graph', component_property='style'),],
[Input('my-dropdown1', 'value')]
)
def update_visible(Graph_select, *args,**kwargs):
if Graph_select != 'sel_plot':
style={'display': 'none'}
style_g={'display': 'block'}
return [style, style, style, style, style_g]
else:
style_d={'width': '33%', 'display': 'table-cell'}
style_b={'display': 'table-cell'}
style_g={'display': 'block'}
style={'display': 'none'}
return [style_d, style_d, style_b, style_g, style]
@app.callback(
Output(component_id='example-graph', component_property='figure'),
[Input('my-dropdown1', 'value')]
)
def update_figure(Graph_select, *args,**kwargs):
global fig
if Graph_select == 'sel_plot':
fig = GenFig.sel_plot()
return fig
@app.callback(
[Output(component_id='MS_drop_y1', component_property='options'),
Output(component_id='MS_Field_Selection', component_property='options'),
Output(component_id='MS_LoR', component_property='options'),
Output("loading-output", "children"),
Output(component_id='MS_drop_y2', component_property='value'),
Output(component_id='textarea_tile', component_property='value'),],
[Input('Load_Data', 'n_clicks'),
Input('target_id', 'value'),])
def update_output(n_clicks, target_id, *args,**kwargs):
data_was_loaded, return_str = GenFig.load_data(target_id)
if data_was_loaded:
return_str += '.\n Select the desired plot at the dropdown.'
axis_options = []
label_names = ['label', 'value']
for data in OszAnalysis.objects.all():
#for col in GenFig.data[data_name].columns:
values = [data.Name + '-' + 'col', data.id]
axis_options.append(dict(zip(label_names, values)))
axis_value = []
for value in GenFig.Saved_ExpBases:
entry = OszAnalysis.objects.get(id = value)
values = [entry.Name, value]
axis_value.append(value)
else:
axis_options = [
{'label': 'Dummy', 'value': 'Dummy'},
]
axis_value = []
Field_Selection = [{'label': 'Max_CL', 'value': 'Max_CL'},
{'label': 'Max_CA', 'value': 'Max_CA'},
{'label': 'Min_CA', 'value': 'Min_CA'},
{'label': 'Min_AdvCA', 'value': 'Min_AdvCA'},
{'label': 'x_pos', 'value': 'x_pos'},
{'label': 'y_pos', 'value': 'y_pos'},
{'label': 'Step_width', 'value': 'Step_width'},
{'label': 'Step_hight', 'value': 'Step_hight'},
{'label': 'Hit_prec', 'value': 'Hit_prec'},
]
LoR = [
{'label': 'Left', 'value': 'Left'},
{'label': 'Right', 'value': 'Right'},
{'label': 'Average', 'value': 'Average'},
]
return [axis_options, Field_Selection, LoR, return_str, axis_value, GenFig.entry.Dash.Title]
@app.callback(
Output(component_id='textarea_tile', component_property='style'),
[Input('textarea_tile', 'value'),
Input('textarea_tile_btn', 'n_clicks'),]
)
def update_title(title, n_clicks, *args,**kwargs):
global Title_clicked
if n_clicks > Title_clicked:
Title_clicked = n_clicks
dash = GenFig.entry.Dash
dash.Title = title
dash.save()
        style = {'width': '50%', 'height': 20}
        return style  # a return value is required for the callback
@app.callback(
Output(component_id='MS_drop_y2', component_property='options'),
[Input('MS_drop_y1', 'value'),
Input('MS_drop_y2', 'value'),]
)
    def update_y2_options(select_y1, select_y2, *args, **kwargs):
sel = OszAnalysis.objects.get(id = select_y1)
axis_options = []
label_names = ['label', 'value']
values = [sel.Name + '-' + 'col', sel.id]
axis_options.append(dict(zip(label_names, values)))
""" for value in GenFig.Saved_ExpBases:
entry = SFG.objects.get(id = value)
values = [entry.Name, value]
axis_options.append(dict(zip(label_names, values)))
#GenFig.Saved_ExpBases = [] """
for pk in select_y2:
entry = OszAnalysis.objects.get(id = pk)
values = [entry.Name, entry.id]
#values = [entry['label'], entry['value']]
axis_options.append(dict(zip(label_names, values)))
return axis_options
|
StarcoderdataPython
|
3351291
|
<filename>lnt/server/reporting/summaryreport.py<gh_stars>10-100
import re
import lnt.testing
import lnt.util.stats
###
# Aggregation Function
class Aggregation(object):
def __init__(self):
self.is_initialized = False
def __repr__(self):
return repr(self.getvalue())
def getvalue(self):
        raise NotImplementedError
def append(self, values):
if not self.is_initialized:
self.is_initialized = True
self._initialize(values)
self._append(values)
class Sum(Aggregation):
def __init__(self):
Aggregation.__init__(self)
self.sum = None
def getvalue(self):
return self.sum
def _initialize(self, values):
self.sum = [0.] * len(values)
def _append(self, values):
for i, value in enumerate(values):
self.sum[i] += value
class Mean(Aggregation):
def __init__(self):
Aggregation.__init__(self)
self.count = 0
self.sum = None
def getvalue(self):
return [value/self.count for value in self.sum]
def _initialize(self, values):
self.sum = [0.] * len(values)
def _append(self, values):
for i, value in enumerate(values):
self.sum[i] += value
self.count += 1
class GeometricMean(Aggregation):
def __init__(self):
Aggregation.__init__(self)
self.count = 0
self.product = None
def getvalue(self):
        return [value ** (1.0 / self.count) for value in self.product]
    def __repr__(self):
        return repr(self.getvalue())
def _initialize(self, values):
self.product = [1.] * len(values)
def _append(self, values):
for i, value in enumerate(values):
self.product[i] *= value
self.count += 1
class NormalizedMean(Mean):
def _append(self, values):
baseline = values[0]
Mean._append(self, [v/baseline
for v in values])
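# Usage sketch (illustrative numbers only): an aggregation consumes one vector per
# append() call and reports a per-column result, e.g.
#   agg = GeometricMean()
#   agg.append([2.0, 8.0])
#   agg.append([8.0, 2.0])
#   agg.getvalue()  # -> [4.0, 4.0], the per-column geometric means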
###
class SummaryReport(object):
def __init__(self, db, report_orders, report_machine_names,
report_machine_patterns):
self.db = db
self.testsuites = list(db.testsuite.values())
self.report_orders = list((name, orders)
for name, orders in report_orders)
self.report_machine_names = set(report_machine_names)
self.report_machine_patterns = list(report_machine_patterns)
self.report_machine_rexes = [
re.compile(pattern)
for pattern in self.report_machine_patterns
]
self.data_table = None
self.requested_machine_ids = None
self.requested_machines = None
self.runs_at_index = None
self.warnings = []
def build(self, session):
# Build a per-testsuite list of the machines that match the specified
# patterns.
def should_be_in_report(machine):
if machine.name in self.report_machine_names:
return True
for rex in self.report_machine_rexes:
if rex.match(machine.name):
return True
self.requested_machines = dict(
(ts, list(filter(should_be_in_report, session.query(ts.Machine).all())))
for ts in self.testsuites)
self.requested_machine_ids = dict(
(ts, [m.id for m in machines])
for ts, machines in self.requested_machines.items()
)
# First, collect all the runs to summarize on, for each index in the
# report orders.
self.runs_at_index = []
for _, orders in self.report_orders:
# For each test suite...
runs = []
for ts in self.testsuites:
# Find all the orders that match.
result = session.query(ts.Order.id).\
filter(ts.Order.llvm_project_revision.in_(
orders)).all()
ts_order_ids = [id for id, in result]
            # Find all the runs that match those orders.
if not ts_order_ids:
ts_runs = []
else:
ts_runs = session.query(ts.Run).\
filter(ts.Run.order_id.in_(ts_order_ids)).\
filter(ts.Run.machine_id.in_(
self.requested_machine_ids[ts])).all()
if not ts_runs:
self.warnings.append(
'no runs for test suite %r in orders %r' % (
ts.name, orders))
runs.append((ts_runs, ts_order_ids))
self.runs_at_index.append(runs)
# Load the tests for each testsuite.
self.tests = dict((ts, dict((test.id, test)
for test in session.query(ts.Test)))
for ts in self.testsuites)
# Compute the base table for aggregation.
#
# The table is indexed by a test name and test features, which are
# either extracted from the test name or from the test run (depending
# on the suite).
#
        # Each value in the table contains an array with one item for each
        # report_order entry, which contains all of the samples for that
        # entry.
#
# The table keys are tuples of:
# (<test name>,
# <metric>, # Value is either 'Compile Time' or 'Execution Time'.
# <arch>,
# <build mode>, # Value is either 'Debug' or 'Release'.
# <machine id>)
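        # Illustrative sketch (hypothetical names and numbers): a single entry might be
        #   ('MultiSource/Benchmarks/foo', 'Execution Time', 'x86', 'Release', 'machine-1')
        #       -> [[0.31, 0.30], [0.28]]   # one sample list per report_orders entry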
self.data_table = {}
        self._build_data_table(session)
# Compute indexed data table by applying the indexing functions.
self._build_indexed_data_table()
# Normalize across all machines.
self._build_normalized_data_table()
# Build final organized data tables.
self._build_final_data_tables()
    def _build_data_table(self, session):
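        # Walk every run selected for each report-order column and turn its samples into
        # datapoints keyed by (test name, metric, arch, build mode, machine).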
def get_nts_datapoints_for_sample(ts, sample):
# Get the basic sample info.
run_id = sample[0]
machine_id = run_machine_id_map[run_id]
run_parameters = run_parameters_map[run_id]
# Get the test.
test = ts_tests[sample[1]]
# The test name for a sample in the NTS suite is just the name of
# the sample test.
test_name = test.name
# The arch and build mode are derived from the run flags.
arch = run_parameters['cc_target'].split('-')[0]
if '86' in arch:
arch = 'x86'
if run_parameters['OPTFLAGS'] == '-O0':
build_mode = 'Debug'
else:
build_mode = 'Release'
# Return a datapoint for each passing field.
for field_name, field, status_field in ts_sample_metric_fields:
# Ignore failing samples.
if status_field:
status_field_index = ts.get_field_index(status_field)
if sample[2 + status_field_index] == lnt.testing.FAIL:
continue
# Ignore missing samples.
field_index = ts.get_field_index(field)
value = sample[2 + field_index]
if value is None:
continue
# Otherwise, return a datapoint.
if field_name == 'compile_time':
metric = 'Compile Time'
else:
assert field_name == 'execution_time'
metric = 'Execution Time'
yield ((test_name, metric, arch, build_mode, machine_id),
value)
def get_compile_datapoints_for_sample(ts, sample):
# Get the basic sample info.
run_id = sample[0]
machine_id = run_machine_id_map[run_id]
run_parameters = run_parameters_map[run_id]
# Get the test.
test = ts_tests[sample[1]]
# Extract the compile flags from the test name.
base_name, flags = test.name.split('(')
assert flags[-1] == ')'
other_flags = []
build_mode = None
for flag in flags[:-1].split(','):
# If this is an optimization flag, derive the build mode from
# it.
if flag.startswith('-O'):
if '-O0' in flag:
build_mode = 'Debug'
else:
build_mode = 'Release'
continue
# If this is a 'config' flag, derive the build mode from it.
if flag.startswith('config='):
if flag == "config='Debug'":
build_mode = 'Debug'
else:
assert flag == "config='Release'"
build_mode = 'Release'
continue
# Otherwise, treat the flag as part of the test name.
other_flags.append(flag)
# Form the test name prefix from the remaining flags.
test_name_prefix = '%s(%s)' % (base_name, ','.join(other_flags))
# Extract the arch from the run info (and normalize).
arch = run_parameters['cc_target'].split('-')[0]
if arch.startswith('arm'):
arch = 'ARM'
elif '86' in arch:
arch = 'x86'
# The metric is fixed.
metric = 'Compile Time'
# Report the user and wall time.
for field_name, field, status_field in ts_sample_metric_fields:
if field_name not in ('user_time', 'wall_time'):
continue
# Ignore failing samples.
if status_field:
status_field_index = ts.get_field_index(status_field)
if sample[2 + status_field_index] == lnt.testing.FAIL:
continue
# Ignore missing samples.
field_index = ts.get_field_index(field)
value = sample[2 + field_index]
if value is None:
continue
# Otherwise, return a datapoint.
yield (('%s.%s' % (test_name_prefix, field_name), metric, arch,
build_mode, machine_id), value)
def get_datapoints_for_sample(ts, sample):
# The exact datapoints in each sample depend on the testsuite
if ts.name == 'nts':
return get_nts_datapoints_for_sample(ts, sample)
else:
assert ts.name == 'compile'
return get_compile_datapoints_for_sample(ts, sample)
# For each column...
for index, runs in enumerate(self.runs_at_index):
# For each test suite and run list...
for ts, (ts_runs, _) in zip(self.testsuites, runs):
ts_tests = self.tests[ts]
# Compute the metric fields.
ts_sample_metric_fields = [
(f.name, f, f.status_field)
for f in ts.Sample.get_metric_fields()]
# Compute a mapping from run id to run.
run_id_map = dict((r.id, r)
for r in ts_runs)
                # Compute a mapping from run id to machine name (used as the machine key below).
run_machine_id_map = dict((r.id, r.machine.name)
for r in ts_runs)
# Preload the run parameters.
run_parameters_map = dict((r.id, r.parameters)
for r in ts_runs)
# Load all the samples for all runs we are interested in.
columns = [ts.Sample.run_id, ts.Sample.test_id]
columns.extend(f.column for f in ts.sample_fields)
samples = session.query(*columns).filter(
ts.Sample.run_id.in_(list(run_id_map.keys())))
for sample in samples:
run = run_id_map[sample[0]]
datapoints = list()
for key, value in get_datapoints_for_sample(ts, sample):
items = self.data_table.get(key)
if items is None:
items = [[]
for _ in self.report_orders]
self.data_table[key] = items
items[index].append(value)
def _build_indexed_data_table(self):
def is_in_execution_time_filter(name):
for key in ("SPEC", "ClamAV", "lencod", "minisat", "SIBSim4",
"SPASS", "sqlite3", "viterbi", "Bullet"):
if key in name:
return True
def compute_index_name(key):
test_name, metric, arch, build_mode, machine_id = key
# If this is a nightly test..
if test_name.startswith('SingleSource/') or \
test_name.startswith('MultiSource/') or \
test_name.startswith('External/'):
# If this is a compile time test, aggregate all values into a
# cumulative compile time.
if metric == 'Compile Time':
return ('Lmark', metric, build_mode, arch, machine_id), Sum
# Otherwise, this is an execution time. Index the cumulative
# result of a limited set of benchmarks.
assert metric == 'Execution Time'
if is_in_execution_time_filter(test_name):
return ('Lmark', metric, build_mode, arch, machine_id), Sum
# Otherwise, ignore the test.
return
# Otherwise, we have a compile time suite test.
# Ignore user time results for now.
if not test_name.endswith('.wall_time'):
return
# Index full builds across all job sizes.
if test_name.startswith('build/'):
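                # e.g. a hypothetical name 'build/clang(j=4).wall_time' yields
                # project_name='clang' and subtest_name='wall_time'.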
project_name, subtest_name = re.match(
r'build/(.*)\(j=[0-9]+\)\.(.*)', str(test_name)).groups()
return (('Full Build (%s)' % (project_name,),
metric, build_mode, arch, machine_id),
NormalizedMean)
# Index single file tests across all inputs.
if test_name.startswith('compile/'):
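                # e.g. a hypothetical name 'compile/foo.cpp/syntax/().wall_time' yields
                # file_name='foo.cpp', stage_name='syntax', subtest_name='wall_time'.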
file_name, stage_name, subtest_name = re.match(
r'compile/(.*)/(.*)/\(\)\.(.*)', str(test_name)).groups()
return (('Single File (%s)' % (stage_name,),
metric, build_mode, arch, machine_id),
Mean)
# Index PCH generation tests by input.
if test_name.startswith('pch-gen/'):
file_name, subtest_name = re.match(
r'pch-gen/(.*)/\(\)\.(.*)', str(test_name)).groups()
return (('PCH Generation (%s)' % (file_name,),
metric, build_mode, arch, machine_id),
Mean)
# Otherwise, ignore the test.
return
def is_missing_samples(values):
for samples in values:
if not samples:
return True
self.indexed_data_table = {}
for key, values in self.data_table.items():
# Ignore any test which is missing some data.
if is_missing_samples(values):
self.warnings.append("missing values for %r" % (key,))
continue
# Select the median values.
medians = [lnt.util.stats.median(samples)
for samples in values]
# Compute the index name, and ignore unused tests.
result = compute_index_name(key)
if result is None:
continue
index_name, index_class = result
item = self.indexed_data_table.get(index_name)
if item is None:
self.indexed_data_table[index_name] = item = index_class()
item.append(medians)
def _build_normalized_data_table(self):
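        # Collapse the per-machine indexed values into a single aggregate per
        # (test name, metric, build mode, arch) key.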
self.normalized_data_table = {}
for key, indexed_value in self.indexed_data_table.items():
test_name, metric, build_mode, arch, machine_id = key
if test_name.startswith('Single File'):
aggr = Mean
else:
aggr = NormalizedMean
normalized_key = (test_name, metric, build_mode, arch)
item = self.normalized_data_table.get(normalized_key)
if item is None:
self.normalized_data_table[normalized_key] = \
item = aggr()
item.append(indexed_value.getvalue())
single_file_stage_order = [
'init', 'driver', 'syntax', 'irgen_only', 'codegen', 'assembly']
def _build_final_data_tables(self):
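        # Split the normalized results into the grouped plot table and the single-file
        # stage stack used by the report.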
self.grouped_table = {}
self.single_file_table = {}
for key, normalized_value in self.normalized_data_table.items():
test_name, metric, build_mode, arch = key
# If this isn't a single file test, add a plot for it grouped by
# metric and build mode.
group_key = (metric, build_mode)
if not test_name.startswith('Single File'):
items = self.grouped_table[group_key] = self.grouped_table.get(
group_key, [])
items.append((test_name, arch,
normalized_value.getvalue()))
continue
# Add to the single file stack.
stage_name, = re.match(r'Single File \((.*)\)', test_name).groups()
try:
stack_index = self.single_file_stage_order.index(stage_name)
except ValueError:
stack_index = None
# If we don't have an index for this stage, ignore it.
if stack_index is None:
continue
# Otherwise, add the last value to the single file stack.
stack = self.single_file_table.get(group_key)
if stack is None:
self.single_file_table[group_key] = stack = \
[None] * len(self.single_file_stage_order)
stack[stack_index] = normalized_value.getvalue()[-1]
# If this is the last single file stage, also add a plot for it.
if stage_name == self.single_file_stage_order[-1]:
items = self.grouped_table[group_key] = self.grouped_table.get(
group_key, [])
values = normalized_value.getvalue()
baseline = values[0]
items.append(('Single File Tests', arch,
[v/baseline for v in values]))
|
StarcoderdataPython
|