id
stringlengths 1
7
| text
stringlengths 6
1.03M
| dataset_id
stringclasses 1
value |
---|---|---|
3284621
|
<filename>ioflo/aid/test/test_eventing.py
# -*- coding: utf-8 -*-
"""
Unit Test Template
"""
from __future__ import absolute_import, division, print_function
import sys
import datetime
import unittest
import os
import time
from ioflo.aid.sixing import *
from ioflo.aid.odicting import odict
from ioflo.test import testing
from ioflo.aid.consoling import getConsole
from ioflo.aid.timing import iso8601, tuuid
console = getConsole()
from ioflo.aid import eventing
def setUpModule():
    """Module-level setup: set console verbosity once before any test runs."""
    console.reinit(verbosity=console.Wordage.concise)
def tearDownModule():
    """Module-level teardown: nothing to clean up."""
    pass
class BasicTestCase(unittest.TestCase):
    """
    Example TestCase
    """
    # NOTE: the test-method docstrings below are printed at runtime via
    # ``self.testX.__doc__`` (console.terse), so they are left unchanged.

    def setUp(self):
        """
        Call super if override so House Framer and Frame are setup correctly
        """
        super(BasicTestCase, self).setUp()

    def tearDown(self):
        """
        Call super if override so House Framer and Frame are torn down correctly
        """
        super(BasicTestCase, self).tearDown()

    def testTagify(self):
        """
        Test tagify function
        """
        console.terse("{0}\n".format(self.testTagify.__doc__))
        console.reinit(verbosity=console.Wordage.profuse)
        # No arguments: empty tag.
        tag = eventing.tagify()
        self.assertEqual(tag, u'')
        # head/tail may each be a string or a list of parts; parts are joined
        # with '.' by default, or with the ``sep`` argument if given.
        tag = eventing.tagify(head='exchange')
        self.assertEqual(tag, 'exchange')
        tag = eventing.tagify(head=['exchange', 'trade'])
        self.assertEqual(tag, 'exchange.trade')
        tag = eventing.tagify(head='exchange', tail='completed')
        self.assertEqual(tag, 'exchange.completed')
        tag = eventing.tagify(head='exchange', tail=['process', 'started'])
        self.assertEqual(tag, 'exchange.process.started')
        tag = eventing.tagify(head=['exchange', 'trade'], tail=['process', 'started'])
        self.assertEqual(tag, 'exchange.trade.process.started')
        tag = eventing.tagify(head=['exchange', 'trade'], tail='completed')
        self.assertEqual(tag, 'exchange.trade.completed')
        tag = eventing.tagify(head='exchange', tail=['process', 'started'], sep='/')
        self.assertEqual(tag, 'exchange/process/started')
        tag = eventing.tagify(tail=['process', 'started'])
        self.assertEqual(tag, 'process.started')
        console.reinit(verbosity=console.Wordage.concise)

    def testEventify(self):
        """
        Test eventify function
        """
        console.terse("{0}\n".format(self.testEventify.__doc__))
        console.reinit(verbosity=console.Wordage.profuse)
        dt = datetime.datetime.utcnow()
        stamp = dt.isoformat()
        # Sleep so an auto-generated stamp is strictly later than ``dt``.
        time.sleep(0.01)
        event = eventing.eventify('hello')
        self.assertEqual(event['tag'], 'hello')
        self.assertEqual(event['data'], {})
        #"YYYY-MM-DDTHH:MM:SS.mmmmmm"
        tdt = datetime.datetime.strptime(event['stamp'], "%Y-%m-%dT%H:%M:%S.%f")
        self.assertGreater(tdt, dt)
        # Explicit stamp is used verbatim.
        event = eventing.eventify(tag=eventing.tagify(head='exchange', tail='started'),
                                  stamp=stamp)
        self.assertEqual(event['tag'], 'exchange.started' )
        self.assertEqual(event['stamp'], stamp )
        # Explicit data payload is carried through unchanged.
        event = eventing.eventify(tag=eventing.tagify(tail='started', head='exchange'),
                                  stamp=stamp,
                                  data = odict(name='John'))
        self.assertEqual(event['tag'], 'exchange.started')
        self.assertEqual(event['stamp'], stamp)
        self.assertEqual(event['data'], {'name': 'John',})
        stamp = '2015-08-10T19:26:47.194736'
        event = eventing.eventify(tag='process.started', stamp=stamp, data={'name': 'Jill',})
        self.assertEqual(event, {'tag': 'process.started',
                                 'stamp': '2015-08-10T19:26:47.194736',
                                 'data': {'name': 'Jill',},})
        # Optional uid field.
        event = eventing.eventify(tag="with uid", stamp=stamp, uid="abcde")
        self.assertEqual(event, {'data': {},
                                 'stamp': '2015-08-10T19:26:47.194736',
                                 'tag': 'with uid',
                                 'uid': 'abcde'})
        console.reinit(verbosity=console.Wordage.concise)

    def testEventize(self):
        """
        Test eventize function
        """
        console.terse("{0}\n".format(self.testEventize.__doc__))
        console.reinit(verbosity=console.Wordage.profuse)
        stamp = iso8601()  # "YYYY-MM-DDTHH:MM:SS.mmmmmm"
        tuid = tuuid()  # "0000014ddf1f2f9c_5e36738"
        # Sleep so auto-generated stamp/uid sort strictly after the ones above.
        time.sleep(0.1)
        # Minimal call: only the tag is present.
        event = eventing.eventize('hello')
        self.assertEqual(event['tag'], 'hello')
        self.assertFalse('data' in event)
        self.assertFalse('stamp' in event)
        self.assertFalse('uid' in event)
        self.assertFalse('route' in event)
        # True for stamp/uid/data requests auto-generated/empty values.
        event = eventing.eventize(tag=eventing.tagify(head='exchange', tail='started'),
                                  stamp=True,
                                  uid=True,
                                  data=True,
                                  route=odict([("src", (None, None, None)),
                                               ("dst", (None, None, None))]))
        self.assertEqual(event['tag'], 'exchange.started')
        self.assertTrue('data' in event)
        self.assertIsInstance(event["data"], odict)
        self.assertEqual(event['data'], odict([]))
        self.assertTrue('stamp' in event)
        self.assertIsInstance(event["stamp"], str)
        self.assertGreater(event['stamp'], stamp)
        self.assertTrue('uid' in event)
        self.assertIsInstance(event["uid"], str)
        self.assertGreater(event['uid'], tuid)
        self.assertTrue('route' in event)
        self.assertEqual(event['route'] ,odict([("src", (None, None, None)),
                                                ("dst", (None, None, None))]))
        # Explicit stamp/uid/data values are used verbatim.
        event = eventing.eventize(tag=eventing.tagify(head='exchange', tail='started'),
                                  stamp=stamp,
                                  uid=tuid,
                                  data=odict(name="John"),
                                  route=odict([("src", (None, None, None)),
                                               ("dst", (None, None, None))]))
        self.assertEqual(event['tag'], 'exchange.started')
        self.assertEqual(event['data'], odict(name="John"))
        self.assertEqual(event['stamp'], stamp)
        self.assertEqual(event['uid'], tuid)
        self.assertEqual(event['route'] ,odict([("src", (None, None, None)),
                                                ("dst", (None, None, None))]))
        console.reinit(verbosity=console.Wordage.concise)
def runOne(test):
    """Run a single named test method of BasicTestCase."""
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(unittest.TestSuite([BasicTestCase(test)]))
def runSome():
    """Run a fixed subset of the BasicTestCase tests."""
    names = [
        'testTagify',
        'testEventify',
        'testEventize',
    ]
    suite = unittest.TestSuite([BasicTestCase(name) for name in names])
    unittest.TextTestRunner(verbosity=2).run(suite)
def runAll():
    """Run every test discovered on BasicTestCase."""
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    suite.addTest(loader.loadTestsFromTestCase(BasicTestCase))
    unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == '__main__' and __package__ is None:
    # Entry point when executed directly (not imported as part of a package):
    # run the explicit subset of tests; the alternatives are kept commented out.
    #console.reinit(verbosity=console.Wordage.concise)
    #runAll() #run all unittests
    runSome()  #only run some
    #runOne('testBasic')
|
StarcoderdataPython
|
59777
|
#
# VAZ Projects
#
#
# Author: <NAME> <<EMAIL>>
from django.conf import settings
# Markdown features enabled by default; each entry names a parser feature to
# turn on.  Projects may override the whole list via the ``MARKDOWN_FEATURES``
# Django setting.
MARKDOWN_FEATURES = getattr( settings, 'MARKDOWN_FEATURES', [
    # Block.
    'heading',
    'html_block',
    'list',

    # Inline.
    'emphasis',
    'html_inline',
    'image',
    'link',
    'newline',
])
# Options passed to the markdown renderer; overridable via the
# ``MARKDOWN_OPTIONS`` Django setting.
# Fix: this previously looked up the setting key 'MARKDOWN_FEATURES'
# (copy-paste error), so MARKDOWN_OPTIONS could never be overridden and a
# project-level features override would silently clobber the options dict.
MARKDOWN_OPTIONS = getattr( settings, 'MARKDOWN_OPTIONS', {
    'breaks': True,
    'html': True,
})
|
StarcoderdataPython
|
180447
|
<gh_stars>0
import re
from collections import namedtuple
from itertools import combinations
from queue import Queue
from typing import Iterable, List, Set
# A functional dependency: the attributes in ``requires`` determine ``creates``.
Rule = namedtuple('Rule', 'requires creates')


class Relation:
    """A relation schema: a set of attributes plus functional dependencies.

    Attributes are single letters.  Each functional dependency is a
    ``Rule(requires, creates)``: the attribute set ``requires`` functionally
    determines the attribute set ``creates``.
    """

    def __init__(self, elements=None, rules=None):
        # Functional dependencies of this relation.
        self.rules = rules or []  # type: List[Rule]
        # Attribute names (single letters).
        self.elements = elements or set()  # type: Set[str]

    def _ingest_lines(self, lines):
        """Parse schema lines.

        The first non-empty, non-comment line lists the attributes; every
        subsequent line must be a dependency of the form ``ab -> c``.

        :raises ValueError: on a dependency line without ``->``.
        """
        for line_no, line in enumerate(lines):
            if not line or line.startswith('#'):
                continue
            if not self.elements:
                # First meaningful line defines the attribute set.
                self.elements = {c for c in line.lower() if c.isalpha()}
            else:
                if '->' not in line:
                    raise ValueError('Syntax error in deps file on line {}: "{}"'.format(line_no + 1, line))
                a, b = map(str.strip, line.split('->'))
                requires = {c for c in a.lower() if c.isalpha()}
                creates = {c for c in b.lower() if c.isalpha()}
                self.rules.append(Rule(requires, creates))

    @classmethod
    def from_file(cls, filename):
        """Build a Relation from a deps file (one declaration per line)."""
        with open(filename) as f:
            lines = map(str.strip, f.read().split('\n'))
            self = cls()
            self._ingest_lines(lines)
            return self

    @classmethod
    def from_string(cls, string):
        """Build a Relation from a string; declarations split on ``;``, ``,`` or newline."""
        lines = re.split(r'[;,\n]+', string)
        self = cls()
        self._ingest_lines(lines)
        return self

    def with_rules(self, rules):
        """Return a new Relation over the same attributes but with ``rules``."""
        return Relation(self.elements, rules)

    def find_closure(self, items: Iterable) -> set:
        """Return the attribute closure of ``items`` under this relation's rules."""
        seen = set(items)
        rules = list(self.rules)
        last_seen_len = 0
        # Iterate to a fixed point; each rule can fire at most once (it is
        # removed from the working list when applied).
        while last_seen_len != len(seen):
            last_seen_len = len(seen)
            i = 0
            while i < len(rules):
                rule = rules[i]
                if len(rule.requires & seen) == len(rule.requires):
                    del rules[i]
                    seen.update(rule.creates)
                else:
                    i += 1
        return seen

    @staticmethod
    def _has_sub_key(all_keys, key):
        """Return True if any proper subset of ``key`` is also in ``all_keys``."""
        for n in range(len(key) - 1, -1, -1):
            for sub_key in map(frozenset, combinations(key, n)):
                if sub_key in all_keys:
                    return True
        return False

    def _get_all_sets(self, relation=None):
        """Yield every subset of the attributes (or of ``relation`` if given).

        Fix: the upper bound is ``len(elements) + 1`` so the full attribute
        set itself is yielded.  Previously it was excluded, so a relation
        whose only superkey is the full attribute set (e.g. one with no
        non-trivial dependencies) appeared to have no keys at all.  Callers
        that build dependencies from these sets are unaffected: the closure
        of the full set minus the full set is always empty.
        """
        elements = relation or self.elements
        for n in range(len(elements) + 1):
            for key in map(frozenset, combinations(elements, n)):
                yield key

    def find_all_keys(self) -> Set[frozenset]:
        """Return every superkey: each attribute set whose closure is all attributes."""
        all_keys = set()
        for key in self._get_all_sets():
            items = self.find_closure(key)
            if items == self.elements:
                all_keys.add(frozenset(key))
        return all_keys

    def find_candidate_keys(self) -> List[set]:
        """Return the minimal superkeys (candidate keys)."""
        all_keys = self.find_all_keys()
        candidate_keys = []
        for key in all_keys:
            if not self._has_sub_key(all_keys, key):
                candidate_keys.append(set(key))
        return candidate_keys

    def find_all_functional_deps(self) -> List[Rule]:
        """Return every non-trivial functional dependency implied by the rules."""
        rules = []
        for key in self._get_all_sets():
            items = self.find_closure(key)
            new_items = items - key
            if new_items:
                rules.append(Rule(set(key), set(new_items)))
        return rules

    def find_bcnf_violators(self) -> Iterable[Rule]:
        """Yield rules whose left-hand side is not a superkey (BCNF violations)."""
        all_keys = self.find_all_keys()
        for rule in self.rules:
            if frozenset(rule.requires) not in all_keys:
                yield rule

    def find_3nf_violators(self) -> Iterable[Rule]:
        """Yield BCNF violators whose right-hand side contains a non-prime attribute."""
        key_items = set()
        for rule in self.rules:
            key_items.update(rule.requires)
        for rule in self.find_bcnf_violators():
            if rule.creates & key_items != rule.creates:
                yield rule

    def find_minimal_basis(self) -> List[Rule]:
        """Return a minimal basis for the rules: singleton right-hand sides,
        no redundant left-hand-side attributes, no redundant rules.
        """
        obj = Relation(self.elements)
        rules = obj.rules
        # Split every rule into singleton right-hand sides.
        for rule in self.rules:
            for rhs in rule.creates:
                rules.append(Rule(rule.requires, {rhs}))
        is_modified = True
        while is_modified:
            is_modified = False
            i = 0
            while i < len(rules):
                rule = rules[i]
                if len(rule.requires) > 1:
                    # Try to shrink the left-hand side by one attribute.
                    for new_lhs in map(set, combinations(rule.requires, len(rule.requires) - 1)):
                        if obj.find_closure(new_lhs) == obj.find_closure(rule.requires):
                            rule = rules[i] = Rule(new_lhs, rule.creates)
                            is_modified = True
                            break
                # Drop the rule entirely if the remaining rules imply it.
                orig_closure = obj.find_closure(rule.requires)
                del rules[i]
                if obj.find_closure(rule.requires) == orig_closure:
                    is_modified = True
                else:
                    rules.insert(i, rule)
                    i += 1
        return rules

    def make_minimal(self):
        """Replace the rules with a minimal basis in place."""
        self.rules = self.find_minimal_basis()

    def compress(self):
        """Combine rules with the same inputs"""
        rules = {}
        for rule in self.rules:
            rules.setdefault(frozenset(rule.requires), set()).update(rule.creates)
        self.rules = []
        for requires, creates in rules.items():
            self.rules.append(Rule(requires, creates))

    def project(self, relation: set) -> 'Relation':
        """Project the dependencies onto the attribute subset ``relation``."""
        rules = []
        for lhs in self._get_all_sets(relation):
            items = self.find_closure(lhs)
            rhs = items & relation - lhs
            if rhs:
                rules.append(Rule(lhs, rhs))
        return Relation(relation, rules)

    def decompose_bcnf(self) -> List['Relation']:
        """Decompose into BCNF by repeatedly splitting on a violating rule."""
        relations = []
        violating_relations = Queue()
        violating_relations.put(self)
        while not violating_relations.empty():
            obj = violating_relations.get()  # type: Relation
            try:
                violator = next(iter(obj.find_bcnf_violators()))
                r1_elems = violator.requires | violator.creates
                r2_elems = obj.elements - violator.creates
                r1 = obj.project(r1_elems)
                r2 = obj.project(r2_elems)
                # r1.make_minimal()
                # r2.make_minimal()
                violating_relations.put(r1)
                violating_relations.put(r2)
            except StopIteration:
                # No violators left: this relation is in BCNF.
                relations.append(obj)
        return relations

    def decompose_3nf(self) -> List['Relation']:
        """Decompose into 3NF: one relation per rule of a minimal basis."""
        relations = []
        obj = self.with_rules(self.find_minimal_basis())
        obj.compress()
        for rule in obj.rules:
            relations.append(Relation(rule.requires | rule.creates, [rule]))
        return relations
|
StarcoderdataPython
|
3249495
|
from setuptools import setup, find_packages
# Read the long description for PyPI from the README.
with open("README.md", "r") as readmefile:
    package_description = readmefile.read()

setup(
    name="my-torch",
    version="0.0.6",
    author="<NAME>",
    author_email="<EMAIL>",
    description="A transparent boilerplate + bag of tricks to ease my (yours?) (our?) PyTorch dev time.",
    long_description=package_description,
    long_description_content_type="text/markdown",
    url="https://github.com/geraltofrivia/mytorch/",
    # src-layout: packages live under src/.
    packages=find_packages('src'),
    package_dir={'': 'src'},
    project_urls={
        "Source Code": "https://github.com/geraltofrivia/mytorch"
    },
    install_requires=['spacy', 'tqdm', 'numpy'],
    classifiers=[
        "Development Status :: 1 - Planning",
        "Programming Language :: Python :: 3",
        "Operating System :: Unix"
    ],
    keywords=[
        'deep learning', 'pytorch', 'boilerplate', 'machine learning', 'neural network', 'preprocessing'
    ]
)
|
StarcoderdataPython
|
1787574
|
<gh_stars>0
from __future__ import print_function
import numpy as np
from dynamic_graph.sot_talos_balance.dcm_controller import DcmController
from numpy.testing import assert_almost_equal
controller = DcmController("ciao")
print("\nSignals (at creation):")
controller.displaySignals()
Kp = np.array([10.0, 10.0, 0.0])
Ki = np.array([1.0, 1.0, 0.0])
omega = 1
mass = 1
com = np.array([0.0, 0.0, 1.0])
dcm = np.array(3 * [0.0])
dcmDes = np.array(3 * [0.0])
zmpDes = np.array(3 * [0.0])
decayFactor = 0.1
controller.Kp.value = Kp
controller.Ki.value = Ki
controller.omega.value = omega
controller.mass.value = mass
controller.com.value = com
controller.dcm.value = dcm
controller.dcmDes.value = dcmDes
controller.zmpDes.value = zmpDes
controller.decayFactor.value = decayFactor
print()
print("Kp: %s" % (controller.Kp.value, ))
print("Ki: %s" % (controller.Ki.value, ))
print("omega: %s" % (controller.omega.value, ))
print("mass: %s" % (controller.mass.value, ))
print("com: %s" % (controller.com.value, ))
print("dcm: %s" % (controller.dcm.value, ))
print("dcmDes: %s" % (controller.dcmDes.value, ))
print("zmpDes: %s" % (controller.dcmDes.value, ))
print("decayFactor: %s" % (controller.decayFactor.value, ))
print("\n--------------------")
dt = 1
controller.init(dt)
controller.wrenchRef.recompute(0)
zmpRef = np.array(3 * [0.0])
wrenchRef = np.array([0.0, 0.0, 9.81, 0.0, 0.0, 0.0])
print()
print("zmpRef: %s" % (controller.zmpRef.value, ))
assert_almost_equal(controller.zmpRef.value, zmpRef)
print("wrenchRef: %s" % (controller.wrenchRef.value, ))
assert_almost_equal(controller.wrenchRef.value, wrenchRef)
print("\n--------------------")
dcmDes = np.array([1.0, 0.0, 0.0])
controller.dcmDes.value = dcmDes
print("dcmDes: %s" % (controller.dcmDes.value, ))
controller.wrenchRef.recompute(1)
zmpRef = np.array([-11.0, 0.0, 0.0])
wrenchRef = np.array([11.0, 0.0, 9.81, 0.0, float(com[2] * 11), 0.0])
print()
print("zmpRef: %s" % (controller.zmpRef.value, ))
assert_almost_equal(controller.zmpRef.value, zmpRef)
print("wrenchRef: %s" % (controller.wrenchRef.value, ))
assert_almost_equal(controller.wrenchRef.value, wrenchRef)
print("\n--------------------")
controller.dcmDes.time += 1
controller.zmpRef.recompute(2)
controller.wrenchRef.recompute(2)
zmpRef = np.array([-12.0, 0.0, 0.0])
wrenchRef = np.array([12.0, 0.0, 9.81, 0.0, float(com[2] * 12), 0.0])
print()
print("zmpRef: %s" % (controller.zmpRef.value, ))
assert_almost_equal(controller.zmpRef.value, zmpRef)
print("wrenchRef: %s" % (controller.wrenchRef.value, ))
assert_almost_equal(controller.wrenchRef.value, wrenchRef)
|
StarcoderdataPython
|
1600596
|
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, SelectField, TextAreaField, PasswordField, BooleanField
from wtforms.validators import DataRequired, Regexp, EqualTo
# Login form.
class loginForm(FlaskForm):
    # Labels and validator messages are user-facing Chinese strings;
    # they are part of the UI and kept byte-identical.
    phone = StringField('电话号码', validators=[DataRequired()])
    password = PasswordField('密码', validators=[DataRequired()])
    remember_me = BooleanField('下次自动登陆(请不要在公用电脑勾选此项)')
    submit = SubmitField('登陆')
# Stock-in (purchase) order form.
class stock_trade_fields(FlaskForm):
    trade_name = StringField('订单名称:', validators=[DataRequired(message='为此次入库起个名字')])
    prod_id = SelectField('选择产品:', validators=[DataRequired(message='必须选择产品哦')])
    funding_pool_id = SelectField('选择本次资金流向:', validators=[DataRequired(message='选择对应资金池')])
    # Quantity must be a whole number.
    num = StringField('填写数量:', validators=[Regexp(r'^[0-9]*$', message='必须填写整数')])
    # Unit price: optionally signed decimal or integer.
    unit_price = StringField('进货单价:', validators=[Regexp(r'^-?([1-9]\d*\.\d*|0\.\d*[1-9]\d*|0?\.0+|0)$|^-?[1-9]\d*$',
                                                     message='进货单价必须为数字')])
    remark = TextAreaField('订单备注:')
    submit = SubmitField('确定入库')
# Sales order form.
class sale_trade_fields(FlaskForm):
    trade_name = StringField('订单名称:', validators=[DataRequired(message='为这笔销售起个名字')])
    prod_id = SelectField('选择产品:', validators=[DataRequired(message='必须选择产品哦')])
    funding_pool_id = SelectField('选择本次资金流向:', validators=[DataRequired(message='选择对应资金池')])
    # Quantity must be a whole number.
    num = StringField('填写数量:', validators=[Regexp(r'^[0-9]*$', message='必须填写整数')])
    # Unit price: optionally signed decimal or integer.
    unit_price = StringField('销售单价:', validators=[Regexp(r'^-?([1-9]\d*\.\d*|0\.\d*[1-9]\d*|0?\.0+|0)$|^-?[1-9]\d*$',
                                                     message='销售单价必须为数字')])
    remark = TextAreaField('订单备注:')
    developer_info = TextAreaField('发展人信息:')
    submit = SubmitField('确定售出')
# Expense record form.
class expensive_trade_fields(FlaskForm):
    trade_name = StringField('订单名称:', validators=[DataRequired(message='为这笔花费起个名字')])
    # Expense amount: optionally signed decimal or integer.
    fee = StringField('花费金额:', validators=[Regexp(r'^-?([1-9]\d*\.\d*|0\.\d*[1-9]\d*|0?\.0+|0)$|^-?[1-9]\d*$',
                                              message='花费金额必须为数字')])
    remark = TextAreaField('请填写详细备注信息')
    submit = SubmitField('提交')
# Product catalogue addition form.
class add_prod_fields(FlaskForm):
    prod_name = StringField('产品名称:', validators=[DataRequired(message='必须填写产品名称')])
    jiuzhuang = StringField('酒庄:', validators=[DataRequired(message='必须填写酒庄')])
    xilie = StringField('系列:', validators=[DataRequired(message='必须填写系列')])
    remark = TextAreaField('订单备注:')
    submit = SubmitField('确定增加')
# Password change form.
class change_password_fields(FlaskForm):
    phone = StringField('电话号码', validators=[DataRequired()])
    # NOTE(review): the '<PASSWORD>' labels look like anonymization
    # placeholders from a dataset scrub — confirm the original label text.
    passwordOld = PasswordField('<PASSWORD>', validators=[DataRequired()])
    passwordNew = PasswordField('<PASSWORD>', validators=[DataRequired()])
    passwordNewConfirm = PasswordField('确认新密码', validators=[EqualTo('passwordNew', message='两次输入密码不一致')])
    submit = SubmitField('确定修改')
|
StarcoderdataPython
|
1758326
|
# -*- coding: utf-8 -*-
"""Implementation of the ``somatic_target_seq_cnv_calling`` step
This step allows for the detection of CNV events for cancer samples from targeted sequenced (e.g.,
exomes or large panels). The wrapped tools start from the aligned reads (thus off ``ngs_mapping``)
and generate CNV calls for somatic variants.
The wrapped tools implement different strategies. Some work "reference free" and just use the
somatic BAM files for their input, some work in "matched cancer normal mode" and need the cancer
and normal BAM files, others again need both normal and cancer BAM files, and additionally a
set of non-cancer BAM files for their background.
==========
Step Input
==========
Gene somatic CNV calling for targeted sequencing starts off the aligned reads, i.e.,
``ngs_mapping``.
===========
Step Output
===========
Generally, the following links are generated to ``output/``.
.. note:: Tool-Specific Output
As the only integrated tool is cnvkit at the moment, the output is very tailored to the result
of this tool. In the future, this section will contain "common" output and tool-specific
output sub sections.
- ``{mapper}.cnvkit.export.{lib_name}-{lib_pk}/out/``
- ``{mapper}.cnvkit.export.{lib_name}-{lib_pk}.bed``
- ``{mapper}.cnvkit.export.{lib_name}-{lib_pk}.seg``
- ``{mapper}.cnvkit.export.{lib_name}-{lib_pk}.vcf.gz``
- ``{mapper}.cnvkit.export.{lib_name}-{lib_pk}.vcf.gz.tbi``
- ``{mapper}.cnvkit.plot.{lib_name}-{lib_pk}/out``
- ``{mapper}.cnvkit.plot.{lib_name}-{lib_pk}.diagram.pdf``
- ``{mapper}.cnvkit.plot.{lib_name}-{lib_pk}.scatter.pdf``
- ``{mapper}.cnvkit.plot.{lib_name}-{lib_pk}.heatmap.pdf``
- ``{mapper}.cnvkit.plot.{lib_name}-{lib_pk}.heatmap.chr1.pdf``
- ...
- ``{mapper}.cnvkit.plot.{lib_name}-{lib_pk}.scatter.chrX.pdf``
- ``{mapper}.cnvkit.report.{lib_name}-{lib_pk}/out``
- ``{mapper}.cnvkit.plot.{lib_name}-{lib_pk}.breaks.txt``
- ``{mapper}.cnvkit.plot.{lib_name}-{lib_pk}.gainloss.txt``
- ``{mapper}.cnvkit.plot.{lib_name}-{lib_pk}.gender.txt``
- ``{mapper}.cnvkit.plot.{lib_name}-{lib_pk}.metrics.txt``
- ``{mapper}.cnvkit.plot.{lib_name}-{lib_pk}.segmetrics.txt``
For example:
::
output/
|-- bwa.cnvkit.export.P001-T1-DNA1-WES1-000007
| `-- out
| |-- bwa.cnvkit.export.P001-T1-DNA1-WES1-000007.bed
| |-- bwa.cnvkit.export.P001-T1-DNA1-WES1-000007.seg
| `-- bwa.cnvkit.export.P001-T1-DNA1-WES1-000007.vcf
|-- bwa.cnvkit.plot.P002-T1-DNA1-WES1-000016
| `-- out
| |-- bwa.cnvkit.plot.P002-T1-DNA1-WES1-000016.diagram.pdf
| |-- bwa.cnvkit.plot.P002-T1-DNA1-WES1-000016.heatmap.pdf
| |-- bwa.cnvkit.plot.P002-T1-DNA1-WES1-000016.scatter.pdf
| |-- bwa.cnvkit.plot.P002-T1-DNA1-WES1-000016.heatmap.chr1.pdf
| |-- ...
| `-- bwa.cnvkit.plot.P002-T1-DNA1-WES1-000016.scatter.chrX.pdf
|-- bwa.cnvkit.report.P002-T1-DNA1-WES1-000016
| `-- out
| |-- bwa.cnvkit.report.P002-T1-DNA1-WES1-000016.breaks.txt
| |-- bwa.cnvkit.report.P002-T1-DNA1-WES1-000016.gainloss.txt
| |-- bwa.cnvkit.report.P002-T1-DNA1-WES1-000016.gender.txt
| |-- bwa.cnvkit.report.P002-T1-DNA1-WES1-000016.metrics.txt
| `-- bwa.cnvkit.report.P002-T1-DNA1-WES1-000016.segmetrics.txt
[...]
=====================
Default Configuration
=====================
The default configuration is as follows.
.. include:: DEFAULT_CONFIG_somatic_targeted_seq_cnv_calling.rst
=====================================
Available Somatic Targeted CNV Caller
=====================================
- ``cnvkit``
"""
from collections import OrderedDict
import itertools
import os
import os.path
import sys
from biomedsheets.shortcuts import CancerCaseSheet, is_not_background
from snakemake.io import expand
from snappy_pipeline.utils import dictify, listify
from snappy_pipeline.workflows.abstract import BaseStep, BaseStepPart, LinkOutStepPart
from snappy_pipeline.workflows.ngs_mapping import NgsMappingWorkflow
__author__ = "<NAME> <<EMAIL>.de>"
#: Default configuration for the somatic_targeted_seq_cnv_calling step
DEFAULT_CONFIG = r"""
# Default configuration somatic_targeted_seq_cnv_calling
step_config:
somatic_targeted_seq_cnv_calling:
tools: ['cnvkit']
path_ngs_mapping: ../ngs_mapping # REQUIRED
cnvkit:
path_target_regions: REQUIRED # REQUIRED
seg_method: haar
seg_threshold: 0.0001
# BCBIO uses
# seg_method: haar
# seg_threshold: 0.0001
# -- OR
# seg_method: cbs
# seg_threshold: 0.000001
copywriter:
path_target_regions: REQUIRED # REQUIRED
bin_size: 20000 # TODO: make actually configurable
plot_genes: REQUIRED # Path to civic annotation
genome: hg19 # Could be hg38 (consider setting prefix to 'chr' when using GRCh38.v1)
features: EnsDb.Hsapiens.v75::EnsDb.Hsapiens.v75
prefix: ''
nThread: 8
cnvetti_on_target:
path_target_regions: REQUIRED # REQUIRED
cnvetti_off_target:
path_target_regions: REQUIRED # REQUIRED
window_length: 20000
"""
#: JSON key for "isCancer"
KEY_IS_CANCER = "isCancer"
#: Value for "libraryType" is whole exome sequencing
VALUE_WES = "WES"
#: Value for "libraryType" is panel sequencing
VALUE_PANEL = "Panel-seq"
#: Values for targeted sequencing
VALUES_TARGETED_SEQ = (VALUE_WES, VALUE_PANEL)
#: Standard key/extension values for BCF files
BCF_KEY_EXTS = (
("bcf", ".bcf"),
("bcf_md5", ".bcf.md5"),
("csi", ".bcf.csi"),
("csi_md5", ".bcf.csi.md5"),
)
class SomaticTargetedSeqCnvCallingStepPart(BaseStepPart):
    """Shared code for all caller classes in somatic_targeted_seq_cnv_calling"""

    def __init__(self, parent):
        super().__init__(parent)
        # Map each tumor DNA NGS library name to its matched tumor/normal
        # sample pair, collected across all shortcut sheets.
        pairs = OrderedDict()
        for sheet in self.parent.shortcut_sheets:
            pairs.update(sheet.all_sample_pairs_by_tumor_dna_ngs_library)
        self.tumor_ngs_library_to_sample_pair = pairs

    def get_normal_lib_name(self, wildcards):
        """Return name of normal (non-cancer) library"""
        sample_pair = self.tumor_ngs_library_to_sample_pair[wildcards.library_name]
        return sample_pair.normal_sample.dna_ngs_library.name
class CnvettiStepPartBase(SomaticTargetedSeqCnvCallingStepPart):
    """Perform somatic targeted CNV calling using CNVetti; shared code.

    We group the CNVetti pipeline into three steps:

    ``coverage``
        Compute target-wise normalized coverage of matched tumor and normal sample, the result is
        a BCF file describing the (log)fold coverage of the tumor in relation to the normal.

    ``segment``
        Perform segmentation of the (log)fold coverage.

    ``postprocess``
        Postprocessing of the segmentation, annotation with copy state and gene-wise coverage.
    """

    # Actions supported by this step part, in pipeline order.
    actions = ("coverage", "segment", "postprocess")

    def __init__(self, parent):
        super().__init__(parent)
        # Per-action name pattern to use in paths; ``self.name`` is supplied
        # by the concrete subclass (e.g. "cnvetti_on_target").  The doubled
        # braces survive the %-formatting as literal "{mapper}"/"{library_name}"
        # wildcards, while {action} is filled in later via str.format.
        self.name_pattern = "{{mapper}}.%(name)s_{action}.{{library_name}}" % {"name": self.name}

    def get_input_files(self, action):
        """Return input function for the given action.

        Actually delegates to the appropriate ``_get_input_files_{action}`` function.  The
        "coverage" action takes as input the BAI-indexed BAM files of the matched tumor/normal
        pairs, the other actions take as input the output of the previous actions.
        """
        assert action in self.actions, "Invalid action"
        return getattr(self, "_get_input_files_{action}".format(action=action))()

    def _get_input_files_coverage(self):
        # Inputs for "coverage": tumor and normal BAM+BAI from ngs_mapping.
        @dictify
        def input_function(wildcards):
            """Helper wrapper function"""
            # Get shorcut to Snakemake sub workflow
            ngs_mapping = self.parent.sub_workflows["ngs_mapping"]
            # Get names of primary libraries of the selected cancer bio sample and the
            # corresponding primary normal sample
            normal_base_path = (
                "output/{mapper}.{normal_library}/out/{mapper}.{normal_library}".format(
                    normal_library=self.get_normal_lib_name(wildcards), **wildcards
                )
            )
            tumor_base_path = (
                "output/{mapper}.{library_name}/out/" "{mapper}.{library_name}"
            ).format(**wildcards)
            yield "normal_bam", ngs_mapping(normal_base_path + ".bam")
            yield "normal_bai", ngs_mapping(normal_base_path + ".bam.bai")
            yield "tumor_bam", ngs_mapping(tumor_base_path + ".bam")
            yield "tumor_bai", ngs_mapping(tumor_base_path + ".bam.bai")
        return input_function

    def _get_input_files_segment(self):
        # "segment" consumes exactly the output of "coverage".
        @dictify
        def input_function(wildcards):
            for key, value in self._get_output_files_coverage().items():
                yield key, value.format(**wildcards)
        return input_function

    def _get_input_files_postprocess(self):
        # "postprocess" consumes exactly the output of "segment".
        @dictify
        def input_function(wildcards):
            for key, value in self._get_output_files_segment().items():
                yield key, value.format(**wildcards)
        return input_function

    def get_output_files(self, action):
        """Return output files for the given action.

        Actually delegates to the appropriate ``_get_output_files_{action}`` function; refer to
        documentation of the individual functions for more details.
        """
        assert action in self.actions, "Invalid action"
        return getattr(self, "_get_output_files_{action}".format(action=action))()

    @dictify
    def _get_output_files_coverage(self):
        """The "coverage" action creates a BCF file (CSI+MD5 files) with an
        entry for each target.
        """
        name_pattern = self.name_pattern.format(action="coverage")
        for key, ext in BCF_KEY_EXTS:
            yield key, os.path.join("work", name_pattern, "out", name_pattern + ext)

    @dictify
    def _get_output_files_segment(self):
        """The "segment" action creates a BCF file (CSI+MD5 files) with an entry for each target
        (infix ``.targets``) and also for each segment (infix ``.segments``).
        """
        name_pattern = self.name_pattern.format(action="segment")
        for infix in ("targets", "segments"):
            for key, ext in BCF_KEY_EXTS:
                name = "{}_{}".format(infix, key)
                yield name, os.path.join(
                    "work", name_pattern, "out", name_pattern + "." + infix + ext
                )

    @dictify
    def _get_output_files_postprocess(self):
        """The "postprocess" action creates the following text files (consumed by the export to
        cBioPortal):

        ``bins``
            The per-bin log-fold change information.

        ``segments``
            Per-segment log-fold change information.

        ``gene_call``
            Per-gene pseudo-GISTIC scores.

        ``gene_log2``
            Per-gene log-fold change information.
        """
        name_pattern = self.name_pattern.format(action="postprocess")
        for infix in ("targets", "targets_segmented", "segments", "gene_call", "gene_log2"):
            for key, ext in (("txt", ".txt"), ("md5", ".txt.md5")):
                name = "{}_{}".format(infix, key)
                yield name, os.path.join(
                    "work", name_pattern, "out", name_pattern + "_" + infix + ext
                )

    def check_config(self):
        """Check configuration"""
        if self.name not in self.config["tools"]:
            return  # skip check
        self.parent.ensure_w_config(
            ("step_config", "somatic_targeted_seq_cnv_calling", self.name, "path_target_regions"),
            "Path to target regions is missing for {}".format(self.name),
        )

    @dictify
    def _get_log_file(self, action):
        """Return path to log file for the given action"""
        assert action in self.actions, "Invalid action"
        name_pattern = self.name_pattern.format(action=action)
        key_ext = (
            ("log", ".log"),
            ("conda_info", ".conda_info.txt"),
            ("conda_list", ".conda_list.txt"),
        )
        for key, ext in key_ext:
            yield key, os.path.join("work", name_pattern, "log", name_pattern + ext)

    def update_cluster_config(self, cluster_config):
        """Update cluster configuration with resource usage limits for
        scheduling
        """
        for action in self.actions:
            key = "somatic_targeted_seq_cnv_calling_{tool}_{action}".format(
                tool=self.name, action=action
            )
            cluster_config[key] = {"mem": 7500, "time": "24:00", "ntasks": 1}
class CnvettiOffTargetStepPart(CnvettiStepPartBase):
    """Perform somatic targeted CNV calling using CNVetti with off-target reads."""

    # Step part name; used in path patterns and config lookup by the base class.
    name = "cnvetti_off_target"
class CnvettiOnTargetStepPart(CnvettiStepPartBase):
    """Perform somatic targeted CNV calling using CNVetti with on-target reads."""

    # Step part name; used in path patterns and config lookup by the base class.
    name = "cnvetti_on_target"
def expand_id(*args):
    """Return a dict mapping each given name to a one-element list holding its placeholder.

    Suitable for passing into ``expand`` to get identity for the given values::

        >> expand_id('foo', 'bar')
        {'foo': ['{foo}'], 'bar': ['{bar}']}
    """
    result = {}
    for name in args:
        result[name] = ["{{{}}}".format(name)]
    return result
def format_id(*args):
    """Return a dict mapping each given name to its placeholder string.

    Suitable for passing into ``format`` to get identity for the given values::

        >> format_id('foo', 'bar')
        {'foo': '{foo}', 'bar': '{bar}'}
    """
    return dict((name, "{{{}}}".format(name)) for name in args)
class CnvKitStepPart(SomaticTargetedSeqCnvCallingStepPart):
"""Perform somatic targeted CNV calling using cnvkit"""
name = "cnvkit"
def __init__(self, parent):
    # No cnvkit-specific state; defer entirely to the shared step-part setup.
    super().__init__(parent)
def check_config(self):
    """Check configuration for cnvkit"""
    if "cnvkit" not in self.config["tools"]:
        return  # cnvkit not enabled, skip
    # cnvkit is enabled: the target-regions path is mandatory.
    self.parent.ensure_w_config(
        ("step_config", "somatic_targeted_seq_cnv_calling", "cnvkit", "path_target_regions"),
        "Path to target regions is missing for cnvkit",
    )
def get_input_files(self, action):
    """Return input paths input function, dependent on rule"""
    # Dispatch table: action name -> input-function builder.
    # "access" has no inputs, hence None.
    method_mapping = {
        "access": None,
        "target": self._get_input_files_target,
        "antitarget": self._get_input_files_antitarget,
        "coverage": self._get_input_files_coverage,
        "reference": self._get_input_files_reference,
        "call": self._get_input_files_call,
        "fix": self._get_input_files_fix,
        "segment": self._get_input_files_segment,
        "export": self._get_input_files_export,
        "plot": self._get_input_files_plot,
        "report": self._get_input_files_report,
    }
    assert action in method_mapping, "Unknown action"
    return method_mapping[action]
def _get_input_files_target(self, _):
    """Input for the "target" action: the accessible-regions BED only."""
    input_files = {"access": "work/cnvkit.access/out/access.bed"}
    return input_files
def _get_input_files_antitarget(self, _):
    """Input for the "antitarget" action: access BED plus target BED."""
    input_files = {
        "access": "work/cnvkit.access/out/access.bed",
        "target": "work/cnvkit.target/out/target.bed",
    }
    return input_files
def _get_input_files_coverage(self, wildcards):
    """Input for the "coverage" action: target/antitarget BEDs plus the
    library's BAM+BAI from the ngs_mapping sub workflow.
    """
    input_files = {
        "target": "work/cnvkit.target/out/target.bed",
        "antitarget": "work/cnvkit.antitarget/out/antitarget.bed",
    }
    # BAM/BAI file
    ngs_mapping = self.parent.sub_workflows["ngs_mapping"]
    base_path = "output/{mapper}.{library_name}/out/{mapper}.{library_name}".format(**wildcards)
    input_files["bam"] = ngs_mapping(base_path + ".bam")
    input_files["bai"] = ngs_mapping(base_path + ".bam.bai")
    return input_files
def _get_input_files_reference(self, wildcards):
    """Input for the "reference" action: target/antitarget BEDs plus the
    matched NORMAL library's BAM+BAI (reference is built from the normal).
    """
    input_files = {
        "target": "work/cnvkit.target/out/target.bed",
        "antitarget": "work/cnvkit.antitarget/out/antitarget.bed",
    }
    # BAM/BAI file
    ngs_mapping = self.parent.sub_workflows["ngs_mapping"]
    base_path = "output/{mapper}.{normal_library}/out/{mapper}.{normal_library}".format(
        normal_library=self.get_normal_lib_name(wildcards), **wildcards
    )
    input_files["bam"] = ngs_mapping(base_path + ".bam")
    input_files["bai"] = ngs_mapping(base_path + ".bam.bai")
    return input_files
def _get_input_files_fix(self, wildcards):
tpl_base = "{mapper}.cnvkit.{substep}.{library_name}"
tpl = "work/" + tpl_base + "/out/" + tpl_base + ".cnn"
input_files = {"ref": tpl.format(substep="reference", **wildcards)}
tpl = "work/" + tpl_base + "/out/" + tpl_base + ".{target}coverage.cnn"
for target in ("target", "antitarget"):
input_files[target] = tpl.format(target=target, substep="coverage", **wildcards)
return input_files
def _get_input_files_segment(self, wildcards):
cnr_pattern = (
"work/{mapper}.cnvkit.fix.{library_name}/out/{mapper}.cnvkit.fix.{library_name}.cnr"
)
input_files = {"cnr": cnr_pattern.format(**wildcards)}
return input_files
def _get_input_files_call(self, wildcards):
segment_pattern = (
"work/{mapper}.cnvkit.segment.{library_name}/out/"
"{mapper}.cnvkit.segment.{library_name}.cns"
)
input_files = {"segment": segment_pattern.format(**wildcards)}
return input_files
def _get_input_files_export(self, wildcards):
cns_pattern = (
"work/{mapper}.cnvkit.call.{library_name}/out/{mapper}.cnvkit.call.{library_name}.cns"
)
input_files = {"cns": cns_pattern.format(**wildcards)}
return input_files
def _get_input_files_plot(self, wildcards):
tpl = (
"work/{mapper}.cnvkit.{substep}.{library_name}/out/"
"{mapper}.cnvkit.{substep}.{library_name}.{ext}"
)
input_files = {
"cnr": tpl.format(substep="fix", ext="cnr", **wildcards),
"cns": tpl.format(substep="segment", ext="cns", **wildcards),
}
return input_files
def _get_input_files_report(self, wildcards):
return self._get_input_files_plot(wildcards)
def get_output_files(self, action):
"""Return output files for the given action"""
method_mapping = {
"access": self._get_output_files_access,
"target": self._get_output_files_target,
"antitarget": self._get_output_files_antitarget,
"coverage": self._get_output_files_coverage,
"reference": self._get_output_files_reference,
"fix": self._get_output_files_fix,
"call": self._get_output_files_call,
"segment": self._get_output_files_segment,
"export": self._get_output_files_export,
"plot": self._get_output_files_plot,
"report": self._get_output_files_report,
}
assert action in method_mapping, "Unknown action"
return method_mapping[action]()
def _get_output_files_access(self):
return "work/cnvkit.access/out/access.bed"
def _get_output_files_target(self):
return "work/cnvkit.target/out/target.bed"
def _get_output_files_antitarget(self):
return "work/cnvkit.antitarget/out/antitarget.bed"
def _get_output_files_coverage(self):
name_pattern = "{mapper}.cnvkit.coverage.{library_name}"
output_files = {}
for target in ("target", "antitarget"):
output_files[target] = os.path.join(
"work", name_pattern, "out", name_pattern + ".{}coverage.cnn".format(target)
)
return output_files
def _get_output_files_reference(self):
name_pattern = "{mapper}.cnvkit.reference.{library_name}"
tpl = os.path.join("work", name_pattern, "out", name_pattern + ".cnn")
return tpl
def _get_output_files_fix(self):
name_pattern = "{mapper}.cnvkit.fix.{library_name}"
tpl = os.path.join("work", name_pattern, "out", name_pattern + ".cnr")
return tpl
def _get_output_files_segment(self):
name_pattern = "{mapper}.cnvkit.segment.{library_name}"
tpl = os.path.join("work", name_pattern, "out", name_pattern + ".cns")
return tpl
def _get_output_files_call(self):
name_pattern = "{mapper}.cnvkit.call.{library_name}"
tpl = os.path.join("work", name_pattern, "out", name_pattern + ".cns")
return tpl
    @dictify
    def _get_output_files_plot(self):
        """Yield (key, path) pairs for the global and per-chromosome plot PDFs."""
        plots = ("scatter", "diagram", "heatmap")
        # Chromosomes 1-22 plus the sex chromosomes.
        chroms = list(itertools.chain(range(1, 23), ["X", "Y"]))
        # Yield file name pairs for global plots
        tpl = (
            "work/{mapper}.cnvkit.plot.{library_name}/out/"
            "{mapper}.cnvkit.plot.{library_name}.{plot}.pdf"
        )
        # NOTE(review): format_id presumably maps each name to its own "{name}"
        # placeholder so mapper/library_name survive the format() call — confirm.
        yield from (
            (plot, tpl.format(plot=plot, **format_id("mapper", "library_name"))) for plot in plots
        )
        # Yield file name pairs for the chromosome-wise plots
        chrom_plots = ("scatter", "heatmap")
        tpl_chrom = (
            "work/{mapper}.cnvkit.plot.{library_name}/out/"
            "{mapper}.cnvkit.plot.{library_name}.{plot}.chr{chrom}.pdf"
        )
        yield from (
            (
                "{plot}_chr{chrom}".format(plot=plot, chrom=chrom),
                tpl_chrom.format(plot=plot, chrom=chrom, **format_id("mapper", "library_name")),
            )
            for plot in chrom_plots
            for chrom in chroms
        )
def _get_output_files_export(self):
keys = ("bed", "seg", "vcf", "tbi")
exts = ("bed", "seg", "vcf.gz", "vcf.gz.tbi")
name_pattern = "{mapper}.cnvkit.export.{library_name}"
tpl = os.path.join("work", name_pattern, "out", name_pattern + ".{ext}")
output_files = {}
for key, ext in zip(keys, exts):
output_files[key] = tpl.format(ext=ext, **format_id("mapper", "library_name"))
return output_files
    @dictify
    def _get_output_files_report(self):
        """Yield (key, path) pairs for the textual cnvkit report files."""
        reports = ("breaks", "gainloss", "gender", "metrics", "segmetrics")
        tpl = (
            "work/{mapper}.cnvkit.report.{library_name}/out/"
            "{mapper}.cnvkit.report.{library_name}.{report}.txt"
        )
        # NOTE(review): format_id presumably preserves the mapper/library_name
        # placeholders through format() — confirm against its definition.
        yield from (
            (report, tpl.format(report=report, **format_id("mapper", "library_name")))
            for report in reports
        )
def get_log_file(self, action):
"""Return path to log file for the given action"""
prefix = None
if action in ("access", "target", "antitarget"):
prefix = "work/cnvkit.{action}/log/cnvkit.{action}"
elif action in (
"coverage",
"reference",
"fix",
"call",
"segment",
"export",
"plot",
"report",
):
prefix = (
"work/{{mapper}}.cnvkit.{action}.{{library_name}}/log/"
"{{mapper}}.cnvkit.{action}.{{library_name}}"
)
else:
raise ValueError("Unknown action {}".format(action))
prefix = prefix.format(action=action)
key_ext = (
("log", ".log"),
("conda_info", ".conda_info.txt"),
("conda_list", ".conda_list.txt"),
)
log_files = {}
for key, ext in key_ext:
log_files[key] = prefix + ext
log_files[key + "_md5"] = prefix + ext + ".md5"
return log_files
def update_cluster_config(self, cluster_config):
"""Update cluster configuration with resource usage limits for
scheduling
"""
actions = (
"access",
"target",
"antitarget",
"coverage",
"reference",
"fix",
"call",
"segment",
"export",
"plot",
"report",
)
for action in actions:
key = "somatic_targeted_seq_cnv_calling_cnvkit_{}".format(action)
if action == "plot":
memory = 30 * 1024
else:
memory = int(7.5 * 1024)
cluster_config[key] = {"mem": memory, "time": "24:00", "ntasks": 1}
class CopywriterStepPart(SomaticTargetedSeqCnvCallingStepPart):
    """Perform somatic targeted CNV calling using CopywriteR"""

    #: Step part name used for config lookup and rule naming.
    name = "copywriter"

    def __init__(self, parent):
        super().__init__(parent)
        # Doubled braces survive a later .format(ext=...) as Snakemake wildcards.
        self.base_path_out = (
            "work/{{mapper}}.copywriter.{{library_name}}/out/"
            "{{mapper}}.copywriter.{{library_name}}{ext}"
        )

    def get_input_files(self, action):
        """Return input-file function for action 'run' or 'call'."""

        def input_function_run(wildcards):
            """Tumor and matched-normal BAM/BAI from the ngs_mapping sub workflow."""
            ngs_mapping = self.parent.sub_workflows["ngs_mapping"]
            normal_base_path = (
                "output/{mapper}.{normal_library}/out/{mapper}.{normal_library}".format(
                    normal_library=self.get_normal_lib_name(wildcards), **wildcards
                )
            )
            tumor_base_path = (
                "output/{mapper}.{library_name}/out/" "{mapper}.{library_name}"
            ).format(**wildcards)
            return {
                "normal_bam": ngs_mapping(normal_base_path + ".bam"),
                "normal_bai": ngs_mapping(normal_base_path + ".bam.bai"),
                "tumor_bam": ngs_mapping(tumor_base_path + ".bam"),
                "tumor_bai": ngs_mapping(tumor_base_path + ".bam.bai"),
            }

        def input_function_call(wildcards):
            """Intermediate CNAprofiles files written by the 'run' action."""
            tpl = "work/{mapper}.copywriter.{library_name}/CNAprofiles/".format(**wildcards)
            exts = {
                "input": "input.Rdata",
                "segment": "segment.Rdata",
                "counts": "read_counts.txt",
                "log2": "log2_read_counts.igv",
            }
            return {key: tpl + suffix for key, suffix in exts.items()}

        # BUG FIX: previously the message string was a *member* of the asserted
        # list (`action in ["run", "call", "Unsupported actions"]`), so the
        # bogus action "Unsupported actions" would have passed the check.
        assert action in ("run", "call"), "Unsupported actions"
        if action == "run":
            return input_function_run
        return input_function_call

    @dictify
    def get_output_files(self, action):
        """Return output files for action 'run' or 'call'."""
        # BUG FIX: same assert-message-inside-the-list defect as above.
        assert action in ("run", "call"), "Unsupported actions"
        if action == "run":
            exts = {
                "input": "input.Rdata",
                "segment": "segment.Rdata",
                "counts": "read_counts.txt",
                "log2": "log2_read_counts.igv",
            }
            tpl = "work/{mapper}.copywriter.{library_name}/CNAprofiles/"
        else:  # action == "call"
            exts = {
                "bins_txt": "bins.txt",
                "gene_call_txt": "gene_call.txt",
                "gene_log2_txt": "gene_log2.txt",
                "segments_txt": "segments.txt",
            }
            tpl = "work/{mapper}.copywriter.{library_name}/out/{mapper}.copywriter.{library_name}_"
        output_files = {}
        for k, v in exts.items():
            output_files[k] = tpl + v
            output_files[k + "_md5"] = tpl + v + ".md5"
        return output_files

    def check_config(self):
        """Check configuration: path_target_regions must be set when enabled."""
        if "copywriter" not in self.config["tools"]:
            return  # skip
        self.parent.ensure_w_config(
            (
                "step_config",
                "somatic_targeted_seq_cnv_calling",
                "copywriter",
                "path_target_regions",
            ),
            "Path to target regions is missing",
        )

    @dictify
    def _get_log_file(self, action):
        """Return path to log file for the given action"""
        key_ext = (
            ("log", ".log"),
            ("conda_info", ".conda_info.txt"),
            ("conda_list", ".conda_list.txt"),
        )
        # BUG FIX: `action in ("prepare")` tested substring membership in the
        # *string* "prepare" (a one-element tuple needs a trailing comma), so
        # e.g. action == "pre" would have matched.
        if action in ("prepare",):
            tpl = "work/copywriter.{action}/log/snakemake.log"
            return tpl.format(action=action)
        elif action in ("call", "run"):
            tpl = (
                "work/{mapper}.copywriter.{library_name}/log/{mapper}.copywriter.{library_name}."
                + action
            )
            for key, ext in key_ext:
                yield key, tpl + ext
        else:
            raise ValueError("Unknown action {}".format(action))

    def update_cluster_config(self, cluster_config):
        """Update cluster configuration with resource usage limits for
        scheduling
        """
        tpl = "somatic_targeted_seq_cnv_calling_copywriter_{}"
        cluster_config[tpl.format("prepare")] = {"mem": int(4000), "time": "2:00", "ntasks": 1}
        cluster_config[tpl.format("run")] = {"mem": int(80000), "time": "16:00", "ntasks": 2}
        cluster_config[tpl.format("call")] = {"mem": int(8000), "time": "3:59:00", "ntasks": 8}
class SomaticTargetedSeqCnvCallingWorkflow(BaseStep):
    """Perform somatic targeted sequencing CNV calling"""
    #: Workflow name, also used as the step_config key.
    name = "somatic_targeted_seq_cnv_calling"
    #: Sample-sheet shortcut class for tumor/normal pairing.
    sheet_shortcut_class = CancerCaseSheet
    @classmethod
    def default_config_yaml(cls):
        """Return default config YAML, to be overwritten by project-specific one"""
        return DEFAULT_CONFIG
    def __init__(
        self, workflow, config, cluster_config, config_lookup_paths, config_paths, workdir
    ):
        """Initialize the step and wire in the ngs_mapping dependency."""
        super().__init__(
            workflow,
            config,
            cluster_config,
            config_lookup_paths,
            config_paths,
            workdir,
            (NgsMappingWorkflow,),
        )
        # Register sub step classes so the sub steps are available
        self.register_sub_step_classes(
            (
                CnvettiOffTargetStepPart,
                CnvettiOnTargetStepPart,
                CnvKitStepPart,
                CopywriterStepPart,
                LinkOutStepPart,
            )
        )
        # Initialize sub-workflows
        self.register_sub_workflow("ngs_mapping", self.config["path_ngs_mapping"])
    @listify
    def get_result_files(self):
        """Return list of result files for the somatic targeted sequencing CNV calling step"""
        # Per-tool actions whose outputs count as final results.
        tool_actions = {
            "cnvkit": ("call", "report", "export", "plot"),  # ("report", "export", "plot"),
            "copywriter": ("call",),
            "cnvetti_on_target": ("coverage", "segment", "postprocess"),
            "cnvetti_off_target": ("coverage", "segment", "postprocess"),
        }
        for sheet in filter(is_not_background, self.shortcut_sheets):
            for sample_pair in sheet.all_sample_pairs:
                # Skip pairs lacking a tumor or matched-normal DNA library.
                if (
                    not sample_pair.tumor_sample.dna_ngs_library
                    or not sample_pair.normal_sample.dna_ngs_library
                ):
                    msg = (
                        "INFO: sample pair for cancer bio sample {} has is missing primary"
                        "normal or primary cancer NGS library"
                    )
                    print(msg.format(sample_pair.tumor_sample.name), file=sys.stderr)
                    continue
                for tool in self.config["tools"]:
                    for action in tool_actions[tool]:
                        # get_output_files may return a dict or a plain string.
                        try:
                            tpls = list(self.sub_steps[tool].get_output_files(action).values())
                        except AttributeError:
                            tpls = [self.sub_steps[tool].get_output_files(action)]
                        try:
                            tpls += self.sub_steps[tool].get_log_file(action).values()
                        except AttributeError:
                            tpls += [self.sub_steps[tool].get_log_file(action)]
                        for tpl in tpls:
                            filenames = expand(
                                tpl,
                                mapper=self.w_config["step_config"]["ngs_mapping"]["tools"]["dna"],
                                library_name=[sample_pair.tumor_sample.dna_ngs_library.name],
                            )
                            # Temporary files stay in work/; everything else is published.
                            for f in filenames:
                                if ".tmp." not in f:
                                    yield f.replace("work/", "output/")
    def check_config(self):
        """Check that the necessary global configuration is present"""
        # NOTE(review): the message mentions "somatic variant calling" but the
        # checked key is path_ngs_mapping — looks like a copy-paste slip; confirm.
        self.ensure_w_config(
            ("step_config", "somatic_targeted_seq_cnv_calling", "path_ngs_mapping"),
            (
                "Path to somatic variant calling not configured but required for "
                "targeted sequencing CNV calling"
            ),
        )
        self.ensure_w_config(
            ("static_data_config", "reference", "path"),
            (
                "Path to reference FASTA file not configured but required for targeted sequencing "
                "CNV calling"
            ),
        )
|
StarcoderdataPython
|
1764717
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
# NOTE: os/sys are only needed for the optional sys.path setup described above;
# no path entries are added at the moment.
# -- Project information -----------------------------------------------------
project = u'OSDA Project'
copyright = u'2019, OSDA Project members and individual contributors'
author = u'The OSDA Project'
# The short X.Y version
version = '3.5.0'
# The full version, including alpha/beta/rc tags
release = '3.5.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = '.rst'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [ ]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# The theme package must be importable, hence the local import here.
import sphinx_ustack_theme
html_theme = 'sphinx_ustack_theme'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '../../icon/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
|
StarcoderdataPython
|
3287787
|
/home/sheldon/anaconda3/lib/python3.6/enum.py
|
StarcoderdataPython
|
111735
|
from __future__ import print_function, absolute_import
from collections import OrderedDict
from ._result_base import H5NastranResultBase
from h5Nastran.post_process.result_readers.punch import PunchReader
import numpy as np
import tables
from six import iteritems
class H5NastranResultPunch(H5NastranResultBase):
    """Loads Nastran punch-file results into the HDF5 result store.

    The BDF must be loaded before a punch file; punch and F06 loading are
    mutually exclusive.
    """
    def __init__(self, *args, **kwargs):
        super(H5NastranResultPunch, self).__init__(*args, **kwargs)
    def load_punch(self, filename):
        """Read *filename* via PunchReader and write all result tables to HDF5."""
        if self._bdf is None:
            raise Exception('BDF must be loaded first!')
        if self._f06 is not None:
            raise Exception('F06 has already been loaded.  Cannot load punch file after f06.')
        self._punch = filename
        # (subcase_id, load_factor) -> 1-based domain id, rebuilt per file.
        self._punch_subcase_ids.clear()
        reader = PunchReader(filename)
        reader.register_callback(self._load_punch_table)
        reader.read()
        self.h5f.flush()
        # Finalize every result table touched during the read, then flush
        # anything we could not interpret.
        for table in self._tables:
            table.finalize()
        self._tables.clear()
        self._write_unsupported_tables()
        self._punch_finalize()
    def _punch_finalize(self):
        """Write the SUBCASES index table mapping domains to subcase/load factor."""
        dtype = np.dtype([('SUBCASE_ID', '<i8'), ('LOAD_FACTOR', '<f8'), ('DOMAIN_ID', '<i8')])
        format = tables.descr_from_dtype(dtype)[0]
        self.h5f.create_table(self.table_paths.subcase_path, self.table_paths.subcase_table, format,
                              'SUBCASES', expectedrows=len(self._punch_subcase_ids), createparents=True)
        table = self.h5f.get_node(self.table_paths.subcase)
        data = np.zeros(len(self._punch_subcase_ids), dtype=dtype)
        subcase_id = data['SUBCASE_ID']
        load_factor = data['LOAD_FACTOR']
        domain_id = data['DOMAIN_ID']
        # Domain ids are 1-based, so row index = domain id - 1.
        for key, domain_id_ in iteritems(self._punch_subcase_ids):
            index = domain_id_ - 1
            subcase_id_, load_factor_ = key
            subcase_id[index] = subcase_id_
            load_factor[index] = load_factor_
            domain_id[index] = domain_id_
        table.append(data)
        self.h5f.flush()
    def _load_punch_table(self, table_data):
        """PunchReader callback: route one parsed table to its result writer."""
        key = table_data.header.subcase_id_num, table_data.header.load_factor
        # Assign the next free domain id to a newly seen (subcase, load factor).
        if key not in self._punch_subcase_ids:
            self._punch_subcase_ids[key] = len(self._punch_subcase_ids) + 1
        results_type = table_data.header.results_type_basic
        table = self._result_tables.get(results_type, None)
        if table is None:
            return self._unsupported_table(table_data)
        table.write_punch_data(table_data)
        self._tables.add(table)
|
StarcoderdataPython
|
3311423
|
from __future__ import print_function
import numpy as np
import random
try:
xrange
except NameError:
xrange = range
class DataLoader:
    """Generates random batches for a toy sequence-to-sequence task.

    Each input is a random symbol sequence (symbols 1..num_classes-1); the
    target is every 4th input symbol. Symbol 0 is implicitly the padding class.
    """

    def __init__(self, mbsz=128, min_len=20, max_len=30, num_classes=29):
        self.mbsz = mbsz              # batch size
        self.min_len = min_len        # shortest input sequence
        self.max_len = max_len        # longest input sequence (also padding width)
        self.num_classes = num_classes

    def sample(self):
        """Return (one-hot inputs, input lengths, flat labels, output lengths)."""
        inputs, input_lens = [], []
        outputs, output_lens = [], []
        for _ in range(self.mbsz):
            length = random.randint(self.min_len, self.max_len)
            seq = [random.randint(1, self.num_classes - 1) for _ in range(length)]
            target = seq[::4]  # every 4th input symbol is the output
            # (identity and "acronym" target variants existed historically.)
            input_lens.append(length)
            output_lens.append(len(target))
            inputs.append(seq)
            outputs.append(target)
        # One-hot encode the (padded) input batch.
        one_hot = np.zeros((self.mbsz, self.max_len, self.num_classes))
        for row, seq in enumerate(inputs):
            for col, sym in enumerate(seq):
                one_hot[row, col, sym] = 1.0
        # Labels are concatenated into one flat int32 vector (CTC-style).
        labels = np.zeros(sum(output_lens), dtype=np.int32)
        pos = 0
        for target, n_out in zip(outputs, output_lens):
            labels[pos:pos + n_out] = target
            pos += n_out
        return (one_hot, np.array(input_lens, dtype=np.int32),
                labels, np.array(output_lens, dtype=np.int32))
# Smoke test: draw one batch and show the one-hot input tensor's shape.
if __name__ == '__main__':
    dl = DataLoader()
    ret = dl.sample()
    print(ret[0].shape)
|
StarcoderdataPython
|
4835487
|
<reponame>ajeet1308/code_problems
# https://www.spoj.com/problems/TMUL/
# BUG FIX: the URL above used a C-style "//" comment, which is a Python
# SyntaxError; it must be a "#" comment.
# Read n pairs of integers from stdin and print the product of each pair.
n = int(input())
arr = []
for i in range(0, n):
    a, b = input().split()
    arr.append(int(a) * int(b))
for num in arr:
    print(num)
|
StarcoderdataPython
|
1699459
|
<reponame>OsiriX-Foundation/IntegrationTest
import rq_album
import rq_user
import env
import util
import random
import string
import rq_studies
def test_init():
    # Set up the shared integration-test environment (endpoints, state).
    env.initialize()
    print()
def test_get_token():
    # Fetch auth tokens for the three test users and cache them in env for
    # the tests that follow.
    print()
    token = util.get_token(username="titi", password="<PASSWORD>")
    env.env_var["USER_1_TOKEN"] = token
    token = util.get_token(username="toto", password="<PASSWORD>")
    env.env_var["USER_2_TOKEN"] = token
    token = util.get_token(username="tata", password="<PASSWORD>")
    env.env_var["USER_3_TOKEN"] = token
def test_register():
    # Register all three users using the tokens cached by test_get_token.
    util.register(token=env.env_var['USER_1_TOKEN'])
    util.register(token=env.env_var['USER_2_TOKEN'])
    util.register(token=env.env_var['USER_3_TOKEN'])
"""
def test_get_user_exist():
users_list = rq_user.get(env.env_var["USER_1_TOKEN"], status_code=200, params={"reference":env.env_var["USER_1_MAIL"]})
"""
|
StarcoderdataPython
|
3222263
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: proto/grid/messages/success_resp_message.proto
"""Generated protocol buffer code."""
# third party
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# syft absolute
from syft.proto.core.common import (
common_object_pb2 as proto_dot_core_dot_common_dot_common__object__pb2,
)
from syft.proto.core.io import address_pb2 as proto_dot_core_dot_io_dot_address__pb2
# Register the serialized .proto file descriptor with the default pool.
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
    b'\n.proto/grid/messages/success_resp_message.proto\x12\x12syft.grid.messages\x1a%proto/core/common/common_object.proto\x1a\x1bproto/core/io/address.proto"y\n\x16SuccessResponseMessage\x12%\n\x06msg_id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12\x10\n\x08resp_msg\x18\x03 \x01(\t\x12&\n\x07\x61\x64\x64ress\x18\x04 \x01(\x0b\x32\x15.syft.core.io.Addressb\x06proto3'
)
_SUCCESSRESPONSEMESSAGE = DESCRIPTOR.message_types_by_name["SuccessResponseMessage"]
# Build the concrete message class from the descriptor via reflection.
SuccessResponseMessage = _reflection.GeneratedProtocolMessageType(
    "SuccessResponseMessage",
    (_message.Message,),
    {
        "DESCRIPTOR": _SUCCESSRESPONSEMESSAGE,
        "__module__": "proto.grid.messages.success_resp_message_pb2"
        # @@protoc_insertion_point(class_scope:syft.grid.messages.SuccessResponseMessage)
    },
)
_sym_db.RegisterMessage(SuccessResponseMessage)
# When the C descriptor implementation is unavailable, the pure-Python
# descriptors need their serialized byte offsets set explicitly.
if _descriptor._USE_C_DESCRIPTORS == False:
    DESCRIPTOR._options = None
    _SUCCESSRESPONSEMESSAGE._serialized_start = 138
    _SUCCESSRESPONSEMESSAGE._serialized_end = 259
# @@protoc_insertion_point(module_scope)
|
StarcoderdataPython
|
3368708
|
<reponame>jKulrativid/DNA_cut_for_grade_XII<filename>CODE/usage_class.py
class DNA:
    """A DNA strand with a reading direction and per-base counts."""

    def __init__(self, strand, direction):
        self.name = 'Unnamed DNA'
        self.strand = strand.upper()
        # direction is e.g. '3to5': char 0 labels the start end, char 3 the stop end.
        self.start = direction[0] + "'"
        self.stop = direction[3] + "'"
        self.a, self.t, self.c, self.g, self.non_base = self.count_each()

    def show_all(self):
        """Print the strand framed by its end labels."""
        mid_pad = ' ' * len(self.strand)
        print(self.name)
        print('{}{}{}'.format(self.start, mid_pad, self.stop))
        print('{}{}{}'.format(' ' * len(self.start), self.strand, ' ' * len(self.stop)))
        print('{}{}{}'.format(self.start, mid_pad, self.stop))

    def rename(self, name):
        """Give the strand a human-readable name."""
        self.name = name

    def show_length(self):
        """Print the strand length."""
        print('Length of {!r} = {}'.format(self.name, len(self.strand)))

    def count_each(self):
        """Count A/T/C/G occurrences plus anything that is not a base."""
        tally = {'A': 0, 'T': 0, 'C': 0, 'G': 0}
        other = 0
        for base in self.strand:
            if base in tally:
                tally[base] += 1
            else:
                other += 1
        return tally['A'], tally['T'], tally['C'], tally['G'], other
class Enzyme(DNA):
    """A restriction enzyme: a recognition strand with a cut-position marker.

    The raw strand may contain a single *delimiter* character marking where
    the enzyme cuts; ``cut_position`` is then the number of characters before
    the cut (i.e. the last base of the upstream fragment, 1-indexed), or the
    string 'Not found' when no delimiter is present.
    """

    def __init__(self, strand, direction, delimiter):
        super().__init__(strand, direction)
        self.name = 'Unnamed Enzyme'
        self.delimiter = delimiter
        self.strand, self.cut_position = self.enzyme_setting()

    def enzyme_setting(self):
        """Strip the delimiter from the strand and locate the cut position.

        Returns (pure_strand, cut_position).
        """
        # BUG FIX: the original ignored self.delimiter entirely and treated
        # ANY non-ACGT character as the cut marker, so e.g. a typo in the
        # recognition sequence silently became a cut site. Only the declared
        # delimiter counts now.
        pure_dna = self.strand.replace(self.delimiter, '')
        cut_at = self.strand.find(self.delimiter)
        if cut_at == -1:
            return pure_dna, 'Not found'
        return pure_dna, cut_at
class PetriDish:
    """A simple container holding a stack of named DNA/enzyme objects."""

    def __init__(self):
        self.stack = []

    def add(self, dna):
        """Append *dna* to the dish."""
        self.stack.append(dna)

    def show(self):
        """Print a numbered listing of the dish contents by name."""
        for position, item in enumerate(self.stack, start=1):
            print('{}. {}'.format(position, item.name))
class DNAStack(PetriDish):
    # Marker subclass: a PetriDish intended to hold DNA strands only.
    def __init__(self):
        super().__init__()
class EnzymeStack(PetriDish):
    # Marker subclass: a PetriDish intended to hold enzymes only.
    def __init__(self):
        super().__init__()
class CutTest:
    """Cuts DNA strands with restriction enzymes and collects the fragments."""

    def __init__(self, enzyme_stack):
        # Per-enzyme cut counter (currently informational only).
        self.history = {enzyme.name: 0 for enzyme in enzyme_stack.stack}

    def cut_specific(self, dna, enzyme, cutted_stack):
        """Cut *dna* with a single *enzyme*; fragments go to cutted_stack.stack.

        Note: the trailing fragment is appended even when empty (unlike
        cut_all, which skips empty remainders) — kept for compatibility.
        """
        site = enzyme.strand
        cut_from = 0
        for i in range(len(dna.strand) - len(site) + 1):
            if dna.strand[i:i + len(site)] == site:
                cutted_stack.stack.append(dna.strand[cut_from:i + enzyme.cut_position])
                cut_from = i + enzyme.cut_position
        cutted_stack.stack.append(dna.strand[cut_from:len(dna.strand) + 1])

    def cut_all(self, dna, enzyme_stack, cutted_stack):
        """Cut *dna* with every enzyme in *enzyme_stack* in one left-to-right pass."""
        cut_from = 0
        for i in range(len(dna.strand)):
            for enzyme in enzyme_stack.stack:
                site = enzyme.strand
                # BUG FIX: the original compared characters against the module
                # global `test_dna` instead of the `dna` argument, so cutting
                # any other strand read the wrong sequence (or crashed).
                # A truncated slice can never equal the full site, so the
                # explicit bounds check is no longer needed.
                if dna.strand[i:i + len(site)] == site:
                    cutted_stack.stack.append(dna.strand[cut_from:i + enzyme.cut_position])
                    cut_from = i + enzyme.cut_position
        if dna.strand[cut_from:len(dna.strand) + 1] != '':
            cutted_stack.stack.append(dna.strand[cut_from:len(dna.strand) + 1])
'''
def show_base(self):
self.count_base()
for _ in range(4):
print('{} Base: {:0>5}'.format())
def count_base(self):
n_a, n_t, n_c, n_g = 0, 0, 0, 0
for base in self.strand:
if base == 'A':
n_a += 1
elif base == 'G':
n_g += 1
'''
'''
@staticmethod
def cut_position(strand, delim):
delim_position = 0
for base in strand:
if base != delim:
delim_position += 1
elif base == delim:
if delim_position != 0:
return delim_position+1, strand.replace(delim, '').upper()
else:
return 'Not Found', strand.replace(delim, '').upper()
'''
# tester
# tester
# Manual smoke test: builds a DNA strand, two enzymes (BamHI, HaeIII),
# cuts the strand both individually and with the full enzyme stack, and
# prints the resulting fragments.
if __name__ == '__main__':
    # DNA
    test_dna = DNA('gaccggcctaggatccgggc', '3to5')
    test_dna.rename('Test DNA')
    test_dna.show_all()
    print(test_dna.a)
    test_dna.show_length()
    print('='*60)
    # Enzyme <BamHI>
    test_enzyme1 = Enzyme('cctag|g', '3to5', '|')
    test_enzyme1.rename('BamHI')
    test_enzyme1.show_all()
    print(test_enzyme1.a)
    test_enzyme1.show_length()
    print('Cut Positon = {}'.format(test_enzyme1.cut_position))
    # Enzyme <HaeIII>
    test_enzyme2 = Enzyme('cc|gg', '3to5', '|')
    test_enzyme2.rename('HaeIII')
    test_enzyme2.show_all()
    print(test_enzyme2.a)
    test_enzyme2.show_length()
    print('Cut Positon = {}'.format(test_enzyme2.cut_position))
    # EnzymeStack
    enzyme_stack = EnzymeStack()
    enzyme_stack.add(test_enzyme1)
    enzyme_stack.add(test_enzyme2)
    enzyme_stack.show()
    # DNA_stack
    dna_keeper = DNAStack()
    # cutter
    cutter = CutTest(enzyme_stack)
    # cut specific
    cutter.cut_specific(test_dna, test_enzyme1, dna_keeper)
    cutter.cut_specific(test_dna, test_enzyme2, dna_keeper)
    # cut all
    cutter.cut_all(test_dna, enzyme_stack, dna_keeper)
    # output
    print(dna_keeper.stack)
|
StarcoderdataPython
|
1676008
|
import abc
import itertools
import numpy as np
from keras.preprocessing.image import apply_affine_transform
class AffineTransformation(object):
    """Composable image transform: optional horizontal flip, then a shift by
    (tx, ty), then k 90-degree rotations, applied in that order."""

    def __init__(self, flip, tx, ty, k_90_rotate):
        self.flip = flip
        self.tx = tx
        self.ty = ty
        self.k_90_rotate = k_90_rotate

    def __call__(self, x):
        """Apply the configured transform to image array *x* (H, W, C assumed)."""
        out = x
        if self.flip:
            out = np.fliplr(out)
        if self.tx or self.ty:
            # Translate via Keras' affine helper; 'reflect' fills the exposed border.
            out = apply_affine_transform(out, tx=self.tx, ty=self.ty,
                                         channel_axis=2, fill_mode='reflect')
        if self.k_90_rotate:
            out = np.rot90(out, self.k_90_rotate)
        return out
class AbstractTransformer(abc.ABC):
    """Base class holding an indexed list of transformation callables."""
    def __init__(self):
        self._transformation_list = None
        self._create_transformation_list()
    @property
    def n_transforms(self):
        # Number of available transformations.
        return len(self._transformation_list)
    @abc.abstractmethod
    def _create_transformation_list(self):
        return
    # Keeping the transformation objects in a flat list makes batch-wise use
    # trivial: sample i is passed through transformation t_inds[i].
    def transform_batch(self, x_batch, t_inds):
        assert len(x_batch) == len(t_inds)
        transformed_batch = x_batch.copy()
        for i, t_ind in enumerate(t_inds):
            transformed_batch[i] = self._transformation_list[t_ind](transformed_batch[i])
        return transformed_batch
class Transformer(AbstractTransformer):
    """72 transforms: {flip, no flip} x 3 x-shifts x 3 y-shifts x 4 rotations."""

    def __init__(self, translation_x=8, translation_y=8):
        self.max_tx = translation_x
        self.max_ty = translation_y
        super().__init__()

    def _create_transformation_list(self):
        flips = (False, True)
        shifts_x = (0, -self.max_tx, self.max_tx)
        shifts_y = (0, -self.max_ty, self.max_ty)
        # itertools.product enumerates every combination in a fixed order.
        self._transformation_list = [
            AffineTransformation(flip, tx, ty, k)
            for flip, tx, ty, k in itertools.product(flips, shifts_x, shifts_y, range(4))
        ]
class SimpleTransformer(AbstractTransformer):
    """8 transforms: {flip, no flip} x 4 rotations, with no translation."""

    def _create_transformation_list(self):
        self._transformation_list = [
            AffineTransformation(flip, 0, 0, k)
            for flip, k in itertools.product((False, True), range(4))
        ]
|
StarcoderdataPython
|
1710348
|
<filename>TP1/zip2/main.py
import math
import random
import time
import matplotlib.pyplot as pyplot
from matplotlib.pyplot import Figure, subplot
from smallestenclosingcircle import make_circle
def search(Positions, k, c):
    """Simulated annealing over antenna placements; returns the cheapest found.

    k is the fixed cost per antenna, c the cost per squared radius.
    """
    current = greedy_solution(Positions, k, c)
    best = current
    theta = 500.0        # initial temperature (T)
    alpha = 0.8          # geometric cooling factor
    cooling_steps = 50   # Kmax
    moves_per_step = 50  # P
    for _ in range(cooling_steps):
        for _ in range(moves_per_step):
            candidate = find_neighbour(current, Positions)
            delta = cout(current, k, c) - cout(candidate, k, c)
            if CritereMetropolis(delta, theta):
                current = candidate
            if cout(current, k, c) < cout(best, k, c):
                best = current
        theta *= alpha
    return best
def find_neighbour(solution, positions):
    """Return a neighbouring solution: steal the closest covered point from a
    random antenna's nearest neighbour and rebuild both enclosing circles."""
    if len(solution) <= 1:
        return solution
    # Pick a random antenna and its nearest other antenna.
    antenna = solution[int(random.random() * len(solution))]
    closest_antenna = min([ant for ant in solution if ant != antenna], key=lambda a : dist_squared(a, antenna))
    # Points currently covered by each of the two antennas.
    ant1_coverage = [p for p in positions if verify_cover(p, antenna)]
    ant2_coverage = [p for p in positions if verify_cover(p, closest_antenna)]
    # Move the neighbour's point closest to `antenna` into `antenna`'s set.
    le_point = min(ant2_coverage, key=lambda pos : dist_squared(pos, antenna))
    ant2_coverage = [p for p in ant2_coverage if p != le_point]
    ant1_coverage.append(le_point)
    # Recompute minimal enclosing circles; make_circle returns None for an
    # empty point set, in which case that antenna is dropped.
    new_ant1 = make_circle(ant1_coverage);
    new_ant2 = make_circle(ant2_coverage);
    solution = [ant for ant in solution if (ant != antenna and ant != closest_antenna)]
    if new_ant1:
        solution.append(new_ant1)
    if new_ant2:
        solution.append(new_ant2)
    return solution
def cout(solution, K, C):
    """Total cost of a solution: each antenna costs K fixed plus C * radius^2."""
    total = 0
    for _, _, radius in solution:
        total += K + C * radius ** 2
    return total
def greedy_solution(remaining_positions, k, c):
    """Build an initial solution greedily: grow the nearest antenna to cover
    each point unless opening a fresh unit-radius antenna is cheaper."""
    positions = list(remaining_positions)
    if not remaining_positions:
        return
    # Seed with a unit-radius antenna on the first point.
    antennas = [(remaining_positions[0][0], remaining_positions[0][1], 1)]
    remaining_positions = remaining_positions[1:]
    for pos in remaining_positions:
        closest_antenna = min(antennas, key=lambda x : dist_squared(pos, (x[0],x[1])))
        # Candidate: enlarge the closest antenna to also cover `pos`.
        covered_points = [p for p in positions if verify_cover(p, closest_antenna)]
        covered_points.append(pos)
        new_antenna = make_circle(covered_points)
        # Compare enlarging (k + c*r_new^2) against keeping the old antenna
        # and opening a new unit-radius one (k + c*r_old^2 + k).
        if((k+c*new_antenna[2]**2) > (k+c*closest_antenna[2]**2 + k)):
            new_antenna = (pos[0], pos[1], 1)
        else:
            antennas = [ant for ant in antennas if ant != closest_antenna]
        antennas.append(new_antenna)
        remaining_positions = [p for p in remaining_positions if p != pos]
    return antennas
def CritereMetropolis(delta, T):
    """Metropolis acceptance rule.

    Always accept improvements (delta > 0); otherwise accept with
    probability exp(delta / T), which shrinks as T cools.
    """
    if delta > 0:
        return True
    # BUG FIX: the original called random.seed() on every invocation,
    # re-seeding the global RNG from system entropy each time — slow, and it
    # makes annealing runs impossible to reproduce with a fixed seed.
    return math.exp(delta / T) >= random.random()
def dist_squared(pos1, pos2):
    """Squared Euclidean distance between two 2-D points."""
    dx = pos1[0] - pos2[0]
    dy = pos1[1] - pos2[1]
    return dx * dx + dy * dy
def verify_cover(position, antenna):
    """True when *position* lies inside the (x, y, radius) disc *antenna*."""
    dx = position[0] - antenna[0]
    dy = position[1] - antenna[1]
    return dx * dx + dy * dy <= antenna[2] ** 2
def main():
    """Demo: place antennas over 20 random points and plot the result."""
    points = random_pos(20, 100)
    fig = pyplot.figure(1)
    ax = fig.add_subplot(1, 1, 1)
    pyplot.scatter(*zip(*points), marker='x')
    # BUG FIX: time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for wall-clock benchmarking.
    before = time.perf_counter()
    result = search(points, 200, 1)
    temp = time.perf_counter() - before  # elapsed seconds (measured, not shown)
    for antenna in result:
        circle = pyplot.Circle((antenna[0], antenna[1]), antenna[2], color='g', fill=False)
        ax.add_patch(circle)
    # Force a square viewport starting at the origin.
    limx = pyplot.xlim()
    limy = pyplot.ylim()
    pyplot.xlim(0, max(limy[1], limx[1]))
    pyplot.ylim(0, max(limy[1], limx[1]))
    ax.set_aspect(1)
    pyplot.show()
def random_pos(numPos, upper):
    """Return *numPos* random integer (x, y) points with 0 <= coordinate < upper.

    Note: the second parameter was renamed from ``max``, which shadowed the
    builtin; the only call site in this file passes it positionally. The
    per-call random.seed() was removed (the module seeds itself on import).
    """
    return [(random.randrange(0, upper), random.randrange(0, upper))
            for _ in range(numPos)]
# Script entry point: run the plotting demo only when executed directly.
if __name__ == "__main__":
    main()
|
StarcoderdataPython
|
3302497
|
from entangled.forms import EntangledModelForm
from djangocms_frontend.fields import AttributesFormField, TagTypeFormField
from ...common.responsive import ResponsiveFormMixin
from ...models import FrontendUIItem
class MediaForm(ResponsiveFormMixin, EntangledModelForm):
    """
    Layout > "Media" Plugin
    http://getbootstrap.com/docs/4.0/layout/media-object/
    """
    class Meta:
        model = FrontendUIItem
        # Form fields persisted inside the model's JSON "config" blob.
        entangled_fields = {
            "config": [
                "attributes",
            ]
        }
        # Form fields mapped to real model columns.
        untangled_fields = ("tag_type",)
    attributes = AttributesFormField()
    tag_type = TagTypeFormField()
class MediaBodyForm(EntangledModelForm):
    """
    Layout > "Media body" Plugin
    http://getbootstrap.com/docs/4.0/layout/media-object/
    """
    class Meta:
        model = FrontendUIItem
        # Form fields persisted inside the model's JSON "config" blob.
        entangled_fields = {
            "config": [
                "attributes",
            ]
        }
        # Form fields mapped to real model columns.
        untangled_fields = ("tag_type",)
    attributes = AttributesFormField()
    tag_type = TagTypeFormField()
|
StarcoderdataPython
|
1746698
|
<reponame>shilpasayura/bk
def merge(xs, ys):
    """Return a new sorted list with the merged contents of sorted lists xs and ys."""
    merged = []
    i = j = 0
    n, m = len(xs), len(ys)
    while i < n and j < m:
        if xs[i] < ys[j]:
            merged.append(xs[i])
            i += 1
        else:
            merged.append(ys[j])
            j += 1
    # At most one of these tails is non-empty; extend handles both cases.
    merged.extend(xs[i:])
    merged.extend(ys[j:])
    return merged
def test(test_case_xs, test_case_ys, expected):
    """Run merge on one case and report pass/fail on stdout."""
    actual = merge(test_case_xs, test_case_ys)
    if actual != expected:
        print("Didn't pass test for " + str(test_case_xs) + ", " + str(test_case_ys))
        print("The result was " + str(actual) + " but it should have been " + str(expected))
    else:
        print("Passed test for " + str(test_case_xs) + ", " + str(test_case_ys))
# Ad-hoc regression cases: empty inputs, singletons, interleaved duplicates,
# disjoint ranges in both orders, and unequal lengths both ways.
test([], [], [])
test([0], [], [0])
test([], [0], [0])
test([0, 1, 2], [0, 1, 2], [0, 0, 1, 1, 2, 2])
test([0, 1, 2], [3, 4, 5], [0, 1, 2, 3, 4, 5])
test([3, 4, 5], [0, 1, 2], [0, 1, 2, 3, 4, 5])
test([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3], [0, 0, 1, 1, 2, 2, 3, 3, 4, 5, 6, 7, 8, 9])
test([0, 1, 2, 3], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 0, 1, 1, 2, 2, 3, 3, 4, 5, 6, 7, 8, 9])
|
StarcoderdataPython
|
88718
|
from dataclasses import dataclass
@dataclass
class Vehicle:
    """Simple value object describing a vehicle."""
    name: str  # manufacturer or common name
    model: str  # model designation
    max_speed: int  # top speed (units not specified in source -- confirm)
|
StarcoderdataPython
|
1626972
|
<filename>app.py
from flask import Flask, request
from flask_cors import CORS
import apis
app = Flask(__name__)
# flask-cors expects `resources` (plural) mapping URL patterns to option dicts
# whose key is `origins`; the original `resource=`/`"origin"` spelling was not
# a recognised option, so the intended per-route CORS policy never applied.
cors = CORS(app, resources={'/v1/GET/*': {"origins": "*"}})
@app.route('/')
def hello_world():
    # Health-check / landing route.
    return 'Hello World!'
@app.route('/v1/GET/stu_grade_score/<ID>', methods=['GET', 'POST'])
def gradeScore(ID):
    # Proxy to apis.getStuGradeScore for the given student ID.
    resp = apis.getStuGradeScore(ID, apis.header)
    return resp
@app.route('/v1/GET/stu_score/<ID>', methods=['GET', 'POST'])
def score(ID):
    # Proxy to apis.getStuScore for the given student ID.
    resp = apis.getStuScore(ID, apis.header)
    return resp
@app.route('/v1/GET/stu_course_schedule/<ID>', methods=['GET', 'POST'])
def courseSchedule(ID):
    # Fetch a student's course schedule; `year` and `term` come from the
    # query string and are both required.
    year = request.args.get('year')
    term = request.args.get('term')
    if year and term:
        schoolYear = year
        # schoolYear = str(year) + '-' + str(int(year) + 1)
        resp = apis.getStuCourseSchedule(ID, schoolYear=schoolYear, term=term, header=apis.header)
        return resp
    else:
        # Missing parameters: build a 404-style JSON error payload.
        resp = apis.jsonResponse('404', 'failed', ['请检查输入内容是否正确'])
        resp = apis.class2dict(resp)
        # print(resp)
        return apis.jsonify(resp)
@app.route('/v1/GET/stu_photo/<ID>', methods=['GET', 'POST'])
def stuPhoto(ID):
    # Proxy to apis.getStuPhoto for the given student ID.
    resp = apis.getStuPhoto(ID, apis.header)
    return resp
@app.route('/v1/GET/stu_info/<ID>', methods=['GET', 'POST'])
def stuInfo(ID):
    # Proxy to apis.getStuInfo for the given student ID.
    resp = apis.getStuInfo(ID, apis.header)
    return resp
@app.route('/v1/POST/stu_login_status', methods=['POST'])
def stuLoginStatus():
    """Validate student credentials against the remote login endpoint."""
    # NOTE(review): the original printed request.form here, which wrote the
    # submitted password to stdout/server logs; removed for security.
    stuID = request.form['stuID']
    stuPwd = request.form['stuPwd']
    resp = apis.getLoginState(stuID, stuPwd, apis.header)
    return resp
if __name__ == '__main__':
    # Development server, listening on all interfaces.
    app.run('0.0.0.0')
|
StarcoderdataPython
|
64819
|
<filename>__init__.py<gh_stars>0
from mycroft import MycroftSkill, intent_file_handler
class Whyplessey(MycroftSkill):
    """Mycroft skill that answers the 'whyplessey' intent with a canned dialog."""
    def __init__(self):
        MycroftSkill.__init__(self)

    @intent_file_handler('whyplessey.intent')
    def handle_whyplessey(self, message):
        # Speak the response defined in the skill's whyplessey dialog file.
        self.speak_dialog('whyplessey')
def create_skill():
    # Factory entry point used by the Mycroft skill loader.
    return Whyplessey()
|
StarcoderdataPython
|
3246112
|
# -*- coding: utf-8 -*-
from .version import version_info, __version__
from .highcharts.highcharts import Highchart
from .highmaps.highmaps import Highmap
from .highstock.highstock import Highstock
from . import ipynb
|
StarcoderdataPython
|
1683259
|
"""Export constants shared by all classes of the module."""
from sys import maxint
# Actions (from /usr/include/net/pfvar.h)
PF_PASS = 0
PF_DROP = 1
PF_SCRUB = 2
PF_NOSCRUB = 3
PF_NAT = 4
PF_NONAT = 5
PF_BINAT = 6
PF_NOBINAT = 7
PF_RDR = 8
PF_NORDR = 9
PF_SYNPROXY_DROP = 10
PF_DEFER = 11
PF_MATCH = 12
PF_DIVERT = 13
PF_RT = 14
PF_AFRT = 15
# PF transaction types (from /usr/include/net/pfvar.h)
PF_TRANS_RULESET = 0
PF_TRANS_ALTQ = 1
PF_TRANS_TABLE = 2
# PF rule flags (from /usr/include/net/pfvar.h)
PFRULE_DROP = 0x0000
PFRULE_RETURNRST = 0x0001
PFRULE_FRAGMENT = 0x0002
PFRULE_RETURNICMP = 0x0004
PFRULE_RETURN = 0x0008
PFRULE_NOSYNC = 0x0010
PFRULE_SRCTRACK = 0x0020
PFRULE_RULESRCTRACK = 0x0040
# PF rule flags (from /usr/include/net/pfvar.h)
PFRULE_IFBOUND = 0x00010000
PFRULE_STATESLOPPY = 0x00020000
PFRULE_PFLOW = 0x00040000
PFRULE_ONCE = 0x00100000
PFRULE_AFTO = 0x00200000
# Port comparison operators (from /usr/include/net/pfvar.h)
PF_OP_NONE = 0
PF_OP_IRG = 1
PF_OP_EQ = 2
PF_OP_NE = 3
PF_OP_LT = 4
PF_OP_LE = 5
PF_OP_GT = 6
PF_OP_GE = 7
PF_OP_XRG = 8
PF_OP_RRG = 9
# Rules retrieval options (from /usr/include/net/pfvar.h)
PF_GET_NONE = 0
PF_GET_CLR_CNTR = 1
# PF keep states (from /usr/include/net/pfvar.h)
PF_STATE_NORMAL = 0x1
PF_STATE_MODULATE = 0x2
PF_STATE_SYNPROXY = 0x3
# Routing options (from /usr/include/net/pfvar.h)
PF_NOPFROUTE = 0
PF_ROUTETO = 1
PF_DUPTO = 2
PF_REPLYTO = 3
# State keys (from /usr/include/net/pfvar.h)
PF_SK_WIRE = 0
PF_SK_STACK = 1
PF_SK_BOTH = 2
# Log options (from /usr/include/net/pfvar.h)
PF_LOG = 0x01
PF_LOG_ALL = 0x02
PF_LOG_SOCKET_LOOKUP = 0x04
PF_LOG_FORCE = 0x08
PF_LOG_MATCHES = 0x10
# Address types (from /usr/include/net/pfvar.h)
PF_ADDR_ADDRMASK = 0
PF_ADDR_NOROUTE = 1
PF_ADDR_DYNIFTL = 2
PF_ADDR_TABLE = 3
PF_ADDR_RTLABEL = 4
PF_ADDR_URPFFAILED = 5
PF_ADDR_RANGE = 6
PF_ADDR_NONE = 7
# OS fingerprints matches (from /usr/include/net/pfvar.h)
PF_OSFP_ANY = 0
PF_OSFP_UNKNOWN = -1
PF_OSFP_NOMATCH = -2
# Interface flags (from /usr/include/net/pfvar.h)
PFI_AFLAG_NETWORK = 0x01
PFI_AFLAG_BROADCAST = 0x02
PFI_AFLAG_PEER = 0x04
PFI_AFLAG_MODEMASK = 0x07
PFI_AFLAG_NOALIAS = 0x08
# Traffic directions (from /usr/include/net/pfvar.h)
PF_INOUT = 0
PF_IN = 1
PF_OUT = 2
PF_FWD = 3
# Flush options (from /usr/include/net/pfvar.h)
PF_FLUSH = 0x01
PF_FLUSH_GLOBAL = 0x02
# IP type of service (from /usr/include/netinet/ip.h)
IPTOS_LOWDELAY = 0x10
IPTOS_THROUGHPUT = 0x08
IPTOS_RELIABILITY = 0x04
# NAT ports range (from /usr/src/sbin/pfctl/pfctl_parser.h)
PF_NAT_PROXY_PORT_LOW = 50001
PF_NAT_PROXY_PORT_HIGH = 65535
# Pool IDs (from /usr/src/sbin/pfctl/pfctl_parser.c)
PF_POOL_ROUTE = 0
PF_POOL_NAT = 1
PF_POOL_RDR = 2
# Pool options (from /usr/include/net/pfvar.h)
PF_POOL_TYPEMASK = 0x0f
PF_POOL_STICKYADDR = 0x20
# Pool types (from /usr/include/net/pfvar.h)
PF_POOL_NONE = 0
PF_POOL_BITMASK = 1
PF_POOL_RANDOM = 2
PF_POOL_SRCHASH = 3
PF_POOL_ROUNDROBIN = 4
PF_POOL_LEASTSTATES = 5
# Mask for window scaling factor (from /usr/include/net/pfvar.h)
PF_WSCALE_MASK = 0x0f
# Debug levels (from /usr/include/sys/syslog.h)
LOG_EMERG = 0
LOG_ALERT = 1
LOG_CRIT = 2
LOG_ERR = 3
LOG_WARNING = 4
LOG_NOTICE = 5
LOG_INFO = 6
LOG_DEBUG = 7
# The 'unlimited' value for limits on the memory pools
UINT_MAX = 0xffffffff
# Limits (from /usr/include/net/pfvar.h)
PF_LIMIT_STATES = 0
PF_LIMIT_SRC_NODES = 1
PF_LIMIT_FRAGS = 2
PF_LIMIT_TABLES = 3
PF_LIMIT_TABLE_ENTRIES = 4
PF_LIMIT_MAX = 5
# Timeouts (from /usr/include/net/pfvar.h)
PFTM_TCP_FIRST_PACKET = 0
PFTM_TCP_OPENING = 1
PFTM_TCP_ESTABLISHED = 2
PFTM_TCP_CLOSING = 3
PFTM_TCP_FIN_WAIT = 4
PFTM_TCP_CLOSED = 5
PFTM_UDP_FIRST_PACKET = 6
PFTM_UDP_SINGLE = 7
PFTM_UDP_MULTIPLE = 8
PFTM_ICMP_FIRST_PACKET = 9
PFTM_ICMP_ERROR_REPLY = 10
PFTM_OTHER_FIRST_PACKET = 11
PFTM_OTHER_SINGLE = 12
PFTM_OTHER_MULTIPLE = 13
PFTM_FRAG = 14
PFTM_INTERVAL = 15
PFTM_ADAPTIVE_START = 16
PFTM_ADAPTIVE_END = 17
PFTM_SRC_NODE = 18
PFTM_TS_DIFF = 19
PFTM_MAX = 20
PFTM_PURGE = 21
PFTM_UNLINKED = 22
# TCP States (from /usr/include/netinet/tcp_fsm.h)
TCPS_CLOSED = 0
TCPS_LISTEN = 1
TCPS_SYN_SENT = 2
TCPS_SYN_RECEIVED = 3
TCPS_ESTABLISHED = 4
TCPS_CLOSE_WAIT = 5
TCPS_FIN_WAIT_1 = 6
TCPS_CLOSING = 7
TCPS_LAST_ACK = 8
TCPS_FIN_WAIT_2 = 9
TCPS_TIME_WAIT = 10
TCP_NSTATES = 11
# From /usr/include/net/pfvar.h
PF_TCPS_PROXY_SRC = TCP_NSTATES + 0
PF_TCPS_PROXY_DST = TCP_NSTATES + 1
# UDP state enumeration (from /usr/include/net/pfvar.h)
PFUDPS_NO_TRAFFIC = 0
PFUDPS_SINGLE = 1
PFUDPS_MULTIPLE = 2
PFUDPS_NSTATES = 3
# States for non-TCP protocols (from /usr/include/net/pfvar.h)
PFOTHERS_NO_TRAFFIC = 0
PFOTHERS_SINGLE = 1
PFOTHERS_MULTIPLE = 2
PFOTHERS_NSTATES = 3
# Pfsync flags (from /usr/include/net/pfvar.h)
PFSYNC_FLAG_SRCNODE = 0x04
PFSYNC_FLAG_NATSRCNODE = 0x08
# PF states flags (from /usr/include/net/pfvar.h)
PFSTATE_ALLOWOPTS = 0x0001
PFSTATE_SLOPPY = 0x0002
PFSTATE_PFLOW = 0x0004
PFSTATE_NOSYNC = 0x0008
PFSTATE_ACK = 0x0010
PFSTATE_NODF = 0x0020
PFSTATE_SETTOS = 0x0040
PFSTATE_RANDOMID = 0x0080
PFSTATE_SCRUB_TCP = 0x0100
PFSTATE_SETPRIO = 0x0200
PFSTATE_SCRUBMASK = PFSTATE_NODF|PFSTATE_RANDOMID|PFSTATE_SCRUB_TCP
PFSTATE_SETMASK = PFSTATE_SETTOS|PFSTATE_SETPRIO
# Reassembly flags (from /usr/include/net/pfvar.h)
PF_REASS_ENABLED = 0x01
PF_REASS_NODF = 0x02
# Table flags (from /usr/include/net/pfvar.h)
PFR_TFLAG_PERSIST = 0x01
PFR_TFLAG_CONST = 0x02
PFR_TFLAG_ACTIVE = 0x04
PFR_TFLAG_INACTIVE = 0x08
PFR_TFLAG_REFERENCED = 0x10
PFR_TFLAG_REFDANCHOR = 0x20
PFR_TFLAG_COUNTERS = 0x40
PFR_TFLAG_USRMASK = 0x43
PFR_TFLAG_SETMASK = 0x3C
PFR_TFLAG_ALLMASK = 0x7F
PFR_FLAG_DUMMY = 0x00000002
PFR_FLAG_FEEDBACK = 0x00000004
PFR_FLAG_CLSTATS = 0x00000008
PFR_FLAG_ADDRSTOO = 0x00000010
PFR_FLAG_REPLACE = 0x00000020
PFR_FLAG_ALLRSETS = 0x00000040
PFR_FLAG_ALLMASK = 0x0000007f
PFR_DIR_IN = 0
PFR_DIR_OUT = 1
PFR_DIR_MAX = 2
PFR_OP_BLOCK = 0
PFR_OP_MATCH = 1
PFR_OP_PASS = 2
PFR_OP_ADDR_MAX = 3
PFR_OP_TABLE_MAX = 4
PFR_OP_XPASS = PFR_OP_ADDR_MAX
PFR_REFCNT_RULE = 0
PFR_REFCNT_ANCHOR = 1
PFR_REFCNT_MAX = 2
# pfrke type (from /usr/include/net/pfvar.h)
PFRKE_PLAIN = 0
PFRKE_ROUTE = 1
PFRKE_COST = 2
PFRKE_MAX = 3
# Interface flags (from /usr/include/net/pfvar.h)
PFI_IFLAG_SKIP = 0x0100
PFI_IFLAG_ANY = 0x0200
# From /usr/src/sbin/pfctl/pfctl.h
DEFAULT_PRIORITY = 1
DEFAULT_QLIMIT = 50
# Queue flags (from /usr/include/net/pfvar.h)
PFQS_FLOWQUEUE = 0x0001
PFQS_ROOTCLASS = 0x0002
PFQS_DEFAULT = 0x1000
# ICMP types (from /usr/include/netinet/ip_icmp.h)
ICMP_ECHO = 8
ICMP_ECHOREPLY = 0
ICMP_UNREACH = 3
ICMP_SOURCEQUENCH = 4
ICMP_REDIRECT = 5
ICMP_ALTHOSTADDR = 6
ICMP_ROUTERADVERT = 9
ICMP_ROUTERSOLICIT = 10
ICMP_TIMXCEED = 11
ICMP_PARAMPROB = 12
ICMP_TSTAMP = 13
ICMP_TSTAMPREPLY = 14
ICMP_IREQ = 15
ICMP_IREQREPLY = 16
ICMP_MASKREQ = 17
ICMP_MASKREPLY = 18
ICMP_TRACEROUTE = 30
ICMP_DATACONVERR = 31
ICMP_MOBILE_REDIRECT = 32
ICMP_IPV6_WHEREAREYOU = 33
ICMP_IPV6_IAMHERE = 34
ICMP_MOBILE_REGREQUEST = 35
ICMP_MOBILE_REGREPLY = 36
ICMP_SKIP = 39
ICMP_PHOTURIS = 40
# ICMP codes (from /usr/include/netinet/ip_icmp.h)
ICMP_UNREACH_NET = 0 # Destination unreachable
ICMP_UNREACH_HOST = 1
ICMP_UNREACH_PROTOCOL = 2
ICMP_UNREACH_PORT = 3
ICMP_UNREACH_NEEDFRAG = 4
ICMP_UNREACH_SRCFAIL = 5
ICMP_UNREACH_NET_UNKNOWN = 6
ICMP_UNREACH_HOST_UNKNOWN = 7
ICMP_UNREACH_ISOLATED = 8
ICMP_UNREACH_NET_PROHIB = 9
ICMP_UNREACH_HOST_PROHIB = 10
ICMP_UNREACH_TOSNET = 11
ICMP_UNREACH_TOSHOST = 12
ICMP_UNREACH_FILTER_PROHIB = 13
ICMP_UNREACH_HOST_PRECEDENCE = 14
ICMP_UNREACH_PRECEDENCE_CUTOFF = 15
ICMP_REDIRECT_NET = 0 # Shorter route
ICMP_REDIRECT_HOST = 1
ICMP_REDIRECT_TOSNET = 2
ICMP_REDIRECT_TOSHOST = 3
ICMP_ROUTERADVERT_NORMAL = 0 # Router advertisement
ICMP_ROUTERADVERT_NOROUTE_COMMON = 16
ICMP_TIMXCEED_INTRANS = 0 # Time exceeded
ICMP_TIMXCEED_REASS = 1
ICMP_PARAMPROB_ERRATPTR = 0 # IP header bad
ICMP_PARAMPROB_OPTABSENT = 1
ICMP_PARAMPROB_LENGTH = 2
ICMP_PHOTURIS_UNKNOWN_INDEX = 1 # Photuris
ICMP_PHOTURIS_AUTH_FAILED = 2
ICMP_PHOTURIS_DECRYPT_FAILED = 3
# ICMP6 types (from /usr/include/netinet/icmp6.h)
ICMP6_DST_UNREACH = 1
ICMP6_PACKET_TOO_BIG = 2
ICMP6_TIME_EXCEEDED = 3
ICMP6_PARAM_PROB = 4
ICMP6_ECHO_REQUEST = 128
ICMP6_ECHO_REPLY = 129
ICMP6_MEMBERSHIP_QUERY = 130
MLD_LISTENER_QUERY = 130
ICMP6_MEMBERSHIP_REPORT = 131
MLD_LISTENER_REPORT = 131
ICMP6_MEMBERSHIP_REDUCTION = 132
MLD_LISTENER_DONE = 132
ND_ROUTER_SOLICIT = 133
ND_ROUTER_ADVERT = 134
ND_NEIGHBOR_SOLICIT = 135
ND_NEIGHBOR_ADVERT = 136
ND_REDIRECT = 137
ICMP6_ROUTER_RENUMBERING = 138
ICMP6_WRUREQUEST = 139
ICMP6_WRUREPLY = 140
ICMP6_FQDN_QUERY = 139
ICMP6_FQDN_REPLY = 140
ICMP6_NI_QUERY = 139
ICMP6_NI_REPLY = 140
MLD_MTRACE_RESP = 200
MLD_MTRACE = 201
# ICMP6 codes (from /usr/include/netinet/icmp6.h)
ICMP6_DST_UNREACH_NOROUTE = 0
ICMP6_DST_UNREACH_ADMIN = 1
ICMP6_DST_UNREACH_NOTNEIGHBOR = 2
ICMP6_DST_UNREACH_BEYONDSCOPE = 2
ICMP6_DST_UNREACH_ADDR = 3
ICMP6_DST_UNREACH_NOPORT = 4
ICMP6_TIME_EXCEED_TRANSIT = 0
ICMP6_TIME_EXCEED_REASSEMBLY = 1
ICMP6_PARAMPROB_HEADER = 0
ICMP6_PARAMPROB_NEXTHEADER = 1
ICMP6_PARAMPROB_OPTION = 2
ND_REDIRECT_ONLINK = 0
ND_REDIRECT_ROUTER = 1
|
StarcoderdataPython
|
3237398
|
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 25 11:39:15 2017
@author: Administrator
"""
import matplotlib.pyplot as plt
# import matplotlib.pyplot and alias it as plt
import numpy as np
# import numpy and alias it as np
import plotly as py  # import the plotly library and alias it as py
# -------------pre def
pympl = py.offline.plot_mpl
# Configure Chinese text rendering
plt.rcParams['font.family'] = ['SimHei']  # render CJK labels correctly
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly
fig, ax = plt.subplots()
x = np.linspace(1,10)
# np.linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None)
# Parameters: start/stop bound the range; num is how many samples to generate
# (default 50); endpoint controls whether stop is included (default True);
# retstep additionally returns the sample spacing if requested; dtype defaults.
y = x*3 + 5
# linear equation y = x*3 + 5
plt.title("线性函数")
# set the title ("linear function")
plt.xlabel("x 值")
# set the x-axis label ("x value")
plt.ylabel("y 值")
# set the y-axis label ("y value")
ax.plot(x, y)
# draw the line
plot_url = pympl(fig,filename=r'tmp/simple_line.html', show_link=False,resize=True)
|
StarcoderdataPython
|
73912
|
import os
from torch.utils.data import Dataset
from facade_project import LABEL_NAME_TO_VALUE
from facade_project.utils.load import load_tuple_from_json
class FacadeLabelmeDataset(Dataset):
    """
    Facade Labelme Dataset

    A dataset which loads labelme style json files within a directory.

    Items of the dataset are: tuple(image, mask)

    A demo can be found in "notebook/nb_demo_datasets.ipynb"
    """

    def __init__(self, img_dir, label_name_to_value=LABEL_NAME_TO_VALUE):
        """
        Args:
            img_dir: directory containing the labelme .json files.
            label_name_to_value: mapping from label name to integer mask value.
        """
        Dataset.__init__(self)
        self.label_name_to_value = label_name_to_value
        # Sorted listing gives a deterministic sample order across runs.
        # (The original then re-copied this list through a no-op comprehension
        # `[path for path in self.img_paths]`; that redundant pass was removed.)
        self.img_paths = [os.path.join(img_dir, filename)
                          for filename in sorted(os.listdir(img_dir))]

    def __len__(self):
        return len(self.img_paths)

    def __getitem__(self, idx):
        # Samples are loaded lazily, one json file per item.
        img_path = self.img_paths[idx]
        return load_tuple_from_json(img_path, self.label_name_to_value)
|
StarcoderdataPython
|
1711292
|
from tkinter import *
from functools import partial
class Program:
    """Tkinter widget showing program text as a horizontally scrollable row of
    buttons; clicking a button toggles a breakpoint at that index."""

    def __init__(self, master, contents):
        self.buttons = []  # one Button per character of `contents`
        self.last_highlight = None  # index of the currently highlighted button
        self.breakpoints = set()  # indices with an active breakpoint
        frame = LabelFrame(master, text="Program")
        frame.pack(side=TOP, anchor=W, padx=2, fill=X)
        # pinched from https://stackoverflow.com/a/16198198
        scroll_bar = Scrollbar(frame, orient=HORIZONTAL)
        scroll_bar.pack(fill=X, side=BOTTOM, expand=FALSE)
        canvas = Canvas(frame, bd=0, highlightthickness=0, height=50,
                        xscrollcommand=scroll_bar.set)
        canvas.pack(side=BOTTOM, fill=BOTH, expand=TRUE)
        scroll_bar.config(command=canvas.xview)
        # reset the view
        canvas.xview_moveto(0)
        canvas.yview_moveto(0)
        # create a frame inside the canvas which will be scrolled with it
        self.interior = interior = Frame(canvas)
        interior_id = canvas.create_window(0, 0, window=interior,
                                           anchor=NW)

        # track changes to the canvas and frame width and sync them,
        # also updating the scrollbar
        def _configure_interior(event):
            # update the scrollbars to match the size of the inner frame
            size = (interior.winfo_reqwidth(), interior.winfo_reqheight())
            canvas.config(scrollregion="0 0 %s %s" % size)
            if interior.winfo_reqwidth() != canvas.winfo_width():
                # update the canvas's width to fit the inner frame
                canvas.config(width=interior.winfo_reqwidth())
        interior.bind('<Configure>', _configure_interior)
        for index, c in enumerate(contents):
            b = Button(self.interior, command=partial(self.add_or_remove_breakpoint, index), width=2, pady=5, text=c, font='TkFixedFont 20')
            b.pack(side=LEFT)
            self.buttons.append(b)

    def reset(self):
        # Restore default colours and forget highlight/breakpoint state.
        for button in self.buttons:
            button.config(fg="black", highlightbackground="white")
        self.last_highlight = None
        self.breakpoints = set()

    def highlight(self, index):
        # Colour the button at `index` red, restoring the previous one to black.
        if self.last_highlight is not None:
            self.buttons[self.last_highlight].config(fg="black")
        if index not in range(len(self.buttons)):
            return
        self.buttons[index].config(fg="red")
        self.last_highlight = index

    def add_or_remove_breakpoint(self, index):
        # Toggle the breakpoint at `index` (blue highlight = active).
        if index not in self.breakpoints:
            self.breakpoints.add(index)
            self.buttons[index].config(highlightbackground="blue")
        else:
            self.breakpoints.remove(index)
            self.buttons[index].config(highlightbackground="white")
|
StarcoderdataPython
|
133020
|
#!/usr/bin/env python
# coding: utf8
import asyncio
import datetime
import json
import os
import subprocess
import time
from sys import platform
from requests import get
from colorama import Fore, init
# Load runtime configuration once at import time (rebinds the file handle name).
with open('settings.json', 'r') as settings:
    settings = json.load(settings)


async def clear():
    # Clear the terminal (assumes a POSIX shell with `clear`).
    os.system("clear")


async def event(message):
    # python gradiant.py banner.txt 73,204,255 white > output.txt
    # Timestamped [EVENT] log line with 24-bit ANSI gradient colouring.
    print(" " + Fore.LIGHTWHITE_EX + datetime.datetime.now().strftime("%H:%M:%S") + " │ [38;2;73;204;255m[[38;2;99;211;255mE[38;2;125;218;255mV[38;2;151;225;255mE[38;2;177;232;255mN[38;2;203;239;255mT[38;2;229;246;255m][0;00m" + Fore.LIGHTWHITE_EX + " | " + message)


async def detection(message):
    # python gradiant.py banner.txt 255,52,52 white > output.txt
    # Timestamped [DETECTION] log line (red gradient).
    print(" " + Fore.LIGHTWHITE_EX + datetime.datetime.now().strftime("%H:%M:%S") + " │ [38;2;255;52;52m[[38;2;255;70;70mD[38;2;255;88;88mE[38;2;255;106;106mT[38;2;255;124;124mE[38;2;255;142;142mC[38;2;255;160;160mT[38;2;255;178;178mI[38;2;255;196;196mO[38;2;255;214;214mN[38;2;255;232;232m][0;00m" + Fore.LIGHTWHITE_EX + " | " + message)


async def action(message):
    # python gradiant.py banner.txt 255,253,52 white > output.txt
    # Timestamped [ACTION] log line (yellow gradient).
    print(" " + Fore.LIGHTWHITE_EX + datetime.datetime.now().strftime("%H:%M:%S") + " │ [38;2;255;253;52m[[38;2;255;253;77mA[38;2;255;253;102mC[38;2;255;253;127mT[38;2;255;253;152mI[38;2;255;253;177mO[38;2;255;253;202mN[38;2;255;253;227m][0;00m" + Fore.LIGHTWHITE_EX + " | " + message)


async def mitigation(message):
    # python gradiant.py banner.txt 255,166,0 white > output.txt
    # Timestamped [MITIGATION] log line (orange gradient).
    print(" " + Fore.LIGHTWHITE_EX + datetime.datetime.now().strftime("%H:%M:%S") + " │ [38;2;255;166;0m[[38;2;255;173;21mM[38;2;255;180;42mI[38;2;255;187;63mT[38;2;255;194;84mI[38;2;255;201;105mG[38;2;255;208;126mA[38;2;255;215;147mT[38;2;255;222;168mI[38;2;255;229;189mO[38;2;255;236;210mN[38;2;255;243;231m][0;00m" + Fore.LIGHTWHITE_EX + " | " + message)
async def main():
    # Ensure the pcap dump directory exists, offering to create it interactively.
    if os.path.isdir(settings['directory']):
        pass
    else:
        directory_choice = input("Specified dump directory could not be found, would you like to create the directory? [Y | N]: ")
        if directory_choice.lower() == "y":
            print(f"{Fore.LIGHTGREEN_EX}Creating the directory")
            await asyncio.sleep(1)
            try:
                os.system("mkdir -p " + settings['directory'])
            except Exception as e:
                print(f"Could not create directory: {e}")
                await asyncio.sleep(1)
                exit(0)
        else:
            print(f"\n {Fore.LIGHTRED_EX}Goodbye. \n")
            await asyncio.sleep(1)
            exit(0)
    await clear()
    # ANSI-coloured startup banner.
    print(f"""
 [38;2;73;204;255m│ [38;2;73;204;255mM[38;2;93;209;255mi[38;2;113;214;255mt[38;2;133;219;255mi[38;2;153;224;255mg[38;2;173;229;255ma[38;2;193;234;255mt[38;2;213;239;255mo[38;2;233;244;255mr[0;00m 1.2
 [38;2;103;212;255m│
 [38;2;133;220;255m│ {Fore.WHITE}Efficiently capture & mitigate DDoS attacks in real-time.
 [38;2;193;236;255m│ {Fore.WHITE}Developers, Flairings.
""")
    await listen()
checks = 0  # consecutive over-threshold samples observed


async def listen():
    global checks
    try:
        # Best effort: include the public IP in the startup message.
        ip = get('https://api.ipify.org').text
        await event(f"Started traffic listener on {settings['interface']} : {ip}")
    except Exception:
        await event(f"Started traffic listener on {settings['interface']}")
    while True:
        # Sample interface counters one second apart to derive pps / mbps.
        pps_old = os.popen(f"grep {settings['interface']}: /proc/net/dev | cut -d : -f2 | awk " + "'{ print $2 }'").read().replace("\n", "")
        mbps_old = os.popen(f"cat /sys/class/net/{settings['interface']}/statistics/rx_bytes").read().replace("\n", "")
        time.sleep(1)
        mbps_new = os.popen(f"cat /sys/class/net/{settings['interface']}/statistics/rx_bytes").read().replace("\n", "")
        pps_new = os.popen(f"grep {settings['interface']}: /proc/net/dev | cut -d : -f2 | awk " + "'{ print $2 }'").read().replace("\n", "")
        mbps = (int(mbps_new)) - (int(mbps_old))
        mbps = mbps / 125000  # bytes/s -> megabits/s (125000 bytes = 1 Mbit)
        pps = (int(pps_new)) - (int(pps_old))
        if pps > settings['pps_threshold'] and mbps > settings['mbps_threshold']:
            checks += 1
            if checks > settings['checks']:
                attack_detected = True  # NOTE(review): assigned but never read
                await detected(pps, mbps)
                checks = 0
        else:
            pass
        global file
        # Refresh the capture filename each pass so a new attack gets its own pcap.
        # NOTE(review): indentation was reconstructed — these three lines are
        # assumed to run once per loop iteration; confirm against the original.
        fixed_time_date = f"{datetime.datetime.now().strftime('%H:%M:%S, %m/%d/%Y')}".replace(",", "-").replace("/", "-").replace(" ", "")
        file = f"{settings['directory']}attack-{fixed_time_date}.pcap"
async def detected(pps, mbps):
    # Announce the detection, capture traffic, mitigate, then cool down.
    await asyncio.sleep(2)
    await detection(f"PPS & MBPS Threshold reached | {pps} pp/s, {mbps} mbit/s.")
    await action(f"Capturing all incoming traffic...")
    await capture(pps, mbps)
    await asyncio.sleep(2)
    await event(f"Successfully captured possible attack to {file}")
    await mitigate()
    await action(f"Sleeping for {settings['sleep_time']} seconds.")
    print("\n")
    await asyncio.sleep(int(settings['sleep_time']))
    await event("Waiting for a new attack.")
dropped = 0  # count of IPs blacklisted during the current mitigation pass


async def mitigate():  # fbi#0001 helped with the netstat commands <3
    global dropped
    await mitigation(f"Attempting to mitigate incoming tcp connections...")  # reminder: probably should thread this
    # Sample established connections on the protected port for ~5 seconds.
    os.system(f"netstat -tn 2>/dev/null | grep :{settings['port']}" + " | awk '{print $5}' | cut -d: -f1 | sort | uniq | sort -nr > temp_log.txt")
    for i in range(int(5)):
        time.sleep(1)
        os.system(f"netstat -tn 2>/dev/null | grep :{settings['port']}" + "| awk '{print $5}' | cut -d: -f1 | sort | uniq | sort -nr >> temp_log.txt")
    os.system("sudo cat temp_log.txt | sort | uniq | sort -nr > logs.txt")
    logs = open('logs.txt', 'r')  # NOTE(review): never closed; consider a with-block
    start = time.time()
    for line in logs:
        if line.strip() in settings['whitelist']:
            await mitigation(f"{line.strip()} is whitelisted.")
            pass
        else: #cba to find out if the ip is already in iptables but pretty sure it overrides it anyways
            # Drop the source IP via iptables; duplicates surface as "File exists".
            process = subprocess.Popen([f"iptables -A INPUT -s {line.strip()} -j DROP"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
            stdout, stderr = process.communicate()
            stderr = f"{stderr}"
            if stderr.__contains__("File exists"):
                await mitigation(f"{line.strip()} is already blacklisted.")
                pass
            else:
                dropped += 1
                await mitigation(f"{line.strip()} has been blacklisted.")
    if dropped > 0:
        await mitigation(f"Successfully blacklisted {dropped} IPs in {round(time.time() - start, 2)} seconds.")
        dropped = 0
    else:
        dropped = 0
        await mitigation("Unable to drop any IPs, perhaps the method isn't TCP based.")
async def capture(pps, mbps):
    # Blocking tcpdump of `dump_size` packets into the current `file` path.
    process = subprocess.Popen(f"sudo tcpdump -i {settings['interface']} -t -w {file} -c {settings['dump_size']}", shell=True, stdin=subprocess.PIPE, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, close_fds=True)
    out, err = process.communicate()
if __name__ == '__main__':
    # Linux-only, and must run as root (iptables / tcpdump / /proc access).
    if platform == "linux" or platform == "linux2":
        if os.geteuid() != 0:
            exit("You need to have root privileges to run this script.\nPlease try again, this time using 'sudo'. Exiting.")
        else:
            os.system("export PYTHONIOENCODING=UTF-8")
            try:
                init()
                loop = asyncio.get_event_loop()
                loop.run_until_complete(main())
            except KeyboardInterrupt:
                print(f"{Fore.LIGHTRED_EX}\n Goodbye. \n")
    else:
        print("This script is developed for Linux operating systems only \nIf this is linux and you are seeing this message please create an issue at https://github.com/Flairings")
        time.sleep(5)
        exit(0)
# copyright, flairings.agency.
|
StarcoderdataPython
|
69858
|
#!/usr/bin/env python
#
# Author: <NAME> <<EMAIL>>
#
'''
Some hacky functions
'''
import os, sys
import imp
import tempfile
import shutil
import functools
import itertools
import math
import ctypes
import numpy
import h5py
from pyscf.lib import param
c_double_p = ctypes.POINTER(ctypes.c_double)
c_int_p = ctypes.POINTER(ctypes.c_int)
c_null_ptr = ctypes.POINTER(ctypes.c_void_p)
def load_library(libname):
    """Load the compiled shared library *libname* located next to this module."""
    # numpy 1.6 has bug in ctypeslib.load_library, see numpy/distutils/misc_util.py
    # NOTE(review): the substring test also matches versions like 1.16 — confirm.
    if '1.6' in numpy.__version__:
        # Work around the bug by resolving the platform suffix manually.
        if (sys.platform.startswith('linux') or
            sys.platform.startswith('gnukfreebsd')):
            so_ext = '.so'
        elif sys.platform.startswith('darwin'):
            so_ext = '.dylib'
        elif sys.platform.startswith('win'):
            so_ext = '.dll'
        else:
            raise OSError('Unknown platform')
        libname_so = libname + so_ext
        return ctypes.CDLL(os.path.join(os.path.dirname(__file__), libname_so))
    else:
        _loaderpath = os.path.dirname(__file__)
        return numpy.ctypeslib.load_library(libname, _loaderpath)
#Fixme, the standard resouce module gives wrong number when objects are released
#see http://fa.bianp.net/blog/2013/different-ways-to-get-memory-consumption-or-lessons-learned-from-memory_profiler/#fn:1
#or use slow functions as memory_profiler._get_memory did
CLOCK_TICKS = os.sysconf("SC_CLK_TCK")  # kernel clock ticks per second
PAGESIZE = os.sysconf("SC_PAGE_SIZE")  # memory page size in bytes


def current_memory():
    """Return (rss, vms) of the current process in MB; (0, 0) off Linux."""
    #import resource
    #return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
    if sys.platform.startswith('linux'):
        # /proc/<pid>/statm reports sizes in pages: first two fields are vms, rss.
        with open("/proc/%s/statm" % os.getpid()) as f:
            vms, rss = [int(x)*PAGESIZE for x in f.readline().split()[:2]]
            return rss/1e6, vms/1e6
    else:
        return 0, 0
def num_threads():
    """Return the OMP_NUM_THREADS setting, or the machine's CPU count."""
    env_value = os.environ.get('OMP_NUM_THREADS')
    if env_value is not None:
        return int(env_value)
    import multiprocessing
    return multiprocessing.cpu_count()
def c_int_arr(m):
    """Copy array-like *m* into a new row-major (C-order) ctypes c_int array."""
    flat = numpy.array(m).flatten('C')
    # Build a fresh ctypes array rather than aliasing the numpy buffer:
    # a data_as() pointer would dangle once the temporary ndarray is freed.
    return (ctypes.c_int * flat.size)(*flat)
def f_int_arr(m):
    """Copy array-like *m* into a new column-major (Fortran-order) ctypes c_int array."""
    flat = numpy.array(m).flatten('F')
    return (ctypes.c_int * flat.size)(*flat)
def c_double_arr(m):
    """Copy array-like *m* into a new row-major ctypes c_double array."""
    flat = numpy.array(m).flatten('C')
    return (ctypes.c_double * flat.size)(*flat)
def f_double_arr(m):
    """Copy array-like *m* into a new column-major ctypes c_double array."""
    flat = numpy.array(m).flatten('F')
    return (ctypes.c_double * flat.size)(*flat)
def member(test, x, lst):
    """Return True if any element of *lst* matches *x* under the binary *test*."""
    return any(test(x, item) for item in lst)
def remove_dup(test, lst, from_end=False):
    """Deduplicate *lst* under the binary predicate *test*.

    With test=None, return set(lst). With from_end=True, keep the LAST
    occurrence of each duplicate group (result is in reversed encounter
    order, matching the original implementation).
    """
    if test is None:
        return set(lst)
    ordered = list(reversed(lst)) if from_end else lst
    seen = []
    for item in ordered:
        # membership check inlined from member(): any pairwise match counts
        if not any(test(item, prev) for prev in seen):
            seen.append(item)
    return seen
def remove_if(test, lst):
    """Return a new list containing the elements of *lst* that fail *test*."""
    kept = []
    for item in lst:
        if not test(item):
            kept.append(item)
    return kept
def find_if(test, lst):
    """Return the first element of *lst* satisfying *test*; ValueError if none."""
    sentinel = object()
    found = next((item for item in lst if test(item)), sentinel)
    if found is sentinel:
        raise ValueError('No element of the given list matches the test condition.')
    return found
def arg_first_match(test, lst):
    """Return the index of the first element of *lst* satisfying *test*; ValueError if none."""
    idx = next((i for i, item in enumerate(lst) if test(item)), None)
    if idx is None:
        raise ValueError('No element of the given list matches the test condition.')
    return idx
def _balanced_partition(cum, ntasks):
segsize = float(cum[-1]) / ntasks
bounds = numpy.arange(ntasks+1) * segsize
displs = abs(bounds[:,None] - cum).argmin(axis=1)
return displs
def _blocksize_partition(cum, blocksize):
n = len(cum) - 1
displs = [0]
p0 = 0
for i in range(1, n):
if cum[i+1]-cum[p0] > blocksize:
displs.append(i)
p0 = i
displs.append(n)
return displs
def flatten(lst):
    '''flatten nested lists
    x[0] + x[1] + x[2] + ...

    Examples:

    >>> flatten([[0, 2], [1], [[9, 8, 7]]])
    [0, 2, 1, [9, 8, 7]]
    '''
    # One level only: nested comprehension instead of itertools.chain.
    return [item for sub in lst for item in sub]
def prange(start, end, step):
    """Yield successive (lo, hi) windows over range(start, end, step), clipped at end."""
    for lo in range(start, end, step):
        yield lo, min(lo + step, end)
def prange_tril(start, stop, blocksize):
    '''for p0, p1 in prange_tril: p1*(p1+1)/2-p0*(p0+1)/2 < blocksize'''
    # Cumulative triangular counts: segment [p0, p1) covers at most
    # `blocksize` elements of a lower-triangular iteration.
    idx = numpy.arange(start, stop+1)
    cum_costs = idx*(idx+1)//2 - start*(start+1)//2
    displs = [x+start for x in _blocksize_partition(cum_costs, blocksize)]
    # Pair consecutive boundaries into (p0, p1) ranges.
    return zip(displs[:-1], displs[1:])
class ctypes_stdout(object):
    '''make c-printf output to string, but keep python print in /dev/pts/1.
    Note it cannot correctly handle c-printf with GCC, don't know why.
    Usage:
        with ctypes_stdout() as stdout:
            ...
        print(stdout.read())'''
    def __enter__(self):
        sys.stdout.flush()
        self._contents = None
        # Redirect the process-level stdout fd to a tmpfs file so that C-level
        # printf lands there, while Python's print keeps the original fd.
        self.old_stdout_fileno = sys.stdout.fileno()
        self.bak_stdout_fd = os.dup(self.old_stdout_fileno)
        self.bak_stdout = sys.stdout
        self.fd, self.ftmp = tempfile.mkstemp(dir='/dev/shm')
        os.dup2(self.fd, self.old_stdout_fileno)
        sys.stdout = os.fdopen(self.bak_stdout_fd, 'w')
        return self

    def __exit__(self, type, value, traceback):
        sys.stdout.flush()
        os.fsync(self.fd)
        self._contents = open(self.ftmp, 'r').read()
        # Restore the original fd and Python-level stdout object.
        os.dup2(self.bak_stdout_fd, self.old_stdout_fileno)
        sys.stdout = self.bak_stdout  # self.bak_stdout_fd is closed
        #os.close(self.fd) is closed when os.fdopen is closed
        os.remove(self.ftmp)

    def read(self):
        # After __exit__ the captured text is cached; during capture, read the
        # temp file directly.
        if self._contents:
            return self._contents
        else:
            sys.stdout.flush()
            #f = os.fdopen(self.fd, 'r') # need to rewind(0) before reading
            #f.seek(0)
            return open(self.ftmp, 'r').read()
class capture_stdout(object):
    '''redirect all stdout (c printf & python print) into a string
    Usage:
        with capture_stdout() as stdout:
            ...
        print(stdout.read())
    '''
    def __enter__(self):
        sys.stdout.flush()
        self._contents = None
        # Point the process-level stdout fd at a tmpfs file; both C printf
        # and Python print are captured (unlike ctypes_stdout above).
        self.old_stdout_fileno = sys.stdout.fileno()
        self.bak_stdout_fd = os.dup(self.old_stdout_fileno)
        self.fd, self.ftmp = tempfile.mkstemp(dir='/dev/shm')
        os.dup2(self.fd, self.old_stdout_fileno)
        return self

    def __exit__(self, type, value, traceback):
        sys.stdout.flush()
        self._contents = open(self.ftmp, 'r').read()
        # Restore the saved fd.
        os.dup2(self.bak_stdout_fd, self.old_stdout_fileno)
        os.close(self.bak_stdout_fd)
        #os.close(self.fd) will be closed when os.fdopen is closed
        os.remove(self.ftmp)

    def read(self):
        # Cached after __exit__; read the live temp file while still capturing.
        if self._contents:
            return self._contents
        else:
            sys.stdout.flush()
            #f = os.fdopen(self.fd, 'r') # need to rewind(0) before reading
            #f.seek(0)
            return open(self.ftmp, 'r').read()
class quite_run(object):
    '''output nothing

    Examples
    --------
    with quite_run():
        ...
    '''
    # NOTE(review): name is presumably a typo for "quiet_run"; kept because
    # renaming would break existing callers.
    def __enter__(self):
        sys.stdout.flush()
        # Run inside a throwaway tmpfs working directory and send the
        # process-level stdout to /dev/null.
        self.dirnow = os.getcwd()
        self.tmpdir = tempfile.mkdtemp(dir='/dev/shm')
        os.chdir(self.tmpdir)
        self.old_stdout_fileno = sys.stdout.fileno()
        self.bak_stdout_fd = os.dup(self.old_stdout_fileno)
        self.fnull = open(os.devnull, 'wb')
        os.dup2(self.fnull.fileno(), self.old_stdout_fileno)

    def __exit__(self, type, value, traceback):
        sys.stdout.flush()
        # Restore stdout, remove the scratch directory, return to the old cwd.
        os.dup2(self.bak_stdout_fd, self.old_stdout_fileno)
        self.fnull.close()
        shutil.rmtree(self.tmpdir)
        os.chdir(self.dirnow)
# from pygeocoder
# this decorator lets me use methods as both static and instance methods
# In contrast to classmethod, when obj.function() is called, the first
# argument is obj in omnimethod rather than obj.__class__ in classmethod
class omnimethod(object):
    """Descriptor passing the instance (or None on class access) as first argument."""
    def __init__(self, func):
        self.func = func

    def __get__(self, instance, owner):
        # Bind `instance` (None when accessed on the class) as the leading arg.
        return functools.partial(self.func, instance)
class StreamObject(object):
    '''Base class enabling a method-chaining ("stream") style of use.

    Three helpers pipe the computing stream:

    1. ``.set`` updates object attributes, e.g.
       ``mf = scf.RHF(mol).set(conv_tol=1e-5)`` is identical to
       ``mf = scf.RHF(mol); mf.conv_tol=1e-5``
    2. ``.run`` executes the kernel function (positional arguments are passed
       through to it); keyword arguments update attributes first, e.g.
       ``mf = scf.RHF(mol).run(dm_init, conv_tol=1e-5)`` is identical to
       ``mf = scf.RHF(mol); mf.conv_tol=1e-5; mf.kernel(dm_init)``
    3. ``.apply`` applies the given function/class to the current object
       (function arguments and keyword arguments are passed through), e.g.
       ``mol.apply(scf.RHF).run().apply(mcscf.CASSCF, 6, 4, frozen=4)`` is
       identical to
       ``mf = scf.RHF(mol); mf.kernel(); mcscf.CASSCF(mf, 6, 4, frozen=4)``
    '''
    verbose = 0
    stdout = sys.stdout
    # Attribute names recognized by check_sanity(); subclasses extend this.
    _keys = {'verbose', 'stdout'}

    def run(self, *args, **kwargs):
        '''Update attributes from ``kwargs``, then call ``self.kernel(*args)``.

        Returns self so calls can be chained.
        '''
        self.set(**kwargs)
        self.kernel(*args)
        return self

    def set(self, **kwargs):
        '''Update attributes of the current object; returns self.'''
        for key, value in kwargs.items():
            setattr(self, key, value)
        return self

    def apply(self, fn, *args, **kwargs):
        '''Apply fn to this object and the rest arguments:
        returns ``fn(self, *args, **kwargs)``.
        '''
        return fn(self, *args, **kwargs)

    def check_sanity(self):
        '''Check misinput of class attributes and whether a class method is
        overwritten.  Attributes prefixed with "_" are not checked.
        '''
        # Delegates to the module-level check_sanity() helper; only active
        # when verbose output is requested.
        if self.verbose > 0 and hasattr(self, '_keys'):
            check_sanity(self, self._keys, self.stdout)
        return self
# Messages already emitted; used to warn only once per process.
_warn_once_registry = {}

def _warn_once(msg, stdout):
    '''Write *msg* to stderr (and *stdout* if it is not sys.stdout), at most
    once per process for any given message text.
    '''
    if msg not in _warn_once_registry:
        _warn_once_registry[msg] = 1
        sys.stderr.write(msg)
        if stdout is not sys.stdout:
            stdout.write(msg)

def check_sanity(obj, keysref, stdout=sys.stdout):
    '''Check misinput of class attributes, check whether a class method is
    overwritten.  It does not check the attributes which are prefixed with
    "_".

    Parameters:
        obj     : instance whose __dict__ is checked.
        keysref : iterable of expected attribute names.
        stdout  : extra stream for warnings (besides stderr).

    Returns *obj* unchanged.
    '''
    objkeys = [x for x in obj.__dict__ if not x.startswith('_')]
    keysub = set(objkeys) - set(keysref)
    if keysub:
        class_attr = set(dir(obj.__class__))
        # Instance attributes shadowing class attributes/methods.
        keyin = keysub.intersection(class_attr)
        if keyin:
            _warn_once('Overwrite attributes %s of %s\n' %
                       (' '.join(keyin), obj.__class__), stdout)
        # Instance attributes unknown to both keysref and the class:
        # most likely misspelled settings.
        keydiff = keysub - class_attr
        if keydiff:
            _warn_once('%s does not have attributes %s\n' %
                       (obj.__class__, ' '.join(keydiff)), stdout)
    return obj
def with_doc(doc):
    '''Decorator factory that installs *doc* as the decorated function's
    ``__doc__``.

        @with_doc(doc)
        def fn():
            ...

    is equivalent to defining ``fn`` and then setting ``fn.__doc__ = doc``.
    '''
    def decorator(fn):
        fn.__doc__ = doc
        return fn
    return decorator
def overwrite_mro(obj, mro):
    '''A hacky function to overwrite the __mro__ attribute.

    Returns a new instance whose class reports the given *mro*.  Only the
    Python 2 branch is implemented; on Python 3 this raises
    NotImplementedError.
    '''
    class HackMRO(type):
        pass
    # Overwrite type.mro function so that Temp class can use the given mro
    HackMRO.mro = lambda self: mro
    if sys.version_info < (3,):
        # Python 2 requests a metaclass via the __metaclass__ attribute.
        class Temp(obj.__class__):
            __metaclass__ = HackMRO
    else:
        #class Temp(obj.__class__, metaclass=HackMRO):
        #    pass
        raise NotImplementedError()
    obj = Temp()
    # Delete mro function otherwise all subclass of Temp are not able to
    # resolve the right mro
    del(HackMRO.mro)
    return obj
def izip(*args):
    '''Version-agnostic lazy zip: itertools.izip on Python 2, builtin zip
    on Python 3 (where zip is already lazy).
    '''
    if sys.version_info[0] < 3:
        return itertools.izip(*args)
    return zip(*args)
from threading import Thread
from multiprocessing import Queue, Process
class ProcessWithReturnValue(Process):
    '''multiprocessing.Process that hands the target's return value back to
    the parent: ``join()`` (alias ``get()``) returns what the target returned,
    shipped through an internal Queue.
    '''
    def __init__(self, group=None, target=None, name=None, args=(),
                 kwargs=None):
        # Process.__init__ requires a dict for kwargs; the original passed
        # the default None straight through, making dict(kwargs) inside
        # Process.__init__ raise TypeError.
        if kwargs is None:
            kwargs = {}
        self._q = Queue()
        def qwrap(*args, **kwargs):
            # Run in the child process; push the result to the parent.
            self._q.put(target(*args, **kwargs))
        Process.__init__(self, group, qwrap, name, args, kwargs)

    def join(self):
        '''Wait for the child to finish and return the target's result.'''
        Process.join(self)
        return self._q.get()
    get = join
class ThreadWithReturnValue(Thread):
    '''threading.Thread whose ``join()`` (alias ``get()``) returns the
    target's return value, shipped through an internal Queue.
    '''
    def __init__(self, group=None, target=None, name=None, args=(),
                 kwargs=None):
        self._q = Queue()
        def _capture(*a, **kw):
            # Runs on the worker thread; forward the result to join().
            self._q.put(target(*a, **kw))
        Thread.__init__(self, group, _capture, name, args, kwargs)

    def join(self):
        '''Wait for the worker thread and return the target's result.'''
        Thread.join(self)
        return self._q.get()
    get = join
def background_thread(func, *args, **kwargs):
    '''Launch ``func(*args, **kwargs)`` on a background thread and return the
    started ThreadWithReturnValue (its join() yields the result).
    '''
    worker = ThreadWithReturnValue(target=func, args=args, kwargs=kwargs)
    worker.start()
    return worker
def background_process(func, *args, **kwargs):
    '''Launch ``func(*args, **kwargs)`` in a background process and return
    the started ProcessWithReturnValue (its join() yields the result).
    '''
    worker = ProcessWithReturnValue(target=func, args=args, kwargs=kwargs)
    worker.start()
    return worker
# Convenience aliases: bg/background/bg_thread run a function on a background
# thread; bp/bg_process run it in a background process.
bg = background = bg_thread = background_thread
bp = bg_process = background_process
class H5TmpFile(h5py.File):
    '''h5py File backed by an automatically-deleted temporary file.

    When no filename is given, a NamedTemporaryFile is created in
    param.TMPDIR only to reserve a unique name; the local handle is dropped
    when __init__ returns, unlinking the path while h5py keeps its own
    descriptor open (POSIX-only trick -- presumably intentional for
    automatic cleanup; confirm before relying on it elsewhere).
    '''
    def __init__(self, filename=None, *args, **kwargs):
        if filename is None:
            tmpfile = tempfile.NamedTemporaryFile(dir=param.TMPDIR)
            filename = tmpfile.name
        h5py.File.__init__(self, filename, *args, **kwargs)

    def __del__(self):
        # A destructor must never raise: close() can fail when the file is
        # already closed, when __init__ never completed, or during
        # interpreter shutdown after h5py internals are torn down.
        try:
            self.close()
        except (AttributeError, ValueError, ImportError):
            pass
def finger(a):
    '''Cheap deterministic scalar fingerprint of array *a*:
    dot(cos([0, 1, ..., n-1]), a.ravel()).
    '''
    flat = a.ravel()
    phases = numpy.cos(numpy.arange(flat.size))
    return numpy.dot(phases, flat)
def ndpointer(*args, **kwargs):
    '''Like numpy.ctypeslib.ndpointer, but the returned type also accepts
    None, so NULL can be passed for optional array arguments of a C function.
    '''
    base = numpy.ctypeslib.ndpointer(*args, **kwargs)

    @classmethod
    def from_param(cls, obj):
        # Let None through untouched; defer everything else to numpy.
        return None if obj is None else base.from_param(obj)

    return type(base.__name__, (base,), {'from_param': from_param})
def _import_lock_held():
    '''Best-effort check whether the global import lock is held.

    The ``imp`` module was deprecated for years and removed in Python 3.12;
    when it is unavailable, report False so the asynchronous path is used.
    '''
    try:
        import imp
        return imp.lock_held()
    except ImportError:
        return False


class call_in_background(object):
    '''Asynchronously execute the given function(s).

    Usage:
        with call_in_background(fun) as async_fun:
            async_fun(a, b)   # == fun(a, b)
            do_something_else()

        with call_in_background(fun1, fun2) as (afun1, afun2):
            afun2(a, b)
            do_something_else()
            afun1(a, b)
            do_something_else()

    Calls are serialized: every asynchronous call first joins the previously
    launched thread, so at most one background thread runs at a time and the
    calls execute in submission order (all wrapped functions share the single
    ``self.handler`` slot).
    '''
    def __init__(self, *fns):
        self.fns = fns
        self.handler = None

    def __enter__(self):
        if _import_lock_held():
            # Some modules like nosetests, coverage etc
            #   python -m unittest test_xxx.py  or  nosetests test_xxx.py
            # hang when Python multi-threading was used in the import stage
            # due to a (Python import lock) bug in the threading module.
            # See also
            #   https://github.com/paramiko/paramiko/issues/104
            #   https://docs.python.org/2/library/threading.html#importing-in-threaded-code
            # Disable the asynchronous mode for safe importing: hand the
            # function back unchanged so calls run synchronously.
            def def_async_fn(fn):
                return fn
        else:
            def def_async_fn(fn):
                def async_fn(*args, **kwargs):
                    # Wait for the previous call before launching a new one.
                    if self.handler is not None:
                        self.handler.join()
                    self.handler = Thread(target=fn, args=args, kwargs=kwargs)
                    self.handler.start()
                    return self.handler
                return async_fn

        if len(self.fns) == 1:
            return def_async_fn(self.fns[0])
        else:
            return [def_async_fn(fn) for fn in self.fns]

    def __exit__(self, type, value, traceback):
        # Ensure the last pending call finished before leaving the block.
        if self.handler is not None:
            self.handler.join()
if __name__ == '__main__':
    # Smoke test for prange_tril (defined earlier in this module) --
    # presumably prints (start, stop) index ranges plus the number of
    # lower-triangle elements each range covers; confirm against prange_tril.
    for i,j in prange_tril(0, 90, 300):
        print(i, j, j*(j+1)//2-i*(i+1)//2)
|
StarcoderdataPython
|
197223
|
<gh_stars>0
from setuptools import setup, find_packages
from mezzanine_youth_sports import __version__
import subprocess
def get_long_desc():
    """Use Pandoc to convert the readme to ReST for the PyPI.

    Returns the converted text (as bytes, from check_output), or None --
    after printing a warning -- when pandoc is missing or conversion fails.
    """
    try:
        return subprocess.check_output(
            ['pandoc', '-f', 'markdown', '-t', 'rst', 'README.md'])
    except (OSError, subprocess.CalledProcessError):
        # The original bare ``except:`` swallowed everything, including
        # KeyboardInterrupt; only treat "pandoc unavailable / conversion
        # failed" as non-fatal.
        print("WARNING: The long readme wasn't converted properly")
# Package metadata for the PyPI distribution.
setup(
    name='mezzanine-youth-sports',
    version=__version__,
    url='https://github.com/kumichou/mezzanine-youth-sports',
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    description='Youth Sports management application for the Mezzanine CMS',
    # NOTE(review): get_long_desc() returns bytes (from check_output) or None
    # on failure -- confirm both render acceptably on PyPI.
    long_description=get_long_desc(),
    keywords='django, mezzanine, youth, sports',
    packages=find_packages(),
    # NOTE(review): ('setuptools') is a plain string, not a 1-tuple --
    # presumably ('setuptools',) or ['setuptools'] was intended.
    setup_requires=('setuptools'),
    install_requires=('setuptools', 'mezzanine'),
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Internet :: WWW/HTTP :: WSGI',
        'Topic :: Software Development :: Libraries :: Application Frameworks',
        'Topic :: Software Development :: Libraries :: Python Modules',],
    zip_safe=False,
    include_package_data=True,
)
|
StarcoderdataPython
|
3260058
|
<reponame>pybpod/pybpod-gui-plugin-soundcard
import os
# Absolute path to the plugin icon shipped in ./resources next to this file.
SOUNDCARD_PLUGIN_ICON = os.path.join(os.path.dirname(__file__), 'resources', 'sound-card.png')
# (width, height) of the sound-card plugin window, in pixels.
SOUNDCARD_PLUGIN_WINDOW_SIZE = 500, 600
|
StarcoderdataPython
|
1786938
|
# Compatibility shim: use the real implementations from IPython.core when
# available, otherwise fall back to the local re-implementation below.
try:
    from IPython.core import DataMetadata, RecursiveObject, ReprGetter, get_repr_mimebundle
except ImportError:
    from collections import namedtuple

    class RecursiveObject:
        """
        Default recursive object that provides a recursion repr if needed.
        You may register a formatter for this object that will be called when
        recursion is reached.
        """
        def __init__(self, already_seen):
            # The object whose formatting recursed; referenced in the reprs.
            self.seen = already_seen
            pass
        def __repr__(self):
            return '<recursion ... {}>'.format(str(self.seen))
        def _repr_html_(self):
            # Escape so arbitrary reprs stay safe inside HTML output.
            import html
            return '<recursion ... {}>'.format(html.escape(str(self.seen)))

    # (data, metadata) pair as produced by the display formatter.
    DataMetadata = namedtuple('DataMetadata', ('data','metadata'))

    class ReprGetter:
        """
        Object to carry recursion state when computing formatting information
        for rich representations.
        Useful when computing representations of nested objects concurrently,
        as they may refer to common resources.
        """
        __slots__ = ('_objs',)
        def __init__(self):
            # Set of (id(obj), mimetype) pairs currently being formatted;
            # used to detect recursion.
            self._objs = set()
        def get_repr_mimebundle(self, obj, include=None, exclude=None, *, on_recursion=RecursiveObject):
            """
            Return the representations of an object and associated metadata.
            A given object can have many representations available, defined in
            many ways: `_repr_*_` methods, `_repr_mimebundle_`, or
            user-registered formatters for types.
            When given an object, :any:`get_repr_mimebundle` will search for
            the various formatting options with their associated priority and
            return the requested representation and associated metadata.
            Parameters
            ----------
            obj : an object
                The Python object to get the representation data for.
            include : list, tuple or set, optional
                A list of format type strings (MIME types) to include in the
                format data dict. If this is set *only* the format types
                included in this list will be computed.
            exclude : list, tuple or set, optional
                A list of format type strings (MIME types) to exclude from the
                format data dict. If this is set all format types will be
                computed, except for those included in this argument.
            on_recursion: callable
                Returns an object to compute the representation of when
                recursion is detected.
            Returns
            -------
            (data, metadata) : named tuple of two dicts
                - 0/.data: See :any:`DisplayFormatter.format`.
                - 1/.metadata: See :any:`DisplayFormatter.format`
            Note
            ----
            When :any:`get_repr_mimebundle` detects it is recursively called,
            it will attempt to return the representation of
            :class:`RecursiveObject`. You may register extra formatters for
            :class:`RecursiveObject`.
            If you are computing object representations concurrently (threads,
            coroutines, ...), you should instantiate one :class:`ReprGetter`
            per task to avoid race conditions.
            If a specific mimetype formatter needs to call
            `get_repr_mimebundle()` for another mime format, it must pass the
            mimetype values it desires to `include` in order to correctly
            avoid recursion.
            See Also
            --------
            :func:`display`, :any:`DisplayFormatter.format`
            """
            from IPython.core.interactiveshell import InteractiveShell
            if isinstance(include, str):
                include = (include,)
            # Recursion is tracked per (object, mimetype) pair; None stands
            # for "all formats".
            if not include:
                keys = {(id(obj), None)}
            else:
                keys = {(id(obj), f) for f in include}
            fmt = InteractiveShell.instance().display_formatter.format
            # Special-case the `object` type itself -- presumably to avoid
            # pathological formatting of the base type; confirm upstream.
            if id(obj) == id(object):
                return DataMetadata({'text/plain':"<class 'object'>"}, {})
            if self._objs.intersection(keys):
                # Already formatting obj for one of these mimetypes:
                # recursion detected, format the placeholder instead.
                return DataMetadata(*fmt(on_recursion(obj), include=include, exclude=exclude))
            else:
                try:
                    self._objs.update(keys)
                    return DataMetadata(*fmt(obj, include=include, exclude=exclude))
                finally:
                    # Always release the markers, even if formatting raised.
                    self._objs.difference_update(keys)

    # Expose this for convenience at the top level. Similar to what the random
    # module in python does. If you want to avoid weird behavior from concurrency:
    # Instantiate your own.
    get_repr_mimebundle = ReprGetter().get_repr_mimebundle
|
StarcoderdataPython
|
1777731
|
<filename>src/endpoints/order.py
import oandapyV20.endpoints.orders as orders
from connection import Connection
import json
class Order:
    '''OANDA order built from the template in src/orderbody.json with the
    requested units and instrument filled in.
    '''
    # Shared connection and account id, resolved once at class creation.
    conn = Connection.getInstance()
    accountID = conn.config['ACCOUNT_ID']

    def __init__(self, units, instrument):
        with open('src/orderbody.json', 'r') as fobj:
            body = json.load(fobj)
        body['order']['units'] = units
        body['order']['instrument'] = instrument
        self.data = body
        self.units = units

    def create_order(self):
        '''Submit the order through the shared API connection and return the
        broker's response.
        '''
        request = orders.OrderCreate(self.accountID, self.data)
        return self.conn.API.request(request)
|
StarcoderdataPython
|
48728
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Edgewall Software
# Copyright (C) 2015 <NAME> <<EMAIL>>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://projects.edgewall.com/trac/.
#
# Author: <NAME> <<EMAIL>>
import urllib2
from trac.admin import IAdminPanelProvider
from trac.config import BoolOption, IntOption
from trac.core import Component, implements
from trac.web.api import HTTPNotFound
from trac.web.chrome import (
ITemplateProvider, add_link, add_script, add_script_data, add_stylesheet)
from tracspamfilter.api import _, gettext, ngettext
from tracspamfilter.filters.akismet import AkismetFilterStrategy
from tracspamfilter.filters.blogspam import BlogSpamFilterStrategy
from tracspamfilter.filters.botscout import BotScoutFilterStrategy
from tracspamfilter.filters.fspamlist import FSpamListFilterStrategy
from tracspamfilter.filters.stopforumspam import StopForumSpamFilterStrategy
from tracspamfilter.filtersystem import FilterSystem
from tracspamfilter.model import LogEntry, Statistics
# Optional filter strategies: each depends on a third-party package; when the
# import fails the strategy name is bound to None so the admin panels can
# degrade gracefully (callers test truthiness before instantiating).
try:
    from tracspamfilter.filters.bayes import BayesianFilterStrategy
except ImportError:  # SpamBayes not installed
    BayesianFilterStrategy = None
try:
    from tracspamfilter.filters.httpbl import HttpBLFilterStrategy
    from tracspamfilter.filters.ip_blacklist import IPBlacklistFilterStrategy
    from tracspamfilter.filters.url_blacklist import URLBlacklistFilterStrategy
except ImportError:  # DNS python not installed
    HttpBLFilterStrategy = None
    IPBlacklistFilterStrategy = None
    URLBlacklistFilterStrategy = None
try:
    from tracspamfilter.filters.mollom import MollomFilterStrategy
except ImportError:  # Mollom not installed
    MollomFilterStrategy = None
class SpamFilterAdminPageProvider(Component):
    """Web administration panel for configuring and monitoring the spam
    filtering system.

    NOTE(review): this module is Python 2 code (``urllib2``,
    ``except X, e`` syntax elsewhere in the file).
    """
    implements(ITemplateProvider)
    implements(IAdminPanelProvider)

    # Bounds and default for the monitoring page size.
    MAX_PER_PAGE = 10000
    MIN_PER_PAGE = 5
    DEF_PER_PAGE = IntOption('spam-filter', 'spam_monitor_entries', '100',
        "How many monitor entries are displayed by default "
        "(between 5 and 10000).", doc_domain='tracspamfilter')

    train_only = BoolOption('spam-filter', 'show_train_only', False,
        "Show the buttons for training without deleting entry.",
        doc_domain='tracspamfilter')

    # IAdminPanelProvider methods

    def get_admin_panels(self, req):
        # Two panels, gated on separate permissions.
        if 'SPAM_CONFIG' in req.perm:
            yield ('spamfilter', _("Spam Filtering"),
                   'config', _("Configuration"))
        if 'SPAM_MONITOR' in req.perm:
            yield ('spamfilter', _("Spam Filtering"),
                   'monitor', _("Monitoring"))

    def render_admin_panel(self, req, cat, page, path_info):
        # Dispatch between the configuration panel and the monitoring
        # panel (list view or single-entry view).
        if page == 'config':
            if req.method == 'POST':
                if self._process_config_panel(req):
                    req.redirect(req.href.admin(cat, page))
            data = self._render_config_panel(req, cat, page)
        else:
            if req.method == 'POST':
                if self._process_monitoring_panel(req):
                    req.redirect(req.href.admin(cat, page,
                                                page=req.args.getint('page'),
                                                num=req.args.getint('num')))
            if path_info:
                # /monitor/<id>: show one log entry.
                data = self._render_monitoring_entry(req, cat, page, path_info)
                page = 'entry'
            else:
                data = self._render_monitoring_panel(req, cat, page)
                data['allowselect'] = True
                data['monitor'] = True
                # Localized strings consumed by adminmonitor.js.
                add_script_data(req, {
                    'bayestext': _("SpamBayes determined spam probability "
                                   "of %s%%"),
                    'sel100text': _("Select 100.00%% entries") % (),
                    'sel90text': _("Select >90.00%% entries") % (),
                    'sel10text': _("Select <10.00%% entries") % (),
                    'sel0text': _("Select 0.00%% entries") % (),
                    'selspamtext': _("Select Spam entries"),
                    'selhamtext': _('Select Ham entries')
                })
                add_script(req, 'spamfilter/adminmonitor.js')
            add_script_data(req, {'toggleform': 'spammonitorform'})
            add_script(req, 'spamfilter/toggle.js')
        add_stylesheet(req, 'spamfilter/admin.css')
        data['accmgr'] = 'ACCTMGR_USER_ADMIN' in req.perm
        return 'admin_spam%s.html' % page, data

    # ITemplateProvider methods

    def get_htdocs_dirs(self):
        from pkg_resources import resource_filename
        return [('spamfilter', resource_filename(__name__, 'htdocs'))]

    def get_templates_dirs(self):
        from pkg_resources import resource_filename
        return [resource_filename(__name__, 'templates')]

    # Internal methods

    def _render_config_panel(self, req, cat, page):
        """Collect per-strategy karma settings and global filter options."""
        req.perm.require('SPAM_CONFIG')
        filter_system = FilterSystem(self.env)

        strategies = []
        for strategy in filter_system.strategies:
            # Strategies expose one or more *karma_points options.
            for variable in dir(strategy):
                if variable.endswith('karma_points'):
                    strategies.append({
                        'name': strategy.__class__.__name__,
                        'karma_points': getattr(strategy, variable),
                        'variable': variable,
                        'karma_help': gettext(getattr(strategy.__class__,
                                                      variable).__doc__)
                    })

        add_script(req, 'spamfilter/adminconfig.js')
        return {
            'strategies': sorted(strategies, key=lambda x: x['name']),
            'min_karma': filter_system.min_karma,
            'authenticated_karma': filter_system.authenticated_karma,
            'attachment_karma': filter_system.attachment_karma,
            'register_karma': filter_system.register_karma,
            'trust_authenticated': filter_system.trust_authenticated,
            'logging_enabled': filter_system.logging_enabled,
            'nolog_obvious': filter_system.nolog_obvious,
            'purge_age': filter_system.purge_age,
            'spam_monitor_entries_min': self.MIN_PER_PAGE,
            'spam_monitor_entries_max': self.MAX_PER_PAGE,
            'spam_monitor_entries': self.DEF_PER_PAGE
        }

    def _process_config_panel(self, req):
        """Persist posted configuration values; returns True on success."""
        req.perm.require('SPAM_CONFIG')
        spam_config = self.config['spam-filter']

        min_karma = req.args.as_int('min_karma')
        if min_karma is not None:
            spam_config.set('min_karma', min_karma)

        attachment_karma = req.args.as_int('attachment_karma')
        if attachment_karma is not None:
            spam_config.set('attachment_karma', attachment_karma)

        register_karma = req.args.as_int('register_karma')
        if register_karma is not None:
            spam_config.set('register_karma', register_karma)

        authenticated_karma = req.args.as_int('authenticated_karma')
        if authenticated_karma is not None:
            spam_config.set('authenticated_karma', authenticated_karma)

        for strategy in FilterSystem(self.env).strategies:
            for variable in dir(strategy):
                if variable.endswith('karma_points'):
                    # Form fields are named <StrategyClass>_<option>.
                    key = strategy.__class__.__name__ + '_' + variable
                    points = req.args.get(key)
                    if points is not None:
                        option = getattr(strategy.__class__, variable)
                        self.config.set(option.section, option.name, points)

        logging_enabled = 'logging_enabled' in req.args
        spam_config.set('logging_enabled', logging_enabled)
        nolog_obvious = 'nolog_obvious' in req.args
        spam_config.set('nolog_obvious', nolog_obvious)
        trust_authenticated = 'trust_authenticated' in req.args
        spam_config.set('trust_authenticated', trust_authenticated)

        if logging_enabled:
            purge_age = req.args.as_int('purge_age')
            if purge_age is not None:
                spam_config.set('purge_age', purge_age)

        spam_monitor_entries = req.args.as_int('spam_monitor_entries',
                                               min=self.MIN_PER_PAGE,
                                               max=self.MAX_PER_PAGE)
        if spam_monitor_entries is not None:
            spam_config.set('spam_monitor_entries', spam_monitor_entries)

        self.config.save()
        return True

    def _render_monitoring_panel(self, req, cat, page):
        """Paginated list of logged filtering events."""
        req.perm.require('SPAM_MONITOR')
        # 1-based in the request, 0-based internally.
        pagenum = req.args.as_int('page', 1) - 1
        pagesize = req.args.as_int('num', self.DEF_PER_PAGE,
                                   min=self.MIN_PER_PAGE,
                                   max=self.MAX_PER_PAGE)

        total = LogEntry.count(self.env)
        if total < pagesize:
            pagenum = 0
        elif total <= pagenum * pagesize:
            # Clamp to the last page (Python 2 integer division).
            pagenum = (total - 1) / pagesize

        offset = pagenum * pagesize
        entries = list(LogEntry.select(self.env, limit=pagesize,
                                       offset=offset))
        if pagenum > 0:
            add_link(req, 'prev',
                     req.href.admin(cat, page, page=pagenum, num=pagesize),
                     _("Previous Page"))
        if offset + pagesize < total:
            add_link(req, 'next',
                     req.href.admin(cat, page, page=pagenum + 2, num=pagesize),
                     _("Next Page"))

        return {
            'enabled': FilterSystem(self.env).logging_enabled,
            'entries': entries,
            'offset': offset + 1,
            'page': pagenum + 1,
            'num': pagesize,
            'total': total,
            'train_only': self.train_only
        }

    def _render_monitoring_entry(self, req, cat, page, entry_id):
        """Detail view of one log entry, with prev/up/next navigation."""
        req.perm.require('SPAM_MONITOR')
        entry = LogEntry.fetch(self.env, entry_id)
        if not entry:
            raise HTTPNotFound(_("Log entry not found"))

        previous = entry.get_previous()
        if previous:
            add_link(req, 'prev', req.href.admin(cat, page, previous.id),
                     _("Log Entry %(id)s", id=previous.id))
        add_link(req, 'up', req.href.admin(cat, page), _("Log Entry List"))
        next = entry.get_next()
        if next:
            add_link(req, 'next', req.href.admin(cat, page, next.id),
                     _("Log Entry %(id)s", id=next.id))

        return {'entry': entry, 'train_only': self.train_only}

    def _process_monitoring_panel(self, req):
        """Train/delete the selected log entries; returns True when done."""
        req.perm.require('SPAM_TRAIN')
        filtersys = FilterSystem(self.env)

        # Which button was pressed decides train/delete semantics.
        spam = 'markspam' in req.args or 'markspamdel' in req.args
        train = spam or 'markham' in req.args or 'markhamdel' in req.args
        delete = 'delete' in req.args or 'markspamdel' in req.args or \
                 'markhamdel' in req.args or 'deletenostats' in req.args
        deletestats = 'delete' in req.args

        if train or delete:
            entries = req.args.getlist('sel')
            if entries:
                if train:
                    filtersys.train(req, entries, spam=spam, delete=delete)
                elif delete:
                    filtersys.delete(req, entries, deletestats)

        if 'deleteobvious' in req.args:
            filtersys.deleteobvious(req)

        return True
class ExternalAdminPageProvider(Component):
"""Web administration panel for configuring the External spam filters."""
implements(IAdminPanelProvider)
# IAdminPanelProvider methods
def get_admin_panels(self, req):
if 'SPAM_CONFIG' in req.perm:
yield ('spamfilter', _("Spam Filtering"),
'external', _("External Services"))
def render_admin_panel(self, req, cat, page, path_info):
req.perm.require('SPAM_CONFIG')
data = {}
spam_config = self.config['spam-filter']
akismet = AkismetFilterStrategy(self.env)
stopforumspam = StopForumSpamFilterStrategy(self.env)
botscout = BotScoutFilterStrategy(self.env)
fspamlist = FSpamListFilterStrategy(self.env)
ip_blacklist_default = ip6_blacklist_default = \
url_blacklist_default = None
if HttpBLFilterStrategy:
ip_blacklist = IPBlacklistFilterStrategy(self.env)
ip_blacklist_default = ip_blacklist.servers_default
ip6_blacklist_default = ip_blacklist.servers6_default
url_blacklist = URLBlacklistFilterStrategy(self.env)
url_blacklist_default = url_blacklist.servers_default
mollom = 0
if MollomFilterStrategy:
mollom = MollomFilterStrategy(self.env)
blogspam = BlogSpamFilterStrategy(self.env)
if req.method == 'POST':
if 'cancel' in req.args:
req.redirect(req.href.admin(cat, page))
akismet_api_url = req.args.get('akismet_api_url')
akismet_api_key = req.args.get('akismet_api_key')
mollom_api_url = req.args.get('mollom_api_url')
mollom_public_key = req.args.get('mollom_public_key')
mollom_private_key = req.args.get('mollom_private_key')
stopforumspam_api_key = req.args.get('stopforumspam_api_key')
botscout_api_key = req.args.get('botscout_api_key')
fspamlist_api_key = req.args.get('fspamlist_api_key')
httpbl_api_key = req.args.get('httpbl_api_key')
ip_blacklist_servers = req.args.get('ip_blacklist_servers')
ip6_blacklist_servers = req.args.get('ip6_blacklist_servers')
url_blacklist_servers = req.args.get('url_blacklist_servers')
blogspam_api_url = req.args.get('blogspam_api_url')
blogspam_skip_tests = req.args.get('blogspam_skip_tests')
use_external = 'use_external' in req.args
train_external = 'train_external' in req.args
skip_external = req.args.get('skip_external')
stop_external = req.args.get('stop_external')
skip_externalham = req.args.get('skip_externalham')
stop_externalham = req.args.get('stop_externalham')
try:
verified_key = akismet.verify_key(req, akismet_api_url,
akismet_api_key)
if akismet_api_key and not verified_key:
data['akismeterror'] = 'The API key is invalid'
data['error'] = 1
except urllib2.URLError, e:
data['alismeterror'] = e.reason[1]
data['error'] = 1
if mollom:
try:
verified_key = mollom.verify_key(req, mollom_api_url,
mollom_public_key,
mollom_private_key)
except urllib2.URLError, e:
data['mollomerror'] = e.reason[1]
data['error'] = 1
else:
if mollom_public_key and mollom_private_key and \
not verified_key:
data['mollomerror'] = 'The API keys are invalid'
data['error'] = 1
if not data.get('error', 0):
spam_config.set('akismet_api_url', akismet_api_url)
spam_config.set('akismet_api_key', akismet_api_key)
spam_config.set('mollom_api_url', mollom_api_url)
spam_config.set('mollom_public_key', mollom_public_key)
spam_config.set('mollom_private_key', mollom_private_key)
spam_config.set('stopforumspam_api_key', stopforumspam_api_key)
spam_config.set('botscout_api_key', botscout_api_key)
spam_config.set('fspamlist_api_key', fspamlist_api_key)
spam_config.set('httpbl_api_key', httpbl_api_key)
if HttpBLFilterStrategy:
if ip_blacklist_servers != ip_blacklist_default:
spam_config.set('ip_blacklist_servers',
ip_blacklist_servers)
else:
spam_config.remove('ip_blacklist_servers')
if ip6_blacklist_servers != ip6_blacklist_default:
spam_config.set('ip6_blacklist_servers',
ip6_blacklist_servers)
else:
spam_config.remove('ip6_blacklist_servers')
if url_blacklist_servers != url_blacklist_default:
spam_config.set('url_blacklist_servers',
url_blacklist_servers)
else:
spam_config.remove('url_blacklist_servers')
spam_config.set('blogspam_json_api_url',
blogspam_api_url)
spam_config.set('blogspam_json_skip_tests',
blogspam_skip_tests)
spam_config.set('use_external', use_external)
spam_config.set('train_external', train_external)
spam_config.set('skip_external', skip_external)
spam_config.set('stop_external', stop_external)
spam_config.set('skip_externalham', skip_externalham)
spam_config.set('stop_externalham', stop_externalham)
self.config.save()
req.redirect(req.href.admin(cat, page))
else:
filter_system = FilterSystem(self.env)
use_external = filter_system.use_external
train_external = filter_system.train_external
skip_external = filter_system.skip_external
stop_external = filter_system.stop_external
skip_externalham = filter_system.skip_externalham
stop_externalham = filter_system.stop_externalham
blogspam_api_url = blogspam.api_url
blogspam_skip_tests = ','.join(blogspam.skip_tests)
akismet_api_url = akismet.api_url
akismet_api_key = akismet.api_key
mollom_public_key = mollom_private_key = mollom_api_url = None
if MollomFilterStrategy:
mollom_api_url = mollom.api_url
mollom_public_key = mollom.public_key
mollom_private_key = mollom.private_key
stopforumspam_api_key = stopforumspam.api_key
botscout_api_key = botscout.api_key
fspamlist_api_key = fspamlist.api_key
httpbl_api_key = spam_config.get('httpbl_api_key')
ip_blacklist_servers = spam_config.get('ip_blacklist_servers')
ip6_blacklist_servers = spam_config.get('ip6_blacklist_servers')
url_blacklist_servers = spam_config.get('url_blacklist_servers')
if HttpBLFilterStrategy:
data['blacklists'] = 1
data['ip_blacklist_default'] = ip_blacklist_default
data['ip6_blacklist_default'] = ip6_blacklist_default
data['url_blacklist_default'] = url_blacklist_default
if MollomFilterStrategy:
data['mollom'] = 1
data['mollom_public_key'] = mollom_public_key
data['mollom_private_key'] = mollom_private_key
data['mollom_api_url'] = mollom_api_url
data['blogspam_api_url'] = blogspam_api_url
data['blogspam_skip_tests'] = blogspam_skip_tests
data['blogspam_methods'] = blogspam.getmethods()
data.update({
'akismet_api_key': akismet_api_key,
'akismet_api_url': akismet_api_url,
'httpbl_api_key': httpbl_api_key,
'stopforumspam_api_key': stopforumspam_api_key,
'botscout_api_key': botscout_api_key,
'fspamlist_api_key': fspamlist_api_key,
'use_external': use_external,
'train_external': train_external,
'skip_external': skip_external,
'stop_external': stop_external,
'skip_externalham': skip_externalham,
'stop_externalham': stop_externalham,
'ip_blacklist_servers': ip_blacklist_servers,
'ip6_blacklist_servers': ip6_blacklist_servers,
'url_blacklist_servers': url_blacklist_servers
})
add_script(req, 'spamfilter/adminexternal.js')
add_stylesheet(req, 'spamfilter/admin.css')
return 'admin_external.html', data
class BayesAdminPageProvider(Component):
    """Web administration panel for configuring the Bayes spam filter.

    NOTE(review): Python 2 code (``except Exception, e``, ``unicode``).
    """

    # Only register the panel when SpamBayes is installed.
    if BayesianFilterStrategy:
        implements(IAdminPanelProvider)

    # IAdminPanelProvider methods

    def get_admin_panels(self, req):
        if 'SPAM_CONFIG' in req.perm:
            yield 'spamfilter', _("Spam Filtering"), 'bayes', _("Bayes")

    def render_admin_panel(self, req, cat, page, path_info):
        """Handle training/testing/maintenance actions (POST) and render the
        training-database summary.
        """
        req.perm.require('SPAM_CONFIG')

        bayes = BayesianFilterStrategy(self.env)
        hammie = bayes._get_hammie()
        data = {}

        if req.method == 'POST':
            if 'train' in req.args:
                # The button value ("spam"/"ham") selects the training class.
                bayes.train(None, None, req.args['bayes_content'], '127.0.0.1',
                            spam='spam' in req.args['train'].lower())
                req.redirect(req.href.admin(cat, page))
            elif 'test' in req.args:
                # Score the pasted text without training on it.
                bayes_content = req.args['bayes_content']
                data['content'] = bayes_content
                try:
                    data['score'] = hammie.score(bayes_content.encode('utf-8'))
                except Exception, e:
                    self.log.warn('Bayes test failed: %s', e, exc_info=True)
                    data['error'] = unicode(e)
            else:
                if 'reset' in req.args:
                    self.log.info('Resetting SpamBayes training database')
                    self.env.db_transaction("DELETE FROM spamfilter_bayes")
                elif 'reduce' in req.args:
                    self.log.info('Reducing SpamBayes training database')
                    bayes.reduce()

                min_training = req.args.as_int('min_training')
                if min_training is not None and \
                        min_training != bayes.min_training:
                    self.config.set('spam-filter', 'bayes_min_training',
                                    min_training)
                    self.config.save()

                min_dbcount = req.args.as_int('min_dbcount')
                if min_dbcount is not None and \
                        min_dbcount != bayes.min_dbcount:
                    self.config.set('spam-filter', 'bayes_min_dbcount',
                                    min_dbcount)
                    self.config.save()
                req.redirect(req.href.admin(cat, page))

        # Summary of the training corpus (spam/ham counts and their ratio).
        ratio = ''
        nspam = hammie.bayes.nspam
        nham = hammie.bayes.nham
        if nham and nspam:
            if nspam > nham:
                ratio = _("(ratio %.1f : 1)") % (float(nspam) / float(nham))
            else:
                ratio = _("(ratio 1 : %.1f)") % (float(nham) / float(nspam))

        dblines, dblines_spamonly, dblines_hamonly, dblines_reduce = \
            bayes.dblines()
        dblines_mixed = dblines - dblines_hamonly - dblines_spamonly
        data.update({
            'min_training': bayes.min_training,
            'min_dbcount': bayes.min_dbcount,
            'dblines': dblines,
            'dblinesreducenum': dblines_reduce,
            'dblinesspamonly':
                ngettext("%(num)d spam", "%(num)d spam", dblines_spamonly),
            'dblineshamonly':
                ngettext("%(num)d ham", "%(num)d ham", dblines_hamonly),
            'dblinesreduce':
                ngettext("%(num)d line", "%(num)d lines", dblines_reduce),
            'dblinesmixed':
                ngettext("%(num)d mixed", "%(num)d mixed", dblines_mixed),
            'nspam': nspam,
            'nham': nham,
            'ratio': ratio
        })
        add_script_data(req, {'hasdata': True if nham + nspam > 0 else False})
        add_script(req, 'spamfilter/adminbayes.js')
        add_stylesheet(req, 'spamfilter/admin.css')
        return 'admin_bayes.html', data
class StatisticsAdminPageProvider(Component):
    """Web administration panel showing per-strategy spam filter statistics."""

    implements(IAdminPanelProvider)

    # IAdminPanelProvider methods

    def get_admin_panels(self, req):
        if 'SPAM_CONFIG' in req.perm:
            yield ('spamfilter', _("Spam Filtering"),
                   'statistics', _("Statistics"))

    def render_admin_panel(self, req, cat, page, path_info):
        req.perm.require('SPAM_CONFIG')
        statistics = Statistics(self.env)
        if req.method == 'POST':
            # Handle the two purge buttons, then bounce back via GET.
            if 'clean' in req.args:
                statistics.clean(req.args['strategy'])
            elif 'cleanall' in req.args:
                statistics.cleanall()
            req.redirect(req.href.admin(cat, page))
        per_strategy, overall = statistics.getstats()
        data = {'strategies': per_strategy, 'overall': overall}
        add_stylesheet(req, 'spamfilter/admin.css')
        return 'admin_statistics.html', data
|
StarcoderdataPython
|
1607843
|
from setuptools import setup, find_packages
from codecs import open
# Import the submodule explicitly: a plain "import importlib" does not
# guarantee that importlib.util is bound, which made the
# spec_from_file_location call below fragile.
import importlib.util
import os

root = os.path.abspath(os.path.dirname(__file__))

# Get the long description from the README file
with open(os.path.join(root, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

# Load the version number from ./src/tarski/version.py without importing
# the package itself (its dependencies may be absent at install time).
spec = importlib.util.spec_from_file_location('tsk.version', os.path.join(root, 'src/tarski/version.py'))
version = importlib.util.module_from_spec(spec)
spec.loader.exec_module(version)
def main():
    """Configure and invoke the setuptools entry point for tarski."""
    setup(
        name='tarski',
        version=version.__version__,
        description='Tarski is a framework for the specification, modeling and manipulation of AI planning problems.',
        long_description=long_description,
        long_description_content_type='text/markdown',
        url='https://github.com/aig-upf/tarski',
        author='<NAME> and <NAME>',
        author_email='<EMAIL>',
        keywords='planning logic STRIPS RDDL',
        classifiers=[
            'Development Status :: 3 - Alpha',
            'Intended Audience :: Science/Research',
            'Intended Audience :: Developers',
            'Topic :: Scientific/Engineering :: Artificial Intelligence',
            'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.6',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
        ],

        packages=find_packages('src'),  # include all packages under src
        package_dir={'': 'src'},  # tell distutils packages are under src

        python_requires='>=3.6',  # supported Python ranges
        install_requires=[
            # psutil not supported on Windows, we haven't tested in other platforms, but since it's not essential
            # to the functioning of Tarski, better be conservative here and install only on Linux.
            'psutil; platform_system=="Linux"',
            'multipledispatch',
            # Antlr pinned to a specific version to avoid messages "ANTLR runtime and generated code versions disagree"
            # messages. If we want to bump this up, we'll need to regenerate the grammar files with the new version.
            'antlr4-python3-runtime==4.7.2',
            # Clingo (gringo) bindings to the clingo solver
            'clingo>=5.5.1',
        ],

        extras_require={
            'test': ['pytest', 'tox', 'pytest-cov', 'mypy'],
            'docs': ['sphinx>=2.1.2', 'recommonmark', 'nbsphinx', 'sphinx_rtd_theme', 'ipykernel', 'ipython'],
            'arithmetic': ['scipy', 'numpy'],
            'rddl': ['pyrddl'],
        },

        # This will include non-code files specified in the manifest, see e.g.
        # http://python-packaging.readthedocs.io/en/latest/non-code-files.html
        include_package_data=True,
    )


if __name__ == '__main__':
    main()
|
StarcoderdataPython
|
123109
|
import urlparse
from urllib import urlencode
from django.conf import settings
import jwt
from mozpay.verify import verify_claims, verify_keys
from nose.tools import eq_
import amo
from amo.helpers import absolutify
from amo.urlresolvers import reverse
from mkt.webpay.webpay_jwt import (get_product_jwt, WebAppProduct,
InAppProduct)
from mkt import regions
from mkt.purchase.tests.utils import InAppPurchaseTest, PurchaseTest
from stats.models import Contribution
class TestPurchaseJWT(PurchaseTest):
    """Validate the JWT payload issued for a paid web-app purchase."""

    def setUp(self):
        super(TestPurchaseJWT, self).setUp()
        self.product = WebAppProduct(self.addon)
        self.token = get_product_jwt(
            self.product,
            region=regions.US,
            user=self.user,
        )
        # Decode without signature verification: only the payload
        # contents are under test here.
        self.token_data = jwt.decode(
            str(self.token['webpayJWT']), verify=False)
        self.contribution = Contribution.objects.get()

    def test_claims(self):
        verify_claims(self.token_data)

    def test_keys(self):
        # Dotted names address nested keys inside the payload.
        verify_keys(self.token_data,
                    ('iss',
                     'typ',
                     'aud',
                     'iat',
                     'exp',
                     'request.name',
                     'request.description',
                     'request.pricePoint',
                     'request.postbackURL',
                     'request.chargebackURL',
                     'request.productData'))

    def test_valid_jwt(self):
        """The payload must mirror settings, product and contribution."""
        eq_(self.token_data['iss'], settings.APP_PURCHASE_KEY)
        eq_(self.token_data['typ'], settings.APP_PURCHASE_TYP)
        eq_(self.token_data['aud'], settings.APP_PURCHASE_AUD)
        contribution = Contribution.objects.get()
        eq_(contribution.type, amo.CONTRIB_PENDING)
        eq_(contribution.price_tier, self.addon.premium.price)
        eq_(contribution.user, self.user)
        request = self.token_data['request']
        eq_(request['id'], self.product.external_id())
        eq_(request['name'], self.product.name())
        eq_(request['icons'], self.product.icons())
        eq_(request['description'], self.product.description())
        eq_(request['pricePoint'], self.product.price().name)
        eq_(request['postbackURL'], absolutify(reverse('webpay.postback')))
        eq_(request['chargebackURL'], absolutify(reverse('webpay.chargeback')))
        # Compare product data as parsed query strings so parameter
        # ordering differences do not cause false failures.
        token_product_data = urlparse.parse_qs(request['productData'])
        expected_product_data = urlparse.parse_qs(
            urlencode(self.product.product_data(self.contribution)))
        eq_(token_product_data, expected_product_data)
class TestWebAppProduct(PurchaseTest):
    """Exercise the WebAppProduct accessors used to build purchase JWTs."""

    def setUp(self):
        super(TestWebAppProduct, self).setUp()
        self.product = WebAppProduct(self.addon)
        self.token = get_product_jwt(
            self.product,
            region=regions.US,
            user=self.user,
        )
        self.contribution = Contribution.objects.get()

    def test_external_id_with_no_domain(self):
        # Without a configured DOMAIN the dev prefix is used.
        with self.settings(DOMAIN=None):
            eq_(self.product.external_id(),
                'marketplace-dev:{0}'.format(self.addon.pk))

    def test_external_id_with_domain(self):
        with self.settings(DOMAIN='marketplace.allizom.org'):
            eq_(self.product.external_id(),
                'marketplace:{0}'.format(self.addon.pk))

    def test_webapp_product(self):
        """Every accessor must delegate to the underlying addon."""
        eq_(self.product.id(), self.addon.pk)
        eq_(self.product.name(), unicode(self.addon.name))
        eq_(self.product.addon(), self.addon)
        eq_(self.product.amount(regions.US),
            self.addon.get_price(region=regions.US.id))
        eq_(self.product.price(), self.addon.premium.price)
        # NOTE(review): key is the string '512' here but the int 64 in
        # TestInAppProduct -- verify the icons() key contract.
        eq_(self.product.icons()['512'],
            absolutify(self.addon.get_icon_url(512)))
        eq_(self.product.description(), self.addon.description)
        eq_(self.product.application_size(),
            self.addon.current_version.all_files[0].size)
        eq_(self.product.seller_uuid(), (self.addon
                                         .single_pay_account()
                                         .payment_account
                                         .solitude_seller
                                         .uuid))
        product_data = self.product.product_data(self.contribution)
        eq_(product_data['contrib_uuid'], self.contribution.uuid)
        eq_(product_data['seller_uuid'], self.product.seller_uuid())
        eq_(product_data['addon_id'], self.product.addon().pk)
        eq_(product_data['application_size'], self.product.application_size())
class TestInAppProduct(InAppPurchaseTest):
    """Exercise the InAppProduct accessors used to build purchase JWTs."""

    def setUp(self):
        super(TestInAppProduct, self).setUp()
        self.product = InAppProduct(self.inapp)
        # In-app purchases carry no region/user in the JWT.
        self.token = get_product_jwt(self.product)
        self.contribution = Contribution.objects.get()

    def test_external_id_with_no_domain(self):
        with self.settings(DOMAIN=None):
            eq_(self.product.external_id(),
                'inapp.marketplace-dev:{0}'.format(self.inapp.pk))

    def test_external_id_with_domain(self):
        with self.settings(DOMAIN='marketplace.allizom.org'):
            eq_(self.product.external_id(),
                'inapp.marketplace:{0}'.format(self.inapp.pk))

    def test_inapp_product(self):
        """In-app accessors delegate to the in-app item and its webapp."""
        eq_(self.product.id(), self.inapp.pk)
        eq_(self.product.name(), unicode(self.inapp.name))
        eq_(self.product.addon(), self.inapp.webapp)
        # Amount and application size are undefined for in-app items.
        eq_(self.product.amount(regions.US), None)
        eq_(self.product.price(), self.inapp.price)
        # NOTE(review): key is the int 64 here but the string '512' in
        # TestWebAppProduct -- verify the icons() key contract.
        eq_(self.product.icons()[64], absolutify(self.inapp.logo_url))
        eq_(self.product.description(), self.inapp.webapp.description)
        eq_(self.product.application_size(), None)
        eq_(self.product.seller_uuid(), (self.inapp
                                         .webapp
                                         .single_pay_account()
                                         .payment_account
                                         .solitude_seller
                                         .uuid))
        product_data = self.product.product_data(self.contribution)
        eq_(product_data['contrib_uuid'], self.contribution.uuid)
        eq_(product_data['seller_uuid'], self.product.seller_uuid())
        eq_(product_data['addon_id'], self.product.addon().pk)
        eq_(product_data['inapp_id'], self.product.id())
        eq_(product_data['application_size'], self.product.application_size())
|
StarcoderdataPython
|
3380974
|
# Generated by Django 3.0.5 on 2021-05-31 19:04
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (makemigrations, 2021-05-31).

    NOTE(review): the default is a *fixed* datetime captured when the
    migration was generated, not a callable such as ``timezone.now`` --
    every Course created without an explicit value gets 2021-05-31
    20:04. Fixing that belongs in the model plus a fresh migration, not
    in an edit to this generated file.
    """

    dependencies = [
        ('quiz', '0006_auto_20210531_2004'),
    ]

    operations = [
        migrations.AlterField(
            model_name='course',
            name='start_time',
            field=models.DateTimeField(default=datetime.datetime(2021, 5, 31, 20, 4, 19, 804814)),
        ),
    ]
|
StarcoderdataPython
|
3329689
|
class ProductPackaging(object):
    """Value object holding the packaging-related fields of a product.

    Attribute names deliberately use CamelCase to match the external
    serialization convention used by callers.
    """

    def __init__(self, packaging, packaging_tags, emb_codes, emb_codes_tags, first_packaging_code_geo):
        self.Packaging = packaging
        self.PackagingTags = packaging_tags
        self.EmbCodes = emb_codes
        self.EmbCodesTags = emb_codes_tags
        self.FirstPackagingCodeGeo = first_packaging_code_geo

    def __str__(self):
        # Human-readable summary: packaging plus its tags.
        return "Packaging => %s # packaging_tags => %s" % (self.Packaging, self.PackagingTags)
|
StarcoderdataPython
|
3382740
|
#!/usr/bin/python
# Python 2 script: uses urllib2 and print statements.


def leerDato():
    """Fetch this machine's public IP address as plain text.

    Returns the raw response body from http://ip.42.pl/raw.
    NOTE(review): network call with no timeout or error handling;
    urllib2 is Python 2 only (urllib.request in Python 3).
    """
    import urllib2
    html = urllib2.urlopen('http://ip.42.pl/raw').read()
    return html


print leerDato()
print ":)"
|
StarcoderdataPython
|
3347266
|
<reponame>rgmyr/litholog<gh_stars>10-100
"""
IO classes & functions.
This is an incomplete implemention of a more general/customizable
implementation of data checking/pre-processing.
"""
import operator
from abc import abstractmethod
import pandas as pd
from litholog import utils
class BaseCheck():
    """Run a predicate over a value and react when the check fails.

    Parameters
    ----------
    check_fn : FunctionType
        Predicate returning True when the value passes, False otherwise.
    action : FunctionType or Exception
        What to do with a failing value: a function is applied to it and
        its result returned; an Exception instance is raised.
    """

    def __init__(self, check_fn, action):
        self.check_fn = check_fn
        self.action = action

    @property
    @abstractmethod
    def level(self):
        # Subclasses declare whether they operate per-row or per-group.
        raise NotImplementedError

    def check(self, thing):
        """Return True when `thing` satisfies the predicate."""
        return self.check_fn(thing)

    def apply(self, thing):
        """Return `thing` unchanged when it passes; otherwise react.

        A failing value either raises `action` (when it is an Exception
        instance) or is replaced by `action(thing)`.
        """
        if self.check(thing):
            return thing
        if isinstance(self.action, Exception):
            raise self.action
        return self.action(thing)
class RowCheck(BaseCheck):
    """A `BaseCheck` applied to individual rows of a table."""

    @property
    def level(self):
        return 'row'
class TableCheck(BaseCheck):
    """A `BaseCheck` applied to a whole table/group at once."""

    @property
    def level(self):
        return 'group'
class DataFrameChecker():
    """Reads csv table(s) and applies a list of `Check`s.

    Parameters
    ----------
    checks : iterable of BaseCheck
        Checks to register; dispatched by their `level` property.
    df : pd.DataFrame or path-like, optional
        An existing frame, or a csv path handed to `read`.
    """

    def __init__(self, checks, df=None, **kwargs):
        assert all(isinstance(chk, BaseCheck) for chk in checks), '`checks` must be an iterable of `Check`s'
        # The registries must exist before the first add_check call;
        # previously they were never initialized (AttributeError).
        self.row_checks = []
        self.group_checks = []
        for check in checks:
            self.add_check(check)
        # `if df:` is ambiguous for a DataFrame (raises ValueError), so
        # test against None; also ensure self.df always exists so that
        # `has_df` cannot raise AttributeError.
        self.df = None
        if df is not None:
            if isinstance(df, pd.DataFrame):
                self.df = df
            else:
                self.df = self.read(df, **kwargs)

    @property
    def has_df(self):
        """True when a DataFrame has been loaded."""
        return isinstance(self.df, pd.DataFrame)

    def read(self, fpath, converters={}, **kwargs):
        """Read a csv, retrying with latin-1 when utf-8 decoding fails."""
        try:
            return pd.read_csv(fpath, converters=converters, **kwargs)
        except UnicodeDecodeError:
            kwargs['encoding'] = 'latin-1'
            return pd.read_csv(fpath, converters=converters, **kwargs)

    def add_check(self, check):
        """Register `check` in the registry matching its `level`."""
        assert isinstance(check, BaseCheck), f'Can only add a `*Check`, not {type(check)}'
        # Was `if c.level is 'row'`: `c` was undefined (NameError) and
        # `is` compared string identity; use the parameter and `==`.
        if check.level == 'row':
            self.row_checks.append(check)
        elif check.level == 'group':
            self.group_checks.append(check)
        else:
            raise ValueError(f'Unknown `Check.level` value: {check.level}')

    def split_by(self, field):
        # Not yet implemented upstream.
        pass
def fill_column_nan(df, col, fill_value, indicator='missing'):
"""
Fill missing values in `col` with `fill_value`.
Add a new bool column '`indicator`_`col`'.
"""
pass
|
StarcoderdataPython
|
178304
|
# 实现PCA分析和法向量计算,并加载数据集中的文件进行验证
import os
import time
import numpy as np
from pyntcloud import PyntCloud
import open3d as o3d
def PCA(data: np.ndarray, correlation: bool = False, sort: bool = True):
    """Compute the principal components of a point cloud.

    Parameters
    ----------
    data : np.ndarray
        Point cloud, an (N, 3) matrix (any (N, d) array works).
        The original annotation `PyntCloud.points` evaluated a property
        object at import time and was not a valid type.
    correlation : bool
        Meant to distinguish np.cov from np.corrcoef.
        NOTE(review): currently unused -- the scatter matrix is always
        the (unnormalized) covariance.
    sort : bool
        Sort eigenvalues (and eigenvectors) in descending order.

    Returns
    -------
    eigenvalues : np.ndarray
        Eigenvalues of the scatter matrix.
    eigenvectors : np.ndarray
        Columns are the principal directions, matching `eigenvalues`.
    """
    # Center the data on its mean.
    X_ = data - np.mean(data, axis=0)
    # Scatter matrix H (d x d); its eigenvectors are the principal axes.
    H = np.dot(X_.T, X_)
    # SVD of the symmetric PSD matrix H: the singular values equal the
    # eigenvalues and the columns of U are the eigenvectors.
    eigenvectors, eigenvalues, _ = np.linalg.svd(H)
    if sort:
        # Separate name for the ordering: the original shadowed the
        # `sort` parameter with the index array here.
        order = eigenvalues.argsort()[::-1]
        eigenvalues = eigenvalues[order]
        eigenvectors = eigenvectors[:, order]
    return eigenvalues, eigenvectors
def main():
    """Load each ModelNet40 shape, run PCA, and estimate point normals."""
    # Root directory of the point-cloud dataset
    path = '../../../modelnet40_normal_resampled/'
    # NOTE(review): when `path` is not a directory this feeds None to
    # np.loadtxt, which raises -- presumably an intentional hard failure;
    # confirm.
    shape_name_list = np.loadtxt(os.path.join(path, 'modelnet40_shape_names.txt') if os.path.isdir(path) else None,dtype=str)
    for item in shape_name_list:
        # Import model (first sample of each shape; columns 0-2 are xyz)
        filename = os.path.join(path, item, item+'_0001.txt')
        pointcloud = np.loadtxt(filename, delimiter=',')[:, 0:3]
        print('total points number is:', pointcloud.shape[0])
        # Convert to PyntCloud and Open3D formats
        point_cloud_o3d = o3d.geometry.PointCloud()
        point_cloud_o3d.points = o3d.utility.Vector3dVector(pointcloud)
        # point_cloud_pynt = PyntCloud.from_instance("open3d", point_cloud_o3d)
        # points = point_cloud_pynt.points
        # Analyze the cloud's principal directions with PCA
        N = pointcloud.shape[0]
        t0 = time.time()
        w, v = PCA(pointcloud)
        t1 = time.time()
        print('###### PCA time taken (per 1k points): ', round((t1 - t0)/N*1000, 5))
        point_cloud_vector = v[:, 2]  # direction of least variance
        print('the main orientation of this pointcloud is: ', point_cloud_vector)
        principle_axis = np.concatenate((np.array([[0.,0.,0.]]), v.T))
        print('Principal Axis: ', principle_axis)
        # Visualise the PCA Axis
        axis = o3d.geometry.TriangleMesh.create_coordinate_frame().rotate(v, center=(0,0,0))
        # Visualise the PCA Projection (project out the last component,
        # then shift along it for visibility)
        pr_data = pointcloud - np.dot(pointcloud, v[:,2][:,np.newaxis])*v[:, 2]
        pr_data = 1*v[:, 2] + pr_data
        pc_view = o3d.geometry.PointCloud(points=o3d.utility.Vector3dVector(pointcloud))
        pc_view.colors = o3d.utility.Vector3dVector([[0,0,0]])
        pr_view = o3d.geometry.PointCloud(points=o3d.utility.Vector3dVector(pr_data))
        # o3d.visualization.draw_geometries([pc_view, axis, pr_view])
        # Estimate a normal per point. Nearest-neighbour search is
        # later-chapter material, so calling open3d's KD-tree directly
        # is allowed here.
        t0 = time.time()
        pcd_tree = o3d.geometry.KDTreeFlann(point_cloud_o3d)
        t1 = time.time()
        print('###### KDTreeFlann time taken (per 1k points): ', round((t1 - t0)/N*1000, 5))
        normals = []
        t0 = time.time()
        for index in range(N):
            # For each point, search for its nearest k neighbors
            [_, idx, _] = pcd_tree.search_knn_vector_3d(pc_view.points[index], 21)
            neighbor_pc = np.asarray(pc_view.points)[idx]
            # Compute the eigenvectors for its neighbors; the direction
            # of least variance approximates the surface normal.
            _, v = PCA(neighbor_pc)
            normals.append(v[:, 2])
        t1 = time.time()
        print('###### My Normal Estimation time taken (per 1k points): ', round((t1 - t0)/N*1000, 5))
        t0 = time.time()
        point_cloud_o3d.estimate_normals()
        t1 = time.time()
        print('###### Open3D Normal Estimation time taken (per 1k points): ', round((t1 - t0)/N*1000, 5))
        # Normals computed above are kept in `normals`
        o3d_normals = np.asarray(point_cloud_o3d.normals, dtype=np.float64)
        normals = np.array(normals, dtype=np.float64)
        point_cloud_o3d.normals = o3d.utility.Vector3dVector(normals)
        # build pca line set: segments from each point along its normal
        points = np.vstack((pointcloud, pointcloud + 0.03*normals))
        lines = [[i, i+N] for i in range(N)]
        colors = np.zeros((N, 3)).tolist()
        surface_normals_my = o3d.geometry.LineSet(
            points=o3d.utility.Vector3dVector(points),
            lines=o3d.utility.Vector2iVector(lines),
        )
        surface_normals_my.colors = o3d.utility.Vector3dVector(colors)
        points = np.vstack((pointcloud, pointcloud + 0.03*o3d_normals))
        lines = [[i, i+N] for i in range(N)]
        colors = np.full((N, 3), 0.5).tolist()
        surface_normals_o3d = o3d.geometry.LineSet(
            points=o3d.utility.Vector3dVector(points),
            lines=o3d.utility.Vector2iVector(lines),
        )
        surface_normals_o3d.colors = o3d.utility.Vector3dVector(colors)
        o3d.visualization.draw_geometries([pc_view, axis, pr_view, surface_normals_my, surface_normals_o3d])  # point_cloud_o3d,


if __name__ == '__main__':
    main()
|
StarcoderdataPython
|
3286275
|
<reponame>igorcosta/bidu
import pytest
from bidu.utils.test_utils import layer_test, bidu_test
from bidu.layers import noise
@bidu_test
def test_GaussianNoise():
    """Smoke-test the additive GaussianNoise layer on a (3, 2, 3) input."""
    layer_test(noise.GaussianNoise,
               kwargs={'sigma': 1.},
               input_shape=(3, 2, 3))
@bidu_test
def test_GaussianDropout():
    """Smoke-test the GaussianDropout layer (p=0.5) on a (3, 2, 3) input."""
    layer_test(noise.GaussianDropout,
               kwargs={'p': 0.5},
               input_shape=(3, 2, 3))


if __name__ == '__main__':
    pytest.main([__file__])
|
StarcoderdataPython
|
3319385
|
from observer import AbsSubject
class MySubject(AbsSubject):
    """Concrete observable holding two pieces of state, `foo` and `bar`."""

    # Initial sentinel values meaning "not yet set".
    _foo = -1
    _bar = -1

    @property
    def foo(self):
        return self._foo

    @property
    def bar(self):
        return self._bar

    def set_states(self, new_foo, new_bar):
        """Update both states, then notify observers exactly once."""
        self._foo = new_foo
        self._bar = new_bar
        self.notify()
|
StarcoderdataPython
|
3270226
|
<reponame>windniw/just-for-fun<gh_stars>1-10
"""
link: https://leetcode-cn.com/problems/trapping-rain-water-ii
problem: 给二维矩阵代表每个点的高度,向其最大的保留水量,设周围高度为0
solution: 维护边沿,用最小堆每次抛出边缘的最小值,依次向内收缩做搜索。
"""
class Solution:
    def trapRainWater(self, heightMap: "List[List[int]]") -> int:
        """Return the volume of water trapped by a 2-D elevation map.

        Min-heap over the current boundary: repeatedly pop the lowest
        boundary cell and flood inward -- an unvisited neighbour lower
        than the popped level traps (level - height) units of water and
        joins the boundary at the popped level; a higher neighbour joins
        at its own height.
        """
        # Local import makes the snippet self-contained; the original
        # used `heapq` and `List` without importing either (NameError
        # outside the judge environment). The string annotation avoids
        # evaluating the missing `List` at definition time.
        import heapq
        if not heightMap or not heightMap[0]:
            return 0
        heap, n, m = [], len(heightMap), len(heightMap[0])
        visit = [[False] * m for _ in range(n)]
        # Corners can never hold water; mark them visited directly.
        visit[0][0] = visit[0][-1] = visit[-1][0] = visit[-1][-1] = True
        # Seed the heap with the non-corner border cells.
        for i in range(1, n - 1):
            heapq.heappush(heap, (heightMap[i][0], i, 0))
            heapq.heappush(heap, (heightMap[i][-1], i, m - 1))
            visit[i][0] = visit[i][-1] = True
        for i in range(1, m - 1):
            heapq.heappush(heap, (heightMap[0][i], 0, i))
            heapq.heappush(heap, (heightMap[-1][i], n - 1, i))
            visit[0][i] = visit[-1][i] = True
        res = 0
        while heap:
            (v, i, j) = heapq.heappop(heap)
            for ii, jj in [(0, 1), (0, -1), (-1, 0), (1, 0)]:
                x, y = i + ii, j + jj
                if 0 <= x < n and 0 <= y < m and not visit[x][y]:
                    visit[x][y] = True
                    if heightMap[x][y] >= v:
                        heapq.heappush(heap, (heightMap[x][y], x, y))
                    else:
                        # Water fills this cell up to the boundary level v.
                        heapq.heappush(heap, (v, x, y))
                        res += v - heightMap[x][y]
        return res
|
StarcoderdataPython
|
1718174
|
<filename>Visual/model.py<gh_stars>1-10
import torch.nn.functional as F
import torch.optim as optim
import torch.nn as nn
import torch
class QNetwork(nn.Module):
def __init__(self, action_size, seed):
super(QNetwork, self).__init__()
nfilters = [128, 128*2, 128*2]
self.seed = torch.manual_seed(seed)
self.conv1 = nn.Conv3d(3, nfilters[0], kernel_size=(1, 3, 3), stride=(1,3,3))
self.bn1 = nn.BatchNorm3d(nfilters[0])
self.conv2 = nn.Conv3d(nfilters[0], nfilters[1], kernel_size=(1, 3, 3), stride=(1,3,3))
self.bn2 = nn.BatchNorm3d(nfilters[1])
self.conv3 = nn.Conv3d(nfilters[1], nfilters[2], kernel_size=(4, 3, 3), stride=(1,3,3))
self.bn3 = nn.BatchNorm3d(nfilters[2])
fc = [2304, 1024]
self.fc1 = nn.Linear(fc[0], fc[1])
self.fc2 = nn.Linear(fc[1], action_size)
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
|
StarcoderdataPython
|
1643817
|
<gh_stars>0
import plotly
import plotly.express as px
from plotly.missing_ipywidgets import FigureWidget
import pandas as pd
# Mapbox access token. NOTE(review): credentials should come from an
# environment variable or config file, not a committed source file.
token = "<KEY>"
def show_bush_fires(df: pd.DataFrame):
    """Build a mapbox scatter plot of wildfire detections.

    Expects columns 'latitude', 'longitude', 'ranking' (color) and
    'est_fire_area' (marker size) -- TODO confirm against the upstream
    data source.
    """
    px.set_mapbox_access_token(token)
    fig = px.scatter_mapbox(df, lat='latitude', lon='longitude', color='ranking', title="Wildfires in past 24 hours", size='est_fire_area', color_continuous_scale=px.colors.sequential.matter, size_max=15, zoom=3)
    return fig
# Convert plotly figure to html.
def to_html(fig: FigureWidget):
    """Render `fig` as an HTML snippet that loads plotly.js from the CDN."""
    #config = {'scrollZoom': False, 'showLink': False, 'displayModeBar': False}
    return plotly.io.to_html(fig, validate=False, include_plotlyjs='cdn')#, config=config)
|
StarcoderdataPython
|
3394347
|
import asyncio
from collections import defaultdict
from functools import wraps
from pengbot import logger
from pengbot.context import Context
from pengbot.utils import isbound
class UnknownCommand(Exception):
    # NOTE(review): no raiser in this module; presumably raised by
    # concrete adapters for unrecognized commands -- confirm.
    pass
class BaseAdapter:
    """Base event-loop adapter that dispatches messages to handlers.

    Handlers are registered with the `listen` directive; coroutines
    wrapped by `signal` broadcast their results to listeners registered
    for them.
    """

    running = False
    loop = None

    def __init__(self, setup_method, **kwargs):
        self.context = Context()
        self.setup_method = setup_method
        # Per-instance registries: as class attributes (the previous
        # layout) they were shared and mutated across every adapter
        # instance.
        self.handlers = []
        self.signals = {}

    def __call__(self, *args, **kwargs):
        try:
            self.run()
        except KeyboardInterrupt:
            exit(0)

    @property
    def name(self):
        # An explicit name in the context wins; fall back to the name
        # of the setup function.
        return self.context.get('name', None) or self.setup_method.__name__

    def run(self):
        self.setup_method()
        self.receive()

    def receive(self, *args):
        self.loop = asyncio.get_event_loop()
        self.loop.set_debug(True)
        try:
            self.loop.run_until_complete(self.handle_message(*args))
        finally:
            self.loop.close()

    async def handle_message(self, *args):
        """Fan a message out to every registered handler."""
        for handler in self.handlers:
            coroutine = handler(*args)
            # Debug prints replaced with proper logging.
            logger.debug('scheduling handler %r', handler)
            self.emit(coroutine)

    def emit(self, coroutine):
        """Schedule a coroutine on the adapter's event loop."""
        logger.debug('emit %r', coroutine)
        self.loop.create_task(coroutine)

    def send(self, message):
        raise NotImplementedError()

    def say(self, *args, **kwargs):
        raise NotImplementedError()

    # Directives

    def signal(self):
        """Decorator: broadcast a coroutine's result to its listeners."""
        adapter = self

        def decorator(func):
            @wraps(func)
            async def wrapper(*args, **kwargs):
                result = await func(*args, **kwargs)
                for listener in adapter.signals.get(func.__qualname__, []):
                    # A tuple result is spread over the listener's args.
                    if isinstance(result, tuple):
                        adapter.emit(listener(*result))
                    else:
                        adapter.emit(listener(result))
                return result
            return wrapper
        return decorator

    def listen(self, signal=None):
        """Decorator: register a handler, optionally bound to a signal."""
        def decorator(func):
            @wraps(func)
            def callback(*args, **kwargs):
                return func(*args, **kwargs)
            if not signal:
                self.handlers.append(callback)
            else:
                # Was `if signal in self.signals`: the registry is keyed
                # by qualname, so the function object never matched and
                # re-registration clobbered earlier listeners.
                if signal.__qualname__ in self.signals:
                    self.signals[signal.__qualname__].append(callback)
                else:
                    self.signals[signal.__qualname__] = [callback]
            # Return the original function so the decorated name stays
            # callable (the previous decorator returned None).
            return func
        return decorator
class SocketAdapter(BaseAdapter):
    """Placeholder for a socket-based adapter (no overrides yet)."""
    pass
class ProcessAdapter(BaseAdapter):
    """Placeholder for a subprocess-based adapter (no overrides yet)."""
    pass
|
StarcoderdataPython
|
1644277
|
<filename>5.analysis/scikit-multilearn-master/skmultilearn/problem_transform/cc.py
from builtins import range
from ..base.problem_transformation import ProblemTransformationBase
from scipy.sparse import hstack, coo_matrix, issparse
import copy
import numpy as np
import random
class ClassifierChain(ProblemTransformationBase):
    """Classifier Chains Multi-Label Classifier.

    This class provides implementation of Jesse Read's problem transformation
    method called Classifier Chains. For L labels it trains L classifiers
    ordered in a chain according to the
    `Bayesian chain rule <https://en.wikipedia.org/wiki/Chain_rule_(probability)>`_.

    The first classifier is trained just on the input space, and then each next
    classifier is trained on the input space and all previous classifiers in the
    chain.

    The default classifier chains follow the same ordering as provided in the
    training set, i.e. label in column 0, then 1, etc.

    You can find more information about this method in Jesse Read's
    `ECML presentation <https://users.ics.aalto.fi/jesse/talks/chains-ECML-2009-presentation.pdf>`_
    or `journal paper <http://www.cs.waikato.ac.nz/~eibe/pubs/ccformlc.pdf>`_.
    """

    BRIEFNAME = "CC"

    def __init__(self, classifier=None, require_dense=None):
        super(ClassifierChain, self).__init__(classifier, require_dense)

    def fit(self, X, y):
        """Fit classifier with training data

        Internally this method uses a sparse CSC representation
        (:py:class:`scipy.sparse.csc_matrix`) of the X & y matrices.

        :param X: input features
        :type X: dense or sparse matrix (n_samples, n_features)
        :param y: binary indicator matrix with label assignments
        :type y: dense or sparse matrix of {0, 1} (n_samples, n_labels)
        :returns: Fitted instance of self
        """
        # fit L = len(y[0]) BR classifiers h_i
        # on X + y[:i] as input space and y[i+1] as output
        X_extended = self.ensure_input_format(
            X, sparse_format='csc', enforce_sparse=True)
        y = self.ensure_output_format(
            y, sparse_format='csc', enforce_sparse=True)
        self.label_count = y.shape[1]
        self.classifiers = [None for x in range(self.label_count)]
        for label in range(self.label_count):
            # NOTE(review): self.classifier is deep-copied on every
            # iteration so each chain link gets a fresh estimator;
            # confirm the repeated rebinding of self.classifier itself
            # is intentional.
            self.classifier = copy.deepcopy(self.classifier)
            y_subset = self.generate_data_subset(y, label, axis=1)
            self.classifiers[label] = self.classifier.fit(self.ensure_input_format(
                X_extended), self.ensure_output_format(y_subset))
            # Each later classifier also sees the earlier labels as input.
            X_extended = hstack([X_extended, y_subset])
        return self

    def predict(self, X):
        """Predict labels for X

        Internally this method uses a sparse CSC representation (:py:class:`scipy.sparse.csc_matrix`) of the X matrix.

        :param X: input features
        :type X: dense or sparse matrix (n_samples, n_features)
        :returns: binary indicator matrix with label assignments
        :rtype: sparse matrix of int (n_samples, n_labels)
        """
        X_extended = self.ensure_input_format(
            X, sparse_format='csc', enforce_sparse=True)
        prediction = None
        for label in range(self.label_count):
            prediction = self.classifiers[label].predict(
                self.ensure_input_format(X_extended))
            prediction = self.ensure_multi_label_from_single_class(prediction)
            X_extended = hstack([X_extended, prediction])
        # The last label_count columns are the chained predictions.
        return X_extended[:, -self.label_count:]

    def predict_proba(self, X):
        """Predict probabilities of label assignments for X

        Internally this method uses a sparse CSC representation (:py:class:`scipy.sparse.csc_matrix`) of the X matrix.

        :param X: input features
        :type X: dense or sparse matrix (n_samples, n_labels)
        :returns: matrix with label assignment probabilities
        :rtype: sparse matrix of float (n_samples, n_labels)
        """
        X_extended = self.ensure_input_format(
            X, sparse_format='csc', enforce_sparse=True)
        prediction = None
        results = []
        for label in range(self.label_count):
            prediction = self.classifiers[label].predict(
                self.ensure_input_format(X_extended))
            prediction = self.ensure_output_format(
                prediction, sparse_format='csc', enforce_sparse=True)
            prediction_proba = self.classifiers[label].predict_proba(
                self.ensure_input_format(X_extended))
            # Keep only the probability of the positive class (column 1).
            prediction_proba = self.ensure_output_format(
                prediction_proba, sparse_format='csc', enforce_sparse=True)[:, 1]
            # NOTE(review): here the hard prediction is transposed before
            # stacking, while predict() stacks it untransposed -- verify
            # the orientation produced by ensure_output_format.
            X_extended = hstack([X_extended, prediction.T]).tocsc()
            results.append(prediction_proba)
        return hstack(results)
StarcoderdataPython
|
185491
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# BEGIN LICENSE
# Copyright (c) 2014 <NAME> <<EMAIL>>
# Copyright (c) 2017 <NAME> <<EMAIL>>
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# END LICENSE
""" Fetches weather reports from Dark Sky for displaying on a screen. """
__version__ = "0.0.12"
###############################################################################
# Raspberry Pi Weather Display
# Original By: <NAME> 10/25/2014
# Modified By: <NAME> 12/30/2017 & multiple times since
###############################################################################
# local imports
from weather import *
class Daily(Weather):
    """Daily-forecast screen for the Raspberry Pi weather display."""

    def get_daily(self, last_update_time):
        """Refresh the forecast; returns the new last-update timestamp."""
        new_last_update_time = self.get_forecast(last_update_time)
        return new_last_update_time

    def disp_daily(self, last_update_time):
        """Draw the full daily screen: current conditions plus four days."""
        # Fill the screen with black
        self.screen.fill((0, 0, 0))
        xmin = 10
        lines = 5
        line_color = (255, 255, 255)
        text_color = (255, 255, 255)
        font_name = "freesans"
        self.draw_screen_border(line_color, xmin, lines)
        self.disp_time_date(font_name, text_color)
        self.disp_current_temp(font_name, text_color)
        self.disp_summary()
        self.display_conditions_line(
            'Feels Like:', int(round(self.weather.apparentTemperature)),
            True)
        # Wind bearing may be absent from the feed; fall back to speed only.
        try:
            wind_bearing = self.weather.windBearing
            wind_direction = deg_to_compass(wind_bearing) + ' @ '
        except AttributeError:
            wind_direction = ''
        wind_txt = wind_direction + str(
            int(round(self.weather.windSpeed))) + \
            ' ' + get_windspeed_abbreviation()
        self.display_conditions_line(
            'Wind:', wind_txt, False, 1)
        self.display_conditions_line(
            'Humidity:', str(int(round((self.weather.humidity * 100)))) + '%',
            False, 2)
        # Skipping multiplier 3 (line 4)
        if self.take_umbrella:
            umbrella_txt = 'Grab your umbrella!'
        else:
            umbrella_txt = 'No umbrella needed today.'
        self.disp_umbrella_info(umbrella_txt)
        # Today
        today = self.weather.daily[0]
        today_string = "Today"
        multiplier = 1
        self.display_subwindow(today, today_string, multiplier)
        # counts from 0 to 2 (the next three days)
        for future_day in range(3):
            this_day = self.weather.daily[future_day + 1]
            this_day_no = datetime.datetime.fromtimestamp(this_day.time)
            this_day_string = this_day_no.strftime("%A")
            multiplier += 2
            self.display_subwindow(this_day, this_day_string, multiplier)
        # Update the display
        pygame.display.update()
|
StarcoderdataPython
|
1663782
|
from . import analysis, config, modeling, preproc, save_load, train_repeated_model, train_repeated_model_binary, \
    train_single_model, train_single_model_binary, train_single_model_cv

# `__all__` must contain attribute *names* (strings); listing the module
# objects themselves makes `from package import *` raise TypeError.
__all__ = ['analysis', 'config', 'modeling', 'preproc', 'save_load', 'train_repeated_model',
           'train_repeated_model_binary', 'train_single_model', 'train_single_model_binary',
           'train_single_model_cv']
|
StarcoderdataPython
|
1797408
|
from disposable_email_domains import whitelist
def test_whitelist_inclusion():
    """A domain known to be whitelisted is present in the set."""
    assert 'spamcannon.com' in whitelist
def test_whitelist_exclusion():
    """A disposable domain must not appear on the whitelist."""
    assert 'spamcowboy.com' not in whitelist
|
StarcoderdataPython
|
107676
|
import csv
import os
from clients.models import Client
class ClientsServices():
    """CSV-backed persistence for `Client` records.

    Rows are stored headerless, in `Client.schema()` column order.
    """

    def __init__(self, database) -> None:
        self.database = database
        # Temporary file used for atomic rewrites of the database.
        self.database_tmp = f'{database}.tmp'

    def create_client(self, client):
        """Append a single client row to the CSV database."""
        # newline='' is required by the csv module to avoid spurious
        # blank rows on platforms with \r\n line endings.
        with open(self.database, mode='a', newline='') as f:
            writer = csv.DictWriter(f, fieldnames=Client.schema())
            writer.writerow(client.to_dict())

    def read_clients(self):
        """Return every stored row as a dict keyed by the Client schema."""
        with open(self.database, mode='r', newline='') as f:
            reader = csv.DictReader(f, fieldnames=Client.schema())
            return list(reader)

    def update_client(self, update_client):
        """Replace the stored row whose uid matches `update_client.uid`."""
        all_clients = self.read_clients()
        update_clients = []
        for client in all_clients:
            if client["uid"] == update_client.uid:
                update_clients.append(update_client.to_dict())
            else:
                update_clients.append(client)
        self._save_database(update_clients)

    def _save_database(self, clients):
        """Rewrite the whole database with `clients`, then swap it in."""
        with open(self.database_tmp, mode='w', newline='') as f:
            writer = csv.DictWriter(f, fieldnames=Client.schema())
            writer.writerows(clients)
        # os.replace is atomic and, unlike the previous remove()+rename()
        # pair, also succeeds on Windows when the destination exists.
        os.replace(self.database_tmp, self.database)

    def delete_client(self, delete_uid):
        """Remove the row whose uid equals `delete_uid`."""
        all_rows = self.read_clients()
        update_data = [row for row in all_rows if row["uid"] != delete_uid]
        self._save_database(update_data)
|
StarcoderdataPython
|
15514
|
<filename>llist_gameboard/urls.py
"""
URL's for the LList Game Board app.
"""
from django.urls import path
from llist_gameboard.api import llist_api
from . import views
urlpatterns = [
    # Views
    path('', views.llist_game_board, name='llist-game-board'),

    # Game Play API Calls For Linked List
    # Route parameters are captured as strings and forwarded to the
    # llist_api handlers (game id, node names, difficulty, etc.).
    path('llist_api', llist_api.api_overview, name='llist-game-board-api_overview'),
    path('llist_api/start_game/<str:difficulty>/<str:player_ids>/<str:data_structures>', llist_api.start_game, name='llist-game-board-start_game'),
    path('llist_api/board/<str:game_id>', llist_api.board, name='llist-game-board-game_status'),
    path('llist_api/dig_tunnel/<str:game_id>/<str:origin>/<str:destination>', llist_api.dig_tunnel, name='llist-game-board-dig_tunnel'),
    path('llist_api/dig_chamber/<str:game_id>/<str:origin>/<str:move_ant>/<str:ant>', llist_api.dig_chamber, name='llist-game-board-dig_chamber'),
    path('llist_api/fill_chamber/<str:game_id>/<str:to_fill>', llist_api.fill_chamber, name='llist-game-board-fill_chamber'),
    path('llist_api/spawn_ant/<str:game_id>', llist_api.spawn_ant, name='llist-game-board-spawn_ant'),
    path('llist_api/forage/<str:game_id>/<str:difficulty>/<str:dest>', llist_api.forage, name='llist-game-board-forage'),
    path('llist_api/move_food/<str:game_id>/<str:start>/<str:dest>', llist_api.move_food, name='llist-game-board-move_food'),
    path('llist_api/move_ant/<str:game_id>/<str:start>/<str:dest>', llist_api.move_ant, name='llist-game-board-move_ant'),
]
|
StarcoderdataPython
|
3273750
|
<filename>NamingConvention/NamingConventionTests/NameCompliance.py
#! /usr/bin/python
# Description: Checks a string against the standard naming conventions.
# Author: <NAME>
# Date: 25/10/17
# Authorised by:
# Last Modified: 17/11/17
# Audit Log:
# Notes: Prints error messages to stdout, or nothing if the string complies.
#        Accepts nothing, 'Version' or 'Path' as a second argument to allow
#        for exceptions to the base rules.
# The original file used Python-2-only print statements, which are a
# SyntaxError under Python 3. The future import below makes print() produce
# byte-identical, space-separated output on BOTH Python 2 and Python 3.
from __future__ import print_function

import re
import sys
# --------------------------------------------------------------------#
inputString = sys.argv[1]
stringType = ""
if len(sys.argv) == 3:
    stringType = sys.argv[2]
stringSizeLimit = 64
stringSize = len(inputString)
# Invalid argument
if stringType != "" and stringType != "Version" and stringType != "Path":
    print('ERROR: >>>', inputString, '<<< submitted with invalid second argument. Second argument should be "Version" or "Path" if present.')
# Length limit
if stringSize > stringSizeLimit:
    print('ERROR: >>>', inputString, '<<< is too long.')
# No whitespace characters (raw strings avoid invalid-escape warnings)
searchObj = re.search(r'\s', inputString)
if searchObj:
    print('ERROR: >>>', inputString, '<<< contains whitespace.')
# Underscore not followed by a capital letter or number
searchObj = re.search(r'[_][^A-Z0-9]', inputString)
if searchObj:
    print('ERROR: >>>', inputString, '<<< contains an underscore not followed by capital or number.')
# Begins with a capital letter, not a Path
if stringType == "" or stringType == "Version":
    searchObj = re.search(r'^[A-Z]', inputString)
    if not searchObj:
        print('ERROR: >>>', inputString, '<<< does not begin with a capital letter.')
# No argument
if stringType == "":
    # String contains no non-alphanumeric character other than '_'
    searchObj = re.search(r'\W', inputString)
    if searchObj:
        print('ERROR: >>>', inputString, '<<< contains a character other than alphanumeric or underscore.')
# Version Argument
if stringType == "Version":
    # At most one '.' is tolerated (the version-number separator).
    dotCount = len(re.findall(r'\.', inputString))
    searchObj = re.search(r'[^a-zA-Z0-9_\.]', inputString)
    if searchObj or dotCount > 1:
        print('ERROR: >>>', inputString, '<<< contains a character other than alphanumeric, underscore or . outside of a version number.')
# Path Argument
if stringType == "Path":
    firstChar = re.search(r'^[^/]', inputString)
    searchObj = re.search(r'[^a-zA-Z0-9_/]', inputString)
    if searchObj or firstChar:
        print('ERROR: >>>', inputString, '<<< contains a character other than alphanumeric, underscore or /, or does not begin with /.')
sys.exit(0)
|
StarcoderdataPython
|
1777292
|
<reponame>eons-dev/build_cpp
import os
import logging
import shutil
import jsonpickle
from distutils.file_util import copy_file
from distutils.dir_util import copy_tree, mkpath
from ebbs import Builder
# Class name is what is used at cli, so we defy convention here in favor of ease-of-use.
class cpp(Builder):
    """ebbs Builder that generates a CMakeLists.txt and drives cmake + make
    for C++ projects (libraries, modules, binaries, services, tests)."""

    def __init__(this, name="C++ Builder"):
        super().__init__(name)
        this.clearBuildPath = True
        # Optional keyword arguments, overridable via the build configuration.
        this.optionalKWArgs["cpp_version"] = 11
        this.optionalKWArgs["cmake_version"] = "3.12.0"
        this.optionalKWArgs["file_name"] = None
        this.optionalKWArgs["dep_lib"] = None
        this.optionalKWArgs["output_dir"] = "out"
        this.optionalKWArgs["toolchain"] = None
        this.optionalKWArgs["toolchain_dir"] = "tool"
        this.optionalKWArgs["define"] = None
        this.optionalKWArgs["build_type"] = "Debug"
        this.optionalKWArgs["install_bin_to"] = "/usr/local/bin"
        this.optionalKWArgs["install_inc_to"] = "/usr/local/include"
        this.optionalKWArgs["install_lib_to"] = "/usr/local/lib"
        this.supportedProjectTypes.append("lib")
        this.supportedProjectTypes.append("mod")
        this.supportedProjectTypes.append("bin")
        this.supportedProjectTypes.append("srv")
        this.supportedProjectTypes.append("test")
        # File extensions recognized as C/C++ sources and headers.
        this.valid_cxx_extensions = [
            ".c",
            ".cpp",
            ".h",
            ".hpp"
        ]
        # File extensions recognized as linkable libraries.
        this.valid_lib_extensions = [
            ".a",
            ".so"
        ]

    # Required Builder method. See that class for details.
    def DidBuildSucceed(this):
        """A build succeeded if it produced at least one file in the package path."""
        result = this.packagePath
        logging.debug(f"Checking if build was successful; output should be in {result}")
        return bool(os.listdir(result))

    # Required Builder method. See that class for details.
    def Build(this):
        """Generate CMakeLists.txt, run cmake and make, and package the output."""
        if (this.file_name is None):
            this.file_name = this.projectName
        this.projectIsLib = False
        if (this.projectType in ["lib", "mod"]):
            this.projectIsLib = True
        this.packagePath = os.path.join(this.buildPath, this.output_dir)
        mkpath(this.packagePath)
        this.toolPath = os.path.join(this.buildPath, this.toolchain_dir)
        logging.debug(f"Building in {this.buildPath}")
        logging.debug(f"Packaging in {this.packagePath}")
        this.GenCMake()
        if (this.projectIsLib):
            this.GenSingleHeader()
        this.CMake(".")
        this.Make()
        # include header files with libraries
        if (this.projectIsLib):
            copy_tree(this.incPath, this.packagePath)
        # this.GenInstall()

    def GetSourceFiles(this, directory, seperator=" "):
        """Return all C/C++ source and header files under *directory*
        (recursively), joined by *seperator*."""
        found = []
        for root, dirs, files in os.walk(directory):
            for f in files:
                name, ext = os.path.splitext(f)
                if (ext in this.valid_cxx_extensions):
                    found.append(os.path.join(root, f))
        # join() fixes the original's ret[:-1], which only stripped one
        # character and corrupted output for multi-character separators.
        return seperator.join(found)

    def GetLibs(this, directory, seperator=" "):
        """Return the link names of libraries in *directory* (non-recursive),
        joined by *seperator*. The 'lib' filename prefix is stripped."""
        found = []
        for file in os.listdir(directory):
            if (not os.path.isfile(os.path.join(directory, file))):
                continue
            name, ext = os.path.splitext(file)
            if (ext in this.valid_lib_extensions):
                # name[3:] strips the conventional "lib" prefix.
                found.append(name[3:])
        return seperator.join(found)

    def GenCMake(this):
        """Write a CMakeLists.txt (in the current working directory) that
        builds the project as configured."""
        cmakeFile = open("CMakeLists.txt", "w")
        cmakeFile.write(f'''
cmake_minimum_required (VERSION {this.cmake_version})
set(CMAKE_CXX_STANDARD {this.cpp_version})
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY {this.packagePath})
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY {this.packagePath})
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY {this.packagePath})
set(CMAKE_BUILD_TYPE {this.build_type})
''')
        if (this.toolchain is not None):
            # Make sure we have the toolchain.
            if (not os.path.isdir(this.toolPath)):
                toolchainSourcePath = os.path.join(this.executor.args.repo_store, f"toolchain_{this.toolchain}")
                if (not os.path.isdir(toolchainSourcePath)):
                    this.executor.DownloadPackage(f"toolchain_{this.toolchain}", registerClasses=False, createSubDirectory=True)
                copy_tree(toolchainSourcePath, this.toolPath)
            toolchainCmakeFile = f"{os.path.join(this.toolPath, this.toolchain)}.cmake"
            if (not os.path.isfile(toolchainCmakeFile)):
                # NOTE(review): OtherBuildError is not imported in this file;
                # confirm it is provided by ebbs, otherwise this line will
                # raise NameError instead of the intended error.
                raise OtherBuildError(f"Could not find cmake file: {toolchainCmakeFile}")
            toolchainBinPath = os.path.join(this.toolPath, "bin")
            cmakeFile.write(f"set(CROSS_TARGET_TOOLCHAIN_PATH {toolchainBinPath})\n")
            cmakeFile.write(f"set(CMAKE_TOOLCHAIN_FILE {toolchainCmakeFile})\n")
        cmakeFile.write(f"project({this.file_name})\n")
        if (this.incPath is not None):
            cmakeFile.write(f"include_directories({this.incPath})\n")
        if (this.projectIsLib):
            logging.info("Adding library specific code")
            cmakeFile.write(f"add_library ({this.file_name} SHARED {this.GetSourceFiles(this.srcPath)})\n")
        else:
            logging.info("Adding binary specific code")
            cmakeFile.write(f"add_executable({this.file_name} {this.GetSourceFiles(this.srcPath)})\n")
        if (this.define is not None):
            cmakeFile.write(f"add_compile_definitions(")
            for key, value in this.define.items():
                if (not value):
                    cmakeFile.write(f"{key}")
                else:
                    cmakeFile.write(f"{key}={value}")
                cmakeFile.write(" ")
            cmakeFile.write(")\n")
        cmakeFile.write(f'''
set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)
target_link_libraries({this.file_name} Threads::Threads)
''')
        if (this.libPath is not None):
            cmakeFile.write(f"include_directories({this.libPath})\n")
            # BUGFIX: the original emitted a stray extra ')' here, producing
            # an invalid CMakeLists.txt whenever libPath was set.
            cmakeFile.write(f"target_link_directories({this.file_name} PUBLIC {this.libPath})\n")
            cmakeFile.write(f"target_link_libraries({this.file_name} {this.GetLibs(this.libPath)})\n")
        if (this.dep_lib is not None):
            cmakeFile.write(f"target_link_libraries({this.file_name} {' '.join(this.dep_lib)})\n")
        cmakeFile.write('set(CMAKE_CXX_LINK_EXECUTABLE "<CMAKE_CXX_COMPILER> <FLAGS> <CMAKE_CXX_LINK_FLAGS> <LINK_FLAGS> <OBJECTS> -o <TARGET> -Wl,--start-group -Wl,--whole-archive <LINK_LIBRARIES> -Wl,--no-whole-archive -Wl,--end-group")') #per https://stackoverflow.com/questions/53071878/using-whole-archive-linker-option-with-cmake-and-libraries-with-other-library
        cmakeFile.close()

    def GenSingleHeader(this):
        """Write <projectName>.h in the package path: a convenience header
        that #includes every header the project ships."""
        includes = this.GetSourceFiles(this.incPath).split(' ')
        includes = [os.path.relpath(path, this.incPath) for path in includes]
        singleHeaderFile = open(os.path.join(this.packagePath, f"{this.projectName}.h"), "w+")
        singleHeaderFile.write('#pragma once\n')
        for i in includes:
            singleHeaderFile.write(f"#include <{i}>\n")
        singleHeaderFile.close()

    # Create install.json, which will be used by emi to install what we've built.
    # NOTE(review): currently unused (the call in Build() is commented out);
    # output is JSON-like but not strict JSON (keys and values are unquoted).
    # TODO: why not just use jsonpickle?
    def GenInstall(this):
        files = []
        dirs = []
        for thing in os.listdir(this.packagePath):
            # BUGFIX: the original tested os.path.isdir(thing), which resolved
            # relative to the CWD instead of packagePath.
            if (os.path.isdir(os.path.join(this.packagePath, thing))):
                dirs.append(thing)
            else:
                files.append(thing)
        installFile = open(os.path.join(this.packagePath, "install.json"), "w+")
        installFile.write('{\n')
        if (this.dep_lib is not None):
            installFile.write('  dep: [\n')
            # BUGFIX: each loop below now compares against the length of the
            # list it actually iterates (the original always used len(files),
            # mis-placing commas after the final entry).
            for i, d in enumerate(this.dep_lib):
                installFile.write(f'    {d}')
                installFile.write(',\n' if i != len(this.dep_lib) - 1 else '\n')
            installFile.write('  ],\n')
        if (not this.projectIsLib):
            installFile.write('  bin: [\n')
            binEntries = files + dirs
            for i, entry in enumerate(binEntries):
                installFile.write(f'    {entry}')
                installFile.write(',\n' if i != len(binEntries) - 1 else '\n')
            installFile.write('  ]\n')
        if (this.projectIsLib):
            # Separate library files from include files.
            # NOTE: this only parses files in the root of packagePath, not recursively.
            includes = []
            libraries = []
            for f in files:
                name, ext = os.path.splitext(f)
                if (ext in this.valid_lib_extensions):
                    libraries.append(f)
                else:
                    includes.append(f)
            # BUGFIX: the original wrote a second, unmatched '{' here.
            installFile.write('  lib: [\n')
            for i, f in enumerate(libraries):
                installFile.write(f'    {f}')
                installFile.write(',\n' if i != len(libraries) - 1 else '\n')
            installFile.write('  ],\n')
            installFile.write('  inc: [\n')
            incEntries = dirs + includes
            for i, thing in enumerate(incEntries):
                installFile.write(f'    {thing}')
                installFile.write(',\n' if i != len(incEntries) - 1 else '\n')
            installFile.write('  ]\n')
        installFile.write('}\n')
        installFile.close()

    def CMake(this, path):
        """Run cmake for *path* via the Builder's shell helper."""
        this.RunCommand(f"cmake {path}")

    def Make(this):
        """Run make via the Builder's shell helper."""
        this.RunCommand("make")
|
StarcoderdataPython
|
1775546
|
import os
from pathlib import Path
class Initialize:
    """First-run bootstrap for the tbw database.

    If the database file does not yet exist, it is created and seeded from
    the node's forged blocks (then the process exits); otherwise delegate
    records are refreshed.
    """

    def __init__(self, config, database, sql):
        self.home = str(Path.home())
        self.database = database
        self.sql = sql
        self.config = config
        # Fixed on-disk location of the tbw database.
        data_path = self.home + '/core3-tbw/core/data/tbw.db'
        if not os.path.exists(data_path):
            self.initialize()
            # Initialization is a one-shot operation; stop the process so the
            # service can be restarted against the freshly created database.
            quit()
        else:
            print("Database detected - no initialization needed")
            self.update_delegate_records()

    def initialize(self):
        """Create the database schema and import all forged blocks,
        marking everything up to the configured start block as processed."""
        self.sql.open_connection()
        print("Setting up database")
        self.sql.setup()
        print("Importing forged blocks")
        self.database.open_connection()
        total_blocks = self.database.get_all_blocks()
        self.database.close_connection()
        print("Storing forged blocks in database")
        self.sql.store_blocks(total_blocks)
        print("Marking blocks proccessed up to starting block {}".format(self.config.start_block))
        self.sql.mark_processed(self.config.start_block, initial = "Y")
        processed_blocks = self.sql.processed_blocks().fetchall()
        self.sql.close_connection()
        print("Total blocks imported - {}".format(len(total_blocks)))
        print("Total blocks marked as processed - {}".format(len(processed_blocks)))
        print("Finished setting up database")

    def update_delegate_records(self):
        """Refresh the stored delegate reward accounts from configuration."""
        self.sql.open_connection()
        # list() replaces the original's identity comprehension.
        accounts = list(self.config.delegate_fee_address)
        self.sql.store_delegate_rewards(accounts)
        self.sql.close_connection()
|
StarcoderdataPython
|
163479
|
<gh_stars>0
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect
from django.views.generic import TemplateView
from rooms import models as room_models
from . import models
@login_required
def toggle_room(request, room_pk):
    """Add or remove room *room_pk* on the user's favourites list.

    The action ('add' or 'remove') comes from the ?action= query parameter.
    Always redirects back to the room's detail page.
    """
    action = request.GET.get("action", None)
    room = room_models.Room.objects.get_or_none(pk=room_pk)
    if action is not None and room is not None:
        # Lazily create the user's favourites list on first use.
        favourites, _ = models.List.objects.get_or_create(
            user=request.user, name="My Favorite Houses"
        )
        if action == "add":
            favourites.rooms.add(room)
        elif action == "remove":
            favourites.rooms.remove(room)
    return redirect("rooms:detail", pk=room_pk)
class SeeFavsView(TemplateView):
    """Static template view for the favourites page (lists/list_detail.html)."""
    template_name = "lists/list_detail.html"
|
StarcoderdataPython
|
1798165
|
"""
Exception types used by powercmd module.
"""
class InvalidInput(ValueError):
    """Raised when user input cannot be parsed as a valid command.

    Subclasses ValueError so generic value-error handlers also catch it.
    """
|
StarcoderdataPython
|
3313253
|
'''
Write a program that takes an array A of n numbers,
and rearranges A's elements to get a new array B
having the property that:
B[0] < B[1] > B[2] < B[3] > B[4] < B[5] > ...
'''
def rearrange(array):  # Time: O(n)
    """Reorder *array* in place into alternating valleys and peaks.

    After the call, B[0] < B[1] > B[2] < B[3] > ... holds (with strict
    inequalities whenever neighbouring values differ). Returns the array.
    """
    for idx in range(len(array) - 1):
        if idx % 2:
            # Odd index: this element should be a peak relative to the next.
            misplaced = array[idx] < array[idx + 1]
        else:
            # Even index: this element should be a valley relative to the next.
            misplaced = array[idx] > array[idx + 1]
        if misplaced:
            array[idx], array[idx + 1] = array[idx + 1], array[idx]
    return array
# Inline regression checks for rearrange(): ascending, descending,
# duplicate-heavy, and even-length inputs.
assert(rearrange([1, 2, 3, 4, 5, 6, 7, 8, 9]) == [1, 3, 2, 5, 4, 7, 6, 9, 8])
assert(rearrange([9, 8, 7, 6, 5, 4, 3, 2, 1]) == [8, 9, 6, 7, 4, 5, 2, 3, 1])
assert(rearrange([12, 11, 13, 9, 12, 8, 14, 13, 15])
       == [11, 13, 9, 12, 8, 14, 12, 15, 13])
assert(rearrange([310, 315, 275, 295, 260, 270, 290, 230, 255, 250])
       == [310, 315, 275, 295, 260, 290, 230, 270, 250, 255])
def rearrange_with_sorted(array):  # Time: O(n)
    """In-place alternating reorder (valley/peak) using pairwise sorts.

    Each adjacent pair array[i:i+2] is sorted ascending at even i and
    descending at odd i, producing B[0] < B[1] > B[2] < ... Returns the array.
    """
    i = 0
    while i < len(array):
        array[i:i + 2] = sorted(array[i:i + 2], reverse=bool(i % 2))
        i += 1
    return array
# Inline regression checks for rearrange_with_sorted(): same fixtures as for
# rearrange() above, confirming both implementations agree.
assert(rearrange_with_sorted([1, 2, 3, 4, 5, 6, 7, 8, 9])
       == [1, 3, 2, 5, 4, 7, 6, 9, 8])
assert(rearrange_with_sorted([9, 8, 7, 6, 5, 4, 3, 2, 1])
       == [8, 9, 6, 7, 4, 5, 2, 3, 1])
assert(rearrange_with_sorted([12, 11, 13, 9, 12, 8, 14, 13, 15])
       == [11, 13, 9, 12, 8, 14, 12, 15, 13])
assert(rearrange_with_sorted([310, 315, 275, 295, 260, 270, 290, 230, 255, 250])
       == [310, 315, 275, 295, 260, 290, 230, 270, 250, 255])
|
StarcoderdataPython
|
115451
|
<filename>ui_shapeFlow.py
# -*- coding: utf-8 -*-
# GUI for shapeFlow and ShapeMatching Maya plugins
# How To Use:
# 1. in script editor
# import plugin_deformer.ui_shapeFlow as ui
# reload(ui)
# ui.UI_Gradient()
#
# 2. Select target mesh and then shift+click to select the end mesh.
# 3. Create deformer from menu
# 4. Play animetion
# (Time slider is connected to the deformer to activate "commpute" method)
# @author <NAME>
# @date 2013/May/13
#import debugmaya
#debugmaya.startDebug()
# Maya modules
import maya.cmds as cmds
import pymel.core as pm
# load plugin
# Names of the Maya deformer plugins this UI drives.
deformerTypes = ["ShapeFlow", "ShapeMatching"]
# Ensure both plugins are loaded before building the UI. The loop variable is
# renamed from the original 'type', which shadowed the builtin.
for pluginName in deformerTypes:
    try:
        cmds.loadPlugin(pluginName)
    except Exception:
        # The original used a bare 'except:'; narrow it so KeyboardInterrupt
        # and SystemExit are no longer swallowed. loadPlugin raising here is
        # treated as "already loaded", matching the original best-effort intent.
        print("Plugin %s already loaded" %(pluginName))
# GUI
class UI_ShapeFlow:
    """Maya window exposing controls for ShapeFlow/ShapeMatching deformers."""
    # Window identifier reused so a stale window can be found and replaced.
    uiID = "ShapeFlow"
    title = "ShapeFlowPlugin"
    # Deformer nodes currently shown in the UI (refreshed by createUISet).
    deformers = []
    ## Constructor
    def __init__(self):
        # Replace any previous instance of this window.
        if pm.window(self.uiID, exists=True):
            pm.deleteUI(self.uiID)
        win = pm.window(self.uiID, title=self.title, menuBar=True)
        with win:
            pm.menu( label='Plugin', tearOff=True )
            for type in deformerTypes:
                pm.menuItem( label=type, c=pm.Callback( self.initPlugin, type) )
            self._parentLayout = pm.columnLayout( adj=True )
            with self._parentLayout:
                self.createUISet()
    def createUISet(self):
        """Build one collapsible frame of sliders per existing deformer node.

        The two nearly identical loops differ only in which node type they
        list and which attributes they expose (smw vs. stf/att).
        """
        self._childLayout = pm.columnLayout( adj=True )
        with self._childLayout:
            pm.text(l="Click target mesh, then shift+click end mesh")
            # ShapeFlow nodes: delta + shapeMatching weight.
            self.deformers = pm.ls(type=deformerTypes[0])
            for node in self.deformers:
                frameLayout = pm.frameLayout( label=node.name(), collapsable = True)
                with frameLayout:
                    pm.button( l="Del", c=pm.Callback( self.deleteNode, node))
                    pm.attrControlGrp( label="active", attribute= node.active)
                    pm.attrFieldSliderGrp(label="delta", min=0.001, max=5.0, attribute=node.delta)
                    pm.attrFieldSliderGrp(label="shapeMatching", min=0.1, max=10.0, attribute=node.smw)
            # ShapeMatching nodes: delta + stiffness + attenuation.
            self.deformers = pm.ls(type=deformerTypes[1])
            for node in self.deformers:
                frameLayout = pm.frameLayout( label=node.name(), collapsable = True)
                with frameLayout:
                    pm.button( l="Del", c=pm.Callback( self.deleteNode, node))
                    pm.attrControlGrp( label="active", attribute= node.active)
                    pm.attrFieldSliderGrp(label="delta", min=0.001, max=5.0, attribute=node.delta)
                    pm.attrFieldSliderGrp(label="stiffness", min=0.001, max=10.0, attribute=node.stf)
                    pm.attrFieldSliderGrp(label="attenuation", min=0.001, max=1.0, attribute=node.att)
    # delete deformer node
    def deleteNode(self,node):
        cmds.delete(node.name())
        self.updateUI()
    def initPlugin(self,type):
        """Create a deformer of *type* on the last-selected mesh and wire the
        second-to-last selected mesh as its start shape.

        Requires at least two selected transforms; silently returns otherwise.
        """
        meshes = pm.selected( type="transform" )
        if len(meshes)<2:
            return
        pm.select( meshes[-1])
        deformer = cmds.deformer(type=type)[0]
        shape=meshes[-2].getShapes()[0]
        cmds.connectAttr(shape+".outMesh", deformer+".startShape")
        # Drive the deformer from the time slider so compute() runs on playback.
        cmds.connectAttr("time1.outTime", deformer+".slider")
        self.updateUI()
    def updateUI(self):
        """Rebuild the per-node controls after nodes are added or removed."""
        pm.deleteUI( self._childLayout )
        pm.setParent(self._parentLayout)
        self.createUISet()
|
StarcoderdataPython
|
3221990
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import print_function
import warnings
from six import BytesIO
from astropy.table import Table
from astropy.io import fits
from astropy import coordinates
from astropy import units as u
from ..query import BaseQuery
from ..utils import commons
from ..utils import async_to_sync
from ..exceptions import InvalidQueryError, NoResultsWarning
from . import conf
__all__ = ['Heasarc', 'HeasarcClass']
@async_to_sync
class HeasarcClass(BaseQuery):

    """
    HEASARC query class.

    For a list of available HEASARC mission tables, visit:
        https://heasarc.gsfc.nasa.gov/cgi-bin/W3Browse/w3catindex.pl
    """

    URL = conf.server
    TIMEOUT = conf.timeout
    # Coordinate systems accepted by _args_to_payload's 'coordsys' argument.
    coord_systems = ['fk5', 'fk4', 'equatorial', 'galactic']

    def query_async(self, request_payload, cache=True, url=conf.server):
        """
        Submit a query based on a given request_payload. This allows detailed
        control of the query to be submitted.

        Returns the raw HTTP response; use _parse_result to get a Table.
        """
        response = self._request('GET', url, params=request_payload,
                                 timeout=self.TIMEOUT, cache=cache)
        return response

    def query_mission_list(self, cache=True, get_query_payload=False):
        """
        Returns a list of all available mission tables with descriptions
        """
        request_payload = self._args_to_payload(
            Entry='none',
            mission='xxx',
            displaymode='BatchDisplay'
        )

        if get_query_payload:
            return request_payload

        # Parse the results specially (it's ascii format, not fits)
        response = self.query_async(
            request_payload,
            url=conf.server,
            cache=cache
        )
        data = BytesIO(response.content)
        data_str = data.read().decode('utf-8')
        # Querying the bogus table 'xxx' makes the server respond with the
        # catalogue index preceded by this error banner; strip the banner.
        data_str = data_str.replace('Table xxx does not seem to exist!\n\n\n\nAvailable tables:\n', '')
        table = Table.read(data_str, format='ascii.fixed_width_two_line',
                           delimiter='+', header_start=1, position_line=2,
                           data_start=3, data_end=-1)
        return table

    def query_mission_cols(self, mission, cache=True, get_query_payload=False,
                           **kwargs):
        """
        Returns a list containing the names of columns that can be returned for
        a given mission table. By default all column names are returned.

        Parameters
        ----------
        mission : str
            Mission table (short name) to search from
        fields : str, optional
            Return format for columns from the server available options:
            * Standard      : Return default table columns
            * All (default) : Return all table columns
            * <custom>      : User defined csv list of columns to be returned
        All other parameters have no effect
        """
        # Query fails if nothing is found, so set search radius very large and
        # only take a single value (all we care about is the column names)
        kwargs['resultmax'] = 1

        # By default, return all column names
        fields = kwargs.get('fields', None)
        if fields is None:
            kwargs['fields'] = 'All'

        response = self.query_region_async(position='0.0 0.0', mission=mission,
                                           radius='361 degree', cache=cache,
                                           get_query_payload=get_query_payload,
                                           **kwargs)

        # Return payload if requested
        if get_query_payload:
            return response

        return self._parse_result(response).colnames

    def query_object_async(self, object_name, mission,
                           cache=True, get_query_payload=False,
                           **kwargs):
        """
        Query around a specific object within a given mission catalog

        Parameters
        ----------
        object_name : str
            Object to query around. To set search radius use the 'radius'
            parameter.
        mission : str
            Mission table to search from
        **kwargs :
            see `~astroquery.heasarc.HeasarcClass._args_to_payload` for list
            of additional parameters that can be used to refine search query
        """
        request_payload = self._args_to_payload(
            mission=mission,
            entry=object_name,
            **kwargs
        )

        # Return payload if requested
        if get_query_payload:
            return request_payload

        return self.query_async(request_payload, cache=cache)

    def query_region_async(self, position, mission, radius,
                           cache=True, get_query_payload=False,
                           **kwargs):
        """
        Query around specific set of coordinates within a given mission
        catalog. Method first converts the supplied coordinates into the FK5
        reference frame and searches for sources from there. Because of this,
        returned offset coordinates may look different than the ones supplied.

        Parameters
        ----------
        position : `astropy.coordinates` or str
            The position around which to search. It may be specified as a
            string in which case it is resolved using online services or as
            the appropriate `astropy.coordinates` object. ICRS coordinates
            may also be entered as a string.
            (adapted from nrao module)
        mission : str
            Mission table to search from
        radius :
            Astropy Quantity object, or a string that can be parsed into one.
            e.g., '1 degree' or 1*u.degree.
        **kwargs :
            see `~astroquery.heasarc.HeasarcClass._args_to_payload` for list
            of additional parameters that can be used to refine search query
        """
        # Convert the coordinates to FK5
        c = commons.parse_coordinates(position).transform_to(coordinates.FK5)
        kwargs['coordsys'] = 'fk5'
        kwargs['equinox'] = 2000

        # Generate the request
        request_payload = self._args_to_payload(
            mission=mission,
            entry="{},{}".format(c.ra.degree, c.dec.degree),
            radius=u.Quantity(radius),
            **kwargs
        )

        # Return payload if requested
        if get_query_payload:
            return request_payload

        # Submit the request
        return self.query_async(request_payload, cache=cache)

    def _fallback(self, content):
        """
        Blank columns which have to be converted to float or in fail so
        lets fix that by replacing with -1's

        NOTE(review): `content` is treated interchangeably as str and bytes
        here (BytesIO(content) vs. content.split("END")); this relies on the
        py2/six string model visible in this file's imports — confirm before
        running py3-only.
        """
        data = BytesIO(content)
        header = fits.getheader(data, 1)  # Get header for column info
        # TBCOLn gives each column's start position, TFORMn its width.
        colstart = [y for x, y in header.items() if "TBCOL" in x]
        collens = [int(float(y[1:]))
                   for x, y in header.items() if "TFORM" in x]

        new_table = []

        old_table = content.split("END")[-1].strip()
        for line in old_table.split("\n"):
            newline = []
            for n, tup in enumerate(zip(colstart, collens), start=1):
                cstart, clen = tup
                part = line[cstart - 1:cstart + clen]
                newline.append(part)
                if len(part.strip()) == 0:
                    # Blank numeric (float/int) cells get a -1 placeholder so
                    # the table parses.
                    if header["TFORM%i" % n][0] in ["F", "I"]:
                        # extra space is required to sperate column
                        newline[-1] = "-1".rjust(clen) + " "
            new_table.append("".join(newline))

        data = BytesIO(content.replace(old_table, "\n".join(new_table)))
        return Table.read(data, hdu=1)

    def _parse_result(self, response, verbose=False):
        # if verbose is False then suppress any VOTable related warnings
        if not verbose:
            commons.suppress_vo_warnings()

        # Translate known server-side error banners into Python exceptions.
        if "BATCH_RETRIEVAL_MSG ERROR:" in response.text:
            raise InvalidQueryError("One or more inputs is not recognized by HEASARC. "
                                    "Check that the object name is in GRB, SIMBAD+Sesame, or "
                                    "NED format and that the mission name is as listed in "
                                    "query_mission_list().")
        elif "Software error:" in response.text:
            raise InvalidQueryError("Unspecified error from HEASARC database. "
                                    "\nCheck error message: \n{!s}".format(response.text))
        elif "NO MATCHING ROWS" in response.text:
            warnings.warn(NoResultsWarning("No matching rows were found in the query."))
            return Table()

        try:
            data = BytesIO(response.content)
            table = Table.read(data, hdu=1)
            return table
        except ValueError:
            # Malformed FITS tables (blank numeric cells) are repaired here.
            return self._fallback(response.content)

    def _args_to_payload(self, **kwargs):
        """
        Generates the payload based on user supplied arguments

        Parameters
        ----------
        mission : str
            Mission table to query
        entry : str, optional
            Object or position for center of query. A blank value will return
            all entries in the mission table. Acceptable formats:
            * Object name : Name of object, e.g. 'Crab'
            * Coordinates : X,Y coordinates, either as 'degrees,degrees' or
              'hh mm ss,dd mm ss'
        fields : str, optional
            Return format for columns from the server available options:
            * Standard (default) : Return default table columns
            * All                : Return all table columns
            * <custom>           : User defined csv list of columns to be
              returned
        radius : float (arcmin), optional
            Astropy Quantity object, or a string that can be parsed into one.
            e.g., '1 degree' or 1*u.degree.
        coordsys: str, optional
            If 'entry' is a set of coordinates, this specifies the coordinate
            system used to interpret them. By default, equatorial coordinates
            are assumed. Possible values:
            * 'fk5' <default> (FK5 J2000 equatorial coordinates)
            * 'fk4' (FK4 B1950 equatorial coordinates)
            * 'equatorial' (equatorial coordinates, `equinox` param
              determines epoch)
            * 'galactic' (Galactic coordinates)
        equinox : int, optional
            Epoch by which to interpret supplied equatorial coordinates
            (defaults to 2000, ignored if `coordsys` is not 'equatorial')
        resultmax : int, optional
            Set maximum query results to be returned
        sortvar : str, optional
            Set the name of the column by which to sort the results. By default
            the results are sorted by distance from queried object/position
        displaymode : str, optional
            Return format from server. Since the user does not interact with
            this directly, it's best to leave this alone
        action : str, optional
            Type of action to be taken (defaults to 'Query')
        """
        # Define the basic query for this object
        request_payload = dict(
            tablehead=('name=BATCHRETRIEVALCATALOG_2.0 {}'
                       .format(kwargs.get('mission'))),
            Entry=kwargs.get('entry', 'none'),
            Action=kwargs.get('action', 'Query'),
            displaymode=kwargs.get('displaymode', 'FitsDisplay')
        )

        # Fill in optional information for refined queries

        # Handle queries involving coordinates
        coordsys = kwargs.get('coordsys', 'fk5')
        if coordsys.lower() == 'fk5':
            request_payload['Coordinates'] = 'Equatorial: R.A. Dec'
        elif coordsys.lower() == 'fk4':
            request_payload['Coordinates'] = 'Equatorial: R.A. Dec'
            request_payload['equinox'] = 1950
        elif coordsys.lower() == 'equatorial':
            request_payload['Coordinates'] = 'Equatorial: R.A. Dec'
            equinox = kwargs.get('equinox', None)
            if equinox is not None:
                request_payload['Equinox'] = str(equinox)
        elif coordsys.lower() == 'galactic':
            request_payload['Coordinates'] = 'Galactic: LII BII'
        else:
            raise ValueError("'coordsys' parameter must be one of {!s}"
                             .format(self.coord_systems))

        # Specify which table columns are to be returned
        fields = kwargs.get('fields', None)
        if fields is not None:
            if fields.lower() == 'standard':
                request_payload['Fields'] = 'Standard'
            elif fields.lower() == 'all':
                request_payload['Fields'] = 'All'
            else:
                # Custom csv list of column names.
                request_payload['varon'] = fields.lower().split(',')

        # Set search radius (arcmin)
        radius = kwargs.get('radius', None)
        if radius is not None:
            request_payload['Radius'] = "{}".format(u.Quantity(radius).to(u.arcmin))

        # Maximum number of results to be returned
        resultmax = kwargs.get('resultmax', None)
        if resultmax is not None:
            request_payload['ResultMax'] = int(resultmax)

        # Set variable for sorting results
        sortvar = kwargs.get('sortvar', None)
        if sortvar is not None:
            request_payload['sortvar'] = sortvar.lower()

        return request_payload
Heasarc = HeasarcClass()
|
StarcoderdataPython
|
1756740
|
<reponame>HMProenca/RuleList<filename>tests/rulelistmodel/test_rulelsetmodel.py
import numpy as np
import pandas as pd
import pytest
from gmpy2 import mpz, bit_mask
from rulelist.datastructure.data import Data
from rulelist.rulelistmodel.rulesetmodel import RuleSetModel
@pytest.fixture
def constant_parameters():
    """Raw inputs for a small two-attribute / two-target Data fixture."""
    input_n_cutpoints = 5
    input_discretization = "static"
    input_target_data = "gaussian"
    input_minsupp = 0
    # 100 rows: one numeric attribute and one binary categorical attribute.
    dictinput = {"attribute1": np.arange(100),
                 "attribute2": np.array(["below50" if i < 50 else "above49" for i in range(100)])}
    input_input_data = pd.DataFrame(data=dictinput)
    # Two numeric targets: a ramp and a constant column.
    dictoutput = {"target1": np.arange(100), "target2": np.ones(100)}
    input_output_data = pd.DataFrame(data=dictoutput)
    yield input_input_data, input_output_data, input_n_cutpoints, input_discretization, input_target_data, input_minsupp
@pytest.fixture
def generate_input_dataframe_two_target(constant_parameters):
    """Build a Data object from the constant_parameters fixture."""
    input_input_data, input_output_data, input_n_cutpoints, input_discretization, input_target_data, input_minsupp\
        = constant_parameters
    data = Data(input_input_data, input_n_cutpoints, input_discretization,
                input_output_data, input_target_data, input_minsupp)
    yield data
class TestRuleSetModel:
    def test_initialization(self, generate_input_dataframe_two_target):
        """A fresh RuleSetModel covers nothing: empty subgroup list, empty
        covered bitset, and the default rule spanning every instance."""
        data = generate_input_dataframe_two_target
        input_task = "discovery"
        input_target_model = "gaussian"
        input_max_depth = 5
        input_beam_width = 10
        input_minsupp = 0
        input_max_rules = 10
        input_alpha_gain = 1
        expected_task = input_task
        expected_target_model = input_target_model
        expected_alpha = 1
        expected_beam_width = input_beam_width
        # Depth is capped by the number of attributes in the data.
        expected_max_depth = min(input_max_depth, data.number_attributes)
        expected_max_rules = input_max_rules
        # mpz() is an all-zero bitset; bit_mask(n) has the first n bits set.
        expected_bitset_covered = mpz()
        expected_support_covered = 0
        expected_bitset_uncovered = bit_mask(data.number_instances)
        expected_support_uncovered = data.number_instances
        expected_default_rule_statistics_usage = data.number_instances
        # Length bookkeeping is unset until the model is fitted.
        expected_length_data = None
        expected_length_original = None
        expected_length_defaultrule = None
        expected_length_ratio = 1.0
        expected_subgroups = []
        expected_length_model = 0
        output_ruleset = RuleSetModel(data, input_task, input_max_depth, input_beam_width, input_minsupp,
                                      input_max_rules, input_alpha_gain)
        assert expected_task == output_ruleset.task
        assert expected_target_model == output_ruleset.target_model
        assert expected_alpha == output_ruleset.alpha_gain
        assert expected_beam_width == output_ruleset.beam_width
        assert expected_max_depth == output_ruleset.max_depth
        assert expected_max_rules == output_ruleset.max_rules
        assert expected_bitset_covered == output_ruleset.bitset_covered
        assert expected_support_covered == output_ruleset.support_covered
        assert expected_bitset_uncovered == output_ruleset.bitset_uncovered
        assert expected_support_uncovered == output_ruleset.support_uncovered
        assert expected_subgroups == output_ruleset.subgroups
        assert expected_length_model == output_ruleset.length_model
        assert expected_default_rule_statistics_usage == output_ruleset.default_rule_statistics.usage
        assert expected_length_data == output_ruleset.length_data
        assert expected_length_original == output_ruleset.length_original
        assert expected_length_defaultrule == output_ruleset.length_defaultrule
        assert expected_length_ratio == output_ruleset.length_ratio
|
StarcoderdataPython
|
3391019
|
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Any
class Builder(ABC):
    """
    The Builder interface specifies methods for
    creating the different parts of the Product
    objects.
    """

    # Accessor for the finished product; concrete builders decide its type.
    @property
    @abstractmethod
    def product(self) -> None:
        pass

    # One abstract step per product part; concrete builders fill these in.
    @abstractmethod
    def product_part_a(self) -> None:
        pass

    @abstractmethod
    def product_part_b(self) -> None:
        pass

    @abstractmethod
    def product_part_c(self) -> None:
        pass
class ConcreteBuilder1(Builder):
    """
    Concrete builder that assembles a :class:`Product1` part by part.

    Each builder variant may produce a differently composed product while
    exposing the same construction-step interface.
    """
    def __init__(self) -> None:
        """Begin with an empty product, ready for assembly."""
        self.reset()

    def reset(self) -> None:
        """Discard any work in progress and start a fresh product."""
        self._product = Product1()

    @property
    def product(self) -> Product1:
        """Hand over the finished product and immediately start a new one."""
        finished = self._product
        self.reset()
        return finished

    def product_part_a(self) -> None:
        self._product.add("PartA1")

    def product_part_b(self) -> None:
        self._product.add("PartB1")

    def product_part_c(self) -> None:
        self._product.add("PartC1")
class Product1():
    """Simple aggregate that records the named parts added to it."""
    def __init__(self) -> None:
        # Parts accumulate in insertion order.
        self.parts: list = []

    def add(self, part: Any) -> None:
        """Append one part to the product."""
        self.parts.append(part)

    def list_parts(self) -> None:
        """Print the collected parts on one line (no trailing newline)."""
        joined = ', '.join(self.parts)
        print(f"Product parts: {joined}", end="")
class Director:
    """
    Orchestrates a builder through predefined construction sequences.

    The director is optional: clients may drive a builder directly when they
    need a custom assembly order.
    """
    def __init__(self) -> None:
        # No builder attached until the client assigns one.
        self._builder = None

    @property
    def builder(self) -> Builder:
        """The builder currently driven by this director."""
        return self._builder

    @builder.setter
    def builder(self, builder: Builder) -> None:
        """Attach the builder used by subsequent construction runs."""
        self._builder = builder

    def build_minimal_viable_product(self) -> None:
        """Assemble only part A."""
        self.builder.product_part_a()

    def build_full_featured_product(self) -> None:
        """Assemble parts A, B and C, in that order."""
        for step in (self.builder.product_part_a,
                     self.builder.product_part_b,
                     self.builder.product_part_c):
            step()
if __name__ == "__main__":
    # Demo: the same builder yields different products depending on the
    # construction sequence the director runs.
    boss = Director()
    maker = ConcreteBuilder1()
    boss.builder = maker

    print("Standard basic product: ")
    boss.build_minimal_viable_product()
    maker.product.list_parts()
    print("\n")

    print("Standard full featured product: ")
    boss.build_full_featured_product()
    maker.product.list_parts()
    print("\n")
|
StarcoderdataPython
|
28481
|
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - <NAME>, <<EMAIL>>, 2012-2014
# - <NAME>, <<EMAIL>>, 2013-2015
# - <NAME>, <<EMAIL>>, 2013-2016
# - <NAME>, <<EMAIL>>, 2014-2016
# - <NAME>, <<EMAIL>>, 2016
"""
Methods common to different conveyor submitter daemons.
"""
import math
import datetime
import json
import logging
import random
import time
import traceback
from dogpile.cache import make_region
from dogpile.cache.api import NoValue
from rucio.common.closeness_sorter import sort_sources
from rucio.common.exception import DataIdentifierNotFound, RSEProtocolNotSupported, InvalidRSEExpression, InvalidRequest
from rucio.common.rse_attributes import get_rse_attributes
from rucio.common.utils import construct_surl, chunks
from rucio.core import did, replica, request, rse as rse_core
from rucio.core.monitor import record_counter, record_timer, record_gauge
from rucio.core.rse_expression_parser import parse_expression
from rucio.db.sqla.constants import DIDType, RequestType, RequestState, RSEType
from rucio.db.sqla.session import read_session
from rucio.rse import rsemanager as rsemgr
# Short-lived (10 min) memcached-backed cache region shared by the helper
# functions below; 'distributed_lock' serialises concurrent refreshes of the
# same key across daemon instances.
# NOTE(review): assumes a memcached server on localhost:11211 — confirm for
# the deployment.
REGION_SHORT = make_region().configure('dogpile.cache.memcached',
                                       expiration_time=600,
                                       arguments={'url': "127.0.0.1:11211", 'distributed_lock': True})
def get_rses(rses=None, include_rses=None, exclude_rses=None):
    """
    Build the list of RSEs a conveyor instance should work on.

    Selection order: start from the explicit `rses` names (or all RSEs when
    neither `rses` nor `include_rses` is given), add RSEs matching
    `include_rses`, then drop RSEs matching `exclude_rses`.  A broken RSE
    expression is logged and ignored rather than aborting the daemon.

    :param rses:          Explicit list of RSE names to restrict to.
    :param include_rses:  RSE expression of additional RSEs to include.
    :param exclude_rses:  RSE expression of RSEs to exclude.
    :returns: List of rsemgr RSE-info dictionaries.
    """
    working_rses = []
    rses_list = rse_core.list_rses()
    if rses:
        working_rses = [rse for rse in rses_list if rse['rse'] in rses]
    if include_rses:
        try:
            parsed_rses = parse_expression(include_rses, session=None)
        except InvalidRSEExpression as error:
            # Fix: the original dropped the exception detail here (unlike the
            # exclude branch below) — log it so the bad expression can be fixed.
            logging.error("Invalid RSE exception %s to include RSEs: %s" % (include_rses, error))
        else:
            for rse in parsed_rses:
                if rse not in working_rses:
                    working_rses.append(rse)
    if not (rses or include_rses):
        working_rses = rses_list
    if exclude_rses:
        try:
            parsed_rses = parse_expression(exclude_rses, session=None)
        except InvalidRSEExpression as error:
            logging.error("Invalid RSE exception %s to exclude RSEs: %s" % (exclude_rses, error))
        else:
            working_rses = [rse for rse in working_rses if rse not in parsed_rses]
    working_rses = [rsemgr.get_rse_info(rse['rse']) for rse in working_rses]
    return working_rses
def get_requests(rse_id=None,
                 process=0, total_processes=1, thread=0, total_threads=1,
                 mock=False, bulk=100, activity=None, activity_shares=None):
    """
    Fetch the next batch of QUEUED transfer/stage-in/stage-out requests
    for this process/thread partition and record how long the query took.

    :returns: Whatever `request.get_next` returns (list of request dicts).
    """
    started_at = time.time()
    wanted_types = [RequestType.TRANSFER,
                    RequestType.STAGEIN,
                    RequestType.STAGEOUT]
    reqs = request.get_next(request_type=wanted_types,
                            state=RequestState.QUEUED,
                            limit=bulk,
                            rse=rse_id,
                            activity=activity,
                            process=process,
                            total_processes=total_processes,
                            thread=thread,
                            total_threads=total_threads,
                            activity_shares=activity_shares)
    record_timer('daemons.conveyor.submitter.get_next', (time.time() - started_at) * 1000)
    return reqs
def get_sources(dest_rse, schemes, req, max_sources=4):
    """
    Resolve the source replicas usable for one transfer/stage-in request.

    For STAGEIN only RSEs whose 'staging_buffer' points at the destination
    are eligible (and exactly one source is kept); for TRANSFER the
    destination itself, RSEs excluded by the request's
    source_replica_expression, and (on mixed DISK/TAPE replica sets) the
    "wrong" media type for this attempt are skipped.  Sources are ranked by
    closeness and truncated to `max_sources`.

    :param dest_rse:    rsemgr RSE-info dict of the destination.
    :param schemes:     List of allowed URL schemes (or None for any).
    :param req:         Request dictionary.
    :param max_sources: Maximum number of sources to return.
    :returns: Tuple (sources, metadata) where sources is a list of
              (rse_name, pfn, rse_id, ranking) tuples and metadata holds
              filesize/md5/adler32 — or (None, None) on any failure.
    """
    allowed_rses = []
    if req['request_type'] == RequestType.STAGEIN:
        rses = rse_core.list_rses(filters={'staging_buffer': dest_rse['rse']})
        allowed_rses = [x['rse'] for x in rses]
    allowed_source_rses = []
    if req['attributes']:
        # Attributes may arrive either as a dict or as a JSON string;
        # round-trip through json to normalise both into a plain dict.
        if type(req['attributes']) is dict:
            req_attributes = json.loads(json.dumps(req['attributes']))
        else:
            req_attributes = json.loads(str(req['attributes']))
        source_replica_expression = req_attributes["source_replica_expression"]
        if source_replica_expression:
            try:
                parsed_rses = parse_expression(source_replica_expression, session=None)
            except InvalidRSEExpression, e:
                logging.error("Invalid RSE exception %s for request %s: %s" % (source_replica_expression,
                                                                              req['request_id'],
                                                                              e))
                allowed_source_rses = []
            else:
                allowed_source_rses = [x['rse'] for x in parsed_rses]
    tmpsrc = []
    metadata = {}
    try:
        ts = time.time()
        replications = replica.list_replicas(dids=[{'scope': req['scope'],
                                                    'name': req['name'],
                                                    'type': DIDType.FILE}],
                                             schemes=schemes)
        record_timer('daemons.conveyor.submitter.list_replicas', (time.time() - ts) * 1000)
        # return gracefully if there are no replicas for a DID
        if not replications:
            return None, None
        for source in replications:
            try:
                metadata['filesize'] = long(source['bytes'])
            except KeyError, e:
                logging.error('source for %s:%s has no filesize set - skipping' % (source['scope'], source['name']))
                continue
            metadata['md5'] = source['md5']
            metadata['adler32'] = source['adler32']
            # TODO: Source protection
            # we need to know upfront if we are mixed DISK/TAPE source
            mixed_source = []
            for source_rse in source['rses']:
                mixed_source.append(rse_core.get_rse(source_rse).rse_type)
            mixed_source = True if len(set(mixed_source)) > 1 else False
            for source_rse in source['rses']:
                if req['request_type'] == RequestType.STAGEIN:
                    if source_rse in allowed_rses:
                        for pfn in source['rses'][source_rse]:
                            # In case of staging request, we only use one source
                            tmpsrc = [(str(source_rse), str(pfn)), ]
                elif req['request_type'] == RequestType.TRANSFER:
                    if source_rse == dest_rse['rse']:
                        logging.debug('Skip source %s for request %s because it is the destination' % (source_rse,
                                                                                                       req['request_id']))
                        continue
                    if allowed_source_rses and not (source_rse in allowed_source_rses):
                        logging.debug('Skip source %s for request %s because of source_replica_expression %s' % (source_rse,
                                                                                                                 req['request_id'],
                                                                                                                 req['attributes']))
                        continue
                    # do not allow mixed source jobs, either all DISK or all TAPE
                    # do not use TAPE on the first try
                    if mixed_source:
                        if not req['previous_attempt_id'] and rse_core.get_rse(source_rse).rse_type == RSEType.TAPE and source_rse not in allowed_source_rses:
                            logging.debug('Skip tape source %s for request %s' % (source_rse,
                                                                                  req['request_id']))
                            continue
                        elif req['previous_attempt_id'] and rse_core.get_rse(source_rse).rse_type == RSEType.DISK and source_rse not in allowed_source_rses:
                            logging.debug('Skip disk source %s for retrial request %s' % (source_rse,
                                                                                          req['request_id']))
                            continue
                    # Prefer gsiftp PFNs when any exist; otherwise take all.
                    filtered_sources = [x for x in source['rses'][source_rse] if x.startswith('gsiftp')]
                    if not filtered_sources:
                        filtered_sources = source['rses'][source_rse]
                    for pfn in filtered_sources:
                        tmpsrc.append((str(source_rse), str(pfn)))
    except DataIdentifierNotFound:
        record_counter('daemons.conveyor.submitter.lost_did')
        logging.warn('DID %s:%s does not exist anymore - marking request %s as LOST' % (req['scope'],
                                                                                        req['name'],
                                                                                        req['request_id']))
        return None, None
    # NOTE(review): bare except also traps SystemExit/KeyboardInterrupt —
    # consider narrowing to Exception.
    except:
        record_counter('daemons.conveyor.submitter.unexpected')
        logging.critical('Something unexpected happened: %s' % traceback.format_exc())
        return None, None
    sources = []
    if tmpsrc == []:
        # No usable source found: double-check whether any replica exists at
        # all (even unavailable ones) to distinguish "lost" from "filtered".
        record_counter('daemons.conveyor.submitter.nosource')
        logging.warn('No source replicas found for DID %s:%s - deep check for unavailable replicas' % (req['scope'],
                                                                                                       req['name']))
        if sum(1 for tmp in replica.list_replicas([{'scope': req['scope'],
                                                    'name': req['name'],
                                                    'type': DIDType.FILE}],
                                                  schemes=schemes,
                                                  unavailable=True)):
            logging.error('DID %s:%s lost! This should not happen!' % (req['scope'], req['name']))
        return None, None
    else:
        # Carry over the ranking of any source already tried for this request.
        used_sources = request.get_sources(req['request_id'])
        for tmp in tmpsrc:
            source_rse_info = rsemgr.get_rse_info(tmp[0])
            rank = None
            if used_sources:
                for used_source in used_sources:
                    if used_source['rse_id'] == source_rse_info['id']:
                        # file already used
                        rank = used_source['ranking']
                        break
            sources.append((tmp[0], tmp[1], source_rse_info['id'], rank))
    if len(sources) > 1:
        sources = sort_sources(sources, dest_rse['rse'])
    if len(sources) > max_sources:
        # Truncate to the closest max_sources, then shuffle to spread load.
        sources = sources[:max_sources]
        random.shuffle(sources)
    return sources, metadata
def get_destinations(rse_info, scheme, req, naming_convention):
    """
    Compute the destination URL(s) and space token for a request.

    For non-deterministic RSEs a surl is constructed from a containing
    dataset name (the request's 'ds_name' attribute, or the first DATASET
    parent) and registered as the replica path; tape retrials/recoveries get
    a timestamp suffix so the new attempt does not collide with the old file.

    :param rse_info:          rsemgr RSE-info dict of the destination.
    :param scheme:            Preferred URL scheme(s) for the write protocol.
    :param req:               Request dictionary.
    :param naming_convention: Naming convention used by construct_surl.
    :returns: Tuple (destinations, dest_spacetoken) — (None, None) when the
              RSE supports no suitable 'write' protocol.
    """
    dsn = 'other'
    pfn = {}
    if not rse_info['deterministic']:
        ts = time.time()
        # get rule scope and name
        if req['attributes']:
            # Attributes may be a dict or a JSON string; normalise via json.
            if type(req['attributes']) is dict:
                req_attributes = json.loads(json.dumps(req['attributes']))
            else:
                req_attributes = json.loads(str(req['attributes']))
            if 'ds_name' in req_attributes:
                dsn = req_attributes["ds_name"]
        if dsn == 'other':
            # select a containing dataset
            for parent in did.list_parent_dids(req['scope'], req['name']):
                if parent['type'] == DIDType.DATASET:
                    dsn = parent['name']
                    break
            record_timer('daemons.conveyor.submitter.list_parent_dids', (time.time() - ts) * 1000)
        # DQ2 path always starts with /, but prefix might not end with /
        path = construct_surl(dsn, req['name'], naming_convention)
        # retrial transfers to tape need a new filename - add timestamp
        if req['request_type'] == RequestType.TRANSFER and rse_info['rse_type'] == 'TAPE':
            if 'previous_attempt_id' in req and req['previous_attempt_id']:
                path = '%s_%i' % (path, int(time.time()))
                logging.debug('Retrial transfer request %s DID %s:%s to tape %s renamed to %s' % (req['request_id'],
                                                                                                  req['scope'],
                                                                                                  req['name'],
                                                                                                  rse_info['rse'],
                                                                                                  path))
            elif req['activity'] and req['activity'] == 'Recovery':
                path = '%s_%i' % (path, int(time.time()))
                logging.debug('Recovery transfer request %s DID %s:%s to tape %s renamed to %s' % (req['request_id'],
                                                                                                   req['scope'],
                                                                                                   req['name'],
                                                                                                   rse_info['rse'],
                                                                                                   path))
        # we must set the destination path for nondeterministic replicas explicitly
        replica.update_replicas_paths([{'scope': req['scope'],
                                        'name': req['name'],
                                        'rse_id': req['dest_rse_id'],
                                        'path': path}])
        lfn = [{'scope': req['scope'], 'name': req['name'], 'path': path}]
    else:
        lfn = [{'scope': req['scope'], 'name': req['name']}]
    ts = time.time()
    try:
        pfn = rsemgr.lfns2pfns(rse_info, lfns=lfn, operation='write', scheme=scheme)
    except RSEProtocolNotSupported:
        logging.error('Operation "write" not supported by %s' % (rse_info['rse']))
        return None, None
    record_timer('daemons.conveyor.submitter.lfns2pfns', (time.time() - ts) * 1000)
    destinations = []
    # lfns2pfns may map each lfn to a single pfn string or to several.
    for k in pfn:
        if isinstance(pfn[k], (str, unicode)):
            destinations.append(pfn[k])
        elif isinstance(pfn[k], (tuple, list)):
            for url in pfn[k]:
                destinations.append(pfn[k][url])
    protocol = None
    try:
        protocol = rsemgr.select_protocol(rse_info, 'write', scheme=scheme)
    except RSEProtocolNotSupported:
        logging.error('Operation "write" not supported by %s' % (rse_info['rse']))
        return None, None
    # we need to set the spacetoken if we use SRM
    dest_spacetoken = None
    if protocol['extended_attributes'] and 'space_token' in protocol['extended_attributes']:
        dest_spacetoken = protocol['extended_attributes']['space_token']
    return destinations, dest_spacetoken
def get_transfer(rse, req, scheme, mock, max_sources=4):
    """
    Build the full transfer dictionary for one request.

    STAGEIN: sources are resolved first and the (single) source doubles as
    the destination; bring_online is forced and overwrite disabled.
    TRANSFER: the destination URL is computed first, then sources matching
    the destination scheme family (srm/gsiftp are interchangeable) are
    resolved; tape endpoints toggle bring_online/overwrite accordingly.

    :param rse:         rsemgr RSE-info dict of the destination.
    :param req:         Request dictionary.
    :param scheme:      Preferred URL scheme(s), a list or comma string.
    :param mock:        When True, rewrite source PFNs to the mock scheme.
    :param max_sources: Maximum number of sources to consider.
    :returns: Transfer dict ready for bulk_group_transfer, or None on any
              unrecoverable resolution failure.
    """
    src_spacetoken = None
    if req['request_type'] == RequestType.STAGEIN:
        # for staging in, get the sources at first, then use the sources as destination
        if not (rse['staging_area'] or rse['rse'].endswith("STAGING")):
            raise InvalidRequest('Not a STAGING RSE for STAGE-IN request')
        ts = time.time()
        if scheme is None:
            sources, metadata = get_sources(rse, None, req, max_sources=max_sources)
        else:
            if not isinstance(scheme, list):
                scheme = scheme.split(',')
            sources, metadata = get_sources(rse, scheme, req, max_sources=max_sources)
        record_timer('daemons.conveyor.submitter.get_sources', (time.time() - ts) * 1000)
        logging.debug('Sources for request %s: %s' % (req['request_id'], sources))
        if sources is None:
            logging.error("Request %s DID %s:%s RSE %s failed to get sources" % (req['request_id'],
                                                                                 req['scope'],
                                                                                 req['name'],
                                                                                 rse['rse']))
            return None
        filesize = metadata['filesize']
        md5 = metadata['md5']
        adler32 = metadata['adler32']
        # Sources are properly set, so now we can finally force the source RSE to the destination RSE for STAGEIN
        dest_rse = sources[0][0]
        rse_attr = rse_core.list_rse_attributes(sources[0][0])
        fts_hosts = rse_attr.get('fts', None)
        naming_convention = rse_attr.get('naming_convention', None)
        if len(sources) == 1:
            destinations = [sources[0][1]]
        else:
            # TODO: need to check
            return None
        protocol = None
        try:
            # for stagin, dest_space_token should be the source space token
            source_rse_info = rsemgr.get_rse_info(sources[0][0])
            protocol = rsemgr.select_protocol(source_rse_info, 'write')
        except RSEProtocolNotSupported:
            logging.error('Operation "write" not supported by %s' % (source_rse_info['rse']))
            return None
        # we need to set the spacetoken if we use SRM
        dest_spacetoken = None
        if 'space_token' in protocol['extended_attributes']:
            dest_spacetoken = protocol['extended_attributes']['space_token']
        # Extend the metadata dictionary with request attributes
        copy_pin_lifetime, overwrite, bring_online = -1, True, None
        if req['attributes']:
            # Attributes may be a dict or a JSON string; normalise via json.
            if type(req['attributes']) is dict:
                attr = json.loads(json.dumps(req['attributes']))
            else:
                attr = json.loads(str(req['attributes']))
            copy_pin_lifetime = attr.get('lifetime')
            overwrite = False
            # 48h pin window for staging requests.
            bring_online = 172800
    else:
        # for normal transfer, get the destination at first, then use the destination scheme to get sources
        rse_attr = rse_core.list_rse_attributes(rse['rse'], rse['id'])
        fts_hosts = rse_attr.get('fts', None)
        naming_convention = rse_attr.get('naming_convention', None)
        ts = time.time()
        destinations, dest_spacetoken = get_destinations(rse, scheme, req, naming_convention)
        record_timer('daemons.conveyor.submitter.get_destinations', (time.time() - ts) * 1000)
        logging.debug('Destinations for request %s: %s' % (req['request_id'], destinations))
        if destinations is None:
            logging.error("Request %s DID %s:%s RSE %s failed to get destinations" % (req['request_id'],
                                                                                      req['scope'],
                                                                                      req['name'],
                                                                                      rse['rse']))
            return None
        # srm and gsiftp endpoints are interchangeable as source schemes.
        schemes = []
        for destination in destinations:
            schemes.append(destination.split("://")[0])
        if 'srm' in schemes and 'gsiftp' not in schemes:
            schemes.append('gsiftp')
        if 'gsiftp' in schemes and 'srm' not in schemes:
            schemes.append('srm')
        logging.debug('Schemes will be allowed for sources: %s' % (schemes))
        ts = time.time()
        sources, metadata = get_sources(rse, schemes, req, max_sources=max_sources)
        record_timer('daemons.conveyor.submitter.get_sources', (time.time() - ts) * 1000)
        logging.debug('Sources for request %s: %s' % (req['request_id'], sources))
        if not sources:
            logging.error("Request %s DID %s:%s RSE %s failed to get sources" % (req['request_id'],
                                                                                 req['scope'],
                                                                                 req['name'],
                                                                                 rse['rse']))
            return None
        dest_rse = rse['rse']
        # exclude destination replica from source
        new_sources = sources
        for source in sources:
            if source[0] == dest_rse:
                logging.info('Excluding source %s for request %s: source is destination' % (source[0],
                                                                                            req['request_id']))
                new_sources.remove(source)
        sources = new_sources
        filesize = metadata['filesize']
        md5 = metadata['md5']
        adler32 = metadata['adler32']
        # Extend the metadata dictionary with request attributes
        copy_pin_lifetime, overwrite, bring_online = -1, True, None
        if rse_core.get_rse(sources[0][0]).rse_type == RSEType.TAPE:
            bring_online = 172800
        if rse_core.get_rse(None, rse_id=req['dest_rse_id']).rse_type == RSEType.TAPE:
            overwrite = False
        # make sure we only use one source when bring_online is needed
        if bring_online and len(sources) > 1:
            sources = [sources[0]]
            logging.info('Only using first source %s for bring_online request %s' % (sources,
                                                                                     req['request_id']))
    # Come up with mock sources if necessary
    if mock:
        tmp_sources = []
        for s in sources:
            tmp_sources.append((s[0], ':'.join(['mock'] + s[1].split(':')[1:]), s[2], s[3]))
        sources = tmp_sources
    source_surls = [s[1] for s in sources]
    if not source_surls:
        logging.error('All sources excluded - SKIP REQUEST %s' % req['request_id'])
        return
    # Per-file metadata forwarded to FTS alongside the job.
    tmp_metadata = {'request_id': req['request_id'],
                    'scope': req['scope'],
                    'name': req['name'],
                    'activity': req['activity'],
                    'src_rse': sources[0][0],
                    'dst_rse': dest_rse,
                    'dest_rse_id': req['dest_rse_id'],
                    'filesize': filesize,
                    'md5': md5,
                    'adler32': adler32}
    if 'previous_attempt_id' in req and req['previous_attempt_id']:
        tmp_metadata['previous_attempt_id'] = req['previous_attempt_id']
    retry_count = req['retry_count']
    if not retry_count:
        retry_count = 0
    if not fts_hosts:
        logging.error('Destination RSE %s FTS attribute not defined - SKIP REQUEST %s' % (rse['rse'], req['request_id']))
        return
    # Rotate through the configured FTS hosts on each retry.
    fts_list = fts_hosts.split(",")
    external_host = fts_list[retry_count % len(fts_list)]
    transfer = {'request_id': req['request_id'],
                'sources': sources,
                # 'src_urls': source_surls,
                'dest_urls': destinations,
                'filesize': filesize,
                'md5': md5,
                'adler32': adler32,
                'src_spacetoken': src_spacetoken,
                'dest_spacetoken': dest_spacetoken,
                'activity': req['activity'],
                'overwrite': overwrite,
                'bring_online': bring_online,
                'copy_pin_lifetime': copy_pin_lifetime,
                'external_host': external_host,
                'file_metadata': tmp_metadata,
                'rule_id': req['rule_id']}
    return transfer
def get_transfers_from_requests(process=0, total_processes=1, thread=0, total_threads=1, rse_ids=None,
                                mock=False, bulk=100, activity=None, activity_shares=None, scheme=None, max_sources=4):
    """
    Fetch a batch of queued requests and resolve each into a submittable transfer.

    Requests whose destination is not in `rse_ids` are skipped; requests for
    which no transfer can be built are marked LOST.  A failure on one request
    is logged and does not abort the rest of the batch.

    :returns: Dict mapping request_id -> transfer dictionary (possibly empty).
    """
    ts = time.time()
    reqs = get_requests(process=process,
                        total_processes=total_processes,
                        thread=thread,
                        total_threads=total_threads,
                        mock=mock,
                        bulk=bulk,
                        activity=activity,
                        activity_shares=activity_shares)
    record_timer('daemons.conveyor.submitter.get_requests', (time.time() - ts) * 1000)
    if reqs:
        logging.debug('%i:%i - Getting %i requests' % (process, thread, len(reqs)))
    if not reqs:
        # Fix: the original also tested `reqs == []`, which `not reqs` covers.
        return {}
    # get transfers
    transfers = {}
    for req in reqs:
        try:
            if rse_ids and req['dest_rse_id'] not in rse_ids:
                # destination not handled by this submitter instance
                continue
            dest_rse = rse_core.get_rse(rse=None, rse_id=req['dest_rse_id'])
            rse_info = rsemgr.get_rse_info(dest_rse['rse'])
            ts = time.time()
            transfer = get_transfer(rse_info, req, scheme, mock, max_sources=max_sources)
            record_timer('daemons.conveyor.submitter.get_transfer', (time.time() - ts) * 1000)
            logging.debug('Transfer for request %s: %s' % (req['request_id'], transfer))
            if transfer is None:
                logging.error("Request %s DID %s:%s RSE %s failed to get transfer" % (req['request_id'],
                                                                                      req['scope'],
                                                                                      req['name'],
                                                                                      rse_info['rse']))
                request.set_request_state(req['request_id'], RequestState.LOST)
                continue
            transfers[req['request_id']] = transfer
        except Exception as error:
            # Best-effort per request: log and continue with the next one.
            logging.error("Failed to get transfer for request(%s): %s " % (req['request_id'], str(error)))
    return transfers
def bulk_group_transfer(transfers, policy='rule', group_bulk=200, fts_source_strategy='auto', max_time_in_queue=None):
    """
    Group resolved transfers into bulk FTS jobs.

    Transfers are grouped first by FTS host, then by identical job
    parameters (job_key), then by the configured grouping `policy`
    ('rule', 'dest', 'src_dest' or 'rule_src_dest'); each policy group is
    finally chunked into jobs of at most `group_bulk` files.  Multi-source
    transfers are never bulked and become single-file jobs.

    :param transfers:           Dict of request_id -> transfer dict
                                (as built by get_transfer).
    :param policy:              Grouping policy name.
    :param group_bulk:          Max files per submitted job.
    :param fts_source_strategy: FTS source selection strategy per file.
    :param max_time_in_queue:   Optional dict of activity -> max queue time
                                ('default' key as fallback).
    :returns: Dict mapping external FTS host -> list of
              {'files': [...], 'job_params': {...}} job descriptions.
    """
    grouped_transfers = {}
    grouped_jobs = {}
    for request_id in transfers:
        transfer = transfers[request_id]
        external_host = transfer['external_host']
        if external_host not in grouped_transfers:
            grouped_transfers[external_host] = {}
            grouped_jobs[external_host] = []
        # NOTE(review): 'file' shadows the Python 2 builtin of the same name.
        file = {'sources': transfer['sources'],
                'destinations': transfer['dest_urls'],
                'metadata': transfer['file_metadata'],
                'filesize': int(transfer['file_metadata']['filesize']),
                'checksum': None,
                'selection_strategy': fts_source_strategy,
                'request_type': transfer['file_metadata'].get('request_type', None),
                'activity': str(transfer['file_metadata']['activity'])}
        if file['metadata'].get('verify_checksum', True):
            # adler32 takes precedence over md5 when both are set.
            if 'md5' in file['metadata'].keys() and file['metadata']['md5']:
                file['checksum'] = 'MD5:%s' % str(file['metadata']['md5'])
            if 'adler32' in file['metadata'].keys() and file['metadata']['adler32']:
                file['checksum'] = 'ADLER32:%s' % str(file['metadata']['adler32'])
        job_params = {'verify_checksum': True if file['checksum'] and file['metadata'].get('verify_checksum', True) else False,
                      'spacetoken': transfer['dest_spacetoken'] if transfer['dest_spacetoken'] else 'null',
                      'copy_pin_lifetime': transfer['copy_pin_lifetime'] if transfer['copy_pin_lifetime'] else -1,
                      'bring_online': transfer['bring_online'] if transfer['bring_online'] else None,
                      'job_metadata': {'issuer': 'rucio'},  # finaly job_meta will like this. currently job_meta will equal file_meta to include request_id and etc.
                      'source_spacetoken': transfer['src_spacetoken'] if transfer['src_spacetoken'] else None,
                      'overwrite': transfer['overwrite'],
                      'priority': 3}
        if max_time_in_queue:
            if transfer['file_metadata']['activity'] in max_time_in_queue:
                job_params['max_time_in_queue'] = max_time_in_queue[transfer['file_metadata']['activity']]
            elif 'default' in max_time_in_queue:
                job_params['max_time_in_queue'] = max_time_in_queue['default']
        # for multiple source replicas, no bulk submission
        if len(transfer['sources']) > 1:
            job_params['job_metadata']['multi_sources'] = True
            grouped_jobs[external_host].append({'files': [file], 'job_params': job_params})
        else:
            job_params['job_metadata']['multi_sources'] = False
            # Transfers sharing every job parameter may be bulked together.
            job_key = '%s,%s,%s,%s,%s,%s,%s,%s' % (job_params['verify_checksum'], job_params['spacetoken'], job_params['copy_pin_lifetime'],
                                                   job_params['bring_online'], job_params['job_metadata'], job_params['source_spacetoken'],
                                                   job_params['overwrite'], job_params['priority'])
            if 'max_time_in_queue' in job_params:
                job_key = job_key + ',%s' % job_params['max_time_in_queue']
            if job_key not in grouped_transfers[external_host]:
                grouped_transfers[external_host][job_key] = {}
            if policy == 'rule':
                policy_key = '%s' % (transfer['rule_id'])
            if policy == 'dest':
                policy_key = '%s' % (file['metadata']['dst_rse'])
            if policy == 'src_dest':
                policy_key = '%s,%s' % (file['metadata']['src_rse'], file['metadata']['dst_rse'])
            if policy == 'rule_src_dest':
                policy_key = '%s,%s,%s' % (transfer['rule_id'], file['metadata']['src_rse'], file['metadata']['dst_rse'])
            # maybe here we need to hash the key if it's too long
            if policy_key not in grouped_transfers[external_host][job_key]:
                grouped_transfers[external_host][job_key][policy_key] = {'files': [file], 'job_params': job_params}
            else:
                grouped_transfers[external_host][job_key][policy_key]['files'].append(file)
    # for jobs with different job_key, we cannot put in one job.
    for external_host in grouped_transfers:
        for job_key in grouped_transfers[external_host]:
            # for all policy groups in job_key, the job_params is the same.
            for policy_key in grouped_transfers[external_host][job_key]:
                job_params = grouped_transfers[external_host][job_key][policy_key]['job_params']
                for xfers_files in chunks(grouped_transfers[external_host][job_key][policy_key]['files'], group_bulk):
                    # for the last small piece, just submit it.
                    grouped_jobs[external_host].append({'files': xfers_files, 'job_params': job_params})
    return grouped_jobs
@read_session
def get_unavailable_read_rse_ids(session=None):
    """
    Return the ids of all RSEs whose read availability is disabled.

    The result is cached in REGION_SHORT (10 minutes); a failed refresh is
    logged and an empty list is returned so callers degrade gracefully.

    :param session: The database session in use.
    :returns: List of RSE ids (possibly empty).
    """
    key = 'unavailable_read_rse_ids'
    result = REGION_SHORT.get(key)
    # dogpile returns the NoValue sentinel on a cache miss.
    if isinstance(result, NoValue):
        try:
            logging.debug("Refresh unavailable read rses")
            unavailable_read_rses = rse_core.list_rses(filters={'availability_read': False}, session=session)
            unavailable_read_rse_ids = [r['id'] for r in unavailable_read_rses]
            REGION_SHORT.set(key, unavailable_read_rse_ids)
            return unavailable_read_rse_ids
        except Exception:
            # Fix: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt.
            logging.warning("Failed to refresh unavailable read rses, error: %s" % (traceback.format_exc()))
            return []
    return result
@read_session
def get_transfer_requests_and_source_replicas(process=None, total_processes=None, thread=None, total_threads=None,
limit=None, activity=None, older_than=None, rses=None, schemes=None,
bring_online=43200, retry_other_fts=False, failover_schemes=None, session=None):
req_sources = request.list_transfer_requests_and_source_replicas(process=process, total_processes=total_processes, thread=thread, total_threads=total_threads,
limit=limit, activity=activity, older_than=older_than, rses=rses, session=session)
unavailable_read_rse_ids = get_unavailable_read_rse_ids(session=session)
bring_online_local = bring_online
transfers, rses_info, protocols, rse_attrs, reqs_no_source, reqs_only_tape_source, reqs_scheme_mismatch = {}, {}, {}, {}, [], [], []
for id, rule_id, scope, name, md5, adler32, bytes, activity, attributes, previous_attempt_id, dest_rse_id, source_rse_id, rse, deterministic, rse_type, path, retry_count, src_url, ranking, link_ranking in req_sources:
transfer_src_type = "DISK"
transfer_dst_type = "DISK"
allow_tape_source = True
try:
if rses and dest_rse_id not in rses:
continue
current_schemes = schemes
if previous_attempt_id and failover_schemes:
current_schemes = failover_schemes
if id not in transfers:
if id not in reqs_no_source:
reqs_no_source.append(id)
# source_rse_id will be None if no source replicas
# rse will be None if rse is staging area
if source_rse_id is None or rse is None:
continue
if link_ranking is None:
logging.debug("Request %s: no link from %s to %s" % (id, source_rse_id, dest_rse_id))
continue
if source_rse_id in unavailable_read_rse_ids:
continue
# Get destination rse information and protocol
if dest_rse_id not in rses_info:
dest_rse = rse_core.get_rse_name(rse_id=dest_rse_id, session=session)
rses_info[dest_rse_id] = rsemgr.get_rse_info(dest_rse, session=session)
if dest_rse_id not in rse_attrs:
rse_attrs[dest_rse_id] = get_rse_attributes(dest_rse_id, session=session)
attr = None
if attributes:
if type(attributes) is dict:
attr = json.loads(json.dumps(attributes))
else:
attr = json.loads(str(attributes))
# parse source expression
source_replica_expression = attr["source_replica_expression"] if (attr and "source_replica_expression" in attr) else None
if source_replica_expression:
try:
parsed_rses = parse_expression(source_replica_expression, session=session)
except InvalidRSEExpression, e:
logging.error("Invalid RSE exception %s: %s" % (source_replica_expression, e))
continue
else:
allowed_rses = [x['rse'] for x in parsed_rses]
if rse not in allowed_rses:
continue
# parse allow tape source expression, not finally version.
# allow_tape_source = attr["allow_tape_source"] if (attr and "allow_tape_source" in attr) else True
allow_tape_source = True
# Get protocol
if dest_rse_id not in protocols:
try:
protocols[dest_rse_id] = rsemgr.create_protocol(rses_info[dest_rse_id], 'write', current_schemes)
except RSEProtocolNotSupported:
logging.error('Operation "write" not supported by %s with schemes %s' % (rses_info[dest_rse_id]['rse'], current_schemes))
if id in reqs_no_source:
reqs_no_source.remove(id)
if id not in reqs_scheme_mismatch:
reqs_scheme_mismatch.append(id)
continue
# get dest space token
dest_spacetoken = None
if protocols[dest_rse_id].attributes and \
'extended_attributes' in protocols[dest_rse_id].attributes and \
protocols[dest_rse_id].attributes['extended_attributes'] and \
'space_token' in protocols[dest_rse_id].attributes['extended_attributes']:
dest_spacetoken = protocols[dest_rse_id].attributes['extended_attributes']['space_token']
# Compute the destination url
if rses_info[dest_rse_id]['deterministic']:
dest_url = protocols[dest_rse_id].lfns2pfns(lfns={'scope': scope, 'name': name}).values()[0]
else:
# compute dest url in case of non deterministic
# naming convention, etc.
dsn = 'other'
if attr and 'ds_name' in attr:
dsn = attr["ds_name"]
else:
# select a containing dataset
for parent in did.list_parent_dids(scope, name):
if parent['type'] == DIDType.DATASET:
dsn = parent['name']
break
# DQ2 path always starts with /, but prefix might not end with /
naming_convention = rse_attrs[dest_rse_id].get('naming_convention', None)
dest_path = construct_surl(dsn, name, naming_convention)
if rses_info[dest_rse_id]['rse_type'] == RSEType.TAPE or rses_info[dest_rse_id]['rse_type'] == 'TAPE':
if retry_count or activity == 'Recovery':
dest_path = '%s_%i' % (dest_path, int(time.time()))
dest_url = protocols[dest_rse_id].lfns2pfns(lfns={'scope': scope, 'name': name, 'path': dest_path}).values()[0]
# get allowed source scheme
src_schemes = []
dest_scheme = dest_url.split("://")[0]
if dest_scheme in ['srm', 'gsiftp']:
src_schemes = ['srm', 'gsiftp']
else:
src_schemes = [dest_scheme]
# Compute the sources: urls, etc
if source_rse_id not in rses_info:
# source_rse = rse_core.get_rse_name(rse_id=source_rse_id, session=session)
source_rse = rse
rses_info[source_rse_id] = rsemgr.get_rse_info(source_rse, session=session)
# Get protocol
source_rse_id_key = '%s_%s' % (source_rse_id, '_'.join(src_schemes))
if source_rse_id_key not in protocols:
try:
protocols[source_rse_id_key] = rsemgr.create_protocol(rses_info[source_rse_id], 'read', src_schemes)
except RSEProtocolNotSupported:
logging.error('Operation "read" not supported by %s with schemes %s' % (rses_info[source_rse_id]['rse'], src_schemes))
if id in reqs_no_source:
reqs_no_source.remove(id)
if id not in reqs_scheme_mismatch:
reqs_scheme_mismatch.append(id)
continue
source_url = protocols[source_rse_id_key].lfns2pfns(lfns={'scope': scope, 'name': name, 'path': path}).values()[0]
# Extend the metadata dictionary with request attributes
overwrite, bring_online = True, None
if rses_info[source_rse_id]['rse_type'] == RSEType.TAPE or rses_info[source_rse_id]['rse_type'] == 'TAPE':
bring_online = bring_online_local
transfer_src_type = "TAPE"
if not allow_tape_source:
if id not in reqs_only_tape_source:
reqs_only_tape_source.append(id)
if id in reqs_no_source:
reqs_no_source.remove(id)
continue
if rses_info[dest_rse_id]['rse_type'] == RSEType.TAPE or rses_info[dest_rse_id]['rse_type'] == 'TAPE':
overwrite = False
transfer_dst_type = "TAPE"
# get external_host
fts_hosts = rse_attrs[dest_rse_id].get('fts', None)
if not fts_hosts:
logging.error('Source RSE %s FTS attribute not defined - SKIP REQUEST %s' % (rse, id))
continue
if retry_count is None:
retry_count = 0
fts_list = fts_hosts.split(",")
external_host = fts_list[0]
if retry_other_fts:
external_host = fts_list[retry_count % len(fts_list)]
if id in reqs_no_source:
reqs_no_source.remove(id)
if id in reqs_only_tape_source:
reqs_only_tape_source.remove(id)
file_metadata = {'request_id': id,
'scope': scope,
'name': name,
'activity': activity,
'request_type': str(RequestType.TRANSFER).lower(),
'src_type': transfer_src_type,
'dst_type': transfer_dst_type,
'src_rse': rse,
'dst_rse': rses_info[dest_rse_id]['rse'],
'src_rse_id': source_rse_id,
'dest_rse_id': dest_rse_id,
'filesize': bytes,
'md5': md5,
'adler32': adler32,
'verify_checksum': rse_attrs[dest_rse_id].get('verify_checksum', True)}
if previous_attempt_id:
file_metadata['previous_attempt_id'] = previous_attempt_id
transfers[id] = {'request_id': id,
'schemes': src_schemes,
# 'src_urls': [source_url],
'sources': [(rse, source_url, source_rse_id, ranking if ranking is not None else 0, link_ranking)],
'dest_urls': [dest_url],
'src_spacetoken': None,
'dest_spacetoken': dest_spacetoken,
'overwrite': overwrite,
'bring_online': bring_online,
'copy_pin_lifetime': attr.get('lifetime', -1),
'external_host': external_host,
'selection_strategy': 'auto',
'rule_id': rule_id,
'file_metadata': file_metadata}
else:
schemes = transfers[id]['schemes']
# source_rse_id will be None if no source replicas
# rse will be None if rse is staging area
if source_rse_id is None or rse is None:
continue
if link_ranking is None:
logging.debug("Request %s: no link from %s to %s" % (id, source_rse_id, dest_rse_id))
continue
if source_rse_id in unavailable_read_rse_ids:
continue
attr = None
if attributes:
if type(attributes) is dict:
attr = json.loads(json.dumps(attributes))
else:
attr = json.loads(str(attributes))
# parse source expression
source_replica_expression = attr["source_replica_expression"] if (attr and "source_replica_expression" in attr) else None
if source_replica_expression:
try:
parsed_rses = parse_expression(source_replica_expression, session=session)
except InvalidRSEExpression, e:
logging.error("Invalid RSE exception %s: %s" % (source_replica_expression, e))
continue
else:
allowed_rses = [x['rse'] for x in parsed_rses]
if rse not in allowed_rses:
continue
# parse allow tape source expression, not finally version.
allow_tape_source = attr["allow_tape_source"] if (attr and "allow_tape_source" in attr) else True
# Compute the sources: urls, etc
if source_rse_id not in rses_info:
# source_rse = rse_core.get_rse_name(rse_id=source_rse_id, session=session)
source_rse = rse
rses_info[source_rse_id] = rsemgr.get_rse_info(source_rse, session=session)
if ranking is None:
ranking = 0
# TAPE should not mixed with Disk and should not use as first try
# If there is a source whose ranking is no less than the Tape ranking, Tape will not be used.
if rses_info[source_rse_id]['rse_type'] == RSEType.TAPE or rses_info[source_rse_id]['rse_type'] == 'TAPE':
# current src_rse is Tape
if not allow_tape_source:
continue
if not transfers[id]['bring_online']:
# the sources already founded are disks.
avail_top_ranking = None
founded_sources = transfers[id]['sources']
for founded_source in founded_sources:
if avail_top_ranking is None:
avail_top_ranking = founded_source[3]
continue
if founded_source[3] is not None and founded_source[3] > avail_top_ranking:
avail_top_ranking = founded_source[3]
if avail_top_ranking >= ranking:
# current Tape source is not the highest ranking, will use disk sources
continue
else:
transfers[id]['sources'] = []
transfers[id]['bring_online'] = bring_online_local
transfer_src_type = "TAPE"
transfers[id]['file_metadata']['src_type'] = transfer_src_type
transfers[id]['file_metadata']['src_rse'] = rse
else:
# the sources already founded is Tape too.
# multiple Tape source replicas are not allowed in FTS3.
if transfers[id]['sources'][0][3] > ranking or (transfers[id]['sources'][0][3] == ranking and transfers[id]['sources'][0][4] >= link_ranking):
continue
else:
transfers[id]['sources'] = []
transfers[id]['bring_online'] = bring_online_local
transfers[id]['file_metadata']['src_rse'] = rse
else:
# current src_rse is Disk
if transfers[id]['bring_online']:
# the founded sources are Tape
avail_top_ranking = None
founded_sources = transfers[id]['sources']
for founded_source in founded_sources:
if avail_top_ranking is None:
avail_top_ranking = founded_source[3]
continue
if founded_source[3] is not None and founded_source[3] > avail_top_ranking:
avail_top_ranking = founded_source[3]
if ranking >= avail_top_ranking:
# current disk replica has higher ranking than founded sources
# remove founded Tape sources
transfers[id]['sources'] = []
transfers[id]['bring_online'] = None
transfer_src_type = "DISK"
transfers[id]['file_metadata']['src_type'] = transfer_src_type
transfers[id]['file_metadata']['src_rse'] = rse
else:
continue
# Get protocol
source_rse_id_key = '%s_%s' % (source_rse_id, '_'.join(schemes))
if source_rse_id_key not in protocols:
try:
protocols[source_rse_id_key] = rsemgr.create_protocol(rses_info[source_rse_id], 'read', schemes)
except RSEProtocolNotSupported:
logging.error('Operation "read" not supported by %s with schemes %s' % (rses_info[source_rse_id]['rse'], schemes))
if id not in reqs_scheme_mismatch:
reqs_scheme_mismatch.append(id)
continue
source_url = protocols[source_rse_id_key].lfns2pfns(lfns={'scope': scope, 'name': name, 'path': path}).values()[0]
# transfers[id]['src_urls'].append((source_rse_id, source_url))
transfers[id]['sources'].append((rse, source_url, source_rse_id, ranking, link_ranking))
except:
logging.critical("Exception happened when trying to get transfer for request %s: %s" % (id, traceback.format_exc()))
break
return transfers, reqs_no_source, reqs_scheme_mismatch, reqs_only_tape_source
@read_session
def get_stagein_requests_and_source_replicas(process=None, total_processes=None, thread=None, total_threads=None, failover_schemes=None,
limit=None, activity=None, older_than=None, rses=None, mock=False, schemes=None,
bring_online=43200, retry_other_fts=False, session=None):
req_sources = request.list_stagein_requests_and_source_replicas(process=process, total_processes=total_processes, thread=thread, total_threads=total_threads,
limit=limit, activity=activity, older_than=older_than, rses=rses, session=session)
transfers, rses_info, protocols, rse_attrs, reqs_no_source = {}, {}, {}, {}, []
for id, rule_id, scope, name, md5, adler32, bytes, activity, attributes, dest_rse_id, source_rse_id, rse, deterministic, rse_type, path, staging_buffer, retry_count, previous_attempt_id, src_url, ranking in req_sources:
try:
if rses and dest_rse_id not in rses:
continue
current_schemes = schemes
if previous_attempt_id and failover_schemes:
current_schemes = failover_schemes
if id not in transfers:
if id not in reqs_no_source:
reqs_no_source.append(id)
if not src_url:
# source_rse_id will be None if no source replicas
# rse will be None if rse is staging area
# staging_buffer will be None if rse has no key 'staging_buffer'
if source_rse_id is None or rse is None or staging_buffer is None:
continue
# Get destination rse information and protocol
if dest_rse_id not in rses_info:
dest_rse = rse_core.get_rse_name(rse_id=dest_rse_id, session=session)
rses_info[dest_rse_id] = rsemgr.get_rse_info(dest_rse, session=session)
if staging_buffer != rses_info[dest_rse_id]['rse']:
continue
attr = None
if attributes:
if type(attributes) is dict:
attr = json.loads(json.dumps(attributes))
else:
attr = json.loads(str(attributes))
source_replica_expression = attr["source_replica_expression"] if "source_replica_expression" in attr else None
if source_replica_expression:
try:
parsed_rses = parse_expression(source_replica_expression, session=session)
except InvalidRSEExpression, e:
logging.error("Invalid RSE exception %s: %s" % (source_replica_expression, e))
continue
else:
allowed_rses = [x['rse'] for x in parsed_rses]
if rse not in allowed_rses:
continue
if source_rse_id not in rses_info:
# source_rse = rse_core.get_rse_name(rse_id=source_rse_id, session=session)
source_rse = rse
rses_info[source_rse_id] = rsemgr.get_rse_info(source_rse, session=session)
if source_rse_id not in rse_attrs:
rse_attrs[source_rse_id] = get_rse_attributes(source_rse_id, session=session)
if source_rse_id not in protocols:
protocols[source_rse_id] = rsemgr.create_protocol(rses_info[source_rse_id], 'write', current_schemes)
# we need to set the spacetoken if we use SRM
dest_spacetoken = None
if protocols[source_rse_id].attributes and \
'extended_attributes' in protocols[source_rse_id].attributes and \
protocols[source_rse_id].attributes['extended_attributes'] and \
'space_token' in protocols[source_rse_id].attributes['extended_attributes']:
dest_spacetoken = protocols[source_rse_id].attributes['extended_attributes']['space_token']
source_url = protocols[source_rse_id].lfns2pfns(lfns={'scope': scope, 'name': name, 'path': path}).values()[0]
else:
# source_rse_id will be None if no source replicas
# rse will be None if rse is staging area
# staging_buffer will be None if rse has no key 'staging_buffer'
if source_rse_id is None or rse is None or staging_buffer is None:
continue
attr = None
if attributes:
if type(attributes) is dict:
attr = json.loads(json.dumps(attributes))
else:
attr = json.loads(str(attributes))
# to get space token and fts attribute
if source_rse_id not in rses_info:
# source_rse = rse_core.get_rse_name(rse_id=source_rse_id, session=session)
source_rse = rse
rses_info[source_rse_id] = rsemgr.get_rse_info(source_rse, session=session)
if source_rse_id not in rse_attrs:
rse_attrs[source_rse_id] = get_rse_attributes(source_rse_id, session=session)
if source_rse_id not in protocols:
protocols[source_rse_id] = rsemgr.create_protocol(rses_info[source_rse_id], 'write', current_schemes)
# we need to set the spacetoken if we use SRM
dest_spacetoken = None
if protocols[source_rse_id].attributes and \
'extended_attributes' in protocols[source_rse_id].attributes and \
protocols[source_rse_id].attributes['extended_attributes'] and \
'space_token' in protocols[source_rse_id].attributes['extended_attributes']:
dest_spacetoken = protocols[source_rse_id].attributes['extended_attributes']['space_token']
source_url = src_url
fts_hosts = rse_attrs[source_rse_id].get('fts', None)
if not fts_hosts:
logging.error('Source RSE %s FTS attribute not defined - SKIP REQUEST %s' % (rse, id))
continue
if not retry_count:
retry_count = 0
fts_list = fts_hosts.split(",")
external_host = fts_list[0]
if retry_other_fts:
external_host = fts_list[retry_count % len(fts_list)]
if id in reqs_no_source:
reqs_no_source.remove(id)
file_metadata = {'request_id': id,
'scope': scope,
'name': name,
'activity': activity,
'request_type': str(RequestType.STAGEIN).lower(),
'src_type': "TAPE",
'dst_type': "DISK",
'src_rse': rse,
'dst_rse': rse,
'src_rse_id': source_rse_id,
'dest_rse_id': dest_rse_id,
'filesize': bytes,
'md5': md5,
'adler32': adler32}
if previous_attempt_id:
file_metadata['previous_attempt_id'] = previous_attempt_id
transfers[id] = {'request_id': id,
# 'src_urls': [source_url],
'sources': [(rse, source_url, source_rse_id, ranking)],
'dest_urls': [source_url],
'src_spacetoken': None,
'dest_spacetoken': dest_spacetoken,
'overwrite': False,
'bring_online': bring_online,
'copy_pin_lifetime': attr.get('lifetime', -1) if attr else -1,
'external_host': external_host,
'selection_strategy': 'auto',
'rule_id': rule_id,
'file_metadata': file_metadata}
logging.debug("Transfer for request(%s): %s" % (id, transfers[id]))
except:
logging.critical("Exception happened when trying to get transfer for request %s: %s" % (id, traceback.format_exc()))
break
return transfers, reqs_no_source
def get_stagein_transfers(process=None, total_processes=None, thread=None, total_threads=None, failover_schemes=None,
                          limit=None, activity=None, older_than=None, rses=None, mock=False, schemes=None, bring_online=43200, retry_other_fts=False, session=None):
    """
    Return the stage-in transfers that are ready for submission.

    Thin wrapper around get_stagein_requests_and_source_replicas(): requests
    for which no usable source replica was found are flagged NO_SOURCES so
    they are not picked up again on the next pass.
    """
    stagein_transfers, sourceless_requests = get_stagein_requests_and_source_replicas(
        process=process, total_processes=total_processes, thread=thread, total_threads=total_threads,
        failover_schemes=failover_schemes, limit=limit, activity=activity, older_than=older_than,
        rses=rses, mock=mock, schemes=schemes, bring_online=bring_online,
        retry_other_fts=retry_other_fts, session=session)
    # Mark requests without sources so they are handled by the reaper/retrier.
    request.set_requests_state(sourceless_requests, RequestState.NO_SOURCES)
    return stagein_transfers
def handle_requests_with_scheme_mismatch(transfers=None, reqs_scheme_mismatch=None, schemes=None):
    """
    Inspect the requests whose sources did not match the requested schemes.

    For every mismatched request, checks whether at least one source with a
    non-negative ranking (i.e. one that has not failed before) is available.
    Regenerating the URLs with a forced scheme is still a TODO, so the
    transfers dictionary is currently returned unmodified.

    :param transfers: Dictionary of transfer descriptions keyed by request id.
    :param reqs_scheme_mismatch: Request ids flagged as scheme-mismatched.
    :param schemes: Schemes that were requested (used for logging only).
    :returns: The (unmodified) transfers dictionary.
    """
    if not reqs_scheme_mismatch:
        return transfers
    for request_id in reqs_scheme_mismatch:
        logging.debug("Request %s with schemes %s has mismatched sources, will handle it" % (request_id, schemes))
        usable = False
        if request_id in transfers:
            for candidate in transfers[request_id]['sources']:
                # a ranking below zero means the source already failed before
                if candidate[3] >= 0:
                    usable = True
                    break
        if not usable:
            # todo
            # try to force scheme to regenerate the dest_url and src_url
            # transfer = get_transfer_from_request_id(request_id, scheme='srm') # if rsemgr can select protocol by order, we can change
            # if transfer:
            #     transfers[request_id] = transfer
            pass
    return transfers
def mock_sources(sources):
    """
    Rewrite each source URL to use the ``mock`` scheme.

    Defect fixed: the original rebound the ``sources`` parameter to the new
    list just before returning it; rebinding a parameter has no effect for
    the caller and only obscured the data flow, so it was removed.

    :param sources: Iterable of source tuples ``(rse, url, rse_id, ranking, ...)``;
                    any fields past the fourth are dropped.
    :returns: List of 4-tuples ``(rse, mock_url, rse_id, ranking)`` where the
              URL scheme has been replaced by ``mock``.
    """
    return [(s[0], ':'.join(['mock'] + s[1].split(':')[1:]), s[2], s[3])
            for s in sources]
def sort_link_ranking(sources):
    """
    Order sources by link ranking (descending), shuffling ties.

    Defect fixed: the original called ``rank_sources.keys()`` followed by
    ``.sort()``, which only works on Python 2 (on Python 3 ``dict.keys()``
    returns a view object without a ``sort`` method).  ``sorted()`` is used
    instead and behaves identically on both interpreters.

    :param sources: List of 5-tuples ``(rse, url, rse_id, ranking, link_ranking)``.
    :returns: New list sorted by ``link_ranking`` descending; sources sharing
              the same link ranking are randomly shuffled among themselves.
    """
    rank_sources = {}
    for source in sources:
        # link_ranking is the fifth field of the source tuple
        rank_sources.setdefault(source[4], []).append(source)
    ret_sources = []
    for rank_key in sorted(rank_sources, reverse=True):
        sources_list = rank_sources[rank_key]
        # randomise the order inside a group of equally-ranked links
        random.shuffle(sources_list)
        ret_sources = ret_sources + sources_list
    return ret_sources
def sort_ranking(sources):
    """
    Order sources by ranking (descending), then by link ranking within each
    ranking group (via sort_link_ranking, which also shuffles ties).

    Defect fixed: the original called ``rank_sources.keys().sort()``, which
    only works on Python 2 (``dict.keys()`` is an unsortable view on
    Python 3); ``sorted()`` is used instead, identical on both.

    :param sources: List of 5-tuples ``(rse, url, rse_id, ranking, link_ranking)``.
    :returns: New list, best sources first.
    """
    logging.debug("Sources before sorting: %s" % sources)
    rank_sources = {}
    for source in sources:
        # ranking is from sources table, is the retry times
        # link_ranking is from distances table, is the link rank.
        # link_ranking should not be None(None means no link, the source will not be used).
        ranking = source[3]
        if ranking is None:
            ranking = 0
        rank_sources.setdefault(ranking, []).append(source)
    ret_sources = []
    for rank_key in sorted(rank_sources, reverse=True):
        ret_sources = ret_sources + sort_link_ranking(rank_sources[rank_key])
    logging.debug("Sources after sorting: %s" % ret_sources)
    return ret_sources
def get_transfers(process=None, total_processes=None, thread=None, total_threads=None,
                  failover_schemes=None, limit=None, activity=None, older_than=None,
                  rses=None, schemes=None, mock=False, max_sources=4, bring_online=43200,
                  retry_other_fts=False, session=None):
    """
    Build the dictionary of submittable transfers: fetch the candidate
    requests, flag the unsubmittable ones, then rank and trim the sources of
    each remaining transfer.

    Defect fixed: with ``mock=True``, ``mock_sources()`` returns 4-tuples
    (without the ``link_ranking`` field) and the subsequent 5-way tuple
    unpacking raised ``ValueError``; the sources are now truncated with a
    ``[:4]`` slice, which works for both the mocked 4-tuples and the regular
    5-tuples.

    :returns: Dict of transfer descriptions keyed by request id.
    """
    transfers, reqs_no_source, reqs_scheme_mismatch, reqs_only_tape_source = get_transfer_requests_and_source_replicas(process=process, total_processes=total_processes, thread=thread, total_threads=total_threads,
                                                                                                                      limit=limit, activity=activity, older_than=older_than, rses=rses, schemes=schemes,
                                                                                                                      bring_online=bring_online, retry_other_fts=retry_other_fts,
                                                                                                                      failover_schemes=failover_schemes, session=session)
    # Requests that cannot be submitted in this round are flagged so the
    # appropriate daemons can retry or reap them later.
    request.set_requests_state(reqs_no_source, RequestState.NO_SOURCES)
    request.set_requests_state(reqs_only_tape_source, RequestState.ONLY_TAPE_SOURCES)
    request.set_requests_state(reqs_scheme_mismatch, RequestState.MISMATCH_SCHEME)
    for request_id in transfers:
        # Best sources first; never submit more than max_sources of them.
        sources = sort_ranking(transfers[request_id]['sources'])
        if len(sources) > max_sources:
            sources = sources[:max_sources]
        if not mock:
            transfers[request_id]['sources'] = sources
        else:
            transfers[request_id]['sources'] = mock_sources(sources)
        # remove link_ranking in the final sources: keep (rse, url, rse_id, ranking)
        transfers[request_id]['sources'] = [source[:4] for source in transfers[request_id]['sources']]
        # The file metadata reflects the highest-ranked source.
        transfers[request_id]['file_metadata']['src_rse'] = sources[0][0]
        transfers[request_id]['file_metadata']['src_rse_id'] = sources[0][2]
        logging.debug("Transfer for request(%s): %s" % (request_id, transfers[request_id]))
    return transfers
def submit_transfer(external_host, job, submitter='submitter', cachedir=None, process=0, thread=0, timeout=None):
    """
    Submit a prepared bulk transfer job to an FTS host and record the outcome.

    Flow: (1) mark every request of the job SUBMITTING in the database,
    (2) submit the bulk job to FTS, (3) record per-request SUBMITTED or
    SUBMISSION_FAILED state; if recording fails after a successful
    submission, try to cancel the FTS job so no orphan transfer is left.

    :param external_host: FTS host to submit the job to.
    :param job: Dict with 'files' (each carrying 'metadata', 'sources',
                'destinations') and 'job_params'.
    :param submitter: Label used in the monitoring metric names.
    :param cachedir: Unused in this function; kept for interface compatibility.
    :param process: Process number, used in log prefixes only.
    :param thread: Thread number, used in log prefixes only.
    :param timeout: Submission timeout handed to the transfer tool.
    """
    # prepare submitting: flag all requests of the job as SUBMITTING first,
    # so a crash between here and the FTS call leaves a recoverable state.
    xfers_ret = {}
    try:
        for file in job['files']:
            file_metadata = file['metadata']
            request_id = file_metadata['request_id']
            log_str = '%s:%s PREPARING REQUEST %s DID %s:%s TO SUBMITTING STATE PREVIOUS %s FROM %s TO %s USING %s ' % (process, thread,
                                                                                                                        file_metadata['request_id'],
                                                                                                                        file_metadata['scope'],
                                                                                                                        file_metadata['name'],
                                                                                                                        file_metadata['previous_attempt_id'] if 'previous_attempt_id' in file_metadata else None,
                                                                                                                        file['sources'],
                                                                                                                        file['destinations'],
                                                                                                                        external_host)
            xfers_ret[request_id] = {'state': RequestState.SUBMITTING, 'external_host': external_host, 'external_id': None, 'dest_url': file['destinations'][0]}
            logging.info("%s" % (log_str))
            xfers_ret[request_id]['file'] = file
        logging.debug("%s:%s start to prepare transfer" % (process, thread))
        request.prepare_request_transfers(xfers_ret)
        logging.debug("%s:%s finished to prepare transfer" % (process, thread))
    except:
        # If the requests cannot be flagged SUBMITTING, do not submit at all.
        logging.error("%s:%s Failed to prepare requests %s state to SUBMITTING(Will not submit jobs but return directly) with error: %s" % (process, thread, xfers_ret.keys(), traceback.format_exc()))
        return
    # submit the job; eid stays None when the submission fails
    eid = None
    try:
        ts = time.time()
        logging.info("%s:%s About to submit job to %s with timeout %s" % (process, thread, external_host, timeout))
        eid = request.submit_bulk_transfers(external_host, files=job['files'], transfertool='fts3', job_params=job['job_params'], timeout=timeout)
        duration = time.time() - ts
        logging.info("%s:%s Submit job %s to %s in %s seconds" % (process, thread, eid, external_host, duration))
        record_timer('daemons.conveyor.%s.submit_bulk_transfer.per_file' % submitter, (time.time() - ts) * 1000 / len(job['files']))
        record_counter('daemons.conveyor.%s.submit_bulk_transfer' % submitter, len(job['files']))
        record_timer('daemons.conveyor.%s.submit_bulk_transfer.files' % submitter, len(job['files']))
    except Exception, ex:
        logging.error("Failed to submit a job with error %s: %s" % (str(ex), traceback.format_exc()))
    # register transfer: record per-request outcome of the submission
    xfers_ret = {}
    try:
        for file in job['files']:
            file_metadata = file['metadata']
            request_id = file_metadata['request_id']
            log_str = '%s:%s COPYING REQUEST %s DID %s:%s USING %s' % (process, thread, file_metadata['request_id'], file_metadata['scope'], file_metadata['name'], external_host)
            if eid:
                # submission succeeded: attach the FTS job id to the request
                xfers_ret[request_id] = {'scope': file_metadata['scope'],
                                         'name': file_metadata['name'],
                                         'state': RequestState.SUBMITTED,
                                         'external_host': external_host,
                                         'external_id': eid,
                                         'request_type': file.get('request_type', None),
                                         'dst_rse': file_metadata.get('dst_rse', None),
                                         'src_rse': file_metadata.get('src_rse', None),
                                         'src_rse_id': file_metadata['src_rse_id'],
                                         'metadata': file_metadata}
                log_str += 'with state(%s) with eid(%s)' % (RequestState.SUBMITTED, eid)
                logging.info("%s" % (log_str))
            else:
                # submission failed: mark the request for a later retry
                xfers_ret[request_id] = {'scope': file_metadata['scope'],
                                         'name': file_metadata['name'],
                                         'state': RequestState.SUBMISSION_FAILED,
                                         'external_host': external_host,
                                         'external_id': None,
                                         'request_type': file.get('request_type', None),
                                         'dst_rse': file_metadata.get('dst_rse', None),
                                         'src_rse': file_metadata.get('src_rse', None),
                                         'src_rse_id': file_metadata['src_rse_id'],
                                         'metadata': file_metadata}
                log_str += 'with state(%s) with eid(%s)' % (RequestState.SUBMISSION_FAILED, None)
                logging.warn("%s" % (log_str))
        logging.debug("%s:%s start to register transfer state" % (process, thread))
        request.set_request_transfers_state(xfers_ret, datetime.datetime.utcnow())
        logging.debug("%s:%s finished to register transfer state" % (process, thread))
    except:
        logging.error("%s:%s Failed to register transfer state with error: %s" % (process, thread, traceback.format_exc()))
        # The job was submitted but its state could not be recorded: cancel it
        # on the FTS side to avoid an orphan transfer.
        try:
            if eid:
                logging.info("%s:%s Cancel transfer %s on %s" % (process, thread, eid, external_host))
                request.cancel_request_external_id(eid, external_host)
        except:
            logging.error("%s:%s Failed to cancel transfers %s on %s with error: %s" % (process, thread, eid, external_host, traceback.format_exc()))
def schedule_requests():
    """
    Throttler pass: compare queued/active transfer counts per (activity,
    destination RSE) against the configured limits, then set or remove RSE
    transfer limits and release waiting requests accordingly.

    Defects fixed:
      * the per-destination status log passed ``activity`` twice instead of
        ``(activity, dest_rse_id)``;
      * the "will not release" debug message had three placeholders for four
        arguments, raising ``TypeError`` (caught by the blanket except, which
        aborted the whole scheduling pass);
      * ``to_release_per_account`` was recomputed after ``nr_accounts`` was
        decremented, raising ``ZeroDivisionError`` when the last account of a
        destination hit one of those branches.
    """
    try:
        logging.info("Throttler retrieve requests statistics")
        results = request.get_stats_by_activity_dest_state(state=[RequestState.QUEUED, RequestState.SUBMITTING, RequestState.SUBMITTED, RequestState.WAITING])
        result_dict = {}
        # Aggregate counters per activity / destination RSE / account.
        for activity, dest_rse_id, account, state, counter in results:
            threshold = request.get_config_limit(activity, dest_rse_id)
            if threshold or (counter and (state == RequestState.WAITING)):
                if activity not in result_dict:
                    result_dict[activity] = {}
                if dest_rse_id not in result_dict[activity]:
                    result_dict[activity][dest_rse_id] = {'waiting': 0, 'transfer': 0, 'threshold': threshold, 'accounts': {}}
                if account not in result_dict[activity][dest_rse_id]['accounts']:
                    result_dict[activity][dest_rse_id]['accounts'][account] = {'waiting': 0, 'transfer': 0}
                if state == RequestState.WAITING:
                    result_dict[activity][dest_rse_id]['accounts'][account]['waiting'] += counter
                    result_dict[activity][dest_rse_id]['waiting'] += counter
                else:
                    result_dict[activity][dest_rse_id]['accounts'][account]['transfer'] += counter
                    result_dict[activity][dest_rse_id]['transfer'] += counter
        for activity in result_dict:
            for dest_rse_id in result_dict[activity]:
                threshold = result_dict[activity][dest_rse_id]['threshold']
                transfer = result_dict[activity][dest_rse_id]['transfer']
                waiting = result_dict[activity][dest_rse_id]['waiting']
                # FIX: log the destination RSE id, not the activity twice.
                logging.debug("Request status for %s at %s: %s" % (activity, dest_rse_id, result_dict[activity][dest_rse_id]))
                if threshold is None:
                    # No limit configured any more: drop limits, release everything.
                    logging.debug("Throttler remove limits(threshold: %s) and release all waiting requests for acitivity %s, rse_id %s" % (threshold, activity, dest_rse_id))
                    rse_core.delete_rse_transfer_limits(rse=None, activity=activity, rse_id=dest_rse_id)
                    request.release_waiting_requests(rse=None, activity=activity, rse_id=dest_rse_id)
                    rse_name = rse_core.get_rse_name(rse_id=dest_rse_id)
                    record_counter('daemons.conveyor.throttler.delete_rse_transfer_limits.%s.%s' % (activity, rse_name))
                elif transfer + waiting > threshold:
                    # Over the limit: (re-)publish the limit for this destination.
                    logging.debug("Throttler set limits for acitivity %s, rse_id %s" % (activity, dest_rse_id))
                    rse_core.set_rse_transfer_limits(rse=None, activity=activity, rse_id=dest_rse_id, max_transfers=threshold, transfers=transfer, waitings=waiting)
                    rse_name = rse_core.get_rse_name(rse_id=dest_rse_id)
                    record_gauge('daemons.conveyor.throttler.set_rse_transfer_limits.%s.%s.max_transfers' % (activity, rse_name), threshold)
                    record_gauge('daemons.conveyor.throttler.set_rse_transfer_limits.%s.%s.transfers' % (activity, rse_name), transfer)
                    record_gauge('daemons.conveyor.throttler.set_rse_transfer_limits.%s.%s.waitings' % (activity, rse_name), waiting)
                    if transfer < 0.8 * threshold:
                        # release requests on account: spread the remaining
                        # budget (threshold - transfer) fairly between accounts.
                        nr_accounts = len(result_dict[activity][dest_rse_id]['accounts'])
                        if nr_accounts < 1:
                            nr_accounts = 1
                        to_release = threshold - transfer
                        threshold_per_account = math.ceil(threshold / nr_accounts)
                        to_release_per_account = math.ceil(to_release / nr_accounts)
                        accounts = result_dict[activity][dest_rse_id]['accounts']
                        for account in accounts:
                            if nr_accounts == 1:
                                # Only one account left: it gets the whole budget.
                                logging.debug("Throttler release %s waiting requests for acitivity %s, rse_id %s, account %s " % (to_release, activity, dest_rse_id, account))
                                request.release_waiting_requests(rse=None, activity=activity, rse_id=dest_rse_id, account=account, count=to_release)
                                record_gauge('daemons.conveyor.throttler.release_waiting_requests.%s.%s.%s' % (activity, rse_name, account), to_release)
                            elif accounts[account]['transfer'] > threshold_per_account:
                                # FIX: the format string was missing the placeholder for the waiting count.
                                logging.debug("Throttler will not release %s waiting requests for acitivity %s, rse_id %s, account %s: It queued more transfers than its share " %
                                              (accounts[account]['waiting'], activity, dest_rse_id, account))
                                nr_accounts -= 1
                                if nr_accounts > 0:  # FIX: avoid ZeroDivisionError on the last account
                                    to_release_per_account = math.ceil(to_release / nr_accounts)
                            elif accounts[account]['waiting'] < to_release_per_account:
                                # The account waits for less than its share: release all of it.
                                logging.debug("Throttler release %s waiting requests for acitivity %s, rse_id %s, account %s " % (accounts[account]['waiting'], activity, dest_rse_id, account))
                                request.release_waiting_requests(rse=None, activity=activity, rse_id=dest_rse_id, account=account, count=accounts[account]['waiting'])
                                record_gauge('daemons.conveyor.throttler.release_waiting_requests.%s.%s.%s' % (activity, rse_name, account), accounts[account]['waiting'])
                                to_release = to_release - accounts[account]['waiting']
                                nr_accounts -= 1
                                if nr_accounts > 0:  # FIX: avoid ZeroDivisionError on the last account
                                    to_release_per_account = math.ceil(to_release / nr_accounts)
                            else:
                                # Release exactly the per-account share.
                                logging.debug("Throttler release %s waiting requests for acitivity %s, rse_id %s, account %s " % (to_release_per_account, activity, dest_rse_id, account))
                                request.release_waiting_requests(rse=None, activity=activity, rse_id=dest_rse_id, account=account, count=to_release_per_account)
                                record_gauge('daemons.conveyor.throttler.release_waiting_requests.%s.%s.%s' % (activity, rse_name, account), to_release_per_account)
                                to_release = to_release - to_release_per_account
                                nr_accounts -= 1
                elif waiting > 0:
                    # Under the limit but with waiting requests: release them all.
                    logging.debug("Throttler remove limits(threshold: %s) and release all waiting requests for acitivity %s, rse_id %s" % (threshold, activity, dest_rse_id))
                    rse_core.delete_rse_transfer_limits(rse=None, activity=activity, rse_id=dest_rse_id)
                    request.release_waiting_requests(rse=None, activity=activity, rse_id=dest_rse_id)
                    rse_name = rse_core.get_rse_name(rse_id=dest_rse_id)
                    record_counter('daemons.conveyor.throttler.delete_rse_transfer_limits.%s.%s' % (activity, rse_name))
    except:
        logging.warning("Failed to schedule requests, error: %s" % (traceback.format_exc()))
|
StarcoderdataPython
|
3374869
|
#!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
Tests remote services discovery using the JSON-RPC transport
:author: <NAME>
:copyright: Copyright 2020, <NAME>
:license: Apache License 2.0
..
Copyright 2020 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import time
import threading
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
# Try to import modules
from multiprocessing import Process, Queue
# IronPython fails when creating a queue
Queue()
except ImportError:
# Some interpreters don't have support for multiprocessing
raise unittest.SkipTest("Interpreter doesn't support multiprocessing")
try:
import queue
except ImportError:
import Queue as queue
# Pelix
from pelix.framework import create_framework, FrameworkFactory
from pelix.ipopo.constants import use_ipopo
import pelix.http
import pelix.remote
# Local utilities
from tests.utilities import WrappedProcess
# ------------------------------------------------------------------------------
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
SVC_SPEC = "pelix.test.remote"
class RemoteService(object):
    """
    Implementation of the service exported by the peer framework.
    """
    def __init__(self, state_queue, event):
        """
        Sets up members

        ;param state_queue: Queue to store status
        :param event: Stop event
        """
        self.event = event
        self.state_queue = state_queue

    def echo(self, value):
        """
        Notifies the test of the call, then returns *value* unchanged
        """
        self.state_queue.put("call-echo")
        return value

    def stop(self):
        """
        Triggers the shutdown of the hosting peer
        """
        self.event.set()
# ------------------------------------------------------------------------------
def load_framework(transport, discovery, components):
    """
    Creates and starts a Pelix framework in the current process.

    :param transport: Name of the transport bundle to install
    :param discovery: Name of the discovery bundle to install
    :param components: Tuples (factory, name) or (factory, name, opts) of
                       instances to start
    :return: The started framework
    """
    bundles = ['pelix.ipopo.core',
               'pelix.http.basic',
               'pelix.remote.dispatcher',
               'pelix.remote.registry',
               discovery,
               transport]
    framework = create_framework(bundles)
    framework.start()
    with use_ipopo(framework.get_bundle_context()) as ipopo:
        # HTTP server bound to a random port (port 0)
        ipopo.instantiate(pelix.http.FACTORY_HTTP_BASIC,
                          "http-server",
                          {pelix.http.HTTP_SERVICE_ADDRESS: "0.0.0.0",
                           pelix.http.HTTP_SERVICE_PORT: 0})
        ipopo.instantiate(pelix.remote.FACTORY_REGISTRY_SERVLET,
                          "dispatcher-servlet")
        # Start the requested components, with options when provided
        for component in components:
            if len(component) == 3:
                factory, name, opts = component
            else:
                factory, name = component
                opts = {}
            ipopo.instantiate(factory, name, opts)
    return framework
def export_framework(state_queue, transport, discovery, components):
    """
    Runs a Pelix framework hosting the exported service until told to stop.

    :param state_queue: Queue to store status
    :param transport: Name of the transport bundle to install
    :param discovery: Name of the discovery bundle to install
    :param components: Tuples (factory, name) of instances to start
    """
    try:
        framework = load_framework(transport, discovery, components)
        context = framework.get_bundle_context()
        # Export the test service; the event controls the peer's lifetime
        stop_event = threading.Event()
        context.register_service(
            SVC_SPEC, RemoteService(state_queue, stop_event),
            {pelix.remote.PROP_EXPORTED_INTERFACES: '*'})
        # Tell the parent process we are up, then wait for the stop order
        state_queue.put("ready")
        stop_event.wait()
        state_queue.put("stopping")
        framework.stop()
    except Exception as ex:
        # Report any startup/shutdown failure back to the parent
        state_queue.put("Error: {0}".format(ex))
# ------------------------------------------------------------------------------
class HttpTransportsTest(unittest.TestCase):
    """
    Tests Pelix built-in Remote Services transports
    """
    def __init__(self, *args, **kwargs):
        super(HttpTransportsTest, self).__init__(*args, **kwargs)
        self._load_framework = load_framework
        self._export_framework = export_framework

    def _run_test(self, discovery_bundle, discovery_factory,
                  discovery_opts=None):
        """
        Runs a remote service call test

        Defect fixed: the peer status queue was polled with
        ``status_queue.get(4)`` / ``get(2)``.  The first positional argument
        of ``Queue.get`` is ``block``, not ``timeout``, so those calls could
        block forever and never raise ``queue.Empty`` as this method's
        contract promises.  The values are now passed as ``timeout=``.

        :param discovery_bundle: Discovery implementation bundle to use
        :param discovery_factory: Name of the discovery factory
        :param discovery_opts: Initial parameters of the discovery component
        :raise queue.Empty: Peer took to long to answer
        :raise ValueError: Test failed
        """
        transport_bundle = "pelix.remote.json_rpc"
        # Define components
        components = [
            (pelix.remote.FACTORY_TRANSPORT_JSONRPC_EXPORTER, "rs-exporter"),
            (pelix.remote.FACTORY_TRANSPORT_JSONRPC_IMPORTER, "rs-importer"),
            (discovery_factory, "discovery", discovery_opts)]
        # Start the remote framework
        status_queue = Queue()
        peer = WrappedProcess(target=self._export_framework,
                              args=(status_queue, transport_bundle,
                                    discovery_bundle, components))
        peer.start()
        try:
            # Wait for the ready state
            state = status_queue.get(timeout=4)
            self.assertEqual(state, "ready")
            # Load the local framework (after the fork)
            framework = self._load_framework(
                transport_bundle, discovery_bundle, components)
            context = framework.get_bundle_context()
            # Look for the remote service
            for _ in range(10):
                svc_ref = context.get_service_reference(SVC_SPEC)
                if svc_ref is not None:
                    break
                time.sleep(.5)
            else:
                self.fail("Remote Service not found")
            # Get it
            svc = context.get_service(svc_ref)
            # Echo call
            for value in (None, "Test", 42, [1, 2, 3], {"a": "b"}):
                result = svc.echo(value)
                # Check state
                state = status_queue.get(timeout=2)
                self.assertEqual(state, "call-echo")
                # Check result
                self.assertEqual(result, value)
            # Stop the peer
            svc.stop()
            # Wait for the peer to stop
            state = status_queue.get(timeout=2)
            self.assertEqual(state, "stopping")
            # Wait a bit more, to let coverage save its files
            peer.join(1)
            # Check that the remote service goes away
            for _ in range(10):
                svc_ref = context.get_service_reference(SVC_SPEC)
                if svc_ref is None:
                    break
                time.sleep(2)
            else:
                self.fail("Remote Service still registered")
        finally:
            # Stop everything (and delete the framework in any case)
            try:
                FrameworkFactory.delete_framework()
            except:
                pass
            peer.terminate()
            status_queue.close()

    def test_multicast(self):
        """
        Tests the Multicast discovery
        """
        try:
            self._run_test("pelix.remote.discovery.multicast",
                           pelix.remote.FACTORY_DISCOVERY_MULTICAST)
        except queue.Empty:
            # Process error
            self.fail("Remote framework took to long to reply")

    def test_mdns(self):
        """
        Tests the mDNS/Zeroconf discovery
        """
        try:
            import zeroconf
        except ImportError:
            self.skipTest("zeroconf is missing: can't test mDNS discovery")
        try:
            self._run_test("pelix.remote.discovery.mdns",
                           pelix.remote.FACTORY_DISCOVERY_ZEROCONF,
                           {"zeroconf.ttl": 10})
        except queue.Empty:
            # Process error
            self.fail("Remote framework took to long to reply")

    def test_mqtt(self):
        """
        Tests the MQTT discovery
        """
        try:
            import paho
        except ImportError:
            self.skipTest("paho is missing: can't test MQTT discovery")
        try:
            self._run_test("pelix.remote.discovery.mqtt",
                           pelix.remote.FACTORY_DISCOVERY_MQTT)
        except queue.Empty:
            # Process error
            self.fail("Remote framework took to long to reply")

    def test_redis(self):
        """
        Tests the Redis discovery
        """
        try:
            import redis
        except ImportError:
            self.skipTest("redis is missing: can't test Redis discovery")
        try:
            self._run_test("pelix.remote.discovery.redis",
                           pelix.remote.FACTORY_DISCOVERY_REDIS)
        except queue.Empty:
            # Process error
            self.fail("Remote framework took to long to reply")

    def test_zookeeper(self):
        """
        Tests the ZooKeeper discovery
        """
        try:
            import kazoo
        except ImportError:
            self.skipTest("Kazoo is missing: can't test ZooKeeper discovery")
        try:
            self._run_test("pelix.remote.discovery.zookeeper",
                           pelix.remote.FACTORY_DISCOVERY_ZOOKEEPER,
                           {"zookeeper.hosts": "localhost:2181"})
        except queue.Empty:
            # Process error
            self.fail("Remote framework took to long to reply")
# ------------------------------------------------------------------------------
if __name__ == "__main__":
    # Entry point: enable DEBUG logging so the discovery/transport
    # providers are verbose while the integration tests run.
    import logging
    logging.basicConfig(level=logging.DEBUG)

    unittest.main()
|
StarcoderdataPython
|
3338386
|
<gh_stars>1-10
import unittest
from unittest.mock import Mock
from importlib import import_module
component = import_module('run.helpers.impobj')
class import_object_Test(unittest.TestCase):
    """Tests for run.helpers.impobj.import_object."""

    # Tests

    def test(self):
        resolved = component.import_object('unittest.mock.Mock')
        self.assertIs(resolved, Mock)

    def test_with_package(self):
        resolved = component.import_object('.Mock', package='unittest.mock')
        self.assertIs(resolved, Mock)

    def test_with_name_in_bad_format(self):
        # A bare module name is not a "module.attribute" path
        with self.assertRaises(ValueError):
            component.import_object('unittest')

    def test_with_module_is_not_existent(self):
        with self.assertRaises(ImportError):
            component.import_object('not_existent.mock.Mock')

    def test_with_name_is_not_existent(self):
        with self.assertRaises(AttributeError):
            component.import_object('unittest.mock.not_existent')

    def test_with_name_is_object(self):
        # Passing an object through should be the identity
        self.assertIs(component.import_object(Mock), Mock)
|
StarcoderdataPython
|
4802496
|
"""
clustering of word embeddings
@TODO documentation of the module
"""
import numpy as np
from sklearn.base import BaseEstimator
from gensim.models import Word2Vec, KeyedVectors
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
class WordClustering(BaseEstimator):
    """ theme-affinity vectorization of documents

    Word vectors are grouped into `n_clusters` clusters (roughly, lexical
    fields); each document is then encoded as the normalized histogram of
    the clusters its tokens belong to.

    Parameters
    ----------
    w2v_size : int, default=128
        size of the hidden layer in the embedding Word2Vec model
    n_clusters : int, default=30
        number of clusters, i.e. the number of output parameters for the
        vectorization.
        It is advised to set `n_clusters` to the approximate number of
        lexical fields
    clustering : sklearn.cluster instance or None, default=None
        clustering algorithm; when None, ``KMeans(n_clusters=n_clusters)``
        is used.  The estimator's number of clusters is always forced to
        `n_clusters`
    pretrained : bool, default=False
        False to train a new w2v model
        True to use a model already trained
    model_path : str, default=None
        path to the trained w2v model
        Only used when `pretrained` is set to True
    """

    def __init__(self,
                 w2v_size=128,
                 n_clusters=30,
                 clustering=None,
                 pretrained=False,
                 model_path=None):
        self.w2v_size = w2v_size
        self.n_clusters = n_clusters
        # BUG FIX: the former default ``clustering=KMeans(n_clusters=30)``
        # was a mutable default argument: a single KMeans instance was
        # shared -- and mutated by set_params below -- by every
        # WordClustering constructed with the default.
        if clustering is None:
            clustering = KMeans(n_clusters=n_clusters)
        self.clustering = clustering
        self.pretrained = pretrained
        self.model_path = model_path
        # vocabulary (word -> gensim vocab entry), set by fit()
        self.vocabulary_ = None
        # distributed representation of the words, set by fit()
        self.word_vectors_ = None
        # cluster id for each word, set by fit()
        self.cluster_ids_ = None
        # keep the (possibly user-supplied) estimator in sync
        self.clustering.set_params(n_clusters=n_clusters)

    def fit(self, X=None, y=None, **fit_params):
        """ train w2v and clustering models

        Parameters
        ----------
        X : iterable of iterable, default=None
            corpus of tokenized documents if `pretrained`=False
            else, X=None and the pretrained model is used
        y : None
        fit_params : additionnal parameters for word2vec algorithm

        Returns
        -------
        self
        """
        if self.pretrained:
            w2v = KeyedVectors.load_word2vec_format(self.model_path)
        else:
            w2v = Word2Vec(X, size=self.w2v_size)
        self.vocabulary_ = w2v.wv.vocab
        # rows of word_vectors_ follow the iteration order of vocabulary_
        self.word_vectors_ = w2v[self.vocabulary_]
        self.cluster_ids_ = self.clustering.fit_predict(self.word_vectors_)
        return self

    def transform(self, X, y=None):
        """ transforms each row of `X` into a vector of clusters affinities

        Parameters
        ----------
        X : iterable of iterable
        y : None

        Returns
        -------
        numpy.ndarray, shape=(n, p)
            transformed documents, where `p=n_clusters`
        """
        vectors = []
        for x in X:
            vector = np.zeros(self.n_clusters)
            count = 0
            for t in x:
                try:
                    # NOTE(review): assumes gensim's ``Vocab.index`` matches
                    # the row order of word_vectors_ built in fit() -- confirm
                    word_id = self.vocabulary_[t].index
                    word_cluster = self.cluster_ids_[word_id]
                    vector[word_cluster] += 1
                    count += 1
                except KeyError:
                    # token absent from the vocabulary: ignore it
                    pass
            # guard against empty/all-unknown documents (avoid 0-division)
            if count > 0:
                vectors.append(vector / count)
            else:
                vectors.append(vector)
        return np.array(vectors)

    def get_clusters_words(self):
        """ return the words in each cluster

        Returns
        -------
        dict
            keys are cluster ids (as strings), values are lists of words
        """
        words_cluster = {}
        for cluster_id in np.unique(self.cluster_ids_):
            words_cluster[str(cluster_id)] = []
        # enumeration order of vocabulary_ matches the row order used in fit()
        for i, word in enumerate(self.vocabulary_):
            label = str(self.cluster_ids_[i])
            words_cluster[label].append(word)
        return words_cluster
def embed_corpus(X, n_clusters, clustering, **kwargs):
    """ transforms X into vector of cluster affinities

    ..deprecated use `WordClustering` object instead

    Parameters
    ----------
    X : iterable of iterable, (length=n)
        corpus of document
    n_clusters : int
        number of clusters produced by `clustering`
    clustering : sklearn.cluster object
        instanciated clustering algorithm

    Returns
    -------
    (np.ndarray, np.ndarray)
        affinity matrix of shape=(n, n_clusters) and the cluster id of
        every vocabulary word
    """
    # fit: train embeddings, reduce dimensionality, cluster the words
    w2v = Word2Vec(X, size=128)
    words = w2v.wv.vocab
    word_vectors = w2v[words]
    # keep 90% of the variance before clustering
    pca_word_vectors = PCA(n_components=0.9).fit_transform(word_vectors)
    # clustering = AgglomerativeClustering(n_clusters, affinity='euclidean')
    cluster_ids = clustering.fit_predict(pca_word_vectors)
    # transform: per-document normalized cluster histogram
    vectors = []
    for x in X:
        vector = np.zeros(n_clusters)
        count = 0
        for t in x:
            try:
                word_id = words[t].index
                word_cluster = cluster_ids[word_id]
                vector[word_cluster] = vector[word_cluster] + 1
                count += 1
            # token absent from the vocabulary: ignore it
            except KeyError:
                pass
        # BUG FIX: an empty or all-unknown document previously divided by
        # zero and produced a NaN row; keep the zero vector instead
        # (consistent with WordClustering.transform)
        if count > 0:
            vectors.append(vector / count)
        else:
            vectors.append(vector)
    return np.array(vectors), cluster_ids
|
StarcoderdataPython
|
176763
|
<reponame>vandurme/TFMTL<filename>mtl/extractors/lbirnn.py<gh_stars>1-10
# Copyright 2018 Johns Hopkins University. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from six.moves import xrange
import mtl.util.registry as registry
from mtl.util.common import (validate_extractor_inputs,
listify,
unlistify)
def get_multi_cell(cell_type, cell_size, num_layers):
    """Build an RNN cell of `cell_type` with `cell_size` units, stacked
    `num_layers` deep.

    Returns a single cell when ``num_layers == 1``, otherwise a
    ``MultiRNNCell`` wrapping one *fresh* cell per layer.
    """
    def make_cell():
        # Xavier initialization where the constructor exposes it; note the
        # GRU constructor names the argument `kernel_initializer`.
        if cell_type == tf.contrib.rnn.GRUCell:
            return cell_type(
                cell_size,
                kernel_initializer=tf.contrib.layers.xavier_initializer())
        elif cell_type == tf.contrib.rnn.LSTMCell:
            return cell_type(
                cell_size,
                initializer=tf.contrib.layers.xavier_initializer())
        # TODO initializer???
        elif cell_type == tf.contrib.rnn.SRUCell:
            return cell_type(
                cell_size,
                initializer=tf.contrib.layers.xavier_initializer())
        # TODO layer num???
        # elif cell_type == RANCell:
        #   cell = cell_type(num_units=cell_size)
        else:
            return cell_type(cell_size)

    if num_layers > 1:
        # BUG FIX: the previous ``[cell] * num_layers`` reused the same cell
        # object for every layer, which makes the layers share their weights
        # (and raises an error in newer TF releases); build a distinct cell
        # per layer instead.
        return tf.contrib.rnn.MultiRNNCell(
            [make_cell() for _ in range(num_layers)])
    else:
        return make_cell()
def fill_initial_state(initial_states, cells, batch_size):
    """Return `initial_states` as a tuple, substituting the zero state of
    `cells` for every ``None`` entry."""
    return tuple(
        cells.zero_state(batch_size, tf.float32) if state is None else state
        for state in initial_states)
def get_idx(indices, batch_size):
    """Pair each batch row number with its token index so the result can be
    fed to ``tf.gather_nd``: row i becomes ``[i, indices[i]]``."""
    # column vector [[0], [1], ..., [batch_size - 1]] of row numbers
    rows = tf.expand_dims(tf.cast(tf.range(batch_size), dtype=tf.int64), 1)

    # normalize `indices` to the column-vector form [[idx_0], ..., [idx_N]]
    rank = len(indices.get_shape().as_list())
    if rank == 1:
        indices = tf.expand_dims(indices, 1)
    elif rank != 2:
        raise ValueError("indices doesn't have rank 1 or 2: rank=%d" % (rank))

    return tf.concat([rows, indices], axis=1)
def _lbirnn_helper(inputs,
                   lengths,
                   is_training,
                   indices=None,
                   num_layers=2,
                   cell_type=tf.contrib.rnn.LSTMCell,
                   cell_size=64,
                   initial_state_fwd=None,
                   initial_state_bwd=None,
                   scope=None,
                   **kwargs):
    """Stacked linear chain bi-directional RNN (single stage).

    Runs two independent stacked RNNs -- one over the sequence, one over the
    reversed sequence -- and returns their per-direction outputs and final
    states separately (concatenation is left to the caller).

    Inputs
    _____
    inputs: batch of size [batch_size, batch_len, embed_size]
    lengths: batch of size [batch_size]
    indices: which token index in each batch example should be output
        shape: [batch_size] or [batch_size, 1]
    num_layers: number of stacked layers in the bi-RNN
    cell_type: type of RNN cell to use (e.g., LSTM, GRU)
    cell_size: cell's output size
    initial_state_fwd: initial state for forward direction
    initial_state_bwd: initial state for backward direction
    scope: optional variable scope (name or scope object)
    kwargs: recognized keys are "output_keep_prob" (train-time output
        dropout), "attention" and "attn_length" (AttentionCellWrapper)

    Outputs
    _______
    If the input word vectors have dimension D and indices is None,
    the output is a Tensor of size
    [batch_size, batch_len, cell_size_fwd + cell_size_bwd]
    = [batch_size, batch_len, 2*cell_size].
    If indices is not None, the output is a Tensor of size
    [batch_size, cell_size_fwd + cell_size_bwd]
    = [batch_size, 2*cell_size]
    Returned as ((code_fwd, code_bwd), (last_state_fwd, last_state_bwd)).
    """
    if scope is not None:
        scope_name = scope
    else:
        scope_name = "lbirnn"
    with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE) as varscope:
        # print("_lbirnn_helper scope={}".format(varscope))
        # reverse each batch example up through its length, maintaining right-padding
        inputs_rev = tf.reverse_sequence(inputs, lengths, batch_axis=0,
                                         seq_axis=1)
        cells_fwd = get_multi_cell(cell_type, cell_size, num_layers)
        cells_bwd = get_multi_cell(cell_type, cell_size, num_layers)
        # output dropout only at train time, and only when requested
        if is_training and ("output_keep_prob" in kwargs) and (
                kwargs["output_keep_prob"] < 1.0):
            cells_fwd = tf.contrib.rnn.DropoutWrapper(cell=cells_fwd,
                                                      output_keep_prob=kwargs[
                                                          "output_keep_prob"])
            cells_bwd = tf.contrib.rnn.DropoutWrapper(cell=cells_bwd,
                                                      output_keep_prob=kwargs[
                                                          "output_keep_prob"])
        else:
            pass
        # optional attention wrapper around both directions
        if "attention" in kwargs and kwargs["attention"] == True:
            if "attn_length" in kwargs:
                attn_length = kwargs["attn_length"]
            else:
                attn_length = 10
            cells_fwd = tf.contrib.rnn.AttentionCellWrapper(cells_fwd,
                                                            attn_length=attn_length)
            cells_bwd = tf.contrib.rnn.AttentionCellWrapper(cells_bwd,
                                                            attn_length=attn_length)

        batch_size = tf.shape(inputs)[0]
        # replace None initial states with zero states (listify/unlistify
        # smooth over the single-layer vs. multi-layer state structure)
        initial_state_fwd = fill_initial_state(listify(initial_state_fwd),
                                               cells_fwd,
                                               batch_size)
        initial_state_fwd = unlistify(initial_state_fwd)
        initial_state_bwd = fill_initial_state(listify(initial_state_bwd),
                                               cells_bwd,
                                               batch_size)
        initial_state_bwd = unlistify(initial_state_bwd)

        # if initial_state_fwd is None:
        #  #initial_state_fwd = [None]
        #  initial_state_fwd = fill_initial_state([None], cells_fwd, batch_size)#[0]
        # else:
        #  initial_state_fwd = fill_initial_state(initial_state_fwd, cells_fwd, batch_size)
        #
        # if initial_state_bwd is None:
        #  #initial_state_bwd = [None]
        #  initial_state_bwd = fill_initial_state([None], cells_bwd, batch_size)#[0]
        # else:
        #  initial_state_bwd = fill_initial_state(initial_state_bwd, cells_bwd, batch_size)

        code_fwd, last_state_fwd = tf.nn.dynamic_rnn(cells_fwd,
                                                     inputs,
                                                     sequence_length=lengths,
                                                     initial_state=initial_state_fwd,
                                                     time_major=False,
                                                     scope="rnn_fwd")
        tmp, last_state_bwd = tf.nn.dynamic_rnn(cells_bwd,
                                                inputs_rev,
                                                sequence_length=lengths,
                                                initial_state=initial_state_bwd,
                                                time_major=False,
                                                scope="rnn_bwd")
        # reverse backward-pass outputs so they align with the forward-pass outputs
        code_bwd = tf.reverse_sequence(tmp, lengths, batch_axis=0, seq_axis=1)

        if indices is not None:
            idx = get_idx(indices, batch_size)
            # get the (indices[i])-th token's output from row i
            code_fwd = tf.gather_nd(code_fwd, idx)
            code_bwd = tf.gather_nd(code_bwd, idx)
        return (code_fwd, code_bwd), (last_state_fwd, last_state_bwd)
"""
def lbirnn(inputs,
lengths,
is_training,
indices=None,
num_layers=2,
cell_type=tf.contrib.rnn.LSTMCell,
cell_size=64,
initial_state_fwd=None,
initial_state_bwd=None,
**kwargs):
with tf.variable_scope("single-stage-lbirnn", reuse=tf.AUTO_REUSE) as varscope:
o, _ = _lbirnn_helper(inputs,
lengths,
is_training=is_training,
indices=indices,
num_layers=num_layers,
cell_type=cell_type,
cell_size=cell_size,
initial_state_fwd=initial_state_fwd,
initial_state_bwd=initial_state_bwd,
scope=varscope,
**kwargs)
(code_fwd, code_bwd) = o
code = tf.concat([code_fwd, code_bwd], axis=-1)
outputs = code
return outputs
"""
def lbirnn(inputs,
           lengths,
           is_training,
           indices=None,
           num_layers=2,
           cell_type=tf.contrib.rnn.GRUCell,
           cell_size=64,
           initial_state_fwd=None,
           initial_state_bwd=None,
           **kwargs):
    """Serial stacked linear chain bi-directional RNN

    If `indices` is specified for the last stage, the outputs of the tokens
    in the last stage as specified by `indices` will be returned.
    If `indices` is None for the last stage, the encodings for all tokens
    in the sequence are returned.

    Inputs
    _____
    All arguments denoted with (*) should be given as lists,
    one element per stage in the series. The specifications given
    below are for a single stage.

    inputs (*): Tensor of size [batch_size, batch_len, embed_size]
    lengths (*): Tensor of size [batch_size]
    indices: Tensor of which token index in each batch item should be output;
        shape: [batch_size] or [batch_size, 1]
    num_layers: number of stacked layers in the bi-RNN
    cell_type: type of RNN cell to use (e.g., LSTM, GRU)
    cell_size: cell's output size
    initial_state_fwd: initial state for forward direction, may be None
    initial_state_bwd: initial state for backward direction, may be None

    Outputs
    _______
    If the input word vectors have dimension D and the series has N stages:
    if `indices` is not None:
        the output is a Tensor of size [batch_size, cell_size]
    if `indices` is None:
        the output is a Tensor of size [batch_size, batch_len, cell_size]
    """
    validate_extractor_inputs(inputs, lengths)
    num_stages = len(inputs)

    # final states of stage k seed the initial states of stage k+1
    fwd_ = initial_state_fwd
    bwd_ = initial_state_bwd
    prev_varscope = None
    for n_stage in xrange(num_stages):
        # with tf.variable_scope("serial_lbirnn", reuse=tf.AUTO_REUSE) as varscope:
        with tf.variable_scope(
                "serial-lbirnn-seq{}".format(n_stage)) as varscope:
            if prev_varscope is not None:
                prev_varscope.reuse_variables()
            if n_stage == num_stages - 1:
                # Use the user-specified indices on the last stage
                indices_ = indices
            else:
                indices_ = None

            o, s = _lbirnn_helper(inputs[n_stage],
                                  lengths[n_stage],
                                  is_training=is_training,
                                  indices=indices_,
                                  num_layers=num_layers,
                                  cell_type=cell_type,
                                  cell_size=cell_size,
                                  initial_state_fwd=fwd_,
                                  initial_state_bwd=bwd_,
                                  scope=varscope,
                                  **kwargs)
            (code_fwd, code_bwd), (last_state_fwd, last_state_bwd) = o, s
            # Update arguments for next stage
            fwd_ = last_state_fwd
            bwd_ = last_state_bwd
            prev_varscope = varscope

    # concatenate the two directions of the final stage
    code = tf.concat([code_fwd, code_bwd], axis=-1)
    outputs = code
    return outputs
def _lbirnn_stock_helper(inputs,
                         lengths,
                         is_training,
                         num_layers=2,
                         cell_type=tf.contrib.rnn.GRUCell,
                         cell_size=64,
                         initial_state_fwd=None,
                         initial_state_bwd=None,
                         scope=None,
                         **kwargs):
    """Single stage of a bi-RNN built on ``tf.nn.bidirectional_dynamic_rnn``.

    Unlike ``_lbirnn_helper``, this relies on TF's stock bidirectional
    implementation and returns its ``(outputs, last_states)`` pair
    unchanged.  Recognized ``kwargs``: "skip_connections" (ResidualWrapper),
    "output_keep_prob" (train-time output dropout), "attention" /
    "attn_length" (AttentionCellWrapper).
    """
    scope_name = scope if scope is not None else "stock-lbirnn"
    with tf.variable_scope(scope_name) as varscope:
        cells_fwd = get_multi_cell(cell_type, cell_size, num_layers)
        cells_bwd = get_multi_cell(cell_type, cell_size, num_layers)
        # optional residual (skip) connections around each direction
        if "skip_connections" in kwargs and kwargs["skip_connections"]:
            cells_fwd = tf.contrib.rnn.ResidualWrapper(cells_fwd)
            cells_bwd = tf.contrib.rnn.ResidualWrapper(cells_bwd)
        # output dropout only at train time, and only when requested
        if is_training and ("output_keep_prob" in kwargs) and (
                kwargs["output_keep_prob"] < 1.0):
            cells_fwd = tf.contrib.rnn.DropoutWrapper(cell=cells_fwd,
                                                      output_keep_prob=kwargs[
                                                          "output_keep_prob"])
            cells_bwd = tf.contrib.rnn.DropoutWrapper(cell=cells_bwd,
                                                      output_keep_prob=kwargs[
                                                          "output_keep_prob"])
        else:
            pass
        # optional attention wrapper around both directions
        if "attention" in kwargs and kwargs["attention"] == True:
            if "attn_length" in kwargs:
                attn_length = kwargs["attn_length"]
            else:
                attn_length = 10
            cells_fwd = tf.contrib.rnn.AttentionCellWrapper(cells_fwd,
                                                            attn_length=attn_length)
            cells_bwd = tf.contrib.rnn.AttentionCellWrapper(cells_bwd,
                                                            attn_length=attn_length)
        outputs, last_states = tf.nn.bidirectional_dynamic_rnn(cells_fwd,
                                                               cells_bwd,
                                                               inputs,
                                                               sequence_length=lengths,
                                                               initial_state_fw=initial_state_fwd,
                                                               initial_state_bw=initial_state_bwd,
                                                               dtype=tf.float32)
    return outputs, last_states
def lbirnn_stock(inputs,
                 lengths,
                 is_training,
                 num_layers=2,
                 cell_type=tf.contrib.rnn.GRUCell,
                 cell_size=64,
                 initial_state_fwd=None,
                 initial_state_bwd=None,
                 **kwargs):
    """Serial bi-RNN built on ``tf.nn.bidirectional_dynamic_rnn``.

    Each stage consumes one (inputs, lengths) pair and feeds its final
    states into the next stage.  Returns the concatenated final hidden
    states of the top layer of both directions of the last stage,
    shape [batch_size, 2 * cell_size].
    """
    validate_extractor_inputs(inputs, lengths)
    num_stages = len(inputs)

    # final states of stage k seed the initial states of stage k+1
    fwd_ = initial_state_fwd
    bwd_ = initial_state_bwd
    prev_varscope = None
    for n_stage in xrange(num_stages):
        with tf.variable_scope(
                "serial-lbirnn-stock-seq{}".format(n_stage)) as varscope:
            if prev_varscope is not None:
                prev_varscope.reuse_variables()
            code, states = _lbirnn_stock_helper(inputs[n_stage],
                                                lengths[n_stage],
                                                is_training=is_training,
                                                num_layers=num_layers,
                                                cell_type=cell_type,
                                                cell_size=cell_size,
                                                initial_state_fwd=fwd_,
                                                initial_state_bwd=bwd_,
                                                scope=varscope,
                                                **kwargs)
            fwd_ = states[0]
            bwd_ = states[1]
            prev_varscope = varscope

    # concatenate the final hidden states of both directions:
    # `states` = (fwd_state, bwd_state); an LSTM state is a (c, h) pair,
    # a GRU state is the hidden vector itself; with num_layers > 1 each
    # direction is a per-layer tuple (cf.
    # https://github.com/coastalcph/mtl-disparate/blob/master/mtl/nn.py#L43)
    if num_layers > 1:
        if cell_type == tf.contrib.rnn.GRUCell:
            # BUG FIX: this branch used to fall through without assigning
            # `output`, raising UnboundLocalError for stacked GRUs; take the
            # top layer's hidden state of each direction.
            output = tf.concat([states[0][-1], states[1][-1]], 1)
        elif cell_type == tf.contrib.rnn.LSTMCell:
            output = tf.concat([states[0][-1][1], states[1][-1][1]], 1)
        else:
            # BUG FIX: previously fell through to an UnboundLocalError
            raise ValueError("unsupported cell type: {}".format(cell_type))
    else:
        # if cell_type == tf.contrib.rnn.GRUCell or cell_type == RANCell:
        if cell_type == tf.contrib.rnn.GRUCell:
            output = tf.concat([states[0], states[1]], 1)
        elif cell_type == tf.contrib.rnn.LSTMCell:
            output = tf.concat([states[0][1], states[1][1]], 1)
        else:
            # BUG FIX: previously fell through to an UnboundLocalError
            raise ValueError("unsupported cell type: {}".format(cell_type))

    return output
@registry.register_hparams
def sRUDER_NAACL18_HPARAMS():
    """Hyper-parameters used by `ruder_encoder`: single-layer LSTM of size
    100 with 0.5 output keep probability."""
    return tf.contrib.training.HParams(cell_type='lstm',
                                       cell_size=100,
                                       num_layers=1,
                                       keep_prob=0.5)
@registry.register_encoder
def ruder_encoder(inputs, lengths, is_training, hp=None):
    """Two-sequence serial bi-RNN encoder (Ruder et al., NAACL 2018 setup).

    Expects exactly two [batch, time, embed] input tensors with matching
    [batch] length tensors; returns a [batch, 2 * cell_size] encoding.
    """
    assert type(inputs) is list
    assert type(lengths) is list
    assert len(inputs) == len(lengths)
    assert len(inputs) == 2
    assert hp is not None

    num_input_dim = len(inputs[0].get_shape().as_list())
    assert num_input_dim == 3  # BATCH X TIME X EMBED
    num_length_dim = len(lengths[0].get_shape().as_list())
    assert num_length_dim == 1

    if hp.cell_type == 'gru':
        cell_type = tf.contrib.rnn.GRUCell
    elif hp.cell_type == 'lstm':
        cell_type = tf.contrib.rnn.LSTMCell
    else:
        raise ValueError(hp.cell_type)

    keep_prob = hp.keep_prob if is_training else 1.0
    # BUG FIX: `keep_prob` was computed but never forwarded, so output
    # dropout was silently disabled; pass it through to the RNN helper
    # (which applies it only when is_training and keep_prob < 1.0).
    code = lbirnn_stock(inputs,
                        lengths,
                        is_training=is_training,
                        num_layers=hp.num_layers,
                        cell_type=cell_type,
                        cell_size=hp.cell_size,
                        output_keep_prob=keep_prob)
    assert len(code.get_shape().as_list()) == 2
    return code
|
StarcoderdataPython
|
3204504
|
<filename>ax/service/tests/test_global_stopping.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, Tuple
import numpy as np
from ax.core.types import TParameterization
from ax.exceptions.core import OptimizationShouldStop
from ax.global_stopping.strategies.base import BaseGlobalStoppingStrategy
from ax.service.ax_client import AxClient
from ax.utils.common.testutils import TestCase
from ax.utils.measurement.synthetic_functions import branin
from ax.utils.testing.core_stubs import DummyGlobalStoppingStrategy
class TestGlobalStoppingIntegration(TestCase):
    """Integration tests for global stopping strategies through AxClient."""

    def get_ax_client_for_branin(
        self,
        global_stopping_strategy: BaseGlobalStoppingStrategy,
    ) -> AxClient:
        """
        Instantiates an AxClient for the branin experiment with the specified
        global stopping strategy.
        """
        client = AxClient(global_stopping_strategy=global_stopping_strategy)
        client.create_experiment(
            name="branin_test_experiment",
            parameters=[
                {"name": name, "type": "range", "bounds": bounds}
                for name, bounds in (
                    ("x1", [-5.0, 10.0]),
                    ("x2", [0.0, 15.0]),
                )
            ],
            objective_name="branin",
            minimize=True,
        )
        return client

    def evaluate(self, parameters: TParameterization) -> Dict[str, Tuple[float, float]]:
        """Evaluates the parameters for branin experiment."""
        point = np.array([parameters.get("x1"), parameters.get("x2")])
        return {"branin": (branin(point), 0.0)}

    def _run_trials(self, ax_client: AxClient, count: int) -> TParameterization:
        """Runs `count` full trial cycles; returns the last parameterization."""
        parameters = None
        for _ in range(count):
            parameters, trial_index = ax_client.get_next_trial()
            ax_client.complete_trial(
                trial_index=trial_index, raw_data=self.evaluate(parameters)
            )
        return parameters

    def test_global_stopping_integration(self):
        """
        Specifying a dummy global stopping strategy which stops
        the optimization after 3 trials are completed.
        """
        ax_client = self.get_ax_client_for_branin(
            global_stopping_strategy=DummyGlobalStoppingStrategy(
                min_trials=2, trial_to_stop=3
            )
        )

        # Run the first 3 iterations.
        self._run_trials(ax_client, 3)

        # The 4th iteration must be refused by the stopping strategy.
        expected = OptimizationShouldStop(message="Stop the optimization.")
        with self.assertRaises(OptimizationShouldStop) as cm:
            ax_client.get_next_trial()
        # Assert Exception's message is unchanged.
        self.assertEqual(cm.exception.message, expected.message)

        # The stopping strategy can be overruled with force=True.
        parameters, trial_index = ax_client.get_next_trial(force=True)
        self.assertIsNotNone(parameters)

    def test_min_trials(self):
        """
        Tests the min_trials mechanism of the stopping strategy; that is,
        the stopping strategy should not take effect before min_trials trials
        are completed.
        """
        ax_client = self.get_ax_client_for_branin(
            global_stopping_strategy=DummyGlobalStoppingStrategy(
                min_trials=3, trial_to_stop=2
            )
        )

        # Although trial_to_stop=2, min_trials=3 allows a 3rd iteration.
        parameters = self._run_trials(ax_client, 3)
        self.assertIsNotNone(parameters)

        # Now the strategy stops the 4th iteration.
        expected = OptimizationShouldStop(message="Stop the optimization.")
        with self.assertRaises(OptimizationShouldStop) as cm:
            ax_client.get_next_trial()
        # Assert Exception's message is unchanged.
        self.assertEqual(cm.exception.message, expected.message)
|
StarcoderdataPython
|
3299023
|
from .converter import BaseConverter
from .exceptions import AnnotationConversionError, MarshmallowAnnotationError
from .registry import TypeRegistry, field_factory, registry, scheme_factory
from .scheme import AnnotationSchema, AnnotationSchemaMeta
__version__ = "2.4.1"
__author__ = "<NAME>"
__license__ = "MIT"
|
StarcoderdataPython
|
3292825
|
<gh_stars>10-100
import numpy as np
import tensorflow as tf
import copy
import elbo.util as util
from elbo.joint_model import Model, MovingAverageStopper
from grammar import list_successors
from models import build_model
class ExperimentSettings(object):
    """Configuration bag for the model-structure beam search below."""

    def __init__(self):
        # Gaussian component priors (consumed by models.build_model;
        # see main() where these are overridden)
        self.gaussian_auto_ard = True
        self.constant_gaussian_std = None
        # observation-noise precision prior parameters
        self.noise_prec_alpha = np.float32(1.0)
        self.noise_prec_beta = np.float32(0.01)
        self.beta_prior_params = np.float32(1.0), np.float32(1.0)
        self.constant_noise_std = None
        self.dirichlet_alpha = np.float32(1.0)
        # maximum rank for low-rank structures
        self.max_rank = 6
        # optimization: Adam step size and convergence test (used in
        # score_model via jm.train)
        self.adam_rate = 0.02
        self.stopping_rule = MovingAverageStopper(decay=0.999, eps=0.5,
                                       min_steps=1000)
        # beam search over structures: beam width, prior probability of
        # stopping expansion (expand_beam), and ELBO sample count
        self.beamsize = 2
        self.p_stop_structure = 0.3
        self.n_elbo_samples = 50
def beamsearch_helper(beam, beamsize=2):
    """Keep the `beamsize` best-scoring models and expand each with its
    successors (the kept models themselves remain on the new beam).

    NOTE(review): `score` is not defined in this module -- presumably
    score_model was intended; confirm before relying on this helper.
    """
    scores = [score(model) for model in beam]
    # indices of beam entries ordered by decreasing score
    perm = sorted(range(len(beam)), key=lambda i: -scores[i])
    # BUG FIX: a Python list cannot be indexed by a sequence of indices
    # (`beam[perm]` raised TypeError); reorder element by element instead.
    sorted_beam = [beam[i] for i in perm]
    best = sorted_beam[:beamsize]
    newbeam = []
    for oldmodel in best:
        successors = list_successors(oldmodel)
        newbeam += [oldmodel, ] + successors
    return newbeam
def score_model(structure, X, settings):
    """Build the model for `structure`, train it on `X`, and return its
    Monte-Carlo ELBO estimate."""
    n_rows, n_cols = X.shape
    model = build_model(structure, (n_rows, n_cols), settings)
    model.observe(X)
    joint = Model(model)
    joint.train(silent=True,
                stopping_rule=settings.stopping_rule,
                adam_rate=settings.adam_rate)
    return joint.monte_carlo_elbo(n_samples=settings.n_elbo_samples)
def initialize_from(old_model, new_model):
    """Recursively copy q-distribution initializations from `old_model`
    into the matching input nodes of `new_model`.

    NOTE(review): `GaussianQistribution` is not imported in this module,
    so the isinstance test below raises NameError if ever reached --
    confirm the intended import.
    """
    for name, new_node in new_model.input_nodes.items():
        if name in old_model.input_nodes:
            old_node = old_model.input_nodes[name]
            initialize_from(old_node, new_node)

    old_qdist = old_model.q_distribution()
    # BUG FIX: the original asked old_model for the "new" q distribution,
    # so the copy below could never have targeted the new model.
    new_qdist = new_model.q_distribution()
    if isinstance(old_qdist, GaussianQistribution):
        # TODO figure out the right, elegant way to do this
        # see doc for discussion
        # new_qdist._intialize(old_qdist.mean, old_qdist.logodds)
        pass
def score_and_sort_beam(beam, X, settings):
    # Evaluate every (structure, structure_logp, _) candidate on X and
    # return the beam sorted by total score (model ELBO + structure prior),
    # best first.  The incoming third tuple element is ignored and replaced.
    scored_beam = []
    for (structure, structure_logp, model_score) in beam:
        score = score_model(structure, X, settings) + structure_logp
        print "score", score, "for structure", structure
        scored_beam.append((structure, structure_logp, score))
    sorted_beam = sorted(scored_beam, key = lambda a : -a[2])
    return sorted_beam
def expand_beam(beam, settings):
    """Return a new beam containing every entry of `beam` plus all of its
    structural successors, each successor's structure log-probability
    penalized for continuing and for the branching factor."""
    continue_logp = np.log(1.0 - settings.p_stop_structure)
    expanded = list(beam)
    for structure, structure_score, _unused in beam:
        successors = list_successors(structure)
        branch_logp = np.log(len(successors))
        for successor in successors:
            expanded.append(
                (successor, structure_score + continue_logp - branch_logp,
                 None))
    return expanded
def do_structure_search(X, settings):
base_logprob = np.log(settings.p_stop_structure)
structure_beam = [('g', base_logprob, 0.0),]
best_structures = score_and_sort_beam(structure_beam, X, settings)
old_best_score = -np.inf
best_score = best_structures[0][2]
i = 0
while best_score > old_best_score:
structure_beam = expand_beam(best_structures, settings)
scored_structures = score_and_sort_beam(structure_beam, X, settings)
best_structures = scored_structures[:settings.beamsize]
old_best_score = best_score
best_score = best_structures[0][2]
i+=1
print "epoch %d" % i, "beam", best_structures
def main():
    # Demo: sample a synthetic 50x20 dataset from a known structure, then
    # run the beam search to (hopefully) recover it.
    N = 50
    D = 20
    settings = ExperimentSettings()
    # small search space / fixed scales for a quick demonstration run
    settings.max_rank=2
    settings.gaussian_auto_ard = False
    settings.constant_gaussian_std = 1.0
    settings.constant_noise_std = 0.1

    #X = np.float32(np.random.randn(N, D))

    # ground-truth generative structure the search should rediscover
    m = build_model(('lowrank', ('chain', 'g'), 'g'), (N, D), settings)
    #m = build_model(('chain', 'g'), (N, D), settings)
    X = m.sample()
    #X /= np.std(X)

    best_structure = do_structure_search(X, settings)
if __name__ == "__main__":
    # Script entry point: run the demo structure search.
    main()
|
StarcoderdataPython
|
128823
|
<filename>profile_generator/model/faded.py
import math
from profile_generator.unit import Curve
def curve(offset: float, slope: float) -> Curve:
    """Build a "faded" tone curve.

    Below ``fade_end`` the curve is a cubic that starts at ``offset`` with
    gradient ``slope`` and blends into the identity; from ``fade_end``
    upward it is exactly ``y = x``.
    """
    fade_end = 3 * offset / (2 - 2 * slope)
    # cubic y = a*x^3 + b*x^2 + c*x + d meeting the identity at fade_end
    cubic_coef = (1 - offset / fade_end - slope) / math.pow(fade_end, 2)
    quadratic_coef = 0
    linear_coef = slope
    constant_coef = offset

    def _curve(x: float) -> float:
        if x >= fade_end:
            return x
        return (cubic_coef * math.pow(x, 3)
                + quadratic_coef * math.pow(x, 2)
                + linear_coef * x
                + constant_coef)

    return _curve
|
StarcoderdataPython
|
3313853
|
from osgeo import gdal
import os
import sys
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
# ---------------------------------------------------------------------------
# Build a split violin plot comparing predicted population distributions for
# Denmark under two scenarios ("With road" vs. "Original") in 2020/2050/2100.
#
# Bug fix vs. original: the scenario labels for the 'Original' 2020 slice
# were built with np.full(np_2100_fin.shape, ...) instead of the 2020
# raster's shape, which misaligns the Population/Year/Scenario columns
# whenever the rasters differ in size.
# ---------------------------------------------------------------------------
country = 'denmark'

ROOT = r'C:\Users\Niels\Documents\GitHub\PopNet'

if country == 'france':
    # France branch (dead while 'country' is hard-coded above; kept so the
    # script can be switched back by editing the single variable).
    geotif_2015 = gdal.Open(ROOT + r'\data\france\2015.tif')
    france_preds = {
        yr: gdal.Open(ROOT + r'\experiments\france\final\pred_%d.tif' % yr)
        for yr in range(2020, 2101, 10)
    }

if country == 'denmark':
    # Scenario label -> path template for the bounding-box prediction rasters.
    # Dict order ('With road' first) preserves the original concatenation
    # order, which determines hue ordering in the plot.
    scenario_paths = {
        'With road': ROOT + r'\experiments\denmark\road\outputs\bbox\bbox_pred_%s.tif',
        'Original': ROOT + r'\experiments\denmark\final\outputs\bbox\bbox_pred_%s.tif',
    }
    years = ('2020', '2050', '2100')

    population_parts = []
    year_parts = []
    scenario_parts = []
    for label, template in scenario_paths.items():
        for yr in years:
            dataset = gdal.Open(template % yr)
            values = np.array(dataset.GetRasterBand(1).ReadAsArray()).flatten()
            population_parts.append(values)
            # Label arrays are sized from *this* raster so the three columns
            # always stay aligned (this is the fix described above).
            year_parts.append(np.full(values.shape, yr))
            scenario_parts.append(np.full(values.shape, label))

    population = np.concatenate(population_parts)
    year = np.concatenate(year_parts)
    scenario = np.concatenate(scenario_parts)

df = pd.DataFrame({'Population': population, 'Year': year, 'Scenario': scenario})
df['Categories'] = df['Population']

sns.set(style='whitegrid')
fig = plt.figure()
ax = sns.violinplot(x=df["Year"], y=df["Population"], hue=df["Scenario"],
                    palette='Set1', cut=0, split=True, legend=False)
ax.set_title('Population Distribution - Road Scenario', fontsize=16)
plt.legend(loc='upper left')
plt.savefig('violin_plot.png', bbox_inches='tight')
plt.show()
plt.clf()
plt.close()
|
StarcoderdataPython
|
27574
|
from allauth.account.forms import LoginForm as AllauthLoginForm
class LoginForm(AllauthLoginForm):
    """Allauth login form with the default input placeholders removed."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Strip the placeholder attribute allauth sets on both inputs.
        for field_name in ("login", "password"):
            del self.fields[field_name].widget.attrs["placeholder"]
|
StarcoderdataPython
|
4815022
|
<reponame>opennode/opennode-tui<gh_stars>1-10
""" Forms for OpenNode Terminal User Interface """
import operator
from snack import Textbox, Button, GridForm
from opennode.cli.fields import FloatField, IntegerField, StringField
from opennode.cli.fields import PasswordField, IpField, RadioBarField
from opennode.cli.fields import BindMountsField, CheckboxField
from opennode.cli.fields import HostnameField
from opennode.cli.fields import validate_range
class Form(object):
    """Base class for snack-based TUI forms.

    Holds a screen, a title and an ordered list of field objects.  Each
    field must provide ``validate()`` (returning a truthy value when valid),
    an ``errors`` list, ``name`` and ``value()``.

    Fixes vs. original:
    * ``errors``/``data`` were *class-level* mutable attributes shared by
      every instance; they are now initialised per instance in __init__
      (class attributes kept as defaults for backward compatibility).
    * the bare ``reduce`` builtin (Python-2-only) is replaced with an
      explicit loop, which behaves identically and also runs on Python 3.
    """
    errors, data = [], {}  # class-level defaults, kept for backward compat

    def __init__(self, screen, title, fields):
        self.title, self.screen, self.fields = title, screen, fields
        self.errors = []  # per-instance: avoid shared mutable class state
        self.data = {}

    def validate(self):
        """Validate every field; collect errors or the field-name->value map.

        Note: every field's validate() is called (matching the original list
        comprehension), so field-level side effects still happen for all
        fields even after the first failure.
        """
        errors = []
        for field in self.fields:
            if not field.validate():
                errors.extend(field.errors)
        self.errors = errors
        if not self.errors:
            self.data = dict((field.name, field.value()) for field in self.fields)
            return True
        else:
            return False

    def display(self):
        """Render the form; subclasses override."""
        pass
class KvmForm(Form):
    """Form for creating a KVM virtual machine (memory, vcpu, hostname)."""
    def __init__(self, screen, title, settings):
        # Numeric fields are bounded by the template-provided min/max values.
        self.memory = FloatField("memory", settings["memory"], settings["memory_min"],
                                 settings["memory_max"])
        self.vcpu = IntegerField("vcpu", settings["vcpu"], settings["vcpu_min"], settings["vcpu_max"])
        self.hostname = StringField("hostname", settings.get("hostname", ""))
        Form.__init__(self, screen, title, [self.memory, self.vcpu, self.hostname])
    def display(self):
        """Render the form; return True unless the user chose 'Main menu'."""
        button_save, button_exit = Button("Create VM"), Button("Main menu")
        separator = (Textbox(20, 1, "", 0, 0), Textbox(20, 1, "", 0, 0))
        rows = [
            (Textbox(20, 1, "Memory size (GB):", 0, 0), self.memory),
            (Textbox(20, 1, "Memory min/max:", 0, 0),
             Textbox(20, 1, "%s / %s" % (self.memory.min_value, self.memory.max_value), 0, 0)),
            separator,
            (Textbox(20, 1, "Number of CPUs:", 0, 0), self.vcpu),
            (Textbox(20, 1, "CPU number min/max:", 0, 0),
             Textbox(20, 1, "%s / %s" % (self.vcpu.min_value, self.vcpu.max_value), 0, 0)),
            separator,
            (Textbox(20, 1, "Hostname:", 0, 0), self.hostname),
            separator,
            (button_save, button_exit)
        ]
        # Two-column grid: widget at column j, row i.
        form = GridForm(self.screen, self.title, 2, len(rows))
        for i, row in enumerate(rows):
            for j, cell in enumerate(row):
                form.add(cell, j, i)
        return form.runOnce() != button_exit
class OpenvzForm(Form):
    """Form for creating an OpenVZ container.

    Collects resource limits (memory/swap/cpu/disk/IO priority), bind
    mounts, networking and credentials.

    Fix vs. original source: the second password field was corrupted by
    anonymisation -- its field name was the placeholder '<PASSWORD>' and a
    bare '<PASSWORD>' token in the field list was a syntax error.  It is
    restored here as a proper 'passwd2' field.
    """
    def __init__(self, screen, title, settings):
        self.memory = FloatField("memory", settings["memory"], settings["memory_min"],
                                 settings["memory_max"])
        self.swap = FloatField("swap", settings["swap"], settings["swap_min"], settings["swap_max"])
        self.vcpu = FloatField("vcpu", settings["vcpu"], settings["vcpu_min"], settings["vcpu_max"])
        self.vcpulimit = IntegerField("vcpulimit", settings["vcpulimit"], settings["vcpulimit_min"],
                                      settings["vcpulimit_max"])
        self.disk = FloatField("disk", settings["disk"], settings["disk_min"], settings["disk_max"])
        self.ioprio = RadioBarField("ioprio", screen, [('Low ', 0, settings["ioprio"] == 0),
                                                       ('Default', 4, settings["ioprio"] == 4),
                                                       ('High ', 7, settings["ioprio"] == 7)])
        self.bind_mounts = BindMountsField("bind_mounts", settings["bind_mounts"], required=False)
        self.hostname = StringField("hostname", settings.get("hostname", ""))
        self.ip_address = IpField("ip_address", settings["ip_address"], display_name="IP address")
        self.nameserver = IpField("nameserver", settings["nameserver"])
        self.password = PasswordField("passwd", settings["passwd"], display_name="password")
        # Distinct field name so it does not collide with 'passwd' in the
        # collected form data; only used for the equality cross-check below.
        self.password2 = PasswordField("passwd2", settings["passwd"], display_name="password")
        self.ostemplate = StringField("ostemplate", settings["ostemplate"], display_name="OS template")
        self.startvm = CheckboxField("startvm", settings.get("startvm", 0), display_name="Start VM")
        self.onboot = CheckboxField("onboot", settings.get("onboot", 0), display_name="Start on boot")
        Form.__init__(self, screen, title, [self.memory, self.swap, self.vcpu,
                                            self.vcpulimit, self.disk, self.ioprio,
                                            self.bind_mounts, self.hostname,
                                            self.ip_address, self.nameserver,
                                            self.password, self.password2,
                                            self.ostemplate, self.startvm,
                                            self.onboot])
        self.settings = settings  # save passed parameters for convenience
    def display(self):
        """Render the creation form; return True unless 'Back' was chosen."""
        button_exit, button_save = Button("Back"), Button("Create VM")
        separator = (Textbox(20, 1, "", 0, 0), Textbox(20, 1, "", 0, 0))
        rows = [
            (Textbox(20, 1, "Memory size (GB):", 0, 0), self.memory),
            (Textbox(20, 1, "Memory min/max:", 0, 0),
             Textbox(20, 1, "%s / %s" % (self.memory.min_value, self.memory.max_value), 0, 0)),
            (Textbox(20, 1, "VSwap size (GB):", 0, 0), self.swap),
            (Textbox(20, 1, "VSwap min/max:", 0, 0),
             Textbox(20, 1, "%s / %s" % (self.swap.min_value, self.swap.max_value), 0, 0)),
            (Textbox(20, 1, "Number of CPUs:", 0, 0), self.vcpu),
            (Textbox(20, 1, "CPU number min/max:", 0, 0),
             Textbox(20, 1, "%s / %s" % (self.vcpu.min_value, self.vcpu.max_value), 0, 0)),
            (Textbox(20, 1, "CPU usage limit (%):", 0, 0), self.vcpulimit),
            (Textbox(20, 1, "CPU usage min/max:", 0, 0),
             Textbox(20, 1, "%s / %s" % (self.vcpulimit.min_value, self.vcpulimit.max_value), 0, 0)),
            (Textbox(20, 1, "Disk size (GB):", 0, 0), self.disk),
            (Textbox(20, 1, "Disk size min/max:", 0, 0),
             Textbox(20, 1, "%s / %s" % (self.disk.min_value, self.disk.max_value), 0, 0)),
            (Textbox(20, 1, "IO Priority:", 0, 0), self.ioprio),
            (Textbox(20, 1, "Bind mounts:", 0, 0), self.bind_mounts),
            (Textbox(20, 1, "", 0, 0),
             Textbox(20, 1, "/src1,/dst1;/srcN,..", 0, 0)),
            (Textbox(20, 1, "Hostname:", 0, 0), self.hostname),
            (Textbox(20, 1, "IP-address:", 0, 0), self.ip_address),
            (Textbox(20, 2, "Nameserver:", 0, 0), self.nameserver),
            (Textbox(20, 1, "Root password:", 0, 0), self.password),
            (Textbox(20, 2, "Root password x2:", 0, 0), self.password2),
            (Textbox(20, 2, "OS Template:", 0, 0), self.ostemplate),
            (self.startvm, self.onboot),
            separator,
            (button_save, button_exit)
        ]
        form = GridForm(self.screen, self.title, 2, len(rows))
        for i, row in enumerate(rows):
            for j, cell in enumerate(row):
                form.add(cell, j, i)
        return form.runOnce() != button_exit
    def validate(self):
        """Field validation plus password-equality and bind-mount checks."""
        Form.validate(self)
        if (self.password.validate() and self.password2.validate() and
            self.password.value() != self.password2.value()):
            self.errors.append(("passwd", "Passwords don't match."))
        # NOTE(review): bind_mounts.validate() is treated here as returning a
        # list of (field, message) errors when invalid -- the opposite of the
        # boolean contract Form.validate() assumes for other fields; confirm
        # against BindMountsField before changing.
        bm_valid = self.bind_mounts.validate()
        if bm_valid:
            error_str = "\n".join([s[1] for s in bm_valid])
            self.errors.append(("bind_mounts", "%s" % error_str))
        return not self.errors
class OpenvzTemplateForm(Form):
    """Form for defining an OpenVZ template (resources plus optional bounds)."""
    def __init__(self, screen, title, settings):
        # min/max bounds are optional; missing values default to "".
        self.memory = FloatField("memory", settings["memory"])
        self.memory_min = FloatField("memory_min", settings.get("memory_min", ""),
                                     display_name="min memory", required=False)
        self.memory_max = FloatField("memory_max", settings.get("memory_max", ""),
                                     display_name="max memory", required=False)
        self.vcpu = FloatField("vcpu", settings["vcpu"])
        self.vcpu_min = FloatField("vcpu_min", settings.get("vcpu_min", ""),
                                   display_name="min vcpu", required=False)
        self.vcpu_max = FloatField("vcpu_max", settings.get("vcpu_max", ""),
                                   display_name="max vcpu", required=False)
        self.disk = FloatField("disk", settings["disk"])
        self.ostemplate = StringField("ostemplate", settings.get("ostemplate", ""))
        Form.__init__(self, screen, title, [self.memory, self.memory_min, self.memory_max, self.vcpu,
                                            self.vcpu_min, self.vcpu_max, self.disk, self.ostemplate])
    def display(self):
        """Render the template form; return True unless 'Back' was chosen."""
        button_save, button_exit = Button("Create"), Button("Back")
        separator = (Textbox(20, 1, "", 0, 0), Textbox(20, 1, "", 0, 0))
        rows = [
            (Textbox(20, 1, "Memory size (GB):", 0, 0), self.memory),
            (Textbox(20, 1, "Min memory size (GB):", 0, 0), self.memory_min),
            (Textbox(20, 1, "Max memory size (GB):", 0, 0), self.memory_max),
            separator,
            (Textbox(20, 1, "Number of CPUs:", 0, 0), self.vcpu),
            (Textbox(20, 1, "Min number of CPUs:", 0, 0), self.vcpu_min),
            (Textbox(20, 1, "Max number of CPUs:", 0, 0), self.vcpu_max),
            separator,
            (Textbox(20, 1, "Disk size (GB):", 0, 0), self.disk),
            separator,
            (Textbox(20, 1, "OS template:", 0, 0), self.ostemplate),
            separator,
            (button_exit, button_save)
        ]
        # Two-column grid: widget at column j, row i.
        form = GridForm(self.screen, self.title, 2, len(rows))
        for i, row in enumerate(rows):
            for j, cell in enumerate(row):
                form.add(cell, j, i)
        return form.runOnce() != button_exit
    def validate(self):
        """Field validation plus range checks (value within optional bounds)."""
        if Form.validate(self):
            self.errors.extend(validate_range("memory", self.memory.value(),
                                              self.memory_min.value(),
                                              self.memory_max.value(), float))
            self.errors.extend(validate_range("vcpu", self.vcpu.value(),
                                              self.vcpu_min.value(),
                                              self.vcpu_max.value(), int))
        return not self.errors
class KvmTemplateForm(Form):
    """Form for defining a KVM template (resources, admin user, password).

    Fixes vs. original source:
    * the two password fields carried the anonymisation placeholder name
      '<PASSWORD>'; restored as distinct 'passwd' / 'passwd2' names so they
      do not collide,
    * validate() used ``self.errors.extend(("passwd", "..."))`` which adds
      the two strings as *separate* error entries; ``append`` stores the
      (field, message) tuple whole, matching OpenvzForm.validate().
    """
    def __init__(self, screen, title, settings):
        self.memory = FloatField("memory", settings["memory"])
        self.memory_min = FloatField("memory_min", settings.get("memory_min", ""),
                                     display_name="min memory", required=False)
        self.memory_max = FloatField("memory_max", settings.get("memory_max", ""),
                                     display_name="max memory", required=False)
        self.vcpu = FloatField("vcpu", settings["vcpu"])
        self.vcpu_min = FloatField("vcpu_min", settings.get("vcpu_min", ""),
                                   display_name="min vcpu", required=False)
        self.vcpu_max = FloatField("vcpu_max", settings.get("vcpu_max", ""),
                                   display_name="max vcpu", required=False)
        self.username = StringField("username", settings.get("username", ''), display_name="username")
        self.password = PasswordField("passwd", settings.get("passwd", ''), display_name="password")
        self.password2 = PasswordField("passwd2", settings.get("passwd", ''), display_name="password")
        # NOTE(review): password2 is deliberately not in the field list below
        # (it is only cross-checked in validate()); confirm this matches the
        # data dict the callers expect.
        Form.__init__(self, screen, title, [self.memory, self.memory_min, self.memory_max,
                                            self.vcpu, self.vcpu_min, self.vcpu_max,
                                            self.username, self.password])
    def display(self):
        """Render the template form; return True unless 'Back' was chosen."""
        button_save, button_exit = Button("Create"), Button("Back")
        separator = (Textbox(20, 1, "", 0, 0), Textbox(20, 1, "", 0, 0))
        rows = [
            (Textbox(20, 1, "Memory size (GB):", 0, 0), self.memory),
            (Textbox(20, 1, "Min memory size (GB):", 0, 0), self.memory_min),
            (Textbox(20, 1, "Max memory size (GB):", 0, 0), self.memory_max),
            separator,
            (Textbox(20, 1, "Number of CPUs:", 0, 0), self.vcpu),
            (Textbox(20, 1, "Min number of CPUs:", 0, 0), self.vcpu_min),
            (Textbox(20, 1, "Max number of CPUs:", 0, 0), self.vcpu_max),
            separator,
            (Textbox(20, 1, "Default admin:", 0, 0), self.username),
            separator,
            (Textbox(20, 1, "Root password:", 0, 0), self.password),
            (Textbox(20, 1, "Root password x2:", 0, 0), self.password2),
            separator,
            (button_exit, button_save)
        ]
        form = GridForm(self.screen, self.title, 2, len(rows))
        for i, row in enumerate(rows):
            for j, cell in enumerate(row):
                form.add(cell, j, i)
        return form.runOnce() != button_exit
    def validate(self):
        """Range checks for memory/vcpu plus the password-equality check."""
        if Form.validate(self):
            if (self.memory_min.value() and self.memory_max.value() and
                float(self.memory_min.value()) > float(self.memory_max.value())):
                self.errors.extend([("memory", "Min memory exceeds max memory value.")])
            else:
                self.errors.extend(validate_range("memory", self.memory.value(),
                                                  self.memory_min.value(),
                                                  self.memory_max.value(), float))
            self.errors.extend(validate_range("vcpu", self.vcpu.value(),
                                              self.vcpu_min.value(),
                                              self.vcpu_max.value(), int))
            if (self.password.validate() and self.password2.validate() and
                self.password.value() != self.password2.value()):
                self.errors.append(("passwd", "Passwords don't match."))
        return not self.errors
class OpenvzModificationForm(Form):
    """Form for editing the resources of an existing OpenVZ container.

    Settings arrive in MB (memory/swap/diskspace); the form presents them in
    GB, hence the /1024 conversions below.
    """
    def __init__(self, screen, title, settings):
        self.settings = settings
        # Stored values are in MB; present them as GB in the UI.
        self.memory = FloatField("memory", float(settings["memory"]) / 1024)
        self.swap = FloatField("swap", float(settings["swap"]) / 1024)
        self.vcpu = IntegerField("vcpu", settings["vcpu"])
        self.bootorder = IntegerField("bootorder", settings.get("bootorder"), required=False)
        self.disk = FloatField("diskspace", float(settings["diskspace"]["/"]) / 1024)
        self.ioprio = RadioBarField("ioprio", screen, [('Low ', 0, settings["ioprio"] == 0),
                                                       ('Default', 4, settings["ioprio"] == 4),
                                                       ('High ', 7, settings["ioprio"] == 7)])
        self.bind_mounts = BindMountsField("bind_mounts", settings["bind_mounts"], required=False)
        self.vcpulimit = IntegerField("vcpulimit", settings["vcpulimit"],
                                      min_value=0, max_value=100)
        self.onboot = CheckboxField("onboot", settings.get("onboot", 0), display_name="Start on boot")
        self.ctid = IntegerField('ctid', settings['ctid'],
                                 display_name='VEID', required=False)
        self.hostname = StringField("hostname", settings.get("name", ""))
        Form.__init__(self, screen, title, [self.memory, self.vcpu, self.disk, self.ioprio, self.hostname,
                                            self.bind_mounts, self.swap, self.onboot, self.bootorder,
                                            self.vcpulimit, self.ctid])
    def display(self):
        """Render the modification form; return True unless 'Back' was chosen."""
        button_save, button_exit = Button("Update"), Button("Back")
        separator = (Textbox(20, 1, "", 0, 0), Textbox(20, 1, "", 0, 0))
        rows = [
            (Textbox(20, 1, "Memory size (GB):", 0, 0), self.memory),
            separator,
            (Textbox(20, 1, "Swap size (GB):", 0, 0), self.swap),
            separator,
            (Textbox(20, 1, "Nr. of CPUs:", 0, 0), self.vcpu),
            separator,
            (Textbox(20, 1, "CPU usage limit (%):", 0, 0), self.vcpulimit),
            separator,
            (Textbox(20, 1, "Disk size (GB):", 0, 0), self.disk),
            separator,
            (Textbox(20, 1, "IO Priority:", 0, 0), self.ioprio),
            separator,
            (Textbox(20, 1, "Bind mounts:", 0, 0), self.bind_mounts),
            (Textbox(20, 1, "", 0, 0),
             Textbox(20, 1, "/src1,/dst1;/srcN,..", 0, 0)),
            separator,
            (Textbox(20, 1, "Hostname:", 0, 0), self.hostname),
            separator,
            (Textbox(20, 1, "", 0, 0), self.onboot),
            separator,
            (Textbox(20, 1, "Boot order:", 0, 0), self.bootorder),
            separator,
            (Textbox(20, 1, "VEID:", 0, 0), self.ctid),
            separator,
            (button_exit, button_save)
        ]
        # Two-column grid: widget at column j, row i.
        form = GridForm(self.screen, self.title, 2, len(rows))
        for i, row in enumerate(rows):
            for j, cell in enumerate(row):
                form.add(cell, j, i)
        return form.runOnce() != button_exit
    def validate(self):
        """Field validation plus bind-mount and template-minimum checks."""
        # TODO disallow decrease of disk size, which would break OS
        Form.validate(self)
        # NOTE(review): bind_mounts.validate() is treated as returning a list
        # of (field, message) errors when invalid -- confirm against
        # BindMountsField before changing.
        bm_valid = self.bind_mounts.validate()
        if bm_valid:
            error_str = "\n".join([s[1] for s in bm_valid])
            self.errors.append(("bind_mounts", "%s" % error_str))
        if self.memory.value() < self.settings["memory_min"]:
            err_msg = ("Memory size can not be lower than minimum defined in template: %s GB" %
                       self.settings["memory_min"])
            self.errors.append(("memory", err_msg))
        return not self.errors
class OpenVZMigrationForm(Form):
    """Form asking for a migration target host and the live-migration flag."""

    def __init__(self, screen, title):
        self.target = HostnameField("target host", '')
        self.live = CheckboxField("live", default=0, display_name='(risky)')
        Form.__init__(self, screen, title, [self.target, self.live])

    def display(self):
        """Render the migration form; return True unless 'Back' was chosen."""
        button_save, button_exit = Button("Migrate"), Button("Back")
        blank = (Textbox(20, 1, "", 0, 0), Textbox(20, 1, "", 0, 0))
        rows = [
            (Textbox(20, 1, "Hostname/IP:", 0, 0), self.target),
            blank,
            (Textbox(20, 1, "Live migration:", 0, 0), self.live),
            blank,
            (button_save, button_exit),
        ]
        grid = GridForm(self.screen, self.title, 2, len(rows))
        for row_idx, row in enumerate(rows):
            for col_idx, widget in enumerate(row):
                grid.add(widget, col_idx, row_idx)
        return grid.runOnce() != button_exit

    def validate(self):
        """Field-level validation only; no cross-field checks."""
        Form.validate(self)
        return not self.errors
class GenericTemplateEditForm(Form):
    """Base form for editing an existing template's name and resource bounds.

    Subclasses customise the field list and layout by overriding
    _define_fields and _define_view.
    """
    # Shared blank grid row used between field groups.
    separator = (Textbox(20, 1, "", 0, 0), Textbox(20, 1, "", 0, 0))
    def _define_fields(self, settings):
        """Create the field objects; return the ordered list for Form.__init__."""
        self.memory = FloatField("memory", settings["memory"])
        self.memory_min = FloatField("memory_min", settings.get("memory_min", ""),
                                     display_name="min memory", required=False)
        self.vcpu = FloatField("vcpu", settings["vcpu"])
        self.vcpu_min = FloatField("vcpu_min", settings.get("vcpu_min", ""),
                                   display_name="min vcpu", required=False)
        self.template_name = StringField('template_name', settings.get('template_name'),
                                         display_name='template name', required=False)
        return [self.memory,
                self.memory_min,
                self.vcpu,
                self.vcpu_min,
                self.template_name]
    def _define_view(self, button_save, button_exit):
        """Return the grid rows; subclasses insert extra rows before the buttons."""
        rows = [
            (Textbox(20, 1, "Template name:", 0, 0), self.template_name),
            self.separator,
            (Textbox(20, 1, "Memory size (GB):", 0, 0), self.memory),
            (Textbox(20, 1, "Min memory size (GB):", 0, 0), self.memory_min),
            self.separator,
            (Textbox(20, 1, "Number of CPUs:", 0, 0), self.vcpu),
            (Textbox(20, 1, "Min number of CPUs:", 0, 0), self.vcpu_min),
            self.separator,
            (button_exit, button_save)
        ]
        return rows
    def __init__(self, screen, title, settings):
        self.settings = settings
        super(GenericTemplateEditForm, self).__init__(screen, title,
                                                      self._define_fields(settings))
    def display(self):
        """Render the edit form; return True unless 'Cancel' was chosen."""
        button_save, button_exit = Button("Edit"), Button("Cancel")
        rows = self._define_view(button_save, button_exit)
        form = GridForm(self.screen, self.title, 2, len(rows))
        for i, row in enumerate(rows):
            for j, cell in enumerate(row):
                form.add(cell, j, i)
        return form.runOnce() != button_exit
class KvmTemplateEditForm(GenericTemplateEditForm):
    """Template edit form for KVM: adds admin username and password fields.

    Fix vs. original source: both password fields carried the anonymisation
    placeholder '<PASSWORD>' as their field name, so the two fields collided
    in the collected form data; restored as distinct 'passwd' / 'passwd2'.
    """
    def _define_fields(self, settings):
        """Extend the base field list with username and the password pair."""
        self.username = StringField('username', settings.get('username', ''),
                                    display_name='username', required=False)
        self.password = PasswordField('passwd', settings['passwd'],
                                      display_name='password', required=True)
        self.password2 = PasswordField('passwd2', settings['passwd'],
                                       display_name='password', required=True)
        fields = super(KvmTemplateEditForm, self)._define_fields(settings)
        fields.append(self.username)
        fields.append(self.password)
        fields.append(self.password2)
        return fields
    def _define_view(self, button_save, button_exit):
        """Insert the admin/password rows just above the button row."""
        rows = super(KvmTemplateEditForm, self)._define_view(button_save, button_exit)
        rows.insert(-2, self.separator)
        rows.insert(-2, (Textbox(20, 1, 'Default admin:', 0, 0), self.username))
        rows.insert(-2, self.separator)
        rows.insert(-2, (Textbox(20, 1, 'Password:', 0, 0), self.password))
        rows.insert(-2, (Textbox(20, 1, 'Password x2:', 0, 0), self.password2))
        return rows
|
StarcoderdataPython
|
3229844
|
#!/usr/bin/env python3
import json
import os
import subprocess
import argparse
import getpass
from base64 import b64encode
from collections import namedtuple
def get_args(argv=None):
    """Parse command line arguments.

    Args:
        argv: Optional list of argument strings. When None (the default,
            preserving the original behaviour) arguments are read from
            sys.argv - the parameter exists so the parser can be tested.

    Returns:
        argparse.Namespace with ``url``, ``user`` and ``password`` attributes.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--url", default="", help="the Jamf Pro Server URL")
    parser.add_argument(
        "--user", default="", help="a user with the rights to delete a policy"
    )
    parser.add_argument(
        "--password",
        default="",
        help="password of the user with the rights to delete a policy",
    )
    args = parser.parse_args(argv)
    return args
def make_tmp_dir(tmp_dir="/tmp/jamf_upload"):
    """Ensure the temporary working directory exists and return its path.

    Uses os.makedirs with exist_ok=True, which is race-free (no window
    between the existence check and the creation, unlike the previous
    exists()/mkdir() pair) and also creates any missing parent directories.
    """
    os.makedirs(tmp_dir, exist_ok=True)
    return tmp_dir
def request(url, auth):
    """POST to *url* with HTTP Basic *auth* via curl and collect the reply.

    curl writes the response headers and body to files under the tmp dir,
    which are then read back.  Returns an object with attributes:
      headers      - list of stripped response-header lines
      status_code  - int parsed from the HTTP/1.1 status line (only set
                     when such a line is found)
      output       - JSON-decoded body (only set when the body is non-empty)

    NOTE(review): ``r`` is a namedtuple *class*, not an instance;
    headers/status_code/output are attached as class attributes.  Reading an
    attribute that was never assigned yields the namedtuple field descriptor
    rather than a value - callers should not assume all three exist.
    """
    tmp_dir = make_tmp_dir()
    headers_file = os.path.join(tmp_dir, "curl_headers_from_jamf_upload.txt")
    output_file = os.path.join(tmp_dir, "curl_output_from_jamf_upload.txt")
    # cookie_jar = os.path.join(tmp_dir, "curl_cookies_from_jamf_upload.txt")
    # build the curl command
    curl_cmd = [
        "/usr/bin/curl",
        "-X",
        "POST",
        "-D",
        headers_file,
        "--output",
        output_file,
        url,
    ]
    curl_cmd.extend(["--header", "authorization: Basic {}".format(auth)])
    curl_cmd.extend(["--header", "Content-type: application/json"])
    print("\ncurl command:\n{}".format(" ".join(curl_cmd)))
    print("(note this may omit essential quotation marks - do not copy-and-paste!")
    try:
        subprocess.check_output(curl_cmd)
    except subprocess.CalledProcessError:
        # curl exited non-zero; nothing useful to parse, so bail out.
        print(f"ERROR: possible URL error ({url}) or timeout.")
        exit()
    r = namedtuple("r", ["headers", "status_code", "output"])
    try:
        with open(headers_file, "r") as file:
            headers = file.readlines()
        r.headers = [x.strip() for x in headers]
        for header in r.headers:
            # Skip "100 Continue" so the interim status does not mask the
            # final one.
            # NOTE(review): matching on "HTTP/1.1" misses HTTP/2 status
            # lines ("HTTP/2 200") - confirm the server speaks HTTP/1.1.
            if "HTTP/1.1" in header and "Continue" not in header:
                r.status_code = int(header.split()[1])
    except IOError:
        print("WARNING: {} not found".format(headers_file))
    if os.path.exists(output_file) and os.path.getsize(output_file) > 0:
        with open(output_file, "rb") as file:
            r.output = json.load(file)
    else:
        print(f"No output from request ({output_file} not found or empty)")
    return r
def encode_creds(jamf_user, jamf_password):
    """Return the HTTP Basic auth token: base64("user:password").

    Security fix: the previous version printed the encoded credentials to
    stdout.  Base64 is trivially reversible, so that leaked the password
    into terminal scrollback and logs; the print was removed.
    """
    credentials = f"{jamf_user}:{jamf_password}"
    enc_creds_bytes = b64encode(credentials.encode("utf-8"))
    enc_creds = str(enc_creds_bytes, "utf-8")
    return enc_creds
def get_creds_from_args(args):
    """Resolve the Jamf Pro URL and credentials, prompting for any value not
    supplied on the command line, and return (url, base64 Basic-auth string)."""
    # CLI arguments take precedence; fall back to interactive prompts.
    jamf_url = args.url or input("Enter Jamf Pro Server URL : ")
    jamf_user = args.user or input(
        "Enter a Jamf Pro user with API rights to upload a package : "
    )
    jamf_password = args.password or getpass.getpass(
        "Enter the password for '{}' : ".format(jamf_user)
    )
    # encode the username and password into a basic auth b64 encoded string
    # so that we can get the session token
    enc_creds = encode_creds(jamf_user, jamf_password)
    return jamf_url, enc_creds
def get_uapi_token(jamf_url, enc_creds):
    """get a token for the Jamf Pro API"""
    r = request("{}/uapi/auth/tokens".format(jamf_url), enc_creds)
    if r.status_code == 200:
        try:
            token = str(r.output["token"])
        except KeyError:
            print(f"ERROR: No token received (status code={r.status_code})")
            return
        print(f"Session token received (status code={r.status_code})")
        return token
    print(f"ERROR: No token received (status code={r.status_code})")
    return
def main():
    """Parse CLI args, resolve credentials, then fetch and print an API token."""
    # parse the command line arguments
    args = get_args()
    # grab values from a prefs file if supplied
    jamf_url, enc_creds = get_creds_from_args(args)
    # now get the session token
    token = get_uapi_token(jamf_url, enc_creds)
    print(token)


# Run the workflow only when executed as a script, not on import.
if __name__ == "__main__":
    main()
|
StarcoderdataPython
|
3215701
|
# -*- coding: utf-8 -*-
from hashlib import sha512
from openprocurement.api.utils import (
json_view,
APIResource,
save_tender,
ROUTE_PREFIX,
context_unpack
)
from openprocurement.tender.openeu.utils import qualifications_resource
from openprocurement.relocation.api.utils import (
extract_transfer, update_ownership, save_transfer
)
from openprocurement.relocation.api.validation import (
validate_ownership_data, validate_complaint_accreditation_level
)
@qualifications_resource(name='Qualification complaint ownership',
                         path='/tenders/{tender_id}/qualifications/{qualification_id}/complaints/{complaint_id}/ownership',
                         description="Qualification complaint Ownership")
class QualificationComplaintOwnershipResource(APIResource):
    """Endpoint that transfers ownership of a qualification complaint to the
    broker presenting a valid transfer token."""

    @json_view(permission='create_complaint',
               validators=(validate_complaint_accreditation_level,
                           validate_ownership_data,))
    def post(self):
        """Validate the transfer token, switch complaint ownership, and mark
        the transfer as consumed for this complaint's location."""
        complaint = self.request.context
        tender = self.request.validated['tender']
        # NOTE(review): award_id duplicates qualification_id and is never
        # used afterwards - candidate for removal.
        award_id = self.request.validated['qualification_id']
        qualification_id = self.request.validated['qualification_id']
        data = self.request.validated['ownership_data']
        # Stored transfer_token is the SHA-512 hex digest of the transfer
        # secret; compare against the digest of the supplied value.
        # NOTE(review): sha512() is fed a str - this is Python 2 era code;
        # Python 3 would require bytes here.
        if complaint.transfer_token == sha512(data['transfer']).hexdigest():
            location = self.request.route_path('Tender EU Qualification Complaints', tender_id=tender.id, qualification_id=qualification_id, complaint_id=complaint.id)
            location = location[len(ROUTE_PREFIX):]  # strips /api/<version>
            transfer = extract_transfer(self.request, transfer_id=data['id'])
            # A transfer may only ever be used for a single resource.
            if transfer.get('usedFor') and transfer.get('usedFor') != location:
                self.request.errors.add('body', 'transfer', 'Transfer already used')
                self.request.errors.status = 403
                return
        else:
            self.request.errors.add('body', 'transfer', 'Invalid transfer')
            self.request.errors.status = 403
            return
        # Token and transfer are valid: re-point ownership and persist both
        # the transfer relation and the tender.
        update_ownership(complaint, transfer)
        transfer.usedFor = location
        self.request.validated['transfer'] = transfer
        if save_transfer(self.request):
            self.LOGGER.info('Updated transfer relation {}'.format(transfer.id),
                             extra=context_unpack(self.request, {'MESSAGE_ID': 'transfer_relation_update'}))
            if save_tender(self.request):
                self.LOGGER.info('Updated qualification {} complaint {} ownership of tender {}'.format(complaint.id, qualification_id, tender.id),
                                 extra=context_unpack(self.request, {'MESSAGE_ID': 'qualification_complaint_ownership_update'}, {'complaint_id': complaint.id, 'qualification_id': qualification_id, 'tender_id': tender.id}))
                return {'data': complaint.serialize('view')}
|
StarcoderdataPython
|
3308010
|
<gh_stars>10-100
import os
from glob import glob
import yaml
from jinja2 import Environment, FileSystemLoader, StrictUndefined
BASE_PATH = os.path.dirname(__file__) or "."
LIST_EXAMPLES_PATH = "{}/../test/unit/test_parser/list/".format(BASE_PATH)
LIST_EXAMPLES = sorted([x for x in glob("{}/*".format(LIST_EXAMPLES_PATH))])
LEAF_EXAMPLES_PATH = "{}/../test/unit/test_parser/leaf/".format(BASE_PATH)
LEAF_EXAMPLES = sorted([x for x in glob("{}/*".format(LEAF_EXAMPLES_PATH))])
def indent_text(text, indent=4):
    """Prefix every line of *text* with *indent* spaces."""
    pad = " " * indent
    return "\n".join(pad + line for line in text.splitlines())
def get_examples(EXAMPLES):
    """Load (mocked output, parsed example) pairs from the given directories.

    Each directory must contain the raw device output in ``mocked.txt`` and
    the expected parsed structure in ``example.yaml``.
    """
    examples = []
    for e in EXAMPLES:
        with open("{}/mocked.txt".format(e), "r") as f:
            mocked = f.read()
        with open("{}/example.yaml".format(e), "r") as f:
            # safe_load: yaml.load without an explicit Loader is deprecated
            # and can construct arbitrary Python objects from YAML tags.
            example = yaml.safe_load(f)
        examples.append({"mocked": mocked, "example": example})
    return examples
def get_directives():
    """Load the parser-directive definitions shipped next to this script."""
    with open("{}/_dynamic/parser_directives.yaml".format(BASE_PATH), "r") as f:
        # safe_load: yaml.load without an explicit Loader is deprecated
        # and can construct arbitrary Python objects from YAML tags.
        return yaml.safe_load(f)
def render(template_file, **kwargs):
    """Render *template_file* (relative to BASE_PATH) with the given context.

    Registers two extra Jinja filters: ``to_yaml`` and ``indent``.
    StrictUndefined makes missing context variables fail loudly.
    """
    env = Environment(
        loader=FileSystemLoader(BASE_PATH), trim_blocks=True, undefined=StrictUndefined
    )
    env.filters["to_yaml"] = lambda obj: yaml.dump(obj, default_flow_style=False)
    env.filters["indent"] = indent_text
    return env.get_template(template_file).render(**kwargs)
def save_text(text, filename):
    """Write *text* to *filename*, interpreted relative to BASE_PATH."""
    destination = "{}/{}".format(BASE_PATH, filename)
    with open(destination, "w+") as handle:
        handle.write(text)
# Regenerate the dynamic documentation pages from templates and test fixtures.
if __name__ == "__main__":
    # List-parser examples.
    examples = get_examples(LIST_EXAMPLES)
    text = render("_dynamic/examples_list.j2", examples=examples)
    save_text(text, "developers/parsers/examples_list.rst")
    # Leaf-parser examples.
    examples = get_examples(LEAF_EXAMPLES)
    text = render("_dynamic/examples_leaf.j2", examples=examples)
    save_text(text, "developers/parsers/examples_leaf.rst")
    # Parser directive reference.
    directives = get_directives()
    text = render("_dynamic/parsers.j2", directives=directives)
    save_text(text, "developers/parsers/dynamic_directives.rst")
|
StarcoderdataPython
|
3261990
|
<gh_stars>0
# Build configuration for custom CUDA ops (fused bias/activation and
# upfirdn2d kernels), compiled through PyTorch's C++/CUDA extension machinery.
from setuptools import setup, Extension
from torch.utils import cpp_extension
import os

# NOTE(review): module_path is computed but never used below.
module_path = os.path.dirname(__file__)

setup(name='op_cpp',
      ext_modules=[cpp_extension.CUDAExtension(name="fused",
                                               sources=["fused_bias_act.cpp", "fused_bias_act_kernel.cu"], include_dirs=cpp_extension.include_paths(),),
                   cpp_extension.CUDAExtension(name="upfirdn2d",
                                               sources=["upfirdn2d.cpp", "upfirdn2d_kernel.cu"], include_dirs=cpp_extension.include_paths(),),
                   ],
      cmdclass={'build_ext': cpp_extension.BuildExtension})
|
StarcoderdataPython
|
1666332
|
#!/usr/bin/env python
# encoding: utf-8
from __future__ import unicode_literals
import itertools
import datetime
import logging
from collections import OrderedDict, namedtuple
import scraperwiki
from table_names import _RAW_SALES_TABLE
# Human-readable labels for the customer-sector codes in the raw sales data.
_SECTOR_NAME = {
    'central-gov': 'Central government',
    'local-gov': 'Local government',
    'not-for-profit': 'Not for profit',
    'wider-public-sector': 'Other wider public sector',
    'unknown-sector': 'Unknown',
}
# Labels for the supplier size classification codes.
_SME_LARGE_NAME = {
    'sme': 'Small and medium enterprises',
    'large': 'Large enterprises',
}
# Labels for the framework lot codes.
_LOT_NAME = {
    'iaas': 'Cloud Infrastructure as a Service (IaaS)',
    'paas': 'Cloud Platform as a Service (PaaS)',
    'saas': 'Cloud Software as a Service (SaaS)',
    'css': 'Cloud Support Services (CSS)',
}
class SpendingGroupKey(namedtuple('SpendingGroupKey',
                                  'month,year,lot,sector,sme_large')):
    """
    A 'spending group' is a specific combination of month, year, lot (type),
    government sector and sme/large.
    """
    def key(self):
        """Stable string identifier, e.g. ``2014_03_lotiaas_central-gov_sme``."""
        return "{year}_{month:02d}_lot{lot}_{sector}_{sme_large}".format(
            year=self.year, month=self.month, lot=self.lot,
            sector=self.sector, sme_large=self.sme_large)

    def __str__(self):
        return str(self.key())

    def __unicode__(self):
        return unicode(self.key())
class CompanyTypeKey(namedtuple('CompanyTypeKey',
                                'month,year,sme_large')):
    """Key for monthly aggregates split only by supplier size."""

    def key(self):
        """Stable string identifier, e.g. ``2014_03_sme``."""
        return "{year}_{month:02d}_{sme_large}".format(
            year=self.year, month=self.month, sme_large=self.sme_large)

    def __str__(self):
        return str(self.key())

    def __unicode__(self):
        return unicode(self.key())
def calculate_aggregated_sales(keys):
    """Yield one aggregated-sales row per spending-group key."""
    for key in keys:
        monthly_spend, transaction_count = get_monthly_spend_and_count(key)
        yield make_aggregated_sales_row(
            key,
            monthly_spend,
            transaction_count,
            get_cumulative_spend(key),
            get_cumulative_count(key))
def make_aggregated_sales_row(key, monthly_total, transaction_count,
                              cumulative_spend, cumulative_count):
    """Build one output row, keyed and timestamped from the group key."""
    row = OrderedDict()
    row['_id'] = unicode(key)
    row['_timestamp'] = datetime.datetime(key.year, key.month, 1)
    row['lot'] = _LOT_NAME[key.lot]
    row['sector'] = _SECTOR_NAME[key.sector]
    row['sme_large'] = _SME_LARGE_NAME[key.sme_large]
    row['monthly_spend'] = monthly_total
    row['count'] = transaction_count
    row['cumulative_spend'] = cumulative_spend
    row['cumulative_count'] = cumulative_count
    return row
def get_distinct_month_years():
    """Yield every distinct (month, year) in the raw sales table, in order."""
    query = ('month, year FROM {table} GROUP BY year, month'
             ' ORDER BY year, month'.format(table=_RAW_SALES_TABLE))
    for row in scraperwiki.sqlite.select(query):
        yield (row['month'], row['year'])
def get_monthly_spend_and_count(key):
query = ('ROUND(SUM(total_ex_vat), 2) AS total_spend, '
'COUNT(*) AS invoice_count '
'FROM {table} '
'WHERE year={year} '
'AND month={month} '
'AND lot="{lot}" '
'AND customer_sector="{sector}" '
'AND supplier_type="{sme_large}"'.format(
table=_RAW_SALES_TABLE,
year=key.year,
month=key.month,
lot=key.lot,
sector=key.sector,
sme_large=key.sme_large))
logging.debug(query)
result = scraperwiki.sqlite.select(query)[0]
logging.debug(result)
spend, count = 0.0, 0
if result['total_spend'] is not None:
spend = float(result['total_spend'])
if result['invoice_count'] is not None:
count = int(result['invoice_count'])
return (spend, count)
def get_cumulative_spend(key):
    """
    Get the sum of spending for this category up to and including the given
    month.
    """
    query = ('ROUND(SUM(total_ex_vat), 2) AS total '
             'FROM {table} '
             'WHERE date <= "{year}-{month:02}-01" '
             'AND lot="{lot}" '
             'AND customer_sector="{sector}" '
             'AND supplier_type="{sme_large}"'.format(
                 table=_RAW_SALES_TABLE,
                 year=key.year,
                 month=key.month,
                 lot=key.lot,
                 sector=key.sector,
                 sme_large=key.sme_large))
    logging.debug(query)
    result = scraperwiki.sqlite.select(query)
    logging.debug(result)
    # SUM is NULL (None) when no rows matched; treat that as zero spend.
    total = result[0]['total']
    return float(total) if total is not None else 0.0
def get_cumulative_count(key):
    """
    Get the number of transactions for this category up to and including the
    given month.
    """
    query = ('COUNT(*) AS total '
             'FROM {table} '
             'WHERE date <= "{year}-{month:02}-01" '
             'AND lot="{lot}" '
             'AND customer_sector="{sector}" '
             'AND supplier_type="{sme_large}"'.format(
                 table=_RAW_SALES_TABLE,
                 year=key.year,
                 month=key.month,
                 lot=key.lot,
                 sector=key.sector,
                 sme_large=key.sme_large))
    logging.debug(query)
    result = scraperwiki.sqlite.select(query)
    logging.debug(result)
    # Preserved quirk: the count is returned as a float when present.
    total = result[0]['total']
    return float(total) if total is not None else 0
def make_spending_group_keys():
    """Yield a SpendingGroupKey for the full cross-product of observed
    (month, year) pairs with every lot, sector and supplier size."""
    combos = itertools.product(get_distinct_month_years(),
                               _LOT_NAME.keys(),
                               _SECTOR_NAME.keys(),
                               _SME_LARGE_NAME.keys())
    for (month, year), lot, sector, sme_large in combos:
        yield SpendingGroupKey(month=month, year=year, lot=lot, sector=sector,
                               sme_large=sme_large)
def make_company_type_keys():
    """Yield a CompanyTypeKey for every observed (month, year) pair crossed
    with every supplier size."""
    combos = itertools.product(get_distinct_month_years(),
                               _SME_LARGE_NAME.keys())
    for (month, year), sme_large in combos:
        yield CompanyTypeKey(month=month, year=year, sme_large=sme_large)
|
StarcoderdataPython
|
3273614
|
<reponame>edmondchuc/oxigraph-admin<filename>oxigraph_admin/api/api_v1/endpoints/security.py
from fastapi import APIRouter, Body
from fastapi.responses import JSONResponse
from oxigraph_admin import crud
from oxigraph_admin.schemas.security import SecuritySettings
# Router for the security-settings endpoints; mounted by the parent API module.
router = APIRouter()


@router.get('/security', response_model=SecuritySettings)
def security_get():
    """Return the current security settings."""
    response = crud.security.get_security_settings()
    return response


@router.post('/security')
def security_post(security_settings: SecuritySettings):
    """Persist new security settings and confirm the resulting enabled state."""
    crud.security.set_security(security_settings)
    return JSONResponse({'detail': f'Security set to {security_settings.enabled}'})
|
StarcoderdataPython
|
3208450
|
<reponame>Eric-Muthemba/qontroverse
# -*- coding: utf-8 -*-
model = {
u'yn ': 0,
u'dd ': 1,
u' yn': 2,
u' y ': 3,
u'ydd': 4,
u'eth': 5,
u'th ': 6,
u' i ': 7,
u'aet': 8,
u'd y': 9,
u'ch ': 10,
u'od ': 11,
u'ol ': 12,
u'edd': 13,
u' ga': 14,
u' gw': 15,
u"'r ": 16,
u'au ': 17,
u'ddi': 18,
u'ad ': 19,
u' cy': 20,
u' gy': 21,
u' ei': 22,
u' o ': 23,
u'iad': 24,
u'yr ': 25,
u'an ': 26,
u'bod': 27,
u'wed': 28,
u' bo': 29,
u' dd': 30,
u'el ': 31,
u'n y': 32,
u' am': 33,
u'di ': 34,
u'edi': 35,
u'on ': 36,
u' we': 37,
u' ym': 38,
u' ar': 39,
u' rh': 40,
u'odd': 41,
u' ca': 42,
u' ma': 43,
u'ael': 44,
u'oed': 45,
u'dae': 46,
u'n a': 47,
u'dda': 48,
u'er ': 49,
u'h y': 50,
u'all': 51,
u'ei ': 52,
u' ll': 53,
u'am ': 54,
u'eu ': 55,
u'fod': 56,
u'fyd': 57,
u'l y': 58,
u'n g': 59,
u'wyn': 60,
u'd a': 61,
u'i g': 62,
u'mae': 63,
u'neu': 64,
u'os ': 65,
u' ne': 66,
u'd i': 67,
u'dod': 68,
u'dol': 69,
u'n c': 70,
u'r h': 71,
u'wyd': 72,
u'wyr': 73,
u'ai ': 74,
u'ar ': 75,
u'in ': 76,
u'rth': 77,
u' fy': 78,
u' he': 79,
u' me': 80,
u' yr': 81,
u"'n ": 82,
u'dia': 83,
u'est': 84,
u'h c': 85,
u'hai': 86,
u'i d': 87,
u'id ': 88,
u'r y': 89,
u'y b': 90,
u' dy': 91,
u' ha': 92,
u'ada': 93,
u'i b': 94,
u'n i': 95,
u'ote': 96,
u'rot': 97,
u'tes': 98,
u'y g': 99,
u'yd ': 100,
u' ad': 101,
u' mr': 102,
u' un': 103,
u'cyn': 104,
u'dau': 105,
u'ddy': 106,
u'edo': 107,
u'i c': 108,
u'i w': 109,
u'ith': 110,
u'lae': 111,
u'lla': 112,
u'nd ': 113,
u'oda': 114,
u'ryd': 115,
u'tho': 116,
u' a ': 117,
u' dr': 118,
u'aid': 119,
u'ain': 120,
u'ddo': 121,
u'dyd': 122,
u'fyn': 123,
u'gyn': 124,
u'hol': 125,
u'io ': 126,
u'o a': 127,
u'wch': 128,
u'wyb': 129,
u'ybo': 130,
u'ych': 131,
u' br': 132,
u' by': 133,
u' di': 134,
u' fe': 135,
u' na': 136,
u" o'": 137,
u' pe': 138,
u'art': 139,
u'byd': 140,
u'dro': 141,
u'gal': 142,
u'l e': 143,
u'lai': 144,
u'mr ': 145,
u'n n': 146,
u'r a': 147,
u'rhy': 148,
u'wn ': 149,
u'ynn': 150,
u' on': 151,
u' r ': 152,
u'cae': 153,
u'd g': 154,
u'd o': 155,
u'd w': 156,
u'gan': 157,
u'gwy': 158,
u'n d': 159,
u'n f': 160,
u'n o': 161,
u'ned': 162,
u'ni ': 163,
u"o'r": 164,
u'r d': 165,
u'ud ': 166,
u'wei': 167,
u'wrt': 168,
u' an': 169,
u' cw': 170,
u' da': 171,
u' ni': 172,
u' pa': 173,
u' pr': 174,
u' wy': 175,
u'd e': 176,
u'dai': 177,
u'dim': 178,
u'eud': 179,
u'gwa': 180,
u'idd': 181,
u'im ': 182,
u'iri': 183,
u'lwy': 184,
u'n b': 185,
u'nol': 186,
u'r o': 187,
u'rwy': 188,
u' ch': 189,
u' er': 190,
u' fo': 191,
u' ge': 192,
u' hy': 193,
u" i'": 194,
u' ro': 195,
u' sa': 196,
u' tr': 197,
u'bob': 198,
u'cwy': 199,
u'cyf': 200,
u'dio': 201,
u'dyn': 202,
u'eit': 203,
u'hel': 204,
u'hyn': 205,
u'ich': 206,
u'll ': 207,
u'mdd': 208,
u'n r': 209,
u'ond': 210,
u'pro': 211,
u'r c': 212,
u'r g': 213,
u'red': 214,
u'rha': 215,
u'u a': 216,
u'u c': 217,
u'u y': 218,
u'y c': 219,
u'ymd': 220,
u'ymr': 221,
u'yw ': 222,
u' ac': 223,
u' be': 224,
u' bl': 225,
u' co': 226,
u' os': 227,
u'adw': 228,
u'ae ': 229,
u'af ': 230,
u'd p': 231,
u'efn': 232,
u'eic': 233,
u'en ': 234,
u'eol': 235,
u'es ': 236,
u'fer': 237,
u'gel': 238,
u'h g': 239,
u'hod': 240,
u'ied': 241,
u'ir ': 242,
u'laf': 243,
u'n h': 244,
u'na ': 245,
u'nyd': 246,
u'odo': 247,
u'ofy': 248,
u'rdd': 249,
u'rie': 250,
u'ros': 251,
u'stw': 252,
u'twy': 253,
u'yda': 254,
u'yng': 255,
u' at': 256,
u' de': 257,
u' go': 258,
u' id': 259,
u' oe': 260,
u' â ': 261,
u"'ch": 262,
u'ac ': 263,
u'ach': 264,
u"ae'": 265,
u'al ': 266,
u'bl ': 267,
u'd c': 268,
u'd l': 269,
u'dan': 270,
u'dde': 271,
u'ddw': 272,
u'dir': 273,
u'dla': 274,
u'ed ': 275,
u'ela': 276,
u'ell': 277,
u'ene': 278,
u'ewn': 279,
u'gyd': 280,
u'hau': 281,
u'hyw': 282,
u'i a': 283,
u'i f': 284,
u'iol': 285,
u'ion': 286,
u'l a': 287,
u'l i': 288,
u'lia': 289,
u'med': 290,
u'mon': 291,
u'n s': 292,
u'no ': 293,
u'obl': 294,
u'ola': 295,
u'ref': 296,
u'rn ': 297,
u'thi': 298,
u'un ': 299,
}
|
StarcoderdataPython
|
54278
|
<reponame>tamasf97/Platform
__all__ = ['models', 'api']
|
StarcoderdataPython
|
135747
|
from abc import ABC
from typing import Optional
from cogbot.cogs.robo_mod.robo_mod_action_log_entry import RoboModActionLogEntry
from cogbot.cogs.robo_mod.robo_mod_trigger import RoboModTrigger
from cogbot.lib.dict_repr import DictRepr
class RoboModAction(ABC, DictRepr):
    """Base class for RoboMod actions: configurable operations applied in
    response to a trigger, optionally producing a log entry."""

    async def init(self, state: "RoboModServerState", data: dict) -> "RoboModAction":
        """ Initialize the instance asynchronously, and return itself. """
        await self.update(state, data)
        return self

    async def apply_and_log(self, trigger: RoboModTrigger):
        """Apply the action, then emit its log entry (if it produces one)."""
        await self.apply(trigger)
        await self.maybe_log(trigger)

    async def maybe_log(self, trigger: RoboModTrigger):
        """Write the action's log entry when log() returns one."""
        entry = await self.log(trigger)
        if entry:
            await entry.do_log(trigger)

    # NOTE #override
    async def log(self, trigger: RoboModTrigger) -> Optional[RoboModActionLogEntry]:
        """ Return a log entry for this action, if any. """

    # NOTE #override
    async def update(self, state: "RoboModServerState", data: dict):
        """ Initialize the instance asynchronously. """

    # NOTE #override
    async def apply(self, trigger: RoboModTrigger):
        """ Apply the action. """
|
StarcoderdataPython
|
171483
|
<reponame>drtnf/cits3403-pair-up<gh_stars>1-10
import unittest, os, time
from app import app, db
from app.models import Student, Project, Lab
from selenium import webdriver
basedir = os.path.abspath(os.path.dirname(__file__))
#To do, find simple way for switching from test context to development to production.
class SystemTest(unittest.TestCase):
    """Browser-driven system test for the registration flow.

    Requires the geckodriver binary next to this file and the Flask app
    already running on http://localhost:5000 (the test does not start the
    server itself).
    """
    driver = None

    def setUp(self):
        self.driver = webdriver.Firefox(executable_path=os.path.join(basedir,'geckodriver'))
        if not self.driver:
            self.skipTest('Web browser not available')
        else:
            # Seed the database with two students and a lab for the tests.
            db.init_app(app)
            db.create_all()
            s1 = Student(id='22222222',first_name='Test',surname='Case',cits3403=True)
            s2 = Student(id='11111111',first_name='Unit',surname='Test',cits3403=True)
            lab = Lab(lab='test-lab',time='now')
            db.session.add(s1)
            db.session.add(s2)
            db.session.add(lab)
            db.session.commit()
            self.driver.maximize_window()
            self.driver.get('http://localhost:5000/')

    def tearDown(self):
        # NOTE(review): the database cleanup only runs when a driver was
        # created; a failed driver start leaves seeded rows behind.
        if self.driver:
            self.driver.close()
            db.session.query(Student).delete()
            db.session.query(Project).delete()
            db.session.query(Lab).delete()
            db.session.commit()
            db.session.remove()

    def test_register(self):
        """Register student 22222222 through the UI and confirm the login."""
        s = Student.query.get('22222222')
        self.assertEqual(s.first_name,'Test',msg='student exists in db')
        self.driver.get('http://localhost:5000/register')
        self.driver.implicitly_wait(5)
        num_field = self.driver.find_element_by_id('student_number')
        num_field.send_keys('22222222')
        pref_name = self.driver.find_element_by_id('prefered_name')
        pref_name.send_keys('Testy')
        new_pin = self.driver.find_element_by_id('new_pin')
        new_pin.send_keys('0000')
        new_pin2 = self.driver.find_element_by_id('new_pin2')
        new_pin2.send_keys('0000')
        time.sleep(1)
        self.driver.implicitly_wait(5)
        submit = self.driver.find_element_by_id('submit')
        submit.click()
        #check login success
        self.driver.implicitly_wait(5)
        time.sleep(1)
        logout = self.driver.find_element_by_partial_link_text('Logout')
        self.assertEqual(logout.get_attribute('innerHTML'), 'Logout Testy', msg='Logged in')


if __name__=='__main__':
    unittest.main(verbosity=2)
|
StarcoderdataPython
|
3347764
|
<filename>Sentiment_API_Server/DSFunctions.py
'''Functions for actions in the Tweet Better website.'''
'''TODO: present the score (-1 to 1) as a percentage (0 is 50%)'''
'''TODO: is it necessary on DS part to make a plot.ly graphing function?
To retrieve stuff from the API?'''
#imports
import string
from string import punctuation
import re
import nltk
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('vader_lexicon')
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
def process_tweet(tweet):
    '''Clean a single tweet and score its sentiment.

    Returns the VADER compound sentiment score, a float in [-1, 1].
    '''
    # nltk stopwords are words too common to be useful
    # (not stopwords as in profanity). Use a set for O(1) membership tests.
    stop_words = set(stopwords.words('english'))
    # convert to string
    tweet = str(tweet)
    # remove urls. Fix: the old pattern 'http.+? ' required a trailing space,
    # so a URL at the end of the tweet was never removed; \S+ handles both.
    tweet = re.sub(r'http\S+', '', tweet)
    # remove tagged users (same end-of-string fix as above)
    tweet = re.sub(r'@\S+', '', tweet)
    # clean html tags and literals
    tweet = re.sub(r'<.*?>', '', tweet)
    tweet = tweet.replace(r"\n", " ")
    tweet = tweet.replace(r"b'", "")
    tweet = tweet.replace(r"\r", "")
    # temp. tokenize to clean on word level
    words = word_tokenize(tweet)
    # strip whitespace and lowercase in one pass
    words = [word.strip().lower() for word in words]
    # remove stopwords
    words = [word for word in words if word not in stop_words]
    # lemmatize (remove prefixes and suffixes)
    lemmatizer = WordNetLemmatizer()
    words = [lemmatizer.lemmatize(word) for word in words]
    fin_tweet = " ".join(words)
    # predict: 'compound' is VADER's normalized overall score
    sid = SentimentIntensityAnalyzer()
    scores = sid.polarity_scores(fin_tweet)
    return scores['compound']
def score_input(tweet):
    '''Score a drafted tweet and report the verdict.

    Fix: the old body used ``return print(...)``, which always returned
    None despite the docstring promising a statement.  The message is now
    printed (preserving the visible behaviour) AND returned.
    '''
    score = process_tweet(tweet)
    if score < float(-0.50):
        message = ("Your tweet might be more negative than it reads to you now."
                   "\nIt has an overall sentiment score of " + str(score * 100) +
                   "%.\nDo you still want to send the tweet or should it be drafted?")
    elif score > float(0.50):
        message = ("You're improving Twitter one tweet at a time!\nThis tweet "
                   "has a highly positive sentiment score of " + str(score * 100) +
                   "%.\nSend now?")
    else:
        # NOTE(review): the neutral branch reports the raw score rather than
        # a percentage, mirroring the original output.
        message = ("This is a pretty neutral tweet, with an overall sentiment "
                   "of " + str(score) + ".\nSend now?")
    print(message)
    return message
def score_timeline(timeline):
    '''for use when a user requests to evaluate entire timeline.'''
    '''returns a list of scores (for graphing by time)'''
    return [process_tweet(tweet) for tweet in timeline]
|
StarcoderdataPython
|
194408
|
import boto3
import logging
import os
def lambda_handler(event, context):
    """Synchronise static VPC routes on a transit-gateway route table.

    For every CIDR in event['cidr_blocks'] a static route is created when no
    route for that exact CIDR exists, and any static VPC route whose CIDR is
    not in the list is deleted.

    event keys: 'cidr_blocks' (list of CIDR strings) and
    'tgw_route_table_id' (the TGW route table to manage).
    """
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    region = os.environ['AWS_REGION']
    ec2 = boto3.client('ec2', region_name=region)
    cidr_blocks = event['cidr_blocks']
    tgw_route_table_id = event['tgw_route_table_id']
    for cidr_block in cidr_blocks:
        # find all routes that belong in a specific CIDR block or its subnets
        routes = ec2.search_transit_gateway_routes(
            TransitGatewayRouteTableId=tgw_route_table_id,
            Filters=[
                {
                    'Name': 'route-search.subnet-of-match',
                    'Values': [
                        cidr_block,
                    ]
                },
                {
                    'Name': 'attachment.resource-type',
                    'Values': [
                        'vpc',
                    ]
                }
            ]
        )
        # array with the CIDR blocks that were identified
        cidr_blocks_for_routes = (
            [d['DestinationCidrBlock'] for d in routes['Routes']]
        )
        # if a static route does not already exist
        # NOTE(review): when the search matches nothing at all (empty list),
        # no route is created either - confirm this is intended.
        if len(cidr_blocks_for_routes) > 0:
            if cidr_block not in cidr_blocks_for_routes:
                tgw_attachements = (
                    [d['TransitGatewayAttachments'] for d in routes['Routes']]
                )
                tgw_vpc_attachements = (
                    [d for d in tgw_attachements if d[0]['ResourceType'] in ['vpc']]  # noqa: E501
                )
                # identify attachment id for VPC
                # NOTE(review): assumes each matched route has at least one
                # attachment; an empty TransitGatewayAttachments list would
                # raise IndexError here.
                tgw_attach_id = tgw_vpc_attachements[0][0]['TransitGatewayAttachmentId']  # noqa: E501
                logger.info('Creating route for: %s' % str(cidr_block))
                ec2.create_transit_gateway_route(
                    DestinationCidrBlock=cidr_block,
                    TransitGatewayRouteTableId=tgw_route_table_id,
                    TransitGatewayAttachmentId=tgw_attach_id
                )
    ####################
    # start - cleanup
    # find all static routes to vpcs
    all_static_routes = ec2.search_transit_gateway_routes(
        TransitGatewayRouteTableId=tgw_route_table_id,
        Filters=[
            {
                'Name': 'type',
                'Values': [
                    'static',
                ]
            },
            {
                'Name': 'attachment.resource-type',
                'Values': [
                    'vpc',
                ]
            }
        ]
    )
    # array with the CIDR blocks that were identified
    all_cidr_blocks_for_static_routes = (
        [d['DestinationCidrBlock'] for d in all_static_routes['Routes']]
    )
    # delete every managed static route whose CIDR is no longer wanted
    for cidr_block in all_cidr_blocks_for_static_routes:
        if cidr_block not in cidr_blocks:
            logger.info('Deleting route for: %s' % str(cidr_block))
            ec2.delete_transit_gateway_route(
                DestinationCidrBlock=cidr_block,
                TransitGatewayRouteTableId=tgw_route_table_id
            )
    ####################
    # end - cleanup
    ####################
StarcoderdataPython
|
114302
|
<reponame>ciholas/cdp-geofencing
# Ciholas, Inc. - www.ciholas.com
# Licensed under: creativecommons.org/licenses/by/4.0
class Zone:
    """A zone in the form of a polygon.

    Membership is tested with the even-odd (ray casting) rule: a horizontal
    ray from the query point crossing the polygon's edges an odd number of
    times means the point is inside.
    """

    def __init__(self, name, vertices, color):
        """Store the polygon and precompute its bounding box and edges.

        Args:
            name: display name of the zone.
            vertices: sequence of (x, y) pairs, in polygon order.
            color: display colour (stored, not interpreted here).
        """
        self.name = name
        self.vertices = vertices
        self.color = color
        x_list = [vertex[0] for vertex in vertices]
        y_list = [vertex[1] for vertex in vertices]
        # Needs to be stored for the vertex-crossing correction in is_inside
        self.y_list = y_list
        # Bounding box enables a cheap reject before the ray-casting test
        self.x_max = max(x_list)
        self.x_min = min(x_list)
        self.y_max = max(y_list)
        self.y_min = min(y_list)
        # One Line per polygon edge; the modulo wraps the final vertex back
        # to the first.  (The original wrapped via `except Exception` on an
        # IndexError, which could also have masked unrelated errors.)
        count = len(self.vertices)
        self.sides = [Line(self.vertices[i], self.vertices[(i + 1) % count])
                      for i in range(count)]

    def __str__(self):
        return self.name

    def is_inside(self, point_ray):
        """Determines if the given point, which has been transformed into a ray,
        is inside or outside of the zone.

        `point_ray` is a Line whose first endpoint (x1, y1) is the query
        point and whose second endpoint lies far outside the polygon.
        """
        x = point_ray.x1
        y = point_ray.y1
        # Fast reject: outside the bounding box means outside the polygon.
        if (x > self.x_max or x < self.x_min or y > self.y_max or y < self.y_min):
            return False
        else:
            # Count ray/edge intersections: odd => inside, even => outside.
            # A tangent pass through a local extremum counts as two.
            intersections = 0
            for side in self.sides:
                if point_ray.is_intersected(side):
                    intersections += 1
            # If the ray passes through a vertex that is NOT a local Y
            # extremum, both adjacent edges were counted; remove one of the
            # two so the crossing is counted once.
            if y in self.y_list and y != self.y_max and y != self.y_min:
                for vertex in self.vertices:
                    if vertex[1] != self.y_max and vertex[1] != self.y_min and vertex[1] == y and vertex[0] > x:
                        intersections -= 1
            if intersections % 2 == 1:
                return True
            else:
                return False


class Line:
    """Holds the data for a line segment in the form of two points."""

    def __init__(self, point_1, point_2):
        self.x1 = point_1[0]
        self.y1 = point_1[1]
        self.x2 = point_2[0]
        self.y2 = point_2[1]
        # Coefficients of the infinite line a*x + b*y + c = 0 through the two
        # points; computed once here because is_intersected reuses them.
        self.intercept_coeff_a = self.y2 - self.y1
        self.intercept_coeff_b = self.x1 - self.x2
        self.intercept_coeff_c = (self.x2 * self.y1) - (self.x1 * self.y2)

    def is_intersected(self, line):
        """Determines if this segment intersects the given segment.

        Both segments must straddle each other's supporting line; a final
        cross-product check rejects (near-)collinear pairs.
        """
        # Does `line` straddle this segment's supporting line?
        d1 = (self.intercept_coeff_a * line.x1) + (self.intercept_coeff_b * line.y1) + self.intercept_coeff_c
        d2 = (self.intercept_coeff_a * line.x2) + (self.intercept_coeff_b * line.y2) + self.intercept_coeff_c
        if (d1 > 0 and d2 > 0) or (d1 < 0 and d2 < 0):
            return False
        # Does this segment straddle `line`'s supporting line?
        d1 = (line.intercept_coeff_a * self.x1) + (line.intercept_coeff_b * self.y1) + line.intercept_coeff_c
        d2 = (line.intercept_coeff_a * self.x2) + (line.intercept_coeff_b * self.y2) + line.intercept_coeff_c
        if (d1 > 0 and d2 > 0) or (d1 < 0 and d2 < 0):
            return False
        # If both of the above checks pass, the two lines could still be collinear.
        if (abs((self.intercept_coeff_a * line.intercept_coeff_b) - (line.intercept_coeff_a * self.intercept_coeff_b)) < .00001):
            return False
        return True
|
StarcoderdataPython
|
4821209
|
<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pandas as pd
from bs4 import BeautifulSoup
import json
import os
from pprint import pprint
import datetime as dt
import geopy
from geopy.geocoders import GoogleV3
from geopy.exc import GeocoderTimedOut
def main():
    """Convert the Belper facility XML exports into one JSON file per facility.

    Reads source/belper.xml (hall/usage data) and source/einr.xml (facility
    master data), geocodes each facility's address through the Google Maps
    API and writes target/sportstaette_<number>.json for every facility.
    """
    # Geocoding needs GOOGLE_API_KEY in the environment.
    geolocator = GoogleV3(api_key=os.environ['GOOGLE_API_KEY'])
    with open("source/belper.xml", 'rb') as fp:
        soup = BeautifulSoup(fp, "xml")
    # PERF FIX: parse the facility master data once up front.  The original
    # re-opened and re-parsed source/einr.xml inside the loop, performing a
    # full XML parse for every single facility.
    with open("source/einr.xml", 'rb') as fp_einr:
        soup_einr = BeautifulSoup(fp_einr, 'xml')
    for facility in soup.find_all('Einrichtung'):
        output = {
            "name": facility.Name.text,
            "type": "Sporthalle"
        }
        properties = {}
        facility_number = facility.Nummer.text
        number_tag = soup_einr.find('Nummer', text=facility_number)
        print(number_tag.text)
        output["description"] = number_tag.find_next_sibling('Typ').text
        properties["construction_year"] = number_tag.find_next_sibling('Baujahr').text
        # 'SSA' appears to mark the sports-facility address record -- TODO confirm.
        location = number_tag.parent.find('FunktionKey', text='SSA')
        if location:
            street = location.parent.Strasse.text
            zip_code = location.parent.Plz.text  # renamed from 'zip' to avoid shadowing the builtin
            city = location.parent.Ort.text
            address_string = street + ', ' + zip_code + ' ' + city
            output["address"] = {
                "street": street,
                "zip": zip_code,
                "city": city
            }
            locator = geolocator.geocode(address_string, timeout=10)
            # Fall back to empty strings when geocoding returns no match.
            geo = {
                "lat": locator.latitude if locator else '',
                "lon": locator.longitude if locator else ''
            }
            output["address"]["geo"] = geo
        properties['allocations'] = []
        for allocation in facility.find_all('Belegung'):
            day = allocation.Wochentag.text
            day_long = get_full_day_name(day)
            # The source data encodes midnight as "24:xx"; strptime only
            # accepts hours 0-23, so remap it before parsing.
            start = allocation.Start.text.replace('24:', '0:')
            start_time = dt.datetime.strptime(start, '%H:%M').time()
            start_time_float = start_time.hour + start_time.minute / 60.0
            end = allocation.Ende.text.replace('24:', '0:')
            end_time = dt.datetime.strptime(end, '%H:%M').time()
            end_time_float = end_time.hour + end_time.minute / 60.0
            activity = allocation.Taetigkeit.text
            field = allocation.Bereich.text
            club = allocation.find('Nutzer-Nutzergruppen').text
            comment = allocation.Kommentar1.text
            segment = allocation.SegName.text
            properties['allocations'].append({
                "time": day + ' ' + start + '-' + end,
                "x-time-es": {
                    "day": day_long,
                    "start": start_time_float,
                    "end": end_time_float
                },
                "segment": segment,
                "activity": activity,
                "field": field,
                "club": club,
                "comment": comment
            })
        output["properties"] = properties
        with open('target/sportstaette_' + number_tag.text + '.json', 'w') as outfile:
            json.dump(output, outfile)
def get_full_day_name(day_abbr):
    """Expand a two-letter German weekday abbreviation to the full name.

    Raises KeyError for unknown abbreviations.
    """
    return {
        'Mo': 'Montag',
        'Di': 'Dienstag',
        'Mi': 'Mittwoch',
        'Do': 'Donnerstag',
        'Fr': 'Freitag',
        'Sa': 'Samstag',
        'So': 'Sonntag',
    }[day_abbr]
if __name__ == '__main__':
    # Run the scrape only when executed as a script, not when imported.
    main()
|
StarcoderdataPython
|
30985
|
<gh_stars>0
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
"""
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))

# The long description shown on PyPI is sourced from the README.
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='python-core-utils',
    version='0.5.0',
    description='Python core utility functions',
    long_description=long_description,
    url='https://github.com/ajaniv/python-core-utils',
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        # BUG FIX: "Development Status :: 4 - Alpha" is not a registered
        # Trove classifier (status 4 is Beta) and would be rejected on
        # upload to PyPI.
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7'
    ],
    keywords='python core utility functions',
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    install_requires=['Pillow>=4.1.1',
                      'docutils>=0.14'],
    extras_require={
        'dev': ['check-manifest'],
        'test': ['coverage'],
    },
    test_suite='tests'
)
|
StarcoderdataPython
|
1789182
|
import time
import datetime
import numpy as np
from sklearn.metrics import f1_score
import random
import torch
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
import os
def fix_the_random(seed_val=42):
    """Seed every RNG in use (stdlib random, NumPy, torch CPU and CUDA)
    so that runs are reproducible."""
    for seeder in (random.seed,
                   np.random.seed,
                   torch.manual_seed,
                   torch.cuda.manual_seed_all):
        seeder(seed_val)
def format_time(elapsed):
    """Render an elapsed time given in seconds as an "h:mm:ss" string."""
    # Snap to the nearest whole second, then let timedelta do the formatting.
    whole_seconds = int(round(elapsed))
    return str(datetime.timedelta(seconds=whole_seconds))
def flat_accuracy(preds, labels):
    """Fraction of rows whose argmax prediction equals the gold label.

    *preds* is a 2-D score array (rows x classes); *labels* is an array
    of integer class ids.
    """
    predicted = np.argmax(preds, axis=1).flatten()
    gold = labels.flatten()
    return (predicted == gold).sum() / len(gold)
def flat_fscore(preds, labels):
    """Macro-averaged F1 between argmax predictions and the gold labels."""
    predicted = np.argmax(preds, axis=1).flatten()
    gold = labels.flatten()
    return f1_score(gold, predicted, average='macro')
class MultiColumnLabelEncoder:
    """Label-encode several DataFrame columns at once.

    sklearn's LabelEncoder handles a single column; this wrapper applies one
    independent encoder per column so it can be dropped into a Pipeline.
    """

    def __init__(self, columns=None):
        # Column names to encode; None means "encode every column".
        self.columns = columns

    def fit(self, X, y=None):
        """No-op (encoders are fitted inside transform); kept for the
        sklearn estimator API."""
        return self

    def transform(self, X):
        """Return a copy of X with the selected columns label-encoded.

        Transforms the columns named in self.columns using LabelEncoder();
        if no columns were specified, transforms all columns in X.
        """
        output = X.copy()
        # BUG FIX: DataFrame.iteritems() was removed in pandas 2.0;
        # iterating column names (or .items()) is the supported equivalent.
        target_columns = self.columns if self.columns is not None else output.columns
        for col in target_columns:
            output[col] = LabelEncoder().fit_transform(output[col])
        return output

    def fit_transform(self, X, y=None):
        """Convenience: fit then transform, per the sklearn convention."""
        return self.fit(X, y).transform(X)
def save_model(model, tokenizer, params):
    """Persist a fine-tuned model and its tokenizer under models_saved/.

    Does nothing unless params['to_save'] is True.  The target directory
    name encodes the training configuration (path_files, language,
    translated/actual data, how_train, sample_ratio), so reloading with
    from_pretrained() can find the right checkpoint.
    """
    if params['to_save'] != True:
        return
    # Directory name reflects whether translated or original-language data was used.
    translate = 'translated' if params['csv_file'] == '*_full.csv' else 'actual'
    name_parts = [params['path_files']]
    if params['how_train'] != 'all':
        # Language-specific training runs also encode the language.
        name_parts.append(params['language'])
    name_parts += [translate, params['how_train'], str(params['sample_ratio'])]
    output_dir = 'models_saved/' + '_'.join(name_parts)
    if params['save_only_bert']:
        # Strip the classification head and keep only the BERT encoder.
        model = model.bert
        output_dir = output_dir + '_only_bert/'
    else:
        output_dir = output_dir + '/'
    print(output_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    print("Saving model to %s" % output_dir)
    # Unwrap DataParallel/DistributedDataParallel before saving.
    model_to_save = model.module if hasattr(model, 'module') else model
    model_to_save.save_pretrained(output_dir)
    tokenizer.save_pretrained(output_dir)
|
StarcoderdataPython
|
77027
|
<gh_stars>0
import unittest
from unittest import mock
from wp.exceptions import WordSizeException, WordNotInWordListException, ImpossiblePathException
from wp.main import WordPath
def create_mock_open(file_text):
    """Build a mock replacement for open() whose file handle is iterable.

    mock.mock_open's handle does not implement the iterator protocol the
    way a real file object does, so route __iter__/__next__ through the
    handle's readline().
    """
    opener = mock.mock_open(read_data=file_text)
    handle = opener.return_value
    handle.__iter__ = lambda self: self
    handle.__next__ = lambda self: self.readline()
    return opener
class TestWordPath(unittest.TestCase):
    """Unit tests for wp.main.WordPath.

    No real files are touched: every load_word_list call is served from an
    in-memory file via create_mock_open.
    """
    def test_load_word_list(self):
        """
        Ensure that file data is loaded into a list with one word per list position
        """
        # 'five' has 4 letters, so loading with word size 5 should drop it.
        expected_dict = ['first', 'third']
        file_text = '\n'.join(expected_dict + ['five'])
        word_path = WordPath()
        with mock.patch('wp.main.open', create_mock_open(file_text=file_text)):
            word_path.load_word_list('test', 5)
        self.assertListEqual(expected_dict, word_path.word_list)
    def test_word_in_word_list(self):
        """
        WordPath.in_word_list should return True if given word exists in WordPath.word_list
        """
        word_path = WordPath()
        with mock.patch('wp.main.open', create_mock_open(file_text='\n'.join(['one', 'two', 'three']))):
            word_path.load_word_list('test', 3)
        self.assertTrue(word_path.in_word_list('two'))
    def test_word_not_in_word_list(self):
        """
        WordPath.in_word_list should return False if given word not exists in WordPath.word_list
        """
        word_path = WordPath()
        with mock.patch('wp.main.open', create_mock_open(file_text='\n'.join(['one', 'two', 'three']))):
            word_path.load_word_list('test', 3)
        self.assertFalse(word_path.in_word_list('four'))
    def test_find_word_size_verification(self):
        """
        Ensure that find method throw an exception if words have different size
        """
        # No word list is loaded here, so the size check must fire first.
        word_path = WordPath()
        with self.assertRaises(WordSizeException):
            word_path.find('123', '1234')
    def test_find_with_first_word_not_in_word_list(self):
        """
        Ensure that find method throw an exception if first word is not present on WordPath.word_list
        """
        # NOTE(review): 'two' IS in the loaded list while 'six' is not, so this
        # looks like it exercises the *last* word's membership check despite
        # the test name -- confirm against WordPath.find's argument order.
        word_path = WordPath()
        with mock.patch('wp.main.open', create_mock_open(file_text='\n'.join(['one', 'two', 'three']))):
            word_path.load_word_list('test', 3)
        with self.assertRaises(WordNotInWordListException):
            word_path.find('two', 'six')
    def test_find_with_last_word_not_in_word_list(self):
        """
        Ensure that find method throw an exception if last word is not present on WordPath.word_list
        """
        # NOTE(review): 'four' has 4 letters against a 3-letter word list, so
        # the size check raises WordSizeException before any membership check
        # can run -- the word-list path named by this test is never reached.
        word_path = WordPath()
        with mock.patch('wp.main.open', create_mock_open(file_text='\n'.join(['one', 'two', 'three']))):
            word_path.load_word_list('test', 3)
        with self.assertRaises(WordSizeException):
            word_path.find('one', 'four')
    def test_populate_chain(self):
        """
        Ensure that populate_chain method return the expected dict
        """
        # Each consecutive pair in the expected chain differs by one letter.
        expected_dict = ['cat', 'cag', 'cog', 'dog']
        file_text = '\n'.join(expected_dict + ['fog', 'zog'])
        word_path = WordPath()
        with mock.patch('wp.main.open', create_mock_open(file_text=file_text)):
            word_path.load_word_list('test', 3)
        word_path.chain.append('cat')
        word_path.populate_chain('cat', 'dog')
        self.assertListEqual(expected_dict, word_path.chain)
    def test_find_with_no_viable_path(self):
        """
        Ensure that find method raise an Exception when have no viable path between given words
        """
        # 'cat' has no one-letter-step neighbour in this list, so no path exists.
        file_text = '\n'.join(['aaa', 'cat', 'cog', 'dog', 'fog', 'zog'])
        word_path = WordPath()
        with mock.patch('wp.main.open', create_mock_open(file_text=file_text)):
            word_path.load_word_list('test', 3)
        with self.assertRaises(ImpossiblePathException):
            word_path.find('cat', 'dog')
    def test_find_with_viable_path(self):
        """
        Ensure that find method return the expected dict
        """
        expected_dict = ['cat', 'cag', 'cog', 'dog']
        file_text = '\n'.join(['aaa', 'cat', 'cag', 'cog', 'caf', 'dog', 'fog', 'zog'])
        word_path = WordPath()
        with mock.patch('wp.main.open', create_mock_open(file_text=file_text)):
            word_path.load_word_list('test', 3)
        self.assertListEqual(expected_dict, word_path.find('cat', 'dog'))
|
StarcoderdataPython
|
126237
|
<filename>motion_analysis/gui/layout/motion_modelling_dialog_ui.py
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'D:\Research\physics\workspace\mosi_app_mgtools\motion_analysis\GUI\layout\.\motion_modelling_dialog.ui',
# licensing of 'D:\Research\physics\workspace\mosi_app_mgtools\motion_analysis\GUI\layout\.\motion_modelling_dialog.ui' applies.
#
# Created: Wed Jan 15 17:42:08 2020
# by: pyside2-uic running on PySide2 5.13.1
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    """Auto-generated (pyside2-uic) UI for the "Create Statistical Motion
    Model" dialog.

    NOTE(review): this class is generated from motion_modelling_dialog.ui;
    per the file header, manual edits will be lost on regeneration -- change
    the .ui file instead.
    """
    def setupUi(self, Dialog):
        """Build the widget hierarchy and layouts on *Dialog*."""
        Dialog.setObjectName("Dialog")
        Dialog.resize(426, 371)
        self.verticalLayout = QtWidgets.QVBoxLayout(Dialog)
        self.verticalLayout.setObjectName("verticalLayout")
        # Row: "Name" label + free-text name field.
        self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        self.label = QtWidgets.QLabel(Dialog)
        self.label.setObjectName("label")
        self.horizontalLayout_3.addWidget(self.label)
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_3.addItem(spacerItem)
        self.nameLineEdit = QtWidgets.QLineEdit(Dialog)
        self.nameLineEdit.setMaximumSize(QtCore.QSize(16777215, 16777215))
        self.nameLineEdit.setObjectName("nameLineEdit")
        self.horizontalLayout_3.addWidget(self.nameLineEdit)
        self.verticalLayout.addLayout(self.horizontalLayout_3)
        # Row: spline basis factor (numeric text field, default "20").
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.label_5 = QtWidgets.QLabel(Dialog)
        self.label_5.setObjectName("label_5")
        self.horizontalLayout.addWidget(self.label_5)
        spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem1)
        self.splineBasisFactorLineEdit = QtWidgets.QLineEdit(Dialog)
        self.splineBasisFactorLineEdit.setMaximumSize(QtCore.QSize(40, 16777215))
        self.splineBasisFactorLineEdit.setObjectName("splineBasisFactorLineEdit")
        self.horizontalLayout.addWidget(self.splineBasisFactorLineEdit)
        self.verticalLayout.addLayout(self.horizontalLayout)
        # Row: joint-selection convenience buttons.
        self.horizontalLayout_12 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_12.setObjectName("horizontalLayout_12")
        self.label_10 = QtWidgets.QLabel(Dialog)
        self.label_10.setObjectName("label_10")
        self.horizontalLayout_12.addWidget(self.label_10)
        self.selectAllJointsButton = QtWidgets.QPushButton(Dialog)
        self.selectAllJointsButton.setObjectName("selectAllJointsButton")
        self.horizontalLayout_12.addWidget(self.selectAllJointsButton)
        self.selectJointChildrenButton = QtWidgets.QPushButton(Dialog)
        self.selectJointChildrenButton.setObjectName("selectJointChildrenButton")
        self.horizontalLayout_12.addWidget(self.selectJointChildrenButton)
        self.deselectJointChildrenButton = QtWidgets.QPushButton(Dialog)
        self.deselectJointChildrenButton.setObjectName("deselectJointChildrenButton")
        self.horizontalLayout_12.addWidget(self.deselectJointChildrenButton)
        self.clearSelectedJointsButton = QtWidgets.QPushButton(Dialog)
        self.clearSelectedJointsButton.setObjectName("clearSelectedJointsButton")
        self.horizontalLayout_12.addWidget(self.clearSelectedJointsButton)
        spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_12.addItem(spacerItem2)
        self.verticalLayout.addLayout(self.horizontalLayout_12)
        # Table of joints: column 0 = selected checkbox, column 1 = joint name.
        self.jointTableWidget = QtWidgets.QTableWidget(Dialog)
        self.jointTableWidget.setMaximumSize(QtCore.QSize(16777215, 200))
        self.jointTableWidget.setColumnCount(2)
        self.jointTableWidget.setObjectName("jointTableWidget")
        self.jointTableWidget.setColumnCount(2)
        self.jointTableWidget.setRowCount(0)
        item = QtWidgets.QTableWidgetItem()
        self.jointTableWidget.setHorizontalHeaderItem(0, item)
        item = QtWidgets.QTableWidgetItem()
        self.jointTableWidget.setHorizontalHeaderItem(1, item)
        self.verticalLayout.addWidget(self.jointTableWidget)
        # Row: joint filter string + "Update" button.
        self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_4.setObjectName("horizontalLayout_4")
        self.label_6 = QtWidgets.QLabel(Dialog)
        self.label_6.setObjectName("label_6")
        self.horizontalLayout_4.addWidget(self.label_6)
        spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_4.addItem(spacerItem3)
        self.jointListLineEdit = QtWidgets.QLineEdit(Dialog)
        self.jointListLineEdit.setMaximumSize(QtCore.QSize(16777215, 16777215))
        self.jointListLineEdit.setText("")
        self.jointListLineEdit.setObjectName("jointListLineEdit")
        self.horizontalLayout_4.addWidget(self.jointListLineEdit)
        self.selectJointsFromStringButton = QtWidgets.QPushButton(Dialog)
        self.selectJointsFromStringButton.setObjectName("selectJointsFromStringButton")
        self.horizontalLayout_4.addWidget(self.selectJointsFromStringButton)
        self.verticalLayout.addLayout(self.horizontalLayout_4)
        # Bottom row: Accept / Cancel buttons, right-aligned.
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem4)
        self.selectButton = QtWidgets.QPushButton(Dialog)
        self.selectButton.setObjectName("selectButton")
        self.horizontalLayout_2.addWidget(self.selectButton)
        self.cancelButton = QtWidgets.QPushButton(Dialog)
        self.cancelButton.setObjectName("cancelButton")
        self.horizontalLayout_2.addWidget(self.cancelButton)
        self.verticalLayout.addLayout(self.horizontalLayout_2)
        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
    def retranslateUi(self, Dialog):
        """Assign all translatable UI strings (called from setupUi)."""
        Dialog.setWindowTitle(QtWidgets.QApplication.translate("Dialog", "Create Statistical Motion Model", None, -1))
        self.label.setText(QtWidgets.QApplication.translate("Dialog", "Name", None, -1))
        self.label_5.setText(QtWidgets.QApplication.translate("Dialog", "Spine Basis Factor", None, -1))
        self.splineBasisFactorLineEdit.setText(QtWidgets.QApplication.translate("Dialog", "20", None, -1))
        self.label_10.setText(QtWidgets.QApplication.translate("Dialog", "Joint List", None, -1))
        self.selectAllJointsButton.setText(QtWidgets.QApplication.translate("Dialog", "Select All", None, -1))
        self.selectJointChildrenButton.setText(QtWidgets.QApplication.translate("Dialog", "Select Children", None, -1))
        self.deselectJointChildrenButton.setText(QtWidgets.QApplication.translate("Dialog", "Clear Children", None, -1))
        self.clearSelectedJointsButton.setText(QtWidgets.QApplication.translate("Dialog", "Clear All", None, -1))
        self.jointTableWidget.horizontalHeaderItem(0).setText(QtWidgets.QApplication.translate("Dialog", "Selected", None, -1))
        self.jointTableWidget.horizontalHeaderItem(1).setText(QtWidgets.QApplication.translate("Dialog", "Name", None, -1))
        self.label_6.setText(QtWidgets.QApplication.translate("Dialog", "Joint Filter List", None, -1))
        self.selectJointsFromStringButton.setText(QtWidgets.QApplication.translate("Dialog", "Update", None, -1))
        self.selectButton.setText(QtWidgets.QApplication.translate("Dialog", "Accept", None, -1))
        self.cancelButton.setText(QtWidgets.QApplication.translate("Dialog", "Cancel", None, -1))
|
StarcoderdataPython
|
120583
|
import bisect
import copy
import datetime
import uuid
import operator
import pytz
import calendar
from elasticsearch_dsl.query import Range, Terms
def string_to_datetime(timestamp):
    """Parse an ISO 8601 "Z"-suffixed timestamp string into a datetime.

    Accepts both microsecond-precision and whole-second forms; a ValueError
    from the second parse propagates for anything else.
    """
    fractional_fmt = "%Y-%m-%dT%H:%M:%S.%fZ"
    whole_second_fmt = "%Y-%m-%dT%H:%M:%SZ"
    try:
        return datetime.datetime.strptime(timestamp, fractional_fmt)
    except ValueError:
        return datetime.datetime.strptime(timestamp, whole_second_fmt)
def get_version_field(stix_object):
    """Return the value that versions *stix_object*.

    Precedence: "version" > "modified" > "created" > "date_added"
    (KeyError if none of them is present).
    """
    for field in ("version", "modified", "created"):
        if field in stix_object:
            return stix_object[field]
    return stix_object["date_added"]
def locate_version(stix_objects, locator):
    """Collapse *stix_objects* to one entry per id, keeping the version the
    *locator* predicate prefers (e.g. operator.gt keeps the newest).

    Returns the surviving objects sorted by id.
    """
    def _version_of(obj):
        # Inlined version-field lookup: version > modified > created > date_added.
        for field in ("version", "modified", "created"):
            if field in obj:
                return obj[field]
        return obj["date_added"]

    seen_ids = []
    survivors = []
    for candidate in stix_objects:
        slot = bisect.bisect_left(seen_ids, candidate["id"])
        if not survivors or slot >= len(seen_ids) or seen_ids[slot] != candidate["id"]:
            # First time this id appears: remember it (sorted insert).
            seen_ids.insert(slot, candidate["id"])
            survivors.insert(slot, candidate)
        elif locator(_version_of(candidate), _version_of(survivors[slot])):
            # Same id already present: keep whichever version wins.
            survivors[slot] = candidate
    return survivors
def check_for_dupes(final_objects, final_ids, matched_objects):
    """Merge *matched_objects* into the id-sorted final_objects/final_ids
    pair, skipping any (id, version) combination already present.

    Mutates both lists in place; final_ids stays sorted and positionally
    aligned with final_objects.
    """
    def _version_of(obj):
        # Inlined version-field lookup: version > modified > created > date_added.
        for field in ("version", "modified", "created"):
            if field in obj:
                return obj[field]
        return obj["date_added"]

    for candidate in matched_objects:
        slot = bisect.bisect_left(final_ids, candidate["id"])
        if not final_objects or slot > len(final_ids) - 1 or final_ids[slot] != candidate["id"]:
            # Unseen id: plain sorted insert.
            final_ids.insert(slot, candidate["id"])
            final_objects.insert(slot, candidate)
            continue
        # Id already present: scan its run of entries for this exact version.
        candidate_version = _version_of(candidate)
        duplicate = False
        while slot != len(final_ids) and candidate["id"] == final_ids[slot]:
            if _version_of(final_objects[slot]) == candidate_version:
                duplicate = True
                break
            slot = slot + 1
        if not duplicate:
            final_ids.insert(slot, candidate["id"])
            final_objects.insert(slot, candidate)
class Helper:
    """Stateless helpers for version selection, pagination, timestamp
    conversion and manifest maintenance used by the TAXII server code."""

    @classmethod
    def fetch_objects_by_versions(cls, stix_objects, versions):
        """Return the subset of *stix_objects* matching the *versions*
        selector: a comma-separated string of "first", "last", "all"
        and/or explicit version timestamps (None means "last")."""
        final_objects = []
        final_ids = []
        if versions is None:
            versions = "last"
        if "all" in versions:
            return stix_objects
        versions = versions.split(",")
        specific_versions = []
        for version in versions:
            # BUG FIX: was `version is not 'first'` / `is not 'last'` --
            # identity comparison against string literals is unreliable and
            # raises a SyntaxWarning on modern CPython; compare by value.
            if version != 'first' and version != 'last':
                specific_versions.append(version)
        if specific_versions:
            object_ids = []
            matched_objects = []
            for stix_object in stix_objects:
                stix_object_version = get_version_field(stix_object)
                if stix_object_version in specific_versions:
                    position = bisect.bisect_left(object_ids, stix_object["id"])
                    object_ids.insert(position, stix_object["id"])
                    matched_objects.insert(position, stix_object)
            final_ids = object_ids
            final_objects = matched_objects
        if "first" in versions:
            matched_objects = locate_version(stix_objects, operator.lt)
            check_for_dupes(final_objects, final_ids, matched_objects)
        if "last" in versions:
            matched_objects = locate_version(stix_objects, operator.gt)
            check_for_dupes(final_objects, final_ids, matched_objects)
        return final_objects

    @classmethod
    def paginate(cls, pages_name, items, more=False, next_id=None):
        """Wrap *items* in an envelope dict keyed by *pages_name*, adding
        "next"/"more" pagination markers for object and version listings."""
        pages = {}
        if items:
            pages[pages_name] = items
        if pages_name == "objects" or pages_name == "versions":
            if next_id and pages:
                pages["next"] = next_id
            if pages:
                pages["more"] = more
        return pages

    @classmethod
    def determine_version(cls, new_obj, request_time):
        """Grab the modified time if present, if not grab created time,
        if not grab request time provided by server."""
        return new_obj.get("modified", new_obj.get("created", cls.datetime_to_string(request_time)))

    @classmethod
    def determine_spec_version(cls, obj):
        """Given a STIX 2.x object, determine its spec version."""
        missing = ("created", "modified")
        if all(x not in obj for x in missing):
            # Special case: only SCOs are 2.1 objects and they don't have a spec_version
            # For now the only way to identify one is checking the created and modified
            # are both missing.
            return "2.1"
        return obj.get("spec_version", "2.0")

    @classmethod
    def get_timestamp(cls):
        """Get current time with UTC offset"""
        return datetime.datetime.now(tz=pytz.UTC)

    @classmethod
    def skip_special_characters(cls, string):
        """Escape hyphens in *string* so it is safe inside a query expression.

        (The previous docstring was a copy-paste of get_timestamp's.)
        """
        return string.replace('-', '\\-')

    @classmethod
    def datetime_to_string(cls, dttm):
        """Given a datetime instance, produce the string representation
        with microsecond precision"""
        # 1. Convert to timezone-aware
        # 2. Convert to UTC
        # 3. Format in ISO format with microsecond precision
        if dttm.tzinfo is None or dttm.tzinfo.utcoffset(dttm) is None:
            # dttm is timezone-naive; assume UTC
            zoned = pytz.UTC.localize(dttm)
        else:
            zoned = dttm.astimezone(pytz.UTC)
        return zoned.strftime("%Y-%m-%dT%H:%M:%S.%fZ")

    @classmethod
    def datetime_to_string_stix(cls, dttm):
        """Given a datetime instance, produce the string representation
        with millisecond precision"""
        # 1. Convert to timezone-aware
        # 2. Convert to UTC
        # 3. Format in ISO format with millisecond precision,
        #    except for objects defined with higher precision
        # 4. Add "Z"
        if dttm.tzinfo is None or dttm.tzinfo.utcoffset(dttm) is None:
            # dttm is timezone-naive; assume UTC
            zoned = pytz.UTC.localize(dttm)
        else:
            zoned = dttm.astimezone(pytz.UTC)
        ts = zoned.strftime("%Y-%m-%dT%H:%M:%S")
        ms = zoned.strftime("%f")
        # Keep full microseconds only when sub-millisecond digits are non-zero.
        if len(ms[3:].rstrip("0")) >= 1:
            ts = ts + "." + ms + "Z"
        else:
            ts = ts + "." + ms[:3] + "Z"
        return ts

    @classmethod
    def datetime_to_float(cls, dttm):
        """Given a datetime instance, return its representation as a float"""
        # Based on this solution: https://stackoverflow.com/questions/30020988/python3-datetime-timestamp-in-python2
        if dttm.tzinfo is None:
            return calendar.timegm(dttm.utctimetuple()) + dttm.microsecond / 1e6
        else:
            return (dttm - datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC)).total_seconds()

    @classmethod
    def float_to_datetime(cls, timestamp_float):
        """Given a floating-point number, produce a datetime instance"""
        return datetime.datetime.utcfromtimestamp(timestamp_float)

    @classmethod
    def generate_status(
        cls, request_time, status, succeeded, failed, pending,
        successes=None, failures=None, pendings=None,
    ):
        """Generate Status Resource as defined in TAXII 2.1 section (4.3.1) <link here>`__."""
        status = {
            "id": str(uuid.uuid4()),
            "status": status,
            "request_timestamp": request_time,
            "total_count": succeeded + failed + pending,
            "success_count": succeeded,
            "failure_count": failed,
            "pending_count": pending,
        }
        if successes:
            status["successes"] = successes
        if failures:
            status["failures"] = failures
        if pendings:
            status["pendings"] = pendings
        return status

    @classmethod
    def generate_status_details(cls, id, version, message=None):
        """Generate Status Details as defined in TAXII 2.1 section (4.3.1) <link here>`__."""
        status_details = {
            "id": id,
            "version": version
        }
        if message:
            status_details["message"] = message
        return status_details

    @classmethod
    def get_custom_headers(cls, manifest_resource):
        """Generates the X-TAXII-Date-Added headers based on a manifest resource"""
        headers = {}
        times = sorted(map(lambda x: x["date_added"], manifest_resource.get("objects", [])))
        if len(times) > 0:
            headers["X-TAXII-Date-Added-First"] = times[0]
            headers["X-TAXII-Date-Added-Last"] = times[-1]
        return headers

    @classmethod
    def parse_request_parameters(cls, filter_args):
        """Generates a dict with params received from client"""
        session_args = {}
        for key, value in filter_args.items():
            # "limit" and "next" control pagination, not matching.
            if key != "limit" and key != "next":
                session_args[key] = set(value.replace(" ", "").split(","))
        return session_args

    @classmethod
    def find_att(cls, obj):
        """
        Used for finding the version attribute of an ambiguous object. Manifests
        use the "version" field, but objects will use "modified", or if that's not
        available, the "created" field.
        Args:
            obj (dict): manifest or stix object
        Returns:
            datetime parsed from the field of the object to use for versioning
        """
        # BUG FIX: string_to_datetime is a module-level function, not a
        # Helper classmethod -- cls.string_to_datetime raised AttributeError.
        if "version" in obj:
            return string_to_datetime(obj["version"])
        elif "modified" in obj:
            return string_to_datetime(obj["modified"])
        elif "created" in obj:
            return string_to_datetime(obj["created"])
        else:
            return string_to_datetime(obj["date_added"])

    @classmethod
    def find_version_attribute(cls, obj):
        """Return the name of the field that versions *obj* (or None if
        none of the known fields is present)."""
        if "modified" in obj:
            return "modified"
        elif "created" in obj:
            return "created"
        elif "date_added" in obj:
            return "date_added"

    @classmethod
    def check_for_dupes(cls, final_match, final_track, res):
        """Merge *res* into the id-sorted final_match/final_track pair in
        place, skipping (id, version) combinations already present."""
        for obj in res:
            found = 0
            pos = bisect.bisect_left(final_track, obj["id"])
            if not final_match or pos > len(final_track) - 1 or final_track[pos] != obj["id"]:
                final_track.insert(pos, obj["id"])
                final_match.insert(pos, obj)
            else:
                obj_time = cls.find_att(obj)
                # Scan the run of entries sharing this id for the same version.
                while pos != len(final_track) and obj["id"] == final_track[pos]:
                    if cls.find_att(final_match[pos]) == obj_time:
                        found = 1
                        break
                    else:
                        pos = pos + 1
                if found == 1:
                    continue
                else:
                    final_track.insert(pos, obj["id"])
                    final_match.insert(pos, obj)

    @classmethod
    def check_version(cls, data, relate):
        """Collapse *data* to one object per id, keeping the version the
        *relate* predicate prefers (e.g. operator.gt keeps the newest)."""
        id_track = []
        res = []
        for obj in data:
            pos = bisect.bisect_left(id_track, obj["id"])
            if not res or pos >= len(id_track) or id_track[pos] != obj["id"]:
                id_track.insert(pos, obj["id"])
                res.insert(pos, obj)
            else:
                if relate(cls.find_att(obj), cls.find_att(res[pos])):
                    res[pos] = obj
        return res

    @classmethod
    def remove_hidden_field(cls, objs):
        """Strip the internal "date_added" bookkeeping field before objects
        are returned to clients. Mutates *objs* in place."""
        for obj in objs:
            if "date_added" in obj:
                del obj["date_added"]

    @classmethod
    def find_added_headers(cls, headers, manifest, obj):
        """Fill the X-TAXII-Date-Added-First/Last headers from the manifest
        entry matching *obj* (first call sets First, later calls set Last)."""
        obj_time = cls.find_att(obj)
        for man in manifest:
            if man["id"] == obj["id"] and obj_time == cls.find_att(man):
                if len(headers) == 0:
                    headers["X-TAXII-Date-Added-First"] = man["date_added"]
                else:
                    headers["X-TAXII-Date-Added-Last"] = man["date_added"]

    @classmethod
    def update_manifest(cls, new_obj, api_info, collection_id, request_time):
        """Append a manifest entry for *new_obj* to the collection named by
        *collection_id* and record its media type. Mutates *api_info*."""
        collections = api_info.get("collections", [])
        media_type_fmt = "application/stix+json;version={}"
        for collection in collections:
            if collection_id == collection["id"]:
                version = cls.determine_version(new_obj, request_time)
                request_time = cls.datetime_to_string(request_time)
                media_type = media_type_fmt.format(cls.determine_spec_version(new_obj))
                # version is a single value now, therefore a new manifest is always created
                collection["manifest"].append(
                    {
                        "id": new_obj["id"],
                        "date_added": request_time,
                        "version": version,
                        "media_type": media_type,
                    },
                )
                # if the media type is new, attach it to the collection
                if media_type not in collection["media_types"]:
                    collection["media_types"].append(media_type)
                # quit once you have found the collection that needed updating
                break
class Filter:
def __init__(self, filter_args):
self.filter_args = filter_args
def sort_and_paginate(self, data, limit, manifest):
temp = None
next_save = {}
headers = {}
new = []
if len(data) == 0:
return new, next_save, headers
if manifest:
manifest.sort(key=lambda x: x['date_added'])
for man in manifest:
man_time = Helper.find_att(man)
for check in data:
check_time = Helper.find_att(check)
if check['id'] == man['id'] and check_time == man_time:
if len(headers) == 0:
headers["X-TAXII-Date-Added-First"] = man["date_added"]
new.append(check)
temp = man
if len(new) == limit:
headers["X-TAXII-Date-Added-Last"] = man["date_added"]
break
if limit and limit < len(data):
next_save = new[limit:]
new = new[:limit]
else:
headers["X-TAXII-Date-Added-Last"] = temp["date_added"]
else:
data.sort(key=lambda x: x['date_added'])
if limit and limit < len(data):
next_save = data[limit:]
data = data[:limit]
headers["X-TAXII-Date-Added-First"] = data[0]["date_added"]
headers["X-TAXII-Date-Added-Last"] = data[-1]["date_added"]
new = data
return new, next_save, headers
@staticmethod
def filter_by_id(data, id_):
id_ = id_.split(",")
match_objects = []
for obj in data:
if "id" in obj and any(s == obj["id"] for s in id_):
match_objects.append(obj)
return match_objects
def filter_by_added_after(self, data, manifest_info, added_after_date):
added_after_timestamp = string_to_datetime()
new_results = []
# for manifest objects and versions
if manifest_info is None:
for obj in data:
if string_to_datetime() > added_after_timestamp:
new_results.append(obj)
# for other objects with manifests
else:
for obj in data:
obj_time = Helper.find_att(obj)
for item in manifest_info:
item_time = Helper.find_att(item)
if item["id"] == obj["id"] and item_time == obj_time and \
string_to_datetime() > added_after_timestamp:
new_results.append(obj)
break
return new_results
def filter_by_version(self, data, version):
# final_match is a sorted list of objects
final_match = []
# final_track is a sorted list of id's
final_track = []
# return most recent object versions unless otherwise specified
if version is None:
version = "last"
version_indicators = version.split(",")
if "all" in version_indicators:
# if "all" is in the list, just return everything
return data
actual_dates = [string_to_datetime() for x in version_indicators if x != "first" and x != "last"]
# if a specific version is given, filter for objects with that value
if actual_dates:
id_track = []
res = []
for obj in data:
obj_time = Helper.find_att(obj)
if obj_time in actual_dates:
pos = bisect.bisect_left(id_track, obj["id"])
id_track.insert(pos, obj["id"])
res.insert(pos, obj)
final_match = res
final_track = id_track
if "first" in version_indicators:
res = Helper.check_version(data, operator.lt)
Helper.check_for_dupes(final_match, final_track, res)
if "last" in version_indicators:
res = Helper.check_version(data, operator.gt)
Helper.check_for_dupes(final_match, final_track, res)
return final_match
def filter_by_type(self, data, type_):
type_ = type_.split(",")
match_objects = []
for obj in data:
if "type" in obj and any(s == obj["type"] for s in type_):
match_objects.append(obj)
elif "id" in obj and any(s == obj["id"].split("--")[0] for s in type_):
match_objects.append(obj)
return match_objects
def filter_by_spec_version(self, data, spec_):
match_objects = []
if spec_:
spec_ = spec_.split(",")
for obj in data:
if "media_type" in obj:
if any(s == obj["media_type"].split("version=")[1] for s in spec_):
match_objects.append(obj)
elif any(s == Helper.determine_spec_version(obj) for s in spec_):
match_objects.append(obj)
else:
for obj in data:
add = True
if "media_type" in obj:
s1 = obj["media_type"].split("version=")[1]
else:
s1 = Helper.determine_spec_version(obj)
for match in data:
if "media_type" in match:
s2 = match["media_type"].split("version=")[1]
else:
s2 = Helper.determine_spec_version(match)
if obj["id"] == match["id"] and s2 > s1:
add = False
if add:
match_objects.append(obj)
return match_objects
def process_filter(self, data, allowed=(), manifest_info=(), limit=None):
    """Apply the requested URL filters to *data* in sequence, then paginate.

    Filter order: match[type], match[id], added_after, match[spec_version],
    match[version]; each match[...] filter is applied only when its name is
    in *allowed*.

    :param data: list of object dicts to filter
    :param allowed: names of the match[...] filters permitted on this endpoint
    :param manifest_info: manifest entries used for added_after and sorting
    :param limit: page size for pagination
    :return: (matching objects, leftover objects for the next page, headers)
    """
    requested_types = self.filter_args.get("match[type]")
    if requested_types and "type" in allowed:
        working_set = self.filter_by_type(data, requested_types)
    else:
        # deep-copy so later stages never mutate the caller's objects
        working_set = copy.deepcopy(data)
    requested_ids = self.filter_args.get("match[id]")
    if requested_ids and "id" in allowed:
        working_set = self.filter_by_id(working_set, requested_ids)
    added_after_date = self.filter_args.get("added_after")
    if added_after_date:
        working_set = self.filter_by_added_after(working_set, manifest_info, added_after_date)
    if "spec_version" in allowed:
        working_set = self.filter_by_spec_version(working_set, self.filter_args.get("match[spec_version]"))
    # the version filter also removes duplicate object versions as appropriate
    if "version" in allowed:
        working_set = self.filter_by_version(working_set, self.filter_args.get("match[version]"))
    # sort objects by date_added of manifest and paginate as necessary
    final_match, save_next, headers = self.sort_and_paginate(working_set, limit, manifest_info)
    return final_match, save_next, headers
@staticmethod
def get_total_results(response_dict):
total_results = response_dict.get('hits', {}).get('total')
if not str(total_results).isdigit():
total_results = total_results.get('value')
total_dict = response_dict.get('hits').get('total')
else:
total_dict = {
'value': total_results,
}
return total_dict, total_results
class Pagination:
    """In-memory paging of result sets across requests.

    Leftover objects of a truncated response are stored under a random
    "next" token; the follow-up request presents the token together with
    the same filter parameters to receive the next page.
    """

    def __init__(self, **kwargs):
        # token -> {"objects": [...], "args": {...}, "request_time": float}
        self.next = {}

    def set_next(self, objects, args):
        """Store leftover *objects* under a fresh token and return the token.

        :param objects: objects that did not fit into the current page
        :param args: the request's filter parameters (mutated in place:
            "limit" is dropped and values are split/sorted so the follow-up
            comparison is order-insensitive)
        :return: the new "next" token (uuid4 string)
        """
        u = str(uuid.uuid4())
        if "limit" in args:
            del args["limit"]
        # normalize each value to a sorted list so later equality checks do
        # not depend on the order of comma-separated items
        for arg in args:
            new_list = args[arg].split(',')
            new_list.sort()
            args[arg] = new_list
        d = {"objects": objects, "args": args, "request_time": Helper.datetime_to_float(Helper.get_timestamp())}
        self.next[u] = d
        return u

    def get_next(self, filter_args, manifest, lim):
        """Return up to *lim* stored objects for the token in filter_args["next"].

        :param filter_args: request parameters, must contain "next" and repeat
            the original filters exactly
        :param manifest: manifest entries used to compute the added-first/last headers
        :param lim: page size
        :return: (objects, more_available, headers, next_token_or_None)
        :raises Exception: (message, 400) when the token is unknown or the
            filter parameters changed between requests
        """
        n = filter_args["next"]
        if n not in self.next:
            raise Exception("The server did not understand the request or filter parameters: 'next' not valid", 400)
        # normalize incoming values the same way set_next() did
        for arg in filter_args:
            new_list = filter_args[arg].split(',')
            new_list.sort()
            filter_args[arg] = new_list
        # BUG FIX: use pop() with a default — "limit" may legitimately be
        # absent from the follow-up request; the unconditional del raised
        # KeyError instead of the intended 400-style validation error
        filter_args.pop("next", None)
        filter_args.pop("limit", None)
        if filter_args != self.next[n]["args"]:
            raise Exception("The server did not understand the request or filter parameters: "
                            "params changed over subsequent transaction", 400)
        t = self.next[n]["objects"]
        length = len(t)
        headers = {}
        ret = []
        if length <= lim:
            limit = length
            more = False
            nex = None
        else:
            limit = lim
            more = True
        for i in range(0, limit):
            x = t.pop(0)
            ret.append(x)
            # record headers from the first and last object of the page
            if len(headers) == 0:
                Helper.find_added_headers(headers, manifest, x)
            if i == limit - 1:
                Helper.find_added_headers(headers, manifest, x)
        if not more:
            # page exhausted: the token is no longer valid
            self.next.pop(n)
        else:
            nex = n
        return ret, more, headers, nex
|
StarcoderdataPython
|
63321
|
# Python 2 side-effect shim: announces itself at import time, then re-exports
# every public name from angr's VEX "get" expression module.
print '... Importing simuvex/engines/vex/expressions/get.py ...'
from angr.engines.vex.expressions.get import *
|
StarcoderdataPython
|
4820190
|
<filename>GNS3/ConfigParsers/cisco_ios.py
import re
# Regex to find the host name, e.g. "hostname R1" captures "R1"
hostname_cisco_regex = re.compile(r'hostname\s+(?P<name>\S+)')
# Regex to find FastEthernet interface definitions together with the assigned
# IPv4 address and mask; the negative lookahead skips interfaces configured
# with "no ip address"
interface_cisco_regex = re.compile(
    r'interface\s+FastEthernet(?P<adapter>\d)/(?P<port>\d)(?!\n no ip address)(.*?)\n\s*'
    r'ip\s+address\s+'
    r'(?P<address1>\d\d?\d?)\.(?P<address2>\d\d?\d?)\.'
    r'(?P<address3>\d\d?\d?)\.(?P<address4>\d\d?\d?)\s+'
    r'(?P<mask1>\d\d?\d?)\.(?P<mask2>\d\d?\d?)\.'
    r'(?P<mask3>\d\d?\d?)\.(?P<mask4>\d\d?\d?)',
    re.DOTALL | re.MULTILINE
)
# Regex to find the router id (there is no differentiation between ospf or bgp router-id)
router_id_cisco_regex = re.compile(r'router-id\s+(?P<address>\d\d?\d?\.\d\d?\d?\.\d\d?\d?\.\d\d?\d?)')
def get_hostname(config):
    """
    Find the host name in a Cisco router configuration

    :param config: Configuration as string
    :return: host name as string if one is found, None otherwise
    """
    host = hostname_cisco_regex.search(config)
    # BUG FIX: re.search returns None on no match; the original crashed with
    # AttributeError here. Return None instead, consistent with get_router_id.
    if host is None:
        return None
    return host.group('name')
def get_router_id(config):
    """
    Find the router id if it is defined in the given Cisco config

    :param config: Configuration as string
    :return: Router id as string if one is found, None otherwise
    """
    match = router_id_cisco_regex.search(config)
    return match.group('address') if match is not None else None
def get_interfaces(config):
    """
    Find all configured interfaces in the given Cisco config and return their definitions

    :param config: Cisco configuration as string
    :return: list of tuples (interface address as string, mask as string,
        adapter of the interface as int, port of the interface as int)
    """
    results = []
    for match in interface_cisco_regex.finditer(config):
        # Match.group with several names returns the groups as a tuple,
        # which joins directly into dotted-quad notation.
        address = '.'.join(match.group('address1', 'address2', 'address3', 'address4'))
        netmask = '.'.join(match.group('mask1', 'mask2', 'mask3', 'mask4'))
        results.append((address, netmask, int(match.group('adapter')), int(match.group('port'))))
    return results
|
StarcoderdataPython
|
3211546
|
<gh_stars>0
from os import write
import sympy
n = 13
def prime_test(number, witness):
    """Single round of the Miller-Rabin primality test.

    Returns True when *number* passes the round for this *witness* — i.e.
    number is prime, or witness is a "liar" for the composite number.
    Returns False when the witness proves number composite.

    :param number: odd integer to test (even numbers always return False)
    :param witness: test base, must be smaller than number
    :raises ValueError: if witness >= number
    """
    if witness >= number:
        raise ValueError("witness must be smaller than the number")
    elif number % 2 == 0:
        return False
    # write number - 1 as 2**d * factor with factor odd
    # BUG FIX: use floor division — the original "/" produced a float
    factor = (number - 1) // 2
    d = 1
    while factor % 2 == 0:
        d += 1
        factor //= 2
    x = pow(witness, factor, number)
    if x == 1 or x == number - 1:
        return True
    # BUG FIX: the original raised the witness to factor * 2**d (== number-1)
    # on every iteration, degrading the whole check to a Fermat test.
    # Square the running value instead and look for number - 1.
    for _ in range(d - 1):
        x = pow(x, 2, number)
        if x == number - 1:
            return True
    return False
def determine_liars(pow_of_two):
    """For each power-of-two range up to 2**pow_of_two, count how often each
    even witness "lies" (passes a composite number through prime_test) and
    write the cumulative counts to data/liars-<index>.csv.

    NOTE: the counts are cumulative across ranges — the dict is deliberately
    not reset between index iterations.

    :param pow_of_two: highest exponent to scan (ranges [2**(i-1), 2**i))
    """
    liars = {}
    for index in range(1, pow_of_two + 1):
        hi_bound = 2 ** index
        lo_bound = hi_bound >> 1
        for number in range(lo_bound, hi_bound):
            # primes cannot produce liars; skip them
            if sympy.isprime(number):
                continue
            for witness in range(2, number, 2):
                if prime_test(number, witness) == True:
                    key = "_" + str(witness)
                    liars[key] = liars.get(key, 0) + 1
        with open("data/liars-" + str(index) + ".csv", "w") as f:
            f.write("witness,lie_count\n")
            for liar in liars.keys():
                f.write(liar[1:] + "," + str(liars.get(liar)) + "\n")
# Run the liar census for all ranges up to 2**n.
determine_liars(n)
# print(prime_test(9, 2))
|
StarcoderdataPython
|
89648
|
#!/usr/bin/env python
# Copyright (c) 2014, Robot Control and Pattern Recognition Group, Warsaw University of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Warsaw University of Technology nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYright HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import roslib
roslib.load_manifest('velma_common')
import rospy
import sensor_msgs.msg
import geometry_msgs.msg
import std_msgs.msg
import actionlib
import actionlib_msgs.msg
import cartesian_trajectory_msgs.msg
import barrett_hand_controller_msgs.msg
import controller_manager_msgs.srv
import std_srvs.srv
import control_msgs.msg
import velma_fk_ik
import headkinematics
import tf
from tf import *
from tf.transformations import *
import tf_conversions.posemath as pm
from tf2_msgs.msg import *
import PyKDL
import math
import numpy as np
import copy
import urdf_parser_py.urdf
class MoveHandAction(object):
    """Fake BarrettHand action server: steps the shared simulated joint state
    towards the goal finger positions at the commanded velocities."""
    # create messages that are used to publish feedback/result
    _feedback = barrett_hand_controller_msgs.msg.BHMoveFeedback()
    _result = barrett_hand_controller_msgs.msg.BHMoveResult()

    def __init__(self, name, robot_state):
        # robot_state: shared VelmaFake instance whose joint state is mutated
        self.robot_state = robot_state
        self._action_name = name
        self._as = actionlib.SimpleActionServer(self._action_name, barrett_hand_controller_msgs.msg.BHMoveAction, execute_cb=self.execute_cb, auto_start = False)
        self._as.start()

    def execute_cb(self, goal):
        """Move each commanded joint towards goal.q, one 10 ms tick at a time,
        until every joint is within tolerance, then report success."""
        while True:
            js_pos = self.robot_state.getJsPos()
            allIdle = True
            for dof_idx in range(len(goal.q)):
                joint_name = goal.name[dof_idx]
                diff = goal.q[dof_idx] - js_pos[joint_name]
                if abs(diff) > 0.0001:
                    allIdle = False
                    # clamp the step to one tick's travel at velocity goal.v
                    if abs(diff) > abs(goal.v[dof_idx] * 0.01):
                        if diff > 0:
                            diff = goal.v[dof_idx] * 0.01
                        else:
                            diff = -goal.v[dof_idx] * 0.01
                    self.robot_state.js.position[ self.robot_state.joint_name_idx_map[joint_name] ] += diff
            # re-apply limits and mimic coupling after each simulation step
            self.robot_state.updateJointLimits(self.robot_state.js)
            self.robot_state.updateMimicJoints(self.robot_state.js)
            if allIdle:
                break
            rospy.sleep(0.01)
        self._result.error_code = 0
        self._as.set_succeeded(self._result)
class MoveImpAction(object):
    """Fake cartesian impedance action server: impedance changes are not
    simulated, so every goal is accepted and immediately reported successful."""
    # create messages that are used to publish feedback/result
    _feedback = cartesian_trajectory_msgs.msg.CartesianImpedanceFeedback()
    _result = cartesian_trajectory_msgs.msg.CartesianImpedanceResult()

    def __init__(self, name, robot_state):
        self.robot_state = robot_state
        self._action_name = name
        self._as = actionlib.SimpleActionServer(
            self._action_name,
            cartesian_trajectory_msgs.msg.CartesianImpedanceAction,
            execute_cb=self.execute_cb, auto_start = False)
        self._as.start()

    def execute_cb(self, goal):
        """Accept any impedance goal and succeed right away."""
        self._result.error_code = 0
        self._as.set_succeeded(self._result)
class MoveToolAction(object):
    """Fake tool-trajectory action server: interpolates the wrist->tool
    transform (T_W_T) along the received trajectory while keeping the
    commanded tool pose in the base frame fixed (the wrist moves instead)."""
    # create messages that are used to publish feedback/result
    _feedback = cartesian_trajectory_msgs.msg.CartesianTrajectoryFeedback()
    _result = cartesian_trajectory_msgs.msg.CartesianTrajectoryResult()

    def __init__(self, name, robot_state, wrist_name):
        # robot_state: shared VelmaFake instance; wrist_name: e.g. "right_arm_7_link"
        self.robot_state = robot_state
        self.wrist_name = wrist_name
        self._action_name = name
        self._as = actionlib.SimpleActionServer(self._action_name, cartesian_trajectory_msgs.msg.CartesianTrajectoryAction, execute_cb=self.execute_cb, auto_start = False)
        self._as.start()

    def execute_cb(self, goal):
        # helper variables
        success = True
        print "MoveCartesianTrajectory ", self._action_name, " points: ", len(goal.trajectory.points)
        init_T_B_W = self.robot_state.fk_solver.calculateFk(self.wrist_name, self.robot_state.getJsPos())
        init_T_W_T = self.robot_state.T_W_T[self.wrist_name]
        current_dest_point_idx = 0
        while True:
            if self._as.is_preempt_requested():
                rospy.loginfo('%s: Preempted' % self._action_name)
                self._as.set_preempted()
                return
            time_now = rospy.Time.now()
            time_from_start = (time_now - goal.trajectory.header.stamp).to_sec()
            if time_from_start <= 0:
                # trajectory has not started yet
                rospy.sleep(0.01)
                continue
            # advance to the first trajectory point that lies in the future
            while time_from_start > goal.trajectory.points[current_dest_point_idx].time_from_start.to_sec():
                current_dest_point_idx += 1
                if current_dest_point_idx >= len(goal.trajectory.points):
                    break
            if current_dest_point_idx >= len(goal.trajectory.points):
                break
            dest_time = goal.trajectory.points[current_dest_point_idx].time_from_start.to_sec()
            dest_T_W_T = pm.fromMsg(goal.trajectory.points[current_dest_point_idx].pose)
            if current_dest_point_idx > 0:
                prev_time = goal.trajectory.points[current_dest_point_idx-1].time_from_start.to_sec()
                prev_T_W_T = pm.fromMsg(goal.trajectory.points[current_dest_point_idx-1].pose)
            else:
                prev_time = 0.0
                prev_T_W_T = init_T_W_T
            # interpolation factor within the current segment, expected 0..1
            f = (time_from_start-prev_time) / (dest_time - prev_time)
            if f < 0 or f > 1:
                print "error: MoveCartesianTrajectory f: ", str(f)
            diff_T_W_T = PyKDL.diff(prev_T_W_T, dest_T_W_T, 1.0)
            T_W_Ti = PyKDL.addDelta(prev_T_W_T, diff_T_W_T, f)
            # keep the commanded tool pose T_B_Tcmd constant: move the wrist
            T_B_Tcmd = self.robot_state.arm_cmd[self.wrist_name]
            T_B_Wi = T_B_Tcmd * T_W_Ti.Inverse()
            self.robot_state.T_W_T[self.wrist_name] = T_W_Ti
            if not self.robot_state.joint_impedance_active:
                q_out = self.robot_state.fk_solver.simulateTrajectory(self.wrist_name, self.robot_state.getJsPos(), T_B_Wi)
                if q_out == None:
                    self._result.error_code = -3 # PATH_TOLERANCE_VIOLATED
                    self._as.set_aborted(self._result)
                    return
                # write the IK solution into the published joint state
                for i in range(7):
                    joint_name = self.robot_state.fk_solver.ik_joint_state_name[self.wrist_name][i]
                    self.robot_state.js.position[ self.robot_state.joint_name_idx_map[joint_name] ] = q_out[i]
            # publish the feedback
            self._feedback.header.stamp = time_now
            self._feedback.desired = pm.toMsg(T_W_Ti)
            self._feedback.actual = pm.toMsg(T_W_Ti)
            self._feedback.error = pm.toMsg(PyKDL.Frame())
            self._as.publish_feedback(self._feedback)
            rospy.sleep(0.01)
        # snap exactly onto the final trajectory point
        self.robot_state.T_W_T[self.wrist_name] = dest_T_W_T
        self._result.error_code = 0
        self._as.set_succeeded(self._result)
class MoveJointTrajectory(object):
# create messages that are used to publish feedback/result
_feedback = control_msgs.msg.FollowJointTrajectoryFeedback()
_result = control_msgs.msg.FollowJointTrajectoryResult()
def __init__(self, name, robot_state):
self.robot_state = robot_state
self._action_name = name
self._as = actionlib.SimpleActionServer(self._action_name, control_msgs.msg.FollowJointTrajectoryAction, execute_cb=self.execute_cb, auto_start = False)
self._as.start()
def execute_cb(self, goal):
# helper variables
success = True
print "MoveJointTrajectory ", self._action_name, " points: ", len(goal.trajectory.points)
if not self.robot_state.joint_impedance_active:
self._result.error_code = 0
self._as.set_succeeded(self._result)
print "ERROR MoveJointTrajectory wrong state"
# verify the trajectory
if len(goal.trajectory.points) == 0:
self._result.error_code = -1 # INVALID_GOAL
self._as.set_aborted(self._result)
return
init_pos = []
for joint_name in goal.trajectory.joint_names:
joint_idx = self.robot_state.joint_name_idx_map[joint_name]
init_pos.append(self.robot_state.js.position[joint_idx])
current_dest_point_idx = 0
while True:
if self._as.is_preempt_requested():
rospy.loginfo('%s: Preempted' % self._action_name)
self._as.set_preempted()
return
time_now = rospy.Time.now()
time_from_start = (time_now - goal.trajectory.header.stamp).to_sec()
if time_from_start <= 0:
rospy.sleep(0.001)
continue
while time_from_start > goal.trajectory.points[current_dest_point_idx].time_from_start.to_sec():
current_dest_point_idx += 1
if current_dest_point_idx >= len(goal.trajectory.points):
break
if current_dest_point_idx >= len(goal.trajectory.points):
break
dest_time = goal.trajectory.points[current_dest_point_idx].time_from_start.to_sec()
dest_pos = goal.trajectory.points[current_dest_point_idx].positions
if current_dest_point_idx > 0:
prev_time = goal.trajectory.points[current_dest_point_idx-1].time_from_start.to_sec()
prev_pos = goal.trajectory.points[current_dest_point_idx-1].positions
else:
prev_time = 0.0
prev_pos = init_pos
f = (time_from_start-prev_time) / (dest_time - prev_time)
if f < 0 or f > 1:
print "error: MoveJointTrajectory f: ", str(f)
idx = 0
for joint_name in goal.trajectory.joint_names:
self.robot_state.js.position[ self.robot_state.joint_name_idx_map[joint_name] ] = prev_pos[idx] * (1.0-f) + dest_pos[idx] * f
idx += 1
self.robot_state.updateMimicJoints(self.robot_state.js)
# publish the feedback
self._feedback.header.stamp = time_now
# self._feedback.desired = pm.toMsg(T_B_Ti)
# self._feedback.actual = pm.toMsg(T_B_Ti)
# self._feedback.error = pm.toMsg(PyKDL.Frame())
self._as.publish_feedback(self._feedback)
rospy.sleep(0.001)
self._result.error_code = 0
self._as.set_succeeded(self._result)
class MoveCartesianTrajectory(object):
# create messages that are used to publish feedback/result
_feedback = cartesian_trajectory_msgs.msg.CartesianTrajectoryFeedback()
_result = cartesian_trajectory_msgs.msg.CartesianTrajectoryResult()
def __init__(self, name, robot_state, wrist_name):
self.robot_state = robot_state
self.wrist_name = wrist_name
self._action_name = name
self._as = actionlib.SimpleActionServer(self._action_name, cartesian_trajectory_msgs.msg.CartesianTrajectoryAction, execute_cb=self.execute_cb, auto_start = False)
self._as.start()
def execute_cb(self, goal):
# helper variables
success = True
if self.robot_state.joint_impedance_active:
self._result.error_code = 0
self._as.set_succeeded(self._result)
print "MoveCartesianTrajectory ", self._action_name, " points: ", len(goal.trajectory.points)
init_T_B_W = self.robot_state.fk_solver.calculateFk(self.wrist_name, self.robot_state.getJsPos())
init_T_B_T = init_T_B_W * self.robot_state.T_W_T[self.wrist_name]
current_dest_point_idx = 0
while True:
if self._as.is_preempt_requested():
rospy.loginfo('%s: Preempted' % self._action_name)
self._as.set_preempted()
return
time_now = rospy.Time.now()
time_from_start = (time_now - goal.trajectory.header.stamp).to_sec()
if time_from_start <= 0:
rospy.sleep(0.01)
continue
while time_from_start > goal.trajectory.points[current_dest_point_idx].time_from_start.to_sec():
current_dest_point_idx += 1
if current_dest_point_idx >= len(goal.trajectory.points):
break
if current_dest_point_idx >= len(goal.trajectory.points):
break
dest_time = goal.trajectory.points[current_dest_point_idx].time_from_start.to_sec()
dest_T_B_T = pm.fromMsg(goal.trajectory.points[current_dest_point_idx].pose)
if current_dest_point_idx > 0:
prev_time = goal.trajectory.points[current_dest_point_idx-1].time_from_start.to_sec()
prev_T_B_T = pm.fromMsg(goal.trajectory.points[current_dest_point_idx-1].pose)
else:
prev_time = 0.0
prev_T_B_T = init_T_B_T
f = (time_from_start-prev_time) / (dest_time - prev_time)
if f < 0 or f > 1:
print "error: MoveCartesianTrajectory f: ", str(f)
diff_T_B_T = PyKDL.diff(prev_T_B_T, dest_T_B_T, 1.0)
T_B_Ti = PyKDL.addDelta(prev_T_B_T, diff_T_B_T, f)
T_B_Wi = T_B_Ti * self.robot_state.T_W_T[self.wrist_name].Inverse()
q_out = self.robot_state.fk_solver.simulateTrajectory(self.wrist_name, self.robot_state.getJsPos(), T_B_Wi)
if q_out == None:
self._result.error_code = -3 # PATH_TOLERANCE_VIOLATED
self._as.set_aborted(self._result)
return
for i in range(7):
joint_name = self.robot_state.fk_solver.ik_joint_state_name[self.wrist_name][i]
self.robot_state.js.position[ self.robot_state.joint_name_idx_map[joint_name] ] = q_out[i]
self.robot_state.arm_cmd[self.wrist_name] = T_B_Ti
# publish the feedback
self._feedback.header.stamp = time_now
self._feedback.desired = pm.toMsg(T_B_Ti)
self._feedback.actual = pm.toMsg(T_B_Ti)
self._feedback.error = pm.toMsg(PyKDL.Frame())
self._as.publish_feedback(self._feedback)
rospy.sleep(0.01)
self._result.error_code = 0
self._as.set_succeeded(self._result)
class VelmaFake:
#
# joint state
#
def updateMimicJoints(self, js):
    """Overwrite the position of every mimic joint in *js* from its source
    joint (source * multiplier + optional offset). Source values are read
    from a snapshot taken before any update, so mimic joints always follow
    the pre-update positions."""
    snapshot = dict(zip(js.name, js.position))
    for idx, joint_name in enumerate(js.name):
        mimic = self.mimic_joints_map.get(joint_name)
        if mimic is None:
            continue
        value = snapshot[mimic.joint] * mimic.multiplier
        if mimic.offset is not None:
            value += mimic.offset
        js.position[idx] = value
def updateJointLimits(self, js):
    """Clamp every non-mimic joint position in *js* into its limit range.
    Mimic joints are skipped — their value is derived, not commanded."""
    for idx, joint_name in enumerate(js.name):
        if joint_name in self.mimic_joints_map:
            continue
        limit = self.joint_name_limit_map[joint_name]
        if js.position[idx] < limit.lower:
            js.position[idx] = limit.lower
        elif js.position[idx] > limit.upper:
            js.position[idx] = limit.upper
def initJointStatePublisher(self):
    """Build the JointState message and the mimic/limit/index lookup tables
    from the URDF on the parameter server, and create the /joint_states
    publisher."""
    self.pub_js = rospy.Publisher("/joint_states", sensor_msgs.msg.JointState, queue_size=100)
    self.robot = urdf_parser_py.urdf.URDF.from_parameter_server()
    # joint name -> urdf mimic descriptor (joint, multiplier, offset)
    self.mimic_joints_map = {}
    # joint name -> urdf limit descriptor (lower/upper)
    self.joint_name_limit_map = {}
    # joint name -> index into self.js.name / self.js.position
    self.joint_name_idx_map = {}
    self.js = sensor_msgs.msg.JointState()
    joint_idx_ros = 0
    for i in range(len(self.robot.joints)):
        joint = self.robot.joints[i]
        # print joint.name
        if joint.joint_type == "fixed":
            continue
        if joint.mimic != None:
            self.mimic_joints_map[joint.name] = joint.mimic
        self.joint_name_limit_map[joint.name] = joint.limit
        self.joint_name_idx_map[joint.name] = joint_idx_ros
        self.js.name.append(joint.name)
        self.js.position.append(0)
        joint_idx_ros += 1
    # print "self.js.name"
    # print self.js.name
    # the URDF does not contain mimic joint information for the grippers
    # the following code adds mimic joints for grippers
    # entries: [mimicking joint, source joint, multiplier, offset]
    hand_mimic = [
        ["right_HandFingerThreeKnuckleThreeJoint", "right_HandFingerThreeKnuckleTwoJoint", 0.33333333, 0.0],
        ["right_HandFingerOneKnuckleThreeJoint", "right_HandFingerOneKnuckleTwoJoint", 0.33333333, 0.0],
        ["right_HandFingerTwoKnuckleOneJoint", "right_HandFingerOneKnuckleOneJoint", 1.0, 0.0],
        ["right_HandFingerTwoKnuckleThreeJoint", "right_HandFingerTwoKnuckleTwoJoint", 0.33333333, 0.0],
        ["left_HandFingerThreeKnuckleThreeJoint", "left_HandFingerThreeKnuckleTwoJoint", 0.33333333, 0.0],
        ["left_HandFingerOneKnuckleThreeJoint", "left_HandFingerOneKnuckleTwoJoint", 0.33333333, 0.0],
        ["left_HandFingerTwoKnuckleOneJoint", "left_HandFingerOneKnuckleOneJoint", 1.0, 0.0],
        ["left_HandFingerTwoKnuckleThreeJoint", "left_HandFingerTwoKnuckleTwoJoint", 0.33333333, 0.0]
    ]
    for mimic in hand_mimic:
        # only add the synthetic mimic when the URDF did not define one
        if not mimic[0] in self.mimic_joints_map:
            self.mimic_joints_map[mimic[0]] = urdf_parser_py.urdf.JointMimic()
            self.mimic_joints_map[mimic[0]].joint = mimic[1]
            self.mimic_joints_map[mimic[0]].multiplier = mimic[2]
            self.mimic_joints_map[mimic[0]].offset = mimic[3]
    self.updateJointLimits(self.js)
    self.updateMimicJoints(self.js)
def publishJointStates(self):
    """Stamp the cached JointState with the current time and publish it."""
    self.js.header.stamp = rospy.Time.now()
    self.pub_js.publish(self.js)
def simSetJointPosition(self, joint_name, joint_position):
    # Directly overwrite one joint position in the simulated joint state.
    self.js.position[self.joint_name_idx_map[joint_name]] = joint_position
def setInitialJointPosition(self):
    """Pose both arms in the default start configuration and re-apply joint
    limits and mimic-joint coupling."""
    # (joint name, angle in degrees); torso_1_joint intentionally left alone
    # self.simSetJointPosition("torso_1_joint", -80.0/180.0*math.pi)
    initial_pose_deg = [
        ("right_arm_0_joint", -20.0),
        ("right_arm_1_joint", -90.0),
        ("right_arm_2_joint", 90.0),
        ("right_arm_3_joint", 90.0),
        ("right_arm_4_joint", 0.0),
        ("right_arm_5_joint", -90.0),
        ("right_arm_6_joint", 0.0),
        ("left_arm_0_joint", 20.0),
        ("left_arm_1_joint", 90.0),
        ("left_arm_2_joint", -90.0),
        ("left_arm_3_joint", -90.0),
        ("left_arm_4_joint", 0.0),
        ("left_arm_5_joint", 90.0),
        ("left_arm_6_joint", 0.0),
    ]
    for joint_name, angle_deg in initial_pose_deg:
        self.simSetJointPosition(joint_name, angle_deg/180.0*math.pi)
    self.updateJointLimits(self.js)
    self.updateMimicJoints(self.js)
def getJsPos(self):
    """Return the current joint state as a {joint_name: position} dict."""
    return dict(zip(self.js.name, self.js.position))
def initTactilePublisher(self):
    """Create the tactile publishers and zero-initialized pressure messages
    (24 cells per finger tip and per palm) for both hands."""
    self.pub_tact = {
        "left":rospy.Publisher('/left_hand/BHPressureState', barrett_hand_controller_msgs.msg.BHPressureState, queue_size=100),
        "right":rospy.Publisher('/right_hand/BHPressureState', barrett_hand_controller_msgs.msg.BHPressureState, queue_size=100)}
    self.tact = {"left":barrett_hand_controller_msgs.msg.BHPressureState(), "right":barrett_hand_controller_msgs.msg.BHPressureState()}
    for hand in self.tact:
        msg = self.tact[hand]
        for _ in range(24):
            msg.finger1_tip.append(0)
            msg.finger2_tip.append(0)
            msg.finger3_tip.append(0)
            msg.palm_tip.append(0)
def publishTactile(self):
    """Publish the simulated tactile pressure messages for both hands."""
    self.pub_tact["left"].publish(self.tact["left"])
    self.pub_tact["right"].publish(self.tact["right"])
def initTactileInfoPublisher(self):
    """Create the tactile-info publishers and precompute BHPressureInfo
    messages describing each sensor cell geometry (center plus two
    half-side vectors, converted from millimetres to metres)."""
    self.pub_tact_info = {
        "left":rospy.Publisher('/left_hand/tactile_info_out', barrett_hand_controller_msgs.msg.BHPressureInfo, queue_size=100),
        "right":rospy.Publisher('/right_hand/tactile_info_out', barrett_hand_controller_msgs.msg.BHPressureInfo, queue_size=100)}
    self.tact_info = {"left":barrett_hand_controller_msgs.msg.BHPressureInfo(), "right":barrett_hand_controller_msgs.msg.BHPressureInfo()}
    pc, p1, p2, fc, f1, f2 = self.getTactileGeometry()
    # sensors 0..2 use the finger geometry, sensor 3 the palm geometry
    sensor_center = [fc, fc, fc, pc]
    sensor_h1 = [f1, f1, f1, p1]
    sensor_h2 = [f2, f2, f2, p2]
    for gripper_name in self.tact_info:
        for sensor_id in range(4):
            elem = barrett_hand_controller_msgs.msg.BHPressureInfoElement()
            # TODO:
            # elem.frame_id = ""
            for i in range(24):
                # geometry tables are in millimetres; messages use metres
                elem.center.append( geometry_msgs.msg.Vector3( sensor_center[sensor_id][i][0]*0.001, sensor_center[sensor_id][i][1]*0.001, sensor_center[sensor_id][i][2]*0.001 ) )
                elem.halfside1.append( geometry_msgs.msg.Vector3( sensor_h1[sensor_id][i][0]*0.001, sensor_h1[sensor_id][i][1]*0.001, sensor_h1[sensor_id][i][2]*0.001 ) )
                elem.halfside2.append( geometry_msgs.msg.Vector3( sensor_h2[sensor_id][i][0]*0.001, sensor_h2[sensor_id][i][1]*0.001, sensor_h2[sensor_id][i][2]*0.001 ) )
                elem.force_per_unit.append( 1.0/256.0 )
            self.tact_info[gripper_name].sensor.append(elem)
def publishTactileInfo(self):
    """Publish the precomputed tactile geometry info for both hands."""
    self.pub_tact_info["left"].publish(self.tact_info["left"])
    self.pub_tact_info["right"].publish(self.tact_info["right"])
def headLookAtCallback(self, msg):
    # Store the requested look-at point (position part of the received pose)
    # for the head kinematics update in the main loop.
    T_B_Pla = pm.fromMsg(msg)
    self.head_look_at = T_B_Pla.p
def rightHandResetCallback(self, msg):
    """Reset the right hand's actuated finger joints to zero and re-apply
    limits and mimic coupling."""
    for joint_name in ("right_HandFingerOneKnuckleOneJoint",
                       "right_HandFingerOneKnuckleTwoJoint",
                       "right_HandFingerTwoKnuckleTwoJoint",
                       "right_HandFingerThreeKnuckleTwoJoint"):
        self.js.position[self.joint_name_idx_map[joint_name]] = 0.0
    self.updateJointLimits(self.js)
    self.updateMimicJoints(self.js)
def leftHandResetCallback(self, msg):
    """Reset the left hand's actuated finger joints to zero and re-apply
    limits and mimic coupling."""
    for joint_name in ("left_HandFingerOneKnuckleOneJoint",
                       "left_HandFingerOneKnuckleTwoJoint",
                       "left_HandFingerTwoKnuckleTwoJoint",
                       "left_HandFingerThreeKnuckleTwoJoint"):
        self.js.position[self.joint_name_idx_map[joint_name]] = 0.0
    self.updateJointLimits(self.js)
    self.updateMimicJoints(self.js)
def __init__(self):
    """Set up publishers, the initial robot pose, the FK/IK solver, head
    kinematics and the topic subscribers of the fake Velma interface."""
    self.initTactilePublisher()
    self.initTactileInfoPublisher()
    self.initJointStatePublisher()
    self.setInitialJointPosition()
    self.fk_solver = velma_fk_ik.VelmaFkIkSolver([], None)
    # commanded tool pose in the base frame, per wrist link
    self.arm_cmd = {}
    self.arm_cmd["right_arm_7_link"] = self.fk_solver.calculateFk("right_arm_7_link", self.getJsPos())
    self.arm_cmd["left_arm_7_link"] = self.fk_solver.calculateFk("left_arm_7_link", self.getJsPos())
    # wrist -> tool transform per wrist link (identity until a tool move)
    self.T_W_T = {}
    self.T_W_T["right_arm_7_link"] = PyKDL.Frame()
    self.T_W_T["left_arm_7_link"] = PyKDL.Frame()
    self.joint_impedance_active = False
    # point in the base frame the head should look at (updated by callback)
    self.head_look_at = PyKDL.Vector(1,0,1.8)
    # geometry constants handed to the HeadKinematics solver
    v_rot = 0.800
    v_lean = 0.375
    v_head = 0.392
    h_cam = 0.0
    v_cam = 0.225
    self.head_kin = headkinematics.HeadKinematics(v_rot, v_lean, v_head, h_cam, v_cam)
    rospy.Subscriber('/head_lookat_pose', geometry_msgs.msg.Pose, self.headLookAtCallback)
    # NOTE(review): std_msgs.msg is referenced below but not visible in the
    # file's import list -- "import std_msgs.msg" is required at file level.
    rospy.Subscriber('/right_hand/reset_fingers', std_msgs.msg.Empty, self.rightHandResetCallback)
    rospy.Subscriber('/left_hand/reset_fingers', std_msgs.msg.Empty, self.leftHandResetCallback)
    #stream("Hand.calibrate_tactile_sensors", ros.comm.topic("calibrate_tactile_sensors"))
    #stream("Hand.set_median_filter", ros.comm.topic("set_median_filter"))
# conman switch fake service callback
def handle_switch_controller(self, req):
if len(req.start_controllers) == 2 and 'JntImp' in req.start_controllers and 'TrajectoryGeneratorJoint' in req.start_controllers and len(req.stop_controllers) == 3 and 'CImp' in req.stop_controllers and 'PoseIntLeft' in req.stop_controllers and 'PoseIntRight' in req.stop_controllers:
self.joint_impedance_active = True
print "switched to joint impedance"
return controller_manager_msgs.srv.SwitchControllerResponse(True)
elif len(req.stop_controllers) == 2 and 'JntImp' in req.stop_controllers and 'TrajectoryGeneratorJoint' in req.stop_controllers and len(req.start_controllers) == 3 and 'CImp' in req.start_controllers and 'PoseIntLeft' in req.start_controllers and 'PoseIntRight' in req.start_controllers:
self.joint_impedance_active = False
print "switched to cartesian impedance"
return controller_manager_msgs.srv.SwitchControllerResponse(True)
print "ERROR: handle_switch_controller: ", req.start_controllers, req.stop_controllers, req.strictness
return controller_manager_msgs.srv.SwitchControllerResponse(False)
def getTactileGeometry(self):
    """Return the tactile cell geometry tables, all in millimetres.

    Each table has 24 entries of [x, y, z]; "halfside" vectors span half a
    rectangular cell from its center.

    :return: (palm centers, palm halfside1, palm halfside2,
              finger centers, finger halfside1, finger halfside2)
    """
    palm_sensor_center = [
        [ 22, 15.9, 77.5 ], [ 11, 15.9, 77.5 ], [ 0, 15.9, 77.5 ], [ -11, 15.9, 77.5 ], [ -22, 15.9, 77.5 ],
        [ 33, 5.3, 77.5 ], [ 22, 5.3, 77.5 ], [ 11, 5.3, 77.5 ], [ 0, 5.3, 77.5 ], [ -11, 5.3, 77.5 ], [ -22, 5.3, 77.5 ], [ -33, 5.3, 77.5 ],
        [ 33, -5.3, 77.5 ], [ 22, -5.3, 77.5 ], [ 11, -5.3, 77.5 ], [ 0, -5.3, 77.5 ], [ -11, -5.3, 77.5 ], [ -22, -5.3, 77.5 ], [ -33, -5.3, 77.5 ],
        [ 22, -15.9, 77.5 ], [ 11, -15.9, 77.5 ], [ 0, -15.9, 77.5 ], [ -11, -15.9, 77.5 ], [ -22, -15.9, 77.5 ] ]
    palm_sensor_halfside1 = [
        [ 5, 0, 0 ], [ 5, 0, 0 ], [ 5, 0, 0 ], [ 5, 0, 0 ], [ 5, 0, 0 ],
        [ 5, 0, 0 ], [ 5, 0, 0 ], [ 5, 0, 0 ], [ 5, 0, 0 ], [ 5, 0, 0 ], [ 5, 0, 0 ], [ 5, 0, 0 ],
        [ 5, 0, 0 ], [ 5, 0, 0 ], [ 5, 0, 0 ], [ 5, 0, 0 ], [ 5, 0, 0 ], [ 5, 0, 0 ], [ 5, 0, 0 ],
        [ 5, 0, 0 ], [ 5, 0, 0 ], [ 5, 0, 0 ], [ 5, 0, 0 ], [ 5, 0, 0 ] ]
    palm_sensor_halfside2 = [
        [ 0, 4.85, 0 ], [ 0, 4.85, 0 ], [ 0, 4.85, 0 ], [ 0, 4.85, 0 ], [ 0, 4.85, 0 ],
        [ 0, 4.85, 0 ], [ 0, 4.85, 0 ], [ 0, 4.85, 0 ], [ 0, 4.85, 0 ], [ 0, 4.85, 0 ], [ 0, 4.85, 0 ], [ 0, 4.85, 0 ],
        [ 0, 4.85, 0 ], [ 0, 4.85, 0 ], [ 0, 4.85, 0 ], [ 0, 4.85, 0 ], [ 0, 4.85, 0 ], [ 0, 4.85, 0 ], [ 0, 4.85, 0 ],
        [ 0, 4.85, 0 ], [ 0, 4.85, 0 ], [ 0, 4.85, 0 ], [ 0, 4.85, 0 ], [ 0, 4.85, 0 ] ]
    finger_sensor_center = [
        [ 22.25, -9.5, 5.2 ], [22.25, -9.5, 0 ], [ 22.25, -9.5, -5.2 ],
        [ 28.25, -9.5, 5.2 ], [ 28.25, -9.5, 0 ], [ 28.25, -9.5, -5.2 ],
        [ 34.2484, -9.41371, 5.2 ], [ 34.2484, -9.41371, 0 ], [ 34.2484, -9.41371, -5.2 ],
        [ 40.2349, -9.05695, 5.2 ], [ 40.2349, -9.05695, 0 ], [ 40.2349, -9.05695, -5.2 ],
        [ 46.1912, -8.35887, 5.2 ], [ 46.1912, -8.35887, 0 ], [ 46.1912, -8.35887, -5.2 ],
        [ 51.0813, -7.1884, 5.2 ], [ 51.0813, -7.1884, 0 ], [ 51.0813, -7.1884, -5.2 ],
        [ 53.8108, -5.14222, 5.2 ], [ 53.8108, -5.14222, 0 ], [ 53.8108, -5.14222, -5.2 ],
        [ 55.4163, -2.13234, 5.2 ], [ 55.4163, -2.13234, 0 ], [ 55.4163, -2.13234, -5.2 ] ]
    finger_sensor_halfside1 = [
        [ 2.75, 0, 0 ], [ 2.75, 0, 0 ], [ 2.75, 0, 0 ],
        [ 2.75, 0, 0 ], [ 2.75, 0, 0 ], [ 2.75, 0, 0 ],
        [ 2.74837, 0.085096, 0 ], [ 2.74837, 0.085096, 0 ], [ 2.74837, 0.085096, 0 ],
        [ 2.73902, 0.241919, 0 ], [ 2.73902, 0.241919, 0 ], [ 2.73902, 0.241919, 0 ],
        [ 2.72073, 0.397956, 0 ], [ 2.72073, 0.397956, 0 ], [ 2.72073, 0.397956, 0 ],
        [ 1.35885, 0.614231, 0 ], [ 1.35885, 0.614231, 0 ], [ 1.35885, 0.614231, 0 ],
        [ 0.970635, 1.13209, 0 ], [ 0.970635, 1.13209, 0 ], [ 0.970635, 1.13209, 0 ],
        [ 0.399575, 1.4367, 0 ], [ 0.399575, 1.4367, 0 ], [ 0.399575, 1.4367, 0 ] ]
    finger_sensor_halfside2 = [
        [ 0, 0, 2.35 ], [ 0, 0, 2.35 ], [ 0, 0, 2.35 ],
        [ 0, 0, 2.35 ], [ 0, 0, 2.35 ], [ 0, 0, 2.35 ],
        [ 0, 0, 2.35 ], [ 0, 0, 2.35 ], [ 0, 0, 2.35 ],
        [ 0, 0, 2.35 ], [ 0, 0, 2.35 ], [ 0, 0, 2.35 ],
        [ 0, 0, 2.35 ], [ 0, 0, 2.35 ], [ 0, 0, 2.35 ],
        [ 0, 0, 2.35 ], [ 0, 0, 2.35 ], [ 0, 0, 2.35 ],
        [ 0, 0, 2.35 ], [ 0, 0, 2.35 ], [ 0, 0, 2.35 ],
        [ 0, 0, 2.35 ], [ 0, 0, 2.35 ], [ 0, 0, 2.35 ] ]
    return palm_sensor_center, palm_sensor_halfside1, palm_sensor_halfside2, finger_sensor_center, finger_sensor_halfside1, finger_sensor_halfside2
def sendToolTransform(self, wrist_name):
    """Broadcast the tool frame for one wrist over tf.

    Publishes the transform stored in ``self.T_W_T`` for
    ``<wrist_name>_arm_7_link`` as child frame ``<wrist_name>_arm_tool``.
    """
    link_name = wrist_name + "_arm_7_link"
    tool_pose = self.T_W_T[link_name]
    qx, qy, qz, qw = tool_pose.M.GetQuaternion()
    origin = tool_pose.p
    self.br.sendTransform(
        [origin[0], origin[1], origin[2]],
        [qx, qy, qz, qw],
        rospy.Time.now(),
        wrist_name + "_arm_tool",
        link_name)
def spin(self):
self.br = tf.TransformBroadcaster()
# conman switch fake service
rospy.Service('/controller_manager/switch_controller', controller_manager_msgs.srv.SwitchController, self.handle_switch_controller)
move_effector_right = MoveCartesianTrajectory("/right_arm/cartesian_trajectory", self, "right_arm_7_link")
move_tool_right = MoveToolAction("/right_arm/tool_trajectory", self, "right_arm_7_link")
move_imp_right = MoveImpAction("/right_arm/cartesian_impedance", self)
move_effector_left = MoveCartesianTrajectory("/left_arm/cartesian_trajectory", self, "left_arm_7_link")
move_tool_left = MoveToolAction("/left_arm/tool_trajectory", self, "left_arm_7_link")
move_imp_left = MoveImpAction("/left_arm/cartesian_impedance", self)
move_joint = MoveJointTrajectory("/spline_trajectory_action_joint", self)
move_hand = MoveHandAction("/right_hand/move_hand", self)
move_hand = MoveHandAction("/left_hand/move_hand", self)
time_diff = 0.05
print "fake Velma interface is running"
while not rospy.is_shutdown():
self.publishJointStates()
right_arm_cmd = pm.toMsg(self.arm_cmd["right_arm_7_link"])
left_arm_cmd = pm.toMsg(self.arm_cmd["left_arm_7_link"])
self.br.sendTransform([right_arm_cmd.position.x, right_arm_cmd.position.y, right_arm_cmd.position.z], [right_arm_cmd.orientation.x, right_arm_cmd.orientation.y, right_arm_cmd.orientation.z, right_arm_cmd.orientation.w], rospy.Time.now(), "right_arm_cmd", "torso_base")
self.br.sendTransform([left_arm_cmd.position.x, left_arm_cmd.position.y, left_arm_cmd.position.z], [left_arm_cmd.orientation.x, left_arm_cmd.orientation.y, left_arm_cmd.orientation.z, left_arm_cmd.orientation.w], rospy.Time.now(), "left_arm_cmd", "torso_base")
self.head_kin.UpdateTargetPosition(self.head_look_at.x(), self.head_look_at.y(), self.head_look_at.z())
self.head_kin.TransformTargetToHeadFrame()
joint_pan, joint_tilt = self.head_kin.CalculateHeadPose()
current_head_pan = self.getJsPos()["head_pan_joint"]
current_head_tilt = self.getJsPos()["head_tilt_joint"]
head_pan_diff = joint_pan - current_head_pan
head_tilt_diff = joint_tilt - current_head_tilt
if head_pan_diff > 45.0/180.0*math.pi*time_diff:
head_pan_diff = 45.0/180.0*math.pi*time_diff
elif head_pan_diff < -45.0/180.0*math.pi*time_diff:
head_pan_diff = -45.0/180.0*math.pi*time_diff
if head_tilt_diff > 45.0/180.0*math.pi*time_diff:
head_tilt_diff = 45.0/180.0*math.pi*time_diff
elif head_tilt_diff < -45.0/180.0*math.pi*time_diff:
head_tilt_diff = -45.0/180.0*math.pi*time_diff
self.simSetJointPosition("head_pan_joint", current_head_pan + head_pan_diff)
self.simSetJointPosition("head_tilt_joint", current_head_tilt + head_tilt_diff)
self.sendToolTransform("right")
self.sendToolTransform("left")
self.publishTactile()
self.publishTactileInfo()
rospy.sleep(time_diff)
if __name__ == '__main__':
    # Start the fake Velma node and block inside its main loop.
    rospy.init_node('velma_fake')
    fake_velma = VelmaFake()
    fake_velma.spin()
|
StarcoderdataPython
|
3276229
|
<filename>src/figtag/apps/run.py
from typing import Any
from figtag.manage import Cog
from figtag.run import run
import logging
from os import environ
class Runner(Cog):  # pragma: no cover
    """CLI cog that registers and dispatches the ``run`` sub-command.

    Wires the figtag driver (query OpenI, apply the VAE and K-Means
    models, tag with MeSH terms) into the shared argument parser.
    """

    @staticmethod
    def initialize(arg_parse: Any) -> None:
        """Register the ``run`` sub-parser and all of its arguments."""
        parser = arg_parse.add_parser(
            "run",
            help=('Driver script to call all the relevant components '
                  'and produce an image index')
        )
        required = parser.add_argument_group('required arguments')
        # Required arguments, registered in the original order.
        required_specs = (
            ('-query', 'query',
             'The query to perform against OpenI, or a local '
             'file containing data obtained from OpenI'),
            ('-vae-model-file', 'vae_model_path',
             'The path to the file where the VAE model is saved'),
            ('-kmeans-model-file', 'kmeans_model_path',
             'The path to the file where the K-Means model is saved'),
            ('-mesh-terms-file', 'mesh_terms_file',
             'The path to a file containing MeSH terms'),
        )
        for flag, dest, help_text in required_specs:
            required.add_argument(
                flag, dest=dest, action='store', required=True,
                help=help_text)
        parser.add_argument(
            '-o', dest='output_folder', action='store', default='',
            help='Path to the folder where to put all output files')
        parser.add_argument(
            '-file-limit', dest='file_limit', action='store', default=0,
            help='Max number of files to process', type=int)
        parser.set_defaults(func=Runner.execute)

    @staticmethod
    def execute(args: Any) -> int:
        """Configure logging from FIGTAG_LOGLEVEL and run the pipeline."""
        log_level = environ.get('FIGTAG_LOGLEVEL', 'WARNING').upper()
        logging.basicConfig(level=log_level)
        return run(args.query,
                   args.vae_model_path, args.kmeans_model_path,
                   args.mesh_terms_file,
                   args.output_folder,
                   args.file_limit)
|
StarcoderdataPython
|
1742523
|
<filename>examples/python/misc/realsense.py
# Open3D: www.open3d.org
# The MIT License (MIT)
# See license file or visit www.open3d.org for details
# examples/python/misc/realsense.py
# Simple example to show RealSense camera discovery and frame capture
import open3d as o3d
if __name__ == "__main__":
    # Enumerate attached RealSense devices, then capture a few frames.
    o3d.t.io.RealSenseSensor.list_devices()
    sensor = o3d.t.io.RealSenseSensor()
    sensor.start_capture()
    print(sensor.get_metadata())
    for fid in range(5):
        frame = sensor.capture_frame()
        # Save color as JPEG and depth as PNG with zero-padded indices.
        o3d.io.write_image(f"color{fid:05d}.jpg",
                           frame.color.to_legacy_image())
        o3d.io.write_image(f"depth{fid:05d}.png",
                           frame.depth.to_legacy_image())
        # Timestamp is reported in microseconds; convert to seconds.
        print("Frame: {}, time: {}s".format(fid, sensor.get_timestamp() * 1e-6))
    sensor.stop_capture()
|
StarcoderdataPython
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.