id (stringlengths 1–7) | text (stringlengths 6–1.03M) | dataset_id (stringclasses 1)
---|---|---|
3351377
|
from setuptools import setup
with open('README.md', 'r') as f:
long_description = f.read()
with open('LICENSE', 'r') as f:
license = f.read()
setup(
name='tensorsim',
version='0.0.1',
    description='Simulate predictions with a simulated accuracy',
long_description=long_description,
license=license,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/MedleyLabs/tensorsim',
packages=['tensorsim'],
)
|
StarcoderdataPython
|
1691213
|
<filename>huskar_api/service/admin/user.py
from __future__ import absolute_import
import uuid
import datetime
from flask import abort
from werkzeug.security import safe_str_cmp
from huskar_api.models import DBSession, cache_manager
from huskar_api.models.auth import User
from huskar_api.extras.email import deliver_email, EmailTemplate
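# Redis key template and lifetime for password-reset tokens, namespaced by this module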
_PASSWORD_RESET_KEY = '%s:reset_password:{username}:token' % __name__
_PASSWORD_RESET_DURATION = datetime.timedelta(minutes=10)
_redis_client = cache_manager.make_client(namespace='%s:v1' % __name__)
# TODO deprecate
def request_to_reset_password(username):
user = User.get_by_name(username)
if not user or user.is_application:
abort(404, u'user {0} not found'.format(username))
if not user.email:
abort(403, u'user {0} does not have email'.format(username))
# Generate and record the token
token = uuid.uuid4()
_redis_client.set(
raw_key=_PASSWORD_RESET_KEY.format(username=username),
val=token.hex, expiration_time=_PASSWORD_RESET_DURATION)
deliver_email(EmailTemplate.PASSWORD_RESET, user.email, {
'username': user.username,
'token': token,
'expires_in': _PASSWORD_RESET_DURATION,
})
return user, token
# TODO deprecate
def reset_password(username, token, new_password):
key = _PASSWORD_RESET_KEY.format(username=username)
expected_token = _redis_client.get(key)
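    # safe_str_cmp compares in constant time, so the token cannot be probed via timing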
if expected_token and safe_str_cmp(token.hex, expected_token):
_redis_client.delete(key)
user = User.get_by_name(username)
if user is None or user.is_application:
abort(404, u'user {0} not found'.format(username))
user.change_password(new_password)
else:
abort(403, u'token is expired')
return user
# TODO deprecate
def change_email(user, new_email):
with DBSession().close_on_exit(False):
user.email = new_email
|
StarcoderdataPython
|
1675528
|
from unittest import TestCase
from conductr_cli.test.cli_test_case import CliTestCase, strip_margin
from conductr_cli import conduct_info
try:
from unittest.mock import patch, MagicMock # 3.3 and beyond
except ImportError:
from mock import patch, MagicMock
class TestConductInfoCommand(TestCase, CliTestCase):
default_args = {
'ip': '127.0.0.1',
'port': 9005,
'verbose': False,
'long_ids': False
}
default_url = 'http://127.0.0.1:9005/bundles'
def test_no_bundles(self):
http_method = self.respond_with(text='[]')
stdout = MagicMock()
with patch('requests.get', http_method), patch('sys.stdout', stdout):
conduct_info.info(MagicMock(**self.default_args))
http_method.assert_called_with(self.default_url)
self.assertEqual(
strip_margin("""|ID NAME #REP #STR #RUN
|"""),
self.output(stdout))
def test_stopped_bundle(self):
http_method = self.respond_with(text="""[
{
"attributes": { "bundleName": "test-bundle" },
"bundleId": "45e0c477d3e5ea92aa8d85c0d8f3e25c",
"bundleExecutions": [],
"bundleInstallations": [1]
}
]""")
stdout = MagicMock()
with patch('requests.get', http_method), patch('sys.stdout', stdout):
conduct_info.info(MagicMock(**self.default_args))
http_method.assert_called_with(self.default_url)
self.assertEqual(
strip_margin("""|ID NAME #REP #STR #RUN
|45e0c47 test-bundle 1 0 0
|"""),
self.output(stdout))
def test_one_running_one_starting_one_stopped(self):
http_method = self.respond_with(text="""[
{
"attributes": { "bundleName": "test-bundle-1" },
"bundleId": "45e0c477d3e5ea92aa8d85c0d8f3e25c",
"bundleExecutions": [{"isStarted": true}],
"bundleInstallations": [1]
},
{
"attributes": { "bundleName": "test-bundle-2" },
"bundleId": "45e0c477d3e5ea92aa8d85c0d8f3e25c-c52e3f8d0c58d8aa29ae5e3d774c0e54",
"bundleExecutions": [{"isStarted": false}],
"bundleInstallations": [1]
},
{
"attributes": { "bundleName": "test-bundle-3" },
"bundleId": "45e0c477d3e5ea92aa8d85c0d8f3e25c",
"bundleExecutions": [],
"bundleInstallations": [1]
}
]""")
stdout = MagicMock()
with patch('requests.get', http_method), patch('sys.stdout', stdout):
conduct_info.info(MagicMock(**self.default_args))
http_method.assert_called_with(self.default_url)
self.assertEqual(
strip_margin("""|ID NAME #REP #STR #RUN
|45e0c47 test-bundle-1 1 0 1
|45e0c47-c52e3f8 test-bundle-2 1 1 0
|45e0c47 test-bundle-3 1 0 0
|"""),
self.output(stdout))
def test_one_running_one_stopped_verbose(self):
http_method = self.respond_with(text="""[
{
"attributes": { "bundleName": "test-bundle-1" },
"bundleId": "45e0c477d3e5ea92aa8d85c0d8f3e25c",
"bundleExecutions": [{"isStarted": true},{"isStarted": true},{"isStarted": true}],
"bundleInstallations": [1,2,3]
},
{
"attributes": { "bundleName": "test-bundle-2" },
"bundleId": "c52e3f8d0c58d8aa29ae5e3d774c0e54",
"bundleExecutions": [],
"bundleInstallations": [1,2,3]
}
]""")
stdout = MagicMock()
with patch('requests.get', http_method), patch('sys.stdout', stdout):
args = self.default_args.copy()
args.update({'verbose': True})
conduct_info.info(MagicMock(**args))
http_method.assert_called_with(self.default_url)
self.assertEqual(
strip_margin("""|[
| {
| "attributes": {
| "bundleName": "test-bundle-1"
| },
| "bundleExecutions": [
| {
| "isStarted": true
| },
| {
| "isStarted": true
| },
| {
| "isStarted": true
| }
| ],
| "bundleId": "45e0c477d3e5ea92aa8d85c0d8f3e25c",
| "bundleInstallations": [
| 1,
| 2,
| 3
| ]
| },
| {
| "attributes": {
| "bundleName": "test-bundle-2"
| },
| "bundleExecutions": [],
| "bundleId": "c52e3f8d0c58d8aa29ae5e3d774c0e54",
| "bundleInstallations": [
| 1,
| 2,
| 3
| ]
| }
|]
|ID NAME #REP #STR #RUN
|45e0c47 test-bundle-1 3 0 3
|c52e3f8 test-bundle-2 3 0 0
|"""),
self.output(stdout))
def test_long_ids(self):
http_method = self.respond_with(text="""[
{
"attributes": { "bundleName": "test-bundle" },
"bundleId": "45e0c477d3e5ea92aa8d85c0d8f3e25c",
"bundleExecutions": [],
"bundleInstallations": [1]
}
]""")
stdout = MagicMock()
with patch('requests.get', http_method), patch('sys.stdout', stdout):
args = self.default_args.copy()
args.update({'long_ids': True})
conduct_info.info(MagicMock(**args))
http_method.assert_called_with(self.default_url)
self.assertEqual(
strip_margin("""|ID NAME #REP #STR #RUN
|45e0c477d3e5ea92aa8d85c0d8f3e25c test-bundle 1 0 0
|"""),
self.output(stdout))
def test_double_digits(self):
http_method = self.respond_with(text="""[
{
"attributes": { "bundleName": "test-bundle" },
"bundleId": "45e0c477d3e5ea92aa8d85c0d8f3e25c",
"bundleExecutions": [],
"bundleInstallations": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
}
]""")
stdout = MagicMock()
with patch('requests.get', http_method), patch('sys.stdout', stdout):
conduct_info.info(MagicMock(**self.default_args))
http_method.assert_called_with(self.default_url)
self.assertEqual(
strip_margin("""|ID NAME #REP #STR #RUN
|45e0c47 test-bundle 10 0 0
|"""),
self.output(stdout))
def test_has_error(self):
http_method = self.respond_with(text="""[
{
"attributes": { "bundleName": "test-bundle" },
"bundleId": "45e0c477d3e5ea92aa8d85c0d8f3e25c",
"bundleExecutions": [],
"bundleInstallations": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
"hasError": true
}
]""")
stdout = MagicMock()
with patch('requests.get', http_method), patch('sys.stdout', stdout):
conduct_info.info(MagicMock(**self.default_args))
http_method.assert_called_with(self.default_url)
self.assertEqual(
strip_margin("""|ID NAME #REP #STR #RUN
|! 45e0c47 test-bundle 10 0 0
|There are errors: use `conduct events` or `conduct logs` for further information
|"""),
self.output(stdout))
def test_failure_invalid_address(self):
http_method = self.raise_connection_error('test reason', self.default_url)
stderr = MagicMock()
with patch('requests.get', http_method), patch('sys.stderr', stderr):
conduct_info.info(MagicMock(**self.default_args))
http_method.assert_called_with(self.default_url)
self.assertEqual(
self.default_connection_error.format(self.default_url),
self.output(stderr))
|
StarcoderdataPython
|
141370
|
import cgi
import logging
from normality import slugify
from followthemoney import model
from followthemoney.types import registry
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm.attributes import flag_modified
from aleph.core import db, cache
from aleph.model.metadata import Metadata
from aleph.model.collection import Collection
from aleph.model.common import DatedModel
from aleph.model.document_record import DocumentRecord
from aleph.model.document_tag import DocumentTag
from aleph.util import filter_texts
log = logging.getLogger(__name__)
class Document(db.Model, DatedModel, Metadata):
MAX_TAGS = 10000
SCHEMA = 'Document'
SCHEMA_FOLDER = 'Folder'
SCHEMA_PACKAGE = 'Package'
SCHEMA_WORKBOOK = 'Workbook'
SCHEMA_TEXT = 'PlainText'
SCHEMA_HTML = 'HyperText'
SCHEMA_PDF = 'Pages'
SCHEMA_IMAGE = 'Image'
SCHEMA_AUDIO = 'Audio'
SCHEMA_VIDEO = 'Video'
SCHEMA_TABLE = 'Table'
SCHEMA_EMAIL = 'Email'
STATUS_PENDING = 'pending'
STATUS_SUCCESS = 'success'
STATUS_FAIL = 'fail'
id = db.Column(db.BigInteger, primary_key=True)
content_hash = db.Column(db.Unicode(65), nullable=True, index=True)
foreign_id = db.Column(db.Unicode, unique=False, nullable=True, index=True)
schema = db.Column(db.String(255), nullable=False)
status = db.Column(db.Unicode(10), nullable=True)
meta = db.Column(JSONB, default={})
error_message = db.Column(db.Unicode(), nullable=True)
body_text = db.Column(db.Unicode(), nullable=True)
body_raw = db.Column(db.Unicode(), nullable=True)
uploader_id = db.Column(db.Integer, db.ForeignKey('role.id'), nullable=True) # noqa
parent_id = db.Column(db.BigInteger, db.ForeignKey('document.id'), nullable=True, index=True) # noqa
children = db.relationship('Document', lazy='dynamic', backref=db.backref('parent', uselist=False, remote_side=[id])) # noqa
collection_id = db.Column(db.Integer, db.ForeignKey('collection.id'), nullable=False, index=True) # noqa
collection = db.relationship(Collection, backref=db.backref('documents', lazy='dynamic')) # noqa
def __init__(self, **kw):
self.meta = {}
super(Document, self).__init__(**kw)
@property
def model(self):
return model.get(self.schema)
@property
def name(self):
if self.title is not None:
return self.title
if self.file_name is not None:
return self.file_name
if self.source_url is not None:
return self.source_url
@property
def supports_records(self):
# Slightly unintuitive naming: this just checks the document type,
# not if there actually are any records.
return self.schema in [self.SCHEMA_PDF, self.SCHEMA_TABLE]
@property
def supports_pages(self):
return self.schema == self.SCHEMA_PDF
@property
def supports_nlp(self):
structural = [
Document.SCHEMA,
Document.SCHEMA_PACKAGE,
Document.SCHEMA_FOLDER,
Document.SCHEMA_WORKBOOK,
Document.SCHEMA_VIDEO,
Document.SCHEMA_AUDIO,
]
return self.schema not in structural
@property
def ancestors(self):
if self.parent_id is None:
return []
key = cache.key('ancestors', self.id)
ancestors = cache.get_list(key)
if len(ancestors):
return ancestors
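        # cache miss: derive from the parent's cached list, or recompute recursively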
parent_key = cache.key('ancestors', self.parent_id)
ancestors = cache.get_list(parent_key)
if not len(ancestors):
ancestors = []
parent = Document.by_id(self.parent_id)
if parent is not None:
ancestors = parent.ancestors
ancestors.append(self.parent_id)
if self.model.is_a(model.get(self.SCHEMA_FOLDER)):
cache.set_list(key, ancestors, expire=cache.EXPIRE)
return ancestors
def update(self, data):
props = ('title', 'summary', 'author', 'crawler', 'source_url',
'file_name', 'mime_type', 'headers', 'date', 'authored_at',
'modified_at', 'published_at', 'retrieved_at', 'languages',
'countries', 'keywords')
for prop in props:
value = data.get(prop, self.meta.get(prop))
setattr(self, prop, value)
db.session.add(self)
def update_meta(self):
flag_modified(self, 'meta')
def delete_records(self):
pq = db.session.query(DocumentRecord)
pq = pq.filter(DocumentRecord.document_id == self.id)
pq.delete()
db.session.flush()
def delete_tags(self):
pq = db.session.query(DocumentTag)
pq = pq.filter(DocumentTag.document_id == self.id)
pq.delete()
db.session.flush()
def delete(self, deleted_at=None):
self.delete_records()
self.delete_tags()
db.session.delete(self)
@classmethod
def delete_by_collection(cls, collection_id, deleted_at=None):
documents = db.session.query(cls.id)
documents = documents.filter(cls.collection_id == collection_id)
documents = documents.subquery()
pq = db.session.query(DocumentRecord)
pq = pq.filter(DocumentRecord.document_id.in_(documents))
pq.delete(synchronize_session=False)
pq = db.session.query(DocumentTag)
pq = pq.filter(DocumentTag.document_id.in_(documents))
pq.delete(synchronize_session=False)
pq = db.session.query(cls)
pq = pq.filter(cls.collection_id == collection_id)
pq.delete(synchronize_session=False)
def raw_texts(self):
yield self.title
yield self.file_name
yield self.source_url
yield self.summary
yield self.author
if self.status != self.STATUS_SUCCESS:
return
yield self.body_text
if self.supports_records:
# iterate over all the associated records.
pq = db.session.query(DocumentRecord)
pq = pq.filter(DocumentRecord.document_id == self.id)
pq = pq.order_by(DocumentRecord.index.asc())
for record in pq.yield_per(10000):
yield from record.raw_texts()
@property
def texts(self):
yield from filter_texts(self.raw_texts())
@classmethod
def by_keys(cls, parent_id=None, collection_id=None, foreign_id=None,
content_hash=None):
"""Try and find a document by various criteria."""
q = cls.all()
q = q.filter(Document.collection_id == collection_id)
if parent_id is not None:
q = q.filter(Document.parent_id == parent_id)
if foreign_id is not None:
q = q.filter(Document.foreign_id == foreign_id)
elif content_hash is not None:
q = q.filter(Document.content_hash == content_hash)
else:
raise ValueError("No unique criterion for document.")
document = q.first()
if document is None:
document = cls()
document.schema = cls.SCHEMA
document.collection_id = collection_id
if parent_id is not None:
document.parent_id = parent_id
if foreign_id is not None:
document.foreign_id = foreign_id
if content_hash is not None:
document.content_hash = content_hash
db.session.add(document)
return document
@classmethod
def by_id(cls, id, collection_id=None):
if id is None:
return
q = cls.all()
q = q.filter(cls.id == id)
if collection_id is not None:
q = q.filter(cls.collection_id == collection_id)
return q.first()
@classmethod
def by_collection(cls, collection_id=None):
q = cls.all()
q = q.filter(cls.collection_id == collection_id)
return q
@classmethod
def find_ids(cls, collection_id=None, failed_only=False):
q = cls.all_ids()
if collection_id is not None:
q = q.filter(cls.collection_id == collection_id)
if failed_only:
q = q.filter(cls.status != cls.STATUS_SUCCESS)
q = q.order_by(cls.id.asc())
return q
def to_proxy(self):
meta = dict(self.meta)
headers = meta.pop('headers', {})
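        # normalise header names (e.g. "Content-Disposition" -> "content_disposition") for the lookups below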
headers = {slugify(k, sep='_'): v for k, v in headers.items()}
proxy = model.get_proxy({
'id': str(self.id),
'schema': self.model,
'properties': meta
})
proxy.set('contentHash', self.content_hash)
proxy.set('parent', self.parent_id)
proxy.set('ancestors', self.ancestors)
proxy.set('processingStatus', self.status)
proxy.set('processingError', self.error_message)
proxy.set('fileSize', meta.get('file_size'))
proxy.set('fileName', meta.get('file_name'))
if not proxy.has('fileName'):
disposition = headers.get('content_disposition')
if disposition is not None:
_, attrs = cgi.parse_header(disposition)
proxy.set('fileName', attrs.get('filename'))
proxy.set('mimeType', meta.get('mime_type'))
if not proxy.has('mimeType'):
proxy.set('mimeType', headers.get('content_type'))
proxy.set('language', meta.get('languages'))
proxy.set('country', meta.get('countries'))
proxy.set('authoredAt', meta.get('authored_at'))
proxy.set('modifiedAt', meta.get('modified_at'))
proxy.set('publishedAt', meta.get('published_at'))
proxy.set('retrievedAt', meta.get('retrieved_at'))
proxy.set('sourceUrl', meta.get('source_url'))
proxy.set('messageId', meta.get('message_id'), quiet=True)
proxy.set('inReplyTo', meta.get('in_reply_to'), quiet=True)
proxy.set('bodyText', self.body_text, quiet=True)
proxy.set('bodyHtml', self.body_raw, quiet=True)
columns = meta.get('columns')
proxy.set('columns', registry.json.pack(columns), quiet=True)
proxy.set('headers', registry.json.pack(headers), quiet=True)
pdf = 'application/pdf'
if meta.get('extension') == 'pdf' or proxy.first('mimeType') == pdf:
proxy.set('pdfHash', self.content_hash, quiet=True)
proxy.add('pdfHash', meta.get('pdf_version'), quiet=True)
q = db.session.query(DocumentTag)
q = q.filter(DocumentTag.document_id == self.id)
q = q.filter(DocumentTag.type.in_(DocumentTag.MAPPING.keys()))
q = q.order_by(DocumentTag.weight.desc())
q = q.limit(Document.MAX_TAGS)
for tag in q.all():
prop = DocumentTag.MAPPING.get(tag.type)
if prop is not None:
proxy.add(prop, tag.text)
return proxy
def to_dict(self):
proxy = self.to_proxy()
data = proxy.to_full_dict()
data.update(self.to_dict_dates())
data.update({
'name': self.name,
'status': self.status,
'foreign_id': self.foreign_id,
'document_id': self.id,
'collection_id': self.collection_id,
'error_message': self.error_message,
'uploader_id': self.uploader_id,
'bulk': False,
})
return data
def __repr__(self):
return '<Document(%r,%r,%r)>' % (self.id, self.schema, self.title)
|
StarcoderdataPython
|
3306782
|
<gh_stars>0
from collections import Counter, OrderedDict
import argparse
import pickle
import csv
import sys
from plotly import graph_objects as go
import torch as th
from vocab import WordVocab
#
def parse_args(for_train=True) -> dict:
parser = argparse.ArgumentParser()
parser.add_argument("-ds", help="path to dataset of concat insns", required=True, \
dest="ds_path")
parser.add_argument("-vocab", help="path to pickled vocab", required=True, \
dest="vocab_path")
parser.add_argument("-gpu", help="1: use gpu, 0: cpu", required=True, \
dest="gpu", type=int)
parser.add_argument("-cpt", help="model checkpoint save dir path", required=True if for_train else False, \
dest="cpt_dir")
parser.add_argument("-model", help="model checkpoint load path", required=True if not(for_train) else False, \
dest="model_path")
parser.add_argument("-bat_sz", help="batch size", required=True, \
dest="bat_sz", type=int)
parser.add_argument("-eps", help="number of epochs", required=True if for_train else False, \
dest="epochs", type=int)
parser.add_argument("-plt", help="plot path", required=True if not(for_train) else False, \
dest="plt_path")
##########
# defaults
##########
# determined using utils.get_max_seq_len
MAX_INSN_LEN = 4
SEQ_LEN = 2*MAX_INSN_LEN + 3
args = vars(parser.parse_args())
args.update({'seq_len': SEQ_LEN})
return args
# load pickled WordVocab instance into memory
def load_vocab(path: str) -> WordVocab:
with open(path, "rb") as fh:
return pickle.load(fh)
# used if manual Dataset in-memory instantiation desired
def load_ds(path: str):
asm_tokens = []
with open(path, 'r') as fh:
reader = csv.reader(fh, delimiter=',')
curr_bb = []
for l in reader:
if not(l==[]):
curr_bb.append(l)
else:
asm_tokens.append(curr_bb)
curr_bb = []
asm_tokens.append(curr_bb)
return asm_tokens
# from list of basic block delineated token lists, get frequencies of all unique tokens
def sorted_tok_freqs(bbs: list) -> dict:
    all_toks = [tok for bb in bbs for insn in bb for tok in insn]
tok_counts = Counter(all_toks)
tok_counts = sorted(tok_counts.items(), key=lambda x: x[1], reverse=True)
tok_counts = OrderedDict(tok_counts)
return tok_counts
# first 5 tokens are special tokens
def is_special_token(token):
return token <= 4
# in-place replacement of masked tokens (as returned by DataLoader) with their unmasked values
def replace_masked_tokens(masked_insns: th.Tensor, lm_labels: th.Tensor, mask_idx=4):
for insn, mask_labels in zip(masked_insns, lm_labels):
for t_idx,token in enumerate(insn):
if token==mask_idx:
insn[t_idx] = mask_labels[t_idx]
# determine largest seq len from nested list of basic block delineated token sequences
def get_max_seq_len(insns_path: str) -> int:
bb_insns = load_ds(insns_path)
max_len = 0
for bb in bb_insns:
for insn in bb:
# pairs of insns are concatenated, delimited by tab
insn = insn[0].split('\t')
max_len = max(max_len, len(insn[0].split(' ')), len(insn[1].split(' ')))
return max_len
#
def plot_2d_scatter(vals: list, annots: list, write_path: str):
figure = go.Figure()
scatter = go.Scatter(x = [e[0] for e in vals], y = [e[1] for e in vals], \
text = annots, \
mode = 'text+markers', \
textposition="bottom center")
figure.add_trace(scatter)
figure.update_layout(title = { 'text': 'x86_64 BERT embeddings', 'x': 0.5, \
'font': {'size': 32} },
width = 1600, \
height = 1200)
figure.update_xaxes(title = { 'text': 'T-SNE dim 0', 'font': {'size':24} })
figure.update_yaxes(title = { 'text': 'T-SNE dim 1', 'font': {'size':24} })
figure.write_image(write_path)
|
StarcoderdataPython
|
135323
|
import operator
import numpy as np
import bitpacking.packing as pk
from boolnet.utils import PackedMatrix
FUNCTIONS = {
'add': operator.add,
'sub': operator.sub,
'mul': operator.mul,
'div': operator.floordiv,
'mod': operator.mod,
}
def to_binary(value, num_bits):
# little-endian
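    # np.binary_repr is most-significant-bit first; flipud reverses to LSB-first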
return np.flipud(np.array(
[int(i) for i in np.binary_repr(value, num_bits)]))
def two_input_mapping(num_bits_per_operand, functor):
# Upper limit
upper = 2**num_bits_per_operand
# generate dict for function
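    # each key packs both operands into a single int: i1 in the high bits, i2 in the low bits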
function = {i1*upper + i2: functor(i1, i2) % upper
for i1 in range(upper)
for i2 in range(upper)}
return function
def binmap_from_function(func, Ni, No):
M = np.zeros((len(func), Ni+No), dtype=np.uint8)
# views into M
I, T = np.split(M, [Ni], axis=1)
for idx, (inp, out) in enumerate(func.items()):
I[idx] = to_binary(inp, Ni)[:Ni]
T[idx] = to_binary(out, No)[:No]
return PackedMatrix(pk.packmat(M), M.shape[0], Ni)
def mapping_to_file(function, numbits, numout_limit, outfile):
if not outfile:
outfile = '{}{}.npz'.format(function, numbits)
n = numbits
func = two_input_mapping(n, FUNCTIONS[function])
Ni = 2*n
No = n
if numout_limit:
No = numout_limit
Mp = binmap_from_function(func, Ni, No)
np.savez(outfile, matrix=Mp, Ni=Ni, Ne=Mp.Ne)
def mapping_from_file(filename):
with np.load(filename) as f:
return PackedMatrix(f['matrix'], f['Ne'], f['Ni'])
|
StarcoderdataPython
|
4828355
|
import os
import unittest
import yaml
from io import StringIO
from unittest import mock
from .. import *
from mike import mkdocs_utils
class Stream(StringIO):
def __init__(self, name, data=''):
super().__init__(data)
self.name = name
def close(self):
pass
def mock_open_files(files):
def wrapper(filename, *args, **kwargs):
name = os.path.basename(filename)
return Stream(name, files[name])
return wrapper
# This mostly just tests `load_config` from MkDocs, but we want to be sure it
# behaves as we want it.
class TestLoadConfig(unittest.TestCase):
def test_default(self):
os.chdir(os.path.join(test_data_dir, 'basic_theme'))
cfg = mkdocs_utils.load_config()
self.assertEqual(cfg['site_dir'], os.path.abspath('site'))
self.assertEqual(cfg['remote_name'], 'origin')
self.assertEqual(cfg['remote_branch'], 'gh-pages')
self.assertEqual(cfg['use_directory_urls'], True)
def test_abs_path(self):
cfg = mkdocs_utils.load_config(
os.path.join(test_data_dir, 'basic_theme', 'mkdocs.yml')
)
self.assertEqual(cfg['site_dir'],
os.path.join(test_data_dir, 'basic_theme', 'site'))
self.assertEqual(cfg['remote_name'], 'origin')
self.assertEqual(cfg['remote_branch'], 'gh-pages')
self.assertEqual(cfg['use_directory_urls'], True)
def test_custom_site_dir(self):
os.chdir(os.path.join(test_data_dir, 'site_dir'))
cfg = mkdocs_utils.load_config()
self.assertEqual(cfg['site_dir'], os.path.abspath('built_docs'))
self.assertEqual(cfg['remote_name'], 'origin')
self.assertEqual(cfg['remote_branch'], 'gh-pages')
self.assertEqual(cfg['use_directory_urls'], True)
def test_remote(self):
os.chdir(os.path.join(test_data_dir, 'remote'))
cfg = mkdocs_utils.load_config()
self.assertEqual(cfg['site_dir'], os.path.abspath('site'))
self.assertEqual(cfg['remote_name'], 'myremote')
self.assertEqual(cfg['remote_branch'], 'mybranch')
self.assertEqual(cfg['use_directory_urls'], True)
def test_no_directory_urls(self):
os.chdir(os.path.join(test_data_dir, 'no_directory_urls'))
cfg = mkdocs_utils.load_config()
self.assertEqual(cfg['site_dir'], os.path.abspath('site'))
self.assertEqual(cfg['remote_name'], 'origin')
self.assertEqual(cfg['remote_branch'], 'gh-pages')
self.assertEqual(cfg['use_directory_urls'], False)
def test_nonexist(self):
os.chdir(os.path.join(test_data_dir, 'basic_theme'))
with self.assertRaisesRegex(FileNotFoundError, r"'nonexist.yml'"):
mkdocs_utils.load_config('nonexist.yml')
with self.assertRaisesRegex(FileNotFoundError, r"'nonexist.yml'"):
mkdocs_utils.load_config(['nonexist.yml', 'nonexist2.yml'])
cfg = mkdocs_utils.load_config(['nonexist.yml', 'mkdocs.yml'])
self.assertEqual(cfg['site_dir'], os.path.abspath('site'))
self.assertEqual(cfg['remote_name'], 'origin')
self.assertEqual(cfg['remote_branch'], 'gh-pages')
self.assertEqual(cfg['use_directory_urls'], True)
class TestInjectPlugin(unittest.TestCase):
def test_no_plugins(self):
out = Stream('mike-mkdocs.yml')
cfg = '{}'
with mock.patch('builtins.open', mock.mock_open(read_data=cfg)), \
mock.patch('mike.mkdocs_utils.NamedTemporaryFile',
return_value=out), \
mock.patch('os.remove') as mremove: # noqa
with mkdocs_utils.inject_plugin('mkdocs.yml') as f:
self.assertEqual(f, out.name)
newcfg = yaml.load(out.getvalue(), Loader=yaml.Loader)
mremove.assert_called_once()
self.assertEqual(newcfg, {'plugins': ['mike', 'search']})
def test_other_plugins(self):
out = Stream('mike-mkdocs.yml')
cfg = 'plugins:\n - foo\n - bar:\n option: true'
with mock.patch('builtins.open', mock.mock_open(read_data=cfg)), \
mock.patch('mike.mkdocs_utils.NamedTemporaryFile',
return_value=out), \
mock.patch('os.remove') as mremove: # noqa
with mkdocs_utils.inject_plugin('mkdocs.yml') as f:
self.assertEqual(f, out.name)
newcfg = yaml.load(out.getvalue(), Loader=yaml.Loader)
mremove.assert_called_once()
self.assertEqual(newcfg, {'plugins': [
'mike', 'foo', {'bar': {'option': True}},
]})
def test_other_plugins_dict(self):
out = Stream('mike-mkdocs.yml')
cfg = 'plugins:\n foo: {}\n bar:\n option: true'
with mock.patch('builtins.open', mock.mock_open(read_data=cfg)), \
mock.patch('mike.mkdocs_utils.NamedTemporaryFile',
return_value=out), \
mock.patch('os.remove') as mremove: # noqa
with mkdocs_utils.inject_plugin('mkdocs.yml') as f:
self.assertEqual(f, out.name)
newcfg = yaml.load(out.getvalue(), Loader=yaml.Loader)
mremove.assert_called_once()
self.assertEqual(newcfg, {'plugins': {
'mike': {}, 'foo': {}, 'bar': {'option': True},
}})
self.assertEqual(
list(newcfg['plugins'].items()),
[('mike', {}), ('foo', {}), ('bar', {'option': True})]
)
def test_mike_plugin(self):
out = Stream('mike-mkdocs.yml')
cfg = 'plugins:\n - mike'
with mock.patch('builtins.open', mock.mock_open(read_data=cfg)), \
mock.patch('mike.mkdocs_utils.NamedTemporaryFile',
return_value=out), \
mock.patch('os.remove') as mremove: # noqa
with mkdocs_utils.inject_plugin('mkdocs.yml') as f:
self.assertEqual(f, 'mkdocs.yml')
self.assertEqual(out.getvalue(), '')
mremove.assert_not_called()
def test_mike_plugin_options(self):
out = Stream('mike-mkdocs.yml')
cfg = 'plugins:\n - mike:\n option: true'
with mock.patch('builtins.open', mock.mock_open(read_data=cfg)), \
mock.patch('mike.mkdocs_utils.NamedTemporaryFile',
return_value=out), \
mock.patch('os.remove') as mremove: # noqa
with mkdocs_utils.inject_plugin('mkdocs.yml') as f:
self.assertEqual(f, 'mkdocs.yml')
self.assertEqual(out.getvalue(), '')
mremove.assert_not_called()
def test_inherit(self):
out = Stream('mike-mkdocs.yml')
main_cfg = 'INHERIT: mkdocs-base.yml\nplugins:\n foo: {}\n'
base_cfg = 'plugins:\n bar: {}\n'
files = {'mkdocs.yml': main_cfg, 'mkdocs-base.yml': base_cfg}
with mock.patch('builtins.open', mock_open_files(files)), \
mock.patch('mike.mkdocs_utils.NamedTemporaryFile',
return_value=out), \
mock.patch('os.path.exists', return_value=True), \
mock.patch('os.remove') as mremove: # noqa
with mkdocs_utils.inject_plugin('mkdocs.yml') as f:
self.assertEqual(f, 'mike-mkdocs.yml')
newcfg = yaml.load(out.getvalue(), Loader=yaml.Loader)
mremove.assert_called_once()
self.assertEqual(newcfg, {'plugins': {
'mike': {}, 'bar': {}, 'foo': {},
}})
self.assertEqual(
list(newcfg['plugins'].items()),
[('mike', {}), ('bar', {}), ('foo', {})]
)
class TestBuild(unittest.TestCase):
def test_build(self):
self.stage = stage_dir('build')
copytree(os.path.join(test_data_dir, 'basic_theme'), self.stage)
mkdocs_utils.build('mkdocs.yml', '1.0', verbose=False)
self.assertTrue(os.path.exists('site/index.html'))
def test_build_directory(self):
self.stage = stage_dir('build')
copytree(os.path.join(test_data_dir, 'basic_theme'), self.stage)
# Change to a different directory to make sure that everything works,
# including paths being relative to mkdocs.yml (which MkDocs itself is
# responsible for).
with pushd(this_dir):
mkdocs_utils.build(os.path.join(self.stage, 'mkdocs.yml'),
'1.0', verbose=False)
self.assertTrue(os.path.exists('site/index.html'))
class TestVersion(unittest.TestCase):
def test_version(self):
self.assertRegex(mkdocs_utils.version(), r'\S+')
|
StarcoderdataPython
|
3224289
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#python version 2.7.10
from selenium import webdriver
import time
driver = webdriver.Firefox()
driver.get("http://mail.126.com")
# set an implicit wait of 10 seconds
driver.implicitly_wait(10)
def login():
driver.switch_to.frame("x-URS-iframe")
driver.find_element_by_name("email").clear()
driver.find_element_by_name("email").send_keys("username")
driver.find_element_by_name("password").clear()
driver.find_element_by_name("password").send_keys("password")
driver.find_element_by_id("dologin").click()
def logout():
driver.switch_to_default_content()
#driver.find_element_by_link_text(u"退出").click() #success
#driver.find_element_by_xpath("/html/body/header/div/ul/li[18]/a").click() #success
driver.find_element_by_xpath("//ul[@id='_mail_component_6_6']/li[18]/a").click() #success
#driver.find_element_by_xpath("//li[@id='_mail_component_41_41']/a").click() #success
login()
time.sleep(3)
logout()
driver.quit()
|
StarcoderdataPython
|
1633298
|
import logging
import tensorflow as tf
from data_all import get_dataset, get_train_pipeline
from training_all import train
from model_small import BIGBIGAN_G, BIGBIGAN_D_F, BIGBIGAN_D_H, BIGBIGAN_D_J, BIGBIGAN_E
import numpy as np
import os
from PIL import Image
def save_image(img, fname):
img = img*255.0
img = Image.fromarray(img.astype(np.uint8))
img.save(fname)
def visualize(train_data):
out_dir = "images_pos_vis"
if not os.path.exists(out_dir):
os.makedirs(out_dir)
for image, label in train_data:
img, img_aug = tf.split(image, 2, axis=-1)
images = img.numpy()
images_aug = img_aug.numpy()
print(images.shape, images_aug.shape, np.min(images), np.max(images), np.min(images_aug), np.max(images_aug))
for idx, (img, img_aug) in enumerate(zip(images, images_aug)):
if idx == 10:
break
save_image(img, os.path.join(out_dir, "img_" + str(idx)+".png"))
save_image(img_aug, os.path.join(out_dir, "img_aug_" + str(idx)+".png"))
break
def set_up_train(config):
# Setup tensorflow
tf.config.threading.set_inter_op_parallelism_threads(8)
tf.config.threading.set_intra_op_parallelism_threads(8)
physical_devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
# Load dataset
logging.info('Getting dataset...')
train_data, _ = get_dataset(config)
# setup input pipeline
logging.info('Generating input pipeline...')
train_data = get_train_pipeline(train_data, config)
# visualize(train_data)
# get model
logging.info('Prepare model for training...')
weight_init = tf.initializers.orthogonal()
if config.dataset == 'mnist':
weight_init = tf.initializers.TruncatedNormal(mean=0.0, stddev=0.02)
model_generator = BIGBIGAN_G(config, weight_init)
model_discriminator_f = BIGBIGAN_D_F(config, weight_init)
model_discriminator_h = BIGBIGAN_D_H(config, weight_init)
model_discriminator_j = BIGBIGAN_D_J(config, weight_init)
model_encoder = BIGBIGAN_E(config, weight_init)
# train
logging.info('Start training...')
train(config=config,
gen=model_generator,
disc_f=model_discriminator_f,
disc_h=model_discriminator_h,
disc_j=model_discriminator_j,
model_en=model_encoder,
train_data=train_data)
# Finished
logging.info('Training finished ;)')
|
StarcoderdataPython
|
4805066
|
<reponame>carolineyuchen/MMRIV
import torch, add_path
import numpy as np
import os,sys
from methods.mnist_x_model_selection_method import MNISTXModelSelectionMethod
from methods.mnist_xz_model_selection_method import MNISTXZModelSelectionMethod
from methods.mnist_z_model_selection_method import MNISTZModelSelectionMethod
from scenarios.abstract_scenario import AbstractScenario
from joblib import Parallel, delayed
from MMR_IVs.util import ROOT_PATH, load_data
import random
random.seed(527)
SCENARIOS_NAMES = ["mnist_x", "mnist_z", "mnist_xz"]
SCENARIO_METHOD_CLASSES = {
"mnist_x": MNISTXModelSelectionMethod,
"mnist_z": MNISTZModelSelectionMethod,
"mnist_xz": MNISTXZModelSelectionMethod,
}
RESULTS_FOLDER = ROOT_PATH + "/results/mnist/"
def run_experiment(scenario_name,repid,model_id=None,training=False):
# set random seed
seed = 527
torch.manual_seed(seed)
np.random.seed(seed)
num_reps = 10
print("\nLoading " + scenario_name + "...")
train, dev, test = load_data(ROOT_PATH+'/data/'+scenario_name+'/main.npz',Torch=True,verbal=True)
means = []
for rep in range(num_reps):
method_class = SCENARIO_METHOD_CLASSES[scenario_name]
method = method_class(enable_cuda=torch.cuda.is_available())
if training:
if rep < repid:
continue
elif rep >repid:
break
else:
pass
print('here')
method.fit(train.x, train.z, train.y, dev.x, dev.z, dev.y,
g_dev=dev.g,rep=rep,model_id=None,
verbose=True)
g_pred_test = method.predict(test.x)
mse = float(((g_pred_test - test.g) ** 2).mean())
print("---------------")
print("finished running methodology on scenario ",scenario_name)
print("MSE on test:", mse)
print("")
print("saving results...")
folder = ROOT_PATH+"/results/mnist/" + scenario_name + "/"
file_name = "deepgmm_%d.npz" % rep
save_path = os.path.join(folder, file_name)
os.makedirs(folder, exist_ok=True)
np.savez(save_path, x=test.w, y=test.y, g_true=test.g,
g_hat=g_pred_test.detach())
else:
folder = ROOT_PATH+"/results/mnist/" + scenario_name + "/"
file_name = "deepgmm_%d.npz" % rep
save_path = os.path.join(folder, file_name)
if os.path.exists(save_path):
res = np.load(save_path)
means += [((res['g_true']-res['g_hat'])**2).mean()]
else:
                print(save_path, 'does not exist')
return means
def main():
    for scenario in SCENARIOS_NAMES:
        # run_experiment requires a repid; pass 0 (unused when training=False)
        run_experiment(scenario, 0)
if __name__ == "__main__":
for sid in range(3):
for repid in range(10):
run_experiment(SCENARIOS_NAMES[sid], repid, training=True)
for s in SCENARIOS_NAMES:
means = run_experiment(s, 0, 3, training=False)
print(means)
mean = np.mean(means)
std = np.std(means)
print("{} {:.3f} $pm$ {:.3f}".format(s, mean,std))
|
StarcoderdataPython
|
1666259
|
<filename>agent_reactor.py
'''
'''
import threading
import time
from protocol import State, Direction
from facing import Facing
from atoms import Position, Velocity, Face
from observer import Emitter, Listener, Event
from packet_event import PacketEvent
class TickEvent(Event):
pass
class StopEvent(Event):
pass
class GameInfo:
def __init__(self):
self.entity_id = None
self.game_mode = None
self.dimension = None
self.difficulty = None
self.level_type = None
class ModelReactor:
SECONDS_PER_GAME_TICK = 0.05
def __init__(self, packet_factory, connection):
self.factory = packet_factory
self.connection = connection
self.tick_emitter = Emitter(TickEvent)
self.stop_emitter = Emitter(StopEvent)
RESPONSE_PACKETS = ('client_command', 'teleport_confirm', 'flying',
'position_look', 'block_dig',
'entity_action', 'block_place', 'chat',
'use_entity')
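        # expose each outgoing packet class as a "<name>_packet" attribute for later use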
for name in RESPONSE_PACKETS:
prop_name = name + '_packet'
packet = packet_factory.get_by_name(State.PLAY,
Direction.TO_SERVER, name)
setattr(self, prop_name, packet)
self.dead = True
self.respawn_timer = None
self.facing = Facing()
self.position = Position()
self.velocity = Velocity()
self.last_time = None
self.tick_counter = 0
self.game_info = GameInfo()
self.dig_ticks_remaining = None
self.respond = True
self.responder_thread = threading.Thread(target=self.responder)
# map of (x,z) --> chunk data
self.chunks = {}
def stop(self):
# TODO use a queue.Queue for this instead?
self.respond = False
if self.responder_thread.is_alive():
self.responder_thread.join()
def responder(self):
'''This is the method that gets called in a separate thread.'''
# TODO use a queue.Queue for this instead?
while self.respond:
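            # poll the clock; fire one game tick every SECONDS_PER_GAME_TICK (50 ms)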
if self.last_time is not None:
now = time.perf_counter()
if (now - self.last_time) >= self.SECONDS_PER_GAME_TICK:
self.last_time = now
self.on_tick_local()
self.tick_emitter()
def on_tick_local(self):
if self.respawn_timer is not None:
self.respawn_timer -= 1
if self.respawn_timer <= 0:
packet = self.client_command_packet()
packet.fields.actionId = 0
self.connection.send(packet)
self.respawn_timer = None
return
if not self.velocity.stopped and self.tick_counter % 2 == 0:
pkt = self.position_look_packet()
pkt.fields.x = self.position.x + self.velocity.x
pkt.fields.y = self.position.y + self.velocity.y
pkt.fields.z = self.position.z + self.velocity.z
pkt.fields.yaw = self.facing.yaw
pkt.fields.pitch = self.facing.pitch
pkt.fields.onGround = True
self.connection.send(pkt)
self.position.x += self.velocity.x
self.position.y += self.velocity.y
self.position.z += self.velocity.z
if self.dig_ticks_remaining is not None:
self.dig_ticks_remaining -= 1
if self.dig_ticks_remaining == 0:
self.dig_ticks_remaining = None
dig = self.block_dig_packet()
dig.fields.status = 2
dig.fields.location.x = 0
dig.fields.location.y = 0
dig.fields.location.z = 0
dig.fields.face = 0
self.connection.send(dig)
if self.tick_counter % 20 == 0:
pkt = self.position_look_packet()
pkt.fields.x = self.position.x
pkt.fields.y = self.position.y
pkt.fields.z = self.position.z
pkt.fields.yaw = self.facing.yaw
pkt.fields.pitch = self.facing.pitch
pkt.fields.onGround = True
self.connection.send(pkt)
elif self.tick_counter % 1 == 0:
pkt = self.flying_packet()
pkt.fields.onGround = True
self.connection.send(pkt)
@Listener(PacketEvent, area=State.PLAY, key='login')
def on_player_login(self, event):
packet = event.packet
self.game_info.entity_id = packet.fields.entityId
self.game_info.game_mode = packet.fields.gameMode
self.game_info.dimension = packet.fields.dimension
self.game_info.difficulty = packet.fields.difficulty
self.game_info.level_type = packet.fields.levelType
settings = self.factory.get_by_name(State.PLAY, Direction.TO_SERVER,
'settings')()
settings.fields.locale = 'en_CA'
settings.fields.viewDistance = 3
settings.fields.chatFlags = 0
settings.fields.chatColors = False
settings.fields.skinParts = 0x7d
settings.fields.mainHand = 0
self.connection.send(settings)
@Listener(PacketEvent, area=State.PLAY, key='update_time')
def on_update_time(self, event):
if self.last_time is None:
self.last_time = time.perf_counter()
self.responder_thread.start()
@Listener(PacketEvent, area=State.PLAY, key='update_health')
def on_health(self, event):
packet = event.packet
print('on_health: {}, food: {}, saturation: {}'.format(
packet.fields.health, packet.fields.food,
packet.fields.foodSaturation))
if packet.fields.health > 0:
self.dead = False
else:
self.dead = True
self.respawn_timer = 20
@Listener(PacketEvent, area=State.PLAY, key='respawn')
def on_respawn(self, event):
packet = event.packet
print('on_respawn')
packet = self.client_command_packet()
packet.fields.actionId = 0
self.connection.send(packet)
@Listener(PacketEvent, area=State.PLAY, key='position')
def on_position(self, event):
packet = event.packet
self.position.x = packet.fields.x
self.position.y = packet.fields.y
self.position.z = packet.fields.z
print('on_position, X: {}, Y: {}, Z: {}, '
'Yaw: {}, Pitch: {}, teleport ID: {}'.format(
packet.fields.x, packet.fields.y, packet.fields.z,
packet.fields.yaw, packet.fields.pitch,
packet.fields.teleportId))
self.do_stop()
teleport_id = packet.fields.teleportId
tpc = self.teleport_confirm_packet()
tpc.fields.teleportId = teleport_id
self.connection.send(tpc)
@Listener(PacketEvent, area=State.PLAY, key='map_chunk')
def on_map_chunk(self, event):
packet = event.packet
x, z = packet.fields.x, packet.fields.z
self.chunks[(x, z)] = packet
@Listener(PacketEvent, area=State.PLAY, key='unload_chunk')
def on_unload_chunk(self, event):
packet = event.packet
x, z = packet.fields.chunkX, packet.fields.chunkZ
self.chunks.pop((x, z), None)
def do_stop(self):
self.velocity.reset()
self.stop_emitter()
def crouch(self):
ea = self.entity_action_packet()
ea.fields.entityId = self.game_info.entity_id
# TODO are there constants for these in minecraft-data?
ea.fields.actionId = 0
ea.fields.jumpBoost = 0
self.connection.send(ea)
def stand(self):
ea = self.entity_action_packet()
ea.fields.entityId = self.game_info.entity_id
# TODO are there constants for these in minecraft-data?
ea.fields.actionId = 1
ea.fields.jumpBoost = 0
self.connection.send(ea)
def dig(self, target_location):
if self.dig_ticks_remaining is not None:
return
# start digging
dig = self.block_dig_packet()
dig.fields.status = 0
dig.fields.location.x = target_location.x
dig.fields.location.y = target_location.y
dig.fields.location.z = target_location.z
# TODO this isn't accurate...but does it need to be?
dig.fields.face = 0
self.connection.send(dig)
# schedule a "stop digging" response
# TODO can we figure out how long this should actually be? or can
# we wait for a packet from the server and then stop?
self.dig_ticks_remaining = 20
def place_block(self, target_location, face):
assert isinstance(face, Face)
bp = self.block_place_packet()
bp.fields.location.x = target_location.x
bp.fields.location.y = target_location.y
bp.fields.location.z = target_location.z
bp.fields.direction = face
bp.fields.hand = 0
bp.fields.cursorX = 0.5
bp.fields.cursorY = 0.5
bp.fields.cursorZ = 0.5
self.connection.send(bp)
def say(self, message, sender=None):
# TODO for some reason this only seems to work if sender != None
# TODO we should probably, by default, only chat with our "owner"
chat = self.chat_packet()
if sender != 'Server':
chat.fields.message = "/msg {} {}".format(sender, message)
else:
chat.fields.message = message
self.connection.send(chat)
def use_entity(self, target_entity_id, hand):
assert(hand in [0, 1])
use_packet = self.use_entity_packet()
use_packet.fields.target = target_entity_id
use_packet.fields.mouse = 0 # 0=interact, 1=attack, 2=interact at
use_packet.fields.hand = hand # 0=main, 1=offhand
self.connection.send(use_packet)
def leave(self):
# TODO this doesn't exit cleanly, since the dispatcher threads will
# continue to run after we disconnect the socket
self.connection.disconnect()
|
StarcoderdataPython
|
14123
|
<gh_stars>0
import time
import pytest
# preparing selenium and chrome web driver manager
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from webdriver_manager.chrome import ChromeDriverManager
# importing os for environmental variable, and docker-compose up
import os
@pytest.fixture(scope="session")
def browser():
    # configure Chrome options before instantiating the driver
    options = webdriver.ChromeOptions()
    options.add_argument('--headless')
    options.add_argument('--disable-gpu')
    driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
    driver.get("http://localhost:1667")
    return driver
|
StarcoderdataPython
|
1617382
|
"""Test helpers for Freebox."""
from unittest.mock import patch
import pytest
@pytest.fixture(autouse=True)
def mock_path():
"""Mock path lib."""
with patch("homeassistant.components.freebox.router.Path"):
yield
|
StarcoderdataPython
|
3249259
|
<reponame>kaushnian/TradingView_Machine_Learning<filename>OptimizeLongTakeprofit.py
from selenium.common.exceptions import NoSuchElementException, StaleElementReferenceException, TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
import time
import numpy as np
from TradeViewGUI import Main
from my_functions import Functions
url = 'https://www.tradingview.com/chart/'
class LongTakeProfit(Functions):
def __init__(self):
Main.__init__(self)
self.driver = self.create_driver()
self.run_script()
def run_script(self):
"""find the best take profit value."""
# Loading Webpage.
try:
my_range = np.arange(float(self.minLongTakeprofitValue.text()), float(self.maxLongTakeprofitValue.text()), float(self.LongIncrementValue.text()))
except ValueError:
print("\nValue Error: Make sure all available text input boxes are filled with a number for script to run properly.\n")
return
wait = WebDriverWait(self.driver, 10)
try:
self.driver.get(url)
except Exception:
print('WebDriver Error: Please Check Your FireFox Profile Path Is Correct.\n')
print('Find Your Firefox Path Instructions. https://imgur.com/gallery/rdCqeT5 ')
self.click_strategy_tester()
try:
self.click_overview()
except NoSuchElementException:
time.sleep(1)
self.click_overview()
print("Generating Max Profit For Take Profit.")
print("Loading script...\n")
self.click_settings_button(wait)
self.click_input_tab()
self.click_enable_long_strategy_checkbox()
self.click_rest_all_inputs()
self.click_ok_button()
# Searching for best take profit for your strategy.
for number in my_range:
count = round(number, 2)
try:
self.click_settings_button(wait)
self.click_long_takeprofit_input(count, wait)
self.get_net_profit_takeprofit(count, wait)
except (StaleElementReferenceException, TimeoutException, NoSuchElementException):
print("script has timed out.")
break
# adding the best take profit to your strategy on TradingView.
self.click_settings_button(wait)
best_key = self.find_best_takeprofit()
self.click_long_takeprofit_input(best_key, wait)
time.sleep(1)
# Printing Results of the best take profit value found.
print("\n----------Results----------\n")
self.click_overview()
self.print_best_takeprofit()
self.click_performance_summary()
self.print_total_closed_trades()
self.print_win_rate()
self.print_net_profit()
self.print_max_drawdown()
self.print_sharpe_ratio()
self.print_sortino_ratio()
self.print_win_loss_ratio()
self.print_avg_win_trade()
self.print_avg_loss_trade()
self.print_avg_bars_in_winning_trades()
# print("\n----------More Results----------\n")
# self.print_gross_profit()
# self.print_gross_loss()
# self.print_buy_and_hold_return()
# self.print_max_contracts_held()
# self.print_open_pl()
# self.print_commission_paid()
# self.print_total_open_trades()
# self.print_number_winning_trades()
# self.print_number_losing_trades()
# self.print_percent_profitable()
# self.print_avg_trade()
# self.print_avg_win_trade()
# self.print_avg_loss_trade()
# self.print_largest_winning_trade()
# self.print_largest_losing_trade()
# self.print_avg_bars_in_trades()
# self.print_avg_bars_in_winning_trades()
# self.print_avg_bars_in_losing_trades()
# self.print_margin_calls()
|
StarcoderdataPython
|
4817457
|
<filename>src/indor/command_register.py
from .indor_exceptions import ClassPropertyNotFound
from .command import Command
from .command_factory import CommandFactory
class CommandRegister(type(Command)):
def __init__(cls, name, bases, dic):
cls.property_name_for_printer = 'pretty_name'
if cls.property_name_for_printer not in dic:
raise ClassPropertyNotFound(name, cls.property_name_for_printer)
super(CommandRegister, cls).__init__(name, bases, dic)
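        # auto-register every newly created Command subclass with the factory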
CommandFactory().add_class(name, cls)
|
StarcoderdataPython
|
1758355
|
from .conf import MODULES
from zcrmsdk.Handler import APIHandler
from zcrmsdk.CLException import ZCRMException
from zcrmsdk.Utility import APIConstants
from zcrmsdk.Request import APIRequest
class BlueprintAPI():
def __init__(self):
self._MAIN_URL = '{module_api_name}/{record_id}/actions/blueprint'
self.modules_api_names = MODULES
def get_record_blueprint(self,module,id):
handler_ins=APIHandler()
handler_ins.request_url_path=self._MAIN_URL.format(module_api_name=self.modules_api_names[module],
record_id=id)
handler_ins.request_method=APIConstants.REQUEST_METHOD_GET
handler_ins.request_api_key=APIConstants.DATA
apiResponse=APIRequest(handler_ins).get_api_response()
return apiResponse.response_json
    def update_record_blueprint(self, module, id, transition_id, additional_data=None):
        # avoid a shared mutable default argument; treat None as "no extra data"
        if additional_data is None:
            additional_data = {}
        handler_ins = APIHandler()
handler_ins.request_url_path=self._MAIN_URL.format(module_api_name=self.modules_api_names[module],
record_id=id)
handler_ins.request_method=APIConstants.REQUEST_METHOD_PUT
handler_ins.request_api_key=APIConstants.DATA
handler_ins.request_body={"blueprint":[{"transition_id":transition_id,"data":additional_data}]}
apiResponse=APIRequest(handler_ins).get_api_response()
return apiResponse.response_json
|
StarcoderdataPython
|
3264188
|
import pytest
from practice_atcoder.typical_ninety.no08code import question
class Test(object):
@pytest.mark.parametrize("s,expect", [
("attcoderer", "6"),
("aattccooddeerr", "128"),
("atcoderatcoderatcoderatcoderatcoderatcoderatcoderatcoderatcoder", "6435"),
("<KEY>", "99337"),
])
def test(self, s, expect):
assert question(s) == expect
|
StarcoderdataPython
|
3205139
|
<gh_stars>1-10
from django import template
from templatetag_sugar.register import tag
from templatetag_sugar.parser import *
register = template.Library()
from pws.models import Entry
from pws.forms import EntryForm
from django.urls import reverse
from django.db.models import Q
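# usable in templates as {% empty_password user as varname %}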
@tag(register, [Variable(), Constant("as"), Name()])
def empty_password(context, user, asvar):
context[asvar] = user.check_password('')
return ""
@register.inclusion_tag('pws/pwlist.html', takes_context=True)
def include_pwlist(context):
f = context.get('entry_filter')
entries = Entry.objects.filter(Q(name__contains=f) | Q(user__contains=f) | Q(email__contains=f) | Q(extra__contains=f)) if f else Entry.objects.all()
return {
'entry_list': entries,
'entry_filter': f,
'plain': context.get('plain'),
}
@register.inclusion_tag('pws/pwform-flat.html', takes_context=True)
def include_pwform(context):
return {
'pwform': context.get('pwform', EntryForm()),
'edit': context.get('edit', False),
'action': context.get('action', reverse('update')),
}
@register.inclusion_tag('pws/inputerror.html')
def inputerror(messagewrap):
return {
'message': messagewrap,
}
@register.inclusion_tag('pws/inputerror_message.html')
def inputerror_message(message):
return {
'message': message,
}
@register.inclusion_tag('pws/deleteform.html')
def deleteform(delete_id, delete_text="X", action=None):
    # resolve the default action lazily; reverse() at import time can run before the URLconf is ready
    if action is None:
        action = reverse('delete')
    return {
        'delete_id': delete_id,
        'delete_text': delete_text,
        'action': action,
    }
|
StarcoderdataPython
|
1757183
|
from typing import Union, Sized
from dataclasses import dataclass
from torch.utils.data import Dataset, DataLoader
from torch.utils.data import TensorDataset as _TensorDataset
from hearth.containers import TensorDict
from hearth._collate import default_collate
class BatchesMixin:
"""mixin for supporting batches and collate method from datasets."""
def collate(self, batch):
return default_collate(batch)
def batches(
self,
batch_size: int = 32,
shuffle: bool = False,
drop_last: bool = False,
num_workers: int = 0,
**kwargs,
) -> DataLoader:
"""return a Dataloader that iterates over batches of this dataset.
Note:
this method supports additional keyword args which will be passed to the dataloader.
Args:
batch_size: Defaults to 32.
shuffle: if True shuffle the dataset. Defaults to False.
drop_last: if true drop the last batch if it is less than batch size. Defaults to False.
num_workers: number of workers. Defaults to 0.
"""
return DataLoader(
self, # type: ignore
batch_size=batch_size,
num_workers=num_workers,
drop_last=drop_last,
shuffle=shuffle,
collate_fn=self.collate,
**kwargs,
)
@dataclass
class XYDataset(Dataset, BatchesMixin):
"""basic dataset that returns a tuple of inputs and targets.
    supports :class:`hearth.containers.TensorDict` for x and y.
"""
x: Union[Sized, TensorDict]
y: Union[Sized, TensorDict]
def __len__(self) -> int:
if isinstance(self.x, TensorDict):
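            # for a TensorDict, the length is the batch dimension of its first tensor, not the key count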
return len(next(iter(self.x.values()))) # type: ignore
return len(self.x)
def __getitem__(self, index):
return self.x[index], self.y[index]
class TensorDataset(_TensorDataset, BatchesMixin):
pass
|
StarcoderdataPython
|
89931
|
import os
import sys
#import json
#import datetime
#import numpy as np
#import skimage.draw
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import model as modellib, utils
import keras.models
import tensorflow as tf
from tensorflow.python.framework import graph_io
# TensorRT helper used by create_inference_graph below (assumes the TF 1.x contrib layout)
import tensorflow.contrib.tensorrt as trt
# Path to trained weights file
#COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
DEFAULT_SAVE_PB = os.path.join(ROOT_DIR, "logs")
############################################################
# Configurations
############################################################
class ConvConfig(Config):
"""Configuration for training on the toy dataset.
Derives from the base Config class and overrides some values.
"""
# Give the configuration a recognizable name
NAME = "ASG_Conv"
# We use a GPU with 12GB memory, which can fit two images.
# Adjust down if you use a smaller GPU.
IMAGES_PER_GPU = 2
# Number of classes (including background)
NUM_CLASSES = 1 + 1 # Background + balloon
# Number of training steps per epoch
STEPS_PER_EPOCH = 100
# Skip detections with < 90% confidence
DETECTION_MIN_CONFIDENCE = 0.9
#def train(model):
# """Train the model."""
# # Training dataset.
# dataset_train = BalloonDataset()
# dataset_train.load_balloon(args.dataset, "train")
# dataset_train.prepare()
#
# # Validation dataset
# dataset_val = BalloonDataset()
# dataset_val.load_balloon(args.dataset, "val")
# dataset_val.prepare()
#
# # *** This training schedule is an example. Update to your needs ***
# # Since we're using a very small dataset, and starting from
# # COCO trained weights, we don't need to train too long. Also,
# # no need to train all layers, just the heads should do it.
# print("Training network heads")
# model.train(dataset_train, dataset_val,
# learning_rate=config.LEARNING_RATE,
# epochs=30,
# layers='heads')
def freeze_graph(graph, session, output, save_pb_dir='.', save_pb_name='frozen_model.pb', save_pb_as_text=False):
with graph.as_default():
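        # strip training-only ops, then fold variables into constants for inference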
graphdef_inf = tf.graph_util.remove_training_nodes(graph.as_graph_def())
graphdef_frozen = tf.graph_util.convert_variables_to_constants(session, graphdef_inf, output)
graph_io.write_graph(graphdef_frozen, save_pb_dir, save_pb_name, as_text=False)
if save_pb_as_text:
graph_io.write_graph(graphdef_frozen, save_pb_dir, save_pb_name+"txt", as_text=True)
return graphdef_frozen
############################################################
# Main - Converter
############################################################
if __name__ == '__main__':
import argparse
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Convert H5 to PB')
parser.add_argument('--weights', required=True,
metavar="/path/to/weights.h5",
help="Path to weights .h5 file or 'coco'")
parser.add_argument('--logs', required=False,
default=DEFAULT_LOGS_DIR,
metavar="/path/to/logs/",
help='Logs and checkpoints directory (default=logs/)')
parser.add_argument('--save', required=False,
default=DEFAULT_SAVE_PB,
metavar="/path/to/pb/",
help='Path to save pb too)')
args = parser.parse_args()
print("Weights: ", args.weights)
print("Logs: ", args.logs)
print("save: ", args.save)
class InferenceConfig(ConvConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
config = InferenceConfig()
model = modellib.MaskRCNN(mode="inference", config=config, model_dir=args.logs)
weights_path = args.weights
model.load_weights(args.weights, by_name=True)
#model.keras_model.save('./mymodel.hdf5')
session = tf.keras.backend.get_session()
input_names = [t.op.name for t in model.keras_model.inputs]
output_names = [t.op.name for t in model.keras_model.outputs]
# Prints input and output nodes names, take notes of them.
#print(input_names, output_names)
print ("Input Names: {}".format(input_names))
print ("Output Names: {}".format(output_names))
text_file = open(args.save.rstrip()+"/IO_layers.txt", "w")
text_file.write("Input Names: {}\n\n".format(input_names))
text_file.write("Output Names: {}".format(output_names))
text_file.close()
frozen_graph = freeze_graph(session.graph, session, [out.op.name for out in model.keras_model.outputs], save_pb_dir=args.save, save_pb_as_text=True)
#frozen_graph_txt = freeze_graphTxt(session.graph, session, [out.op.name for out in model.keras_model.outputs], save_pb_dir=args.save)
trt_graph = trt.create_inference_graph(
input_graph_def=frozen_graph,
outputs=output_names,
max_batch_size=2,
max_workspace_size_bytes=1 << 25,
precision_mode='FP16',
minimum_segment_size=50)
graph_io.write_graph(trt_graph, args.save.rstrip(), "trt_graph.pb", as_text=False)
graph_io.write_graph(trt_graph, args.save.rstrip(), "trt_graph.pbtxt", as_text=True)
#graph_io.write_graph(frozen_graph, "./model/", "test_frozen_graph.pb", as_text=False)
#graph_io.write_graph(frozen_graph_txt, "./model/", "test_frozen_graph.pbtxt", as_text=True)
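# A minimal sketch of loading the exported graph back for inference (the path and
# the import pattern are assumptions; use the node names written to IO_layers.txt):
#
#   with tf.gfile.GFile('./trt_graph.pb', 'rb') as f:
#       graph_def = tf.GraphDef()
#       graph_def.ParseFromString(f.read())
#   with tf.Graph().as_default() as g:
#       tf.import_graph_def(graph_def, name='')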
|
StarcoderdataPython
|
3319484
|
import media
import json
def read_movies_file(file):
    # reference for reading a json file: http://stackoverflow.com/a/2835672
    with open(file) as data_file:
        data = json.load(data_file)
movies_list = data["movies"]
movies = []
for movie in movies_list:
movies.append(media.Movie(movie["name"], movie["description"], movie["trailer_url"], movie["poster_url"]))
return movies
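# A minimal sketch of the JSON layout this reader assumes (field names taken
# from the attribute lookups above):
#
# {
#     "movies": [
#         {"name": "...", "description": "...",
#          "trailer_url": "...", "poster_url": "..."}
#     ]
# }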
|
StarcoderdataPython
|
107387
|
<gh_stars>1-10
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
from telegram import ext
from . import config
import inspect
from functools import wraps
class Application(object):
"""
Application is the central object. It's permit to you to add command, to run and to stop your bot. By default,
the bot have the command help, which reference all your commands if there are documented in the docstring format,
and the unknown message if the command doesn't exist.
"""
_instance = None
class __Application(object):
def __init__(self):
"""
Initialisation of the bot application. During the initialization, the construct will attempt to get and
to connect to the Telegram API
"""
self._config = config.Configuration()
self._updater = ext.Updater(token=self._config.token)
self._dispatcher = self._updater.dispatcher
self._help_command = {}
self._admin_command = {}
self._help_cmd_str = ""
self._help_admin_cmd_str = ""
def command(self, cmd_name: str="", group: int=ext.dispatcher.DEFAULT_GROUP, filters: ext.Filters=None,
allow_edited: bool=False, pass_args: bool=False, pass_update_queue: bool=False,
pass_job_queue: bool=False, pass_user_data: bool=False, pass_chat_data: bool=False):
"""
Create an CommandHandler and add it to the main dispatcher
:param cmd_name:
:param group:
:param filters:
:param allow_edited:
:param pass_args:
:param pass_update_queue:
:param pass_job_queue:
:param pass_user_data:
:param pass_chat_data:
:return:
"""
def decorator(callback):
name = cmd_name
if name == "":
name = callback.__name__
self._dispatcher.add_handler(handler=ext.CommandHandler(command=name, callback=callback,
filters=filters, allow_edited=allow_edited,
pass_args=pass_args,
pass_update_queue=pass_update_queue,
pass_job_queue=pass_job_queue,
pass_user_data=pass_user_data,
pass_chat_data=pass_chat_data), group=group)
if callback.__name__ in self._admin_command.keys():
del self._admin_command[callback.__name__]
self._admin_command[callback] = name
else:
self._help_command[callback] = name
return decorator
def restricted(self, callback):
"""
Restrict the usage of callback
:param callback:
:return:
"""
self._admin_command[callback.__name__] = None
@wraps(callback)
def wrapped(bot, update, *args, **kwargs):
user_id = update.effective_user.id
# TODO see to use bot.get_chat_administrators function
if user_id not in self._config.admins:
self._unknown_command(bot, update)
print("Unauthorized access denied for {}.".format(user_id))
return
return callback(bot, update, *args, **kwargs)
return wrapped
def inline(self, group: int=ext.dispatcher.DEFAULT_GROUP, pass_update_queue: bool=False,
pass_job_queue: bool=False, pattern=None, pass_groups: bool=False, pass_groupdict: bool=False,
pass_user_data: bool=False, pass_chat_data: bool=False):
"""
Create an InlineQueryHandler and add it to the main dispatcher
:param group:
:param pass_update_queue:
:param pass_job_queue:
:param pattern:
:param pass_groups:
:param pass_groupdict:
:param pass_user_data:
:param pass_chat_data:
:return:
"""
def decorator(callback):
self._dispatcher.add_handler(handler=ext.InlineQueryHandler(callback=callback,
pass_update_queue=pass_update_queue,
pass_job_queue=pass_job_queue,
pattern=pattern, pass_groups=pass_groups,
pass_groupdict=pass_groupdict,
pass_user_data=pass_user_data,
pass_chat_data=pass_chat_data), group=group)
return decorator
def message(self, filters: ext.Filters=ext.Filters.text, group: int=ext.dispatcher.DEFAULT_GROUP,
allow_edited: bool=False, pass_update_queue: bool=False, pass_job_queue: bool=False,
pass_user_data: bool=False, pass_chat_data: bool=False, message_updates: bool=True,
channel_post_updates: bool=True, edited_updates: bool=False):
"""
Create a MessageHandler and add it to the main dispatcher
:param filters:
:param group:
:param allow_edited:
:param pass_update_queue:
:param pass_job_queue:
:param pass_user_data:
:param pass_chat_data:
:param message_updates:
:param channel_post_updates:
:param edited_updates:
:return:
"""
def decorator(callback):
self._dispatcher.add_handler(handler=ext.MessageHandler(filters=filters, callback=callback,
allow_edited=allow_edited,
pass_update_queue=pass_update_queue,
pass_job_queue=pass_job_queue,
pass_user_data=pass_user_data,
pass_chat_data=pass_chat_data,
message_updates=message_updates,
channel_post_updates=channel_post_updates,
edited_updates=edited_updates), group=group)
return decorator
def run(self):
"""
Run your bot with all command added
"""
self._generate_help()
self._dispatcher.add_handler(handler=ext.CommandHandler("help", self._help()),
group=ext.dispatcher.DEFAULT_GROUP)
self._dispatcher.add_handler(handler=ext.MessageHandler(ext.Filters.command, self._unknown_command),
group=ext.dispatcher.DEFAULT_GROUP)
self._updater.start_polling()
self.stop()
@staticmethod
def _unknown_command(bot, update):
bot.send_message(chat_id=update.message.chat_id, text="Sorry I don't understant your request")
def _generate_help(self):
self._help_cmd_str = "\n".join(["{}: {}".format(cmd, inspect.cleandoc(inspect.getdoc(callback)))
for callback, cmd in self._help_command.items()])
self._help_admin_cmd_str = "\n".join(["{}: {}".format(cmd, inspect.cleandoc(inspect.getdoc(callback)))
for callback, cmd in self._admin_command.items()])
def _help(self):
"""
Add the help command
:return: the function decorated
"""
def _help_command(bot, update):
help_str = self._help_cmd_str
# TODO see to use bot.get_chat_administrators function
if update.effective_user.id in self._config.admins:
help_str += "\n" + self._help_admin_cmd_str
bot.send_message(chat_id=update.message.chat_id, text=help_str)
return _help_command
def stop(self):
"""
Stop your bot
"""
self._updater.idle()
self._updater.stop()
def __new__(cls, *args, **kwargs):
"""
Create an instance of __Application
:param args: Not used
:param kwargs: Not used
:return: __Application instance
"""
if cls._instance is None:
cls._instance = Application.__Application()
return cls._instance
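# A minimal usage sketch (not part of the library; handler signatures follow the
# python-telegram-bot (bot, update) convention used above):
#
#   app = Application()
#
#   @app.command()
#   def hello(bot, update):
#       """Say hello"""
#       bot.send_message(chat_id=update.message.chat_id, text="Hello!")
#
#   app.run()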
|
StarcoderdataPython
|
3270903
|
<reponame>aliyyousuff/MOOC
# ProblemSet4.py  -*- coding: utf-8 -*-
"""
Problem 4_1:
Write a function that will sort an alphabetic list (or list of words) into
alphabetical order. Make it sort independently of whether the letters are
capital or lowercase. First print out the wordlist, then sort and print out
the sorted list.
Here is my run on the list firstline below (note that the wrapping was added
when I pasted it into the file -- this is really two lines in the output).
problem4_1(firstline)
['Happy', 'families', 'are', 'all', 'alike;', 'every', 'unhappy', 'family',
'is', 'unhappy', 'in', 'its', 'own', 'way.', '<NAME>', '<NAME>']
['alike;', 'all', '<NAME>', 'are', 'every', 'families', 'family',
'Happy', 'in', 'is', 'its', '<NAME>', 'own', 'unhappy', 'unhappy', 'way.']
"""
#%%
firstline = ["Happy", "families", "are", "all", "alike;", "every", \
"unhappy", "family", "is", "unhappy", "in", "its", "own", \
"way.", "<NAME>", "<NAME>"]
#%%
def problem4_1(wordlist):
""" Takes a word list prints it, sorts it, and prints the sorted list """
pass # replace this pass (a do-nothing) statement with your code
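#%%
# A minimal sketch of one possible solution (not the official answer):
def problem4_1_sketch(wordlist):
    """ Print the list, then print it sorted case-insensitively """
    print(wordlist)
    print(sorted(wordlist, key=str.lower))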
#%%
"""
Problem 4_2:
Write a function that will compute and print the mean and standard deviation
of a list of real numbers (like the following). Of course, the length of the
list could be different. Don't forget to import any libraries that you might
need.
Here is my run on the list of 25 floats create below:
problem4_2(numList)
51.528
30.81215290541488
"""
#%%
import random
numList = []
random.seed(150)
for i in range(0,25):
numList.append(round(100*random.random(),1))
#%%
def problem4_2(ran_list):
""" Compute the mean and standard deviation of a list of floats """
pass # replace this pass (a do-nothing) statement with your code
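#%%
# A minimal sketch of one possible solution (not the official answer); whether the
# population or the sample standard deviation is expected is an assumption here:
import statistics
def problem4_2_sketch(ran_list):
    """ Print the mean and standard deviation of a list of floats """
    print(statistics.mean(ran_list))
    print(statistics.stdev(ran_list))  # use statistics.pstdev for the population form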
#%%
"""
Problem 4_3:
Write a function problem4_3(product, cost) so that you can enter the product
and its cost and it will print out nicely. Specifically, allow 25 characters
for the product name and left-justify it in that space; allow 6 characters for
the cost and right justify it in that space with 2 decimal places. Precede the
cost with a dollar-sign. There should be no other spaces in the output.
Here is how one of my runs looks:
problem4_3("toothbrush",2.6)
toothbrush               $  2.60
"""
#%%
def problem4_3(product, cost):
""" Prints the product name in a space of 25 characters, left-justified
and the price in a space of 6 characters, right-justified"""
pass # replace this pass (a do-nothing) statement with your code
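#%%
# A minimal sketch of one possible solution (not the official answer):
def problem4_3_sketch(product, cost):
    """ Left-justify the product in 25 characters and right-justify the
        dollar cost in 6 characters with 2 decimal places """
    print("{:<25}${:>6.2f}".format(product, cost))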
#%%
"""
Problem 4_4:
This problem is to build on phones.py. You add a new menu item
r) Reorder
This will reorder the names/numbers in the phone list alphabetically by name.
This may sound difficult at first thought, but it really is straight forward.
You need to add two lines to the main_loop and one line to menu_choice to print
out the new menu option (and add 'r' to the list of acceptable choices). In
addition you need to add a new function to do the reordering: I called mine
reorder_phones(). Here is a start for this very short function:
def reorder_phones():
global phones # this insures that we use the one at the top
pass # replace this pass (a do-nothing) statement with your code
Note: The auto-grader will run your program, choose menu items s, r, s, and q
in that order. It will provide an unsorted CSV file and see if your program
reorders it appropriately. The grader will provide a version of myphones.csv
that has a different set of names in it from the ones we used in the lesson.
This difference in data will, of course, not matter with a well coded program.
Below the result of this added function is shown using the names used in class.
Note that name is a single field. Reorder by that field, don't try to separate
first and last name and reorder by one or the other --- just treat name as a
single field that you re-order by. Also, in this case upper/lower case won't
matter.
TIP: phones[] is a list of lists (each sublist is a [name, phone]. It looks
complicated to sort. Just pretend that each sublist is a single name item and
code it accordingly. It will work. This is a beginner course and this sort
function requires only one line and no fancy outside material to make it work.)
The main thrust of this problem is to add in the various pieces to make a new
menu entry.
Before:
Choice: s
Name Phone Number
1 <NAME> (212) 842-2527
2 <NAME> (212) 452-8145
3 <NAME> (212) 452-8723
After:
Choice: s
Name Phone Number
1 <NAME> (212) 452-8723
2 <NAME> (212) 452-8145
3 <NAME> (212) 842-2527
"""
|
StarcoderdataPython
|
105353
|
import os
import subprocess
import uno
import unohelper
from com.sun.star.connection import NoConnectException
from com.sun.star.lang import IllegalArgumentException
class SpreadScript(object):
def __init__(self, file_name=None):
"""Initialise the class.
:arg str file_name: File name.
"""
self._desktop = None
self._start_soffice()
self._connect_soffice()
if file_name:
self.open(file_name)
def __del__(self):
"""Close the soffice instance."""
if self._desktop:
self.close()
def _start_soffice(self):
"""Start soffice in the background."""
process_id = os.fork()
if not process_id:
subprocess.call(
'soffice --accept="socket,host=localhost,port=2002;urp;" ' +
'--norestore --nologo --nodefault --headless', shell=True)
exit()
def _connect_soffice(self):
"""Connect to a running soffice instance."""
context = uno.getComponentContext()
resolver = context.ServiceManager.createInstanceWithContext(
'com.sun.star.bridge.UnoUrlResolver', context)
while True:
try:
context = resolver.resolve(
'uno:socket,host=localhost,port=2002;urp;' +
'StarOffice.ComponentContext')
except NoConnectException:
pass
else:
break
self._desktop = context.ServiceManager.createInstanceWithContext(
'com.sun.star.frame.Desktop', context)
def _get_cell_text(self, column, row):
return self._interface.getCellByPosition(column, row).getString()
def _get_cell_value(self, column, row):
return self._interface.getCellByPosition(column, row).getValue()
def _get_cell_formula(self, column, row):
return self._interface.getCellByPosition(column, row).getFormula()
def _get_link(self, column, row):
return self._get_cell_formula(column, row)[1:].replace(
'$', '').split('.')
def _set_cell_value(self, sheet, cell, value):
self._sheets.getByName(sheet).getCellRangeByName(cell).setValue(value)
def _read_table(self, column):
"""Read names and values from a table.
:arg int column: Upper-left coordinate of the table content.
:returns dict: Table content.
"""
inputs = {}
row = 3
while True:
name = self._get_cell_text(column, row)
value = self._get_cell_value(column + 1, row)
if not name:
break
inputs[name] = value
row += 1
return inputs
def _write_table(self, column, data):
"""Write values to a table.
:arg int column: Upper-left coordinate of the table content.
:arg dict data: Data to be written.
"""
row = 3
while True:
name = self._get_cell_text(column, row)
if not name:
break
if name in data:
sheet, cell = self._get_link(column + 1, row)
self._set_cell_value(sheet, cell, data[name])
row += 1
def open(self, file_name):
"""Open a spreadsheet.
:arg str file_name: File name.
"""
doc_url = unohelper.systemPathToFileUrl(os.path.abspath(file_name))
try:
self._desktop.loadComponentFromURL(doc_url, '_blank', 0, ())
        except IllegalArgumentException as error:
            raise ValueError('file not found or format not supported') from error
self._sheets = self._desktop.getCurrentComponent().Sheets
if 'Interface' not in self._sheets:
raise ValueError('no sheet named "Interface" found')
self._interface = self._sheets.Interface
def close(self):
"""Close the soffice instance."""
self._desktop.terminate()
def read_input(self):
return self._read_table(1)
def write_input(self, data):
self._write_table(1, data)
def read_output(self):
return self._read_table(4)
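# A minimal usage sketch (file name and input keys are assumptions; the document
# must contain the "Interface" sheet described above):
#
#   ss = SpreadScript('model.ods')
#   ss.write_input({'growth_rate': 0.05})
#   print(ss.read_output())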
|
StarcoderdataPython
|
1667129
|
<filename>api/__init__.py
from flask_restplus import Api, Resource
from .taskController import *
api = Api(
version='1.0',
title='API',
description='api',
)
ns = api.namespace('api', description='task api namespace')
@ns.route('/tasks')
class TaskList(Resource):
@api.doc('get all tasks')
def get(self):
return taskController.getAllTasks()
@api.doc('start a task')
def post(self):
return taskController.createTask()
@ns.route('/tasks/<uuid>')
class Task(Resource):
@api.doc('get a task')
def get(self, uuid):
return taskController.getTaskByUuid(uuid)
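# A minimal sketch of serving this namespace (the Flask app object is an
# assumption, not part of this module):
#
#   from flask import Flask
#   app = Flask(__name__)
#   api.init_app(app)
#   app.run()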
|
StarcoderdataPython
|
116309
|
# -*- coding: utf-8 -*-
# Copyright 2015 www.suishouguan.com
#
# Licensed under the Private License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://github.com/samuelbaizg/ssguan/blob/master/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import unittest
from ssguan.ignitor.base.error import Error, ProgramError
from ssguan.ignitor.base.error import RequiredError, ChoiceError, TypeBoolError, CompareError, TypeDatetimeError, TypeDateError, \
TypeDictError, LengthError, RangeError
from ssguan.ignitor.orm import config as orm_config
from ssguan.ignitor.orm import dbpool, properti, config as dbconfig
from ssguan.ignitor.orm.error import IllegalWordError, UniqueError
from ssguan.ignitor.orm.model import Model
from ssguan.ignitor.orm.validator import LengthValidator, RangeValidator, CompareValidator, \
IllegalValidator, UniqueValidator
from ssguan.ignitor.utility import kind
class StringPropertyTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
dbpool.create_db(orm_config.get_default_dbinfo(), dropped=True)
def test_multiline(self):
try:
class TModel(Model):
@classmethod
def meta_domain(cls):
return "test"
sf_1 = properti.StringProperty()
tmodel = TModel()
tmodel.sf_1 = 'abc\ndef'
self.assertTrue(True)
tmodel.sf_1 = 'abcdef'
tmodel.row_version = 1
tmodel.created_by = -1
tmodel.modified_by = -1
tmodel.validate_props()
self.assertTrue(True)
except ProgramError as e:
self.assertIn("sf_1 is not multi-line", str(e))
def test_length(self):
try:
class TModel(Model):
@classmethod
def meta_domain(cls):
return "test"
sf_2 = properti.StringProperty(length=10)
tmodel = TModel()
tmodel.sf_2 = "12345678901"
tmodel.created_by = "-1"
tmodel.modified_by = "-1"
tmodel.validate_props()
self.assertTrue(False)
tmodel.sf_2 = "abcdefgh"
tmodel.row_version = 1
tmodel.created_by = "-1"
tmodel.modified_by = "-1"
tmodel.validate_props()
self.assertTrue(True)
except Error as e:
self.assertIsInstance(e, LengthError)
def test_required(self):
class TModel(Model):
@classmethod
def meta_domain(cls):
return "def test_"
sf_3 = properti.StringProperty(required=True)
try:
tmodel = TModel()
tmodel.sf_3 = None
tmodel.validate_props()
self.assertTrue(False)
except RequiredError:
self.assertTrue(True)
try:
tmodel = TModel()
tmodel.validate_props()
self.assertTrue(False)
except RequiredError:
self.assertTrue(True)
def test_choices(self):
try:
class TModel(Model):
@classmethod
def meta_domain(cls):
return "test"
sf_4 = properti.StringProperty(choices=['a', 'b', 'c'])
tmodel = TModel()
tmodel.sf_4 = 'd'
tmodel.create(1)
self.assertTrue(False)
tmodel.sf_4 = 'c'
self.assertTrue(True)
except ChoiceError:
self.assertTrue(True)
def test_default(self):
class TModel(Model):
@classmethod
def meta_domain(cls):
return "test"
sf_5 = properti.StringProperty(default='abcd')
tmodel = TModel()
self.assertEqual(tmodel.sf_5, 'abcd')
self.assertNotEqual(tmodel.sf_5, 'abcf')
try:
class TModel(Model):
@classmethod
def meta_domain(cls):
return "test"
sf_7 = properti.StringProperty(default='d', choices=['a', 'b', 'c'])
TModel(created_by="-1", modified_by="-1").validate_props()
self.assertTrue(False)
except ChoiceError:
self.assertTrue(True)
def test_illeagalvalidator(self):
try:
class TModel(Model):
@classmethod
def meta_domain(cls):
return "test"
sf_6 = properti.StringProperty(validator=IllegalValidator())
tmodel = TModel()
tmodel.sf_6 = "abcdef"
self.assertTrue(True)
tmodel.sf_6 = u"江泽民"
tmodel.create(1)
self.assertTrue(False)
tmodel.sf_6 = u"骚女"
tmodel.update(1)
self.assertTrue(False)
tmodel.sf_6 = u"2005年十大欠抽人"
tmodel.update(1)
self.assertTrue(False)
tmodel.sf_6 = u"2005语录排行"
tmodel.update(1)
self.assertTrue(False)
except IllegalWordError:
self.assertTrue(True)
def test_uniquevalidator(self):
try:
class TModelVal(Model):
@classmethod
def meta_domain(cls):
return "test"
sf_6 = properti.StringProperty(validator=UniqueValidator("sf_6"))
TModelVal.create_schema()
tmodel = TModelVal()
tmodel.sf_6 = "abcdef"
tmodel.create(1)
tmodel = TModelVal()
tmodel.sf_6 = "abcdef"
tmodel.create(1)
self.assertTrue(False)
except UniqueError:
self.assertTrue(True)
tmodel.sf_6 = "abcdef11"
tmodel.create(1)
self.assertTrue(True)
try:
query = TModelVal.all()
tm = query.get()
tmodel = TModelVal()
tmodel.sf_6 = tm.sf_6
tmodel.create(1)
self.assertTrue(False)
except UniqueError:
self.assertTrue(True)
TModelVal.delete_schema()
@classmethod
def tearDownClass(cls):
dbpool.drop_db(orm_config.get_default_dbinfo())
class IntegerPropertyTest(unittest.TestCase):
def test_length(self):
try:
class TModel(Model):
@classmethod
def meta_domain(cls):
return "test"
if_1 = properti.IntegerProperty(length=11)
if_2 = properti.StringProperty(length=8)
tmodel = TModel()
tmodel.created_by = "-1"
tmodel.modified_by = "-1"
tmodel.if_1 = 0x8fffffffffffffff
tmodel.validate_props()
self.assertTrue(False)
except LengthError:
self.assertTrue(True)
class BooleanPropertyTest(unittest.TestCase):
def test_type(self):
class TModel(Model):
@classmethod
def meta_domain(cls):
return "test"
bf_1 = properti.BooleanProperty()
tmodel = TModel(entityinst=True)
tmodel.bf_1 = 1
self.assertTrue(isinstance(tmodel.bf_1, bool))
self.assertEqual(tmodel.bf_1, True)
tmodel.bf_1 = False
self.assertTrue(isinstance(tmodel.bf_1, bool))
self.assertEqual(tmodel.bf_1, False)
def test_int(self):
class TModel(Model):
@classmethod
def meta_domain(cls):
return "test"
bf_1 = properti.BooleanProperty()
tmodel = TModel(entityinst=True, bf_1=1)
self.assertTrue(isinstance(tmodel.bf_1, bool))
self.assertEqual(tmodel.bf_1, True)
tmodel = TModel(entityinst=True, bf_1=0)
self.assertTrue(isinstance(tmodel.bf_1, bool))
self.assertEqual(tmodel.bf_1, False)
def test_default(self):
class TModel(Model):
@classmethod
def meta_domain(cls):
return "test"
bf_2 = properti.BooleanProperty(default=True)
tmodel = TModel()
self.assertEqual(tmodel.bf_2, True)
class TModel2(Model):
@classmethod
def meta_domain(cls):
return "test"
bf_2 = properti.BooleanProperty(default=1)
tmodel = TModel()
self.assertTrue(isinstance(tmodel.bf_2, bool))
self.assertEqual(tmodel.bf_2, True)
def test_required(self):
class TModel(Model):
@classmethod
def meta_domain(cls):
return "test"
bf_1 = properti.BooleanProperty()
try:
tmodel = TModel()
tmodel.bf_1 = None
tmodel.validate_props()
self.assertTrue(False)
except RequiredError:
self.assertTrue(True)
try:
class TModel(Model):
@classmethod
def meta_domain(cls):
return "test"
bf_1 = properti.BooleanProperty(default=None)
tmodel = TModel()
tmodel.validate_props()
self.assertTrue(False)
except RequiredError:
self.assertTrue(True)
def test_validate(self):
class TModel(Model):
@classmethod
def meta_domain(cls):
return "test"
dtf_2 = properti.BooleanProperty()
tmodel = TModel()
try:
tmodel.dtf_2 = properti.DateTimeProperty.utcnow()
except ProgramError as e:
self.assertIn("must be a bool", str(e))
try:
tmodel.dtf_2 = 1
tmodel.created_by = "-1"
tmodel.modified_by = "-1"
tmodel.validate_props()
self.assertTrue(False)
except Error as e:
self.assertIsInstance(e, TypeBoolError)
tmodel = TModel(entityinst=True)
tmodel.dtf_2 = 1
self.assertTrue(isinstance(tmodel.dtf_2, bool))
self.assertTrue(tmodel.dtf_2)
def test_length(self):
class TModel(Model):
@classmethod
def meta_domain(cls):
return "test"
dtf_2 = properti.StringProperty(validator=LengthValidator(minlength=2, maxlength=5))
tmodel = TModel()
try:
tmodel.dtf_2 = "a"
tmodel.created_by = "1"
tmodel.modified_by = "1"
tmodel.validate_props()
self.assertTrue(False)
except LengthError:
self.assertTrue(True)
try:
tmodel.dtf_2 = "abcdef1"
tmodel.validate_props()
self.assertTrue(False)
except LengthError:
self.assertTrue(True)
try:
tmodel.dtf_2 = "ab"
tmodel.validate_props()
self.assertTrue(True)
except LengthError:
self.assertTrue(False)
try:
tmodel.dtf_2 = "abcde"
tmodel.validate_props()
self.assertTrue(True)
except LengthError:
self.assertTrue(False)
def test_range_int(self):
class TModel(Model):
@classmethod
def meta_domain(cls):
return "test"
dtf_2 = properti.IntegerProperty(validator=RangeValidator(minimum=2, maximum=5))
tmodel = TModel()
try:
tmodel.dtf_2 = 1
tmodel.created_by = "-1"
tmodel.modified_by = "-1"
tmodel.validate_props()
self.assertTrue(False)
except RangeError:
self.assertTrue(True)
try:
tmodel.dtf_2 = 6
tmodel.validate_props()
self.assertTrue(False)
except RangeError:
self.assertTrue(True)
try:
tmodel.dtf_2 = 5
tmodel.validate_props()
self.assertTrue(True)
except RangeError:
self.assertTrue(False)
try:
tmodel.dtf_2 = 2
tmodel.validate_props()
self.assertTrue(True)
except RangeError:
self.assertTrue(False)
def test_range_float(self):
class TModel(Model):
@classmethod
def meta_domain(cls):
return "test"
dtf_2 = properti.FloatProperty(validator=RangeValidator(minimum=2.0, maximum=5.0))
tmodel = TModel()
tmodel.created_by = "-1"
tmodel.modified_by = "-1"
try:
tmodel.dtf_2 = 1.0
tmodel.validate_props()
self.assertTrue(False)
except RangeError:
self.assertTrue(True)
try:
tmodel.dtf_2 = 6.5
tmodel.validate_props()
self.assertTrue(False)
except RangeError:
self.assertTrue(True)
try:
tmodel.dtf_2 = 5.0
self.assertTrue(True)
except RangeError:
self.assertTrue(False)
try:
tmodel.dtf_2 = 2.0
self.assertTrue(True)
except RangeError:
self.assertTrue(False)
def test_range_date(self):
class TModel(Model):
@classmethod
def meta_domain(cls):
return "test"
dtf_2 = properti.DateProperty(validator=RangeValidator(minimum=datetime.date(2014, 5, 6), maximum=datetime.date(2014, 8, 8)))
tmodel = TModel()
try:
tmodel.dtf_2 = datetime.date(2014, 5, 5)
tmodel.create(1)
self.assertTrue(False)
except RangeError:
self.assertTrue(True)
try:
tmodel.dtf_2 = datetime.date(2014, 8, 9)
tmodel.validate_props()
self.assertTrue(False)
except RangeError:
self.assertTrue(True)
try:
tmodel.dtf_2 = datetime.date(2014, 5, 6)
self.assertTrue(True)
except RangeError:
self.assertTrue(False)
try:
tmodel.dtf_2 = datetime.date(2014, 8, 8)
self.assertTrue(True)
except RangeError:
self.assertTrue(False)
def test_compare_int(self):
class TModel(Model):
@classmethod
def meta_domain(cls):
return "test"
dtf_1 = properti.IntegerProperty(validator=CompareValidator("=", limit=2))
dtf_2 = properti.IntegerProperty(validator=CompareValidator(">=", limit=2))
dtf_3 = properti.IntegerProperty(validator=CompareValidator("<=", limit=2))
dtf_4 = properti.IntegerProperty(validator=CompareValidator(">", limit=2))
dtf_5 = properti.IntegerProperty(validator=CompareValidator("<", limit=2))
tmodel = TModel()
try:
tmodel.dtf_1 = 3
tmodel.create(1)
self.assertTrue(False)
except CompareError:
self.assertTrue(True)
tmodel = TModel()
try:
tmodel.dtf_2 = 1
tmodel.create(1)
self.assertTrue(False)
except CompareError:
self.assertTrue(True)
tmodel = TModel()
try:
tmodel.dtf_3 = 4
tmodel.create(1)
self.assertTrue(False)
except CompareError:
self.assertTrue(True)
tmodel = TModel()
try:
tmodel.dtf_4 = 0
tmodel.create(1)
self.assertTrue(False)
except CompareError:
self.assertTrue(True)
tmodel = TModel()
try:
tmodel.dtf_5 = 7
tmodel.create(1)
self.assertTrue(False)
except CompareError:
self.assertTrue(True)
def test_compare_date(self):
class TModel(Model):
@classmethod
def meta_domain(cls):
return "test"
dtf_1 = properti.DateProperty(validator=CompareValidator("=", limit=datetime.date(2014, 2, 1)))
dtf_2 = properti.DateProperty(validator=CompareValidator(">=", limit=datetime.date(2014, 2, 1)))
dtf_3 = properti.DateProperty(validator=CompareValidator("<=", limit=datetime.date(2014, 2, 1)))
dtf_4 = properti.DateProperty(validator=CompareValidator(">", limit=datetime.date(2014, 2, 1)))
dtf_5 = properti.DateProperty(validator=CompareValidator("<", limit=datetime.date(2014, 2, 1)))
tmodel = TModel()
try:
tmodel.dtf_1 = datetime.date(2014, 3, 1)
tmodel.create(1)
self.assertTrue(False)
except CompareError:
self.assertTrue(True)
tmodel = TModel()
try:
tmodel.dtf_2 = datetime.date(2014, 1, 1)
tmodel.create(1)
self.assertTrue(False)
except CompareError:
self.assertTrue(True)
tmodel = TModel()
try:
tmodel.dtf_3 = datetime.date(2014, 3, 1)
tmodel.create(1)
self.assertTrue(False)
except CompareError:
self.assertTrue(True)
tmodel = TModel()
try:
tmodel.dtf_4 = datetime.date(2014, 2, 1)
tmodel.create(1)
self.assertTrue(False)
except CompareError:
self.assertTrue(True)
tmodel = TModel()
try:
tmodel.dtf_5 = datetime.date(2014, 6, 1)
tmodel.create(1)
self.assertTrue(False)
except CompareError:
self.assertTrue(True)
def test_compare_between_prop(self):
class TModel(Model):
@classmethod
def meta_domain(cls):
return "test"
dtf_1 = properti.DateProperty(validator=CompareValidator(">=", property_name="dtf_2"))
dtf_2 = properti.DateProperty()
tmodel = TModel()
try:
tmodel.dtf_1 = datetime.date(2014, 1, 1)
tmodel.dtf_2 = datetime.date(2014, 2, 1)
tmodel.create(1)
self.assertTrue(False)
except CompareError:
self.assertTrue(True)
class DateTimePropertyTest(unittest.TestCase):
def test_auto_utcnow(self):
class TModel(Model):
@classmethod
def meta_domain(cls):
return "test"
dtf_2 = properti.DateTimeProperty(auto_utcnow=True)
tmodel = TModel()
self.assertIsNotNone(tmodel.dtf_2)
def test_validate(self):
class TModel(Model):
@classmethod
def meta_domain(cls):
return "test"
dtf_2 = properti.DateTimeProperty()
tmodel = TModel()
try:
tmodel.dtf_2 = 123
tmodel.create(1)
self.assertFalse(True)
except Error as e:
self.assertIsInstance(e, TypeDatetimeError)
tmodel = TModel()
try:
tmodel.dtf_2 = "fafafa"
tmodel.create(1)
self.assertFalse(True)
except TypeDatetimeError:
self.assertTrue(True)
class DatePropertyTest(unittest.TestCase):
def test_auto_utctoday(self):
class TModel(Model):
@classmethod
def meta_domain(cls):
return "test"
dtf_2 = properti.DateProperty(auto_utctoday=True)
tmodel = TModel()
self.assertEqual(tmodel.dtf_2, properti.DateProperty.utctoday())
def test_validate(self):
class TModel(Model):
@classmethod
def meta_domain(cls):
return "test"
dtf_2 = properti.DateProperty()
tmodel = TModel()
try:
tmodel.dtf_2 = "aaa"
tmodel.create(1)
self.assertFalse(True)
except TypeDateError:
self.assertTrue(True)
class DictPropertyTest(unittest.TestCase):
class TModel66(Model):
@classmethod
def meta_domain(cls):
return "test"
dtf_11 = properti.DictProperty()
@classmethod
def setUpClass(cls):
dbpool.create_db(dbconfig.get_default_dbinfo(), dropped=True)
cls.TModel66.create_schema()
def test_value(self):
tmodel = self.TModel66()
tmodel.dtf_11 = {"aa":"bb"}
tmodel2 = tmodel.create(1)
key = tmodel2.key()
self.assertIsInstance(tmodel2.dtf_11, dict)
try:
tmodel.dtf_11 = kind.utcnow()
tmodel2 = tmodel.create(1)
self.assertTrue(False)
except TypeDictError:
self.assertTrue(True)
self.assertIsInstance(tmodel2.dtf_11, datetime.datetime)
tm = self.TModel66.get_by_key(key)
self.assertIsInstance(tm.dtf_11, dict)
tm.dtf_11 = {'cc':'ae'}
tm1 = tm.update(None)
self.assertEqual(tm1.dtf_11, {'cc':'ae'})
tm.dtf_11 = ['cc', 'ae']
try:
tm1 = tm.update(None)
self.assertTrue(False)
except TypeDictError:
self.assertTrue(True)
self.assertEqual(tm1.dtf_11, ['cc', 'ae'])
def test_validate(self):
class TModel67(Model):
@classmethod
def meta_domain(cls):
return "test"
dtf_11 = properti.DictProperty(length=5)
tmodel = TModel67()
try:
tmodel.dtf_11 = {"aa":"bb"}
tmodel.create(1)
self.assertFalse(True)
except LengthError:
self.assertTrue(True)
@classmethod
def tearDownClass(cls):
cls.TModel66.delete_schema()
dbpool.drop_db(dbconfig.get_default_dbinfo())
|
StarcoderdataPython
|
3318689
|
#!/usr/bin/python3
# Author: GMFTBY
# Time: 2019.9.19
from metric.metric import *
import argparse
import random
from utils import load_word_embedding
import pickle
from tqdm import tqdm
from bert_score import score
import numpy as np
import ipdb
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Evaluate the model")
parser.add_argument('--model', type=str, default='HRED', help='model name')
parser.add_argument('--dataset', type=str, default='ubuntu')
parser.add_argument('--file', type=str, default=None, help='result file')
parser.add_argument('--cf', type=int, default=1, help='cf mode')
parser.add_argument('--embedding', type=str, default='/home/lt/data/File/wordembedding/glove/glove.6B.300d.txt')
parser.add_argument('--dim', type=int, default=300)
args = parser.parse_args()
# create the word embedding
# dic = load_word_embedding(args.embedding, dimension=args.dim)
with open('./data/dict.pkl', 'rb') as f:
dic = pickle.load(f)
# load the file data
tp, fn, fp, tn = 0, 0, 0, 0
rl, tl = False, False
silence_wrong, whole_counter = 0, 0
with open(args.file) as f:
ref, tgt = [], []
for idx, line in enumerate(f.readlines()):
if idx % 4 == 1:
if "- ref:" in line:
rl = False
elif "+ ref:" in line:
rl = True
                srcline = line.replace("- ref: ", "").replace("+ ref: ", "").replace('<sos>', '').replace('<eos>', '').strip()
elif idx % 4 == 2:
if "- tgt:" in line:
tl = False
elif "+ tgt:" in line:
tl = True
                tgtline = line.replace("- tgt: ", "").replace("+ tgt: ", "").replace('<sos>', '').replace('<eos>', '').strip()
elif idx % 4 == 3:
# counter
whole_counter += 1
# stat the tp, fn, fp, tn
if rl and tl:
tp += 1
elif rl and not tl:
fn += 1
elif not rl and tl:
fp += 1
else:
tn += 1
if args.cf == 1:
if rl and tl:
ref.append(srcline.split())
tgt.append(tgtline.split())
if (tl and 'silence' in tgtline) or (not tl and 'silence' not in tgtline):
silence_wrong += 1
else:
if 'silence' in tgtline or 'silence' in srcline:
pass
else:
ref.append(srcline.split())
tgt.append(tgtline.split())
# filter
if args.cf == 0:
# idx_ = random.sample(list(range(len(ref))), int(0.85 * len(ref)))
# ref = [i for idx, i in enumerate(ref) if idx in idx_]
# tgt = [i for idx, i in enumerate(tgt) if idx in idx_]
pass
else:
print(f'[!] test ({len(ref)}|{round(len(ref) / (tp + fn), 4)}) examples')
print(f'[!] true acc: {round(tp / (tp + fn), 4)}, false acc: {round(tn / (tn + fp), 4)}')
print(f'[!] silence error ratio: {round(silence_wrong / whole_counter, 4)}')
assert len(ref) == len(tgt)
# BLEU and embedding-based metric
bleu1_sum, bleu2_sum, bleu3_sum, bleu4_sum, embedding_average_sum, counter, ve_sum = 0, 0, 0, 0, 0, 0, 0
for rr, cc in tqdm(zip(ref, tgt)):
bleu1_sum += cal_BLEU([rr], cc, ngram=1)
bleu2_sum += cal_BLEU([rr], cc, ngram=2)
bleu3_sum += cal_BLEU([rr], cc, ngram=3)
bleu4_sum += cal_BLEU([rr], cc, ngram=4)
embedding_average_sum += cal_embedding_average(rr, cc, dic)
ve_sum += cal_vector_extrema(rr, cc, dic)
counter += 1
# Distinct-1, Distinct-2
candidates = []
for line in tgt:
candidates.extend(line)
# ipdb.set_trace()
distinct_1, distinct_2 = cal_Distinct(candidates)
# BERTScore
newrefs, newcands = [' '.join(i) for i in ref], [' '.join(i) for i in tgt]
# ipdb.set_trace()
_, _, bert_scores = score(newcands, newrefs, lang='en')
bert_sum = np.mean(bert_scores.tolist())
print(f'Model {args.model} Result')
print(f'BLEU-1: {round(bleu1_sum / counter, 4)}')
print(f'BLEU-2: {round(bleu2_sum / counter, 4)}')
print(f'BLEU-3: {round(bleu3_sum / counter, 4)}')
print(f'BLEU-4: {round(bleu4_sum / counter, 4)}')
print(f'BERTScore: {round(bert_sum, 4)}')
print(f'Embedding Average: {round(embedding_average_sum / counter, 4)}')
print(f'Vector Extrema: {round(ve_sum / counter, 4)}')
print(f'Distinct-1: {round(distinct_1, 4)}; Distinct-2: {round(distinct_2, 4)}')
if args.cf == 1:
macro_f1, micro_f1, acc = cal_acc_f1(tp, fn, fp, tn)
print(f'Decision Acc: {acc}')
print(f'Decision macro-F1: {macro_f1}, Decision micro-F1: {micro_f1}')
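# Example invocation (the script and file names are assumptions):
#   python eval.py --model HRED --dataset ubuntu --file ./result.txt --cf 1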
|
StarcoderdataPython
|
131655
|
from .generate_tests import *
from .create_cfg import *
from .mutation_analysis import *
|
StarcoderdataPython
|
1738223
|
# Created by zhouwang on 2018/5/5.
from .base import BaseRequestHandler, permission
import datetime
import pymysql
import logging
logger = logging.getLogger()
def arguments_valid(handler, pk=None):
error = dict()
name = handler.get_argument('name', '')
path = handler.get_argument('path', '')
comment = handler.get_argument('comment', '')
host = handler.get_argument('host', '')
monitor_choice = handler.get_argument('monitor_choice', '0')
if not path:
error['path'] = 'Required'
else:
        select_sql = 'SELECT id FROM logfile WHERE path="%s" %s'
        select_arg = (pymysql.escape_string(path), 'and id!="%d"' % pk if pk else '')
count = handler.cursor.execute(select_sql % select_arg)
if count:
error['path'] = 'Already existed'
for i, j in ((name, 'name'), (host, 'host'), (comment, 'comment')):
if not i:
error[j] = 'Required'
if monitor_choice not in ('0', '-1'):
error['monitor_choice'] = 'Invalid'
data = dict(name=name,
path=path,
comment=comment,
host=host,
hosts=host.split(','),
monitor_choice=int(monitor_choice))
return error, data
def add_valid(func):
def _wrapper(self):
        error, self.reqdata = arguments_valid(self)
if error:
return dict(code=400, msg='Bad POST data', error=error)
return func(self)
return _wrapper
def query_valid(func):
def _wrapper(self, pk):
error = dict()
if not pk and self.request.arguments:
argument_keys = self.request.arguments.keys()
query_keys = ['id', 'name', 'host', 'path', 'comment', 'create_time',
'order', 'search', 'offset', 'limit', 'sort']
error = {key: 'Bad key' for key in argument_keys if key not in query_keys}
if error:
return dict(code=400, msg='Bad GET param', error=error)
return func(self, pk)
return _wrapper
def update_valid(func):
def _wrapper(self, pk):
select_sql = 'SELECT id FROM logfile WHERE id="%d"' % pk
count = self.cursor.execute(select_sql)
if not count:
return {'code': 404, 'msg': 'Update row not found'}
        error, self.reqdata = arguments_valid(self, pk)
if error:
return dict(code=400, msg='Bad PUT param', error=error)
return func(self, pk)
return _wrapper
def del_valid(func):
def _wrapper(self, pk):
select_sql = 'SELECT id FROM logfile WHERE id="%d"' % pk
count = self.cursor.execute(select_sql)
if not count:
return dict(code=404, msg='Delete row not found')
return func(self, pk)
return _wrapper
class Handler(BaseRequestHandler):
@permission()
def get(self, pk=0):
''' Query logfile '''
response_data = self._query(int(pk))
self._write(response_data)
@permission(role=2)
def post(self):
''' Add logfile '''
response_data = self._add()
self._write(response_data)
@permission(role=2)
def put(self, pk=0):
''' Update logfile '''
response_data = self._update(int(pk))
self._write(response_data)
@permission(role=2)
def delete(self, pk=0):
''' Delete logfile '''
response_data = self._del(int(pk))
self._write(response_data)
@query_valid
def _query(self, pk):
fields = search_fields = ['id', 'name', 'host', 'path', 'comment', 'create_time']
where, order, limit = self.select_sql_params(int(pk), fields, search_fields)
self.cursor.execute(self.select_sql % (where, order, limit))
results = self.cursor.dictfetchall()
if limit:
self.cursor.execute(self.total_sql % where)
total = self.cursor.dictfetchone().get('total')
return dict(code=200, msg='Query Successful', data=results, total=total)
return dict(code=200, msg='Query Successful', data=results)
@add_valid
def _add(self):
try:
with self.transaction(atomic=True):
insert_arg = (self.reqdata['name'], self.reqdata['host'], self.reqdata['path'],
datetime.datetime.now().strftime('%Y-%m-%d %H:%M'), self.reqdata['comment'],
self.reqdata['monitor_choice'])
self.cursor.execute(self.insert_sql, insert_arg)
self.cursor.execute(self.last_insert_id_sql)
insert = self.cursor.dictfetchone()
insert_host_mp_args = [(insert['id'], host) for host in self.reqdata['hosts']]
self.cursor.executemany(self.insert_host_mp_sql, insert_host_mp_args)
except Exception as e:
logger.error('Add logfile failed: %s' % str(e))
return dict(code=500, msg='Add failed')
else:
return dict(code=200, msg='Add successful', data=insert)
@update_valid
def _update(self, pk):
try:
with self.transaction(atomic=True):
update_arg = (self.reqdata['name'], self.reqdata['host'], self.reqdata['path'],
self.reqdata['comment'], self.reqdata['monitor_choice'], pk)
self.cursor.execute(self.update_sql, update_arg)
delete_host_mp_arg = (pk,)
self.cursor.execute(self.delete_host_mp_sql, delete_host_mp_arg)
insert_host_mp_args = [(pk, host) for host in self.reqdata['hosts']]
self.cursor.executemany(self.insert_host_mp_sql, insert_host_mp_args)
except Exception as e:
logger.error('Update logfile failed: %s' % str(e))
return dict(code=500, msg='Update failed')
else:
return dict(code=200, msg='Update successful', data=dict(id=pk))
@del_valid
def _del(self, pk):
try:
with self.transaction(atomic=True):
delete_arg = (pk,)
self.cursor.execute(self.delete_sql, delete_arg)
self.cursor.execute(self.delete_host_mp_sql, delete_arg)
self.cursor.execute(self.delete_monitor_sql, delete_arg)
except Exception as e:
logger.error('Delete logfile failed: %s' % str(e))
return dict(code=500, msg='Delete failed')
else:
return dict(code=200, msg='Delete successful')
insert_sql = \
'INSERT INTO logfile (name, host, path, create_time, comment, monitor_choice) VALUES (%s, %s, %s, %s, %s, %s)'
insert_host_mp_sql = 'INSERT INTO logfile_host (logfile_id, host) VALUES (%s, %s)'
update_sql = 'UPDATE logfile SET name=%s, host=%s, path=%s, comment=%s, monitor_choice=%s WHERE id=%s'
delete_sql = 'DELETE FROM logfile WHERE id=%s'
delete_host_mp_sql = 'DELETE FROM logfile_host WHERE logfile_id=%s'
delete_monitor_sql = 'DELETE FROM monitor_item WHERE logfile_id=%s'
last_insert_id_sql = 'SELECT LAST_INSERT_ID() as id'
select_sql = '''
SELECT
id, name, host, path,
date_format(create_time, "%%Y-%%m-%%d %%H:%%i:%%s") as create_time,
comment, monitor_choice
FROM
logfile
%s %s %s
'''
total_sql = 'SELECT count(*) as total FROM logfile %s'
|
StarcoderdataPython
|
4832147
|
from dataclasses import dataclass
import re
enum_count = 0
def iota(reset = False):
global enum_count
if reset:
enum_count = 0
value = enum_count
enum_count += 1
return value
# Token types
PAPER_KW = iota()
EXPERIMENT_KW = iota()
LET_KW = iota()
FOR_KW = iota()
PARFOR_KW = iota()
LEFT_PAR = iota()
RIGHT_PAR = iota()
LEFT_CB = iota()
RIGHT_CB = iota()
LEFT_BR = iota()
RIGHT_BR = iota()
COMMA = iota()
ID = iota()
EQUALS = iota()
STRING = iota()
NUMBER = iota()
WHITESPACE = iota()
COMMENT = iota()
EOF = iota()
# Definitions
TOKEN_DEFINITIONS = [
[WHITESPACE, "^\s+"],
[WHITESPACE, "^\/\/[^\r?\n]+\r?\n"],
[PAPER_KW, "^paper"],
[EXPERIMENT_KW, "^experiment"],
    [LET_KW, "^let"],
    # PARFOR must be tried before FOR so that "parfor" is not split into two tokens
    [PARFOR_KW, "^parfor"],
    [FOR_KW, "^for"],
[LEFT_PAR, "^\("],
[RIGHT_PAR, "^\)"],
[LEFT_CB, "^\{"],
[RIGHT_CB, "^\}"],
[LEFT_BR, "^\["],
[RIGHT_BR, "^\]"],
[COMMA, "^,"],
[ID, "^[a-zA-Z_][a-zA-Z_\d]*"],
[EQUALS, "^\="],
[STRING, "^\"[^\"]*\""],
[NUMBER, "^[+-]?([0-9]+\.?[0-9]*|\.[0-9]+)"],
]
# Tokenizer
@dataclass
class Token:
type: int
value: str
class Tokenizer:
def __init__(self, text: str):
self.text = text
self.cursor = 0
def get_next_token(self):
while True:
if self.cursor >= len(self.text):
return Token(EOF, "")
matched = self.match()
if matched:
return matched
raise Exception("Something went wrong")
def get_all_tokens(self):
self.cursor = 0
matches = []
while self.cursor < len(self.text):
matches.append(self.get_next_token())
self.cursor = 0
return matches
def match(self):
for token_type, regex in TOKEN_DEFINITIONS:
substr = self.text[self.cursor:]
match = re.search(regex, substr)
if not match:
continue
content = match.group(0)
self.cursor += len(content)
if token_type == WHITESPACE:
return None
return Token(token_type, content)
raise Exception(f"Could not match token at position {self.cursor}: \"{self.text[self.cursor]}\"")
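# A minimal usage sketch:
#
#   tokens = Tokenizer('let x = 42').get_all_tokens()
#   for tok in tokens:
#       print(tok.type, tok.value)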
|
StarcoderdataPython
|
51832
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'CharakterInfo.ui'
#
# Created by: PyQt5 UI code generator 5.15.6
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(974, 721)
self.gridLayout = QtWidgets.QGridLayout(Form)
self.gridLayout.setContentsMargins(20, 20, 20, 20)
self.gridLayout.setHorizontalSpacing(20)
self.gridLayout.setVerticalSpacing(10)
self.gridLayout.setObjectName("gridLayout")
self.verticalLayout_4 = QtWidgets.QVBoxLayout()
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.labelEinstellungen = QtWidgets.QLabel(Form)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.labelEinstellungen.setFont(font)
self.labelEinstellungen.setObjectName("labelEinstellungen")
self.verticalLayout_4.addWidget(self.labelEinstellungen)
self.groupBox_3 = QtWidgets.QGroupBox(Form)
self.groupBox_3.setTitle("")
self.groupBox_3.setObjectName("groupBox_3")
self.gridLayout_5 = QtWidgets.QGridLayout(self.groupBox_3)
self.gridLayout_5.setContentsMargins(20, 20, 20, 20)
self.gridLayout_5.setObjectName("gridLayout_5")
self.checkReq = QtWidgets.QCheckBox(self.groupBox_3)
self.checkReq.setChecked(True)
self.checkReq.setObjectName("checkReq")
self.gridLayout_5.addWidget(self.checkReq, 1, 0, 1, 2)
self.comboHausregeln = QtWidgets.QComboBox(self.groupBox_3)
self.comboHausregeln.setObjectName("comboHausregeln")
self.gridLayout_5.addWidget(self.comboHausregeln, 4, 1, 1, 1)
self.label_5 = QtWidgets.QLabel(self.groupBox_3)
self.label_5.setObjectName("label_5")
self.gridLayout_5.addWidget(self.label_5, 4, 0, 1, 1)
self.label_7 = QtWidgets.QLabel(self.groupBox_3)
self.label_7.setObjectName("label_7")
self.gridLayout_5.addWidget(self.label_7, 9, 0, 1, 1)
self.checkUeberPDF = QtWidgets.QCheckBox(self.groupBox_3)
self.checkUeberPDF.setObjectName("checkUeberPDF")
self.gridLayout_5.addWidget(self.checkUeberPDF, 3, 0, 1, 2)
self.label_6 = QtWidgets.QLabel(self.groupBox_3)
self.label_6.setObjectName("label_6")
self.gridLayout_5.addWidget(self.label_6, 6, 0, 1, 1)
self.checkFinanzen = QtWidgets.QCheckBox(self.groupBox_3)
self.checkFinanzen.setChecked(True)
self.checkFinanzen.setObjectName("checkFinanzen")
self.gridLayout_5.addWidget(self.checkFinanzen, 2, 0, 1, 2)
self.comboCharsheet = QtWidgets.QComboBox(self.groupBox_3)
self.comboCharsheet.setObjectName("comboCharsheet")
self.gridLayout_5.addWidget(self.comboCharsheet, 6, 1, 1, 1)
self.labelReload = QtWidgets.QLabel(self.groupBox_3)
self.labelReload.setStyleSheet("background-color: rgb(255, 255, 0); color: black;")
self.labelReload.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.labelReload.setWordWrap(True)
self.labelReload.setObjectName("labelReload")
self.gridLayout_5.addWidget(self.labelReload, 11, 0, 1, 2)
self.comboRegelnGroesse = QtWidgets.QComboBox(self.groupBox_3)
self.comboRegelnGroesse.setObjectName("comboRegelnGroesse")
self.comboRegelnGroesse.addItem("")
self.comboRegelnGroesse.addItem("")
self.comboRegelnGroesse.addItem("")
self.gridLayout_5.addWidget(self.comboRegelnGroesse, 9, 1, 1, 1)
self.checkRegeln = QtWidgets.QCheckBox(self.groupBox_3)
self.checkRegeln.setChecked(True)
self.checkRegeln.setTristate(False)
self.checkRegeln.setObjectName("checkRegeln")
self.gridLayout_5.addWidget(self.checkRegeln, 8, 0, 1, 2)
self.label_10 = QtWidgets.QLabel(self.groupBox_3)
self.label_10.setObjectName("label_10")
self.gridLayout_5.addWidget(self.label_10, 10, 0, 1, 1)
self.listRegelKategorien = QtWidgets.QListView(self.groupBox_3)
self.listRegelKategorien.setMaximumSize(QtCore.QSize(280, 80))
self.listRegelKategorien.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.listRegelKategorien.setObjectName("listRegelKategorien")
self.gridLayout_5.addWidget(self.listRegelKategorien, 10, 1, 1, 1)
self.verticalLayout_4.addWidget(self.groupBox_3)
spacerItem = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
self.verticalLayout_4.addItem(spacerItem)
self.labelEP = QtWidgets.QLabel(Form)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.labelEP.setFont(font)
self.labelEP.setObjectName("labelEP")
self.verticalLayout_4.addWidget(self.labelEP)
self.groupBox_2 = QtWidgets.QGroupBox(Form)
self.groupBox_2.setTitle("")
self.groupBox_2.setObjectName("groupBox_2")
self.gridLayout_4 = QtWidgets.QGridLayout(self.groupBox_2)
self.gridLayout_4.setContentsMargins(20, 20, 20, 20)
self.gridLayout_4.setObjectName("gridLayout_4")
self.gridLayout_2 = QtWidgets.QGridLayout()
self.gridLayout_2.setObjectName("gridLayout_2")
self.spinFertigkeitenSpent = QtWidgets.QSpinBox(self.groupBox_2)
self.spinFertigkeitenSpent.setFocusPolicy(QtCore.Qt.NoFocus)
self.spinFertigkeitenSpent.setAlignment(QtCore.Qt.AlignCenter)
self.spinFertigkeitenSpent.setReadOnly(True)
self.spinFertigkeitenSpent.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons)
self.spinFertigkeitenSpent.setMaximum(999999)
self.spinFertigkeitenSpent.setObjectName("spinFertigkeitenSpent")
self.gridLayout_2.addWidget(self.spinFertigkeitenSpent, 3, 1, 1, 1)
self.spinUebernatuerlichPercent = QtWidgets.QSpinBox(self.groupBox_2)
self.spinUebernatuerlichPercent.setFocusPolicy(QtCore.Qt.NoFocus)
self.spinUebernatuerlichPercent.setAlignment(QtCore.Qt.AlignCenter)
self.spinUebernatuerlichPercent.setReadOnly(True)
self.spinUebernatuerlichPercent.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons)
self.spinUebernatuerlichPercent.setMaximum(100)
self.spinUebernatuerlichPercent.setObjectName("spinUebernatuerlichPercent")
self.gridLayout_2.addWidget(self.spinUebernatuerlichPercent, 6, 2, 1, 1)
self.labelUeber3 = QtWidgets.QLabel(self.groupBox_2)
self.labelUeber3.setMinimumSize(QtCore.QSize(230, 0))
font = QtGui.QFont()
font.setItalic(False)
self.labelUeber3.setFont(font)
self.labelUeber3.setObjectName("labelUeber3")
self.gridLayout_2.addWidget(self.labelUeber3, 8, 0, 1, 1)
self.spinProfanPercent = QtWidgets.QSpinBox(self.groupBox_2)
self.spinProfanPercent.setFocusPolicy(QtCore.Qt.NoFocus)
self.spinProfanPercent.setAlignment(QtCore.Qt.AlignCenter)
self.spinProfanPercent.setReadOnly(True)
self.spinProfanPercent.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons)
self.spinProfanPercent.setMaximum(100)
self.spinProfanPercent.setObjectName("spinProfanPercent")
self.gridLayout_2.addWidget(self.spinProfanPercent, 2, 2, 1, 1)
self.spinVorteileSpent = QtWidgets.QSpinBox(self.groupBox_2)
self.spinVorteileSpent.setFocusPolicy(QtCore.Qt.NoFocus)
self.spinVorteileSpent.setAlignment(QtCore.Qt.AlignCenter)
self.spinVorteileSpent.setReadOnly(True)
self.spinVorteileSpent.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons)
self.spinVorteileSpent.setMaximum(99999999)
self.spinVorteileSpent.setObjectName("spinVorteileSpent")
self.gridLayout_2.addWidget(self.spinVorteileSpent, 1, 1, 1, 1)
self.spinAttributeSpent = QtWidgets.QSpinBox(self.groupBox_2)
self.spinAttributeSpent.setFocusPolicy(QtCore.Qt.NoFocus)
self.spinAttributeSpent.setAlignment(QtCore.Qt.AlignCenter)
self.spinAttributeSpent.setReadOnly(True)
self.spinAttributeSpent.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons)
self.spinAttributeSpent.setMaximum(99999999)
self.spinAttributeSpent.setObjectName("spinAttributeSpent")
self.gridLayout_2.addWidget(self.spinAttributeSpent, 0, 1, 1, 1)
self.spinUeberTalenteSpent = QtWidgets.QSpinBox(self.groupBox_2)
self.spinUeberTalenteSpent.setFocusPolicy(QtCore.Qt.NoFocus)
self.spinUeberTalenteSpent.setAlignment(QtCore.Qt.AlignCenter)
self.spinUeberTalenteSpent.setReadOnly(True)
self.spinUeberTalenteSpent.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons)
self.spinUeberTalenteSpent.setMaximum(999999)
self.spinUeberTalenteSpent.setObjectName("spinUeberTalenteSpent")
self.gridLayout_2.addWidget(self.spinUeberTalenteSpent, 8, 1, 1, 1)
self.spinFreieSpent = QtWidgets.QSpinBox(self.groupBox_2)
self.spinFreieSpent.setFocusPolicy(QtCore.Qt.NoFocus)
self.spinFreieSpent.setAlignment(QtCore.Qt.AlignCenter)
self.spinFreieSpent.setReadOnly(True)
self.spinFreieSpent.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons)
self.spinFreieSpent.setMaximum(999999)
self.spinFreieSpent.setObjectName("spinFreieSpent")
self.gridLayout_2.addWidget(self.spinFreieSpent, 5, 1, 1, 1)
self.spinUeberFertigkeitenPercent = QtWidgets.QSpinBox(self.groupBox_2)
self.spinUeberFertigkeitenPercent.setFocusPolicy(QtCore.Qt.NoFocus)
self.spinUeberFertigkeitenPercent.setAlignment(QtCore.Qt.AlignCenter)
self.spinUeberFertigkeitenPercent.setReadOnly(True)
self.spinUeberFertigkeitenPercent.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons)
self.spinUeberFertigkeitenPercent.setMaximum(100)
self.spinUeberFertigkeitenPercent.setObjectName("spinUeberFertigkeitenPercent")
self.gridLayout_2.addWidget(self.spinUeberFertigkeitenPercent, 7, 2, 1, 1)
self.label_2 = QtWidgets.QLabel(self.groupBox_2)
self.label_2.setMinimumSize(QtCore.QSize(230, 0))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.gridLayout_2.addWidget(self.label_2, 1, 0, 1, 1)
self.spinAttributePercent = QtWidgets.QSpinBox(self.groupBox_2)
self.spinAttributePercent.setFocusPolicy(QtCore.Qt.NoFocus)
self.spinAttributePercent.setAlignment(QtCore.Qt.AlignCenter)
self.spinAttributePercent.setReadOnly(True)
self.spinAttributePercent.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons)
self.spinAttributePercent.setMaximum(100)
self.spinAttributePercent.setObjectName("spinAttributePercent")
self.gridLayout_2.addWidget(self.spinAttributePercent, 0, 2, 1, 1)
self.spinUeberTalentePercent = QtWidgets.QSpinBox(self.groupBox_2)
self.spinUeberTalentePercent.setFocusPolicy(QtCore.Qt.NoFocus)
self.spinUeberTalentePercent.setAlignment(QtCore.Qt.AlignCenter)
self.spinUeberTalentePercent.setReadOnly(True)
self.spinUeberTalentePercent.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons)
self.spinUeberTalentePercent.setMaximum(100)
self.spinUeberTalentePercent.setObjectName("spinUeberTalentePercent")
self.gridLayout_2.addWidget(self.spinUeberTalentePercent, 8, 2, 1, 1)
self.labelUeber1 = QtWidgets.QLabel(self.groupBox_2)
self.labelUeber1.setMinimumSize(QtCore.QSize(230, 0))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.labelUeber1.setFont(font)
self.labelUeber1.setObjectName("labelUeber1")
self.gridLayout_2.addWidget(self.labelUeber1, 6, 0, 1, 1)
self.label_4 = QtWidgets.QLabel(self.groupBox_2)
self.label_4.setMinimumSize(QtCore.QSize(230, 0))
font = QtGui.QFont()
font.setItalic(False)
self.label_4.setFont(font)
self.label_4.setObjectName("label_4")
self.gridLayout_2.addWidget(self.label_4, 5, 0, 1, 1)
self.spinUebernatuerlichSpent = QtWidgets.QSpinBox(self.groupBox_2)
self.spinUebernatuerlichSpent.setFocusPolicy(QtCore.Qt.NoFocus)
self.spinUebernatuerlichSpent.setAlignment(QtCore.Qt.AlignCenter)
self.spinUebernatuerlichSpent.setReadOnly(True)
self.spinUebernatuerlichSpent.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons)
self.spinUebernatuerlichSpent.setMaximum(999999)
self.spinUebernatuerlichSpent.setObjectName("spinUebernatuerlichSpent")
self.gridLayout_2.addWidget(self.spinUebernatuerlichSpent, 6, 1, 1, 1)
self.spinUeberFertigkeitenSpent = QtWidgets.QSpinBox(self.groupBox_2)
self.spinUeberFertigkeitenSpent.setFocusPolicy(QtCore.Qt.NoFocus)
self.spinUeberFertigkeitenSpent.setAlignment(QtCore.Qt.AlignCenter)
self.spinUeberFertigkeitenSpent.setReadOnly(True)
self.spinUeberFertigkeitenSpent.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons)
self.spinUeberFertigkeitenSpent.setMaximum(999999)
self.spinUeberFertigkeitenSpent.setObjectName("spinUeberFertigkeitenSpent")
self.gridLayout_2.addWidget(self.spinUeberFertigkeitenSpent, 7, 1, 1, 1)
self.spinFreiePercent = QtWidgets.QSpinBox(self.groupBox_2)
self.spinFreiePercent.setFocusPolicy(QtCore.Qt.NoFocus)
self.spinFreiePercent.setAlignment(QtCore.Qt.AlignCenter)
self.spinFreiePercent.setReadOnly(True)
self.spinFreiePercent.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons)
self.spinFreiePercent.setMaximum(100)
self.spinFreiePercent.setObjectName("spinFreiePercent")
self.gridLayout_2.addWidget(self.spinFreiePercent, 5, 2, 1, 1)
self.spinFertigkeitenPercent = QtWidgets.QSpinBox(self.groupBox_2)
self.spinFertigkeitenPercent.setFocusPolicy(QtCore.Qt.NoFocus)
self.spinFertigkeitenPercent.setAlignment(QtCore.Qt.AlignCenter)
self.spinFertigkeitenPercent.setReadOnly(True)
self.spinFertigkeitenPercent.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons)
self.spinFertigkeitenPercent.setMaximum(100)
self.spinFertigkeitenPercent.setObjectName("spinFertigkeitenPercent")
self.gridLayout_2.addWidget(self.spinFertigkeitenPercent, 3, 2, 1, 1)
self.spinTalentePercent = QtWidgets.QSpinBox(self.groupBox_2)
self.spinTalentePercent.setFocusPolicy(QtCore.Qt.NoFocus)
self.spinTalentePercent.setAlignment(QtCore.Qt.AlignCenter)
self.spinTalentePercent.setReadOnly(True)
self.spinTalentePercent.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons)
self.spinTalentePercent.setMaximum(100)
self.spinTalentePercent.setObjectName("spinTalentePercent")
self.gridLayout_2.addWidget(self.spinTalentePercent, 4, 2, 1, 1)
self.spinProfanSpent = QtWidgets.QSpinBox(self.groupBox_2)
self.spinProfanSpent.setFocusPolicy(QtCore.Qt.NoFocus)
self.spinProfanSpent.setAlignment(QtCore.Qt.AlignCenter)
self.spinProfanSpent.setReadOnly(True)
self.spinProfanSpent.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons)
self.spinProfanSpent.setMaximum(999999)
self.spinProfanSpent.setObjectName("spinProfanSpent")
self.gridLayout_2.addWidget(self.spinProfanSpent, 2, 1, 1, 1)
self.label = QtWidgets.QLabel(self.groupBox_2)
self.label.setMinimumSize(QtCore.QSize(230, 0))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName("label")
self.gridLayout_2.addWidget(self.label, 0, 0, 1, 1)
self.label_9 = QtWidgets.QLabel(self.groupBox_2)
self.label_9.setMinimumSize(QtCore.QSize(230, 0))
font = QtGui.QFont()
font.setItalic(False)
self.label_9.setFont(font)
self.label_9.setObjectName("label_9")
self.gridLayout_2.addWidget(self.label_9, 4, 0, 1, 1)
self.labelUeber2 = QtWidgets.QLabel(self.groupBox_2)
self.labelUeber2.setMinimumSize(QtCore.QSize(230, 0))
font = QtGui.QFont()
font.setItalic(False)
self.labelUeber2.setFont(font)
self.labelUeber2.setObjectName("labelUeber2")
self.gridLayout_2.addWidget(self.labelUeber2, 7, 0, 1, 1)
self.label_8 = QtWidgets.QLabel(self.groupBox_2)
self.label_8.setMinimumSize(QtCore.QSize(230, 0))
font = QtGui.QFont()
font.setItalic(False)
self.label_8.setFont(font)
self.label_8.setObjectName("label_8")
self.gridLayout_2.addWidget(self.label_8, 3, 0, 1, 1)
self.label_3 = QtWidgets.QLabel(self.groupBox_2)
self.label_3.setMinimumSize(QtCore.QSize(230, 0))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.gridLayout_2.addWidget(self.label_3, 2, 0, 1, 1)
self.spinTalenteSpent = QtWidgets.QSpinBox(self.groupBox_2)
self.spinTalenteSpent.setFocusPolicy(QtCore.Qt.NoFocus)
self.spinTalenteSpent.setAlignment(QtCore.Qt.AlignCenter)
self.spinTalenteSpent.setReadOnly(True)
self.spinTalenteSpent.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons)
self.spinTalenteSpent.setMaximum(999999)
self.spinTalenteSpent.setObjectName("spinTalenteSpent")
self.gridLayout_2.addWidget(self.spinTalenteSpent, 4, 1, 1, 1)
self.spinVorteilePercent = QtWidgets.QSpinBox(self.groupBox_2)
self.spinVorteilePercent.setFocusPolicy(QtCore.Qt.NoFocus)
self.spinVorteilePercent.setAlignment(QtCore.Qt.AlignCenter)
self.spinVorteilePercent.setReadOnly(True)
self.spinVorteilePercent.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons)
self.spinVorteilePercent.setMaximum(100)
self.spinVorteilePercent.setObjectName("spinVorteilePercent")
self.gridLayout_2.addWidget(self.spinVorteilePercent, 1, 2, 1, 1)
self.gridLayout_4.addLayout(self.gridLayout_2, 0, 0, 1, 1)
self.verticalLayout_4.addWidget(self.groupBox_2)
spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_4.addItem(spacerItem1)
self.gridLayout.addLayout(self.verticalLayout_4, 0, 1, 1, 1)
self.verticalLayout_3 = QtWidgets.QVBoxLayout()
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.labelNotiz = QtWidgets.QLabel(Form)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.labelNotiz.setFont(font)
self.labelNotiz.setObjectName("labelNotiz")
self.verticalLayout_3.addWidget(self.labelNotiz)
self.groupBox = QtWidgets.QGroupBox(Form)
self.groupBox.setTitle("")
self.groupBox.setObjectName("groupBox")
self.gridLayout_3 = QtWidgets.QGridLayout(self.groupBox)
self.gridLayout_3.setContentsMargins(20, 20, 20, 20)
self.gridLayout_3.setObjectName("gridLayout_3")
self.teNotiz = QtWidgets.QPlainTextEdit(self.groupBox)
self.teNotiz.setPlainText("")
self.teNotiz.setObjectName("teNotiz")
self.gridLayout_3.addWidget(self.teNotiz, 0, 0, 1, 1)
self.verticalLayout_3.addWidget(self.groupBox)
self.gridLayout.addLayout(self.verticalLayout_3, 0, 0, 1, 1)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
Form.setTabOrder(self.teNotiz, self.checkReq)
Form.setTabOrder(self.checkReq, self.checkFinanzen)
Form.setTabOrder(self.checkFinanzen, self.checkUeberPDF)
Form.setTabOrder(self.checkUeberPDF, self.comboHausregeln)
Form.setTabOrder(self.comboHausregeln, self.comboCharsheet)
Form.setTabOrder(self.comboCharsheet, self.checkRegeln)
Form.setTabOrder(self.checkRegeln, self.comboRegelnGroesse)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Form"))
self.labelEinstellungen.setText(_translate("Form", "Charakter-Einstellungen"))
self.checkReq.setToolTip(_translate("Form", "Falls abgewählt, werden sämtliche Voraussetzungsprüfungen für Vorteile, übernatürliche Fertigkeiten usw. deaktiviert."))
self.checkReq.setText(_translate("Form", "Voraussetzungen überprüfen"))
self.label_5.setText(_translate("Form", "Hausregeln:"))
self.label_7.setText(_translate("Form", "Regelschriftgröße:"))
self.checkUeberPDF.setToolTip(_translate("Form", "<html><head/><body><p>Sephrasto übernimmt automatisch alle übernatürlichen Fertigkeiten in den Charakterbogen, deren FW mindestens 1 beträgt und für welche du mindestens ein Talent aktiviert hast. Wenn du diese Option aktivierst, zeigt Sephrasto eine PDF-Spalte bei den übernatürlichen Fertigkeiten an. Mit dieser kannst du selbst entscheiden, welche Fertigkeiten in den Charakterbogen übernommen werden sollen.</p></body></html>"))
self.checkUeberPDF.setText(_translate("Form", "PDF-Ausgabe von übernatürlichen Fertigkeiten manuell auswählen"))
self.label_6.setText(_translate("Form", "Charakterbogen:"))
self.checkFinanzen.setToolTip(_translate("Form", "<html><head/><body><p>Die Finanzen spielen nur bei einem neuen Charakter eine Rolle und können nach dem ersten Abenteuer ausgeblendet werden. Auch die aktuellen Schicksalspunkte werden dann nicht mehr ausgegeben, da diese ab dem ersten Abenteuer händisch verwaltet werden.</p></body></html>"))
self.checkFinanzen.setText(_translate("Form", "Finanzen anzeigen und aktuelle Schicksalspunkte ausgeben"))
self.labelReload.setText(_translate("Form", "Der Charakter muss gespeichert und neu geladen werden, damit alle Änderungen übernommen werden können!"))
self.comboRegelnGroesse.setItemText(0, _translate("Form", "Klein"))
self.comboRegelnGroesse.setItemText(1, _translate("Form", "Mittel"))
self.comboRegelnGroesse.setItemText(2, _translate("Form", "Groß"))
self.checkRegeln.setText(_translate("Form", "Dem Charakterbogen relevante Ilaris Regeln anhängen"))
self.label_10.setText(_translate("Form", "Regelkategorien:"))
self.labelEP.setText(_translate("Form", "EP-Verteilung"))
self.spinFertigkeitenSpent.setSuffix(_translate("Form", " EP"))
self.spinUebernatuerlichPercent.setSuffix(_translate("Form", " %"))
self.labelUeber3.setText(_translate("Form", " Talente"))
self.spinProfanPercent.setSuffix(_translate("Form", " %"))
self.spinVorteileSpent.setSuffix(_translate("Form", " EP"))
self.spinAttributeSpent.setSuffix(_translate("Form", " EP"))
self.spinUeberTalenteSpent.setSuffix(_translate("Form", " EP"))
self.spinFreieSpent.setSuffix(_translate("Form", " EP"))
self.spinUeberFertigkeitenPercent.setSuffix(_translate("Form", " %)"))
self.spinUeberFertigkeitenPercent.setPrefix(_translate("Form", "("))
self.label_2.setText(_translate("Form", "Vorteile"))
self.spinAttributePercent.setSuffix(_translate("Form", " %"))
self.spinUeberTalentePercent.setSuffix(_translate("Form", " %)"))
self.spinUeberTalentePercent.setPrefix(_translate("Form", "("))
self.labelUeber1.setText(_translate("Form", "Übernatürliche Fertigkeiten und Talente"))
self.label_4.setText(_translate("Form", " Freie Fertigkeiten"))
self.spinUebernatuerlichSpent.setSuffix(_translate("Form", " EP"))
self.spinUeberFertigkeitenSpent.setSuffix(_translate("Form", " EP"))
self.spinFreiePercent.setSuffix(_translate("Form", " %)"))
self.spinFreiePercent.setPrefix(_translate("Form", "("))
self.spinFertigkeitenPercent.setSuffix(_translate("Form", " %)"))
self.spinFertigkeitenPercent.setPrefix(_translate("Form", "("))
self.spinTalentePercent.setSuffix(_translate("Form", " %)"))
self.spinTalentePercent.setPrefix(_translate("Form", "("))
self.spinProfanSpent.setSuffix(_translate("Form", " EP"))
self.label.setText(_translate("Form", "Attribute"))
self.label_9.setText(_translate("Form", " Talente"))
self.labelUeber2.setText(_translate("Form", " Fertigkeiten"))
self.label_8.setText(_translate("Form", " Fertigkeiten"))
self.label_3.setText(_translate("Form", "Profane Fertigkeiten und Talente"))
self.spinTalenteSpent.setSuffix(_translate("Form", " EP"))
self.spinVorteilePercent.setSuffix(_translate("Form", " %"))
self.labelNotiz.setText(_translate("Form", "Notiz"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Form = QtWidgets.QWidget()
ui = Ui_Form()
ui.setupUi(Form)
Form.show()
sys.exit(app.exec_())
|
StarcoderdataPython
|
3308688
|
<filename>ts3/filetransfer.py
#!/usr/bin/env python3
# The MIT License (MIT)
#
# Copyright (c) 2013-2018 <see AUTHORS.txt>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
This module contains an API for the TS3 file transfer interface.
"""
# Modules
# ------------------------------------------------
import socket
import time
import threading
# local
try:
from common import TS3Error
except ImportError:
from .common import TS3Error
# Data
# ------------------------------------------------
__all__ = [
"TS3FileTransferError",
"TS3UploadError",
"TS3DownloadError",
"TS3FileTransfer"]
# Exceptions
# ------------------------------------------------
class TS3FileTransferError(TS3Error):
"""
This is the base class for all exceptions in this module.
"""
class TS3UploadError(TS3FileTransferError):
"""
    Raised when an upload fails.
"""
def __init__(self, send_size, err=None):
        #: The number of bytes sent until the error occurred.
self.send_size = send_size
#: If the upload failed due to a thrown exception, this attribute
#: contains it.
self.err = err
return None
def __str__(self):
tmp = "TS3 file upload failed. "
if self.err is not None:
tmp += str(self.err)
return tmp
class TS3DownloadError(TS3FileTransferError):
"""
    Raised when a download fails.
"""
def __init__(self, read_size, err=None):
        #: The number of bytes read until the error occurred.
self.read_size = read_size
#: If the download failed due to a thrown exception, this attribute
#: contains the original exception.
self.err = err
return None
def __str__(self):
tmp = "TS3 file download failed. "
if self.err is not None:
tmp += str(self.err)
return tmp
# Classes
# ------------------------------------------------
class TS3FileTransfer(object):
"""
A high-level TS3 file transfer handler.
The recommended methods to download or upload a file are:
* :meth:`init_download`
* :meth:`init_upload`
"""
# Counter for the client file transfer ids.
_FTID = 0
_FTID_LOCK = threading.Lock()
def __init__(self, ts3conn):
"""
        Creates a new TS3FileTransfer object that is associated with
        the TS3Connection ts3conn. This means that calls of
:meth:`init_download` and :meth:`init_upload` will use this
connection to authenticate the file transfer.
"""
self.ts3conn = ts3conn
return None
# Common stuff
# --------------------------------------------
@classmethod
def get_ftid(cls):
"""
:return:
Returns a unique id for a file transfer.
:rtype:
int
"""
with cls._FTID_LOCK:
tmp = cls._FTID
cls._FTID += 1
return tmp
@classmethod
    def _ip_from_resp(cls, ip_val):
"""
The value of the ip key in a TS3QueryResponse is a comma separated
list of ips and this method parses the list and returns the first ip.
>>> ts3ft._ip_from_resp('0.0.0.0,172.16.17.32')
'localhost'
>>> ts3ft._ip_from_resp('172.16.17.32,')
'172.16.17.32'
"""
ip_val = ip_val.split(",")
ip = ip_val[0]
if ip == "0.0.0.0":
ip = "localhost"
return ip
# Download
# --------------------------------------------
def init_download(self, output_file,
name, cid, cpw="", seekpos=0,
query_resp_hook=None, reporthook=None):
"""
This is the recommended method to download a file from a TS3 server.
**name**, **cid**, **cpw** and **seekpos** are the parameters for the
        TS3 query command **ftinitdownload**. The parameter **clientftfid** is
        automatically created and unique for the whole runtime of the program.
        **query_resp_hook**, if provided, is called when the response of the
        ftinitdownload query has been received. Its single parameter is the
        response of the query.
        For downloading the file from the server, :meth:`download()` is called,
        so take a look at that method for further information.
.. seealso::
* :meth:`~commands.TS3Commands.ftinitdownload`
* :func:`~urllib.request.urlretrieve`
"""
ftid = self.get_ftid()
resp = self.ts3conn.ftinitdownload(
clientftfid=ftid, name=name, cid=cid, cpw=cpw, seekpos=seekpos)
if query_resp_hook is not None:
query_resp_hook(resp)
return self.download_by_resp(
output_file=output_file, ftinitdownload_resp=resp,
seekpos=seekpos, reporthook=reporthook,
fallbackhost=self.ts3conn.telnet_conn.host)
@classmethod
def download_by_resp(cls, output_file, ftinitdownload_resp,
seekpos=0, reporthook=None, fallbackhost=None):
"""
This is *almost* a shortcut for:
>>> TS3FileTransfer.download(
... output_file = file,
... adr = (resp[0]["ip"], int(resp[0]["port"])),
... ftkey = resp[0]["ftkey"],
... seekpos = seekpos,
... total_size = resp[0]["size"],
... reporthook = reporthook
... )
        Note that the value of ``resp[0]["ip"]`` is a csv list and needs
to be parsed.
"""
if "ip" in ftinitdownload_resp[0]:
ip = cls._ip_from_resp(ftinitdownload_resp[0]["ip"])
elif fallbackhost:
ip = fallbackhost
else:
raise TS3DownloadError(0, "The response did not contain an ip.")
port = int(ftinitdownload_resp[0]["port"])
adr = (ip, port)
ftkey = ftinitdownload_resp[0]["ftkey"]
total_size = int(ftinitdownload_resp[0]["size"])
return cls.download(
output_file=output_file, adr=adr, ftkey=ftkey, seekpos=seekpos,
total_size=total_size, reporthook=reporthook)
@classmethod
def download(cls, output_file, adr, ftkey,
seekpos=0, total_size=0, reporthook=None):
"""
Downloads a file from a TS3 server in the file **output_file**. The
TS3 file transfer interface is specified with the address tuple **adr**
and the download with the file transfer key **ftkey**.
If **seekpos** and the total **size** are provided, the **reporthook**
function (lambda read_size, block_size, total_size: None) is called
after receiving a new block.
If you provide **seekpos** and **total_size**, this method will check,
if the download is complete and raise a :exc:`TS3DownloadError` if not.
        Note that if **total_size** is 0 or less, each download will be
considered as complete.
If no error is raised, the number of read bytes is returned.
:return:
The number of received bytes.
:rtype:
int
:raises TS3DownloadError:
If the download is incomplete or a socket error occured.
"""
        # Convert the ftkey if necessary.
if isinstance(ftkey, str):
ftkey = ftkey.encode()
if seekpos < 0:
raise ValueError("Seekpos has to be >= 0!")
read_size = seekpos
block_size = 4096
try:
with socket.create_connection(adr) as sock:
sock.sendall(ftkey)
# Begin with the download.
if reporthook is not None:
reporthook(read_size, block_size, total_size)
while True:
data = sock.recv(block_size)
output_file.write(data)
read_size += len(data)
if reporthook is not None:
reporthook(read_size, block_size, total_size)
# Break, if the connection has been closed.
if not data:
break
# Catch all socket errors.
except OSError as err:
raise TS3DownloadError(read_size, err)
# Raise an error, if the download is not complete.
if read_size < total_size:
raise TS3DownloadError(read_size)
return read_size
# Upload
# --------------------------------------------
def init_upload(self, input_file,
name, cid, cpw="", overwrite=True, resume=False,
query_resp_hook=None, reporthook=None):
"""
This is the recommended method to upload a file to a TS3 server.
**name**, **cid**, **cpw**, **overwrite** and **resume** are the
        parameters for the TS3 query command **ftinitupload**.
        The parameter **clientftfid** is automatically created and unique for
        the whole runtime of the program and the value of **size** is
        retrieved from the size of the **input_file**.
        **query_resp_hook**, if provided, is called when the response of the
        ftinitupload query has been received. Its single parameter is the
        response of the query.
For uploading the file to the server :meth:`upload` is called. So
take a look at this method for further information.
.. seealso::
* :meth:`~commands.TS3Commands.ftinitupload`
* :func:`~urllib.request.urlretrieve`
"""
overwrite = "1" if overwrite else "0"
resume = "1" if resume else "0"
input_file.seek(0, 2)
size = input_file.tell()
ftid = self.get_ftid()
resp = self.ts3conn.ftinitupload(
clientftfid=ftid, name=name, cid=cid, cpw=cpw, size=size,
overwrite=overwrite, resume=resume)
if query_resp_hook is not None:
query_resp_hook(resp)
return self.upload_by_resp(
input_file=input_file, ftinitupload_resp=resp,
reporthook=reporthook, fallbackhost=self.ts3conn.telnet_conn.host)
@classmethod
def upload_by_resp(cls, input_file, ftinitupload_resp,
reporthook=None, fallbackhost=None):
"""
This is *almost* a shortcut for:
        >>> TS3FileTransfer.upload(
        ...     input_file = file,
        ...     adr = (resp[0]["ip"], int(resp[0]["port"])),
        ...     ftkey = resp[0]["ftkey"],
        ...     seekpos = resp[0]["seekpos"],
        ...     reporthook = reporthook
        ... )
        Note that the value of ``resp[0]["ip"]`` is a csv list and needs
to be parsed.
For the final upload, :meth:`upload` is called.
"""
if "ip" in ftinitupload_resp[0]:
ip = cls._ip_from_resp(ftinitupload_resp[0]["ip"])
elif fallbackhost:
ip = fallbackhost
else:
raise TS3UploadError(0, "The response did not contain an ip.")
port = int(ftinitupload_resp[0]["port"])
adr = (ip, port)
ftkey = ftinitupload_resp[0]["ftkey"]
seekpos = int(ftinitupload_resp[0]["seekpos"])
return cls.upload(
input_file=input_file, adr=adr, ftkey=ftkey, seekpos=seekpos,
reporthook=reporthook)
@classmethod
def upload(cls, input_file, adr, ftkey,
seekpos=0, reporthook=None):
"""
Uploads the data in the file **input_file** to the TS3 server listening
at the address **adr**. **ftkey** is used to authenticate the file
transfer.
When the upload begins, the *get pointer* of the **input_file** is set
to seekpos.
If the **reporthook** function (lambda send_size, block_size, total_size)
is provided, it is called after sending a block to the server.
"""
if isinstance(ftkey, str):
ftkey = ftkey.encode()
# Get the total size of the file and put the get pointer to the correct
# position.
input_file.seek(0, 2)
total_size = input_file.tell()
input_file.seek(seekpos)
send_size = seekpos
block_size = 4096
try:
with socket.create_connection(adr) as sock:
sock.sendall(ftkey)
# Begin with the upload
if reporthook is not None:
reporthook(send_size, block_size, total_size)
while True:
data = input_file.read(block_size)
sock.sendall(data)
send_size += len(data)
if reporthook is not None:
reporthook(send_size, block_size, total_size)
if not data:
break
        # Catch all socket errors.
        except OSError as err:
            raise TS3UploadError(send_size, err)
# Raise an error, if the upload is not complete.
if send_size < total_size:
            raise TS3UploadError(send_size)
return send_size
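# ------------------------------------------------
# Usage sketch (illustrative, not part of the original module): downloading
# and re-uploading a channel file, assuming a hypothetical, already
# authenticated query connection `ts3conn` and a channel with id 1.
#
# ft = TS3FileTransfer(ts3conn)
#
# # Download /icon.png from channel 1 into a local file.
# with open("icon.png", "wb") as f:
#     ft.init_download(output_file=f, name="/icon.png", cid=1)
#
# # Upload it again under a new name.
# with open("icon.png", "rb") as f:
#     ft.init_upload(input_file=f, name="/icon_copy.png", cid=1)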
|
StarcoderdataPython
|
1760222
|
import os
import sys
from collections import OrderedDict
from matplotlib import pyplot as plt
from PIL import Image
import numpy as np
class History():
def __init__(self):
self.epoch_log = []
self.batch_log = {}
def update_epoch_log(self, log):
        if not isinstance(log, OrderedDict):
raise ValueError('log must be an OrderedDict')
self.epoch_log.append(log)
return
def update_batch_log(self, key, value):
lst = self.batch_log.get(key, [])
lst.append(value)
self.batch_log[key] = lst
return
def get_epoch_log(self, key):
results = []
for d in self.epoch_log:
value = d.get(key, None)
if value is None: raise ValueError('could not find key %s' % (key))
results.append(value)
return results
def get_batch_log(self, key):
values = self.batch_log.get(key, None)
if values is None: raise ValueError('could not find key %s' % (key))
return values
def has_epoch_key(self, key):
return self.epoch_log and key in self.epoch_log[0]
def has_batch_key(self, key):
return key in self.batch_log
def show_image(img_path):
if not os.path.isfile(img_path):
raise ValueError('%s is not valid file' % (img_path))
img = plt.imread(img_path)
out = plt.imshow(img)
return out
def load_image(infilename):
    img = Image.open(infilename)
    img.load()
    data = np.asarray(img, dtype="int32")
    return data
#-------------------------------------------------------------------------------
# The ProgressBar, log_to_message, and add_metrics_to_log functions
# support Keras-style training progress in Jupyter notebooks.
# Code Reference: https://github.com/henryre/pytorch-fitmodule
#-------------------------------------------------------------------------------
def add_metrics_to_log(log, metrics, y_true, y_pred, prefix=''):
for metric in metrics:
q = metric(y_true, y_pred)
log[prefix + metric.__name__] = q
return log
def log_to_message(log, precision=4, omit=[]):
fmt = "{0}: {1:." + str(precision) + "f}"
sep = " "
return sep+sep.join(fmt.format(k, v) for k, v in log.items() if k not in omit)
class ProgressBar(object):
"""Cheers @ajratner"""
def __init__(self, n, length=40):
# Protect against division by zero
self.n = max(1, n)
self.nf = float(n)
self.length = length
# Precalculate the i values that should trigger a write operation
self.ticks = set([round(i/100.0 * n) for i in range(101)])
self.ticks.add(n-1)
self.bar(0)
def bar(self, i, message=""):
"""Assumes i ranges through [0, n-1]"""
if i in self.ticks:
b = int(np.ceil(((i+1) / self.nf) * self.length))
sys.stdout.write("\r[{0}{1}] {2}%\t{3}".format(
"="*b, " "*(self.length-b), int(100*((i+1) / self.nf)), message
))
sys.stdout.flush()
def close(self, message=""):
# Move the bar to 100% before closing
self.bar(self.n-1)
sys.stdout.write("{0}\n\n".format(message))
sys.stdout.flush()
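#-------------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): wiring the
# History and ProgressBar helpers into a toy loop. The loss values are made up.
#-------------------------------------------------------------------------------
if __name__ == "__main__":
    history = History()
    n_batches = 50
    pb = ProgressBar(n_batches)
    log = OrderedDict()
    for i in range(n_batches):
        log['loss'] = 1.0 / (i + 1)  # placeholder metric
        history.update_batch_log('loss', log['loss'])
        pb.bar(i, log_to_message(log))
    pb.close(log_to_message(log))
    history.update_epoch_log(log)
    print(history.get_epoch_log('loss'))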
|
StarcoderdataPython
|
1605293
|
<filename>Download/exercicio028.py
# guessing game
from random import randint
from time import sleep
computador = randint(0, 5)  # the computer picks a number
print('\033[;34m-=\033[m' * 30)
print('\033[;33mI will think of a number between 0 and 5, try to guess it...\033[m')
print('\033[;34m-=\033[m' * 30)
jogador = int(input('\033[35m Which number did I pick?\033[m '))  # the player tries to guess
print('\033[35m PROCESSING...\033[m')
sleep(3)
if jogador == computador:
    print('\033[;32m CONGRATULATIONS! You won!\033[m')
else:
    print('\033[1;31m I WON! I picked the number {}, not {}.\033[m'.format(computador, jogador))
|
StarcoderdataPython
|
3352727
|
<reponame>NoMigraine/migraine-diary-server
"""isort:skip_file"""
import logging
import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, BASE_DIR)
from app.db.init_db import init_db # noqa isort:skip
from app.db.session import SessionLocal # noqa isort:skip
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def init() -> None:
db = SessionLocal()
init_db(db)
def main() -> None:
logger.info("Creating initial data")
init()
logger.info("Initial data created")
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3278907
|
import cv2
from os.path import basename, isfile, join
from loguru import logger
from filetype import guess
from skimage import exposure
from skimage.feature import hog
class Image:
"""
Custom Image class to store more information about the Image
wrapper around the openCV image object
"""
def __init__(self, path, entity=None):
self.path = path
self.name = self.verbose_name(self.path)
self.entity = entity
@staticmethod
def isValid(path):
if isfile(path):
kind = guess(path)
if kind is not None and kind.mime.startswith("image"):
return basename(path)
return False
@staticmethod
def verbose_name(path):
return Image.isValid(path)
def __call__(self):
"""
will load the image and return
:return:
"""
logger.info('loading image: {}'.format(self.name))
image = cv2.imread(self.path)
return image
def __str__(self):
return self.name
def __repr__(self):
return f'<Data.Image: path={self.path}>'
# OpenCv Utilities
def normalize_histogram(image, grid=16):
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
normalizer = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(grid, grid))
normalized_image = normalizer.apply(image)
return normalized_image
def display_image(image, label='image'):
"""
Method for displaying the image
:param image: Image Object
:param label: Text to be displayed on the window
:return None:
"""
# converting image to RGB before displaying
# image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
# displaying the image
cv2.imshow(label, image)
# waiting util the user does not press key => 0
cv2.waitKey(0)
cv2.destroyAllWindows()
def save_image(image, name, path, converter=None):
"""
Method for saving the image
:param converter:
:param image: object of image that is to be saved
:param name: name by the image should be saved
:param path: path to the directory where image should be saved
:return None:
"""
# if converter is defined
if converter is not None:
logger.debug('converting image')
# convert the image channels
image = cv2.cvtColor(image, converter)
path = join(path, name)
cv2.imwrite(path, image)
# Utilities Used by Detector.py
def rect_to_bounding_box(rect):
"""
    Method for converting dlib's rect object
    to a standard bounding box of (x, y, width, height)
    :param rect:
    :return tuple: (x, y, w, h)
"""
x = rect.left()
y = rect.top()
w = rect.right() - x
h = rect.bottom() - y
return x, y, w, h
def draw_text(image, text, face_location, color=(0, 0, 255), rect=True):
"""
function to draw text on give image starting from
passed (x, y) coordinates.
:param color:
:param rect:
:param image:
:param text:
:param face_location:
:return:
"""
if rect:
x1, y1, x2, y2 = rect_to_bounding_box(face_location)
else:
x1, y1, x2, y2 = tuple(face_location)
cv2.putText(image, text, (x1, y1), cv2.FONT_HERSHEY_PLAIN, 1.5, color, 2)
def draw_rectangles(image, face_locations, color=(0, 0, 255), thickness=5, rect=True):
"""
Method for drawing Rectangles around the detected face locations
:param rect:
:param image: image-object
:param face_locations: list of tuples
:param color: tuple of three elements
:param thickness: integer value
:return image: a new image with rectangles drawn on it
"""
logger.info('Drawing rectangles around detected faces')
if rect:
face_locations = list(map(lambda location: rect_to_bounding_box(location), face_locations))
    for (x, y, w, h) in face_locations:
        cv2.rectangle(image, (x, y), (x + w, y + h), color, thickness)
return image
def draw_landmarks(image, landmarks=None, color=(255, 0, 0)):
for x, y in landmarks:
cv2.circle(image, (x, y), 1, color, 2)
def image_to_hog(image):
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16), cells_per_block=(1, 1), visualize=True)
hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 10))
return hog_image_rescaled
|
StarcoderdataPython
|
1718105
|
__author__ = "<NAME>"
__version__ = "1.0"
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.structures import BoxMode
import os
import json
def get_dataset_dict(json_dir, json_name, destination_image_source_dir=None):
    'to convert the output json file into a dictionary digestible by detectron'
# load json file
json_file = os.path.join(json_dir, json_name)
with open(json_file) as f:
imgs_anns = json.load(f)
# start making output list
# format: each image is a dictionary, with keys annotation, file_name, image_id, height, width
# annotation is a list of dictionaries, each of which contains keys bbox, bbox_mode, category_id, segmentation
output_l = []
img_l = imgs_anns['images']
annotations_l = imgs_anns['annotations']
# iterate thru each image
for image in img_l:
current_img_id = image['id']
current_img_dict = {}
current_img_annotations = []
# iterate thru each annotation
for annotation in annotations_l:
if annotation['image_id'] == current_img_id:
current_img_annotations.append({
'bbox': annotation['bbox'],
                    # assuming COCO-style [x, y, width, height] boxes here
                    'bbox_mode': BoxMode.XYWH_ABS,
'category_id': annotation['category_id'],
'segmentation': annotation['segmentation']
})
# update source image directory in case files are moved to a different directory than initially processed
        if destination_image_source_dir is not None:
current_img_filename = os.path.join(
destination_image_source_dir, image['file_name'].split('/')[-1])
else:
current_img_filename = image['file_name']
# generate outputs
current_img_dict.update({'annotations': current_img_annotations,
'file_name': current_img_filename,
'height': image['height'],
'image_id': current_img_id,
'width': image['width']})
output_l.append(current_img_dict)
return output_l
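# Usage sketch (illustrative): registering the converted annotations with
# detectron2's catalogs, which is what the imports above are for. The dataset
# name, json location and class list are placeholders.
#
# DatasetCatalog.register(
#     "my_dataset_train",
#     lambda: get_dataset_dict("annotations", "train.json"))
# MetadataCatalog.get("my_dataset_train").thing_classes = ["tumor"]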
|
StarcoderdataPython
|
1692109
|
<reponame>QTIM-Lab/qtim_gbmSegmenter
import argparse
import sys
from qtim_gbmSegmenter.Config_Library.docker_workflow import full_pipeline, dicom_convert
from qtim_gbmSegmenter.Config_Library.docker_wrapper import docker_segmentation
class segmenter_commands(object):
def __init__(self):
parser = argparse.ArgumentParser(
            description='A number of pre-packaged commands used by the Quantitative Tumor Imaging Lab at the Martinos Center',
usage='''segment <command> [<args>]
The following commands are available:
pipeline Run the entire segmentation pipeline, with options to leave certain pre-processing steps out.
    dicom_2_nifti               Convert an input DICOM folder into a series of Nifti files.
''')
parser.add_argument('command', help='Subcommand to run')
args = parser.parse_args(sys.argv[1:2])
if not hasattr(self, args.command):
            print('Sorry, that\'s not one of the commands.')
parser.print_help()
exit(1)
# use dispatch pattern to invoke method with same name
getattr(self, args.command)()
def pipeline(self):
parser = argparse.ArgumentParser(
description='''segment pipeline <T2> <T1pre> <T1post> <FLAIR> <output_folder> [-gpu_num <int> -niftis -nobias -preprocessed -keep_outputs]
Segment an image from DICOMs with all preprocessing steps included.
-gpu_num <int> Which CUDA GPU ID # to use.
    -niftis                 Input nifti files instead of DICOM folders.
-nobias Skip the bias correction step.
-preprocessed Skip bias correction, resampling, and registration.
-no_ss [not yet implemented]
-keep_outputs Do not delete files generated from intermediary steps.
''')
parser.add_argument('T2', type=str)
parser.add_argument('T1', type=str)
parser.add_argument('T1POST', type=str)
parser.add_argument('FLAIR', type=str)
parser.add_argument('output', type=str)
parser.add_argument('-gpu_num', nargs='?', const='0', type=str)
parser.add_argument('-niftis', action='store_true')
parser.add_argument('-nobias', action='store_true')
parser.add_argument('-preprocessed', action='store_true')
parser.add_argument('-no_ss', action='store_true') # Currently non-functional
parser.add_argument('-keep_outputs', action='store_true')
args = parser.parse_args(sys.argv[2:])
docker_segmentation(args.T2, args.T1, args.T1POST, args.FLAIR, args.output, gpu_num=args.gpu_num, interactive=False, niftis=args.niftis, nobias=args.nobias, preprocessed=args.preprocessed, no_ss=args.no_ss, keep_outputs=args.keep_outputs)
def docker_pipeline(self):
parser = argparse.ArgumentParser(
description='''segment pipeline <T2> <T1pre> <T1post> <FLAIR> <output_folder> [-gpu_num <int> -niftis -nobias -preprocessed -keep_outputs]
Segment an image from DICOMs with all preprocessing steps included.
-gpu_num <int> Which CUDA GPU ID # to use.
    -niftis                 Input nifti files instead of DICOM folders.
-nobias Skip the bias correction step.
-preprocessed Skip bias correction, resampling, and registration.
-no_ss [not yet implemented]
-keep_outputs Do not delete files generated from intermediary steps.
''')
parser.add_argument('-T2', type=str)
parser.add_argument('-T1', type=str)
parser.add_argument('-T1POST', type=str)
parser.add_argument('-FLAIR', type=str)
parser.add_argument('-output', type=str)
parser.add_argument('-gpu_num', nargs='?', const='0', type=str)
parser.add_argument('-niftis', action='store_true')
parser.add_argument('-nobias', action='store_true')
parser.add_argument('-preprocessed', action='store_true')
parser.add_argument('-no_ss', action='store_true') # Currently non-functional
parser.add_argument('-keep_outputs', action='store_true')
args = parser.parse_args(sys.argv[2:])
        print('Beginning segmentation pipeline...')
full_pipeline(args.T2, args.T1, args.T1POST, args.FLAIR, args.output, args.gpu_num, args.niftis, args.nobias, args.preprocessed, args.no_ss, args.keep_outputs)
def dicom_2_nifti(self):
parser = argparse.ArgumentParser(
description='''segment dicom_2_nifti <input_folder> <output_folder>
Recursively convert an input folder of DICOMs into a output folder of Nifti files. File names
are determined from the DICOM SeriesDescription tag.
''')
parser.add_argument('input_folder', type=str)
parser.add_argument('output_folder', type=str)
args = parser.parse_args(sys.argv[2:])
        print('Beginning DICOM conversion...')
dicom_convert(args.input_folder, args.output_folder)
def main():
segmenter_commands()
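# Example invocations (illustrative; paths are placeholders):
#
#   segment pipeline T2_dicoms/ T1_dicoms/ T1post_dicoms/ FLAIR_dicoms/ output/ -gpu_num 0
#   segment pipeline t2.nii.gz t1.nii.gz t1post.nii.gz flair.nii.gz output/ -niftis
#   segment dicom_2_nifti input_dicoms/ output_niftis/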
|
StarcoderdataPython
|
62112
|
<filename>119.pascals-triangle-ii.py
#
# @lc app=leetcode id=119 lang=python3
#
# [119] Pascal's Triangle II
#
# https://leetcode.com/problems/pascals-triangle-ii/description/
#
# algorithms
# Easy (45.89%)
# Likes: 582
# Dislikes: 182
# Total Accepted: 237.3K
# Total Submissions: 515.4K
# Testcase Example: '3'
#
# Given a non-negative index k where k ≤ 33, return the k^th index row of the
# Pascal's triangle.
#
# Note that the row index starts from 0.
#
#
# In Pascal's triangle, each number is the sum of the two numbers directly
# above it.
#
# Example:
#
#
# Input: 3
# Output: [1,3,3,1]
#
#
# Follow up:
#
# Could you optimize your algorithm to use only O(k) extra space?
#
#
# @lc code=start
from typing import List


class Solution:
    def getRow(self, rowIndex: int) -> List[int]:
        if rowIndex == 0:
            return [1]
        elif rowIndex == 1:
            return [1, 1]
        ans = [1, 1]
        for i in range(1, rowIndex):
            ans = [1] + [ans[j - 1] + ans[j] for j in range(1, len(ans))] + [1]
        return ans
# @lc code=end
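# Quick sanity checks (illustrative): the solution above keeps only a single
# row in memory, so it already satisfies the O(k) extra-space follow-up.
#
# assert Solution().getRow(0) == [1]
# assert Solution().getRow(3) == [1, 3, 3, 1]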
|
StarcoderdataPython
|
1796029
|
## Use BFS with Queue
from collections import deque
from typing import List


class Solution:
def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:
word_set = set(wordList)
queue = deque([[beginWord, 1]])
while queue:
word, seq_len = queue.popleft()
if word == endWord:
return seq_len
for i in range(len(word)):
for c in 'abcdefghijklmnopqrstuvwxyz':
next_word = word[:i] + c + word[i+1:]
if next_word in word_set:
word_set.remove(next_word)
queue.append([next_word, seq_len+1])
return 0
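# Quick check (illustrative, the classic LeetCode example: hit -> hot -> dot
# -> dog -> cog, a sequence of five words):
#
# assert Solution().ladderLength(
#     "hit", "cog", ["hot", "dot", "dog", "lot", "log", "cog"]) == 5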
|
StarcoderdataPython
|
3348709
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.6.0-bd605d07 (http://hl7.org/fhir/StructureDefinition/MedicationKnowledge) on 2018-12-20.
# 2018, SMART Health IT.
from . import domainresource
class MedicationKnowledge(domainresource.DomainResource):
""" Definition of Medication Knowledge.
Information about a medication that is used to support knowledge.
"""
resource_type = "MedicationKnowledge"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.administrationGuidelines = None
""" Gudelines for administration of the medication.
List of `MedicationKnowledgeAdministrationGuidelines` items (represented as `dict` in JSON). """
self.amount = None
""" Amount of drug in package.
Type `Quantity` (represented as `dict` in JSON). """
self.associatedMedication = None
""" A medication resource that is associated with this medication.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.code = None
""" Code that identifies this medication.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.contraindication = None
""" Potential clinical issue with or between medication(s).
List of `FHIRReference` items (represented as `dict` in JSON). """
self.cost = None
""" The pricing of the medication.
List of `MedicationKnowledgeCost` items (represented as `dict` in JSON). """
self.doseForm = None
""" powder | tablets | capsule +.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.drugCharacteristic = None
""" Specifies descriptive properties of the medicine.
List of `MedicationKnowledgeDrugCharacteristic` items (represented as `dict` in JSON). """
self.ingredient = None
""" Active or inactive ingredient.
List of `MedicationKnowledgeIngredient` items (represented as `dict` in JSON). """
self.intendedRoute = None
""" The intended or approved route of administration.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.kinetics = None
""" The time course of drug absorption, distribution, metabolism and
excretion of a medication from the body.
List of `MedicationKnowledgeKinetics` items (represented as `dict` in JSON). """
self.manufacturer = None
""" Manufacturer of the item.
Type `FHIRReference` (represented as `dict` in JSON). """
self.medicineClassification = None
""" Categorization of the medication within a formulary or
classification system.
List of `MedicationKnowledgeMedicineClassification` items (represented as `dict` in JSON). """
self.monitoringProgram = None
""" Program under which a medication is reviewed.
List of `MedicationKnowledgeMonitoringProgram` items (represented as `dict` in JSON). """
self.monograph = None
""" Associated documentation about the medication.
List of `MedicationKnowledgeMonograph` items (represented as `dict` in JSON). """
self.packaging = None
""" Details about packaged medications.
Type `MedicationKnowledgePackaging` (represented as `dict` in JSON). """
self.preparationInstruction = None
""" The instructions for preparing the medication.
Type `str`. """
self._preparationInstruction = None
""" extension for fhir primitive preparationInstruction"""
self.productType = None
""" Category of the medication or product.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.regulatory = None
""" Regulatory information about a medication.
List of `MedicationKnowledgeRegulatory` items (represented as `dict` in JSON). """
self.relatedMedicationKnowledge = None
""" Associated or related medication information.
List of `MedicationKnowledgeRelatedMedicationKnowledge` items (represented as `dict` in JSON). """
self.status = None
""" active | inactive | entered-in-error.
Type `str`. """
self._status = None
""" extension for fhir primitive status"""
self.synonym = None
""" Additional names for a medication.
List of `str` items. """
self._synonym = None
""" extension for fhir primitive synonym"""
super(MedicationKnowledge, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MedicationKnowledge, self).elementProperties()
js.extend([
("administrationGuidelines", "administrationGuidelines", MedicationKnowledgeAdministrationGuidelines, True, None, False),
("amount", "amount", quantity.Quantity, False, None, False),
("associatedMedication", "associatedMedication", fhirreference.FHIRReference, True, None, False),
("code", "code", codeableconcept.CodeableConcept, False, None, False),
("contraindication", "contraindication", fhirreference.FHIRReference, True, None, False),
("cost", "cost", MedicationKnowledgeCost, True, None, False),
("doseForm", "doseForm", codeableconcept.CodeableConcept, False, None, False),
("drugCharacteristic", "drugCharacteristic", MedicationKnowledgeDrugCharacteristic, True, None, False),
("ingredient", "ingredient", MedicationKnowledgeIngredient, True, None, False),
("intendedRoute", "intendedRoute", codeableconcept.CodeableConcept, True, None, False),
("kinetics", "kinetics", MedicationKnowledgeKinetics, True, None, False),
("manufacturer", "manufacturer", fhirreference.FHIRReference, False, None, False),
("medicineClassification", "medicineClassification", MedicationKnowledgeMedicineClassification, True, None, False),
("monitoringProgram", "monitoringProgram", MedicationKnowledgeMonitoringProgram, True, None, False),
("monograph", "monograph", MedicationKnowledgeMonograph, True, None, False),
("packaging", "packaging", MedicationKnowledgePackaging, False, None, False),
("preparationInstruction", "preparationInstruction", str, False, None, False),
("_preparationInstruction", "_preparationInstruction",fhirprimitive.FHIRPrimitive, False, None, False),
("productType", "productType", codeableconcept.CodeableConcept, True, None, False),
("regulatory", "regulatory", MedicationKnowledgeRegulatory, True, None, False),
("relatedMedicationKnowledge", "relatedMedicationKnowledge", MedicationKnowledgeRelatedMedicationKnowledge, True, None, False),
("status", "status", str, False, None, False),
("_status", "_status",fhirprimitive.FHIRPrimitive, False, None, False),
("synonym", "synonym", str, True, None, False),
("_synonym", "_synonym",fhirprimitive.FHIRPrimitive, False, None, False),
])
return js
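# Usage sketch (illustrative, assuming the fhirclient-style package layout
# this file is generated for): a resource can be built from a JSON dictionary.
#
# mk = MedicationKnowledge({"resourceType": "MedicationKnowledge",
#                           "status": "active",
#                           "synonym": ["Aspirin"]})
# print(mk.status)   # -> "active"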
from . import backboneelement
class MedicationKnowledgeAdministrationGuidelines(backboneelement.BackboneElement):
""" Gudelines for administration of the medication.
Guidelines for the administration of the medication.
"""
resource_type = "MedicationKnowledgeAdministrationGuidelines"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.dosage = None
""" Dosage for the medication for the specific guidelines.
List of `MedicationKnowledgeAdministrationGuidelinesDosage` items (represented as `dict` in JSON). """
self.indicationCodeableConcept = None
""" Indication for use that apply to the specific administration
guidelines.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.indicationReference = None
""" Indication for use that apply to the specific administration
guidelines.
Type `FHIRReference` (represented as `dict` in JSON). """
self.patientCharacteristics = None
""" Characteristics of the patient that are relevant to the
administration guidelines.
List of `MedicationKnowledgeAdministrationGuidelinesPatientCharacteristics` items (represented as `dict` in JSON). """
super(MedicationKnowledgeAdministrationGuidelines, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MedicationKnowledgeAdministrationGuidelines, self).elementProperties()
js.extend([
("dosage", "dosage", MedicationKnowledgeAdministrationGuidelinesDosage, True, None, False),
("indicationCodeableConcept", "indicationCodeableConcept", codeableconcept.CodeableConcept, False, "indication", False),
("indicationReference", "indicationReference", fhirreference.FHIRReference, False, "indication", False),
("patientCharacteristics", "patientCharacteristics", MedicationKnowledgeAdministrationGuidelinesPatientCharacteristics, True, None, False),
])
return js
class MedicationKnowledgeAdministrationGuidelinesDosage(backboneelement.BackboneElement):
""" Dosage for the medication for the specific guidelines.
"""
resource_type = "MedicationKnowledgeAdministrationGuidelinesDosage"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.dosage = None
""" Dosage for the medication for the specific guidelines.
List of `Dosage` items (represented as `dict` in JSON). """
self.type = None
""" Type of dosage.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(MedicationKnowledgeAdministrationGuidelinesDosage, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MedicationKnowledgeAdministrationGuidelinesDosage, self).elementProperties()
js.extend([
("dosage", "dosage", dosage.Dosage, True, None, True),
("type", "type", codeableconcept.CodeableConcept, False, None, True),
])
return js
class MedicationKnowledgeAdministrationGuidelinesPatientCharacteristics(backboneelement.BackboneElement):
""" Characteristics of the patient that are relevant to the administration
guidelines.
Characteristics of the patient that are relevant to the administration
    guidelines (for example, height, weight, gender, etc.).
"""
resource_type = "MedicationKnowledgeAdministrationGuidelinesPatientCharacteristics"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.characteristicCodeableConcept = None
""" Specific characteristic that is relevant to the administration
guideline.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.characteristicQuantity = None
""" Specific characteristic that is relevant to the administration
guideline.
Type `Quantity` (represented as `dict` in JSON). """
self.value = None
""" The specific characteristic.
List of `str` items. """
self._value = None
""" extension for fhir primitive value"""
super(MedicationKnowledgeAdministrationGuidelinesPatientCharacteristics, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MedicationKnowledgeAdministrationGuidelinesPatientCharacteristics, self).elementProperties()
js.extend([
("characteristicCodeableConcept", "characteristicCodeableConcept", codeableconcept.CodeableConcept, False, "characteristic", True),
("characteristicQuantity", "characteristicQuantity", quantity.Quantity, False, "characteristic", True),
("value", "value", str, True, None, False),
("_value", "_value",fhirprimitive.FHIRPrimitive, False, None, False),
])
return js
class MedicationKnowledgeCost(backboneelement.BackboneElement):
""" The pricing of the medication.
The price of the medication.
"""
resource_type = "MedicationKnowledgeCost"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.cost = None
""" The price of the medication.
Type `Money` (represented as `dict` in JSON). """
self.source = None
""" The source or owner for the price information.
Type `str`. """
self._source = None
""" extension for fhir primitive source"""
self.type = None
""" The category of the cost information.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(MedicationKnowledgeCost, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MedicationKnowledgeCost, self).elementProperties()
js.extend([
("cost", "cost", money.Money, False, None, True),
("source", "source", str, False, None, False),
("_source", "_source",fhirprimitive.FHIRPrimitive, False, None, False),
("type", "type", codeableconcept.CodeableConcept, False, None, True),
])
return js
class MedicationKnowledgeDrugCharacteristic(backboneelement.BackboneElement):
""" Specifies descriptive properties of the medicine.
Specifies descriptive properties of the medicine, such as color, shape,
imprints, etc.
"""
resource_type = "MedicationKnowledgeDrugCharacteristic"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.type = None
""" Code specifying the type of characteristic of medication.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.valueBase64Binary = None
""" Description of the characteristic.
Type `str`. """
self._valueBase64Binary = None
""" extension for fhir primitive valueBase64Binary"""
self.valueCodeableConcept = None
""" Description of the characteristic.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.valueQuantity = None
""" Description of the characteristic.
Type `Quantity` (represented as `dict` in JSON). """
self.valueString = None
""" Description of the characteristic.
Type `str`. """
self._valueString = None
""" extension for fhir primitive valueString"""
super(MedicationKnowledgeDrugCharacteristic, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MedicationKnowledgeDrugCharacteristic, self).elementProperties()
js.extend([
("type", "type", codeableconcept.CodeableConcept, False, None, False),
("valueBase64Binary", "valueBase64Binary", str, False, "value", False),
("_valueBase64Binary", "_valueBase64Binary",fhirprimitive.FHIRPrimitive, False, None, False),
("valueCodeableConcept", "valueCodeableConcept", codeableconcept.CodeableConcept, False, "value", False),
("valueQuantity", "valueQuantity", quantity.Quantity, False, "value", False),
("valueString", "valueString", str, False, "value", False),
("_valueString", "_valueString",fhirprimitive.FHIRPrimitive, False, None, False),
])
return js
class MedicationKnowledgeIngredient(backboneelement.BackboneElement):
""" Active or inactive ingredient.
Identifies a particular constituent of interest in the product.
"""
resource_type = "MedicationKnowledgeIngredient"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.isActive = None
""" Active ingredient indicator.
Type `bool`. """
self._isActive = None
""" extension for fhir primitive isActive"""
self.itemCodeableConcept = None
""" Medication(s) or substance(s) contained in the medication.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.itemReference = None
""" Medication(s) or substance(s) contained in the medication.
Type `FHIRReference` (represented as `dict` in JSON). """
self.strength = None
""" Quantity of ingredient present.
Type `Ratio` (represented as `dict` in JSON). """
super(MedicationKnowledgeIngredient, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MedicationKnowledgeIngredient, self).elementProperties()
js.extend([
("isActive", "isActive", bool, False, None, False),
("_isActive", "_isActive",fhirprimitive.FHIRPrimitive, False, None, False),
("itemCodeableConcept", "itemCodeableConcept", codeableconcept.CodeableConcept, False, "item", True),
("itemReference", "itemReference", fhirreference.FHIRReference, False, "item", True),
("strength", "strength", ratio.Ratio, False, None, False),
])
return js
class MedicationKnowledgeKinetics(backboneelement.BackboneElement):
""" The time course of drug absorption, distribution, metabolism and excretion
of a medication from the body.
"""
resource_type = "MedicationKnowledgeKinetics"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.areaUnderCurve = None
""" The drug concentration measured at certain discrete points in time.
List of `Quantity` items (represented as `dict` in JSON). """
self.halfLifePeriod = None
""" Time required for concentration in the body to decrease by half.
Type `Duration` (represented as `dict` in JSON). """
self.lethalDose50 = None
""" The median lethal dose of a drug.
List of `Quantity` items (represented as `dict` in JSON). """
super(MedicationKnowledgeKinetics, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MedicationKnowledgeKinetics, self).elementProperties()
js.extend([
("areaUnderCurve", "areaUnderCurve", quantity.Quantity, True, None, False),
("halfLifePeriod", "halfLifePeriod", duration.Duration, False, None, False),
("lethalDose50", "lethalDose50", quantity.Quantity, True, None, False),
])
return js
class MedicationKnowledgeMedicineClassification(backboneelement.BackboneElement):
""" Categorization of the medication within a formulary or classification
system.
"""
resource_type = "MedicationKnowledgeMedicineClassification"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.classification = None
""" Specific category assigned to the medication.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.type = None
""" The type of category for the medication (for example, therapeutic
classification, therapeutic sub-classification).
Type `CodeableConcept` (represented as `dict` in JSON). """
super(MedicationKnowledgeMedicineClassification, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MedicationKnowledgeMedicineClassification, self).elementProperties()
js.extend([
("classification", "classification", codeableconcept.CodeableConcept, True, None, False),
("type", "type", codeableconcept.CodeableConcept, False, None, True),
])
return js
class MedicationKnowledgeMonitoringProgram(backboneelement.BackboneElement):
""" Program under which a medication is reviewed.
The program under which the medication is reviewed.
"""
resource_type = "MedicationKnowledgeMonitoringProgram"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.name = None
""" Name of the reviewing program.
Type `str`. """
self._name = None
""" extension for fhir primitive name"""
self.type = None
""" Type of program under which the medication is monitored.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(MedicationKnowledgeMonitoringProgram, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MedicationKnowledgeMonitoringProgram, self).elementProperties()
js.extend([
("name", "name", str, False, None, False),
("_name", "_name",fhirprimitive.FHIRPrimitive, False, None, False),
("type", "type", codeableconcept.CodeableConcept, False, None, False),
])
return js
class MedicationKnowledgeMonograph(backboneelement.BackboneElement):
""" Associated documentation about the medication.
"""
resource_type = "MedicationKnowledgeMonograph"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.source = None
""" Associated documentation about the medication.
Type `FHIRReference` (represented as `dict` in JSON). """
self.type = None
""" The category of medication document.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(MedicationKnowledgeMonograph, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MedicationKnowledgeMonograph, self).elementProperties()
js.extend([
("source", "source", fhirreference.FHIRReference, False, None, False),
("type", "type", codeableconcept.CodeableConcept, False, None, False),
])
return js
class MedicationKnowledgePackaging(backboneelement.BackboneElement):
""" Details about packaged medications.
Information that only applies to packages (not products).
"""
resource_type = "MedicationKnowledgePackaging"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.quantity = None
""" The number of product units the package would contain if fully
loaded.
Type `Quantity` (represented as `dict` in JSON). """
self.type = None
""" A code that defines the specific type of packaging that the
medication can be found in.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(MedicationKnowledgePackaging, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MedicationKnowledgePackaging, self).elementProperties()
js.extend([
("quantity", "quantity", quantity.Quantity, False, None, False),
("type", "type", codeableconcept.CodeableConcept, False, None, False),
])
return js
class MedicationKnowledgeRegulatory(backboneelement.BackboneElement):
""" Regulatory information about a medication.
"""
resource_type = "MedicationKnowledgeRegulatory"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.maxDispense = None
""" The maximum number of units of the medicaton that can be dispensed
in a period.
Type `MedicationKnowledgeRegulatoryMaxDispense` (represented as `dict` in JSON). """
self.regulatoryAuthority = None
""" Specifies the authority of the regulation.
Type `FHIRReference` (represented as `dict` in JSON). """
self.schedule = None
""" Specifies the schedule of a medication in jurisdiction.
List of `MedicationKnowledgeRegulatorySchedule` items (represented as `dict` in JSON). """
self.substitution = None
""" Specifies if changes are allowed when dispensing a medication from
a regulatory perspective.
List of `MedicationKnowledgeRegulatorySubstitution` items (represented as `dict` in JSON). """
super(MedicationKnowledgeRegulatory, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MedicationKnowledgeRegulatory, self).elementProperties()
js.extend([
("maxDispense", "maxDispense", MedicationKnowledgeRegulatoryMaxDispense, False, None, False),
("regulatoryAuthority", "regulatoryAuthority", fhirreference.FHIRReference, False, None, True),
("schedule", "schedule", MedicationKnowledgeRegulatorySchedule, True, None, False),
("substitution", "substitution", MedicationKnowledgeRegulatorySubstitution, True, None, False),
])
return js
class MedicationKnowledgeRegulatoryMaxDispense(backboneelement.BackboneElement):
""" The maximum number of units of the medicaton that can be dispensed in a
period.
"""
resource_type = "MedicationKnowledgeRegulatoryMaxDispense"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.period = None
""" The period that applies to the maximum number of units.
Type `Duration` (represented as `dict` in JSON). """
self.quantity = None
""" The maximum number of units of the medicaton that can be dispensed.
Type `Quantity` (represented as `dict` in JSON). """
super(MedicationKnowledgeRegulatoryMaxDispense, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MedicationKnowledgeRegulatoryMaxDispense, self).elementProperties()
js.extend([
("period", "period", duration.Duration, False, None, False),
("quantity", "quantity", quantity.Quantity, False, None, True),
])
return js
class MedicationKnowledgeRegulatorySchedule(backboneelement.BackboneElement):
""" Specifies the schedule of a medication in jurisdiction.
"""
resource_type = "MedicationKnowledgeRegulatorySchedule"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.schedule = None
""" Specifies the specific drug schedule.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(MedicationKnowledgeRegulatorySchedule, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MedicationKnowledgeRegulatorySchedule, self).elementProperties()
js.extend([
("schedule", "schedule", codeableconcept.CodeableConcept, False, None, True),
])
return js
class MedicationKnowledgeRegulatorySubstitution(backboneelement.BackboneElement):
""" Specifies if changes are allowed when dispensing a medication from a
regulatory perspective.
"""
resource_type = "MedicationKnowledgeRegulatorySubstitution"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.allowed = None
""" Specifies if regulation allows for changes in the medication when
dispensing.
Type `bool`. """
self._allowed = None
""" extension for fhir primitive allowed"""
self.type = None
""" Specifies the type of substitution allowed.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(MedicationKnowledgeRegulatorySubstitution, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MedicationKnowledgeRegulatorySubstitution, self).elementProperties()
js.extend([
("allowed", "allowed", bool, False, None, True),
("_allowed", "_allowed",fhirprimitive.FHIRPrimitive, False, None, False),
("type", "type", codeableconcept.CodeableConcept, False, None, True),
])
return js
class MedicationKnowledgeRelatedMedicationKnowledge(backboneelement.BackboneElement):
""" Associated or related medication information.
Associated or related knowledge about a medication.
"""
resource_type = "MedicationKnowledgeRelatedMedicationKnowledge"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.reference = None
""" Associated documentation about the associated medication knowledge.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.type = None
""" Category of medicationKnowledge.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(MedicationKnowledgeRelatedMedicationKnowledge, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MedicationKnowledgeRelatedMedicationKnowledge, self).elementProperties()
js.extend([
("reference", "reference", fhirreference.FHIRReference, True, None, True),
("type", "type", codeableconcept.CodeableConcept, False, None, True),
])
return js
from . import codeableconcept
from . import dosage
from . import duration
from . import fhirreference
from . import money
from . import quantity
from . import ratio
from . import fhirprimitive
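# Hedged usage sketch (not part of the generated file): fhirclient-style
# models initialize from a JSON dict, so a packaging element could be
# built like this, assuming the surrounding package is importable:
#
#     packaging = MedicationKnowledgePackaging({
#         "type": {"text": "bottle"},                   # CodeableConcept JSON
#         "quantity": {"value": 100, "unit": "tablet"}, # Quantity JSON
#     })
#     assert packaging.quantity.value == 100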
|
StarcoderdataPython
|
100485
|
<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
from workflow.notify import notify
from workflow import Workflow
GITHUB_SLUG = 'cocobear/alfred-quick-run'
def main(wf):
import subprocess
import yaml
import os
from subprocess import Popen, PIPE, STDOUT
from yaml.scanner import ScannerError
log = wf.logger.debug
#log = print
args = wf.args
if (len(args) != 1):
print('Usage: python quick_run.py <PATTERN>')
return 1
if wf.update_available:
wf.add_item(u'Update available, press Enter to update',
autocomplete='workflow:update',
valid=False,
icon=('cloud-download.png'))
rgbin = subprocess.check_output("which rg ", shell=True).strip()
query = u"" + args[0]
data_path = os.environ.get('data_path', 'None')
log('\nquery: '+ str([query]) + '\n' + 'data_path: ' + data_path)
results = []
cmds = []
remarks = []
if os.path.isdir(data_path):
p = subprocess.Popen([rgbin, query, data_path], stdout=PIPE, stderr=STDOUT)
output = p.communicate()[0]
output = output.decode('utf-8')
results = output.split('\n')[:-1]
for i in range(len(results)):
cmd = results[i].split(':')[1]
wf.logger.debug(results[i])
wf.add_item(cmd,
subtitle=cmd,
arg= cmd,
valid=True,
icon='icon.png')
wf.send_feedback()
return 0
with open(data_path, 'r') as f:
try:
data = yaml.load(f, Loader=yaml.FullLoader)
if not data:
wf.add_item('Config file is empty',
subtitle=u'Press Enter to open the config file for editing',
arg= 'open',
valid=True,
icon='error.png')
wf.send_feedback()
return 1
for i in data:
for j in i['values']:
cmds.append(j['cmd'])
remarks.append(j['remark'])
except (KeyError, ScannerError) as e:
log(e)
log(e.note)
log(e.problem)
log(e.problem_mark)
wf.add_item(u'Error: ' + str(e.problem_mark),
subtitle=u'Press Enter to open the config file for editing',
arg= 'open',
valid=True,
icon='error.png')
wf.send_feedback()
return 1
#wf.logger.debug(u"""\n""".join(cmds).encode('utf-8'))
p = subprocess.Popen([rgbin,'--line-number', query], stdin=PIPE, stdout=PIPE, stderr=STDOUT)
output = p.communicate(input=u"""\n""".join(cmds).encode('utf-8'))[0]
wf.logger.debug('cmds search result: '+ output)
log(type(output))
if not len(output):
p = subprocess.Popen("%s --line-number '%s' " %(rgbin, query),
shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
output = p.communicate(input=u"""\n""".join(remarks).encode('utf-8'))[0]
wf.logger.debug('remarks search result: '+ output)
if len(output):
results = output.split('\n')[:-1]
for i in range(len(results)):
idx = int(results[i].split(':')[0])-1
wf.add_item(remarks[idx],
subtitle=cmds[idx],
arg = cmds[idx],
valid=True,
icon="icon.png")
wf.send_feedback()
if not len(output):
wf.add_item(u'Open the config file', subtitle=u'The config file is saved in the install directory by default',
arg= 'open', valid=True, icon='icon.png')
wf.send_feedback()
return 0
if __name__ == '__main__':
wf = Workflow(update_settings={'github_slug': GITHUB_SLUG})
sys.exit(wf.run(main))
|
StarcoderdataPython
|
1750199
|
<filename>tests/test_students_delete_parametrized.py
import unittest
from src.students import Students
from parameterized import parameterized, parameterized_class
from src.exampleData import Data
class StudentsParameterizedPackage(unittest.TestCase):
def setUp(self):
self.tmp = Students(Data().example)
@parameterized.expand([
(1, "Kasia", "Polak", ('1', 'Kasia', 'Polak')),
])
def test_student_delete_expand(self, id_student, name, surname, expected):
self.assertNotIn(self.tmp.deleteStudent(id_student, name, surname), expected)
@parameterized.expand([
(3, "Kasia", "Polak", "There_is_not_such_student"),
])
def test_student_delete_exceptions_expand(self, id_student, name, surname, expected):
self.assertRaisesRegex(Exception, expected, self.tmp.deleteStudent, id_student, name, surname)
@parameterized_class(("id", "name", "surname", "expected"), [
(1, "Kasia", "Polak", ('1', 'Kasia', 'Polak')),
])
class StudentParameterizedPackageClass(unittest.TestCase):
def setUp(self):
self.tmp = Students(Data().example)
def test_student_delete_class(self):
self.assertNotIn(self.tmp.deleteStudent(self.id, self.name, self.surname), self.expected)
@parameterized_class(("id", "name", "surname", "expected"), [
(3, "Kasia", "Polak", "There_is_not_such_student"),
])
class StudentParameterizedExceptionsPackageClass(unittest.TestCase):
def setUp(self):
self.tmp = Students(Data().example)
def test_student_delete_exception_class(self):
self.assertRaisesRegex(Exception, self.expected, self.tmp.deleteStudent, self.id, self.name, self.surname)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
3376967
|
# -*- coding: utf-8 -*-
# Implements rendering text onto curses windows
from curses import color_pair
from .wincontent import WinContent
class WinText(WinContent):
def __init__(self, targeted_window, name, description=''):
WinContent.__init__(self, targeted_window, name, description)
self.text = ''
def new_text(self, text):
self.text = text
def draw(self):
WinContent.draw(self)
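# Wrap the text into rows of (winwidth - 2) characters to leave a
# one-cell border; anything past the window height is dropped and a
# warning line is shown instead.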
line_total = int(len(self.text) / (self.winwidth - 2))
if line_total > (self.winheight - 3):
line_total = self.winheight - 3
self.window.addstr(self.winheight - 1, 1, 'text was truncated', color_pair(2))
for line_num in range(0, line_total + 1):
self.window.addstr(1 + line_num, 1, self.text[line_num * (self.winwidth - 2):(line_num + 1) * (self.winwidth - 2)])
self.window.noutrefresh()
|
StarcoderdataPython
|
3204734
|
<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2019-09-21 21:04:59
# @Author : <NAME> (<EMAIL>)
# @Link : https://github.com/1160300911
# @Version : 1.0s
from itertools import accumulate
from bisect import bisect_right
import random
def basic_selection(population):
"""
:param population: Population Object
:return: Individual Object
"""
return random.choice(population.individuals)
def fitnetss_proporitional(population):
"""
:param population: Population Object
:return: Individual Object
"""
# Normalize fitness values for all individuals.
fits = [(1 / population.get_disctance(indv)) for indv in population.individuals]
min_fit = min(fits)
fits = [(fit - min_fit) for fit in fits]
# Create roulette wheel.
sum_fit = sum(fits)
wheel = list(accumulate([(fit / sum_fit) for fit in fits]))
# Select an individual.
idx = bisect_right(wheel, random.random())
return population.individuals[idx]
def rank_based(population, pmin=0.1, pmax=0.9):
"""
:param population: Population Object
:param pmin: minimum probability of being selected
:param pmax: maximum probability of being selected
:return: Individual Object
"""
# Initialize parameters.
n = population.unit_num
sorted_indvs = sorted(population.individuals, key=population.get_disctance, reverse=True)
# Assign selection probabilities linearly.
p = lambda i: pmin + (pmax - pmin) * (i - 1) / (n - 1)
ps = [p(i) for i in range(1, n+1)]
# Normalize probabilities.
sum_p = sum(ps)
wheel = list(accumulate([(p / sum_p) for p in ps]))
# Select an individual.
idx = bisect_right(wheel, random.random())
return sorted_indvs[idx]
def tournament_selection(population, tournament_size=2):
"""
:param population: Population Object
:param tournament_size: number of individuals participating in the tournament (default is 2)
:return: Individual Object
"""
# Competition function.
complete = lambda competitors: min(competitors, key=population.get_disctance)
# Check validity of tournament size.
if tournament_size > len(population.individuals):
msg = 'tournament size({}) is larger than population size({})'
raise ValueError(msg.format(tournament_size, len(population.individuals)))
# Pick the winner of the group and return it.
competitors = random.sample(population.individuals, tournament_size)
return complete(competitors)
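if __name__ == '__main__':
    # Hedged usage sketch: the real Population class lives elsewhere in the
    # project, so this stub only mimics the interface the selectors rely on
    # (an `individuals` list, `unit_num`, and a `get_disctance` scorer).
    class _StubPopulation(object):
        def __init__(self, individuals):
            self.individuals = individuals
            self.unit_num = len(individuals)

        def get_disctance(self, indv):
            # Treat each individual as its own tour length for the demo.
            return float(indv)

    pop = _StubPopulation([12.0, 7.5, 30.2, 9.9])
    print(tournament_selection(pop, tournament_size=2))  # shortest of a random pair
    print(rank_based(pop))                               # biased toward short tours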
|
StarcoderdataPython
|
60250
|
import os
import sys
import json
import xmlsettings  # third-party package that provides XMLSettings; installed separately
cfg = xmlsettings.XMLSettings(os.path.join(sys.path[0],'settings.xml'))
with open(os.path.join(sys.path[0],'config.json')) as data_file:
data = json.load(data_file)
|
StarcoderdataPython
|
1605068
|
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class Power100Power(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
Power100Power - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'odata_context': 'Odata400Context',
'odata_id': 'Odata400Id',
'odata_type': 'Odata400Type',
'description': 'ResourceDescription',
'id': 'ResourceId',
'name': 'ResourceName',
'oem': 'ResourceOem',
'power_control': 'list[Power100PowerControl]',
'power_controlodata_count': 'Odata400Count',
'power_controlodata_navigation_link': 'Odata400IdRef',
'power_supplies': 'list[Power100PowerSupply]',
'power_suppliesodata_count': 'Odata400Count',
'power_suppliesodata_navigation_link': 'Odata400IdRef',
'redundancy': 'list[RedundancyRedundancy]',
'redundancyodata_count': 'Odata400Count',
'redundancyodata_navigation_link': 'Odata400IdRef',
'voltages': 'list[Power100Voltage]',
'voltagesodata_count': 'Odata400Count',
'voltagesodata_navigation_link': 'Odata400IdRef'
}
self.attribute_map = {
'odata_context': '@odata.context',
'odata_id': '@odata.id',
'odata_type': '@odata.type',
'description': 'Description',
'id': 'Id',
'name': 'Name',
'oem': 'Oem',
'power_control': 'PowerControl',
'power_controlodata_count': '<EMAIL>',
'power_controlodata_navigation_link': '<EMAIL>',
'power_supplies': 'PowerSupplies',
'power_suppliesodata_count': '<EMAIL>',
'power_suppliesodata_navigation_link': '<EMAIL>',
'redundancy': 'Redundancy',
'redundancyodata_count': '<EMAIL>',
'redundancyodata_navigation_link': '<EMAIL>',
'voltages': 'Voltages',
'voltagesodata_count': '<EMAIL>',
'voltagesodata_navigation_link': '<EMAIL>'
}
self._odata_context = None
self._odata_id = None
self._odata_type = None
self._description = None
self._id = None
self._name = None
self._oem = None
self._power_control = None
self._power_controlodata_count = None
self._power_controlodata_navigation_link = None
self._power_supplies = None
self._power_suppliesodata_count = None
self._power_suppliesodata_navigation_link = None
self._redundancy = None
self._redundancyodata_count = None
self._redundancyodata_navigation_link = None
self._voltages = None
self._voltagesodata_count = None
self._voltagesodata_navigation_link = None
@property
def odata_context(self):
"""
Gets the odata_context of this Power100Power.
:return: The odata_context of this Power100Power.
:rtype: Odata400Context
"""
return self._odata_context
@odata_context.setter
def odata_context(self, odata_context):
"""
Sets the odata_context of this Power100Power.
:param odata_context: The odata_context of this Power100Power.
:type: Odata400Context
"""
self._odata_context = odata_context
@property
def odata_id(self):
"""
Gets the odata_id of this Power100Power.
:return: The odata_id of this Power100Power.
:rtype: Odata400Id
"""
return self._odata_id
@odata_id.setter
def odata_id(self, odata_id):
"""
Sets the odata_id of this Power100Power.
:param odata_id: The odata_id of this Power100Power.
:type: Odata400Id
"""
self._odata_id = odata_id
@property
def odata_type(self):
"""
Gets the odata_type of this Power100Power.
:return: The odata_type of this Power100Power.
:rtype: Odata400Type
"""
return self._odata_type
@odata_type.setter
def odata_type(self, odata_type):
"""
Sets the odata_type of this Power100Power.
:param odata_type: The odata_type of this Power100Power.
:type: Odata400Type
"""
self._odata_type = odata_type
@property
def description(self):
"""
Gets the description of this Power100Power.
:return: The description of this Power100Power.
:rtype: ResourceDescription
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this Power100Power.
:param description: The description of this Power100Power.
:type: ResourceDescription
"""
self._description = description
@property
def id(self):
"""
Gets the id of this Power100Power.
:return: The id of this Power100Power.
:rtype: ResourceId
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this Power100Power.
:param id: The id of this Power100Power.
:type: ResourceId
"""
self._id = id
@property
def name(self):
"""
Gets the name of this Power100Power.
:return: The name of this Power100Power.
:rtype: ResourceName
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this Power100Power.
:param name: The name of this Power100Power.
:type: ResourceName
"""
self._name = name
@property
def oem(self):
"""
Gets the oem of this Power100Power.
This is the manufacturer/provider specific extension moniker used to divide the Oem object into sections.
:return: The oem of this Power100Power.
:rtype: ResourceOem
"""
return self._oem
@oem.setter
def oem(self, oem):
"""
Sets the oem of this Power100Power.
This is the manufacturer/provider specific extension moniker used to divide the Oem object into sections.
:param oem: The oem of this Power100Power.
:type: ResourceOem
"""
self._oem = oem
@property
def power_control(self):
"""
Gets the power_control of this Power100Power.
This is the definition for power control function (power reading/limiting).
:return: The power_control of this Power100Power.
:rtype: list[Power100PowerControl]
"""
return self._power_control
@power_control.setter
def power_control(self, power_control):
"""
Sets the power_control of this Power100Power.
This is the definition for power control function (power reading/limiting).
:param power_control: The power_control of this Power100Power.
:type: list[Power100PowerControl]
"""
self._power_control = power_control
@property
def power_controlodata_count(self):
"""
Gets the power_controlodata_count of this Power100Power.
:return: The power_controlodata_count of this Power100Power.
:rtype: Odata400Count
"""
return self._power_controlodata_count
@power_controlodata_count.setter
def power_controlodata_count(self, power_controlodata_count):
"""
Sets the power_controlodata_count of this Power100Power.
:param power_controlodata_count: The power_controlodata_count of this Power100Power.
:type: Odata400Count
"""
self._power_controlodata_count = power_controlodata_count
@property
def power_controlodata_navigation_link(self):
"""
Gets the power_controlodata_navigation_link of this Power100Power.
:return: The power_controlodata_navigation_link of this Power100Power.
:rtype: Odata400IdRef
"""
return self._power_controlodata_navigation_link
@power_controlodata_navigation_link.setter
def power_controlodata_navigation_link(self, power_controlodata_navigation_link):
"""
Sets the power_controlodata_navigation_link of this Power100Power.
:param power_controlodata_navigation_link: The power_controlodata_navigation_link of this Power100Power.
:type: Odata400IdRef
"""
self._power_controlodata_navigation_link = power_controlodata_navigation_link
@property
def power_supplies(self):
"""
Gets the power_supplies of this Power100Power.
Details of the power supplies associated with this system or device
:return: The power_supplies of this Power100Power.
:rtype: list[Power100PowerSupply]
"""
return self._power_supplies
@power_supplies.setter
def power_supplies(self, power_supplies):
"""
Sets the power_supplies of this Power100Power.
Details of the power supplies associated with this system or device
:param power_supplies: The power_supplies of this Power100Power.
:type: list[Power100PowerSupply]
"""
self._power_supplies = power_supplies
@property
def power_suppliesodata_count(self):
"""
Gets the power_suppliesodata_count of this Power100Power.
:return: The power_suppliesodata_count of this Power100Power.
:rtype: Odata400Count
"""
return self._power_suppliesodata_count
@power_suppliesodata_count.setter
def power_suppliesodata_count(self, power_suppliesodata_count):
"""
Sets the power_suppliesodata_count of this Power100Power.
:param power_suppliesodata_count: The power_suppliesodata_count of this Power100Power.
:type: Odata400Count
"""
self._power_suppliesodata_count = power_suppliesodata_count
@property
def power_suppliesodata_navigation_link(self):
"""
Gets the power_suppliesodata_navigation_link of this Power100Power.
:return: The power_suppliesodata_navigation_link of this Power100Power.
:rtype: Odata400IdRef
"""
return self._power_suppliesodata_navigation_link
@power_suppliesodata_navigation_link.setter
def power_suppliesodata_navigation_link(self, power_suppliesodata_navigation_link):
"""
Sets the power_suppliesodata_navigation_link of this Power100Power.
:param power_suppliesodata_navigation_link: The power_suppliesodata_navigation_link of this Power100Power.
:type: Odata400IdRef
"""
self._power_suppliesodata_navigation_link = power_suppliesodata_navigation_link
@property
def redundancy(self):
"""
Gets the redundancy of this Power100Power.
Redundancy information for the power subsystem of this system or device
:return: The redundancy of this Power100Power.
:rtype: list[RedundancyRedundancy]
"""
return self._redundancy
@redundancy.setter
def redundancy(self, redundancy):
"""
Sets the redundancy of this Power100Power.
Redundancy information for the power subsystem of this system or device
:param redundancy: The redundancy of this Power100Power.
:type: list[RedundancyRedundancy]
"""
self._redundancy = redundancy
@property
def redundancyodata_count(self):
"""
Gets the redundancyodata_count of this Power100Power.
:return: The redundancyodata_count of this Power100Power.
:rtype: Odata400Count
"""
return self._redundancyodata_count
@redundancyodata_count.setter
def redundancyodata_count(self, redundancyodata_count):
"""
Sets the redundancyodata_count of this Power100Power.
:param redundancyodata_count: The redundancyodata_count of this Power100Power.
:type: Odata400Count
"""
self._redundancyodata_count = redundancyodata_count
@property
def redundancyodata_navigation_link(self):
"""
Gets the redundancyodata_navigation_link of this Power100Power.
:return: The redundancyodata_navigation_link of this Power100Power.
:rtype: Odata400IdRef
"""
return self._redundancyodata_navigation_link
@redundancyodata_navigation_link.setter
def redundancyodata_navigation_link(self, redundancyodata_navigation_link):
"""
Sets the redundancyodata_navigation_link of this Power100Power.
:param redundancyodata_navigation_link: The redundancyodata_navigation_link of this Power100Power.
:type: Odata400IdRef
"""
self._redundancyodata_navigation_link = redundancyodata_navigation_link
@property
def voltages(self):
"""
Gets the voltages of this Power100Power.
This is the definition for voltage sensors.
:return: The voltages of this Power100Power.
:rtype: list[Power100Voltage]
"""
return self._voltages
@voltages.setter
def voltages(self, voltages):
"""
Sets the voltages of this Power100Power.
This is the definition for voltage sensors.
:param voltages: The voltages of this Power100Power.
:type: list[Power100Voltage]
"""
self._voltages = voltages
@property
def voltagesodata_count(self):
"""
Gets the voltagesodata_count of this Power100Power.
:return: The voltagesodata_count of this Power100Power.
:rtype: Odata400Count
"""
return self._voltagesodata_count
@voltagesodata_count.setter
def voltagesodata_count(self, voltagesodata_count):
"""
Sets the voltagesodata_count of this Power100Power.
:param voltagesodata_count: The voltagesodata_count of this Power100Power.
:type: Odata400Count
"""
self._voltagesodata_count = voltagesodata_count
@property
def voltagesodata_navigation_link(self):
"""
Gets the voltagesodata_navigation_link of this Power100Power.
:return: The voltagesodata_navigation_link of this Power100Power.
:rtype: Odata400IdRef
"""
return self._voltagesodata_navigation_link
@voltagesodata_navigation_link.setter
def voltagesodata_navigation_link(self, voltagesodata_navigation_link):
"""
Sets the voltagesodata_navigation_link of this Power100Power.
:param voltagesodata_navigation_link: The voltagesodata_navigation_link of this Power100Power.
:type: Odata400IdRef
"""
self._voltagesodata_navigation_link = voltagesodata_navigation_link
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
StarcoderdataPython
|
1746356
|
<filename>solver/nmfsvd.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
import numpy
def from_positive_part(may_negative):
"""
>>> from_positive_part(numpy.array([1, -2, 3]))
array([ 1., 0., 3.])
"""
return (may_negative + abs(may_negative))/2.0
def from_negative_part(may_negative):
"""
>>> from_negative_part(numpy.array([1, -2, 3]))
array([ 0., 2., 0.])
"""
return (may_negative - abs(may_negative))/(-2.0)
def guess_by_svd(A, k):
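# Non-negative initialization from a truncated SVD: each singular pair is
# split into its positive and negative parts and the dominant side is kept,
# in the spirit of the NNDSVD scheme (Boutsidis & Gallopoulos).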
# A should be a non-negative matrix
nz = numpy.nonzero(A < 0)
if nz[0].any():
raise ValueError('non-zero elements in A',
[ ((r, c), v) for v, r, c in zip(A[nz], *nz)])
(m, n) = A.shape
if min(m, n, k) != k:
raise ValueError('rank too large', k)
W = numpy.zeros( (m, k), dtype = float)
H = numpy.zeros( (k, n), dtype = float)
# 1st SVD
U, s, V = numpy.linalg.svd(A, full_matrices = False, compute_uv = True)
#logging.error(['shape of U', U.shape])
#logging.error(['shape of V', V.shape])
#logging.info(["SVD S", s])
U = U.astype(numpy.float64)
s = s.astype(numpy.float64)
V = V.astype(numpy.float64)
# every component must have the same sign
# (Perron-Frobenius theorem)
i = 0
if numpy.average(U[:, i])>= 0:
W[:, 0] = U[:, i]
H[0, :] = s[0] * V[i, :]
else:
W[:, 0] = -U[:, i]
H[0, :] = -s[0] * V[i, :]
for i in range(1, k):
uu, vv = U[:, i], V[i, :]
uup = from_positive_part(uu)
uun = from_negative_part(uu)
vvp = from_positive_part(vv)
vvn = from_negative_part(vv)
n_uup = numpy.linalg.norm(uup)
n_vvp = numpy.linalg.norm(vvp)
n_uun = numpy.linalg.norm(uun)
n_vvn = numpy.linalg.norm(vvn)
termp = n_uup * n_vvp
termn = n_uun * n_vvn
if termp >= termn: # choose which side to use
# use positive half
W[:, i] = uup / n_uup
H[i, :] = s[i] * termp * vvp / n_vvp
else:
# use negative half
W[:, i] = uun / n_uun
H[i, :] = s[i] * termn * vvn / n_vvn
return W, H, s
def test_k():
"""
>>> guess_by_svd(numpy.array([[1,2,3], [2,1,2]]), 20)
Traceback (most recent call last):
...
ValueError: ('rank too large', 20)
"""
def test_negative():
"""
>>> guess_by_svd(numpy.array([[1,2,3], [2,-1,-2]]), 2)
Traceback (most recent call last):
...
ValueError: ('non-zero elements in A', [((1, 1), -1), ((1, 2), -2)])
"""
def test_a():
"""
>>> A = numpy.array([[1,0,1,0], [0,1,0,1], [2, 0, 2, 0]])
>>> W, H, s = guess_by_svd(A, 2)
>>> reconstructed = numpy.dot(W, H)
>>> numpy.allclose(A, reconstructed, atol = .00001)
True
"""
if __name__ == "__main__":
import doctest
doctest.testmod()
|
StarcoderdataPython
|
1791836
|
<filename>LeetCode/Minimum Path Sum.py
from typing import List

class Solution:
def minPathSum(self, grid: List[List[int]]) -> int:
numberOfRows = len(grid)
numberOfColumns = len(grid[0])
for row in range(numberOfRows):
for col in range(numberOfColumns):
if row == 0 and col == 0:
continue
elif row == 0 and col != 0:
grid[row][col] += grid[row][col - 1]
elif row != 0 and col == 0:
grid[row][col] += grid[row - 1][col]
else:
grid[row][col] += min(grid[row][col - 1], grid[row - 1][col])
return grid[numberOfRows - 1][numberOfColumns - 1]
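# Quick standalone check (hedged sketch): on the classic 3x3 grid the
# cheapest top-left-to-bottom-right path is 1 -> 3 -> 1 -> 1 -> 1 = 7.
if __name__ == "__main__":
    assert Solution().minPathSum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7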
|
StarcoderdataPython
|
3220622
|
import genprog.core as gp
import genprog.evolution as gpevo
from typing import Dict, List, Any, Set, Optional, Union, Tuple
import numpy as np
import vision_genprog.utilities
import cv2
import logging
possible_types = ['grayscale_image', 'color_image', 'binary_image',
'float', 'int', 'bool', 'vector2', 'kernel3x3']
# parametersList = [minFloat, maxFloat, minInt, maxInt, width, height]
class Interpreter(gp.Interpreter):
def __init__(self, primitive_functions_tree, image_shapeHWC):
super().__init__(primitive_functions_tree)
self.image_shapeHWC = image_shapeHWC
def FunctionDefinition(self, functionName: str, argumentsList: List[Any]) -> Any:
if functionName == 'threshold':
_, thresholdedImg = cv2.threshold(argumentsList[0], argumentsList[1], 255, cv2.THRESH_BINARY)
return thresholdedImg
elif functionName == 'mask_sum':
return cv2.countNonZero(argumentsList[0])
elif functionName.startswith('tunnel'):
return argumentsList[0]
elif functionName == 'concat_floats':
vector2 = np.array([argumentsList[0], argumentsList[1]])
return vector2
elif functionName == 'mask_average':
mask_shapeHW = argumentsList[0].shape
return cv2.countNonZero(argumentsList[0])/(mask_shapeHW[0] * mask_shapeHW[1])
elif functionName == 'sobel1_x':
sobelImg = cv2.Sobel(argumentsList[0], ddepth=cv2.CV_32F, dx=1, dy=0, ksize=3)
return (128 + argumentsList[1] * sobelImg).astype(np.uint8)
elif functionName == 'sobel1_y':
sobelImg = cv2.Sobel(argumentsList[0], ddepth=cv2.CV_32F, dx=0, dy=1, ksize=3)
return (128 + argumentsList[1] * sobelImg).astype(np.uint8)
elif functionName == 'erode':
erosion_kernel = np.ones((3, 3), np.uint8)
return cv2.erode(argumentsList[0], erosion_kernel)
elif functionName == 'dilate':
dilation_kernel = np.ones((3, 3), np.uint8)
return cv2.dilate(argumentsList[0], dilation_kernel)
elif functionName == 'mask_image':
return cv2.min(argumentsList[0], argumentsList[1])
elif functionName == 'image_average0to1':
return np.mean(argumentsList[0])/255
elif functionName == 'blur3':
return cv2.blur(argumentsList[0], ksize=(3, 3))
elif functionName == 'laplacian1':
return cv2.Laplacian(argumentsList[0], ddepth=cv2.CV_8U, ksize=1)
elif functionName == 'laplacian3':
return cv2.Laplacian(argumentsList[0], ddepth=cv2.CV_8U, ksize=3)
elif functionName == 'min':
return cv2.min(argumentsList[0], argumentsList[1])
elif functionName == 'max':
return cv2.max(argumentsList[0], argumentsList[1])
elif functionName == 'linear_combination':
# w0 * a0 + w1 * a1 + b
return (argumentsList[0] * argumentsList[1] + argumentsList[2] * argumentsList[3] + argumentsList[4]).astype(np.uint8)
elif functionName == 'intersection':
return cv2.min(argumentsList[0], argumentsList[1])
elif functionName == 'union':
return cv2.max(argumentsList[0], argumentsList[1])
elif functionName == 'inverse_mask':
return 255 - argumentsList[0]
elif functionName == 'scharr1_x':
scharrImg = cv2.Scharr(argumentsList[0], ddepth=cv2.CV_32F, dx=1, dy=0)
return (128 + argumentsList[1] * scharrImg).astype(np.uint8)
elif functionName == 'scharr1_y':
scharrImg = cv2.Scharr(argumentsList[0], ddepth=cv2.CV_32F, dx=0, dy=1)
return (128 + argumentsList[1] * scharrImg).astype(np.uint8)
elif functionName == 'correlation3x3':
return cv2.filter2D(argumentsList[0], ddepth=cv2.CV_8U, kernel=argumentsList[1])
elif functionName == 'average_kernel3x3':
return (argumentsList[0] + argumentsList[1])/2
elif functionName == 'max_kernel3x3':
return cv2.max(argumentsList[0], argumentsList[1])
elif functionName == 'min_kernel3x3':
return cv2.min(argumentsList[0], argumentsList[1])
elif functionName == 'intersection_over_union':
intersectionImg = cv2.min(argumentsList[0], argumentsList[1])
unionImg = cv2.max(argumentsList[0], argumentsList[1])
union_area = cv2.countNonZero(unionImg)
if union_area == 0:
return 0
else:
return cv2.countNonZero(intersectionImg)/union_area
elif functionName == 'canny':
return cv2.Canny(argumentsList[0], argumentsList[1], argumentsList[2])
elif functionName == 'corner_harris':
harrisImg = cv2.cornerHarris(argumentsList[0], blockSize=2, ksize=3, k=0.04)
harris_min = np.min(harrisImg)
harris_max = np.max(harrisImg)
if harris_max == harris_min:
harris_normalized = 255 * (harrisImg - harris_min)
else:
harris_normalized = 255 * (harrisImg - harris_min)/(harris_max - harris_min)
return harris_normalized.astype(np.uint8)
else:
raise NotImplementedError("image_processing.Interpreter.FunctionDefinition(): Not implemented function '{}'".format(functionName))
def CreateConstant(self, returnType: str, parametersList: Optional[ List[Any] ] ) -> str:
if returnType == 'grayscale_image':
if len(parametersList) < 6:
raise ValueError(
"image_processing.Interpreter.CreateConstant(): Creating a '{}': len(parametersList) ({}) < 6".format(
returnType, len(parametersList)))
min_value = np.random.randint(parametersList[2], parametersList[3])
max_value = np.random.randint(parametersList[2], parametersList[3])
random_img = np.random.randint(min(min_value, max_value), max(min_value, max_value), self.image_shapeHWC)
return vision_genprog.utilities.ArrayToString(random_img)
elif returnType == 'color_image':
if len(parametersList) < 6:
raise ValueError(
"image_processing.Interpreter.CreateConstant(): Creating a '{}': len(parametersList) ({}) < 6".format(
returnType, len(parametersList)))
#black_img = np.zeros((parametersList[5], parametersList[4], 3), dtype=np.uint8)
min_value = np.random.randint(parametersList[2], parametersList[3])
max_value = np.random.randint(parametersList[2], parametersList[3])
random_img = np.random.randint(min(min_value, max_value), max(min_value, max_value), self.image_shapeHWC)
return vision_genprog.utilities.ArrayToString(random_img)
elif returnType == 'binary_image':
if len(parametersList) < 6:
raise ValueError(
"image_processing.Interpreter.CreateConstant(): Creating a '{}': len(parametersList) ({}) < 6".format(
returnType, len(parametersList)))
random_img = 255 * np.random.randint(0, 2, self.image_shapeHWC)
return vision_genprog.utilities.ArrayToString(random_img)
elif returnType == 'kernel3x3':
kernel = np.random.uniform(parametersList[0], parametersList[1], (3, 3))
kernel = (kernel - kernel.mean())/kernel.std() # Standardization
return vision_genprog.utilities.ArrayToString(kernel)
elif returnType == 'float':
if len(parametersList) < 2:
raise ValueError("image_processing.Interpreter.CreateConstant(): Creating a '{}': len(parametersList) ({}) < 2".format(returnType, len(parametersList)))
value = np.random.uniform(parametersList[0], parametersList[1])
return str(value)
elif returnType == 'int':
if len(parametersList) < 4:
raise ValueError(
"image_processing.Interpreter.CreateConstant(): Creating a '{}': len(parametersList) ({}) < 4".format(
returnType, len(parametersList)))
value = np.random.randint(parametersList[2], parametersList[3] + 1)
return str(value)
elif returnType == 'bool':
if np.random.randint(0, 2) == 0:
return 'true'
else:
return 'false'
else:
raise NotImplementedError("image_processing.Interpreter.CreateConstant(): Not implemented return type '{}'".format(returnType))
def PossibleTypes(self) -> List[str]:
return possible_types
def TypeConverter(self, type: str, value: str):
if type == 'grayscale_image' or type == 'color_image' or type == 'binary_image':
array1D = vision_genprog.utilities.StringTo1DArray(value)
return np.reshape(array1D.astype(np.uint8), self.image_shapeHWC)
elif type == 'int':
return int(value)
elif type == 'float':
return float(value)
elif type == 'bool':
if value.upper() == 'TRUE':
return True
else:
return False
elif type == 'vector2':
array1D = vision_genprog.utilities.StringTo1DArray(value)
return np.reshape(array1D, (2,))
elif type == 'kernel3x3':
array1D = vision_genprog.utilities.StringTo1DArray(value)
return np.reshape(array1D, (3, 3))
else:
raise NotImplementedError("image_processing.Interpreter.TypeConverter(): Not implemented type '{}'".format(type))
|
StarcoderdataPython
|
1734260
|
#!/usr/bin/env python
from . import BaseItem
from . import FolderPainter
from . import FolderEditor
class Delegate (BaseItem.Delegate):
def __init__ (self, parent, theme):
super(Delegate, self).__init__(parent, theme)
self.Item = FolderPainter.Item(theme)
def createEditor (self, parent, option, index):
editor = FolderEditor.Editor(parent, index, self.theme)
editor.Item.controlMode = self.parent().controlMode
editor.clicked.connect(self.clickAction)
editor.leaveEditor.connect(self.leaveAction)
editor.createFolderQuery.connect(self.createFolderQuery)
editor.createFolder.connect(self.createFolderAction)
editor.link.connect(self.linkAction)
return editor
def createFolderQuery (self, index):
self.leaveAction()
self.parent().createFolderQueryBridge(index)
def createFolderAction (self, index, name):
self.parent().createFolderBridge(index, name)
def linkAction (self, index):
self.parent().linkBridge(index)
|
StarcoderdataPython
|
4842781
|
import os
import sys
import math
target = 'tagged'
pathstump = "/Users/Torri/Documents/Grad stuff/Thesis stuff/Data - Novels/Analysis/"
d = {}
tokens = 0
types = 0
proportion = 0
shannon = 0
evenness = 0
evenness2 = 0
effective = 0
for dirname, dirs, files in os.walk('.'):
if target in dirname:
author = dirname.split(os.sep)[1]
print author
output = os.path.join(dirname, '..', author + "_shannon_evenness.txt")
outfile = open(output, 'w')
print>>outfile, "Book\tTokens\tTypes\tEffective Types\tShannon\tEvenness\tEvenness2"
# .\James\tagged\..\James_MAT_50.txt
for filename in files:
d = {}
tokens = 0
types = 0
proportion = 0
shannon = 0
evenness = 0
evenness2 = 0
effective = 0
if '_tagged.txt' in filename:
book = filename.replace('_tagged.txt', '')
data = open(pathstump + author + "/" + "tagged/" + filename, 'r')
for line in data:
line = line.rstrip('\r\n')
line = line.split('\t')
lemma = line[2]
tokens += 1
if lemma in d:
d[lemma] += 1
else:
d[lemma] = 1
types += 1
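# Shannon diversity over lemma proportions p: H = -sum(p * ln p);
# evenness = H / ln(S) for S types, the effective number of types
# is exp(H), and evenness2 = exp(H) / S.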
for key in d:
toks_of_this_type = int(d[key])
proportion = float(toks_of_this_type) / tokens
shannon = shannon + (proportion * math.log(proportion))
shannon = shannon * -1
evenness = shannon / math.log(types)
effective = math.exp(shannon)
evenness2 = effective / types
print>>outfile, "%s\t%s\t%s\t%.2f\t%.3f\t%.3f\t%.3f" % (book, tokens, types, effective, shannon, evenness, evenness2)
|
StarcoderdataPython
|
1623871
|
import pytest
from mitmproxy.net.udp import MAX_DATAGRAM_SIZE, DatagramReader
@pytest.mark.asyncio
async def test_reader():
reader = DatagramReader()
addr = ('8.8.8.8', 53)
reader.feed_data(b'First message', addr)
with pytest.raises(AssertionError):
reader.feed_data(bytearray(MAX_DATAGRAM_SIZE + 1), addr)
reader.feed_data(b'Second message', addr)
reader.feed_eof()
assert await reader.read(65535) == b'First message'
with pytest.raises(AssertionError):
await reader.read(MAX_DATAGRAM_SIZE - 1)
assert await reader.read(65535) == b'Second message'
assert not await reader.read(65535)
|
StarcoderdataPython
|
4806520
|
<reponame>neuroailab/ffcv
from abc import ABCMeta, abstractmethod
from contextlib import AbstractContextManager
class Benchmark(AbstractContextManager, metaclass=ABCMeta):
def __init__(self, **kwargs):
pass
@abstractmethod
def run(self):
raise NotImplementedError()
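if __name__ == "__main__":
    # Hedged usage sketch: a concrete benchmark implements `run` plus the
    # context-manager exit hook; the timing logic here is illustrative only,
    # not part of the library.
    import time

    class SleepBenchmark(Benchmark):
        def __exit__(self, exc_type, exc_value, traceback):
            return None

        def run(self):
            start = time.perf_counter()
            time.sleep(0.01)
            return time.perf_counter() - start

    with SleepBenchmark() as bench:
        print(bench.run())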
|
StarcoderdataPython
|
3243819
|
<reponame>nubot-nudt/BCI_Multi_Robot<gh_stars>10-100
# $Id: PlaybackSourceModule.py 2898 2010-07-08 19:09:30Z jhill $
#
# This file is part of the BCPy2000 framework, a Python framework for
# implementing modules that run on top of the BCI2000 <http://bci2000.org/>
# platform, for the purpose of realtime biosignal processing.
#
# Copyright (C) 2007-10 <NAME>, <NAME>,
# <NAME>, <NAME>
#
# <EMAIL>
#
# The BCPy2000 framework is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os,sys,re
import numpy
from BCI2000Tools.FileReader import bcistream
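# Note: names like EndUserError and BciGenericSource are not imported here;
# the BCPy2000 framework injects them into the module namespace when it
# loads a developer file.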
class PlaybackError(EndUserError): pass
#################################################################
#################################################################
class BciSource(BciGenericSource):
#############################################################
def Description(self):
return 'plays back a pre-recorded .dat file in "slave" mode'
#############################################################
def Construct(self):
parameters = [
"Source:Playback string PlaybackFileName= % % % % // play back the named BCI2000 file (inputfile)",
"Source:Playback string PlaybackStart= 00:00:00.000 % % % // offset at which to start",
"Source:Playback matrix TestSignals= 0 { Channel Frequency Amplitude } % % % // sinusoidal signals may be added to the source signal here",
]
states = [
"SignalStopRun 1 0 0 0",
]
return (parameters, states)
#############################################################
def Preflight(self, inprop):
fn = self.params['PlaybackFileName']
# start with environmental variables but add a couple of extra possible expansions:
# (1) %DATA% or $DATA maps to the data subdir of the installation directory
dataroot = os.path.realpath(os.path.join(self.data_dir, '..'))
mappings = dict(os.environ.items() + [('DATA',dataroot)])
sub = lambda x: mappings.get(x.group(1), x.group())
if sys.platform.lower().startswith('win'):
# (2) make %HOME% or $HOME equivalent to %USERPROFILE% or %HOMEDRIVE%%HOMEPATH%
if not 'HOME' in mappings: mappings['HOME'] = mappings.get('USERPROFILE', mappings.get('HOMEDRIVE') + mappings.get('HOMEPATH'))
# expand Windoze-style environmental variables on Windoze
fn = re.sub('%(.+?)%', sub, fn)
# expand POSIX-style environmental variables on all platforms
fn = re.sub(r'\$\{(.+?)\}', sub, fn)
fn = re.sub(r'\$([A-Za-z0-9_]+)', sub, fn)
try: self.stream = bcistream(fn)
except Exception, e: raise PlaybackError(str(e))
self.blocksize = int(self.params['SampleBlockSize'])
self.master = int(self.params['EnslavePython']) != 0
nch = int(self.params['SourceCh'])
pbnch = self.stream.channels()
if nch != pbnch:
raise PlaybackError, 'mismatch between number of channels in SourceCh parameter (%d) and playback file (%d)' % (nch,pbnch)
fs = self.samplingrate()
pbfs = self.stream.samplingrate()
if fs != pbfs:
raise PlaybackError, 'mismatch between sampling rate in SamplingRate parameter (%gHz) and playback file (%gHz)' % (fs,pbfs)
#self.out_signal_props['Type'] = self.stream.headline.get('DataFormat', 'float32')
self.out_signal_props['Type'] = 'float32' # Hmm
# default data format is actually int16, but float32 is safe to cast the other formats into
self.testsig = []
ts = self.params['TestSignals']
if isinstance(ts, list):
if ts.matrixlabels()[1] != ['Channel', 'Frequency', 'Amplitude']: raise EndUserError, "TestSignals matrix must have 3 columns labelled Channel, Frequency and Amplitude"
for x in ts:
if x == ['','','']: continue
chan,freq,amp = x['Channel'], x['Frequency'], x['Amplitude']
try: chan = float(chan)
except:
if chan in self.params['ChannelNames']: chan = self.params['ChannelNames'].index(chan)
else: raise EndUserError, "unrecognized Channel name '%s' in TestSignals matrix" % chan
else:
if chan < 1 or chan > nch or chan != round(chan): raise EndUserError, "invalid Channel index %d in TestSignals matrix" % chan
chan = int(chan+0.5) - 1
try: freq = float(freq)
except: raise EndUserError, "invalid Frequency value '%s' in TestSignals matrix" % freq
try: amp = float(amp)
except: raise EndUserError, "invalid Amplitude value '%s' in TestSignals matrix" % amp
self.testsig.append([chan,freq,amp])
#############################################################
def StartRun(self):
self.stream.seek(self.params['PlaybackStart'])
self.states['SignalStopRun'] = 0
print "\nplaying back:"
print self.stream
#############################################################
def Process(self, sig):
if int(self.states['Running']) == 0:
return sig * 0
if self.states['SignalStopRun']:
self.states['Running'] = 0
self.states['SignalStopRun'] = 0
newsig,states = self.stream.decode(self.blocksize, apply_gains=False)
for chan,freq,amp in self.testsig:
amp /= float(self.params['SourceChGain'][chan])
newsig[chan,:] += amp * numpy.sin(2.0 * numpy.pi * freq * sig[0, :newsig.shape[1]])
if newsig.shape[1] < self.blocksize:
self.states['SignalStopRun'] = 1
return sig * 0
if self.stream.tell() >= self.stream.samples():
self.states['SignalStopRun'] = 1
if self.master:
for k in self.states.keys():
if not k in ('Running', 'Recording', 'AppStartTime', 'StimulusTime', 'SourceTime') and states.has_key(k):
self.states[k] = int(numpy.asarray(states[k]).flat[-1])
return newsig
#################################################################
#################################################################
|
StarcoderdataPython
|
1612407
|
<filename>napari_covid_if_annotations/layers.py
import h5py
import numpy as np
import skimage.color as skc
from vispy.color import Colormap
from .image_utils import (get_centroids, get_edge_segmentation, map_labels_to_edges,
quantile_normalize)
from .io_utils import has_table, read_image, read_table, write_image, write_table
def get_seg_kwargs(f, seg_ids, infected_labels):
seg_kwargs = {
'name': 'cell-segmentation',
'metadata': {'seg_ids': seg_ids,
'infected_labels': infected_labels,
'hide_annotated_segments': False,
'filename': f.filename}
}
return seg_kwargs
def get_centroid_kwargs(centroids, infected_labels):
# napari reorders the labels (it casts np.unique)
# so for now it's easier to just use numeric labels and have separate
# label-names to not get confused by reordering
label_names = ['unlabeled', 'infected', 'control', 'uncertain']
labels = [0, 1, 2, 3]
face_color_cycle = ['white', 'red', 'cyan', 'yellow']
edge_color_cycle = ['black', 'black', 'black', 'black']
properties = get_centroid_properties(centroids, infected_labels)
centroid_kwargs = {
'name': 'infected-vs-control',
'properties': properties,
'size': 8,
'edge_width': 2,
'edge_color': 'cell_type',
'edge_color_cycle': edge_color_cycle,
'face_color': 'cell_type',
'face_color_cycle': face_color_cycle,
'metadata': {'labels': labels,
'label_names': label_names}
}
return centroid_kwargs
def load_labels(f):
seg = read_image(f, 'cell_segmentation')
seg_ids, centroids, _, infected_labels = get_segmentation_data(f, seg, edge_width=1)
seg_kwargs = get_seg_kwargs(f, seg_ids, infected_labels)
point_kwargs = get_centroid_kwargs(centroids, infected_labels)
layers = [
(seg, seg_kwargs, 'labels'),
(centroids, point_kwargs, 'points')
]
return layers
def save_labels(layers):
layer = None
for this_layer, kwargs, layer_type in layers:
if layer_type == 'labels':
layer = this_layer
break
assert layer is not None
seg = layer.data
metadata = layer.metadata
seg_ids = metadata['seg_ids']
infected_labels = metadata['infected_labels']
assert len(seg_ids) == len(infected_labels)
assert infected_labels[0] == 0
infected_labels_columns = ['label_id', 'infected_label']
infected_labels_table = np.concatenate([seg_ids[:, None], infected_labels[:, None]], axis=1)
# we modify the save path, because we don't want to let the filenaming
# patterns go out of sync
save_path = metadata['filename']
identifier = '_annotations.h5'
if identifier not in save_path:
save_path = save_path.replace('.h5', identifier)
with h5py.File(save_path, 'a') as f:
write_image(f, 'cell_segmentation', seg)
write_table(f, 'infected_cell_labels', infected_labels_columns, infected_labels_table,
force_write=True)
return [save_path]
def get_raw_data(f, seg, saturation_factor):
serum = quantile_normalize(read_image(f, 'serum_IgG'))
marker = quantile_normalize(read_image(f, 'marker'))
nuclei = quantile_normalize(read_image(f, 'nuclei'))
bg_mask = seg == 0
def subtract_bg(im):
bg = np.median(im[bg_mask])
im -= bg
return im
serum = subtract_bg(serum)
marker = subtract_bg(marker)
nuclei = subtract_bg(nuclei)
raw = np.concatenate([marker[..., None], serum[..., None], nuclei[..., None]], axis=-1)
if saturation_factor > 1:
raw = skc.rgb2hsv(raw)
raw[..., 1] *= saturation_factor
raw = skc.hsv2rgb(raw).clip(0, 1)
return raw, marker
def get_segmentation_data(f, seg, edge_width, infected_label_name='infected_cell_labels'):
seg_ids = np.unique(seg)
# TODO log if labels were loaded or initialized to be zero
if has_table(f, infected_label_name):
_, infected_labels = read_table(f, infected_label_name)
assert infected_labels.shape[1] == 2
infected_labels = infected_labels[:, 1]
infected_labels = infected_labels.astype('int32')
# we only support labels [0, 1, 2, 3] = ['unlabeled', 'infected', 'control', 'uncertain']
expected_labels = {0, 1, 2, 3}
unique_labels = np.unique(infected_labels)
assert len(set(unique_labels) - expected_labels) == 0
# the background should always be mapped to 0
assert infected_labels[0] == 0
else:
infected_labels = np.zeros(len(seg_ids), dtype='int32')
assert seg_ids.shape == infected_labels.shape, f"{seg_ids.shape}, {infected_labels.shape}"
edges = get_edge_segmentation(seg, edge_width)
infected_edges = map_labels_to_edges(edges, seg_ids, infected_labels, remap_background=4)
centroids = get_centroids(seg)
return seg_ids, centroids, infected_edges, infected_labels
def get_centroid_properties(centroids, infected_labels):
label_values = infected_labels[1:]
assert len(label_values) == len(centroids), f"{len(label_values)}, {len(centroids)}"
properties = {'cell_type': label_values}
return properties
def get_layers_from_file(f, saturation_factor=1., edge_width=2):
seg = read_image(f, 'cell_segmentation')
raw, marker = get_raw_data(f, seg, saturation_factor)
(seg_ids, centroids,
infected_edges, infected_labels) = get_segmentation_data(f, seg, edge_width)
# the keyword arguments passed to 'add_labels' for the cell segmentation layer
seg_kwargs = get_seg_kwargs(f, seg_ids, infected_labels)
# the keyword arguments passed to 'add_image' for the edge layer
# custom colormap to have colors in sync with the point layer
cmap = Colormap([
[1., 1., 1., 1.], # label 0 is white
[1., 0., 0., 1.], # label 1 is red
[0., 1., 1., 1.], # label 2 is cyan
[1., 1., 0., 1.], # label 3 is yellow
[0., 0., 0., 0.], # Background is transparent
])
edge_kwargs = {
'name': 'cell-outlines',
'visible': False,
'colormap': cmap,
'metadata': {'edge_width': edge_width},
'contrast_limits': [0, 4]
}
# the keyword arguments passed to 'add_points' for the
# centroid layer
centroid_kwargs = get_centroid_kwargs(centroids, infected_labels)
layers = [
(raw, {'name': 'raw'}, 'image'),
(marker, {'name': 'virus-marker', 'visible': False}, 'image'),
(seg, seg_kwargs, 'labels'),
(infected_edges, edge_kwargs, 'image'),
(centroids, centroid_kwargs, 'points')
]
return layers
|
StarcoderdataPython
|
20925
|
valor = input("Digite algo: ")
print("É do tipo", type(valor))
print("Valor numérico:", valor.isnumeric())
print("Valor Alfa:", valor.isalpha())
print("Valor Alfanumérico:", valor.isalnum())
print("Valor ASCII:", valor.isascii())
print("Valor Decimal", valor.isdecimal())
print("Valor Printavel", valor.isprintable())
|
StarcoderdataPython
|
50348
|
import typing
from fiepipedesktoplib.locallymanagedtypes.shells.AbstractLocalManagedTypeCommand import LocalManagedTypeCommand
from fiepipedesktoplib.shells.AbstractShell import AbstractShell
from fiepipehoudini.data.installs import HoudiniInstall
from fiepipehoudini.routines.installs import HoudiniInstallsInteractiveRoutines
from fiepipedesktoplib.shells.ui.abspath_input_ui import AbspathInputDefaultUI
from fiepipedesktoplib.shells.ui.subpath_input_ui import SubpathInputDefaultUI
class HoudiniInstallsCommand(LocalManagedTypeCommand[HoudiniInstall]):
def get_routines(self) -> HoudiniInstallsInteractiveRoutines:
return HoudiniInstallsInteractiveRoutines(self.get_feedback_ui(), AbspathInputDefaultUI(self), SubpathInputDefaultUI(self))
def get_shell(self, item) -> AbstractShell:
return super(HoudiniInstallsCommand, self).get_shell(item)
def get_plugin_names_v1(self) -> typing.List[str]:
ret = super(HoudiniInstallsCommand, self).get_plugin_names_v1()
ret.append("houdini_installs_command")
return ret
def get_prompt_text(self) -> str:
return self.prompt_separator.join(['fiepipe','houdini_installs'])
|
StarcoderdataPython
|
98387
|
<gh_stars>0
import pytest
import pendulum
from elasticsearch.exceptions import NotFoundError
from share import models
from share.util import IDObfuscator
from bots.elasticsearch import tasks
from tests import factories
def index_helpers(*helpers):
tasks.index_model('creativework', [h.work.id for h in helpers])
class IndexableWorkTestHelper:
def __init__(self, elastic, index=False, num_identifiers=1, num_sources=1, date=None):
self.elastic = elastic
if date is None:
self.work = factories.AbstractCreativeWorkFactory()
else:
models.AbstractCreativeWork._meta.get_field('date_created').auto_now_add = False
self.work = factories.AbstractCreativeWorkFactory(
date_created=date,
date_modified=date,
)
models.AbstractCreativeWork._meta.get_field('date_created').auto_now_add = True
self.sources = [factories.SourceFactory() for _ in range(num_sources)]
self.work.sources.add(*[s.user for s in self.sources])
for i in range(num_identifiers):
factories.WorkIdentifierFactory(
uri='http://example.com/{}/{}'.format(self.work.id, i),
creative_work=self.work
)
if index:
index_helpers(self)
def assert_indexed(self):
encoded_id = IDObfuscator.encode(self.work)
doc = self.elastic.es_client.get(
index=self.elastic.es_index,
doc_type='creativeworks',
id=encoded_id
)
assert doc['found'] is True
assert doc['_id'] == encoded_id
return doc
def assert_not_indexed(self):
with pytest.raises(NotFoundError):
self.assert_indexed()
@pytest.mark.django_db
class TestElasticSearchBot:
def test_index(self, elastic):
helper = IndexableWorkTestHelper(elastic, index=True)
doc = helper.assert_indexed()
assert doc['_source']['title'] == helper.work.title
assert doc['_source']['sources'] == [helper.sources[0].long_title]
def test_is_deleted_gets_removed(self, elastic):
helper = IndexableWorkTestHelper(elastic, index=True)
helper.assert_indexed()
helper.work.administrative_change(is_deleted=True)
index_helpers(helper)
helper.assert_not_indexed()
def test_source_soft_deleted(self, elastic):
helper = IndexableWorkTestHelper(elastic, index=True)
helper.assert_indexed()
helper.sources[0].is_deleted = True
helper.sources[0].save()
index_helpers(helper)
doc = helper.assert_indexed()
assert doc['_source']['title'] == helper.work.title
assert doc['_source']['sources'] == []
def test_51_identifiers_rejected(self, elastic):
helper1 = IndexableWorkTestHelper(elastic, index=False, num_identifiers=50)
helper2 = IndexableWorkTestHelper(elastic, index=False, num_identifiers=51)
index_helpers(helper1, helper2)
helper1.assert_indexed()
helper2.assert_not_indexed()
def test_aggregation(self, elastic):
helper = IndexableWorkTestHelper(elastic, index=True, num_sources=4)
elastic.es_client.indices.refresh(index=elastic.es_index)
resp = elastic.es_client.search(index=elastic.es_index, doc_type='creativeworks', body={
'size': 0,
'aggregations': {
'sources': {
'terms': {'field': 'sources', 'size': 500}
}
}
})
expected = sorted([{'key': source.long_title, 'doc_count': 1} for source in helper.sources], key=lambda x: x['key'])
actual = sorted(resp['aggregations']['sources']['buckets'], key=lambda x: x['key'])
assert expected == actual
@pytest.mark.django_db
class TestIndexSource:
@pytest.fixture(autouse=True)
def elastic(self, elastic):
return elastic
def test_index(self, elastic):
source = factories.SourceFactory()
tasks.index_sources()
doc = elastic.es_client.get(index=elastic.es_index, doc_type='sources', id=source.name)
assert doc['_id'] == source.name
assert doc['_source']['name'] == source.long_title
assert doc['_source']['short_name'] == source.name
def test_index_deleted(self, elastic):
source = factories.SourceFactory(is_deleted=True)
tasks.index_sources()
with pytest.raises(NotFoundError):
elastic.es_client.get(index=elastic.es_index, doc_type='sources', id=source.name)
def test_index_no_icon(self, elastic):
source = factories.SourceFactory(icon=None)
tasks.index_sources()
with pytest.raises(NotFoundError):
elastic.es_client.get(index=elastic.es_index, doc_type='sources', id=source.name)
@pytest.mark.django_db
class TestJanitorTask:
def test_missing_records_get_indexed(self, elastic, monkeypatch, no_celery):
helper1 = IndexableWorkTestHelper(elastic, index=False)
helper2 = IndexableWorkTestHelper(elastic, index=False)
helper1.assert_not_indexed()
helper2.assert_not_indexed()
tasks.elasticsearch_janitor(to_daemon=False)
helper1.assert_indexed()
helper2.assert_indexed()
def test_date_created(self, elastic, no_celery):
fake, real = [], []
for i in range(1, 6):
fake.append(IndexableWorkTestHelper(elastic, index=False, date=pendulum.now()))
real.append(IndexableWorkTestHelper(elastic, index=False, date=pendulum.now().add(days=-i)))
real.append(IndexableWorkTestHelper(elastic, index=False, date=pendulum.now().add(days=-i)))
index_helpers(*fake)
for helper in fake:
helper.assert_indexed()
helper.work.administrative_change(is_deleted=True)
for helper in real:
helper.assert_not_indexed()
tasks.elasticsearch_janitor(to_daemon=False)
for helper in real:
helper.assert_indexed()
|
StarcoderdataPython
|
3361392
|
from py_models_parser import parse
from simple_ddl_generator import DDLGenerator
def test_ddl_from_pydantic_model():
model_from = """class Material(BaseModel):
id: int
title: str
description: Optional[str]
link: str = 'http://'
type: Optional[MaterialType]
additional_properties: Optional[Json]
created_at: Optional[datetime.datetime] = datetime.datetime.now()
updated_at: Optional[datetime.datetime]"""
result = parse(model_from)
g = DDLGenerator(result)
g.generate()
expected = """CREATE TABLE Material (
id INTEGER,
title VARCHAR,
description VARCHAR,
link VARCHAR DEFAULT 'http://',
type MaterialType,
additional_properties JSON,
created_at DATETIME DEFAULT now(),
updated_at DATETIME);
"""
assert expected == g.result
def test_enum_generated():
model_from = """
class MaterialType(str, Enum):
article = 'article'
video = 'video'
@dataclass
class Material:
id: int
description: str = None
additional_properties: Union[dict, list, tuple, anything] = None
created_at: datetime.datetime = datetime.datetime.now()
updated_at: datetime.datetime = None
@dataclass
class Material2:
id: int
description: str = None
additional_properties: Union[dict, list] = None
created_at: datetime.datetime = datetime.datetime.now()
updated_at: datetime.datetime = None
"""
result = parse(model_from)
g = DDLGenerator(result)
g.generate()
expected = """CREATE TYPE MaterialType AS ENUM ('article','video');
CREATE TABLE Material (
id INTEGER,
description VARCHAR DEFAULT NULL,
additional_properties JSON DEFAULT NULL,
created_at DATETIME DEFAULT now(),
updated_at DATETIME DEFAULT NULL);
CREATE TABLE Material2 (
id INTEGER,
description VARCHAR DEFAULT NULL,
additional_properties JSON DEFAULT NULL,
created_at DATETIME DEFAULT now(),
updated_at DATETIME DEFAULT NULL);
"""
assert expected == g.result
def test_references_from_django():
expected = """CREATE TABLE Publication (
title VARCHAR);
CREATE TABLE Article (
headline VARCHAR,
publications INTEGER FOREIGN KEY REFERENCES Publication);
"""
model_from = """
from django.db import models
class Publication(models.Model):
title = models.CharField(max_length=30)
class Meta:
ordering = ['title']
def __str__(self):
return self.title
class Article(models.Model):
headline = models.CharField(max_length=100)
publications = models.ManyToManyField(Publication)
class Meta:
ordering = ['headline']
def __str__(self):
return self.headline
"""
result = parse(model_from)
g = DDLGenerator(result)
g.generate()
assert g.result == expected
def test_primary_key_and_unique():
models_str = """
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(80), unique=True, nullable=False)
email = db.Column(db.String(120), unique=True, nullable=False)
def __repr__(self):
return '<User %r>' % self.username
"""
result = parse(models_str)
g = DDLGenerator(result)
g.generate()
expected = """CREATE TABLE User (
id db.Integer PRIMARY KEY,
username db.String(80) UNIQUE,
email db.String(120) UNIQUE);
"""
    assert expected == g.result
def test_lowercase():
expected = """CREATE TABLE person (
id db.Integer PRIMARY KEY,
name db.String(50) NOT NULL,
addresses 'Address');
CREATE TABLE address (
id db.Integer PRIMARY KEY,
email db.String(120) NOT NULL,
person_id db.Integer NOT NULL FOREIGN KEY REFERENCES 'person.id');
"""
models_str = """
class Person(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50), nullable=False)
addresses = db.relationship('Address', backref='person', lazy=True)
class Address(db.Model):
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(120), nullable=False)
person_id = db.Column(db.Integer, db.ForeignKey('person.id'),
nullable=False)
"""
result = parse(models_str)
g = DDLGenerator(result, lowercase=True)
g.generate()
assert g.result == expected
|
StarcoderdataPython
|
1717536
|
from unittest import TestCase
from SumarySearch import utils, bad_chars, stop_words
from SumarySearch.models import Book
class TestUtils(TestCase):
def setUp(self):
self.list_of_books = [Book(1, utils.clean_string('The Book in Three Sentences:\u00a0What if we measured our lives based on ',
bad_chars,
stop_words)),
Book(2, utils.clean_string("The quicker you let go of old things, the sooner",
bad_chars,
stop_words))]
self.str1 = 'The Book in Three Sentences:\u00a0What if we measured our lives based on '
self.str2 = "???///// ;;;; ;;::: "
self.str3 = ''
def set_term_frequency(self):
for book in self.list_of_books:
book.set_term_frequency(utils.calculate_term_frequency(book.summary))
def get_idf(self):
return utils.calculate_idf(self.list_of_books)
def test_clean_string_success(self):
clean_str = "book sentences measured lives based"
self.assertEqual(self.list_of_books[0].summary, clean_str)
def test_calculate_term_frequency_success(self):
cleaned_string = utils.clean_string(self.str1, bad_chars, stop_words)
term_freq = utils.calculate_term_frequency(cleaned_string)
# term_freq = {"book": 0.2, "sentences": 0.2, "measured": 0.2, "lives": 0.2, "based":0.2}
self.assertEqual(utils.calculate_term_frequency(self.list_of_books[0].summary), term_freq)
def test_compute_normalised_vector_success(self):
self.set_term_frequency()
idf = self.get_idf()
cleaned_string = utils.clean_string(self.str1, bad_chars, stop_words)
term_freq = utils.calculate_term_frequency(cleaned_string)
expected_idf = utils.compute_normalised_vector(term_freq, idf)
self.assertEqual(utils.compute_normalised_vector(self.list_of_books[0].tf, idf), expected_idf)
def test_clean_string_fail(self):
clean_str = ""
self.assertEqual(utils.clean_string(self.str2, bad_chars, stop_words), clean_str)
def test_calculate_term_frequency_fail(self):
cleaned_string = utils.clean_string(self.str3, bad_chars, stop_words)
term_freq = {}
self.assertEqual(utils.calculate_term_frequency(cleaned_string), term_freq)
|
StarcoderdataPython
|
175468
|
"""
Settings for the project
"""
logfile = "demo_project.log"
|
StarcoderdataPython
|
1627943
|
from pathlib import Path
from tempfile import TemporaryDirectory
from snakemake.shell import shell
with TemporaryDirectory(dir=Path.cwd()) as tmpdir:
shell("cmscan --rfam --cut_ga --nohmmonly"
"--tblout {output[0]} "
"{input.rfam_database} "
"{input.fasta} "
"> {output[1]}")
(Path(tmpdir)/"soft_filtered_transcripts.fasta") \
.rename(snakemake.output[0])
#cmscan --rfam --cut_ga --nohmmonly --tblout mrum-genome.tblout Rfam.cm contigs.fasta > myfavscan.cmscan
|
StarcoderdataPython
|
3266702
|
<filename>SlackESPN.py
from slackclient import SlackClient
from espnff import League
import argparse, os, time
def handle_command(ARGS, CLIENT, command, channel):
"""
Receives commands directed at the bot and determines if they
are valid commands. If so, then acts on the commands. If not,
returns back what it needs for clarification.
"""
message = '''Commands I know:
list teams
scores <optional week number>
does Brandon suck
'''
message = ""
attachments = ""
if command == "list teams":
message = '\n'.join(map(lambda x: x.team_name, ARGS.league.teams))
elif command == "does brandon suck":
message = 'yes'
elif 'scores' in command:
pieces = command.split(' ')
if len(pieces) == 1:
message = 'Current Scoreboard'
matchups = ARGS.league.scoreboard(projections=True)
else:
message = 'Scoreboard for week ' + pieces[1]
matchups = ARGS.league.scoreboard(pieces[1], projections=True)
attachments = [{
'fallback': 'A textual representation of your table data',
'fields': [
{
'title': 'Home',
'value': '\n'.join(map(lambda x: x.home_team.team_abbrev + " " + str(x.home_score) + " (" + str(x.home_projection) + ")", matchups)),
'short': True
},
{
'title': 'Away',
'value': '\n'.join(map(lambda x: x.away_team.team_abbrev + " " + str(x.away_score) + " (" + str(x.away_projection) + ")", matchups)),
'short': True
}
]
}]
CLIENT.api_call("chat.postMessage", channel=channel, text=message, attachments=attachments, as_user=True)
# CLIENT.api_call("chat.postMessage", channel=channel, text=message, as_user=True)
def parse_slack_output(ARGS, slack_rtm_output):
"""
The Slack Real Time Messaging API is an events firehose.
this parsing function returns None unless a message is
directed at the Bot, based on its ID.
"""
output_list = slack_rtm_output
if output_list and len(output_list) > 0:
for output in output_list:
if output and 'text' in output and ARGS.atbot in output['text']:
# return text after the @ mention, whitespace removed
return output['text'].split(ARGS.atbot)[1].strip().lower(), \
output['channel']
return None, None
def startloop(ARGS, client):
if client.rtm_connect():
print(ARGS.botname + " connected and running!")
while True:
command, channel = parse_slack_output(ARGS, client.rtm_read())
if command and channel:
                handle_command(ARGS, client, command.strip(), channel)
time.sleep(ARGS.websocketdelay)
else:
print("Connection failed. Invalid Slack token or bot ID?")
"""
"""
def getfootballbot(ARGS, client):
api_call = client.api_call("users.list")
if api_call.get('ok'):
# retrieve all users so we can find our bot
users = api_call.get('members')
for user in users:
if 'name' in user and user.get('name') == ARGS.botname:
print("Bot ID for '" + user['name'] + "' is " + user.get('id'))
return user.get('id')
else:
raise Exception("could not find bot user with the name " + ARGS.botname)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser()
PARSER.add_argument('-slacktoken', default='SLACK_FOOTBALL_TOKEN')
PARSER.add_argument('-espnleague', default='ESPN_LEAGUE')
PARSER.add_argument('-botname', default='footballbot')
PARSER.add_argument('-espns2', default='ESPNS2')
PARSER.add_argument('-swid', default='SWID')
PARSER.add_argument('-websocketdelay', type=int, default=1)
ARGS = PARSER.parse_args()
ARGS.league = League(int(os.environ.get(ARGS.espnleague)), 2017, espn_s2=os.environ.get(ARGS.espns2), swid=os.environ.get(ARGS.swid))
# sc = ARGS.league.scoreboard(projections=True)
# home_names = '\n'.join(map(lambda x: x.home_team.team_abbrev, sc))
# home_scores = '\n'.join(map(lambda x: x.home_score, sc))
# home_proj = '\n'.join(map(lambda x: x.home_projection, sc))
# print(home_scores)
# exit()
CLIENT = SlackClient(os.environ.get(ARGS.slacktoken))
BOTID = getfootballbot(ARGS, CLIENT)
ARGS.atbot = "<@" + BOTID + ">"
startloop(ARGS, CLIENT)
|
StarcoderdataPython
|
1710556
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Input pipeline tests."""
from absl.testing import parameterized
import jax
import tensorflow as tf
import tensorflow_datasets as tfds
from spin_spherical_cnns import input_pipeline
from spin_spherical_cnns.configs import default
class InputPipelineTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters("spherical_mnist/rotated",
"spherical_mnist/canonical")
def test_create_datasets_spherical_mnist(self, dataset):
rng = jax.random.PRNGKey(42)
config = default.get_config()
config.dataset = dataset
config.per_device_batch_size = 8
config.eval_pad_last_batch = False
        splits = input_pipeline.create_datasets(config, rng)
        self.assertEqual(splits.info.features["label"].num_classes, 10)
        self.assertEqual(splits.train.element_spec["input"].shape,
                         (1, 8, 64, 64, 1, 1))
        self.assertEqual(splits.train.element_spec["label"].shape, (1, 8))
        self.assertEqual(splits.validation.element_spec["input"].shape,
                         (1, 8, 64, 64, 1, 1))
        self.assertEqual(splits.validation.element_spec["label"].shape, (1, 8))
        self.assertEqual(splits.test.element_spec["input"].shape,
                         (1, 8, 64, 64, 1, 1))
        self.assertEqual(splits.test.element_spec["label"].shape, (1, 8))
if __name__ == "__main__":
tf.test.main()
|
StarcoderdataPython
|
4838716
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
#
#
"""
Module purpose description.
Authors: zhangzhenhu
Date: 2019/4/8 14:39
"""
import pandas as pd
import numpy as np
import random
import argparse
from sklearn import preprocessing
from sklearn.svm import SVC
from sklearn.svm import SVR
from sklearn.model_selection import GridSearchCV
from .model import ABCModel as Model, Evaluation
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
from sklearn.pipeline import Pipeline, FeatureUnion
class SVC_Model(Model):
name = "SVC"
# 模型参数
param = {}
description = "支持向量机分类"
def __init__(self, feature, evaluation=None, kernel='rbf'):
super(SVC_Model, self).__init__(feature=feature, evaluation=evaluation)
self.kernel = kernel
self.description = kernel + self.description
def fit(self, **kwargs):
self.feature_list = kwargs.get('feature_list', None)
k_single = kwargs.get('k_single', 0)
k_pca = kwargs.get('k_pca', 1)
self.train_x, self.train_y = self.tf_sample(self.train_x, self.train_y)
        # standardize the data
scaler = preprocessing.StandardScaler()
self.scalar_ = scaler.fit(self.train_x)
# pca
selection = SelectKBest(k=k_single)
n_components = int(len(self.feature_names) * k_pca)
pca = PCA(n_components=n_components)
combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)])
self.pca = combined_features.fit(self.train_x, self.train_y)
self.pca = PCA(n_components=n_components).fit(self.train_x)
self.model = SVC(kernel=self.kernel)
fit_data = self.train_x.copy()
fit_data = self.scalar_.transform(fit_data)
fit_data = self.pca.transform(fit_data)
self.model.fit(fit_data, self.train_y)
        # evaluate performance on the training set
self.train_y_pred = self.predict(self.train_x)
self.train_y = np.array(self.train_y)
self.train_y_pred = np.array(self.train_y_pred)
self.train_ev = self.evaluation.evaluate(y_true=self.train_y, y_pred=self.train_y_pred, threshold=0.5)
return self
def adjust_params(self, params=None, CV=10, scoring='accuracy', n_iter=10):
self.cvmodel = GridSearchCV(self.model, params, cv=CV, scoring=scoring, n_jobs=-1)
self.cvmodel.fit(self.train_x, self.train_y)
self.best_params = self.cvmodel.best_params_
self.best_score_ = self.cvmodel.best_score_
        print('%s with parameters %s reaches its best training accuracy of %s' % (self.model_name, self.best_params, self.best_score_))
        # refit the model with the best parameters found by the grid search
        self.model = SVC(kernel=self.kernel)
        self.model.set_params(**self.best_params)
        self.model.fit(self.train_x, self.train_y)
return self.model
def predict(self, x):
assert x is not None
# x = self.select_features(x)
x = self.scalar_.transform(x)
x = self.pca.transform(x)
y_prob = self.model.predict(x)
'''
y_prob = y_prob.tolist()
y_prob = [item[1] for item in y_prob]
y_prob = np.array(y_prob)
'''
return y_prob
class SVR_Model(Model):
name = "SVR"
# 模型参数
param = {}
description = "支持向量机回归"
def __init__(self, feature, evaluation=None, kernel='rbf'):
super(SVR_Model, self).__init__(feature=feature, evaluation=evaluation)
self.kernel = kernel
self.description = kernel + self.description
    def fit(self, method_name=None, k_single=0, k_pca=1):
self.train_x, self.train_y = self.tf_sample(self.train_x, self.train_y)
        # standardize the data
scaler = preprocessing.StandardScaler()
self.scalar_ = scaler.fit(self.train_x)
# pca
selection = SelectKBest(k=k_single)
n_components = int(len(self.feature_names) * k_pca)
pca = PCA(n_components=n_components)
combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)])
self.pca = combined_features.fit(self.train_x, self.train_y)
self.pca = PCA(n_components=n_components).fit(self.train_x)
self.model = SVR(kernel=self.kernel)
fit_data = self.train_x.copy()
fit_data = self.scalar_.transform(fit_data)
fit_data = self.pca.transform(fit_data)
self.model.fit(fit_data, self.train_y)
        # evaluate performance on the training set
self.train_y_pred = self.predict(self.train_x)
self.train_y = np.array(self.train_y)
self.train_y_pred = np.array(self.train_y_pred)
self.train_ev = self.evaluation.evaluate(y_true=self.train_y, y_pred=self.train_y_pred, threshold=0.5)
return self
def adjust_params(self, params=None, CV=10, scoring='accuracy', n_iter=10):
self.cvmodel = GridSearchCV(self.model, params, cv=CV, scoring=scoring, n_jobs=-1)
self.cvmodel.fit(self.train_x, self.train_y)
self.best_params = self.cvmodel.best_params_
self.best_score_ = self.cvmodel.best_score_
        print('%s with parameters %s reaches its best training accuracy of %s' % (self.model_name, self.best_params, self.best_score_))
        # refit the model with the best parameters found by the grid search
        self.model = SVR(kernel=self.kernel)
        self.model.set_params(**self.best_params)
        self.model.fit(self.train_x, self.train_y)
return self.model
def predict(self, x):
assert x is not None
x = self.scalar_.transform(x)
x = self.pca.transform(x)
y_prob = self.model.predict(x)
'''
y_prob = y_prob.tolist()
y_prob = [item[1] for item in y_prob]
y_prob = np.array(y_prob)
'''
return y_prob
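# A hypothetical usage sketch (added for illustration): the 'feature' and
# 'evaluation' objects come from the surrounding project and are assumed
# here, while the parameter grid uses standard scikit-learn SVC parameters.
#
# model = SVC_Model(feature=my_feature, evaluation=my_evaluation, kernel='rbf')
# model.fit(k_single=0, k_pca=1)
# model.adjust_params(params={'C': [0.1, 1.0, 10.0], 'gamma': ['scale', 0.01]},
#                     CV=5, scoring='accuracy')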
|
StarcoderdataPython
|
1701239
|
import os
import random
import torch.utils.data
from PIL import Image
import albumentations as A
import albumentations.pytorch as AT
import cv2
class ImgTransformer:
def __init__(self, img_size, color_aug=False):
self.img_size = img_size
self.color_aug = color_aug
def transform(self, image, mask):
if self.color_aug:
transforms = A.Compose(
[
A.Resize(self.img_size, self.img_size),
A.OneOf(
[
A.IAAAdditiveGaussianNoise(),
A.GaussNoise(var_limit=(100, 200)),
A.JpegCompression(quality_lower=75, p=0.2),
],
p=0.3,
),
A.OneOf(
[
A.MotionBlur(blur_limit=10, p=0.2),
A.MedianBlur(blur_limit=11, p=0.1),
A.Blur(blur_limit=11, p=0.1),
],
p=0.3,
),
A.ShiftScaleRotate(shift_limit=0, scale_limit=(-0.1, 0.4), rotate_limit=5, p=0.6),
A.Cutout(num_holes=8, max_h_size=48, max_w_size=48, p=0.3),
A.OneOf(
[
A.CLAHE(clip_limit=4),
A.IAASharpen(),
A.IAAEmboss(),
A.RandomBrightnessContrast(),
],
p=0.5,
),
A.HueSaturationValue(hue_shift_limit=20, sat_shift_limit=50, val_shift_limit=50, p=0.5),
]
)
else:
transforms = A.Compose(
[
A.Resize(self.img_size, self.img_size),
]
)
image_norm_and_to_tensor = A.Compose(
[
A.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
AT.ToTensorV2(transpose_mask=True),
]
)
mask_norm_and_to_tensor = A.Compose(
[
A.Normalize(mean=[0.0, 0.0, 0.0], std=[1, 1, 1]),
AT.ToTensorV2(transpose_mask=True),
]
)
transform_pair = transforms(image=image, mask=mask)
transform_image = transform_pair["image"]
transform_mask = transform_pair["mask"]
transform_image = image_norm_and_to_tensor(image=transform_image)["image"]
transform_mask = mask_norm_and_to_tensor(image=transform_mask)["image"]
return transform_image, transform_mask
def load(self, impath, maskpath):
image = cv2.imread(impath, cv2.IMREAD_COLOR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
mask = cv2.imread(maskpath)
return self.transform(image, mask)
class HairDataset(torch.utils.data.Dataset):
def __init__(self, data_folder, image_size=448, color_aug=False):
self.data_folder = data_folder
if not os.path.exists(self.data_folder):
raise Exception("%s not exists." % self.data_folder)
self.imagedir_path = os.path.join(data_folder, "images")
self.maskdir_path = os.path.join(data_folder, "masks")
self.image_names = os.listdir(self.imagedir_path)
self.image_size = image_size
self.transformer = ImgTransformer(image_size, color_aug)
def __getitem__(self, index):
img_path = os.path.join(self.imagedir_path, self.image_names[index])
maskfilename = os.path.join(self.maskdir_path, self.image_names[index].split(".")[0] + ".png")
transform_image, transform_mask = self.transformer.load(img_path, maskfilename)
transform_mask = transform_mask[0:1, :, :]
# random horizontal flip
hflip = random.choice([True, False])
if hflip:
transform_image = transform_image.flip([2])
transform_mask = transform_mask.flip([2])
return transform_image, transform_mask
def __len__(self):
return len(self.image_names)
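# Usage sketch (added for illustration; the data folder path is hypothetical).
# HairDataset yields (image, mask) tensor pairs, so it plugs directly into a
# standard PyTorch DataLoader:
#
# from torch.utils.data import DataLoader
#
# dataset = HairDataset("data/train", image_size=448, color_aug=True)
# loader = DataLoader(dataset, batch_size=8, shuffle=True, num_workers=4)
# for images, masks in loader:
#     pass  # images: (8, 3, 448, 448), masks: (8, 1, 448, 448)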
|
StarcoderdataPython
|
1661741
|
from django.contrib import admin
from JobTrak.admin import JobTrakAdmin
#from mmg.jobtrak.links.models import *
#from mmg.jobtrak.core.models import *
|
StarcoderdataPython
|
1732857
|
<filename>legged_gym/envs/pat/pat_IK_config.py<gh_stars>0
# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Copyright (c) 2021 ETH Zurich, <NAME>
from legged_gym.envs.base.legged_robot_config import LeggedRobotCfg, LeggedRobotCfgPPO
from legged_gym.envs.pat.pat_config import PatCfg, PatCfgPPO
import math
class PatIKCfg( PatCfg ):
class experiment:
recompute_normalization = True
experiment_name = "pat_pd_no_hist_no_norm"
n_sample_mean_est = 1000
class gait():
swing_time = 0.28
class foot_placement(PatCfg.foot_placement):
swing_height = 0.15
x_default = -0.1
y_default = 0.012
z_default = -0.38
class history:
history_length = 6
n_joints = 6
class init_state( PatCfg.init_state ):
pos = [0.0, 0.0, 0.45] # x,y,z [m]
default_joint_angles = { # = target angles [rad] when action = 0.0
'R_hip_joint': 0.3, # [rad]
'R_thigh_joint': -0.32, # [rad]
'R_calf_joint': 0.83, # [rad]
'L_hip_joint': -0.16, # [rad]
'L_thigh_joint': -0.29, # [rad]
'L_calf_joint': 0.81, # [rad]
}
class env(PatCfg.env):
num_observations = 40 #76
num_actions = 6
class terrain(PatCfg.terrain ):
mesh_type = 'plane'
measure_heights = False
class control(PatCfg.control ):
control_type = 'IK'
stiffness = {'joint': 25.} # [N*m/rad]
damping = {'joint': 0.4} # [N*m*s/rad]
decimation = 2
action_scale = 0.1
class noise:
add_noise = False
noise_level = 1.0 # scales other values
class noise_scales:
ori = 0.0
dof_pos = 0.01
pos_error = 0.001
dof_vel = 1.5
lin_vel = 0.1
ang_vel = 0.2
gravity = 0.05
height_measurements = 0.1
foot_pos = 0.001
class commands(PatCfg.commands):
heading_command = False # if true: compute ang vel command from heading error
curriculum = False
resampling_time = 30.
class ranges(PatCfg.commands.ranges):
lin_vel_x = [-0.5, 0.5] # min max [m/s]
lin_vel_y = [-0.5, 0.5] # min max [m/s]
ang_vel_yaw = [-0.5, 0.5] # min max [rad/s]
class domain_rand:
randomize_friction = True
friction_range = [0.5, 1.25]
randomize_base_mass = True
added_mass_range = [-2.0, 2.0]
push_robots = True
push_interval_s = 0.2
max_push_vel_xy = 1.
class rewards( PatCfg.rewards ):
base_height_target = 0.45
only_positive_rewards = True # if true negative total rewards are clipped at zero (avoids early termination problems)
soft_dof_pos_limit = 1. # percentage of urdf limits, values above this limit are penalized
soft_dof_vel_limit = 0.8
soft_torque_limit = 0.8
tracking_sigma = 1.0
max_contact_force = 100. # forces above this value are penalized
class scales:
tracking_lin_vel = 3.0
tracking_ang_vel = 3.0
feet_air_time = 0.3
slip = -0.08
foot_clearance = -15.0
orientation = -3.0
torques = -6e-4
base_height = -20.0
dof_vel = -6e-4
dof_acc = -0.02
body_motion = -1.5
linear_ortho_vel = 0.0
collision = -1.
class sim(PatCfg.sim):
dt = 0.005
class PatIKCfgPPO( PatCfgPPO ):
class algorithm( PatCfgPPO.algorithm ):
entropy_coef = 0.01
class runner( PatCfgPPO.runner ):
run_name = ''
experiment_name = 'pat_pd_no_hist_no_norm'
load_run = -1 #"Feb11_14-03-54_" # -1 = last run
checkpoint = -1 #"800" # -1 = last saved model
max_iterations = 1000
|
StarcoderdataPython
|
1643123
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['SecurityServiceArgs', 'SecurityService']
@pulumi.input_type
class SecurityServiceArgs:
def __init__(__self__, *,
type: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
dns_ip: Optional[pulumi.Input[str]] = None,
domain: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
ou: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
server: Optional[pulumi.Input[str]] = None,
user: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a SecurityService resource.
:param pulumi.Input[str] type: The security service type - can either be active\_directory,
kerberos or ldap. Changing this updates the existing security service.
:param pulumi.Input[str] description: The human-readable description for the security service.
Changing this updates the description of the existing security service.
:param pulumi.Input[str] dns_ip: The security service DNS IP address that is used inside the
tenant network.
:param pulumi.Input[str] domain: The security service domain.
:param pulumi.Input[str] name: The name of the security service. Changing this updates the name
of the existing security service.
:param pulumi.Input[str] ou: The security service ou. An organizational unit can be added to
specify where the share ends up. New in Manila microversion 2.44.
        :param pulumi.Input[str] password: The user password, if you specify a user.
:param pulumi.Input[str] region: The region in which to obtain the V2 Shared File System client.
A Shared File System client is needed to create a security service. If omitted, the
`region` argument of the provider is used. Changing this creates a new
security service.
:param pulumi.Input[str] server: The security service host name or IP address.
:param pulumi.Input[str] user: The security service user or group name that is used by the
tenant.
"""
pulumi.set(__self__, "type", type)
if description is not None:
pulumi.set(__self__, "description", description)
if dns_ip is not None:
pulumi.set(__self__, "dns_ip", dns_ip)
if domain is not None:
pulumi.set(__self__, "domain", domain)
if name is not None:
pulumi.set(__self__, "name", name)
if ou is not None:
pulumi.set(__self__, "ou", ou)
if password is not None:
pulumi.set(__self__, "password", password)
if region is not None:
pulumi.set(__self__, "region", region)
if server is not None:
pulumi.set(__self__, "server", server)
if user is not None:
pulumi.set(__self__, "user", user)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
The security service type - can either be active\_directory,
kerberos or ldap. Changing this updates the existing security service.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The human-readable description for the security service.
Changing this updates the description of the existing security service.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="dnsIp")
def dns_ip(self) -> Optional[pulumi.Input[str]]:
"""
The security service DNS IP address that is used inside the
tenant network.
"""
return pulumi.get(self, "dns_ip")
@dns_ip.setter
def dns_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "dns_ip", value)
@property
@pulumi.getter
def domain(self) -> Optional[pulumi.Input[str]]:
"""
The security service domain.
"""
return pulumi.get(self, "domain")
@domain.setter
def domain(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "domain", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the security service. Changing this updates the name
of the existing security service.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def ou(self) -> Optional[pulumi.Input[str]]:
"""
The security service ou. An organizational unit can be added to
specify where the share ends up. New in Manila microversion 2.44.
"""
return pulumi.get(self, "ou")
@ou.setter
def ou(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ou", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
The user password, if you specify a user.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter
def region(self) -> Optional[pulumi.Input[str]]:
"""
The region in which to obtain the V2 Shared File System client.
A Shared File System client is needed to create a security service. If omitted, the
`region` argument of the provider is used. Changing this creates a new
security service.
"""
return pulumi.get(self, "region")
@region.setter
def region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region", value)
@property
@pulumi.getter
def server(self) -> Optional[pulumi.Input[str]]:
"""
The security service host name or IP address.
"""
return pulumi.get(self, "server")
@server.setter
def server(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "server", value)
@property
@pulumi.getter
def user(self) -> Optional[pulumi.Input[str]]:
"""
The security service user or group name that is used by the
tenant.
"""
return pulumi.get(self, "user")
@user.setter
def user(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user", value)
@pulumi.input_type
class _SecurityServiceState:
def __init__(__self__, *,
description: Optional[pulumi.Input[str]] = None,
dns_ip: Optional[pulumi.Input[str]] = None,
domain: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
ou: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
server: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
user: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering SecurityService resources.
:param pulumi.Input[str] description: The human-readable description for the security service.
Changing this updates the description of the existing security service.
:param pulumi.Input[str] dns_ip: The security service DNS IP address that is used inside the
tenant network.
:param pulumi.Input[str] domain: The security service domain.
:param pulumi.Input[str] name: The name of the security service. Changing this updates the name
of the existing security service.
:param pulumi.Input[str] ou: The security service ou. An organizational unit can be added to
specify where the share ends up. New in Manila microversion 2.44.
:param pulumi.Input[str] password: The user password, if you specify a user.
:param pulumi.Input[str] project_id: The owner of the Security Service.
:param pulumi.Input[str] region: The region in which to obtain the V2 Shared File System client.
A Shared File System client is needed to create a security service. If omitted, the
`region` argument of the provider is used. Changing this creates a new
security service.
:param pulumi.Input[str] server: The security service host name or IP address.
:param pulumi.Input[str] type: The security service type - can either be active\_directory,
kerberos or ldap. Changing this updates the existing security service.
:param pulumi.Input[str] user: The security service user or group name that is used by the
tenant.
"""
if description is not None:
pulumi.set(__self__, "description", description)
if dns_ip is not None:
pulumi.set(__self__, "dns_ip", dns_ip)
if domain is not None:
pulumi.set(__self__, "domain", domain)
if name is not None:
pulumi.set(__self__, "name", name)
if ou is not None:
pulumi.set(__self__, "ou", ou)
if password is not None:
pulumi.set(__self__, "password", password)
if project_id is not None:
pulumi.set(__self__, "project_id", project_id)
if region is not None:
pulumi.set(__self__, "region", region)
if server is not None:
pulumi.set(__self__, "server", server)
if type is not None:
pulumi.set(__self__, "type", type)
if user is not None:
pulumi.set(__self__, "user", user)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The human-readable description for the security service.
Changing this updates the description of the existing security service.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="dnsIp")
def dns_ip(self) -> Optional[pulumi.Input[str]]:
"""
The security service DNS IP address that is used inside the
tenant network.
"""
return pulumi.get(self, "dns_ip")
@dns_ip.setter
def dns_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "dns_ip", value)
@property
@pulumi.getter
def domain(self) -> Optional[pulumi.Input[str]]:
"""
The security service domain.
"""
return pulumi.get(self, "domain")
@domain.setter
def domain(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "domain", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the security service. Changing this updates the name
of the existing security service.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def ou(self) -> Optional[pulumi.Input[str]]:
"""
The security service ou. An organizational unit can be added to
specify where the share ends up. New in Manila microversion 2.44.
"""
return pulumi.get(self, "ou")
@ou.setter
def ou(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ou", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
The user password, if you specify a user.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter(name="projectId")
def project_id(self) -> Optional[pulumi.Input[str]]:
"""
The owner of the Security Service.
"""
return pulumi.get(self, "project_id")
@project_id.setter
def project_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project_id", value)
@property
@pulumi.getter
def region(self) -> Optional[pulumi.Input[str]]:
"""
The region in which to obtain the V2 Shared File System client.
A Shared File System client is needed to create a security service. If omitted, the
`region` argument of the provider is used. Changing this creates a new
security service.
"""
return pulumi.get(self, "region")
@region.setter
def region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region", value)
@property
@pulumi.getter
def server(self) -> Optional[pulumi.Input[str]]:
"""
The security service host name or IP address.
"""
return pulumi.get(self, "server")
@server.setter
def server(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "server", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
The security service type - can either be active\_directory,
kerberos or ldap. Changing this updates the existing security service.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def user(self) -> Optional[pulumi.Input[str]]:
"""
The security service user or group name that is used by the
tenant.
"""
return pulumi.get(self, "user")
@user.setter
def user(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user", value)
class SecurityService(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
dns_ip: Optional[pulumi.Input[str]] = None,
domain: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
ou: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
server: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
user: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Use this resource to configure a security service.
> **Note:** All arguments including the security service password will be
stored in the raw state as plain-text. [Read more about sensitive data in
state](https://www.terraform.io/docs/state/sensitive-data.html).
A security service stores configuration information for clients for
authentication and authorization (AuthN/AuthZ). For example, a share server
will be the client for an existing service such as LDAP, Kerberos, or
Microsoft Active Directory.
Minimum supported Manila microversion is 2.7.
## Example Usage
```python
import pulumi
import pulumi_openstack as openstack
securityservice1 = openstack.sharedfilesystem.SecurityService("securityservice1",
description="created by terraform",
dns_ip="192.168.199.10",
domain="example.com",
ou="CN=Computers,DC=example,DC=com",
password="<PASSWORD>",
server="192.168.199.10",
type="active_directory",
user="joinDomainUser")
```
## Import
This resource can be imported by specifying the ID of the security service
```sh
$ pulumi import openstack:sharedfilesystem/securityService:SecurityService securityservice_1 <id>
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: The human-readable description for the security service.
Changing this updates the description of the existing security service.
:param pulumi.Input[str] dns_ip: The security service DNS IP address that is used inside the
tenant network.
:param pulumi.Input[str] domain: The security service domain.
:param pulumi.Input[str] name: The name of the security service. Changing this updates the name
of the existing security service.
:param pulumi.Input[str] ou: The security service ou. An organizational unit can be added to
specify where the share ends up. New in Manila microversion 2.44.
:param pulumi.Input[str] password: The user password, if you specify a user.
:param pulumi.Input[str] region: The region in which to obtain the V2 Shared File System client.
A Shared File System client is needed to create a security service. If omitted, the
`region` argument of the provider is used. Changing this creates a new
security service.
:param pulumi.Input[str] server: The security service host name or IP address.
:param pulumi.Input[str] type: The security service type - can either be active\_directory,
kerberos or ldap. Changing this updates the existing security service.
:param pulumi.Input[str] user: The security service user or group name that is used by the
tenant.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SecurityServiceArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Use this resource to configure a security service.
> **Note:** All arguments including the security service password will be
stored in the raw state as plain-text. [Read more about sensitive data in
state](https://www.terraform.io/docs/state/sensitive-data.html).
A security service stores configuration information for clients for
authentication and authorization (AuthN/AuthZ). For example, a share server
will be the client for an existing service such as LDAP, Kerberos, or
Microsoft Active Directory.
Minimum supported Manila microversion is 2.7.
## Example Usage
```python
import pulumi
import pulumi_openstack as openstack
securityservice1 = openstack.sharedfilesystem.SecurityService("securityservice1",
description="created by terraform",
dns_ip="192.168.199.10",
domain="example.com",
ou="CN=Computers,DC=example,DC=com",
password="s8cret",
server="192.168.199.10",
type="active_directory",
user="joinDomainUser")
```
## Import
This resource can be imported by specifying the ID of the security service
```sh
$ pulumi import openstack:sharedfilesystem/securityService:SecurityService securityservice_1 <id>
```
:param str resource_name: The name of the resource.
:param SecurityServiceArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SecurityServiceArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
dns_ip: Optional[pulumi.Input[str]] = None,
domain: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
ou: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
server: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
user: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SecurityServiceArgs.__new__(SecurityServiceArgs)
__props__.__dict__["description"] = description
__props__.__dict__["dns_ip"] = dns_ip
__props__.__dict__["domain"] = domain
__props__.__dict__["name"] = name
__props__.__dict__["ou"] = ou
__props__.__dict__["password"] = password
__props__.__dict__["region"] = region
__props__.__dict__["server"] = server
if type is None and not opts.urn:
raise TypeError("Missing required property 'type'")
__props__.__dict__["type"] = type
__props__.__dict__["user"] = user
__props__.__dict__["project_id"] = None
super(SecurityService, __self__).__init__(
'openstack:sharedfilesystem/securityService:SecurityService',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
dns_ip: Optional[pulumi.Input[str]] = None,
domain: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
ou: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
server: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
user: Optional[pulumi.Input[str]] = None) -> 'SecurityService':
"""
Get an existing SecurityService resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: The human-readable description for the security service.
Changing this updates the description of the existing security service.
:param pulumi.Input[str] dns_ip: The security service DNS IP address that is used inside the
tenant network.
:param pulumi.Input[str] domain: The security service domain.
:param pulumi.Input[str] name: The name of the security service. Changing this updates the name
of the existing security service.
:param pulumi.Input[str] ou: The security service ou. An organizational unit can be added to
specify where the share ends up. New in Manila microversion 2.44.
:param pulumi.Input[str] password: The user password, if you specify a user.
:param pulumi.Input[str] project_id: The owner of the Security Service.
:param pulumi.Input[str] region: The region in which to obtain the V2 Shared File System client.
A Shared File System client is needed to create a security service. If omitted, the
`region` argument of the provider is used. Changing this creates a new
security service.
:param pulumi.Input[str] server: The security service host name or IP address.
:param pulumi.Input[str] type: The security service type - can either be active\_directory,
kerberos or ldap. Changing this updates the existing security service.
:param pulumi.Input[str] user: The security service user or group name that is used by the
tenant.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _SecurityServiceState.__new__(_SecurityServiceState)
__props__.__dict__["description"] = description
__props__.__dict__["dns_ip"] = dns_ip
__props__.__dict__["domain"] = domain
__props__.__dict__["name"] = name
__props__.__dict__["ou"] = ou
__props__.__dict__["password"] = password
__props__.__dict__["project_id"] = project_id
__props__.__dict__["region"] = region
__props__.__dict__["server"] = server
__props__.__dict__["type"] = type
__props__.__dict__["user"] = user
return SecurityService(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
The human-readable description for the security service.
Changing this updates the description of the existing security service.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="dnsIp")
def dns_ip(self) -> pulumi.Output[Optional[str]]:
"""
The security service DNS IP address that is used inside the
tenant network.
"""
return pulumi.get(self, "dns_ip")
@property
@pulumi.getter
def domain(self) -> pulumi.Output[Optional[str]]:
"""
The security service domain.
"""
return pulumi.get(self, "domain")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the security service. Changing this updates the name
of the existing security service.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def ou(self) -> pulumi.Output[Optional[str]]:
"""
The security service ou. An organizational unit can be added to
specify where the share ends up. New in Manila microversion 2.44.
"""
return pulumi.get(self, "ou")
@property
@pulumi.getter
def password(self) -> pulumi.Output[Optional[str]]:
"""
The user password, if you specify a user.
"""
return pulumi.get(self, "password")
@property
@pulumi.getter(name="projectId")
def project_id(self) -> pulumi.Output[str]:
"""
The owner of the Security Service.
"""
return pulumi.get(self, "project_id")
@property
@pulumi.getter
def region(self) -> pulumi.Output[str]:
"""
The region in which to obtain the V2 Shared File System client.
A Shared File System client is needed to create a security service. If omitted, the
`region` argument of the provider is used. Changing this creates a new
security service.
"""
return pulumi.get(self, "region")
@property
@pulumi.getter
def server(self) -> pulumi.Output[Optional[str]]:
"""
The security service host name or IP address.
"""
return pulumi.get(self, "server")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The security service type - can either be active\_directory,
kerberos or ldap. Changing this updates the existing security service.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def user(self) -> pulumi.Output[Optional[str]]:
"""
The security service user or group name that is used by the
tenant.
"""
return pulumi.get(self, "user")
|
StarcoderdataPython
|
3236403
|
# Copyright (c) 2020, <NAME>.
# Distributed under the MIT License. See LICENSE for more info.
"""
Explained variance
==================
This example will show the explained variance from a
`principal component analysis
<https://en.wikipedia.org/wiki/Principal_component_analysis>`_
as a function of the number of principal components considered.
"""
from matplotlib import pyplot as plt
import pandas as pd
from sklearn.datasets import load_wine
from sklearn.preprocessing import scale
from sklearn.decomposition import PCA
from psynlig import pca_explained_variance
plt.style.use('seaborn-talk')
data_set = load_wine()
data = pd.DataFrame(data_set['data'], columns=data_set['feature_names'])
data = scale(data)
pca = PCA()
pca.fit_transform(data)
pca_explained_variance(pca, marker='o', markersize=16, alpha=0.8)
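# Complementary check (added as an illustration, not part of the original
# gallery example): the cumulative explained variance from the fitted PCA
# object can be used to pick a component count.
import numpy as np

cumulative = np.cumsum(pca.explained_variance_ratio_)
n_components_95 = int(np.argmax(cumulative >= 0.95)) + 1
print(f'{n_components_95} components explain >= 95% of the variance')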
plt.show()
|
StarcoderdataPython
|
161844
|
<gh_stars>1-10
import csv
import inspect
import os
import pathlib
import string
from collections import defaultdict
from pathlib import Path
from typing import Any
import pytest
import rich
import typer
from hypothesis import given
from hypothesis import strategies
from hypothesis import strategies as st
from rich.table import Column
from toolz import itertoolz
from flyswot import inference
# flake8: noqa
text_strategy = st.text(string.ascii_letters, min_size=1)
def test__image_prediction_item_raises_error_when_too_few_params():
"""It raises Typerror when passed too few items"""
with pytest.raises(TypeError):
item = inference.ImagePredictionItem("A")
def test_image_prediction_items_match(tmp_path):
im_path = tmp_path / "image.tif"
label = "flysheet"
confidence = 0.9
item = inference.ImagePredictionItem(im_path, label, confidence)
assert item.path == im_path
assert item.predicted_label == "flysheet"
assert item.confidence == 0.9
    assert isinstance(item.path, (pathlib.PosixPath, pathlib.WindowsPath))
assert type(item.confidence) == float
@pytest.fixture(scope="session")
def imfile(tmpdir_factory):
image_dir = tmpdir_factory.mktemp("images")
imfile = image_dir.join("image1.tif")
imfile.ensure()
return imfile
@given(confidence=st.floats(max_value=100.0), label=text_strategy)
def test_image_prediction_item(confidence, label, imfile: Any):
item = inference.ImagePredictionItem(imfile, label, confidence)
assert item.path == imfile
assert item.predicted_label == label
assert item.confidence == confidence
assert type(item.confidence) == float
@given(confidence=st.floats(max_value=100.0), label=text_strategy)
def test_multi_image_prediction_item(confidence, label, imfile: Any):
item = inference.MultiLabelImagePredictionItem(
imfile, [{confidence: label}, {confidence: label}]
)
assert item.path == imfile
assert type(item.predictions) == list
assert type(item.predictions[0]) == dict
@given(confidence=st.floats(max_value=100.0), label=text_strategy)
def test_prediction_batch(confidence: float, label: str, imfile: Any):
item = inference.ImagePredictionItem(imfile, label, confidence)
item2 = inference.ImagePredictionItem(imfile, label, confidence)
batch = inference.PredictionBatch([item, item2])
assert batch.batch_labels
assert len(list(batch.batch_labels)) == 2
assert hasattr(batch.batch_labels, "__next__")
@given(
confidence=st.floats(min_value=0.0, max_value=100.0),
label=text_strategy,
)
def test_multi_prediction_batch(confidence: float, label: str, imfile: Any):
item = inference.MultiLabelImagePredictionItem(
imfile, [{confidence: label}, {confidence: label}]
)
item2 = inference.MultiLabelImagePredictionItem(
imfile, [{confidence: label}, {confidence: label}]
)
batch = inference.MultiPredictionBatch([item, item2])
assert batch.batch
assert type(batch.batch) == list
assert type(batch.batch[0]) == inference.MultiLabelImagePredictionItem
assert batch.batch_labels
assert len(list(batch.batch_labels)) == 2
assert hasattr(batch.batch_labels, "__next__")
    for labels in batch.batch_labels:
        for predicted_label in labels:
            assert predicted_label == label
FIXTURE_DIR = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"test_files",
)
@pytest.mark.datafiles(
os.path.join(
FIXTURE_DIR,
"fly_fse.jpg",
)
)
def test_predict_directory(datafiles, tmp_path) -> None:
inference.predict_directory(
datafiles,
tmp_path,
pattern="fse",
bs=1,
image_format=".jpg",
model_name="latest",
)
csv_file = list(tmp_path.rglob("*.csv"))
assert csv_file
with open(csv_file[0], newline="") as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
assert row["path"]
assert row["directory"]
columns = defaultdict(list)
with open(csv_file[0], newline="") as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
for (k, v) in row.items():
columns[k].append(v)
assert any("prediction" in k for k in columns)
labels = [columns[k] for k in columns if "prediction" in k]
confidences = [columns[k] for k in columns if "confidence" in k]
# check all labels are strings
assert all(map(lambda x: isinstance(x, str), (itertoolz.concat(labels))))
# check all confidences can be cast to float
assert all(
map(
lambda x: isinstance(x, float),
map(lambda x: float(x), (itertoolz.concat(confidences))),
)
)
@pytest.mark.datafiles(
os.path.join(
FIXTURE_DIR,
"fly_fse.jpg",
)
)
def test_predict_directory_local_mult(datafiles, tmp_path) -> None:
inference.predict_directory(
datafiles,
tmp_path,
pattern="fse",
bs=1,
image_format=".jpg",
model_name="20210629",
model_path="tests/test_files/mult/",
)
csv_file = list(tmp_path.rglob("*.csv"))
assert csv_file
with open(csv_file[0], newline="") as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
assert row["path"]
assert row["directory"]
columns = defaultdict(list)
with open(csv_file[0], newline="") as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
for (k, v) in row.items():
columns[k].append(v)
assert any("prediction" in k for k in columns)
labels = [columns[k] for k in columns if "prediction" in k]
confidences = [columns[k] for k in columns if "confidence" in k]
# check all labels are strings
assert all(map(lambda x: isinstance(x, str), (itertoolz.concat(labels))))
# check all confidences can be cast to float
assert all(
map(
lambda x: isinstance(x, float),
map(lambda x: float(x), (itertoolz.concat(confidences))),
)
)
def test_csv_header():
with pytest.raises(NotImplementedError):
inference.create_csv_header("string", Path("."))
def test_csv_header_multi(tmp_path):
prediction = inference.MultiLabelImagePredictionItem(Path("."), [{0.8: "label"}])
batch = inference.MultiPredictionBatch([prediction, prediction])
csv_fname = tmp_path / "test.csv"
inference.create_csv_header(batch, csv_fname)
with open(csv_fname, "r") as f:
reader = csv.DictReader(f)
list_of_column_names = [header for header in reader.fieldnames]
assert "path" in list_of_column_names
assert "directory" in list_of_column_names
def test_csv_header_single(tmp_path):
predicton = inference.ImagePredictionItem(Path("."), "label", 0.6)
batch = inference.PredictionBatch([predicton])
csv_fname = tmp_path / "test.csv"
inference.create_csv_header(batch, csv_fname)
with open(csv_fname, "r") as f:
reader = csv.DictReader(f)
list_of_column_names = [header for header in reader.fieldnames]
assert "path" in list_of_column_names
assert "directory" in list_of_column_names
assert "confidence" in list_of_column_names
def test_csv_batch():
with pytest.raises(NotImplementedError):
inference.write_batch_preds_to_csv("string", Path("."))
def test_csv_batch_single(tmp_path):
predicton = inference.ImagePredictionItem(Path("."), "label", 0.6)
batch = inference.PredictionBatch([predicton])
csv_fname = tmp_path / "test.csv"
inference.write_batch_preds_to_csv(batch, csv_fname)
with open(csv_fname, "r") as f:
reader = csv.DictReader(f)
for row in reader:
assert "label" in row
@given(
strategies.lists(text_strategy, min_size=2),
text_strategy,
)
def test_print_table(labels, title):
table = inference.print_table(labels, title, print=False)
assert isinstance(table, rich.table.Table)
assert table.title == title
unique = itertoolz.count(itertoolz.unique(labels))
assert table.row_count == unique + 1
assert all(
label in getattr(itertoolz.first(table.columns), "_cells") for label in labels
)
table = inference.print_table(labels, title, print=True)
@given(strategies.lists(text_strategy, min_size=1))
def test_check_files(l):
inference.check_files(l, "fse", Path("."))
def test_check_files_empty():
with pytest.raises(typer.Exit):
inference.check_files([], "fse", Path("."))
def test_make_layout():
layout = inference.make_layout()
assert layout
assert isinstance(layout, rich.layout.Layout)
# def test_predict_empty_directory(tmp_path) -> None:
# test_dir = tmp_path / "test"
# test_dir.mkdir()
# with pytest.raises(typer.Exit):
# inference.predict_directory(test_dir)
def test_onnx_session_attributes():
session = inference.OnnxInferenceSession(
Path("tests/test_files/mult/20210629/model/2021-06-29-model.onnx"),
Path("tests/test_files/mult/20210629/model/vocab.txt"),
)
assert session
assert inspect.ismethod(session.predict_image)
assert inspect.ismethod(session.predict_batch)
assert inspect.ismethod(session._postprocess)
assert inspect.ismethod(session._load_image)
|
StarcoderdataPython
|
182114
|
<reponame>juanaugusto/Serializer-Training-GloboNetworkAPI
# -*- coding: utf-8 -*-
class CookieHandler(object):
"""This class intends to Handle the cookie field described by the
OpenFlow Specification and present in OpenVSwitch.
    Cookie field has 64 bits. The first 32 bits are assigned to the id
    of the ACL input, the next 16 bits to the source port and the
    remaining 16 bits to the destination port.
    """
@staticmethod
def get_cookie(id_acl, src_port=0, dst_port=0):
id_acl = format(int(id_acl), '032b')
src_port = format(int(src_port), '016b')
dst_port = format(int(dst_port), '016b')
cookie = id_acl + src_port + dst_port
return int(cookie, 2)
@staticmethod
def get_id_acl(cookie):
cookie = format(cookie, '064b')
return int(cookie[0:32], 2)
@staticmethod
def get_src_port(cookie):
cookie = format(cookie, '064b')
return int(cookie[32:48], 2)
@staticmethod
def get_dst_port(cookie):
cookie = format(cookie, '064b')
return int(cookie[48:64], 2)
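# A minimal round-trip sketch (added here for illustration, not part of the
# original module): it packs a made-up ACL id and port pair into a cookie and
# unpacks them again with the three getters above.
if __name__ == '__main__':
    cookie = CookieHandler.get_cookie(id_acl=42, src_port=80, dst_port=8080)
    assert CookieHandler.get_id_acl(cookie) == 42
    assert CookieHandler.get_src_port(cookie) == 80
    assert CookieHandler.get_dst_port(cookie) == 8080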
|
StarcoderdataPython
|
108625
|
import six
import graphene
from graphene_django.filter.filterset import setup_filterset, GrapheneFilterSetMixin
from graphene_django.registry import get_global_registry
from graphql_relay.connection.connectiontypes import Connection, PageInfo, Edge
from graphql_relay.connection.arrayconnection import get_offset_with_default, offset_to_cursor
from graphene.types.argument import to_arguments
from django_filters import FilterSet, OrderingFilter
def _get_aggregate_class(name, registry):
""" Get the aggregate class as a subclass of ObjectType """
classname = str("Aggregate%s" % name)
aggregate_class = registry._registry.get(classname)
if not aggregate_class:
aggregate_class = type(
classname,
(graphene.ObjectType,),
{'count': graphene.Int()}
)
registry._registry[classname] = aggregate_class
return aggregate_class
def get_connection_class(model):
""" Get the connection class as a subclass of Connection """
registry = get_global_registry()
name = model._meta.object_name
aggregate_class = _get_aggregate_class(name, registry)
def resolve_aggregate(self, info):
return aggregate_class(count=self.length)
classname = str("%sConnection" % name)
connection_class = registry._registry.get(classname)
if not connection_class:
connection_class = type(
classname,
(graphene.Connection, ),
{
'aggregate': graphene.Field(aggregate_class),
'resolve_aggregate': resolve_aggregate,
'Meta': {'abstract': True},
}
)
registry._registry[classname] = connection_class
return connection_class
def get_where_unique_input_field(model):
""" Get the where input field as a subclass of InputObjectType """
registry = get_global_registry()
classname = str("%sWhereUniqueInput" % model._meta.object_name)
where_class = registry._registry.get(classname)
if not where_class:
where_class = type(
classname,
(graphene.InputObjectType, ),
{'id': graphene.ID(required=True)},
)
registry._registry[classname] = where_class
return graphene.NonNull(where_class)
def get_where_input_field(filter_fields, model):
""" Get the where input field as a subclass of InputObjectType """
registry = get_global_registry()
classname = str("%sWhereInput" % model._meta.object_name)
where_class = registry._registry.get(classname)
if not where_class:
where_class = type(
classname,
(graphene.InputObjectType, ),
to_arguments(filter_fields),
)
registry._registry[classname] = where_class
return where_class()
def get_ordering_field(filter_field, model):
""" Get the ordering fields as Enum """
registry = get_global_registry()
args = {}
for choice in filter_field.field.choices.choices:
field_name, label = choice
suffix = 'DESC' if field_name.startswith('-') else 'ASC'
name = '{}_{}'.format(field_name.replace('-', ''), suffix)
args[name] = field_name
classname = str("%sOrderByInput" % model._meta.object_name)
order_class = registry._registry.get(classname)
if not order_class:
order_class = type(
classname,
(graphene.Enum, ),
args
)
registry._registry[classname] = order_class
return order_class()
def get_filtering_args_from_filterset(filterset_class, type):
""" Inspect a FilterSet and produce the arguments to pass to
a Graphene Field. These arguments will be available to
filter against in the GraphQL
"""
from graphene_django.forms.converter import convert_form_field
args = {}
for name, filter_field in six.iteritems(filterset_class.base_filters):
if isinstance(filter_field, OrderingFilter):
field_type = get_ordering_field(filter_field, type._meta.model)
else:
field_type = convert_form_field(filter_field.field).Argument()
field_type.description = filter_field.label
args[name] = field_type
return args
def get_filterset_class(filterset_class, **meta):
"""Get the class to be used as the FilterSet"""
if filterset_class:
        # If we're given a FilterSet class, then set it up and
        # return it
return setup_filterset(filterset_class)
return custom_filterset_factory(**meta)
def custom_filterset_factory(model, filterset_base_class=FilterSet, order_by=None, **meta):
""" Create a filterset for the given model using the provided meta data
"""
meta.update({"model": model})
meta_class = type(str("Meta"), (object,), meta)
meta_dict = dict(Meta=meta_class)
assert order_by in [None, False], \
'order_by field can only be None or False'
if order_by is None:
order_by = tuple(field.name for field in model._meta.fields)
if order_by:
meta_dict.update({'order_by': OrderingFilter(fields=order_by)})
filterset = type(
str("%sFilterSet" % model._meta.object_name),
(filterset_base_class, GrapheneFilterSetMixin),
meta_dict,
)
return filterset
def connection_from_list_slice(list_slice, args=None, connection_type=None,
edge_type=None, pageinfo_type=None,
slice_start=0, list_length=0, list_slice_length=None):
'''
Given a slice (subset) of an array, returns a connection object for use in
GraphQL.
This function is similar to `connectionFromArray`, but is intended for use
cases where you know the cardinality of the connection, consider it too large
to materialize the entire array, and instead wish pass in a slice of the
total result large enough to cover the range specified in `args`.
'''
connection_type = connection_type or Connection
edge_type = edge_type or Edge
pageinfo_type = pageinfo_type or PageInfo
args = args or {}
before = args.get('before')
after = args.get('after')
first = args.get('first')
last = args.get('last')
skip = args.get('skip', 0)
if list_slice_length is None:
list_slice_length = len(list_slice)
slice_end = slice_start + list_slice_length
before_offset = get_offset_with_default(before, list_length)
after_offset = get_offset_with_default(after, -1)
start_offset = max(
slice_start - 1,
skip - 1,
after_offset,
-1
) + 1
end_offset = min(
slice_end,
before_offset,
list_length
)
if isinstance(first, int):
end_offset = min(
end_offset,
start_offset + first
)
if isinstance(last, int):
start_offset = max(
start_offset,
end_offset - last
)
# If supplied slice is too large, trim it down before mapping over it.
_slice = list_slice[
max(start_offset - slice_start, 0):
list_slice_length - (slice_end - end_offset)
]
edges = [
edge_type(
node=node,
cursor=offset_to_cursor(start_offset + i)
)
for i, node in enumerate(_slice)
]
first_edge_cursor = edges[0].cursor if edges else None
last_edge_cursor = edges[-1].cursor if edges else None
lower_bound = after_offset + 1 if after else 0
upper_bound = before_offset if before else list_length
if isinstance(connection_type, graphene.List):
return _slice
else:
return connection_type(
edges=edges,
page_info=pageinfo_type(
start_cursor=first_edge_cursor,
end_cursor=last_edge_cursor,
has_previous_page=isinstance(last, int) and start_offset > lower_bound,
has_next_page=isinstance(first, int) and end_offset < upper_bound
)
)
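# A minimal usage sketch (assumptions: the installed graphql_relay version
# exposes Connection/Edge/PageInfo with the snake_case attributes the default
# types above imply, and the whole list is already materialized):
if __name__ == '__main__':
    result = connection_from_list_slice(
        ['a', 'b', 'c'],
        args={'first': 2},
        slice_start=0,
        list_length=3,
        list_slice_length=3,
    )
    print([edge.node for edge in result.edges])  # ['a', 'b']
    print(result.page_info.has_next_page)        # True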
|
StarcoderdataPython
|
3333497
|
<filename>ternary_operator.py<gh_stars>0
a = 7
b = 1 if a >= 5 else 42
print(b)
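# For comparison, the same selection written as a plain if/else statement
# (`c` is a name introduced here purely for illustration):
if a >= 5:
    c = 1
else:
    c = 42
assert b == c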
|
StarcoderdataPython
|
1764149
|
# built-in
from argparse import REMAINDER, ArgumentParser
# app
from ..actions import get_python_env, get_resolver
from ..config import builders
from ..controllers import analyze_conflict
from ..models import Requirement
from ..package_manager import PackageManager
from .base import BaseCommand
class PackageInstallCommand(BaseCommand):
"""Download and install package into project environment.
"""
@classmethod
def get_parser(cls) -> ArgumentParser:
parser = cls._get_default_parser()
builders.build_config(parser)
builders.build_resolver(parser)
builders.build_venv(parser)
builders.build_output(parser)
builders.build_other(parser)
parser.add_argument('name', nargs=REMAINDER, help='package to install')
return parser
def __call__(self) -> bool:
# resolve
resolver = get_resolver(reqs=self.args.name)
self.logger.info('build dependencies graph...')
resolved = resolver.resolve(silent=self.config['silent'])
if not resolved:
conflict = analyze_conflict(resolver=resolver)
self.logger.warning('conflict was found')
print(conflict)
return False
# get executable
python = get_python_env(config=self.config)
# install
reqs = Requirement.from_graph(graph=resolver.graph, lock=True)
self.logger.info('installation...', extra=dict(
executable=str(python.path),
packages=len(reqs),
))
code = PackageManager(executable=python.path).install(reqs=reqs)
if code != 0:
return False
self.logger.info('installed')
return True
|
StarcoderdataPython
|
83972
|
import json
from multiprocessing import Pool
from random import randint
from typing import List, Dict, Callable, Any
import numpy as np
import os
from tqdm import tqdm
from pietoolbelt.datasets.common import BasicDataset
from pietoolbelt.pipeline.abstract_step import AbstractStep, DatasetInPipeline, AbstractStepDirResult
class StratificationResult(AbstractStepDirResult):
def __init__(self, path: str):
super().__init__(path)
self._meta_file = os.path.join(path, 'meta.json')
if os.path.exists(self._meta_file):
with open(self._meta_file, 'r') as meta_file:
self._meta = json.load(meta_file)
else:
self._meta = dict()
self._name2file = lambda name: name + '.npy' if len(name) < 4 or name[-4:] != '.npy' else name
self._name2path = lambda name: os.path.join(self._path, self._name2file(name))
def add_indices(self, indices: List[np.uint], name: str, dataset: BasicDataset):
dataset.set_indices(indices).flush_indices(self._name2path(name))
self._meta[name] = {'indices_num': len(indices)}
with open(self._meta_file, 'w') as meta_file:
json.dump(self._meta, meta_file)
def get_folds(self) -> List[str]:
return list(self._meta.keys())
def get_indices(self, name: str) -> List[np.ndarray]:
file_path = os.path.join(self._path, self._name2file(name))
if not os.path.exists(file_path):
            raise RuntimeError("Indices file doesn't exist [{}]".format(file_path))
return np.load(file_path)
def get_output_paths(self) -> List[str]:
return [self._path]
class DatasetStratification:
def __init__(self, dataset: BasicDataset, calc_target_label: Callable[[Any], Any], result: StratificationResult, workers_num: int = 0):
self._dataset = dataset
self._calc_label = calc_target_label
self._progress_clbk = None
self._workers_num = workers_num
self._result = result
@staticmethod
def __fill_hist(target_hist: [], indices: {}):
def pick(d):
idx = randint(0, len(indices[d]) - 1)
res = indices[d][idx]
del indices[d][idx]
return res
res = {}
for idx, d in enumerate(target_hist):
idxes = []
for _ in range(d):
idxes.append(pick(idx))
res[idx] = idxes
return res
def calc_hist(self, dataset: BasicDataset):
labels = []
if self._workers_num > 1:
with Pool(self._workers_num) as pool, tqdm(total=len(dataset)) as pbar:
for label in pool.imap(self._calc_label, dataset.get_items(), chunksize=self._workers_num * 10):
labels.append(label)
pbar.update()
else:
for d in tqdm(dataset.get_items(), total=len(dataset)):
labels.append(self._calc_label(d))
hist = [[] for _ in range(max(labels))]
for i, idxes in enumerate(labels):
hist[idxes - 1].append(i)
return np.array([len(v) for v in hist]), hist
    def calc_multi_hist(self, dataset: BasicDataset):
labels = []
if self._workers_num > 1:
with Pool(self._workers_num) as pool, tqdm(total=len(dataset)) as pbar:
for label in pool.imap(self._calc_label, dataset.get_items(), chunksize=self._workers_num * 10):
labels.append(label)
pbar.update()
else:
for d in tqdm(dataset.get_items(), total=len(dataset)):
labels.append(self._calc_label(d))
percent = np.percentile(np.array(labels)[:, 1], np.linspace(0, 100, 10)).tolist()
out_p = []
for p in percent:
if percent.index(p) % 2 != 0:
out_p.append(p)
hist_1 = [[] for _ in range(int(max(np.array(labels)[:, 0])) + 1)]
for i, idxes in enumerate(labels):
hist_1[int(idxes[0])].append(i)
hist_2 = [[] for _ in range(len(out_p))]
for i, idxes in enumerate(labels):
for p in range(len(out_p)):
if p == 0 and idxes[1] <= out_p[p]:
hist_2[p].append(i)
elif p != 0 and out_p[p - 1] < idxes[1] <= out_p[p]:
hist_2[p].append(i)
hist = [[] for _ in range(len(hist_1) * len(hist_2))]
for i, idxes in enumerate(labels):
index_h1, index_h2 = self.get_hist_idx(i, hist_1), self.get_hist_idx(i, hist_2)
if index_h2 == -1 or index_h1 == -1:
raise Exception("Index error in histograms")
hist[int(index_h1 * index_h2) - 1].append(i)
return np.array([len(v) for v in hist]), hist
def stratificate_dataset(self, hist: np.ndarray, indices: list, parts: [float]) -> []:
res = []
for part in parts[:len(parts) - 1]:
target_hist = (hist.copy() * part).astype(np.uint32)
res.append([target_hist, self.__fill_hist(target_hist, indices)])
res.append([np.array([len(i) for i in indices]).astype(np.uint32), {i: v for i, v in enumerate(indices)}])
return res
@staticmethod
def get_hist_idx(x, hist):
res = -1
for h in hist:
res = hist.index(h) if x in h else res
return res
@staticmethod
def check_indices_for_intersection(indices: []):
for i in range(len(indices)):
for index in indices[i]:
for other_indices in indices[i + 1:]:
if index in other_indices:
raise Exception('Indices intersects')
def balance_classes(self, hist: np.ndarray, indices: {}) -> tuple:
target_hist = hist.copy()
target_hist[np.argmax(target_hist)] = np.sum(target_hist[target_hist != target_hist.max()])
return target_hist, self.__fill_hist(target_hist, indices)
def _flush_indices(self, indices: [], part_indices: [], path: str):
inner_indices = [part_indices[it] for bin in indices[1].values() for it in bin]
self._result.add_indices(indices=inner_indices, name=path, dataset=self._dataset)
return inner_indices
def run(self, parts: {str: float}, multi_hist=False) -> None:
if sum(parts.values()) > 1:
raise RuntimeError("Sum of target parts greater than 1")
parts = [[path, part] for path, part in parts.items()]
pathes = [p[0] for p in parts]
parts = [p[1] for p in parts]
part_indices = {i: i for i in range(len(self._dataset))}
        hist, indices = self.calc_multi_hist(self._dataset) if multi_hist else self.calc_hist(self._dataset)
stratificated_indices = self.stratificate_dataset(hist, indices, parts)
indices_to_check = []
for i, cur_indices in enumerate(stratificated_indices):
indices_to_check.append(self._flush_indices(cur_indices, part_indices, pathes[i]))
self._dataset.remove_indices()
self.check_indices_for_intersection(indices_to_check)
class PipelineDatasetStratification(DatasetStratification, AbstractStep):
def __init__(self, dataset: DatasetInPipeline, calc_target_label: callable, result: StratificationResult, workers_num: int = 1):
DatasetStratification.__init__(self, dataset, calc_target_label, result=result, workers_num=workers_num)
AbstractStep.__init__(self, input_results=[dataset], output_res=result)
|
StarcoderdataPython
|
3237286
|
<filename>test_all.py<gh_stars>0
import copy
import random
import unittest
from unittest.mock import MagicMock
import enemy
import tower
from extras import Shot
from main import towerDefense
class GameTest(unittest.TestCase):
def tearDown(self):
towerDefense._instance = None
def test_shot(self):
enemy_colours = ["white", "pink", "yellow", "cyan", "maroon"]
colour = random.choice(enemy_colours)
dummy_enemy = enemy.Enemy(0, 0, 40, (0, 0), [[]], 5, colour)
dummy_tower = tower.OrangeTower(0, 0, [[]], 40)
self.assertEqual(0, len(dummy_tower.shots))
r = random.randint(0, 10)
print(r)
print(colour)
for i in range(r):
dummy_tower.fireShot(dummy_enemy)
self.assertEqual(r, len(dummy_tower.shots))
def test_getRowCol(self):
enemy_colours = ["white", "pink", "yellow", "cyan", "maroon"]
colour = random.choice(enemy_colours)
dummy_enemy = enemy.Enemy(15, 15, 40, (0, 1), [[]], 5, colour)
l1 = dummy_enemy.location
c1 = dummy_enemy.center
dummy_enemy.moveEnemy()
l2 = dummy_enemy.location
c2 = dummy_enemy.center
print(l1)
print(l2)
print(c1)
print(c2)
def test_get_color(self):
tower_classes = (tower.OrangeTower, tower.RedTower,
tower.PurpleTower, tower.GreenTower)
tower_class = random.choice(tower_classes)
dummy_tower_colour = tower_class.__name__.replace("Tower", "").lower()
if dummy_tower_colour == "purple":
dummy_tower_colour = "#8C489F"
dummy_tower = tower_class(0, 0, [[]], 40)
self.assertEqual(dummy_tower_colour, dummy_tower.get_color())
def test_isOffScreen(self):
board = [[0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 3]]
tower_classes = (tower.OrangeTower, tower.RedTower,
tower.PurpleTower, tower.GreenTower)
tower_class = random.choice(tower_classes)
dummy_tower = tower_class(0, 0, [[]], 40)
enemy_colours = ["white", "pink", "yellow", "cyan", "maroon"]
colour = random.choice(enemy_colours)
dummy_enemy = enemy.Enemy(15, 15, 40, (0, 1), [[]], 5, colour)
shot = Shot(dummy_tower, dummy_enemy, board, 40)
nshot = copy.deepcopy(shot)
index = random.randint(0, 1)
value = random.randint(0, 100)
nshot.location[index] = -value
self.assertEqual(nshot.isOffScreen(), True)
self.assertEqual(shot.isOffScreen(), False)
def test_slowSpeed(self):
enemy_colours = ["white", "pink", "yellow", "cyan", "maroon"]
colour = random.choice(enemy_colours)
dummy_enemy = enemy.Enemy(15, 15, 40, (0, 1), [[]], 5, colour)
dummy_enemy.slowSpeed()
assert (dummy_enemy.speedFactor == 0.2)
def test_setSpeedFactor(self):
enemy_colours = ["white", "pink", "yellow", "cyan", "maroon"]
colour = random.choice(enemy_colours)
dummy_enemy = enemy.Enemy(15, 15, 40, (0, 1), [[]], 5, colour)
if colour == "pink" or colour == "yellow":
assert (dummy_enemy.speedFactor == 8.0 / 5)
elif colour == "cyan" or colour == "maroon":
assert (dummy_enemy.speedFactor == 2.0)
else:
assert (dummy_enemy.speedFactor == 1)
def test_tower_defense_is_a_singleton(self):
instance = towerDefense()
self.assertEqual(instance, instance._instance)
        with self.assertRaisesRegex(RuntimeError, "This class is a singleton!"):
            towerDefense()
def test_draw_tower_desc_uses_correct_tower(self):
def get_tower_defense(boardDim, width, height):
towerDefense._instance = None
_td = towerDefense()
_td.orangeTower = 'Orange'
_td.redTower = 'Red'
_td.greenTower = 'Green'
_td.purpleTower = 'Purple'
_td.boardDim = boardDim
_td.width = width
_td.height = height
_td.drawTowerIcon = lambda _: None
_td.drawTowerChars = MagicMock()
_td.getText = lambda _: 'dummy_text'
_td.canvas = MagicMock()
return _td
button = MagicMock()
for color in ('Orange', 'Red', 'Green', 'Purple'):
td = get_tower_defense(1, 2, 3)
button.iconColor = color
td.drawTowerDesc(button)
td.drawTowerChars.assert_called_with(color)
td.canvas.create_text.assert_called_with(
-98.5,
-42,
text='dummy_text',
fill='white',
justify='center')
td = get_tower_defense(None, None, None)
button.iconColor = None
        self.assertRaisesRegex(RuntimeError,
                               'Invalid button icon color.',
                               td.drawTowerDesc,
                               button)
|
StarcoderdataPython
|
1799077
|
"""
Convert plain text to format accepted by model (token idxs + special tokens).
"""
import warnings
import functools
from collections import namedtuple, Counter, OrderedDict
import spacy
import numpy as np
NLP = None
def get_spacy():
global NLP
if NLP is None:
NLP = spacy.load("en_core_web_sm", disable=["parser", "tagger", "ner", "textcat"])
NLP.max_length = (
800000000 # approximately one volume of the encyclopedia britannica.
)
return NLP
EncodedOutput = namedtuple(
"EncodedOutput",
[
"token_ids", # list of list of subtoken ids (ints)
"tokens", # list of list of subtokens (strs)
"token_ends", # list of list of character locations (ints)
"token_starts", # list of list of character starts (locs are character ends) (ints)
"useful_start", # an int token idx - where to begin using the predictions when chunking
"useful_end", # an int token idx - where to stop using the predictions when chunking
"input_text", # The raw text - pre tokenization.
"offset", # Offset applied to the token_starts and ends - currently used only by DocumentLabeler
],
)
EncodedOutput.__new__.__defaults__ = (None,) * len(EncodedOutput._fields)
SUBS = {"—": "-", "–": "-", "―": "-", "…": "...", "´": "'"}
INFO_KEYS = ["text", "start", "end", "first_col", "first_row", "last_col", "last_row"]
def get_pairs(word):
"""
Return set of symbol pairs in a word.
word is represented as tuple of symbols (symbols being variable-length strings)
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
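# Example (illustrative): get_pairs(('l', 'o', 'w')) returns
# {('l', 'o'), ('o', 'w')} - every adjacent symbol pair exactly once.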
def _flatten(nested_lists):
return functools.reduce(lambda x, y: x + y, nested_lists, [])
def _remove_repeated_whitespace(encoded):
batch_token_idxs = []
batch_tokens = []
batch_char_ends = []
batch_char_starts = []
for token_ids, tokens, token_ends, token_starts in zip(
encoded.token_ids, encoded.tokens, encoded.token_ends, encoded.token_starts
):
mask = [
i != 0 and token.strip(" ") == tokens[i - 1].strip(" ") == "" for i, token in enumerate(tokens)
]
batch_token_idxs.append([x for x, c in zip(token_ids, mask) if not c])
batch_tokens.append([x for x, c in zip(tokens, mask) if not c])
batch_char_ends.append([x for x, c in zip(token_ends, mask) if not c])
batch_char_starts.append([x for x, c in zip(token_starts, mask) if not c])
return EncodedOutput(
token_ids=batch_token_idxs,
tokens=batch_tokens,
token_ends=batch_char_ends,
token_starts=batch_char_starts,
)
class CacheDict(OrderedDict):
def __init__(self, *args, cache_len: int = 50000, **kwargs):
"""
Simple Cache dictionary
modified from https://gist.github.com/davesteele/44793cd0348f59f8fadd49d7799bd306
"""
if cache_len < 1:
            raise ValueError(
                "cache_len must be at least 1, got cache_len={}".format(cache_len)
            )
self.cache_len = cache_len
super().__init__(*args, **kwargs)
def __setitem__(self, key, value):
super().__setitem__(key, value)
super().move_to_end(key)
if len(self) > self.cache_len:
oldkey = next(iter(self))
super().__delitem__(oldkey)
def __getitem__(self, key):
val = super().__getitem__(key)
super().move_to_end(key)
return val
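# Example (illustrative): with cache_len=2, inserting a third key evicts the
# least recently used one:
#     cache = CacheDict(cache_len=2)
#     cache['a'], cache['b'] = 1, 2
#     cache['c'] = 3                  # 'a' is evicted
#     assert 'a' not in cache and cache['b'] == 2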
class SingletonMeta(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(SingletonMeta, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class BaseEncoder(metaclass=SingletonMeta):
"""
Base class for text encoders.
Translates raw texts into structured token arrays
"""
UNK_IDX = 0
def __init__(self, encoder_path, vocab_path):
self.encoder_path = encoder_path
self.vocab_path = vocab_path
# Required public attributes -- consider refactor to prevent requiring direct
# access of these attributes
self.special_tokens = None
self.start_token = None
self.delimiter_token = None
self.end_token = None
self.encoder = None
self.decoder = None
self.cache = CacheDict()
@property
def vocab_size(self):
return len(self.encoder)
def __getitem__(self, key):
return self.encoder[key]
def __setitem__(self, key, value):
self.encoder[key] = value
def _encode(self, texts):
"""
Convert a batch of raw text to a batch of byte-pair encoded token indices.
"""
raise NotImplementedError
def decode(self, ids):
"""
Convert a batch of ids [batch_size, id] into text(ish).
"""
raise NotImplementedError
def _cut_and_concat(
self,
*,
encoded,
max_length,
special_tokens=None,
start=None,
delimiter=None,
end=None,
include_bos_eos=True,
eos_on_cut=True,
):
"""
Takes some tokenized text and arranges it into a format that maximises the amount of kept text from each
whilst keeping the overall sequence length within max_length tokens. It also adds the 3 special tokens. Start,
Classify and Delimiter.
:param encoded: Lists of shape [batch, n_fields, num_tokens]
:param max_length: Int representing the max length of a single sample
:param start: Override the default start token.
:param delimiter: Override the default delimiter token.
:param end: Override the default classify token
:return: Formatted outputs of the form. [batch, num_tokens] where num_tokens' <= max_length
"""
start = start or special_tokens or self.start_token
delimiter = delimiter or special_tokens or self.delimiter_token
clf_token = end or special_tokens or self.end_token
num_samples = len(encoded)
adjusted_max_length = max_length - num_samples - 1
allocated_max_len = adjusted_max_length // num_samples
overflows = [allocated_max_len - len(sequence) for sequence in encoded]
spare = sum(overflows)
if spare >= 0:
cut_len = None
else:
warnings.warn(
"Document is longer than max length allowed, trimming document to {} tokens. Try chunk_long_sequences=True".format(
max_length
)
)
empty_tokens = sum(max(overflow, 0) for overflow in overflows)
num_over = [max(overflow, 0) for overflow in overflows].count(0)
if num_over == 0:
cut_len = allocated_max_len
else:
cut_len = allocated_max_len + (empty_tokens // num_over)
if include_bos_eos == True or include_bos_eos == "bos":
joined = [start]
else:
joined = []
for d in encoded:
joined += d[:cut_len] + [delimiter]
joined = joined[:-1]
if include_bos_eos == True or include_bos_eos == "eos":
if eos_on_cut or cut_len is None:
joined += [clf_token]
return joined
def _token_length(self, token):
return len(token)
def encode_multi_input(
self,
Xs,
max_length=None,
remove_repeated_whitespace=False,
include_bos_eos=True,
):
"""
Encodes the text for passing to the model, also tracks the location of each token to allow reconstruction.
It can also, optionally, construct a per-token labels as required for training.
:param Xs: A list of lists of string -- [n_fields, n_segments]
:param Y: A list of list of targets -- [n_batch, n_segments]
:param max_length: Max length of the sequences.
:return: A Labeled Sequence Object.
"""
encoded = self._encode(Xs)
if remove_repeated_whitespace:
encoded = _remove_repeated_whitespace(encoded)
# merge fields + truncate if necessary
token_ids = self._cut_and_concat(
encoded=encoded.token_ids,
max_length=max_length,
include_bos_eos=include_bos_eos,
)
tokens = self._cut_and_concat(
encoded=encoded.tokens,
max_length=max_length,
include_bos_eos=include_bos_eos,
)
token_ends = self._cut_and_concat(
encoded=encoded.token_ends,
max_length=max_length,
special_tokens=-1,
include_bos_eos=include_bos_eos,
)
token_starts = self._cut_and_concat(
encoded=encoded.token_starts,
max_length=max_length,
special_tokens=-1,
include_bos_eos=include_bos_eos,
)
return EncodedOutput(
token_ids=np.asarray(token_ids),
tokens=np.array(tokens),
token_ends=np.asarray(token_ends),
token_starts=np.asarray(token_starts),
)
def __setstate__(self, state):
self.__init__()
def __getstate__(self):
raise ValueError(
"We do not want to be serializing text encoders. Please use getters from input_pipeline."
)
def tokenize_context(context, encoded_output, config):
""" Tokenize the context corresponding to a single sequence of text """
# in the edge case where the chunk is just a single end token, we don't need to alter our context chunk
seq_len = len(encoded_output.token_ids)
context_keys = list(k for k in sorted(context[0].keys()) if k not in INFO_KEYS)
context_by_char_loc = sorted(
[(c["end"], [c[k] for k in context_keys], c.get("text")) for c in context],
key=lambda c: c[0],
)
# default context is set by user in config
default_context = [config.default_context[k] for k in context_keys]
current_char_loc = 0
tokenized_context = []
assert len(encoded_output.tokens) == len(encoded_output.token_ends)
assert encoded_output.token_starts[1] <= encoded_output.token_ends[-2]
for i, (token, char_loc) in enumerate(
zip(encoded_output.tokens, encoded_output.token_ends)
):
# Note: this assumes that the tokenization will never lump multiple tokens into one
# (this would not be the case if multiple context spans make up the same token)
if char_loc == -1:
tokenized_context.append(default_context)
else:
while token.strip() and char_loc > context_by_char_loc[current_char_loc][0]:
current_char_loc += 1
if current_char_loc >= len(context_by_char_loc):
raise ValueError(
"Context cannot be fully matched as it appears to not cover the end of the sequence for token {}".format(
token
)
)
if (
context_by_char_loc[current_char_loc][2]
and token.strip() not in context_by_char_loc[current_char_loc][2]
):
warnings.warn(
"subtoken: {} has matched up with the context for token: {}".format(
repr(token), repr(context_by_char_loc[current_char_loc][2])
)
)
tokenized_context.append(context_by_char_loc[current_char_loc][1])
assert len(tokenized_context) == len(encoded_output.token_ends)
# padded value doesn't matter since it will be masked out
expanded_context = np.pad(
tokenized_context, ((0, seq_len - len(tokenized_context)), (0, 0)), "constant"
)
assert len(expanded_context) == len(encoded_output.token_ids)
return expanded_context
|
StarcoderdataPython
|
56089
|
<filename>lightning_pass/gui/mouse_randomness.py
"""Module containing classes used for operations with mouse randomness generation."""
import random
import string
from typing import Generator, NamedTuple, Optional
from PyQt5 import QtCore, QtWidgets
class MouseTracker(QtCore.QObject):
"""This class contains functionality for setting up a mouse tracker over a chosen label.
:param QLabel widget: QLabel widget which will be used for tracking
"""
position_changed = QtCore.pyqtSignal(QtCore.QPoint)
__slots__ = "widget"
def __init__(self, widget: QtWidgets.QLabel) -> None:
"""Class constructor."""
super().__init__(widget)
self.widget = widget
self.widget.setMouseTracking(True)
self.widget.installEventFilter(self)
def __repr__(self):
"""Provide information about this class."""
return f"{self.__class__.__qualname__}({self.widget!r})"
def eventFilter(
self,
label: QtWidgets.QLabel,
event: QtCore.QEvent.MouseMove,
) -> object:
"""Event filter.
:param QLabel label: Label object
:param MouseMove event: Mouse move event
:returns: eventFilter of super class
"""
if label is self.widget and event.type() == QtCore.QEvent.MouseMove:
self.position_changed.emit(event.pos())
return super().eventFilter(label, event)
@staticmethod
def setup_tracker(
label: QtWidgets.QLabel,
on_change: QtCore.pyqtBoundSignal,
) -> None:
"""Set up a mouse tracker over a specified label."""
tracker = MouseTracker(label)
tracker.position_changed.connect(on_change)
class PasswordOptions(NamedTuple):
"""Store the chosen password options."""
length: int
numbers: bool
symbols: bool
lowercase: bool
uppercase: bool
class Chars(NamedTuple):
"""Store the characters that should be used while password generation."""
chars: str
length: int
def printable_options(options: PasswordOptions) -> Chars:
"""Return all of the printable chars to be used.
:param options: The given options
"""
final = "".join(
(
chars
for option, chars in zip(
# not using first length value
options[1:],
(
string.digits,
string.punctuation,
string.ascii_lowercase,
string.ascii_uppercase,
),
)
if option
),
)
return Chars(final, len(final))
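# Example (illustrative): with only numbers and lowercase enabled,
# printable_options(PasswordOptions(8, True, False, True, False)) returns
# Chars(chars=string.digits + string.ascii_lowercase, length=36).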
class PwdGenerator:
"""Holds user's chosen parameters for password generation and contains the password generation functionality.
:param options: The data container containing the password options chosen by the user
"""
__slots__ = "options", "chars", "password", "div", "coro"
def __init__(self, options: PasswordOptions) -> None:
"""Construct the class."""
self.options = options
self.chars = printable_options(self.options)
self.password = ""
self.div = int(1_000 // self.options.length)
self.coro = self.coro_div_check()
# advance the generator to the first yield
next(self.coro)
def __repr__(self) -> str:
"""Provide information about this class."""
return f"{self.__class__.__qualname__}({self.options!r})"
def coro_div_check(self) -> Generator[bool, int, bool]:
"""Coroutine used to check whether a character should be collected."""
# stops yielding if length has been reached
while len(self.password) <= self.options.length:
try:
# wait for sent value
yield True if (yield) % self.div == 0 else False
except (ZeroDivisionError, TypeError):
yield False
return False
def get_character(self, x: int, y: int) -> Optional[str]:
"""Get a eligible password character by generating a random seed from the mouse position axis.
Chooses an item from the ``chars`` attribute based on the calculated index.
:param x: The x axis mouse position
:param y: The y axis mouse position
"""
if len(self.password) > self.options.length:
return
random.seed(x + 1j * y)
flt = random.random()
div = 1 / self.chars.length
index = int(flt // div)
self.password += self.chars.chars[index]
__all__ = [
"Chars",
"MouseTracker",
"PasswordOptions",
"PwdGenerator",
"printable_options",
]
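# A minimal generation sketch (illustration only; importing the module still
# requires PyQt5, and the coordinate pairs below are made-up stand-ins for
# real mouse positions fed in by the tracker):
if __name__ == '__main__':
    options = PasswordOptions(
        length=12, numbers=True, symbols=True, lowercase=True, uppercase=True
    )
    generator = PwdGenerator(options)
    for x, y in ((120, 340), (55, 610), (480, 90)):
        generator.get_character(x, y)
    print(generator.password)  # three pseudo-random characters so far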
|
StarcoderdataPython
|
3280557
|
from BiModNeuroCNN.version import __version__
|
StarcoderdataPython
|
11923
|
# Generated by Django 2.1.5 on 2019-05-04 07:55
import blog.formatChecker
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0040_auto_20190504_0840'),
]
operations = [
migrations.AlterField(
model_name='videos',
name='video',
field=models.FileField(blank=True, null=True, upload_to='uploads/', validators=[blog.formatChecker.file_size]),
),
]
|
StarcoderdataPython
|
78436
|
<reponame>hkhpub/dstc6-track1<filename>scripts/score.py
import json
### The current dialog format
### [{dialog_id : " ", lst_candidate_id: [{candidate_id: " ", rank: " "}, ...]}]
def do_parse_cmdline():
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--input-result-file-test", dest="inputfiletest",
# default="../output/output-result-task1-tfidf.json",
# default="../output/output-result-task2-tfidf.json",
# default="../output/output-result-task3-tfidf.json",
# default="../output/output-result-task4-tfidf.json",
default="../output/output-result-task5-tfidf.json",
help="filename of the task", metavar="FILE")
parser.add_option("--input-result-file-truth", dest="inputfiletruth",
# default="../output/output-task1-truth.json",
# default="../output/output-task2-truth.json",
# default="../output/output-task3-truth.json",
# default="../output/output-task4-truth.json",
default="../output/output-task5-truth.json",
help="filename of the task", metavar="FILE")
parser.add_option("--input-nb-candidate", dest="inputnbcandidate",
default=10,
help="nbr. candidate", metavar="INTEGER")
(options, args) = parser.parse_args()
return options.inputfiletest, options.inputfiletruth, \
int(options.inputnbcandidate)
def do_load_json_result(filename, nb_candidate):
dict_result = {}
with open(filename, 'rb') as fd:
json_data = json.load(fd)
if (type(json_data) != list):
print "[Error] The result file should be a list ..."
exit(1)
for item in json_data:
if (item.get('dialog_id') == None):
print "[Error] No dialog_id key founded ..."
continue
if (item.get('lst_candidate_id') == None):
print "[Error] No lst_candidate_id key founded ..."
exit(1)
lst_candidate = [None] * nb_candidate
for candidate in item['lst_candidate_id']:
if (candidate.get('rank') == None):
print "[Error] one candidate has no rank key ..."
exit(1)
if (candidate.get('candidate_id') == None):
print "[Error] one candidate has no candidate_id key ..."
exit(1)
if (int(candidate["rank"]) <= nb_candidate):
lst_candidate[int(candidate["rank"]) - 1] = candidate['candidate_id']
dict_result[item['dialog_id']] = lst_candidate
return dict_result
def do_compute_score(dict_result_truth, dict_result_test, precision_at):
nb_true = 0.0
for key in dict_result_truth.keys():
if dict_result_test.get(key) is not None:
if dict_result_truth[key][0] in dict_result_test[key][0:precision_at]:
nb_true += 1.0
return nb_true / len(dict_result_truth)
if __name__ == '__main__':
# Parsing command line
inputfiletest, inputfiletruth, nbcandidate = do_parse_cmdline()
dict_result_test = do_load_json_result(inputfiletest, nbcandidate)
dict_result_truth = do_load_json_result(inputfiletruth, 1)
### Accuracy - Precision @1
print str("Precision @1: ") + str(do_compute_score(dict_result_truth, dict_result_test, 1))
print str("Precision @2: ") + str(do_compute_score(dict_result_truth, dict_result_test, 2))
print str("Precision @5: ") + str(do_compute_score(dict_result_truth, dict_result_test, 5))
|
StarcoderdataPython
|
93163
|
<filename>p3_collab-compet/test_maddpg_agent.py
import unittest
from model import Actor, Critic
from maddpg_agent import MultiAgent
import numpy as np
from replay_buffer import ReplayBuffer
from noise import OUNoise
import torch
from maddpg_agent_other import MADDPG_Agent
from collections import namedtuple
class TestAgent(unittest.TestCase):
def setUp(self):
self.state_dim = 24
self.action_dim = 2
self.num_agents = 2
seed = 2
def create_actor():
return Actor(state_dim=self.state_dim, action_dim=self.action_dim, fc1_units=64, fc2_units=64, seed=seed)
def create_critic():
return Critic(
state_dim=self.state_dim * self.num_agents,
action_dim=self.action_dim * self.num_agents,
fc1_units=64,
fc2_units=64,
seed=seed)
def create_noise():
return OUNoise(size=self.action_dim, seed=seed)
self.multi_agent = MultiAgent(
num_agents=self.num_agents,
create_actor=create_actor,
create_critic=create_critic,
replay_buffer=None,
create_noise=create_noise,
state_dim=self.state_dim,
action_dim=self.action_dim,
episodes_before_train=100,
seed=seed)
self.multi_agent_other = MADDPG_Agent(
n_agents=self.num_agents,
dim_obs=self.state_dim,
dim_act=self.action_dim,
batch_size=10,
capacity=int(1e5),
eps_b_train=100)
def test_act(self):
states = np.random.random_sample((self.num_agents, self.state_dim))
actions = self.multi_agent.act(states)
actions_other = self.multi_agent_other.act(torch.tensor(states).float()).data.numpy()
np.testing.assert_array_equal(actions, actions_other)
def test_learn(self):
samples = self.n_samples(10)
tensor_samples = self.multi_agent.to_tensor(samples=samples)
actor_loss, critic_loss = self.multi_agent.learn(agent_i=0, samples=tensor_samples)
critic_loss1, actor_loss1, = self.multi_agent_other.learn_impl(transitions=self.to_experiences(tensor_samples), agent=0)
self.assertEqual(critic_loss, critic_loss1.detach().item())
self.assertEqual(actor_loss, actor_loss1.detach().item())
def n_samples(self, n):
states = []
full_states = []
actions = []
rewards = []
next_states = []
full_next_states = []
dones = []
for _ in range(n):
state = np.random.random_sample((self.num_agents, self.state_dim))
full_state = state.reshape(-1)
action = np.random.random_sample((self.num_agents, self.action_dim))
reward = np.random.random_sample(self.num_agents)
next_state = np.random.random_sample((self.num_agents, self.state_dim))
full_next_state = next_state.reshape(-1)
done = np.random.choice(a=[False, True], size=self.num_agents, p=[0.95, 0.05])
states.append(state)
full_states.append(full_state)
actions.append(action)
rewards.append(reward)
next_states.append(next_state)
full_next_states.append(full_next_state)
dones.append(done)
return (np.array(states), np.array(full_states), np.array(actions), np.array(rewards),
np.array(next_states), np.array(full_next_states), np.array(dones))
@staticmethod
def to_experiences(samples):
states, full_states, actions, rewards, next_states, full_next_states, dones = samples
experiences = [(s, a, ns, r) for s, a, r, ns in zip(states, actions, rewards, next_states)]
return experiences
def test_target_and_local_act(self):
n_samples = 10
states = np.random.random_sample((n_samples, self.num_agents, self.state_dim))
states = torch.from_numpy(states).float()
actions = self.multi_agent.target_act(states)
self.assertEqual((n_samples, self.num_agents, self.action_dim), actions.shape)
actions2 = self.multi_agent.local_act(states)
self.assertEqual((n_samples, self.num_agents, self.action_dim), actions2.shape)
|
StarcoderdataPython
|
1716728
|
<filename>aula07/ex06.py
tabuada = int(input("insira um número pra ver sua tabuada: "))
um = tabuada*1
dois = tabuada*2
tres = tabuada*3
quatro = tabuada*4
cinco = tabuada*5
seis = tabuada*6
sete = tabuada*7
oito = tabuada*8
nove = tabuada*9
dez = tabuada*10
print('-' * 12)
print('{} x 1 = {}'.format(tabuada, um))
print('{} x 2 = {}'.format(tabuada, dois))
print('{} x 3 = {}'.format(tabuada, tres))
print('{} x 4 = {}'.format(tabuada, quatro))
print('{} x 5 = {}'.format(tabuada, cinco))
print('{} x 6 = {}'.format(tabuada, seis))
print('{} x 7 = {}'.format(tabuada, sete))
print('{} x 8 = {}'.format(tabuada, oito))
print('{} x 9 = {}'.format(tabuada, nove))
print('{} x 10 = {}'.format(tabuada, dez))
print('-' * 12)
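# The same table could be produced with a for loop (sketch, same output for
# the middle lines):
# for i in range(1, 11):
#     print('{} x {} = {}'.format(tabuada, i, tabuada * i))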
|
StarcoderdataPython
|
26789
|
<gh_stars>10-100
#!/usr/bin/env python
f = open("repair.log", "r");
lines = f.readlines();
cnt = 0;
for line in lines:
tokens = line.strip().split();
if (len(tokens) > 3):
if (tokens[0] == "Total") and (tokens[1] == "return"):
cnt += int(tokens[3]);
if (tokens[0] == "Total") and (tokens[2] == "different") and (tokens[3] == "repair"):
cnt += int(tokens[1]);
print "Total size: " + str(cnt);
|
StarcoderdataPython
|
3322970
|
# Copyright 2016, 2019 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Order Store Service
Paths:
------
GET /orders - Returns a list all of the orders
GET /orders/{id} - Returns the order with a given id number
POST /orders - creates a new order record in the database
PUT /orders/{id} - updates an order record in the database
DELETE /orders/{id} - deletes an order record in the database
"""
import sys
import secrets
import logging
from functools import wraps
from flask import jsonify, request, url_for, make_response, abort
from flask_restx import Api, Resource, fields, reqparse, inputs
from service.models import Order, items, DataValidationError, DatabaseConnectionError
from . import app, status # HTTP Status Codes
######################################################################
# Configure the Root route before OpenAPI
######################################################################
@app.route("/")
def index():
"""Base URL for our service"""
return app.send_static_file("index.html")
######################################################################
# Configure Swagger before initializing it
######################################################################
api = Api(app,
version='1.0.0',
title='Order Demo REST API Service',
description='This is a sample server Order server.',
default='orders',
default_label='Order operations',
doc='/apidocs', # default also could use doc='/apidocs/'
)
# Define the model so that the docs reflect what can be sent
create_model = api.model('Order', {
'id': fields.Integer(readOnly=True,
description='The unique id assigned internally by service')
})
order_model = api.inherit(
'OrderModel',
create_model,
{
'date': fields.Date(required=True,
description='The date of the Order'),
'customer': fields.String(required=True,
description='The customer name of the Order (e.g., Yoda, Obiwan, Mace, etc.)'),
'total': fields.Float(required=True,
description='The total quantity of the Order'),
'status': fields.String(required=True,
description='The status of the Order (e.g., Open, Closed, Cancelled, Refunded, etc.)')
}
)
# query string arguments
order_args = reqparse.RequestParser()
order_args.add_argument('customer', type=str, required=False, help='List Orders by customer')
order_args.add_argument('status', type=str, required=False, help='List Orders by status')
item_model = api.inherit(
'ItemModel',
create_model,
{
'order_id': fields.Integer(required=True,
description='The id assigned to the Order'),
'product_id': fields.Integer(required=True,
description='The id assigned to the Product'),
'quantity': fields.Integer(required=True,
description='The quantity of the Order Item'),
'price': fields.Integer(required=True,
description='The price of the Order Item'),
'total': fields.Float(required=True,
description='The total price of the Order Item')
}
)
######################################################################
# Special Error Handlers
######################################################################
@api.errorhandler(DataValidationError)
def request_validation_error(error):
""" Handles Value Errors from bad data """
message = str(error)
app.logger.error(message)
return {
'status_code': status.HTTP_400_BAD_REQUEST,
'error': 'Bad Request',
'message': message
}, status.HTTP_400_BAD_REQUEST
@api.errorhandler(DatabaseConnectionError)
def database_connection_error(error):
""" Handles Database Errors from connection attempts """
message = str(error)
app.logger.critical(message)
return {
'status_code': status.HTTP_503_SERVICE_UNAVAILABLE,
'error': 'Service Unavailable',
'message': message
}, status.HTTP_503_SERVICE_UNAVAILABLE
######################################################################
# PATH: /orders/{id}
######################################################################
@api.route('/orders/<order_id>')
@api.param('order_id', 'The Order identifier')
class OrderResource(Resource):
"""
OrderResource class
Allows the manipulation of a single Order
GET /order{id} - Returns a Order with the id
PUT /order{id} - Update a Order with the id
DELETE /order{id} - Deletes a Order with the id
"""
######################################################################
# RETRIEVE AN ORDER
######################################################################
@api.doc('get_orders')
@api.response(404, 'Order not found')
@api.marshal_with(order_model)
def get(self, order_id):
"""
Retrieve a single Order
This endpoint will return a Order based on it's id
"""
app.logger.info("Request for order with id: %s", order_id)
order = Order.find(order_id)
if not order:
abort(status.HTTP_404_NOT_FOUND, "Order with id '{}' was not found.".format(order_id))
app.logger.info("Returning order: %s", order.id)
return order.serialize(), status.HTTP_200_OK
######################################################################
# UPDATE AN EXISTING ORDER
######################################################################
@api.doc('update_orders')
@api.response(404, 'Order not found')
@api.response(400, 'The posted Order data was not valid')
@api.expect(order_model)
@api.marshal_with(order_model)
def put(self, order_id):
"""
Update an Order
This endpoint will update an Order based the body that is posted
"""
app.logger.info("Request to update order with id: %s", order_id)
check_content_type("application/json")
order = Order.find(order_id)
if not order:
abort(status.HTTP_404_NOT_FOUND, "Order with id '{}' was not found.".format(order_id))
app.logger.debug('Payload = %s', api.payload)
data = api.payload
order.deserialize(data)
order.id = order_id
order.update()
app.logger.info("Order with ID [%s] updated.", Order.id)
return order.serialize(), status.HTTP_200_OK
######################################################################
# DELETE AN ORDER
######################################################################
@api.doc('delete_orders')
@api.response(204, 'Order deleted')
def delete(self, order_id):
"""
Delete a Order
This endpoint will delete a Order based the id specified in the path
"""
app.logger.info("Request to delete order with id: %s", order_id)
order = Order.find(order_id)
if order:
order.delete()
app.logger.info("Order with ID [%s] delete complete.", order_id)
return '', status.HTTP_204_NO_CONTENT
######################################################################
# PATH: /orders
######################################################################
@api.route('/orders', strict_slashes=False)
class OrderCollection(Resource):
""" Handles all interactions with collections of Orders """
######################################################################
# LIST ALL ORDERS
######################################################################
@api.doc('list_orders')
@api.expect(order_args, validate=True)
@api.marshal_list_with(order_model)
def get(self):
"""Returns all of the Orders"""
app.logger.info("Request for Order List")
orders = []
customer = request.args.get("customer")
order_status = request.args.get("status")
if customer:
orders = Order.find_by_customer(customer)
elif order_status:
orders = Order.find_by_status(order_status)
else:
orders = Order.all()
results = [order.serialize() for order in orders]
app.logger.info("Returning %d orders", len(results))
return results, status.HTTP_200_OK
######################################################################
# ADD A NEW ORDER
######################################################################
@api.doc('create_orders')
@api.response(400, 'The posted data was not valid')
@api.expect(create_model)
@api.marshal_with(order_model, code=201)
def post(self):
"""
Creates a Order
This endpoint will create a Order based the data in the body that is posted
"""
app.logger.info("Request to create a order")
check_content_type("application/json")
order = Order()
app.logger.debug('Payload = %s', api.payload)
order.deserialize(request.get_json())
order.create()
app.logger.info("Order with ID [%s] created.", order.id)
location_url = api.url_for(OrderResource, order_id=order.id, _external=True)
return order.serialize(), status.HTTP_201_CREATED, {"Location": location_url}
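# Example request (illustrative; assumes the service listens on
# localhost:5000):
#     curl -X POST http://localhost:5000/orders \
#          -H "Content-Type: application/json" \
#          -d '{"date": "2021-06-01", "customer": "Yoda", "total": 9.99, "status": "Open"}'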
######################################################################
# PATH: /orders/{id}/cancelled
######################################################################
@api.route('/orders/<int:order_id>/cancelled')
@api.param('order_id', 'The Order identifier')
class CancelResource(Resource):
""" Cancel actions on a Order """
def put(self, order_id):
"""
Cancel an Order
This endpoint will cancel an Order based the body that is posted
"""
app.logger.info("Request to Cancel order with id: %s", order_id)
order = Order.find(order_id)
if not order:
abort(status.HTTP_404_NOT_FOUND, f"Order with id '{order_id}' was not found.")
if order.status == "Cancelled":
abort(status.HTTP_409_CONFLICT, f"Order with id '{order_id}' is already cancelled.")
order.status = "Cancelled"
order.update()
app.logger.info("Order with ID [%s] cancelled.", Order.id)
return order.serialize(), status.HTTP_200_OK
######################################################################
# PATH: /orders/{order_id}/items/{id}
######################################################################
@api.route('/orders/<int:order_id>/items/<int:id>', strict_slashes=False)
class OrderItemsResource(Resource):
"""
OrderItemResource class
Allows the manipulation of a single Order Item
GET /orders/{order_id}/items/{id} - Returns a Order Item with the id
PUT /orders/{order_id}/items/{id} - Update a Order Item with the id
DELETE /orders/{order_id}/items/{id} - Deletes a Order Item with the id
"""
######################################################################
# RETRIEVE An ORDER ITEM FROM ORDER
######################################################################
@api.doc('get_items')
@api.response(404, 'Item not found')
@api.marshal_with(item_model)
def get(self, order_id, id):
"""
Get an Order Item
This endpoint returns just an order item
"""
app.logger.info("Request to retrieve Order Item %s for Order id: %s", (id, order_id))
item = items.find(id)
if not item:
abort(status.HTTP_404_NOT_FOUND, f"Order with id '{id}' could not be found.")
return item.serialize(), status.HTTP_200_OK
######################################################################
# UPDATE AN ITEM
######################################################################
@api.doc('update_items')
@api.response(404, 'Item not found')
@api.response(400, 'The posted Item data was not valid')
@api.expect(item_model)
@api.marshal_with(item_model)
def put(self, order_id, id):
"""
Update an Item
This endpoint will update an Item based on the body that is posted
"""
app.logger.info("Request to update Item %s for Order id: %s", (id, order_id))
check_content_type("application/json")
item = items.find(id)
if not item:
abort(status.HTTP_404_NOT_FOUND, f"Item with id '{id}' could not be found.")
item.deserialize(api.payload)
item.id = id
item.update()
return item.serialize(), status.HTTP_200_OK
######################################################################
# DELETE AN ITEM
######################################################################
@api.doc('delete_items')
@api.response(204, 'Order Item deleted')
def delete(self, order_id, id):
"""
Delete an Item
This endpoint will delete an Item based on the id specified in the path
"""
app.logger.info("Request to delete Item %s for Order id: %s", (id, order_id))
item = items.find(id)
if item:
item.delete()
return make_response("", status.HTTP_204_NO_CONTENT)
######################################################################
# PATH: /orders/{id}/items
######################################################################
@api.route('/orders/<int:order_id>/items', strict_slashes=False)
class OrderItemsCollection(Resource):
""" Handles all interactions with collections of Order Items """
######################################################################
# LIST ORDER ITEMS
######################################################################
@api.doc('list_items')
@api.marshal_list_with(item_model)
def get(self, order_id):
""" Returns all of the Items for an Order """
app.logger.info("Request for all Items for Order with id: %s", order_id)
order = Order.find(order_id)
if not order:
abort(status.HTTP_404_NOT_FOUND, f"Order with id '{order_id}' could not be found.")
results = [item.serialize() for item in order.items]
return results, status.HTTP_200_OK
######################################################################
# ADD AN ITEM TO AN ORDER
######################################################################
@api.doc('create_items')
@api.response(400, 'The posted data was not valid')
@api.expect(create_model)
@api.marshal_with(item_model, code=201)
def post(self, order_id):
"""
Create an Item on an Order
This endpoint will add an item to an order
"""
app.logger.info("Request to create an Item for Order with id: %s", order_id)
check_content_type("application/json")
order = Order.find(order_id)
if not order:
abort(status.HTTP_404_NOT_FOUND, f"Order with id '{order_id}' could not be found.")
item = items()
item.deserialize(request.get_json())
order.items.append(item)
order.update()
message = item.serialize()
return message, status.HTTP_201_CREATED
######################################################################
# U T I L I T Y F U N C T I O N S
######################################################################
def abort(error_code: int, message: str):
"""Logs errors before aborting"""
app.logger.error(message)
api.abort(error_code, message)
@app.before_first_request
def init_db():
""" Initializes the SQLAlchemy app """
global app
Order.init_db(app)
items.init_db(app)
def check_content_type(content_type):
""" Checks that the media type is correct """
if request.headers["Content-Type"] == content_type:
return
app.logger.error("Invalid Content-Type: %s", request.headers["Content-Type"])
abort(status.HTTP_415_UNSUPPORTED_MEDIA_TYPE, f"Content-Type must be {content_type}")
# load sample data
def data_load(payload):
""" Loads a Order into the database """
order = Order(payload['date'], payload['customer'], payload['total'], payload['status'])
order.create()
def data_reset():
""" Removes all Orders from the database """
Order.remove_all()
|
StarcoderdataPython
|
1717488
|
import pandas as pd
import re
import math
import os
calc_pricing = pd.read_csv("calc_pricing_results.csv")
schedule_contracts = pd.read_csv("schedule_contracts_summary.csv")
def is_nan(x):
try:
return math.isnan(x)
except (TypeError, ValueError):
return False
def parse_shortened_sin(SIN):
if SIN.count(",") > 1:
tmp = [elem.strip() for elem in SIN.split(",")]
new_tmp = []
full_syn = tmp[0].split("-")[0] +"-"
for elem in tmp:
if len(elem) == 1 or len(elem) == 3:
new_tmp.append(full_syn+elem)
else:
new_tmp.append(elem)
return ", ".join(new_tmp)
else:
return SIN
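# Illustrative example (hypothetical input): SINs abbreviated after the first
# entry are re-expanded with the shared prefix, e.g.
#   parse_shortened_sin("874-1, 2, 3")  ->  "874-1, 874-2, 874-3"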
def dealing_with_and(SIN):
SIN = SIN.replace("and",", ")
SIN = SIN.replace("&",", ")
SIN = ' '.join(SIN.split())
SIN = SIN.replace(" , ",", ")
return SIN
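# Illustrative example (hypothetical input): conjunctions become commas and
# the surrounding whitespace is normalised, e.g.
#   dealing_with_and("100 and 200 & 300")  ->  "100, 200, 300"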
def replace_parens(SIN):
#this comes from this stackoverflow:
#http://stackoverflow.com/questions/14596884/remove-text-between-and-in-python
return re.sub(r"[\(\[].*?[\)\]]", "", SIN)
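# Illustrative example (hypothetical input): bracketed qualifiers are dropped,
# leaving the surrounding whitespace intact, e.g.
#   replace_parens("520-4 (R), 520-5")  ->  "520-4 , 520-5"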
def iterate_sins(SIN):
if SIN.count("-") > 1 and "," not in SIN:
split_string = SIN.split("-")
prefix = split_string[0]
start = int(split_string[1])
end = int(split_string[2])
return ",".join([prefix+"-"+str(elem) for elem in range(start,end+1)])
return SIN
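# Illustrative example (hypothetical input): a dashed range is expanded into
# the individual SINs it covers, e.g.
#   iterate_sins("100-1-3")  ->  "100-1,100-2,100-3"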
def parse_whitespace_only(SIN):
if SIN.count(",") == 0:
SIN = ",".join(SIN.split())
return SIN
else:
return SIN
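# Illustrative example (hypothetical input): a purely space-separated list is
# rewritten as a comma-separated one, e.g.
#   parse_whitespace_only("100 200 300")  ->  "100,200,300"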
def parse_sin(SIN,count):
before_processing = SIN
SIN = SIN.replace("R","")
SIN = SIN.replace("\n",", ")
SIN = SIN.replace("plus RC SINs","")
SIN = SIN.replace("SINS","")
SIN = SIN.replace(";",",")
SIN = SIN.replace("C","")
SIN = SIN.replace(" -","")
SIN = SIN.strip()
SIN = ",".join(list(set([elem.strip() for elem in SIN.split(",")])))
SIN = parse_whitespace_only(SIN)
SIN = replace_parens(SIN)
SIN = dealing_with_and(SIN)
SIN = SIN.replace("RC","")
SIN = SIN.replace("/",", ")
SIN = iterate_sins(SIN)
if SIN.count(" ") > 1 and SIN.count(",") == 0:
SIN = SIN.replace(" ",", ")
try:
SIN = parse_shortened_sin(SIN)
except:
import code
code.interact(local=locals())
if SIN == before_processing:
count += 1
return SIN,count
os.chdir("results")
part = 0
new_calc_pricing = pd.DataFrame()
count = 0
for i in calc_pricing.index:
print("on element ",i)
print("count of SIN numbers that didn't need processing ",count)
row = calc_pricing.loc[i].to_dict()
if not is_nan(row["SIN"]):
print("value is not nan")
try:
SINS,count = parse_sin(row["SIN"],count)
except:
import code
code.interact(local=locals())
SIN_list = []
for SIN in SINS.split(","):
tmp = row.copy()
tmp["SIN"] = SIN
SIN_list.append(tmp)
#data is everything in the row
for data in SIN_list:
new_calc_pricing = new_calc_pricing.append(data,ignore_index=True)
else:
new_calc_pricing = new_calc_pricing.append(row, ignore_index=True)
if i % 1000 == 0:
new_calc_pricing.to_csv("new_calc_pricing_part_"+str(part)+".csv")
new_calc_pricing = pd.DataFrame()
part += 1
new_calc_pricing.to_csv("new_calc_pricing_part_"+str(part)+".csv")
|
StarcoderdataPython
|
133332
|
#!/usr/bin/env python
# Copyright: (c) 2020, <NAME>
# Apache 2.0 License, http://www.apache.org/licenses/
from __future__ import absolute_import, division, print_function
import sys
import os
import os.path
__metaclass__ = type
DOCUMENTATION = r"""
---
module: python_script
short_description: Evaluate python code
# If this is part of a collection, you need to use semantic versioning,
# i.e. the version is of the form "2.5.0" and not "2.4".
version_added: "1.0.0"
description:
- This C(python_script) module allows manipulating ansible facts with python instead of jinja
options:
script:
description: Python script code
type: str
version_added: '1.1'
script_args:
description: This is the argument passed to the python code
required: false
type: raw
# Specify this value according to your collection
# in format of namespace.collection.doc_fragment_name
extends_documentation_fragment:
- dmrub.util.python_script
author:
- <NAME> (@dmrub)
"""
EXAMPLES = r"""
# Pass in string
- name: Test with string
python_script:
script_args: hello world
script: |
result["script_args"] = 'goodbye'
result["changed"] = True
"""
RETURN = r"""
# These are examples of possible return values, and in general should use other names for return values.
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes, to_native
from ansible.module_utils.six import PY3
def run_module():
# define available arguments/parameters a user can pass to the module
module_args = dict(
script=dict(type="str", no_log=True, required=True),
script_args=dict(type="raw", required=False),
)
# seed the result dict in the object
# we primarily care about changed and state
# changed is if this module effectively modified the target
# state will include any script_args that you want your module to pass back
# for consumption, for example, in a subsequent task
result = dict(
changed=False,
)
# the AnsibleModule object will be our abstraction working with Ansible
# this includes instantiation, a couple of common attr would be the
# args/params passed to the execution, as well as if the module
# supports check mode
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
script = module.params["script"]
# if the user is working with this module in only check mode we do not
# want to make any changes to the environment, just return the current
# state with no modifications
if module.check_mode:
module.exit_json(**result)
exec_globals = globals()
exec_globals["module"] = module
exec_globals["result"] = result
exec(script, exec_globals)
# manipulate or modify the state as needed (this is going to be the
# part where your module will do what it needs to do)
# result['original_message'] = module.params['name']
# result['message'] = 'goodbye'
# use whatever logic you need to determine whether or not this module
# made any modifications to your target
# if module.params['new']:
# result['changed'] = True
# during the execution of the module, if there is an exception or a
# conditional state that effectively causes a failure, run
# AnsibleModule.fail_json() to pass in the message and the result
# if module.params['name'] == 'fail me':
# module.fail_json(msg='You requested this to fail', **result)
# in the event of a successful module execution, you will want to
# simple AnsibleModule.exit_json(), passing the key/value results
module.exit_json(**result)
def main():
run_module()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
133745
|
import opentracing
import logging
import time
from opentracing import tags
from haystack import HaystackTracer
from haystack import LoggerRecorder
def setup_tracer():
global recorder
recorder = LoggerRecorder()
# instantiate a haystack tracer for this service and set a common tag which applies to all traces
tracer = HaystackTracer("Service-A",
recorder,
common_tags={"app.version": "1234"})
# now set the global tracer, so we can reference it with opentracing.tracer anywhere in our app
opentracing.set_global_tracer(tracer)
def handle_request(request_body):
logging.info(f"handling new request - {request_body}")
# this next line does a few things.. namely, it starts a new scope (which contains the span) to represent
# the scope of this "work". In this case, it should represent the work involved in processing the entire request
with opentracing.tracer.start_active_span("a_controller_method") as parent_scope:
# once within the context of an active span, there are three different ways to assign additional info
# or attributes to the span
"""
First we'll add some tags to the span
Tags are key:value pairs that enable user-defined annotation of spans in order to query, filter, and
comprehend trace data
Tags have semantic conventions, see https://opentracing.io/specification/conventions/
*tags do NOT propagate to child spans
"""
parent_scope.span.set_tag(tags.HTTP_URL, "http://localhost/mocksvc")
parent_scope.span.set_tag(tags.HTTP_METHOD, "GET")
parent_scope.span.set_tag(tags.SPAN_KIND, "server")
"""
Next we'll add some baggage to the span.
Baggage carries data across process boundaries.. aka it DOES propagate to child spans
"""
parent_scope.span.set_baggage_item("business_id", "1234")
"""
Next lets assume you need to authenticate the client making the request
"""
with opentracing.tracer.start_active_span("authenticate"):
time.sleep(.25) # fake doing some authentication work..
"""
Finally, we'll add a log event to the request level span.
Logs are key:value pairs that are useful for capturing timed log messages and other
debugging or informational output from the application itself. Logs may be useful for
documenting a specific moment or event within the span (in contrast to tags which
should apply to the span regardless of time).
"""
parent_scope.span.log_kv(
{
"some_string_value": "foobar",
"an_int_value": 42,
"a_float_value": 4.2,
"a_bool_value": True,
"an_obj_as_value": {
"ok": "hmm",
"blah": 4324
}
})
try:
"""
Now lets say that as part of processing this request, we need to invoke some downstream service
"""
make_a_downstream_request()
except Exception:
# if that fails, we'll tag the request-scoped span with an error so we have success/fail metrics in haystack
parent_scope.span.set_tag(tags.ERROR, True)
def act_as_remote_service(headers):
# remote service would have it's own tracer
with HaystackTracer("Service-B", recorder) as tracer:
# simulate network transfer delay
time.sleep(.25)
# now as-if this was executing on the remote service, extract the parent span ctx from headers
upstream_span_ctx = tracer.extract(opentracing.Format.HTTP_HEADERS, headers)
with tracer.start_active_span("controller_method", child_of=upstream_span_ctx) as parent_scope:
parent_scope.span.set_tag(tags.SPAN_KIND, "server")
# simulate downstream service doing some work before replying
time.sleep(1)
def make_a_downstream_request():
# create a child span representing the downstream request from current span.
# Behind the scenes this uses the scope_manger to access the current active
# span (which would be our request-scoped span called "a_controller_method" and create a child of it.
with opentracing.tracer.start_active_span("downstream_req") as child_scope:
child_scope.span.set_tag(tags.SPAN_KIND, "client")
# In order for the downstream client to use this trace as a parent, we must propagate the current span context.
# This is done by calling .inject() on the tracer
headers = {}
opentracing.tracer.inject(child_scope.span.context, opentracing.Format.HTTP_HEADERS, headers)
act_as_remote_service(headers)
# process the response from downstream
time.sleep(.15)
def main():
"""
This function represents a "parent" application/service.. i.e. the originating
service of our traces in this example.
In this scenario, we're pretending to be a web server.
"""
# at some point during application init, you'll want to instantiate the global tracer
setup_tracer()
# here we assume the web framework invokes this method to handle the given request
handle_request("hello world")
# app shutdown
logging.info("done")
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
main()
|
StarcoderdataPython
|
3290733
|
<reponame>jscholes/accessify-prototype
from datetime import datetime
import logging
import os
import os.path
import platform
import sys
from appdirs import user_config_dir
import tolk
import ujson as json
import wx
from accessify import constants
try:
from accessify import credentials
has_credentials = True
except ImportError:
has_credentials = False
from accessify import gui
from accessify import ipc
from accessify import library
from accessify import playback
from accessify import spotify
logger = logging.getLogger(__package__)
LOG_RECORD_FORMAT = '%(levelname)s - %(asctime)s:%(msecs)d:\n%(name)s: %(message)s'
LOG_DATE_TIME_FORMAT = '%d-%m-%Y @ %H:%M:%S'
LOG_FILE_DATE_TIME_FORMAT = '%Y_%m_%d-%H_%M_%S'
def main():
config_directory = user_config_dir(appname=constants.APP_NAME, appauthor=False, roaming=True)
hwnd_file = os.path.join(config_directory, '{0}.hwnd'.format(constants.APP_NAME))
app = wx.App()
instance_checker = wx.SingleInstanceChecker()
if instance_checker.IsAnotherRunning():
hwnd = ipc.get_existing_hwnd(hwnd_file)
if hwnd:
ipc.focus_window(hwnd)
else:
gui.utils.show_error(None, 'Accessify is already running.')
return
log_directory = os.path.join(config_directory, 'logs')
try:
os.makedirs(log_directory)
except FileExistsError:
pass
log_filename = '{0}.{1}.log'.format(constants.APP_NAME.lower(), datetime.utcnow().strftime(LOG_FILE_DATE_TIME_FORMAT))
log_path = os.path.join(log_directory, log_filename)
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(log_path, mode='w', encoding='utf-8')
handler.setFormatter(logging.Formatter(LOG_RECORD_FORMAT, LOG_DATE_TIME_FORMAT))
root_logger.addHandler(handler)
log_startup_info()
config_path = os.path.join(config_directory, 'config.json')
config = load_config(config_path)
if has_credentials:
client_id = credentials.client_id
client_secret = credentials.client_secret
else:
client_id = os.environ.get('SPOTIFY_CLIENT_ID')
client_secret = os.environ.get('SPOTIFY_CLIENT_SECRET')
if not client_id or not client_secret:
print('No Spotify credentials provided. Please either set the environment variables SPOTIFY_CLIENT_ID and SPOTIFY_CLIENT_SECRET or create a credentials module with client_id and client_secret variables.')
logger.error('No Spotify credentials provided.')
return
try:
tolk.load()
except Exception:
pass
auth_agent = spotify.webapi.authorisation.AuthorisationAgent(client_id, client_secret)
spotify_api_client = spotify.webapi.WebAPIClient(auth_agent)
psignalman = playback.PlaybackSignalman()
playback_controller = playback.PlaybackController.start(psignalman, config)
playback_proxy = playback_controller.proxy()
lsignalman = library.LibrarySignalman()
library_controller = library.LibraryController.start(lsignalman, config, spotify_api_client)
library_proxy = library_controller.proxy()
window = gui.main.MainWindow(playback_proxy, library_proxy)
ipc.save_hwnd(window.GetHandle(), hwnd_file)
psignalman.state_changed.connect(window.onPlaybackStateChange)
psignalman.track_changed.connect(window.onTrackChange)
psignalman.unplayable_content.connect(window.onUnplayableContent)
psignalman.connection_established.connect(window.onSpotifyConnectionEstablished)
psignalman.spotify_not_running.connect(window.onSpotifyNotRunning)
psignalman.error.connect(window.onSpotifyError)
lsignalman.authorisation_required.connect(window.onAuthorisationRequired)
lsignalman.authorisation_completed.connect(window.onAuthorisationCompleted)
# lsignalman.authorisation_error.connect(window.onAuthorisationError)
playback_proxy.connect_to_spotify()
library_proxy.log_in()
app.MainLoop()
# Shutdown
playback_controller.stop()
library_controller.stop()
save_config(config, config_path)
tolk.unload()
logger.info('Application shutdown complete')
def log_startup_info():
logger.info('Version: {0}'.format(constants.APP_VERSION))
# Windows info
release, version, service_pack, processor_type = platform.win32_ver()
uname = platform.uname()
logger.info('OS: Windows {0} {1} ({2}) running on {3}'.format(release, service_pack, version, uname.machine))
# Python info
logger.info('Python {0}'.format(sys.version))
def load_config(path):
logger.info('Attempting to load config from {0}'.format(path))
try:
with open(path, 'r', encoding='utf-8') as f:
config = json.load(f)
except (FileNotFoundError, ValueError):
logger.info('Valid existing config not found, creating default')
return default_config
for key in default_config.keys():
if key not in config:
config.update({key: default_config[key]})
return config
def save_config(config_dict, path):
with open(path, 'w', encoding='utf-8') as f:
json.dump(config_dict, f, indent=4)
logger.info('Config saved to {0}'.format(path))
default_config = {
'spotify_access_token': '',
'spotify_refresh_token': '',
'spotify_polling_interval': 60,
}
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
96910
|
from werkzeug.utils import find_modules, import_string
from config import Config
from flower_bed_designer import helpers
from flower_bed_designer.blueprints.plant import views
def register_blueprints(app):
for name in find_modules('flower_bed_designer.blueprints', recursive=True):
mod = import_string(name)
if hasattr(mod, 'bp'):
app.register_blueprint(mod.bp)
def create_app(test_config=None):
app = helpers.ApiFlask(__name__)
if test_config is None:
app.config.from_object(Config)
else:
app.config.from_mapping(test_config)
app.register_error_handler(helpers.ApiException, lambda err: err.to_result())
register_blueprints(app)
return app
|
StarcoderdataPython
|
4824419
|
<gh_stars>1-10
"""
Proteomics Datatypes
"""
import logging
import re
from galaxy.datatypes import data
from galaxy.datatypes.binary import Binary
from galaxy.datatypes.data import Text
from galaxy.datatypes.sequence import Sequence
from galaxy.datatypes.sniff import build_sniff_from_prefix
from galaxy.datatypes.tabular import Tabular, TabularData
from galaxy.datatypes.xml import GenericXml
from galaxy.util import nice_size
log = logging.getLogger(__name__)
class Wiff(Binary):
"""Class for wiff files."""
edam_data = "data_2536"
edam_format = "format_3710"
file_ext = 'wiff'
allow_datatype_change = False
composite_type = 'auto_primary_file'
def __init__(self, **kwd):
Binary.__init__(self, **kwd)
self.add_composite_file(
'wiff',
description='AB SCIEX files in .wiff format. This can contain all needed information or only metadata.',
is_binary=True)
self.add_composite_file(
'wiff_scan',
description='AB SCIEX spectra file (wiff.scan), if the corresponding .wiff file only contains metadata.',
optional='True', is_binary=True)
def generate_primary_file(self, dataset=None):
rval = ['<html><head><title>Wiff Composite Dataset </title></head><p/>']
rval.append('<div>This composite dataset is composed of the following files:<p/><ul>')
for composite_name, composite_file in self.get_composite_files(dataset=dataset).items():
fn = composite_name
opt_text = ''
if composite_file.optional:
opt_text = ' (optional)'
if composite_file.get('description'):
rval.append('<li><a href="%s" type="text/plain">%s (%s)</a>%s</li>' % (fn, fn, composite_file.get('description'), opt_text))
else:
rval.append('<li><a href="%s" type="text/plain">%s</a>%s</li>' % (fn, fn, opt_text))
rval.append('</ul></div></html>')
return "\n".join(rval)
@build_sniff_from_prefix
class MzTab(Text):
"""
exchange format for proteomics and metabolomics results
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test.mztab')
>>> MzTab().sniff(fname)
True
>>> fname = get_test_fname('test.mztab2')
>>> MzTab().sniff(fname)
False
"""
edam_data = "data_3681"
file_ext = "mztab"
# section names (except MTD)
_sections = ["PRH", "PRT", "PEH", "PEP", "PSH", "PSM", "SMH", "SML", "COM"]
# mandatory metadata fields and list of allowed entries (in lower case)
# (or None if everything is allowed)
_man_mtd = {"mzTab-mode": ["complete", "summary"],
"mzTab-type": ['quantification', 'identification'],
"description": None}
_version_re = r"(1)(\.[0-9])?(\.[0-9])?"
def __init__(self, **kwd):
super(MzTab, self).__init__(**kwd)
def set_peek(self, dataset, is_multi_byte=False):
"""Set the peek and blurb text"""
if not dataset.dataset.purged:
dataset.peek = data.get_file_peek(dataset.file_name)
dataset.blurb = 'mzTab Format'
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def sniff_prefix(self, file_prefix):
""" Determines whether the file is the correct type. """
has_version = False
found_man_mtd = set()
contents = file_prefix.string_io()
for line in contents:
if re.match(r"^\s*$", line):
continue
line = line.strip("\r\n").split("\t")
if line[0] == "MTD":
if line[1] == "mzTab-version" and re.match(self._version_re, line[2]) is not None:
has_version = True
elif line[1] in self._man_mtd and (self._man_mtd[line[1]] is None or line[2].lower() in self._man_mtd[line[1]]):
found_man_mtd.add(line[1])
elif not line[0] in self._sections:
return False
return has_version and found_man_mtd == set(self._man_mtd.keys())
class MzTab2(MzTab):
"""
exchange format for proteomics and metabolomics results
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test.mztab2')
>>> MzTab2().sniff(fname)
True
>>> fname = get_test_fname('test.mztab')
>>> MzTab2().sniff(fname)
False
"""
file_ext = "mztab2"
_sections = ["SMH", "SML", "SFH", "SMF", "SEH", "SME", "COM"]
_version_re = r"(2)(\.[0-9])?(\.[0-9])?-M$"
_man_mtd = {"mzTab-ID": None}
def __init__(self, **kwd):
super(MzTab2, self).__init__(**kwd)
def set_peek(self, dataset, is_multi_byte=False):
"""Set the peek and blurb text"""
if not dataset.dataset.purged:
dataset.peek = data.get_file_peek(dataset.file_name)
dataset.blurb = 'mzTab2 Format'
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
@build_sniff_from_prefix
class Kroenik(Tabular):
"""
Kroenik (HardKloer sibling) files
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test.kroenik')
>>> Kroenik().sniff(fname)
True
>>> fname = get_test_fname('test.peplist')
>>> Kroenik().sniff(fname)
False
"""
file_ext = "kroenik"
def __init__(self, **kwd):
super(Kroenik, self).__init__(**kwd)
self.column_names = ["File", "First Scan", "Last Scan", "Num of Scans", "Charge", "Monoisotopic Mass", "Base Isotope Peak", "Best Intensity", "Summed Intensity", "First RTime", "Last RTime", "Best RTime", "Best Correlation", "Modifications"]
def display_peek(self, dataset):
"""Returns formated html of peek"""
return self.make_html_table(dataset, column_names=self.column_names)
def sniff_prefix(self, file_prefix):
fh = file_prefix.string_io()
line = [_.strip() for _ in fh.readline().split("\t")]
if line != self.column_names:
return False
line = fh.readline().split("\t")
try:
[int(_) for _ in line[1:5]]
[float(_) for _ in line[5:13]]
except ValueError:
return False
return True
@build_sniff_from_prefix
class PepList(Tabular):
"""
Peplist file as used in OpenMS
https://github.com/OpenMS/OpenMS/blob/0fc8765670a0ad625c883f328de60f738f7325a4/src/openms/source/FORMAT/FileHandler.cpp#L432
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test.peplist')
>>> PepList().sniff(fname)
True
>>> fname = get_test_fname('test.psms')
>>> PepList().sniff(fname)
False
"""
file_ext = "peplist"
def __init__(self, **kwd):
super(PepList, self).__init__(**kwd)
self.column_names = ["m/z", "rt(min)", "snr", "charge", "intensity"]
def display_peek(self, dataset):
"""Returns formated html of peek"""
return self.make_html_table(dataset, column_names=self.column_names)
def sniff_prefix(self, file_prefix):
fh = file_prefix.string_io()
line = [_.strip() for _ in fh.readline().split("\t")]
if line == self.column_names:
return True
return False
@build_sniff_from_prefix
class PSMS(Tabular):
"""
Percolator tab-delimited output (PSM level, .psms) as used in OpenMS
https://github.com/OpenMS/OpenMS/blob/0fc8765670a0ad625c883f328de60f738f7325a4/src/openms/source/FORMAT/FileHandler.cpp#L453
see also http://www.kojak-ms.org/docs/percresults.html
Note that the data rows can have more columns than the header line
since ProteinIds are listed tab-separated.
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test.psms')
>>> PSMS().sniff(fname)
True
>>> fname = get_test_fname('test.kroenik')
>>> PSMS().sniff(fname)
False
"""
file_ext = "psms"
def __init__(self, **kwd):
super(PSMS, self).__init__(**kwd)
self.column_names = ["PSMId", "score", "q-value", "posterior_error_prob", "peptide", "proteinIds"]
def display_peek(self, dataset):
"""Returns formated html of peek"""
return self.make_html_table(dataset, column_names=self.column_names)
def sniff_prefix(self, file_prefix):
fh = file_prefix.string_io()
line = [_.strip() for _ in fh.readline().split("\t")]
if line == self.column_names:
return True
return False
@build_sniff_from_prefix
class PEFF(Sequence):
"""
PSI Extended FASTA Format
https://github.com/HUPO-PSI/PEFF
"""
file_ext = "peff"
def sniff_prefix(self, file_prefix):
"""
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( 'test.peff' )
>>> PEFF().sniff( fname )
True
>>> fname = get_test_fname( 'sequence.fasta' )
>>> PEFF().sniff( fname )
False
"""
fh = file_prefix.string_io()
if re.match(r"# PEFF \d+.\d+", fh.readline()):
return True
else:
return False
class PepXmlReport(Tabular):
"""pepxml converted to tabular report"""
edam_data = "data_2536"
file_ext = "pepxml.tsv"
def __init__(self, **kwd):
super(PepXmlReport, self).__init__(**kwd)
self.column_names = ['Protein', 'Peptide', 'Assumed Charge', 'Neutral Pep Mass (calculated)', 'Neutral Mass', 'Retention Time', 'Start Scan', 'End Scan', 'Search Engine', 'PeptideProphet Probability', 'Interprophet Probability']
def display_peek(self, dataset):
"""Returns formated html of peek"""
return self.make_html_table(dataset, column_names=self.column_names)
class ProtXmlReport(Tabular):
"""protxml converted to tabular report"""
edam_data = "data_2536"
file_ext = "protxml.tsv"
comment_lines = 1
def __init__(self, **kwd):
super(ProtXmlReport, self).__init__(**kwd)
self.column_names = [
"Entry Number", "Group Probability",
"Protein", "Protein Link", "Protein Probability",
"Percent Coverage", "Number of Unique Peptides",
"Total Independent Spectra", "Percent Share of Spectrum ID's",
"Description", "Protein Molecular Weight", "Protein Length",
"Is Nondegenerate Evidence", "Weight", "Precursor Ion Charge",
"Peptide sequence", "Peptide Link", "NSP Adjusted Probability",
"Initial Probability", "Number of Total Termini",
"Number of Sibling Peptides Bin", "Number of Instances",
"Peptide Group Designator", "Is Evidence?"]
def display_peek(self, dataset):
"""Returns formated html of peek"""
return self.make_html_table(dataset, column_names=self.column_names)
class Dta(TabularData):
"""dta
The first line contains the singly protonated peptide mass (MH+) and the
peptide charge state separated by a space. Subsequent lines contain space
separated pairs of fragment ion m/z and intensity values.
"""
file_ext = "dta"
comment_lines = 0
def set_meta(self, dataset, **kwd):
column_types = []
data_row = []
data_lines = 0
if dataset.has_data():
with open(dataset.file_name, 'r') as dtafile:
for line in dtafile:
data_lines += 1
# Guess column types
for cell in data_row:
column_types.append(self.guess_type(cell))
# Set metadata
dataset.metadata.data_lines = data_lines
dataset.metadata.comment_lines = 0
dataset.metadata.column_types = ['float', 'float']
dataset.metadata.columns = 2
dataset.metadata.column_names = ['m/z', 'intensity']
dataset.metadata.delimiter = " "
@build_sniff_from_prefix
class Dta2d(TabularData):
"""
dta2d: files with three tab/space-separated columns.
The default format is: retention time (seconds) , m/z , intensity.
If the first line starts with '#', a different order is defined by the
order of the keywords 'MIN' (retention time in minutes) or 'SEC' (retention
time in seconds), 'MZ', and 'INT'.
Example: '#MZ MIN INT'
The peaks of one retention time have to be in subsequent lines.
Note: the sniffer only detects (tab or space separated) dta2d files with a
correct header; sniffing files without a header seems too generic
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test.dta2d')
>>> Dta2d().sniff(fname)
True
>>> fname = get_test_fname('test.edta')
>>> Dta2d().sniff(fname)
False
"""
file_ext = "dta2d"
comment_lines = 0
def _parse_header(self, line):
if len(line) != 3 or len(line[0]) < 3 or not line[0].startswith("#"):
return None
line[0] = line[0].lstrip("#")
line = [_.strip() for _ in line]
if 'MZ' not in line or 'INT' not in line or ('MIN' not in line and 'SEC' not in line):
return None
return line
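# Illustrative examples (hypothetical header rows): a valid header must carry
# a leading '#', MZ, INT, and one of MIN/SEC, e.g.
#   _parse_header(["#MZ", "MIN", "INT"])  ->  ["MZ", "MIN", "INT"]
#   _parse_header(["MZ", "MIN", "INT"])   ->  None   (no leading '#')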
def _parse_delimiter(self, line):
if len(line.split(" ")) == 3:
return " "
elif len(line.split("\t")) == 3:
return "\t"
return None
def _parse_dataline(self, line):
try:
line = [float(_) for _ in line]
except ValueError:
return False
if not all(_ >= 0 for _ in line):
return False
return True
def set_meta(self, dataset, **kwd):
data_lines = 0
delim = None
if dataset.has_data():
with open(dataset.file_name, 'r') as dtafile:
for line in dtafile:
if delim is None:
delim = self._parse_delimiter(line)
dataset.metadata.column_names = self._parse_header(line.split(delim))
data_lines += 1
# Set metadata
if delim is not None:
dataset.metadata.delimiter = delim
dataset.metadata.data_lines = data_lines
dataset.metadata.comment_lines = 0
dataset.metadata.column_types = ['float', 'float', 'float']
dataset.metadata.columns = 3
if dataset.metadata.column_names is None or dataset.metadata.column_names == []:
dataset.metadata.comment_lines += 1
dataset.metadata.data_lines -= 1
dataset.metadata.column_names = ['SEC', 'MZ', 'INT']
def sniff_prefix(self, file_prefix):
sep = None
header = None
for idx, line in enumerate(file_prefix.line_iterator()):
line = line.strip()
if sep is None:
sep = self._parse_delimiter(line)
if sep is None:
return False
line = line.split(sep)
if len(line) != 3:
return False
if idx == 0:
header = self._parse_header(line)
if (header is None) and not self._parse_dataline(line):
return False
elif not self._parse_dataline(line):
return False
if sep is None or header is None:
return False
return True
@build_sniff_from_prefix
class Edta(TabularData):
"""
Input text file containing tab, space or comma separated columns.
The separator between columns is checked in the first line in this order.
It supports three variants of this format.
1. Columns are: RT, MZ, Intensity. A header is optional.
2. Columns are: RT, MZ, Intensity, Charge, <Meta-Data> columns{0,}. A header is mandatory.
3. Columns are: (RT, MZ, Intensity, Charge){1,}, <Meta-Data> columns{0,}
Header is mandatory. First quadruplet is the consensus. All following
quadruplets describe the sub-features. This variant is discerned from
variant #2 by the name of the fifth column, which is required to be RT1
(or rt1). All other column names for sub-features are faithfully ignored.
Note: the sniffer only detects files with a header.
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test.edta')
>>> Edta().sniff(fname)
True
>>> fname = get_test_fname('test.dta2d')
>>> Edta().sniff(fname)
False
"""
file_ext = "edta"
comment_lines = 0
def _parse_delimiter(self, line):
if len(line.split(" ")) >= 3:
return " "
elif len(line.split("\t")) >= 3:
return "\t"
elif len(line.split(",")) >= 3:
return "\t"
return None
def _parse_type(self, line):
"""
parse the type from the header line
types 1-3 as in the class docs; 0 means type 1 without (or with a wrong) header
"""
if len(line) < 3:
return None
line = [_.lower().replace("/", "") for _ in line]
if len(line) == 3:
if line[0] == "rt" and line[1] == "mz" and (line[2] == "int" or line[2] == "intensity"):
return 1
else:
return None
if line[0] != "rt" or line[1] != "mz" or (line[2] != "int" and line[2] != "intensity") or line[3] != "charge":
return None
if not line[4].startswith("rt"):
return 2
else:
return 3
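# Illustrative examples (hypothetical header rows, matched case-insensitively
# with any "/" removed):
#   ["RT", "MZ", "INT"]                                ->  1
#   ["RT", "m/z", "intensity", "charge", "quality"]    ->  2
#   ["RT", "MZ", "INT", "charge", "RT1", "MZ1", ...]   ->  3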
def _parse_dataline(self, line, tpe):
if tpe == 2 or tpe == 3:
idx = 4
else:
idx = 3
try:
line = [float(_) for _ in line[:idx]]
except ValueError:
return False
if not all(_ >= 0 for _ in line[:idx]):
return False
return True
def _clean_header(self, line):
for idx, el in enumerate(line):
el = el.lower()
if el.startswith("rt"):
line[idx] = "RT"
elif el.startswith("int"):
line[idx] = "intensity"
elif el.startswith("mz"):
line[idx] = "m/z"
elif el.startswith("charge"):
line[idx] = "charge"
else:
break
if idx // 4 > 0:
line[idx] += str(idx // 4)
return line
def set_meta(self, dataset, **kwd):
data_lines = 0
delim = None
if dataset.has_data():
with open(dataset.file_name, 'r') as dtafile:
for idx, line in enumerate(dtafile):
if idx == 0:
delim = self._parse_delimiter(line)
tpe = self._parse_type(line.split(delim))
if tpe == 0:
dataset.metadata.column_names = ["RT", "m/z", "intensity"]
else:
dataset.metadata.column_names = self._clean_header(line.split(delim))
data_lines += 1
# Set metadata
if delim is not None:
dataset.metadata.delimiter = delim
for c in dataset.metadata.column_names:
if any(c.startswith(_) for _ in ["RT", "m/z", "intensity", "charge"]):
dataset.metadata.column_types.append("float")
else:
dataset.metadata.column_types.append("str")
dataset.metadata.data_lines = data_lines
dataset.metadata.comment_lines = 0
dataset.metadata.columns = len(dataset.metadata.column_names)
if tpe > 0:
dataset.metadata.comment_lines += 1
dataset.metadata.data_lines -= 1
def sniff_prefix(self, file_prefix):
sep = None
tpe = None
for idx, line in enumerate(file_prefix.line_iterator()):
line = line.strip("\r\n")
if sep is None:
sep = self._parse_delimiter(line)
if sep is None:
return False
line = line.split(sep)
if idx == 0:
tpe = self._parse_type(line)
if tpe is None:
return False
elif tpe == 0 and not self._parse_dataline(line, tpe):
return False
elif not self._parse_dataline(line, tpe):
return False
if tpe is None:
return False
return True
class ProteomicsXml(GenericXml):
""" An enhanced XML datatype used to reuse code across several
proteomic/mass-spec datatypes. """
edam_data = "data_2536"
edam_format = "format_2032"
def sniff_prefix(self, file_prefix):
""" Determines whether the file is the correct XML type. """
contents = file_prefix.string_io()
while True:
line = contents.readline().strip()
if line is None or not line.startswith('<?'):
break
# pattern match <root or <ns:root for any ns string
pattern = r'<(\w*:)?%s' % self.root
return line is not None and re.search(pattern, line) is not None
def set_peek(self, dataset, is_multi_byte=False):
"""Set the peek and blurb text"""
if not dataset.dataset.purged:
dataset.peek = data.get_file_peek(dataset.file_name)
dataset.blurb = self.blurb
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
class ParamXml(ProteomicsXml):
"""store Parameters in XML formal"""
file_ext = "paramxml"
blurb = "parameters in xmls"
root = "parameters|PARAMETERS"
class PepXml(ProteomicsXml):
"""pepXML data"""
edam_format = "format_3655"
file_ext = "pepxml"
blurb = 'pepXML data'
root = "msms_pipeline_analysis"
class MascotXML(ProteomicsXml):
"""mzXML data"""
file_ext = "mascotxml"
blurb = "mascot Mass Spectrometry data"
root = "mascot_search_results"
class MzML(ProteomicsXml):
"""mzML data"""
edam_format = "format_3244"
file_ext = "mzml"
blurb = 'mzML Mass Spectrometry data'
root = "(mzML|indexedmzML)"
class NmrML(ProteomicsXml):
"""nmrML data"""
# No edam format number yet.
file_ext = "nmrml"
blurb = 'nmrML NMR data'
root = "nmrML"
class ProtXML(ProteomicsXml):
"""protXML data"""
file_ext = "protxml"
blurb = 'prot XML Search Results'
root = "protein_summary"
class MzXML(ProteomicsXml):
"""mzXML data"""
edam_format = "format_3654"
file_ext = "mzxml"
blurb = "mzXML Mass Spectrometry data"
root = "mzXML"
class MzData(ProteomicsXml):
"""mzData data"""
edam_format = "format_3245"
file_ext = "mzdata"
blurb = "mzData Mass Spectrometry data"
root = "mzData"
class MzIdentML(ProteomicsXml):
edam_format = "format_3247"
file_ext = "mzid"
blurb = "XML identified peptides and proteins."
root = "MzIdentML"
class TraML(ProteomicsXml):
edam_format = "format_3246"
file_ext = "traml"
blurb = "TraML transition list"
root = "TraML"
class TrafoXML(ProteomicsXml):
file_ext = "trafoxml"
blurb = "RT alignment tranformation"
root = "TrafoXML"
class MzQuantML(ProteomicsXml):
edam_format = "format_3248"
file_ext = "mzq"
blurb = "XML quantification data"
root = "MzQuantML"
class ConsensusXML(ProteomicsXml):
file_ext = "consensusxml"
blurb = "OpenMS multiple LC-MS map alignment file"
root = "consensusXML"
class FeatureXML(ProteomicsXml):
file_ext = "featurexml"
blurb = "OpenMS feature file"
root = "featureMap"
class IdXML(ProteomicsXml):
file_ext = "idxml"
blurb = "OpenMS identification file"
root = "IdXML"
class TandemXML(ProteomicsXml):
edam_format = "format_3711"
file_ext = "tandem"
blurb = "X!Tandem search results file"
root = "bioml"
class UniProtXML(ProteomicsXml):
file_ext = "uniprotxml"
blurb = "UniProt Proteome file"
root = "uniprot"
class XquestXML(ProteomicsXml):
file_ext = "xquest.xml"
blurb = "XQuest XML file"
root = "xquest_results"
class XquestSpecXML(ProteomicsXml):
"""spec.xml"""
file_ext = "spec.xml"
blurb = 'xquest_spectra'
root = "xquest_spectra"
class QCML(ProteomicsXml):
"""qcml
https://github.com/OpenMS/OpenMS/blob/113c49d01677f7f03343ce7cd542d83c99b351ee/share/OpenMS/SCHEMAS/mzQCML_0_0_5.xsd
https://github.com/OpenMS/OpenMS/blob/3cfc57ad1788e7ab2bd6dd9862818b2855234c3f/share/OpenMS/SCHEMAS/qcML_0.0.7.xsd
"""
file_ext = "qcml"
blurb = 'QualityAssessments to runs'
root = "qcML|MzQualityML)"
class Mgf(Text):
"""Mascot Generic Format data"""
edam_data = "data_2536"
edam_format = "format_3651"
file_ext = "mgf"
def set_peek(self, dataset, is_multi_byte=False):
"""Set the peek and blurb text"""
if not dataset.dataset.purged:
dataset.peek = data.get_file_peek(dataset.file_name)
dataset.blurb = 'mgf Mascot Generic Format'
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def sniff(self, filename):
mgf_begin_ions = "BEGIN IONS"
max_lines = 100
with open(filename) as handle:
for i, line in enumerate(handle):
line = line.rstrip()
if line == mgf_begin_ions:
return True
if i > max_lines:
return False
class MascotDat(Text):
"""Mascot search results """
edam_data = "data_2536"
edam_format = "format_3713"
file_ext = "mascotdat"
def set_peek(self, dataset, is_multi_byte=False):
"""Set the peek and blurb text"""
if not dataset.dataset.purged:
dataset.peek = data.get_file_peek(dataset.file_name)
dataset.blurb = 'mascotdat Mascot Search Results'
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def sniff(self, filename):
mime_version = "MIME-Version: 1.0 (Generated by Mascot version 1.0)"
max_lines = 10
with open(filename) as handle:
for i, line in enumerate(handle):
line = line.rstrip()
if line == mime_version:
return True
if i > max_lines:
return False
class ThermoRAW(Binary):
"""Class describing a Thermo Finnigan binary RAW file"""
edam_data = "data_2536"
edam_format = "format_3712"
file_ext = "thermo.raw"
def sniff(self, filename):
# Thermo Finnigan RAW format is proprietary and hence not well documented.
# Files start with 2 bytes that seem to differ followed by F\0i\0n\0n\0i\0g\0a\0n
# This combination represents 17 bytes, but to play safe we read 20 bytes from
# the start of the file.
try:
header = open(filename, 'rb').read(20)
finnigan = b'F\0i\0n\0n\0i\0g\0a\0n'
if header.find(finnigan) != -1:
return True
return False
except Exception:
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Thermo Finnigan RAW file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return "Thermo Finnigan RAW file (%s)" % (nice_size(dataset.get_size()))
@build_sniff_from_prefix
class Msp(Text):
""" Output of NIST MS Search Program chemdata.nist.gov/mass-spc/ftp/mass-spc/PepLib.pdf """
file_ext = "msp"
@staticmethod
def next_line_starts_with(contents, prefix):
next_line = contents.readline()
return next_line is not None and next_line.startswith(prefix)
def sniff_prefix(self, file_prefix):
""" Determines whether the file is a NIST MSP output file."""
begin_contents = file_prefix.contents_header
if "\n" not in begin_contents:
return False
lines = begin_contents.splitlines()
if len(lines) < 2:
return False
return lines[0].startswith("Name:") and lines[1].startswith("MW:")
class SPLibNoIndex(Text):
"""SPlib without index file """
file_ext = "splib_noindex"
def set_peek(self, dataset, is_multi_byte=False):
"""Set the peek and blurb text"""
if not dataset.dataset.purged:
dataset.peek = data.get_file_peek(dataset.file_name)
dataset.blurb = 'Spectral Library without index files'
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
@build_sniff_from_prefix
class SPLib(Msp):
"""SpectraST Spectral Library. Closely related to msp format"""
file_ext = "splib"
composite_type = 'auto_primary_file'
def __init__(self, **kwd):
Msp.__init__(self, **kwd)
self.add_composite_file('library.splib',
description='Spectral Library. Contains actual library spectra',
is_binary=False)
self.add_composite_file('library.spidx',
description='Spectrum index', is_binary=False)
self.add_composite_file('library.pepidx',
description='Peptide index', is_binary=False)
def generate_primary_file(self, dataset=None):
rval = ['<html><head><title>Spectral Library Composite Dataset </title></head><p/>']
rval.append('<div>This composite dataset is composed of the following files:<p/><ul>')
for composite_name, composite_file in self.get_composite_files(dataset=dataset).items():
fn = composite_name
opt_text = ''
if composite_file.optional:
opt_text = ' (optional)'
if composite_file.get('description'):
rval.append('<li><a href="%s" type="text/plain">%s (%s)</a>%s</li>' % (fn, fn, composite_file.get('description'), opt_text))
else:
rval.append('<li><a href="%s" type="text/plain">%s</a>%s</li>' % (fn, fn, opt_text))
rval.append('</ul></div></html>')
return "\n".join(rval)
def set_peek(self, dataset, is_multi_byte=False):
"""Set the peek and blurb text"""
if not dataset.dataset.purged:
dataset.peek = data.get_file_peek(dataset.file_name)
dataset.blurb = 'splib Spectral Library Format'
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def sniff_prefix(self, file_prefix):
""" Determines whether the file is a SpectraST generated file.
"""
contents = file_prefix.string_io()
return Msp.next_line_starts_with(contents, "Name:") and Msp.next_line_starts_with(contents, "LibID:")
@build_sniff_from_prefix
class Ms2(Text):
file_ext = "ms2"
def sniff_prefix(self, file_prefix):
""" Determines whether the file is a valid ms2 file."""
contents = file_prefix.string_io()
header_lines = []
while True:
line = contents.readline()
if not line:
return False
if line.strip() == "":
continue
elif line.startswith('H\t'):
header_lines.append(line)
else:
break
for header_field in ['CreationDate', 'Extractor', 'ExtractorVersion', 'ExtractorOptions']:
found_header = False
for header_line in header_lines:
if header_line.startswith('H\t%s' % (header_field)):
found_header = True
break
if not found_header:
return False
return True
# unsniffable binary format, should do something about this
class XHunterAslFormat(Binary):
""" Annotated Spectra in the HLF format http://www.thegpm.org/HUNTER/format_2006_09_15.html """
file_ext = "hlf"
class Sf3(Binary):
"""Class describing a Scaffold SF3 files"""
file_ext = "sf3"
class ImzML(Binary):
"""
Class for imzML files.
http://www.imzml.org
"""
edam_format = "format_3682"
file_ext = 'imzml'
allow_datatype_change = False
composite_type = 'auto_primary_file'
def __init__(self, **kwd):
Binary.__init__(self, **kwd)
"""The metadata"""
self.add_composite_file(
'imzml',
description='The imzML metadata component.',
is_binary=False)
"""The mass spectral data"""
self.add_composite_file(
'ibd',
description='The mass spectral data component.',
is_binary=True)
def generate_primary_file(self, dataset=None):
rval = ['<html><head><title>imzML Composite Dataset </title></head><p/>']
rval.append('<div>This composite dataset is composed of the following files:<p/><ul>')
for composite_name, composite_file in self.get_composite_files(dataset=dataset).items():
fn = composite_name
opt_text = ''
if composite_file.get('description'):
rval.append('<li><a href="%s" type="text/plain">%s (%s)</a>%s</li>' % (fn, fn, composite_file.get('description'), opt_text))
else:
rval.append('<li><a href="%s" type="text/plain">%s</a>%s</li>' % (fn, fn, opt_text))
rval.append('</ul></div></html>')
return "\n".join(rval)
class Analyze75(Binary):
"""
Mayo Analyze 7.5 files
http://www.imzml.org
"""
file_ext = 'analyze75'
allow_datatype_change = False
composite_type = 'auto_primary_file'
def __init__(self, **kwd):
Binary.__init__(self, **kwd)
"""The header file. Provides information about dimensions, identification, and processing history."""
self.add_composite_file(
'hdr',
description='The Analyze75 header file.',
is_binary=True)
"""The image file. Image data, whose data type and ordering are described by the header file."""
self.add_composite_file(
'img',
description='The Analyze75 image file.',
is_binary=True)
"""The optional t2m file."""
self.add_composite_file(
't2m',
description='The Analyze75 t2m file.',
optional=True,
is_binary=True)
def generate_primary_file(self, dataset=None):
rval = ['<html><head><title>Analyze75 Composite Dataset.</title></head><p/>']
rval.append('<div>This composite dataset is composed of the following files:<p/><ul>')
for composite_name, composite_file in self.get_composite_files(dataset=dataset).items():
fn = composite_name
opt_text = ''
if composite_file.optional:
opt_text = ' (optional)'
if composite_file.get('description'):
rval.append('<li><a href="%s" type="text/plain">%s (%s)</a>%s</li>' % (fn, fn, composite_file.get('description'), opt_text))
else:
rval.append('<li><a href="%s" type="text/plain">%s</a>%s</li>' % (fn, fn, opt_text))
rval.append('</ul></div></html>')
return "\n".join(rval)
|
StarcoderdataPython
|
144870
|
from flask import Flask, render_template, request
from werkzeug.utils import secure_filename
from fitparse import FitFile
import os
import subprocess
import json
app = Flask(__name__)
app.config["UPLOAD_FOLDER"] = "temp/"
app.config["MAX_CONTENT_PATH"] = 5000000
@app.route('/')
def upload():
return render_template('index.html')
@app.route('/display', methods = ['GET', 'POST'])
def display_file():
if request.method == 'POST':
f = request.files['file']
filename = secure_filename(f.filename)
f.save(app.config['UPLOAD_FOLDER'] + filename)
#file = open(app.config['UPLOAD_FOLDER'] + filename,"r")
process = subprocess.run('fitdump ./temp/'+filename+' -n device_info -t json', shell=True, check=True, stdout=subprocess.PIPE, universal_newlines=True)
output = process.stdout
data = json.loads(output)
content = {}
for i in range(len(data)):
index = (data[i]['data']['device_index'])
version = (data[i]['data']['software_version'])
manufacturer = (data[i]['data']['manufacturer'])
serial = (data[i]['data']['serial_number'])
battery_status = (data[i]['data']['battery_status'])
battery_voltage = (data[i]['data']['battery_voltage'])
timestamp = (data[i]['data']['timestamp'])
content[i] = {
"index": index,
"version": version,
"manufacturer": manufacturer,
"serial": serial,
"battery_status": battery_status,
"battery_voltage": battery_voltage,
"timestamp": timestamp
}
os.remove("./temp/"+filename)
#print(content)
return render_template('content.html', len = len(content), content=content)
if __name__ == '__main__':
app.run(host='0.0.0.0')
|
StarcoderdataPython
|
1743145
|
# -*- coding: utf-8 -*-
"""
pybitcoin
~~~~~
:copyright: (c) 2014-2016 by Halfmoon Labs, Inc.
:license: MIT, see LICENSE for more details.
"""
import opcodes
from .network import broadcast_transaction, send_to_address, get_unspents, \
embed_data_in_blockchain, make_send_to_address_tx, make_op_return_tx, \
analyze_private_key, serialize_sign_and_broadcast, \
sign_all_unsigned_inputs
from .scripts import make_pay_to_address_script, make_op_return_script, \
script_to_hex
from .serialize import serialize_input, serialize_output, \
serialize_transaction, deserialize_transaction
from .outputs import make_op_return_outputs, make_pay_to_address_outputs
from .utils import flip_endian, variable_length_int
|
StarcoderdataPython
|
4825918
|
class plural:
def __init__(self, value):
self.value = value
def __format__(self, format_spec):
v = self.value
singular, sep, plural = format_spec.partition('|')
plural = plural or f'{singular}s'
if abs(v) != 1:
return f'{v} {plural}'
return f'{v} {singular}'
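# Illustrative usage: the format spec carries "singular|plural"; the plural
# form defaults to the singular plus "s" when omitted, e.g.
#   f"{plural(1):child|children}"  ->  '1 child'
#   f"{plural(3):box|boxes}"       ->  '3 boxes'
#   f"{plural(2):file}"            ->  '2 files'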
def human_join(seq, delim=', ', final='or'):
size = len(seq)
if size == 0:
return ''
if size == 1:
return seq[0]
if size == 2:
return f'{seq[0]} {final} {seq[1]}'
return delim.join(seq[:-1]) + f' {final} {seq[-1]}'
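# Illustrative usage: joins a sequence into readable English, e.g.
#   human_join(['red', 'green', 'blue'])        ->  'red, green or blue'
#   human_join(['read', 'write'], final='and')  ->  'read and write'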
class TabularData:
def __init__(self):
self._widths = []
self._columns = []
self._rows = []
def set_columns(self, columns):
self._columns = columns
self._widths = [len(c) + 2 for c in columns]
def add_row(self, row):
rows = [str(r) for r in row]
self._rows.append(rows)
for index, element in enumerate(rows):
width = len(element) + 2
if width > self._widths[index]:
self._widths[index] = width
def add_rows(self, rows):
for row in rows:
self.add_row(row)
def render(self):
"""Renders a table in rST format.
Example:
+-------+-----+
| Name | Age |
+-------+-----+
| Alice | 24 |
| Bob | 19 |
+-------+-----+
"""
sep = '+'.join('-' * w for w in self._widths)
sep = f'+{sep}+'
to_draw = [sep]
def get_entry(d):
elem = '|'.join(f'{e:^{self._widths[i]}}' for i, e in enumerate(d))
return f'|{elem}|'
to_draw.append(get_entry(self._columns))
to_draw.append(sep)
for row in self._rows:
to_draw.append(get_entry(row))
to_draw.append(sep)
return '\n'.join(to_draw)
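# Illustrative usage (hypothetical data), producing the rST grid shown in the
# render() docstring:
#   table = TabularData()
#   table.set_columns(['Name', 'Age'])
#   table.add_rows([('Alice', 24), ('Bob', 19)])
#   print(table.render())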
|
StarcoderdataPython
|
30385
|
<filename>curso em video/python/mundo 1/ex033.py
c = int(input('enter the first number: '))
b = int(input('enter the second number: '))
a = int(input('enter the third number: '))
cores = {'vermelho': '\033[0;31m',
'azul' : '\033[1;34m',
'zero': '\033[m' }
# which one is the largest
maior = a
if b > c and b > a:
maior = b
if c > b and c > a:
maior = c
print('The largest value was {}{}{}'.format(cores['azul'], maior, cores['zero']))
# which one is the smallest
menor = c
if a < b and a < c:
menor = a
if b < a and b < c:
menor = b
print('O menor valor foi {}{}{}'.format(cores['vermelho'],menor,cores['zero']))
|
StarcoderdataPython
|
1733450
|
from .utils.codeparsers import code_tree
from .utils.objecthashers import complex_hasher


def determine_metadata(func, args, kwargs,
                       exclusion_list, globals_list,
                       old_version=False):
    metadata = dict()
    metadata['func'] = func
    metadata['args'] = args
    metadata['kwargs'] = kwargs
    (metadata['code'],
     metadata['other_globals']) = code_tree(func, args, kwargs,
                                            exclusion_list, globals_list,
                                            old_version=old_version)
    if old_version:
        metadata.pop('other_globals')
    return refactor_metadata_for_storage(metadata)


def refactor_metadata_for_readability(metadata):
    m = metadata.copy()
    code = m['code']
    code = {k: '-code snipped-' for k, v in code.items()}
    args = m['args']
    args = [(arg[:20] + ['...', '-args snipped-']
             if isinstance(arg, list) and len(arg) > 20 else arg)
            for arg in args]
    args = [(set(list(arg)[:20]).union(set(['...', '-args snipped-']))
             if isinstance(arg, set) and len(arg) > 20 else arg)
            for arg in args]
    args = [dict_refactor(arg) if isinstance(arg, dict) else arg
            for arg in args]
    kwargs = m['kwargs']
    kwargs = dict_refactor(kwargs)
    other_globals = m['other_globals']
    for key, val in other_globals.items():
        if isinstance(val, list) and len(val) > 20:
            other_globals[key] = val[:20] + ['...', '-other_globals snipped-']
    m2 = metadata.copy()
    m2['code'] = code
    m2['args'] = args
    m2['kwargs'] = kwargs
    m2['other_globals'] = other_globals
    return m2


def dict_refactor(kwargs):
    for key, val in kwargs.items():
        if isinstance(val, list) and len(val) > 20:
            kwargs[key] = val[:20] + ['...', '-snipped-']
        elif isinstance(val, set) and len(val) > 20:
            kwargs[key] = set(list(val)[:20]).union(
                set(['...', '-snipped-']))
        elif isinstance(val, dict):
            for key1, val1 in val.items():
                if isinstance(val1, list) and len(val1) > 20:
                    val[key1] = val1[:20] + ['...', '-snipped-']
            kwargs[key] = val
    return kwargs


def refactor_metadata_for_storage(metadata):
    m, m2 = metadata.copy(), metadata.copy()
    args, kwargs = m['args'], m['kwargs']
    args = [complex_hasher(arg) for arg in args]
    args = hash_arglist(args)
    kw = dict_hasher(kwargs.copy())
    m2['args'] = tuple(args)
    m2['kwargs'] = kw
    return m2


def hash_arglist(arglist):
    if isinstance(arglist, list) or isinstance(arglist, tuple):
        arglist = hash_all_in_arglist(arglist)
    argsnew = []
    for arg in arglist:
        if isinstance(arg, list) or isinstance(arg, tuple):
            arg = hash_all_in_arglist(arg)
        elif isinstance(arg, dict):
            arg = dict_hasher(arg.copy())
        argsnew.append(arg)
    if isinstance(arglist, tuple):
        return tuple(argsnew)
    elif isinstance(arglist, list):
        return argsnew
    return arglist


def hash_all_in_arglist(arglist):
    argsnew = []
    for arg in arglist:
        if isinstance(arg, list) or isinstance(arg, tuple):
            arg2 = [complex_hasher(a) for a in arg]
            arg2 = hash_all_in_arglist(arg2)
            if isinstance(arg, tuple):
                arg2 = tuple(arg2)
        else:
            arg2 = arg
        argsnew.append(arg2)
    if isinstance(arglist, tuple):
        return tuple(argsnew)
    return argsnew


def dict_hasher(kw):
    kw = kw.copy()
    for key, val in kw.items():
        kw[key] = complex_hasher(val)
        if isinstance(val, list):
            kw[key] = [complex_hasher(arg) for arg in val]
        elif isinstance(val, dict):
            m3 = val.copy()
            for key_small, val_small in m3.items():
                m3[key_small] = complex_hasher(val_small)
            kw[key] = m3
    return kw
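

# Usage sketch (added for illustration; assumes `code_tree` and
# `complex_hasher` resolve as imported above. `square` is a made-up example
# function, not part of the original module.)
def _example_usage():
    def square(x):
        return x * x

    meta = determine_metadata(square, (3,), {},
                              exclusion_list=[], globals_list=[])
    # meta['args'] is now a tuple of hashed values and meta['kwargs'] a hashed
    # dict, suitable for use as a storage/cache key.
    return meta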
|
StarcoderdataPython
|
167620
|
<reponame>Rohitpandit021/jina
import sys

import pytest

from jina import Document
from jina.clients.request import request_generator
from jina.proto import jina_pb2
from jina.types.message import Message
from jina.types.request import _trigger_fields, Request
from jina.enums import CompressAlgo

from tests import random_docs


@pytest.mark.parametrize(
    'field',
    _trigger_fields.difference({'command', 'args', 'flush', 'propagate', 'targets'}),
)
@pytest.mark.parametrize(
    'algo',
    [None, CompressAlgo.NONE],
)
def test_lazy_access(field, algo):
    reqs = (
        Request(r.SerializeToString(), algo)
        for r in request_generator('/', random_docs(10))
    )
    for r in reqs:
        assert not r.is_decompressed

        # access the parametrized field
        print(getattr(r, field))

        # now it is read
        assert r.is_decompressed


@pytest.mark.parametrize(
    'algo',
    [None, CompressAlgo.NONE],
)
def test_multiple_access(algo):
    reqs = [
        Request(r.SerializeToString(), algo)
        for r in request_generator('/', random_docs(10))
    ]
    for r in reqs:
        assert not r.is_decompressed
        assert r
        assert not r.is_decompressed

    for r in reqs:
        assert not r.is_decompressed
        assert r.data
        assert r.is_decompressed


@pytest.mark.parametrize(
    'algo',
    [None, CompressAlgo.NONE],
)
def test_lazy_nest_access(algo):
    reqs = (
        Request(r.SerializeToString(), algo)
        for r in request_generator('/', random_docs(10))
    )
    for r in reqs:
        assert not r.is_decompressed
        # write access to a nested field
        r.docs[0].id = '1' * 16
        # now it is read
        assert r.is_decompressed
        assert r.data.docs[0].id == '1' * 16


@pytest.mark.parametrize(
    'algo',
    [None, CompressAlgo.NONE],
)
def test_lazy_change_message_type(algo):
    reqs = (
        Request(r.SerializeToString(), algo)
        for r in request_generator('/', random_docs(10))
    )
    for r in reqs:
        assert not r.is_decompressed
        # write access to the control command
        r.control.command = jina_pb2.RequestProto.ControlRequestProto.IDLE
        # now it is read
        assert r.is_decompressed
        assert len(r.data.docs) == 0


@pytest.mark.parametrize(
    'algo',
    [None, CompressAlgo.NONE],
)
def test_lazy_append_access(algo):
    reqs = (
        Request(r.SerializeToString(), algo)
        for r in request_generator('/', random_docs(10))
    )
    for r in reqs:
        assert not r.is_decompressed
        r = Request().as_typed_request('data')
        # write access via append
        r.docs.append(Document())
        # now it is read
        assert r.is_decompressed


@pytest.mark.parametrize(
    'algo',
    [None, CompressAlgo.NONE],
)
def test_lazy_clear_access(algo):
    reqs = (
        Request(r.SerializeToString(), algo)
        for r in request_generator('/', random_docs(10))
    )
    for r in reqs:
        assert not r.is_decompressed
        # write access via ClearField
        r.ClearField('data')
        # now it is read
        assert r.is_decompressed


@pytest.mark.parametrize(
    'algo',
    [None, CompressAlgo.NONE],
)
def test_lazy_nested_clear_access(algo):
    reqs = (
        Request(r.SerializeToString(), algo)
        for r in request_generator('/', random_docs(10))
    )
    for r in reqs:
        assert not r.is_decompressed
        # write access via a nested ClearField
        r.data.ClearField('docs')
        # now it is read
        assert r.is_decompressed


def test_lazy_msg_access():
    # note: when `Message` is instantiated without an `envelope`, the `request`
    # header is accessed up front, so the request is decompressed immediately
    messages = [
        Message(
            None,
            r.SerializeToString(),
            'test',
            '123',
            request_id='123',
            request_type='DataRequest',
        )
        for r in request_generator('/', random_docs(10))
    ]
    for m in messages:
        assert m.request.is_decompressed
        assert m.envelope
        assert len(m.dump()) == 3
        assert m.request.is_decompressed

    for m in messages:
        assert m.request.is_decompressed
        assert m.request
        assert len(m.dump()) == 3
        assert m.request.is_decompressed

    for m in messages:
        assert m.request.is_decompressed
        assert m.request.data.docs
        assert len(m.dump()) == 3
        assert m.request.is_decompressed


def test_lazy_msg_access_with_envelope():
    envelope_proto = jina_pb2.EnvelopeProto()
    envelope_proto.compression.algorithm = 'NONE'
    envelope_proto.request_type = 'DataRequest'
    messages = [
        Message(
            envelope_proto,
            r.SerializeToString(),
        )
        for r in request_generator('/', random_docs(10))
    ]
    for m in messages:
        assert not m.request.is_decompressed
        assert m.envelope
        assert len(m.dump()) == 3
        assert not m.request.is_decompressed
        assert m.request._pb_body is None
        assert m.request._buffer is not None
        assert m.proto
        assert m.request.is_decompressed
        assert m.request._pb_body is not None
        assert m.request._buffer is None


def test_message_size():
    reqs = [
        Message(None, r, 'test', '123') for r in request_generator('/', random_docs(10))
    ]
    for r in reqs:
        assert r.size == 0
        assert sys.getsizeof(r.envelope.SerializeToString())
        assert sys.getsizeof(r.request.SerializeToString())
        assert len(r.dump()) == 3
        assert r.size > sys.getsizeof(r.envelope.SerializeToString()) + sys.getsizeof(
            r.request.SerializeToString()
        )


@pytest.mark.parametrize(
    'algo',
    [None, CompressAlgo.NONE],
)
def test_lazy_request_fields(algo):
    reqs = (
        Request(r.SerializeToString(), algo)
        for r in request_generator('/', random_docs(10))
    )
    for r in reqs:
        assert list(r.DESCRIPTOR.fields_by_name.keys())


@pytest.mark.parametrize(
    'typ,pb_typ',
    [
        ('data', jina_pb2.RequestProto.DataRequestProto),
        ('control', jina_pb2.RequestProto.ControlRequestProto),
    ],
)
def test_empty_request_type(typ, pb_typ):
    r = Request()
    assert r.request_type is None
    with pytest.raises(ValueError):
        print(r.body)

    r = r.as_typed_request(typ)
    assert r._request_type == typ
    assert isinstance(r.body, pb_typ)


@pytest.mark.parametrize(
    'typ,pb_typ',
    [
        ('data', jina_pb2.RequestProto.DataRequestProto),
    ],
)
def test_add_doc_to_type(typ, pb_typ):
    r = Request().as_typed_request(typ)
    for _ in range(10):
        r.docs.append(Document())
        r.groundtruths.append(Document())
    assert len(r.docs) == 10
    assert len(r.groundtruths) == 10
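

# Condensed sketch of the lazy-decompression pattern these tests exercise
# (added for illustration; uses only calls that already appear above):
def _sketch_lazy_decompression():
    r = next(request_generator('/', random_docs(1)))
    req = Request(r.SerializeToString(), None)
    assert not req.is_decompressed  # still raw serialized bytes
    assert req.data.docs            # first access deserializes the payload
    assert req.is_decompressed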
|
StarcoderdataPython
|
167874
|
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__version__ = '0.0.1'
|
StarcoderdataPython
|